AI Integration for Smart Camera Systems: How to Get Started with Image Processing?
Discover practical ways to bring your computer vision projects to life with Katmera SBCs.
AI Hardware Requirements
GPU Acceleration
Mali-G52 GPU features:
• Shader cores: 2
• Graphics API support: OpenGL ES 3.2, Vulkan 1.1
• Compute API: OpenCL 2.0 Full Profile
• NEON SIMD support: ARMv8.2 compliant

Performance optimization:
```python
import cv2

# GPU acceleration check
def check_gpu_support():
    try:
        # Check OpenCL support in the local OpenCV build
        print("OpenCV build information:")
        print(cv2.getBuildInformation())

        # Enable OpenCL acceleration if it is available
        if cv2.ocl.haveOpenCL():
            print("✓ OpenCL support available")
            cv2.ocl.setUseOpenCL(True)
        else:
            print("✗ No OpenCL support")
    except Exception as e:
        print(f"GPU check error: {e}")

check_gpu_support()
```
Memory Optimization
LPDDR4 RAM configuration:
• Capacity options: 2GB, 4GB, 8GB
• Speed: 3200 MHz
• Bandwidth: 25.6 GB/s (3200 MT/s × 8 bytes per transfer on a 64-bit bus)

Model cache strategies:
```python
import tensorflow as tf

class ModelCache:
    def __init__(self, cache_size=512):
        self.cache_size = cache_size * 1024 * 1024  # MB to bytes
        self.models = {}

    def load_model(self, model_path):
        if model_path in self.models:
            return self.models[model_path]

        # Load the (pre-quantized) TFLite model with multi-threaded inference
        interpreter = tf.lite.Interpreter(
            model_path=model_path,
            num_threads=4  # Quad-core optimization
        )
        interpreter.allocate_tensors()
        self.models[model_path] = interpreter
        return interpreter

# Usage
cache = ModelCache(cache_size=128)  # 128MB cache
model = cache.load_model("yolo_quantized.tflite")
```
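Note that the class above records cache_size but never enforces it, so the models dict can grow without bound. A minimal eviction sketch using an OrderedDict as an LRU; the size accounting here is an assumption (number of cached models rather than bytes):

```python
from collections import OrderedDict

import tensorflow as tf

class LRUModelCache:
    """Hypothetical variant of ModelCache that evicts the least
    recently used interpreter once max_models is exceeded."""

    def __init__(self, max_models=4):
        self.max_models = max_models
        self.models = OrderedDict()

    def load_model(self, model_path):
        if model_path in self.models:
            self.models.move_to_end(model_path)  # mark as recently used
            return self.models[model_path]

        if len(self.models) >= self.max_models:
            self.models.popitem(last=False)  # drop the least recently used model

        interpreter = tf.lite.Interpreter(model_path=model_path, num_threads=4)
        interpreter.allocate_tensors()
        self.models[model_path] = interpreter
        return interpreter
```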
Popular AI Libraries
OpenCV Installation and Optimization
System requirements and installation:

```bash
# Update system packages
sudo apt update && sudo apt upgrade -y

# OpenCV dependencies
sudo apt install -y python3-pip python3-dev
sudo apt install -y libopencv-dev python3-opencv
sudo apt install -y libatlas-base-dev liblapack-dev libeigen3-dev
sudo apt install -y libgtk-3-dev libavcodec-dev libavformat-dev
sudo apt install -y libswscale-dev libv4l-dev libxvidcore-dev libx264-dev

# Python OpenCV installation
pip3 install opencv-python==4.8.1.78
pip3 install opencv-contrib-python==4.8.1.78

# Optional: headless build for systems without a display
# (install instead of, not alongside, opencv-python)
pip3 install opencv-python-headless
```
Performance test:

```python
import time

import cv2
import numpy as np

def benchmark_opencv():
    # Create a test image
    img = np.random.randint(0, 255, (1080, 1920, 3), dtype=np.uint8)

    # CPU vs GPU performance comparison
    iterations = 100

    # CPU test
    start_time = time.time()
    for _ in range(iterations):
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        blurred = cv2.GaussianBlur(gray, (15, 15), 0)
    cpu_time = time.time() - start_time

    # GPU test (if available)
    if cv2.ocl.haveOpenCL():
        cv2.ocl.setUseOpenCL(True)
        gpu_img = cv2.UMat(img)

        # Warm-up pass so OpenCL kernel compilation does not skew the timing
        cv2.GaussianBlur(cv2.cvtColor(gpu_img, cv2.COLOR_BGR2GRAY), (15, 15), 0)

        start_time = time.time()
        for _ in range(iterations):
            gray = cv2.cvtColor(gpu_img, cv2.COLOR_BGR2GRAY)
            blurred = cv2.GaussianBlur(gray, (15, 15), 0)
        gpu_time = time.time() - start_time

        print(f"CPU time: {cpu_time:.3f}s")
        print(f"GPU time: {gpu_time:.3f}s")
        print(f"Speedup: {cpu_time/gpu_time:.2f}x")
    else:
        print(f"CPU time: {cpu_time:.3f}s")
        print("No GPU support found")

benchmark_opencv()
```
TensorFlow Lite Edge Optimization
Model preparation and quantization:

```python
import numpy as np
import tensorflow as tf

def convert_model_to_tflite(model_path, quantize=True):
    # Load the Keras model
    model = tf.keras.models.load_model(model_path)

    # TFLite converter
    converter = tf.lite.TFLiteConverter.from_keras_model(model)

    if quantize:
        # Post-training INT8 quantization (faster inference)
        converter.optimizations = [tf.lite.Optimize.DEFAULT]
        converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]

        # Representative dataset (required for full INT8 quantization);
        # random data stands in for real calibration samples here
        def representative_data_gen():
            for _ in range(100):
                data = np.random.random((1, 224, 224, 3))
                yield [data.astype(np.float32)]
        converter.representative_dataset = representative_data_gen

    # Convert the model
    tflite_model = converter.convert()

    # Save the quantized model
    output_path = model_path.replace('.h5', '_quantized.tflite')
    with open(output_path, 'wb') as f:
        f.write(tflite_model)
    return output_path

# Usage
quantized_model = convert_model_to_tflite('my_model.h5')
print(f"Quantized model saved: {quantized_model}")
```
MediaPipe Framework Applications
Real-time face detection system:

```python
import cv2
import mediapipe as mp

class FaceDetectionSystem:
    def __init__(self):
        self.mp_face_detection = mp.solutions.face_detection
        self.mp_drawing = mp.solutions.drawing_utils
        self.face_detection = self.mp_face_detection.FaceDetection(
            model_selection=0, min_detection_confidence=0.5)

    def detect_faces(self, image):
        # Convert BGR to RGB
        rgb_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        rgb_image.flags.writeable = False

        # Face detection
        results = self.face_detection.process(rgb_image)

        # Process the results
        rgb_image.flags.writeable = True
        image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)

        if results.detections:
            for detection in results.detections:
                self.mp_drawing.draw_detection(image, detection)

                # Confidence score
                confidence = detection.score[0]
                print(f"Face detected: {confidence*100:.1f}% confidence")
        return image

    def run_camera(self):
        cap = cv2.VideoCapture(0)
        cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
        cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
        cap.set(cv2.CAP_PROP_FPS, 30)

        while cap.isOpened():
            ret, frame = cap.read()
            if not ret:
                break

            # Face detection
            frame = self.detect_faces(frame)

            # Show the configured capture rate (not measured throughput)
            cv2.putText(frame, f"FPS: {cap.get(cv2.CAP_PROP_FPS):.1f}",
                        (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)

            cv2.imshow('Face Detection', frame)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

        cap.release()
        cv2.destroyAllWindows()

# Usage
detector = FaceDetectionSystem()
detector.run_camera()
```
Practical Projects
1. Object Detection System
YOLOv5 implementation via the ultralytics package:
```python
import cv2
from ultralytics import YOLO

class ObjectDetector:
    def __init__(self, model_path='yolov5s.pt'):
        self.model = YOLO(model_path)
        self.classes = self.model.names

    def detect(self, image):
        # YOLO inference
        results = self.model(image, conf=0.5)

        # Process the results
        for result in results:
            boxes = result.boxes
            for box in boxes:
                # Coordinates
                x1, y1, x2, y2 = box.xyxy[0]
                confidence = box.conf[0]
                class_id = box.cls[0]

                # Drawing
                cv2.rectangle(image, (int(x1), int(y1)), (int(x2), int(y2)), (0, 255, 0), 2)
                cv2.putText(image, f'{self.classes[int(class_id)]}: {confidence:.2f}',
                            (int(x1), int(y1 - 10)), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
        return image

    def real_time_detection(self):
        cap = cv2.VideoCapture(0)
        while True:
            ret, frame = cap.read()
            if not ret:
                break

            # Object detection
            frame = self.detect(frame)

            cv2.imshow('Object Detection', frame)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

        cap.release()
        cv2.destroyAllWindows()

# Usage
detector = ObjectDetector()
detector.real_time_detection()
```
2. Face Recognition Security System
Face encoding and database integration:
```python
import pickle
import sqlite3

import face_recognition
import numpy as np

class FaceRecognitionSecurity:
    def __init__(self, db_path='faces.db'):
        self.db_path = db_path
        self.init_database()
        self.known_encodings = []
        self.known_names = []
        self.load_known_faces()

    def init_database(self):
        conn = sqlite3.connect(self.db_path)
        cursor = conn.cursor()
        cursor.execute('''
            CREATE TABLE IF NOT EXISTS faces (
                id INTEGER PRIMARY KEY,
                name TEXT UNIQUE,
                encoding BLOB,
                created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
            )
        ''')
        conn.commit()
        conn.close()

    def register_face(self, image_path, name):
        # Compute the face encoding
        image = face_recognition.load_image_file(image_path)
        encodings = face_recognition.face_encodings(image)

        if len(encodings) > 0:
            encoding = encodings[0]

            # Save to the database
            conn = sqlite3.connect(self.db_path)
            cursor = conn.cursor()
            encoding_blob = pickle.dumps(encoding)
            cursor.execute('INSERT OR REPLACE INTO faces (name, encoding) VALUES (?, ?)',
                           (name, encoding_blob))
            conn.commit()
            conn.close()

            # Load into memory
            self.known_encodings.append(encoding)
            self.known_names.append(name)
            print(f"✓ {name} registered successfully")
            return True
        else:
            print("✗ No face found")
            return False

    def load_known_faces(self):
        conn = sqlite3.connect(self.db_path)
        cursor = conn.cursor()
        cursor.execute('SELECT name, encoding FROM faces')
        for name, encoding_blob in cursor.fetchall():
            encoding = pickle.loads(encoding_blob)
            self.known_encodings.append(encoding)
            self.known_names.append(name)
        conn.close()
        print(f"✓ {len(self.known_names)} known faces loaded")

    def recognize_face(self, image):
        # Face detection
        face_locations = face_recognition.face_locations(image)
        face_encodings = face_recognition.face_encodings(image, face_locations)

        recognized_faces = []
        for face_encoding in face_encodings:
            # Comparison against the known encodings
            distances = face_recognition.face_distance(self.known_encodings, face_encoding)
            matches = face_recognition.compare_faces(self.known_encodings, face_encoding, tolerance=0.6)

            name = "Unknown"
            confidence = 0
            if True in matches:
                match_index = np.argmin(distances)
                if matches[match_index]:
                    name = self.known_names[match_index]
                    confidence = 1 - distances[match_index]

            recognized_faces.append({
                'name': name,
                'confidence': confidence,
                'location': face_locations[len(recognized_faces)]
            })
        return recognized_faces

# Usage
security = FaceRecognitionSecurity()
security.register_face('person1.jpg', 'Ahmet')
faces = security.recognize_face(image)  # 'image' is an RGB array, e.g. a loaded photo or camera frame
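```

To wire this into a live camera feed, the dictionaries returned by recognize_face can be drawn directly onto each frame. A minimal sketch; note that face_recognition expects RGB input and its locations are (top, right, bottom, left) tuples:

```python
import cv2

security = FaceRecognitionSecurity()
cap = cv2.VideoCapture(0)

while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break

    # face_recognition works on RGB images; OpenCV delivers BGR
    rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    for face in security.recognize_face(rgb_frame):
        top, right, bottom, left = face['location']
        cv2.rectangle(frame, (left, top), (right, bottom), (0, 255, 0), 2)
        cv2.putText(frame, f"{face['name']} ({face['confidence']:.2f})",
                    (left, top - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)

    cv2.imshow('Face Security', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
```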
3. Traffic Analysis System
Vehicle counting and speed measurement:
```python
import time
from collections import defaultdict

import cv2
import numpy as np

class TrafficAnalyzer:
    def __init__(self):
        self.vehicle_count = 0
        self.speed_data = []
        self.tracking_history = defaultdict(list)
        self.vehicle_classes = ['car', 'truck', 'bus', 'motorbike']

    def calculate_speed(self, track_id, current_pos, timestamp, pixel_to_meter=0.1):
        history = self.tracking_history[track_id]
        history.append((current_pos, timestamp))

        # At least 2 points are required
        if len(history) < 2:
            return 0

        # Use the last 5 measurements
        if len(history) > 5:
            history = history[-5:]

        # Compute distance and elapsed time
        start_pos, start_time = history[0]
        end_pos, end_time = history[-1]
        distance = np.sqrt((end_pos[0] - start_pos[0])**2 + (end_pos[1] - start_pos[1])**2)
        time_diff = end_time - start_time

        if time_diff > 0:
            # Convert pixels/second to km/h
            speed_mps = (distance * pixel_to_meter) / time_diff
            speed_kmh = speed_mps * 3.6
            return speed_kmh
        return 0

    def analyze_traffic(self, video_path):
        # Load the YOLO model
        net = cv2.dnn.readNet('yolov4.weights', 'yolov4.cfg')
        cap = cv2.VideoCapture(video_path)

        while cap.isOpened():
            ret, frame = cap.read()
            if not ret:
                break

            # Object detection
            blob = cv2.dnn.blobFromImage(frame, 1/255.0, (416, 416), swapRB=True, crop=False)
            net.setInput(blob)
            outputs = net.forward(net.getUnconnectedOutLayersNames())

            # Detect vehicles (see the detect_vehicles sketch below)
            vehicles = self.detect_vehicles(frame, outputs)

            # Tracking and speed calculation
            for vehicle in vehicles:
                track_id = vehicle['id']
                position = vehicle['center']
                timestamp = time.time()
                speed = self.calculate_speed(track_id, position, timestamp)

                # Visualization
                x, y, w, h = vehicle['bbox']
                cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
                cv2.putText(frame, f'ID: {track_id}', (x, y - 30), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
                cv2.putText(frame, f'Speed: {speed:.1f} km/h', (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)

            # Statistics
            cv2.putText(frame, f'Vehicles: {len(vehicles)}', (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
            cv2.putText(frame, f'Total Count: {self.vehicle_count}', (10, 60), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)

            cv2.imshow('Traffic Analysis', frame)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

        cap.release()
        cv2.destroyAllWindows()

# Usage
analyzer = TrafficAnalyzer()
analyzer.analyze_traffic('traffic_video.mp4')
```
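analyze_traffic relies on a detect_vehicles helper that the original listing leaves undefined. A minimal sketch of what it might look like as a method of TrafficAnalyzer, assuming COCO-trained YOLOv4 outputs and a naive per-frame ID assignment in place of a real tracker (DeepSORT or similar would be more robust); non-max suppression via cv2.dnn.NMSBoxes is omitted for brevity:

```python
import numpy as np

# COCO class indices for the vehicle classes used above
VEHICLE_CLASS_IDS = {2: 'car', 3: 'motorbike', 5: 'bus', 7: 'truck'}

def detect_vehicles(self, frame, outputs, conf_threshold=0.5):
    """Hypothetical helper: parse raw Darknet/OpenCV DNN outputs
    into the vehicle dicts consumed by analyze_traffic."""
    height, width = frame.shape[:2]
    vehicles = []
    next_id = 0

    for output in outputs:
        for det in output:
            scores = det[5:]
            class_id = int(np.argmax(scores))
            confidence = scores[class_id]
            if confidence > conf_threshold and class_id in VEHICLE_CLASS_IDS:
                # YOLO returns normalized center-based boxes
                cx, cy = int(det[0] * width), int(det[1] * height)
                w, h = int(det[2] * width), int(det[3] * height)
                vehicles.append({
                    'id': next_id,  # naive ID; a real tracker should assign these
                    'center': (cx, cy),
                    'bbox': (cx - w // 2, cy - h // 2, w, h),
                    'class': VEHICLE_CLASS_IDS[class_id],
                    'confidence': float(confidence),
                })
                next_id += 1
    return vehicles
```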
Performance Improvement Techniques
Model Pruning and Optimization
TensorFlow model pruning:
```python
import tensorflow_model_optimization as tfmot

def optimize_model(model):
    # Pruning configuration
    pruning_params = {
        'pruning_schedule': tfmot.sparsity.keras.PolynomialDecay(
            initial_sparsity=0.50,
            final_sparsity=0.80,
            begin_step=1000,
            end_step=2000
        )
    }

    # Model pruning (note the keyword-argument unpacking)
    model_for_pruning = tfmot.sparsity.keras.prune_low_magnitude(model, **pruning_params)

    # Compile
    model_for_pruning.compile(
        optimizer='adam',
        loss='sparse_categorical_crossentropy',
        metrics=['accuracy']
    )
    return model_for_pruning

# Usage
optimized_model = optimize_model(your_model)
```
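After fine-tuning the pruned model, the pruning wrappers must be stripped before export, or the saved model keeps its training-time bookkeeping. A short sketch, continuing from the model_for_pruning returned by optimize_model above:

```python
import tensorflow as tf
import tensorflow_model_optimization as tfmot

# Fine-tune first; the callback keeps the pruning schedule advancing
# model_for_pruning.fit(x_train, y_train, epochs=2,
#                       callbacks=[tfmot.sparsity.keras.UpdatePruningStep()])

# Remove the pruning wrappers so the exported model is a plain Keras model
final_model = tfmot.sparsity.keras.strip_pruning(model_for_pruning)

# The sparse weights compress well and convert to TFLite as usual
converter = tf.lite.TFLiteConverter.from_keras_model(final_model)
tflite_model = converter.convert()
with open('pruned_model.tflite', 'wb') as f:
    f.write(tflite_model)
```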
Multi-threading Optimization
Thread pool for parallel processing:
```python
import queue
from concurrent.futures import ThreadPoolExecutor

class MultiThreadProcessor:
    def __init__(self, num_threads=4):
        self.num_threads = num_threads
        self.frame_queue = queue.Queue(maxsize=10)
        self.result_queue = queue.Queue()
        self.executor = ThreadPoolExecutor(max_workers=num_threads)
        self.running = True

    def process_frame(self, frame):
        # AI inference goes here (your_ai_model is a placeholder)
        result = your_ai_model.predict(frame)
        return result

    def worker(self):
        while self.running:
            try:
                frame = self.frame_queue.get(timeout=1)
                result = self.process_frame(frame)
                self.result_queue.put(result)
                self.frame_queue.task_done()
            except queue.Empty:
                continue

    def start_processing(self):
        # Start the worker threads
        for _ in range(self.num_threads):
            self.executor.submit(self.worker)

    def stop(self):
        # Let workers drain out of their loops, then shut the pool down
        self.running = False
        self.executor.shutdown(wait=True)

# Usage
processor = MultiThreadProcessor(num_threads=4)
processor.start_processing()
```
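A usage sketch showing how a capture loop might feed the processor and drain results; the camera index and the way results are consumed are assumptions:

```python
import queue

import cv2

processor = MultiThreadProcessor(num_threads=4)
processor.start_processing()

cap = cv2.VideoCapture(0)
try:
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break

        # Drop frames when the workers fall behind instead of blocking capture
        try:
            processor.frame_queue.put_nowait(frame)
        except queue.Full:
            pass

        # Consume any finished results without blocking
        try:
            result = processor.result_queue.get_nowait()
            print("Inference result:", result)
        except queue.Empty:
            pass
finally:
    cap.release()
    processor.stop()
```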