Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
|
@@ -1,1215 +1,74 @@
|
|
| 1 |
"""
|
| 2 |
-
|
| 3 |
-
|
| 4 |
-
All 57 improvements implemented
|
| 5 |
"""
|
| 6 |
# NOTE(review): two import lines lost their module names in the scrape;
# 'warnings' and 'timm' are reconstructed from later usage
# (warnings.filterwarnings below, timm.create_model in the ReID module).
import base64
import json
import pickle
import uuid
import warnings
from collections import deque
from dataclasses import dataclass, field
from datetime import datetime, timedelta
from io import BytesIO
from pathlib import Path
from typing import Dict, List, Optional, Tuple, Any

import cv2
import gradio as gr
import numpy as np
import timm
import torch
from PIL import Image
from scipy.optimize import linear_sum_assignment
from sklearn.metrics.pairwise import cosine_similarity

warnings.filterwarnings('ignore')
|
| 29 |
-
|
| 30 |
-
# ==================== DETECTION MODULE ====================
|
| 31 |
-
|
| 32 |
-
@dataclass
class Detection:
    """A single dog detection: box, score, crop and optional pose keypoints."""
    bbox: List[float]                          # [x1, y1, x2, y2] in pixel coords
    confidence: float                          # detector confidence in [0, 1]
    image_crop: Optional[np.ndarray] = None    # BGR crop of the detected region
    keypoints: Optional[np.ndarray] = None     # 24x3 (x, y, conf) for dog pose
|
| 39 |
-
|
| 40 |
-
|
| 41 |
-
class DogDetector:
    """Dog detector with pose estimation using a dog-pose trained YOLO model.

    Reconstructed from a mangled diff scrape; emoji prefixes in the original
    status prints were lost in the scrape and are omitted here.
    """

    def __init__(self, confidence_threshold: float = 0.45, device: str = 'cuda'):
        self.confidence_threshold = confidence_threshold
        # Fall back to CPU when CUDA is not available.
        self.device = device if torch.cuda.is_available() else 'cpu'

        # Load the dog-pose trained model; keep running (model=None) on failure.
        try:
            from ultralytics import YOLO
            self.model = YOLO('dog-pose-trained.pt')  # Will be replaced with dog-pose-trained.pt
            self.model.to(self.device)
            print(f"Detector loaded on {self.device}")
        except Exception as e:
            print(f"Detector error: {e}")
            self.model = None

        self.dog_class_id = 0  # Dog class in pose model

    def detect(self, frames) -> List[List[Detection]]:
        """Run batch detection on a single frame or a list of frames.

        Returns a flat list of Detections for a single-frame input, or a
        list of such lists (one per frame) for a list input.
        """
        # Normalize input to a list, remembering whether to unwrap the result.
        if not isinstance(frames, list):
            frames = [frames]
            single_mode = True
        else:
            single_mode = False

        if self.model is None:
            # BUGFIX: previously returned [[]] for a single frame, which does
            # not match the documented single-frame return shape (a flat list).
            empty = [[] for _ in frames]
            return empty[0] if single_mode else empty

        all_detections = []

        try:
            # Batch inference over all frames at once.
            results = self.model(frames, conf=self.confidence_threshold, verbose=False)

            for frame, result in zip(frames, results):
                detections = []

                if result.boxes is not None and len(result.boxes) > 0:
                    boxes = result.boxes
                    keypoints_data = result.keypoints if hasattr(result, 'keypoints') else None

                    for i in range(len(boxes)):
                        x1, y1, x2, y2 = boxes.xyxy[i].cpu().numpy()
                        x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)

                        # Clamp the box to the frame bounds.
                        h, w = frame.shape[:2]
                        x1, y1 = max(0, x1), max(0, y1)
                        x2, y2 = min(w, x2), min(h, y2)

                        if x2 <= x1 or y2 <= y1:
                            continue  # degenerate box after clamping

                        dog_crop = frame[y1:y2, x1:x2].copy()

                        # Extract keypoints if the pose head produced them.
                        kpts = None
                        if keypoints_data is not None and len(keypoints_data) > i:
                            kpts = keypoints_data[i].data.cpu().numpy().reshape(-1, 3)

                        detections.append(Detection(
                            bbox=[x1, y1, x2, y2],
                            confidence=float(boxes.conf[i]),
                            image_crop=dog_crop,
                            keypoints=kpts,
                        ))

                all_detections.append(detections)

        except Exception as e:
            print(f"Detection error: {e}")
            all_detections = [[] for _ in frames]

        return all_detections[0] if single_mode else all_detections
|
| 118 |
-
|
| 119 |
-
|
| 120 |
-
# ==================== TRACKING MODULE ====================
|
| 121 |
-
|
| 122 |
-
class Track:
    """Enhanced track with pose support and robust state management.

    State machine: 'tentative' -> 'confirmed' (after 2 hits) -> 'deleted'
    (after sustained misses, see mark_missed).
    """

    def __init__(self, detection: Detection, track_id: Optional[int] = None):
        # BUGFIX: use `is not None` so an explicit id of 0 is honored instead
        # of being silently replaced by a random id.
        self.track_id = track_id if track_id is not None else int(uuid.uuid4().int % 100000)
        self.bbox = detection.bbox.copy()
        self.detections = [detection]
        self.confidence = detection.confidence
        self.keypoints = detection.keypoints

        self.age = 1                     # frames since creation
        self.time_since_update = 0       # frames since last matched detection
        self.state = 'tentative'
        self.hits = 1                    # number of matched detections
        self.consecutive_misses = 0

        cx = (self.bbox[0] + self.bbox[2]) / 2
        cy = (self.bbox[1] + self.bbox[3]) / 2
        self.trajectory = deque(maxlen=30)   # recent box centers
        self.trajectory.append((cx, cy))

        self.velocity = np.array([0.0, 0.0])
        self.sizes = deque(maxlen=10)        # recent (w, h) for size smoothing
        width = max(1, self.bbox[2] - self.bbox[0])
        height = max(1, self.bbox[3] - self.bbox[1])
        self.sizes.append((width, height))

        self.avg_confidence = self.confidence
        self.appearance_features = []

    def predict(self):
        """Advance the track one frame with simple constant-velocity motion."""
        self.age += 1
        self.time_since_update += 1
        self.consecutive_misses += 1

        try:
            if len(self.trajectory) >= 3:
                positions = np.array(list(self.trajectory))[-3:]
                self.velocity = positions[-1] - positions[-2]

                # Cap velocity to avoid runaway predictions.
                max_velocity = 50
                velocity_magnitude = np.linalg.norm(self.velocity)
                if velocity_magnitude > max_velocity:
                    self.velocity = self.velocity / velocity_magnitude * max_velocity

                # Damped one-step extrapolation of the center.
                predicted_pos = positions[-1] + self.velocity * 0.7

                avg_width = np.mean([s[0] for s in self.sizes]) if self.sizes else 50
                avg_height = np.mean([s[1] for s in self.sizes]) if self.sizes else 50

                self.bbox = [
                    predicted_pos[0] - avg_width / 2,
                    predicted_pos[1] - avg_height / 2,
                    predicted_pos[0] + avg_width / 2,
                    predicted_pos[1] + avg_height / 2,
                ]
        except Exception:
            # Best-effort prediction: keep the last bbox on any numeric error.
            pass

    def update(self, detection: Detection):
        """Fold a matched detection into the track state."""
        self.bbox = detection.bbox.copy()
        self.detections.append(detection)
        self.confidence = detection.confidence
        self.keypoints = detection.keypoints

        # Exponential moving average of confidence.
        self.avg_confidence = self.avg_confidence * 0.9 + self.confidence * 0.1
        self.hits += 1
        self.time_since_update = 0
        self.consecutive_misses = 0

        cx = (self.bbox[0] + self.bbox[2]) / 2
        cy = (self.bbox[1] + self.bbox[3]) / 2
        self.trajectory.append((cx, cy))

        width = max(1, self.bbox[2] - self.bbox[0])
        height = max(1, self.bbox[3] - self.bbox[1])
        self.sizes.append((width, height))

        if self.state == 'tentative' and self.hits >= 2:
            self.state = 'confirmed'

        # Keep only the 5 most recent detections, dropping old crops to
        # bound memory usage.
        if len(self.detections) > 5:
            for old_det in self.detections[:-5]:
                if hasattr(old_det, 'image_crop'):
                    old_det.image_crop = None
            self.detections = self.detections[-5:]

    def mark_missed(self):
        """Mark as missed - TASK 1: increased deletion thresholds."""
        if self.state == 'confirmed':
            if self.consecutive_misses > 60 or self.time_since_update > 90:  # TASK 1
                self.state = 'deleted'
        elif self.state == 'tentative':
            if self.consecutive_misses > 3:
                self.state = 'deleted'
|
| 219 |
-
|
| 220 |
-
|
| 221 |
-
class RobustTracker:
    """Production tracker with appearance-based fallback and pose matching.

    Reconstructed from a mangled diff scrape; emoji prefixes in status
    prints were lost in the scrape and are omitted here.
    """

    def __init__(self, match_threshold: float = 0.35, track_buffer: int = 90,
                 use_appearance: bool = True, use_pose: bool = True):
        self.match_threshold = match_threshold
        self.track_buffer = track_buffer  # TASK 1: increased from 30 to 90
        self.use_appearance = use_appearance
        self.use_pose = use_pose

        self.tracks: List[Track] = []
        self.track_id_count = 1
        self.recently_deleted_tracks = {}  # TASK 1: track_id -> {'track', 'deleted_time'}
        self.max_lost_time = timedelta(minutes=5)  # TASK 1: 5 minute re-entry buffer

        # Spatial gating for association.
        self.max_center_distance = 150
        self.min_iou_for_match = 0.15

    def update(self, detections: List[Detection]) -> List[Track]:
        """Advance all tracks one frame and associate them with detections.

        Returns the confirmed tracks after this frame.
        """
        if not detections:
            for track in self.tracks:
                track.predict()
                track.mark_missed()
            self._archive_deleted()  # TASK 1
            return [t for t in self.tracks if t.state == 'confirmed']

        try:
            for track in self.tracks:
                track.predict()

            confirmed_tracks = [t for t in self.tracks if t.state == 'confirmed']
            tentative_tracks = [t for t in self.tracks if t.state == 'tentative']

            matched_track_indices = set()
            matched_det_indices = set()

            # Stage 1: match confirmed tracks at the full threshold.
            if confirmed_tracks:
                matched_track_indices, matched_det_indices = self._associate_tracks(
                    confirmed_tracks, detections, matched_track_indices,
                    matched_det_indices, threshold_mult=1.0
                )

            # Stage 2: match tentative tracks against leftover detections
            # with a stricter (scaled-down) threshold.
            if tentative_tracks:
                temp_det_mapping = [i for i in range(len(detections))
                                    if i not in matched_det_indices]
                unmatched_dets = [detections[i] for i in temp_det_mapping]
                if unmatched_dets:
                    _, tent_matched_dets = self._associate_tracks(
                        tentative_tracks, unmatched_dets, set(), set(), threshold_mult=0.7
                    )
                    for det_idx in tent_matched_dets:
                        matched_det_indices.add(temp_det_mapping[det_idx])

            for i, track in enumerate(confirmed_tracks):
                if i not in matched_track_indices:
                    track.mark_missed()

            for track in tentative_tracks:
                if track.time_since_update > 0:
                    track.mark_missed()

            # Create new tracks, first trying to revive a recently deleted
            # one (TASK 1) before assigning a fresh id.
            for det_idx in range(len(detections)):
                if det_idx not in matched_det_indices:
                    detection = detections[det_idx]

                    revived_track = self._check_recently_deleted(detection)
                    if revived_track:
                        revived_track.update(detection)
                        revived_track.state = 'confirmed'
                        self.tracks.append(revived_track)
                    elif self._is_new_track(detection):
                        new_track = Track(detection, self.track_id_count)
                        self.track_id_count += 1
                        self.tracks.append(new_track)

            self._archive_deleted()  # TASK 1
            return [t for t in self.tracks if t.state == 'confirmed']

        except Exception as e:
            print(f"Tracker error: {e}")
            return [t for t in self.tracks if t.state == 'confirmed']

    def _archive_deleted(self):
        """Move deleted tracks into the recently-deleted buffer (TASK 1).

        Factored out of update(), where this logic was duplicated verbatim
        in both the empty-detections and normal paths.
        """
        current_time = datetime.now()
        for track in [t for t in self.tracks if t.state == 'deleted']:
            self.recently_deleted_tracks[track.track_id] = {
                'track': track,
                'deleted_time': current_time,
            }
        self.tracks = [t for t in self.tracks if t.state != 'deleted']
        self._cleanup_old_deleted()

    def _check_recently_deleted(self, detection: Detection) -> Optional[Track]:
        """TASK 1: return a recently deleted track matching this detection."""
        if not self.recently_deleted_tracks:
            return None

        best_match = None
        best_score = 0.25  # lower threshold for re-entry

        for track_id, data in self.recently_deleted_tracks.items():
            track = data['track']

            # IoU check.
            iou = self._iou(track.bbox, detection.bbox)

            # Center distance.
            track_center = self._get_center(track.bbox)
            det_center = self._get_center(detection.bbox)
            distance = np.linalg.norm(np.array(track_center) - np.array(det_center))

            # Combined score, gated on minimal overlap and proximity.
            if iou > 0.1 and distance < 200:
                score = iou * 0.7 + (1 - distance / 200) * 0.3
                if score > best_score:
                    best_score = score
                    best_match = track

        if best_match:
            # Remove from the deleted buffer before reviving.
            del self.recently_deleted_tracks[best_match.track_id]
            print(f"Revived track {best_match.track_id}")

        return best_match

    def _cleanup_old_deleted(self):
        """TASK 1: remove tracks deleted more than max_lost_time ago."""
        current_time = datetime.now()
        to_remove = [track_id
                     for track_id, data in self.recently_deleted_tracks.items()
                     if current_time - data['deleted_time'] > self.max_lost_time]
        for track_id in to_remove:
            del self.recently_deleted_tracks[track_id]

    def _associate_tracks(self, tracks, detections, existing_matched_tracks,
                          existing_matched_dets, threshold_mult=1.0):
        """Hungarian assignment of detections to tracks.

        Returns the (track index, detection index) sets of matches, merged
        with the sets passed in.
        """
        if not tracks or not detections:
            return existing_matched_tracks, existing_matched_dets

        try:
            cost_matrix = self._calculate_enhanced_cost_matrix(tracks, detections)

            if cost_matrix.size == 0:
                return existing_matched_tracks, existing_matched_dets

            row_ind, col_ind = linear_sum_assignment(cost_matrix)

            matched_tracks = existing_matched_tracks.copy()
            matched_dets = existing_matched_dets.copy()

            threshold = (1 - self.match_threshold * threshold_mult)

            for r, c in zip(row_ind, col_ind):
                if r >= len(tracks) or c >= len(detections):
                    continue

                if cost_matrix[r, c] < threshold:
                    tracks[r].update(detections[c])
                    matched_tracks.add(r)
                    matched_dets.add(c)

            return matched_tracks, matched_dets

        except Exception as e:
            print(f"Association error: {e}")
            return existing_matched_tracks, existing_matched_dets

    def _calculate_enhanced_cost_matrix(self, tracks, detections):
        """Cost matrix combining IoU, center distance, size and (optionally) pose."""
        n_tracks, n_dets = len(tracks), len(detections)
        cost_matrix = np.ones((n_tracks, n_dets))

        for t_idx, track in enumerate(tracks):
            track_center = np.array(self._get_center(track.bbox))
            track_size = np.array([
                max(1, track.bbox[2] - track.bbox[0]),
                max(1, track.bbox[3] - track.bbox[1]),
            ])

            for d_idx, detection in enumerate(detections):
                iou = self._iou(track.bbox, detection.bbox)
                det_center = np.array(self._get_center(detection.bbox))
                distance = np.linalg.norm(track_center - det_center)

                det_size = np.array([
                    max(1, detection.bbox[2] - detection.bbox[0]),
                    max(1, detection.bbox[3] - detection.bbox[1]),
                ])

                size_ratio = np.minimum(track_size, det_size) / (np.maximum(track_size, det_size) + 1e-6)
                size_cost = 1 - np.mean(size_ratio)

                if iou >= self.min_iou_for_match and distance < self.max_center_distance:
                    iou_cost = 1 - iou
                    dist_cost = distance / self.max_center_distance
                    total_cost = 0.6 * iou_cost + 0.25 * dist_cost + 0.15 * size_cost

                    # Pose-based matching - OPTIONAL task: blend in keypoint
                    # distance when both sides carry keypoints.
                    if self.use_pose and track.keypoints is not None and detection.keypoints is not None:
                        pose_cost = self._keypoint_distance(track.keypoints, detection.keypoints)
                        total_cost = 0.5 * iou_cost + 0.2 * dist_cost + 0.15 * size_cost + 0.15 * pose_cost

                    cost_matrix[t_idx, d_idx] = total_cost
                else:
                    # Appearance fallback when spatial matching fails.
                    if self.use_appearance and hasattr(track, 'appearance_features') and track.appearance_features:
                        cost_matrix[t_idx, d_idx] = 0.9  # high but not impossible
                    else:
                        cost_matrix[t_idx, d_idx] = 1.0

        return cost_matrix

    def _keypoint_distance(self, kpts1, kpts2):
        """Normalized mean distance between confidently detected keypoints."""
        try:
            valid_idx = (kpts1[:, 2] > 0.5) & (kpts2[:, 2] > 0.5)
            if not np.any(valid_idx):
                return 1.0

            diff = np.linalg.norm(kpts1[valid_idx, :2] - kpts2[valid_idx, :2], axis=1)
            return min(1.0, np.mean(diff) / 100.0)
        except Exception:
            return 1.0

    def _is_new_track(self, detection):
        """True unless the detection center is within 30px of a live track."""
        det_center = self._get_center(detection.bbox)
        for track in self.tracks:
            if track.state == 'deleted':
                continue
            track_center = self._get_center(track.bbox)
            dist = np.linalg.norm(np.array(det_center) - np.array(track_center))
            if dist < 30:
                return False
        return True

    def _get_center(self, bbox):
        return ((bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2)

    def _iou(self, bbox1, bbox2):
        x1 = max(bbox1[0], bbox2[0])
        y1 = max(bbox1[1], bbox2[1])
        x2 = min(bbox1[2], bbox2[2])
        y2 = min(bbox1[3], bbox2[3])

        if x2 < x1 or y2 < y1:
            return 0.0

        intersection = (x2 - x1) * (y2 - y1)
        area1 = max(1, (bbox1[2] - bbox1[0]) * (bbox1[3] - bbox1[1]))
        area2 = max(1, (bbox2[2] - bbox2[0]) * (bbox2[3] - bbox2[1]))
        union = area1 + area2 - intersection

        return max(0, min(1, intersection / (union + 1e-6)))

    def reset(self):
        """Drop all live and recently deleted tracks and restart id numbering."""
        self.tracks.clear()
        self.recently_deleted_tracks.clear()
        self.track_id_count = 1
|
| 502 |
-
|
| 503 |
-
|
| 504 |
-
# ==================== REID MODULE ====================
|
| 505 |
-
|
| 506 |
-
@dataclass
class DogFeatures:
    """Appearance embedding plus context for one dog crop."""
    features: np.ndarray                       # L2-normalized embedding vector
    bbox: List[float] = field(default_factory=list)
    confidence: float = 0.5                    # detector confidence of the crop
    frame_num: int = 0                         # frame the features came from
    image: Optional[np.ndarray] = None         # BGR crop used for extraction
    keypoints: Optional[np.ndarray] = None     # optional pose keypoints
|
| 514 |
-
|
| 515 |
-
|
| 516 |
-
class MegaDescriptorReID:
|
| 517 |
-
"""ReID with multiple embeddings per dog and pose support"""
|
| 518 |
-
|
| 519 |
-
TURKISH_DOG_NAMES = [
|
| 520 |
-
"KarabaΕ", "Pamuk", "Boncuk", "FΔ±ndΔ±k", "PaΕa", "Aslan", "Duman", "TarΓ§Δ±n",
|
| 521 |
-
"KΓΆmΓΌr", "Bal", "Zeytin", "Kurabiye", "Lokum", "Εeker", "Beyaz", "Kara",
|
| 522 |
-
"SarΔ±", "Benekli", "Cesur", "YΔ±ldΔ±z", "Ay", "GΓΌneΕ", "Bulut", "FΔ±rtΔ±na"
|
| 523 |
-
]
|
| 524 |
-
|
| 525 |
-
def __init__(self, device: str = 'cuda', db_path: str = 'dog_database', use_pose: bool = True):
    """Set up device, on-disk database paths and per-session ReID state.

    Reconstructed from a mangled diff scrape; emoji prefix of the original
    status print was lost and is omitted.
    """
    self.device = device if torch.cuda.is_available() else 'cpu'
    self.base_threshold = 0.35
    self.db_threshold = 0.25  # TASK 2: lower threshold for database matches
    self.db_path = Path(db_path)
    self.use_pose = use_pose

    self.db_path.mkdir(exist_ok=True)
    (self.db_path / 'images').mkdir(exist_ok=True)

    self.permanent_dogs = self.load_permanent_database()
    self.used_names = {dog['name'] for dog in self.permanent_dogs.values()}

    self.session_dogs = {}          # temp_id -> list[DogFeatures]
    self.session_best_images = {}   # temp_id -> best DogFeatures so far
    self.temp_to_permanent = {}     # temp_id -> permanent dog name
    self.next_temp_id = 1
    self.current_frame = 0

    # Pose-impact comparison logging - OPTIONAL task.
    self.pose_impact_log = {'improved': 0, 'prevented_false': 0}

    self._initialize_megadescriptor()
    print(f"ReID initialized | Known dogs: {len(self.permanent_dogs)}")
|
| 549 |
-
|
| 550 |
-
def _initialize_megadescriptor(self):
    """Load the MegaDescriptor embedding model and its input transform.

    On any failure (missing timm, no network, no weights) the ReID module
    keeps running with self.model = None. Emoji prefixes of the original
    status prints were lost in the scrape and are omitted.
    """
    try:
        self.model = timm.create_model('hf-hub:BVRA/MegaDescriptor-L-384', pretrained=True)
        self.model.to(self.device).eval()
        self.transform = timm.data.create_transform(
            input_size=(384, 384), is_training=False,
            mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]
        )
        print("MegaDescriptor loaded")
    except Exception as e:
        print(f"MegaDescriptor error: {e}")
        self.model = None
|
| 562 |
-
|
| 563 |
-
def load_permanent_database(self):
    """TASK 2: load dog metadata (JSON) and embeddings (pickle).

    Supports both the old format (one ndarray per dog) and the new format
    (a list of embeddings per dog). Returns {} when the database does not
    exist or fails to load.
    """
    db_file = self.db_path / 'dogs_database.json'
    embeddings_file = self.db_path / 'embeddings.pkl'

    if not db_file.exists():
        return {}

    try:
        with open(db_file, 'r', encoding='utf-8') as f:
            dogs_data = json.load(f)

        if embeddings_file.exists():
            # NOTE(review): pickle is only acceptable here because this file
            # is produced locally by save_permanent_database(); never load
            # untrusted pickles.
            with open(embeddings_file, 'rb') as f:
                embeddings = pickle.load(f)

            for dog_name, embedding_list in embeddings.items():
                if dog_name in dogs_data:
                    # Old format stored a single ndarray; wrap it in a list.
                    if isinstance(embedding_list, np.ndarray):
                        dogs_data[dog_name]['embeddings'] = [embedding_list]
                    else:
                        dogs_data[dog_name]['embeddings'] = embedding_list

        print(f"Loaded {len(dogs_data)} dogs")
        return dogs_data

    except Exception as e:
        print(f"Database load error: {e}")
        return {}
|
| 593 |
-
|
| 594 |
-
def save_permanent_database(self):
    """Persist dog metadata to JSON and embeddings to a pickle file.

    Embeddings are kept out of the JSON file because ndarrays are not
    JSON-serializable. Emoji prefix of the original status print was lost
    in the scrape and is omitted.
    """
    db_file = self.db_path / 'dogs_database.json'
    embeddings_file = self.db_path / 'embeddings.pkl'

    metadata = {}
    embeddings = {}

    for dog_name, dog_data in self.permanent_dogs.items():
        metadata[dog_name] = {
            'name': dog_data['name'],
            'first_seen': dog_data['first_seen'],
            'last_seen': dog_data['last_seen'],
            'total_sightings': dog_data['total_sightings'],
            'image_path': dog_data.get('image_path', ''),
        }

        if 'embeddings' in dog_data:
            embeddings[dog_name] = dog_data['embeddings']

    with open(db_file, 'w', encoding='utf-8') as f:
        json.dump(metadata, f, indent=2, ensure_ascii=False)

    with open(embeddings_file, 'wb') as f:
        pickle.dump(embeddings, f)

    print(f"Database saved: {len(metadata)} dogs")
|
| 621 |
-
|
| 622 |
-
def check_permanent_database(self, features: np.ndarray) -> Optional[str]:
    """TASK 2: return the best-matching permanent dog name, or None.

    Compares against every stored embedding per dog, scores each dog by its
    maximum similarity, and accepts the best dog when it clears the lower
    database threshold. Cosine similarity is computed directly with NumPy
    instead of one sklearn cosine_similarity() call per embedding pair -
    same value, far less overhead.
    """
    if not self.permanent_dogs:
        return None

    def _cos(a, b):
        # Plain-NumPy cosine similarity; epsilon guards zero vectors.
        a = np.asarray(a).ravel()
        b = np.asarray(b).ravel()
        return float(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b) + 1e-12))

    best_match = None
    best_score = 0

    for dog_name, dog_data in self.permanent_dogs.items():
        if 'embeddings' not in dog_data:
            continue

        # Maximum similarity over all stored embeddings for this dog - TASK 2.
        similarities = [_cos(features, emb) for emb in dog_data['embeddings']]
        if similarities:
            max_sim = max(similarities)
            if max_sim > best_score:
                best_score = max_sim
                best_match = dog_name

    # Lower threshold for the permanent database - TASK 2.
    if best_score >= self.db_threshold:
        print(f" Database match: {best_match} ({best_score:.3f})")
        return best_match

    return None
|
| 658 |
-
|
| 659 |
-
def extract_features(self, images, bboxes=None) -> List[Optional[DogFeatures]]:
    """Batch feature extraction.

    Accepts a single BGR image (returns one DogFeatures or None) or a list
    of images (returns a list aligned with the input, None for failures).
    """
    # Normalize to list form, remembering whether to unwrap the result.
    if not isinstance(images, list):
        images = [images]
        bboxes = [bboxes] if bboxes is not None else [None]
        single_mode = True
    else:
        single_mode = False
        if bboxes is None:
            bboxes = [None] * len(images)

    # BUGFIX: pre-size the result list so it always stays aligned with
    # `images`. The previous append-based bookkeeping could return a list
    # shorter than `images` when valid and invalid images were mixed, and
    # returned a 1-element list instead of None in single mode with no
    # model (the caller checks `if features is None`).
    results = [None] * len(images)

    if self.model is None:
        return results[0] if single_mode else results

    try:
        tensors = []
        valid_indices = []

        for idx, img in enumerate(images):
            if img is None or img.size == 0:
                continue  # slot stays None

            try:
                img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
                pil_img = Image.fromarray(img_rgb)
                tensors.append(self.transform(pil_img))
                valid_indices.append(idx)
            except Exception:
                pass  # conversion failed; slot stays None

        if tensors:
            batch_tensor = torch.stack(tensors).to(self.device)

            with torch.no_grad():
                features_batch = self.model(batch_tensor)

            features_list = features_batch.cpu().numpy()

            for result_idx, idx in enumerate(valid_indices):
                feat = features_list[result_idx]
                feat = feat / (np.linalg.norm(feat) + 1e-7)  # L2-normalize

                results[idx] = DogFeatures(
                    features=feat,
                    bbox=bboxes[idx] if bboxes[idx] else [0, 0, 100, 100],
                    frame_num=self.current_frame,
                    image=images[idx].copy(),
                )

    except Exception as e:
        print(f"Feature extraction error: {e}")
        fallback = [None] * len(images)
        return fallback[0] if single_mode else fallback

    return results[0] if single_mode else results
|
| 726 |
-
|
| 727 |
-
def match_or_register(self, track, image_crop=None):
    """Match a track against session/permanent dogs, or register a new one.

    Returns (temp_id, score, permanent_name). (0, 0.0, None) is returned
    when the track has no usable image crop or feature extraction fails.
    Cosine similarity is computed directly with NumPy instead of one
    sklearn cosine_similarity() call per stored feature - same value,
    far less overhead.
    """
    self.current_frame += 1

    # Find the most recent detection that still has an image crop.
    detection = None
    for det in reversed(track.detections[-3:]):
        if det.image_crop is not None:
            detection = det
            image_crop = det.image_crop
            break

    if detection is None or image_crop is None:
        return 0, 0.0, None

    features = self.extract_features(image_crop, detection.bbox)
    if features is None:
        return 0, 0.0, None

    features.confidence = detection.confidence
    features.keypoints = detection.keypoints

    # Check the permanent database first.
    permanent_name = self.check_permanent_database(features.features)

    def _cos(a, b):
        # Plain-NumPy cosine similarity; epsilon guards zero vectors.
        return float(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b) + 1e-12))

    # Check session dogs - blends pose similarity in when enabled (OPTIONAL).
    best_temp_id = None
    best_score = -1.0
    best_score_without_pose = -1.0  # OPTIONAL: for pose-impact comparison

    for temp_id, dog_features_list in self.session_dogs.items():
        similarities = []
        similarities_no_pose = []

        for stored_feat in dog_features_list[-20:]:
            # Appearance similarity.
            app_sim = _cos(features.features, stored_feat.features)
            similarities_no_pose.append(app_sim)

            # Add pose if enabled - OPTIONAL.
            if self.use_pose and features.keypoints is not None and stored_feat.keypoints is not None:
                pose_sim = self._pose_similarity(features.keypoints, stored_feat.keypoints)
                similarities.append(0.85 * app_sim + 0.15 * pose_sim)
            else:
                similarities.append(app_sim)

        if similarities:
            # Blend the best hit with the average over stored features.
            final_score = 0.6 * max(similarities) + 0.4 * np.mean(similarities)
            score_no_pose = 0.6 * max(similarities_no_pose) + 0.4 * np.mean(similarities_no_pose)

            if final_score > best_score:
                best_score = final_score
                best_score_without_pose = score_no_pose
                best_temp_id = temp_id

    # Log whether pose blending changed the match outcome - OPTIONAL.
    if self.use_pose and best_temp_id is not None:
        if best_score >= self.base_threshold and best_score_without_pose < self.base_threshold:
            self.pose_impact_log['improved'] += 1
        elif best_score < self.base_threshold and best_score_without_pose >= self.base_threshold:
            self.pose_impact_log['prevented_false'] += 1

    threshold = self.base_threshold

    if best_temp_id is not None and best_score >= threshold:
        # Existing session dog: append features, capped at 30 per dog.
        self.session_dogs[best_temp_id].append(features)
        if len(self.session_dogs[best_temp_id]) > 30:
            self.session_dogs[best_temp_id] = self.session_dogs[best_temp_id][-30:]

        self.update_best_image(best_temp_id, features)

        if permanent_name:
            self.temp_to_permanent[best_temp_id] = permanent_name

        return best_temp_id, best_score, permanent_name
    else:
        # New session dog.
        new_temp_id = self.next_temp_id
        self.next_temp_id += 1
        self.session_dogs[new_temp_id] = [features]
        self.session_best_images[new_temp_id] = features

        if permanent_name:
            self.temp_to_permanent[new_temp_id] = permanent_name
            print(f" Known dog {permanent_name} -> ID {new_temp_id}")

        return new_temp_id, 1.0, permanent_name
|
| 817 |
-
|
| 818 |
-
def _pose_similarity(self, kpts1, kpts2):
|
| 819 |
-
"""Calculate pose similarity"""
|
| 820 |
-
try:
|
| 821 |
-
valid = (kpts1[:, 2] > 0.5) & (kpts2[:, 2] > 0.5)
|
| 822 |
-
if not np.any(valid):
|
| 823 |
-
return 0.5
|
| 824 |
-
|
| 825 |
-
diff = np.linalg.norm(kpts1[valid, :2] - kpts2[valid, :2], axis=1)
|
| 826 |
-
similarity = 1.0 - min(1.0, np.mean(diff) / 100.0)
|
| 827 |
-
return similarity
|
| 828 |
-
except:
|
| 829 |
-
return 0.5
|
| 830 |
-
|
| 831 |
-
def update_best_image(self, temp_id, features):
|
| 832 |
-
current_best = self.session_best_images.get(temp_id)
|
| 833 |
-
|
| 834 |
-
if features.image is not None:
|
| 835 |
-
quality = features.confidence * np.prod(features.image.shape[:2])
|
| 836 |
-
|
| 837 |
-
if current_best is None or current_best.image is None:
|
| 838 |
-
self.session_best_images[temp_id] = features
|
| 839 |
-
else:
|
| 840 |
-
current_quality = current_best.confidence * np.prod(current_best.image.shape[:2])
|
| 841 |
-
if quality > current_quality:
|
| 842 |
-
self.session_best_images[temp_id] = features
|
| 843 |
-
|
| 844 |
-
def save_session_to_permanent(self):
|
| 845 |
-
"""TASK 2: Save with multiple embeddings (5-10 best)"""
|
| 846 |
-
saved_dogs = {}
|
| 847 |
-
|
| 848 |
-
for temp_id, features_list in self.session_dogs.items():
|
| 849 |
-
if temp_id in self.temp_to_permanent:
|
| 850 |
-
continue
|
| 851 |
-
|
| 852 |
-
dog_name = self.get_next_turkish_name()
|
| 853 |
-
|
| 854 |
-
# Store top 5-10 embeddings - TASK 2
|
| 855 |
-
embeddings = [f.features for f in features_list]
|
| 856 |
-
|
| 857 |
-
# Sort by quality
|
| 858 |
-
qualities = [f.confidence * np.prod(f.image.shape[:2]) if f.image is not None else 0
|
| 859 |
-
for f in features_list]
|
| 860 |
-
|
| 861 |
-
sorted_indices = np.argsort(qualities)[::-1]
|
| 862 |
-
top_embeddings = [embeddings[i] for i in sorted_indices[:min(10, len(embeddings))]]
|
| 863 |
-
|
| 864 |
-
# Normalize
|
| 865 |
-
top_embeddings = [e / np.linalg.norm(e) for e in top_embeddings]
|
| 866 |
-
|
| 867 |
-
best_features = self.session_best_images.get(temp_id)
|
| 868 |
-
image_path = None
|
| 869 |
-
|
| 870 |
-
if best_features and best_features.image is not None:
|
| 871 |
-
image_filename = f"{dog_name.lower()}.jpg"
|
| 872 |
-
image_path = self.db_path / 'images' / image_filename
|
| 873 |
-
cv2.imwrite(str(image_path), best_features.image)
|
| 874 |
-
image_path = str(image_path.relative_to(self.db_path))
|
| 875 |
-
|
| 876 |
-
self.permanent_dogs[dog_name] = {
|
| 877 |
-
'name': dog_name,
|
| 878 |
-
'embeddings': top_embeddings, # TASK 2: Multiple embeddings
|
| 879 |
-
'first_seen': datetime.now().isoformat(),
|
| 880 |
-
'last_seen': datetime.now().isoformat(),
|
| 881 |
-
'total_sightings': 1,
|
| 882 |
-
'image_path': image_path
|
| 883 |
-
}
|
| 884 |
-
|
| 885 |
-
saved_dogs[temp_id] = dog_name
|
| 886 |
-
self.used_names.add(dog_name)
|
| 887 |
-
print(f" β
Saved: {dog_name}")
|
| 888 |
-
|
| 889 |
-
for temp_id, permanent_name in self.temp_to_permanent.items():
|
| 890 |
-
if permanent_name in self.permanent_dogs:
|
| 891 |
-
self.permanent_dogs[permanent_name]['last_seen'] = datetime.now().isoformat()
|
| 892 |
-
self.permanent_dogs[permanent_name]['total_sightings'] += 1
|
| 893 |
-
|
| 894 |
-
if saved_dogs:
|
| 895 |
-
self.save_permanent_database()
|
| 896 |
-
|
| 897 |
-
return saved_dogs
|
| 898 |
-
|
| 899 |
-
def delete_dog_from_database(self, dog_name: str):
|
| 900 |
-
"""TASK 4: Delete dog from database"""
|
| 901 |
-
if dog_name not in self.permanent_dogs:
|
| 902 |
-
return False
|
| 903 |
-
|
| 904 |
-
# Delete image
|
| 905 |
-
dog_data = self.permanent_dogs[dog_name]
|
| 906 |
-
if dog_data.get('image_path'):
|
| 907 |
-
img_path = self.db_path / dog_data['image_path']
|
| 908 |
-
if img_path.exists():
|
| 909 |
-
img_path.unlink()
|
| 910 |
-
|
| 911 |
-
# Remove from database
|
| 912 |
-
del self.permanent_dogs[dog_name]
|
| 913 |
-
self.used_names.discard(dog_name)
|
| 914 |
-
|
| 915 |
-
# Save
|
| 916 |
-
self.save_permanent_database()
|
| 917 |
-
print(f"ποΈ Deleted: {dog_name}")
|
| 918 |
-
return True
|
| 919 |
-
|
| 920 |
-
def get_next_turkish_name(self):
|
| 921 |
-
for name in self.TURKISH_DOG_NAMES:
|
| 922 |
-
if name not in self.used_names:
|
| 923 |
-
return name
|
| 924 |
-
|
| 925 |
-
counter = 2
|
| 926 |
-
while True:
|
| 927 |
-
for name in self.TURKISH_DOG_NAMES:
|
| 928 |
-
numbered_name = f"{name}_{counter}"
|
| 929 |
-
if numbered_name not in self.used_names:
|
| 930 |
-
return numbered_name
|
| 931 |
-
counter += 1
|
| 932 |
-
|
| 933 |
-
def match_or_register_all(self, track):
|
| 934 |
-
temp_id, confidence, permanent_name = self.match_or_register(track)
|
| 935 |
-
return {
|
| 936 |
-
'MegaDescriptor': {
|
| 937 |
-
'dog_id': temp_id,
|
| 938 |
-
'confidence': confidence,
|
| 939 |
-
'permanent_name': permanent_name
|
| 940 |
-
}
|
| 941 |
-
}
|
| 942 |
-
|
| 943 |
-
def set_all_thresholds(self, threshold: float):
|
| 944 |
-
self.base_threshold = max(0.15, min(0.95, threshold))
|
| 945 |
-
|
| 946 |
-
def reset_all(self):
|
| 947 |
-
self.session_dogs.clear()
|
| 948 |
-
self.session_best_images.clear()
|
| 949 |
-
self.temp_to_permanent.clear()
|
| 950 |
-
self.next_temp_id = 1
|
| 951 |
-
self.current_frame = 0
|
| 952 |
-
self.pose_impact_log = {'improved': 0, 'prevented_false': 0}
|
| 953 |
-
|
| 954 |
-
def get_pose_impact_stats(self):
|
| 955 |
-
"""OPTIONAL: Get pose comparison stats"""
|
| 956 |
-
return self.pose_impact_log.copy()
|
| 957 |
-
|
| 958 |
-
|
| 959 |
-
# ==================== HEALTH MODULE ====================
|
| 960 |
-
|
| 961 |
-
@dataclass
|
| 962 |
-
class HealthScore:
|
| 963 |
-
score: float
|
| 964 |
-
score_text: str
|
| 965 |
-
color: Tuple[int, int, int]
|
| 966 |
-
status: str
|
| 967 |
-
alerts: List[str]
|
| 968 |
-
confidence: float
|
| 969 |
-
|
| 970 |
-
|
| 971 |
-
class DogHealthAssessment:
|
| 972 |
-
"""Health assessment using pose and appearance"""
|
| 973 |
-
|
| 974 |
-
def __init__(self):
|
| 975 |
-
self.thresholds = {
|
| 976 |
-
'head_low_ratio': 0.7,
|
| 977 |
-
'leg_asymmetry_ratio': 0.1,
|
| 978 |
-
'body_condition_thin': 0.35,
|
| 979 |
-
'body_condition_obese': 0.65,
|
| 980 |
-
}
|
| 981 |
-
|
| 982 |
-
self.keypoints_map = {
|
| 983 |
-
'nose': 0, 'left_eye': 1, 'right_eye': 2,
|
| 984 |
-
'left_ear': 3, 'right_ear': 4,
|
| 985 |
-
'left_shoulder': 5, 'right_shoulder': 6,
|
| 986 |
-
'left_elbow': 7, 'right_elbow': 8,
|
| 987 |
-
'left_wrist': 9, 'right_wrist': 10,
|
| 988 |
-
'left_hip': 11, 'right_hip': 12,
|
| 989 |
-
'left_knee': 13, 'right_knee': 14,
|
| 990 |
-
'left_ankle': 15, 'right_ankle': 16
|
| 991 |
-
}
|
| 992 |
-
|
| 993 |
-
def assess_from_pose(self, keypoints, bbox):
|
| 994 |
-
"""Analyze health from pose"""
|
| 995 |
-
scores = {'posture': 10.0, 'gait_symmetry': 10.0, 'head_position': 10.0}
|
| 996 |
-
|
| 997 |
-
if keypoints is None or len(keypoints) < 17:
|
| 998 |
-
return scores
|
| 999 |
-
|
| 1000 |
-
body_height = max(1, bbox[3] - bbox[1])
|
| 1001 |
-
body_width = max(1, bbox[2] - bbox[0])
|
| 1002 |
-
|
| 1003 |
-
# Head position
|
| 1004 |
-
nose_kp = keypoints[self.keypoints_map['nose']]
|
| 1005 |
-
if nose_kp[2] > 0.5:
|
| 1006 |
-
head_relative_y = (nose_kp[1] - bbox[1]) / body_height
|
| 1007 |
-
if head_relative_y > self.thresholds['head_low_ratio']:
|
| 1008 |
-
scores['head_position'] -= 4.0
|
| 1009 |
-
elif head_relative_y > 0.5:
|
| 1010 |
-
scores['head_position'] -= 2.0
|
| 1011 |
-
|
| 1012 |
-
# Leg symmetry
|
| 1013 |
-
left_wrist = keypoints[self.keypoints_map['left_wrist']]
|
| 1014 |
-
right_wrist = keypoints[self.keypoints_map['right_wrist']]
|
| 1015 |
-
left_shoulder = keypoints[self.keypoints_map['left_shoulder']]
|
| 1016 |
-
right_shoulder = keypoints[self.keypoints_map['right_shoulder']]
|
| 1017 |
-
|
| 1018 |
-
if all(kp[2] > 0.5 for kp in [left_wrist, right_wrist, left_shoulder, right_shoulder]):
|
| 1019 |
-
left_len = abs(left_wrist[1] - left_shoulder[1])
|
| 1020 |
-
right_len = abs(right_wrist[1] - right_shoulder[1])
|
| 1021 |
-
|
| 1022 |
-
if left_len > 0 and right_len > 0:
|
| 1023 |
-
asym = abs(left_len - right_len) / max(left_len, right_len)
|
| 1024 |
-
if asym > self.thresholds['leg_asymmetry_ratio']:
|
| 1025 |
-
scores['gait_symmetry'] -= 3.0
|
| 1026 |
-
|
| 1027 |
-
for key in scores:
|
| 1028 |
-
scores[key] = max(0, scores[key])
|
| 1029 |
-
|
| 1030 |
-
return scores
|
| 1031 |
-
|
| 1032 |
-
def assess_body_condition(self, bbox, dog_crop):
|
| 1033 |
-
"""Body condition assessment"""
|
| 1034 |
-
scores = {'weight': 10.0, 'coat_quality': 10.0}
|
| 1035 |
-
|
| 1036 |
-
width = bbox[2] - bbox[0]
|
| 1037 |
-
height = bbox[3] - bbox[1]
|
| 1038 |
-
|
| 1039 |
-
if height > 0:
|
| 1040 |
-
aspect_ratio = width / height
|
| 1041 |
-
if aspect_ratio < self.thresholds['body_condition_thin']:
|
| 1042 |
-
scores['weight'] = 3.0
|
| 1043 |
-
elif aspect_ratio > self.thresholds['body_condition_obese']:
|
| 1044 |
-
scores['weight'] = 4.0
|
| 1045 |
-
|
| 1046 |
-
# Coat quality
|
| 1047 |
-
try:
|
| 1048 |
-
gray = cv2.cvtColor(dog_crop, cv2.COLOR_BGR2GRAY)
|
| 1049 |
-
texture_score = np.std(gray)
|
| 1050 |
-
|
| 1051 |
-
if texture_score < 15:
|
| 1052 |
-
scores['coat_quality'] = 3.0
|
| 1053 |
-
elif texture_score < 25:
|
| 1054 |
-
scores['coat_quality'] = 6.0
|
| 1055 |
-
except:
|
| 1056 |
-
pass
|
| 1057 |
-
|
| 1058 |
-
return scores
|
| 1059 |
-
|
| 1060 |
-
def calculate_overall_health(self, dog_id, keypoints, dog_crop, bbox):
|
| 1061 |
-
"""TASK 3: Calculate comprehensive health score"""
|
| 1062 |
-
pose_scores = self.assess_from_pose(keypoints, bbox) if keypoints is not None else {
|
| 1063 |
-
'posture': 7.0, 'gait_symmetry': 7.0, 'head_position': 7.0
|
| 1064 |
-
}
|
| 1065 |
-
|
| 1066 |
-
body_scores = self.assess_body_condition(bbox, dog_crop)
|
| 1067 |
-
|
| 1068 |
-
weights = {'pose': 0.5, 'body': 0.5}
|
| 1069 |
-
|
| 1070 |
-
avg_pose = np.mean(list(pose_scores.values()))
|
| 1071 |
-
avg_body = np.mean(list(body_scores.values()))
|
| 1072 |
-
|
| 1073 |
-
final_score = avg_pose * weights['pose'] + avg_body * weights['body']
|
| 1074 |
-
final_score = round(final_score, 1)
|
| 1075 |
-
|
| 1076 |
-
# Status
|
| 1077 |
-
if final_score >= 8.0:
|
| 1078 |
-
status = "SaΔlΔ±klΔ±"
|
| 1079 |
-
color = (0, 255, 0)
|
| 1080 |
-
elif final_score >= 6.0:
|
| 1081 |
-
status = "Δ°yi"
|
| 1082 |
-
color = (0, 255, 255)
|
| 1083 |
-
elif final_score >= 4.0:
|
| 1084 |
-
status = "Dikkat"
|
| 1085 |
-
color = (0, 165, 255)
|
| 1086 |
-
else:
|
| 1087 |
-
status = "Kritik"
|
| 1088 |
-
color = (0, 0, 255)
|
| 1089 |
-
|
| 1090 |
-
# Alerts
|
| 1091 |
-
alerts = []
|
| 1092 |
-
if pose_scores['head_position'] < 6.0:
|
| 1093 |
-
alerts.append("BaΕ pozisyonu dΓΌΕΓΌk")
|
| 1094 |
-
if pose_scores['gait_symmetry'] < 6.0:
|
| 1095 |
-
alerts.append("YΓΌrΓΌyΓΌΕ bozukluΔu")
|
| 1096 |
-
if body_scores['weight'] < 5.0:
|
| 1097 |
-
alerts.append("Kilo problemi")
|
| 1098 |
-
if body_scores['coat_quality'] < 6.0:
|
| 1099 |
-
alerts.append("TΓΌy kalitesi dΓΌΕΓΌk")
|
| 1100 |
-
|
| 1101 |
-
confidence = 0.7
|
| 1102 |
-
|
| 1103 |
-
return HealthScore(
|
| 1104 |
-
score=final_score,
|
| 1105 |
-
score_text=f"{final_score}/10",
|
| 1106 |
-
color=color,
|
| 1107 |
-
status=status,
|
| 1108 |
-
alerts=alerts,
|
| 1109 |
-
confidence=confidence
|
| 1110 |
-
)
|
| 1111 |
-
|
| 1112 |
|
| 1113 |
-
#
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1114 |
|
| 1115 |
-
class
|
| 1116 |
-
"""
|
| 1117 |
|
| 1118 |
def __init__(self):
|
| 1119 |
device = 'cuda' if torch.cuda.is_available() else 'cpu'
|
| 1120 |
-
|
| 1121 |
self.detector = DogDetector(device=device)
|
| 1122 |
-
self.tracker =
|
| 1123 |
-
self.reid = MegaDescriptorReID(device=device
|
| 1124 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1125 |
|
| 1126 |
self.current_session_data = {}
|
| 1127 |
-
self.health_scores = {} # TASK 3
|
| 1128 |
self.is_processing = False
|
| 1129 |
self.known_dogs_alerts = []
|
| 1130 |
|
| 1131 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1132 |
|
| 1133 |
def reset_session(self):
|
|
|
|
| 1134 |
self.is_processing = False
|
| 1135 |
self.current_session_data.clear()
|
| 1136 |
-
self.health_scores.clear()
|
| 1137 |
self.known_dogs_alerts.clear()
|
|
|
|
| 1138 |
self.tracker.reset()
|
| 1139 |
self.reid.reset_all()
|
| 1140 |
gc.collect()
|
| 1141 |
if torch.cuda.is_available():
|
| 1142 |
torch.cuda.empty_cache()
|
|
|
|
| 1143 |
|
| 1144 |
-
def
|
| 1145 |
-
"""
|
| 1146 |
-
try:
|
| 1147 |
-
cap = cv2.VideoCapture(video_path)
|
| 1148 |
-
|
| 1149 |
-
# Check rotation metadata
|
| 1150 |
-
rotation = cap.get(cv2.CAP_PROP_ORIENTATION_META)
|
| 1151 |
-
|
| 1152 |
-
if rotation in [90, 180, 270]:
|
| 1153 |
-
print(f"π Correcting rotation: {rotation}Β°")
|
| 1154 |
-
|
| 1155 |
-
# Create corrected video
|
| 1156 |
-
output_path = video_path.replace('.mp4', '_corrected.mp4')
|
| 1157 |
-
|
| 1158 |
-
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
|
| 1159 |
-
fps = cap.get(cv2.CAP_PROP_FPS)
|
| 1160 |
-
|
| 1161 |
-
# Read first frame to get dimensions
|
| 1162 |
-
ret, frame = cap.read()
|
| 1163 |
-
if ret:
|
| 1164 |
-
if rotation in [90, 270]:
|
| 1165 |
-
h, w = frame.shape[1], frame.shape[0]
|
| 1166 |
-
else:
|
| 1167 |
-
h, w = frame.shape[:2]
|
| 1168 |
-
|
| 1169 |
-
out = cv2.VideoWriter(output_path, fourcc, fps, (w, h))
|
| 1170 |
-
|
| 1171 |
-
# Process all frames
|
| 1172 |
-
cap.set(cv2.CAP_PROP_POS_FRAMES, 0)
|
| 1173 |
-
while True:
|
| 1174 |
-
ret, frame = cap.read()
|
| 1175 |
-
if not ret:
|
| 1176 |
-
break
|
| 1177 |
-
|
| 1178 |
-
if rotation == 90:
|
| 1179 |
-
frame = cv2.rotate(frame, cv2.ROTATE_90_CLOCKWISE)
|
| 1180 |
-
elif rotation == 180:
|
| 1181 |
-
frame = cv2.rotate(frame, cv2.ROTATE_180)
|
| 1182 |
-
elif rotation == 270:
|
| 1183 |
-
frame = cv2.rotate(frame, cv2.ROTATE_90_COUNTERCLOCKWISE)
|
| 1184 |
-
|
| 1185 |
-
out.write(frame)
|
| 1186 |
-
|
| 1187 |
-
out.release()
|
| 1188 |
-
cap.release()
|
| 1189 |
-
return output_path
|
| 1190 |
-
|
| 1191 |
-
cap.release()
|
| 1192 |
-
return video_path
|
| 1193 |
-
|
| 1194 |
-
except Exception as e:
|
| 1195 |
-
print(f"Orientation correction error: {e}")
|
| 1196 |
-
return video_path
|
| 1197 |
-
|
| 1198 |
-
def process_video(self, video_path: str, reid_threshold: float,
|
| 1199 |
-
sample_rate: int, use_pose: bool):
|
| 1200 |
-
"""Process video with batch optimization - TASK 5"""
|
| 1201 |
if not video_path:
|
| 1202 |
-
return None, "Please upload video", None, None
|
| 1203 |
|
| 1204 |
self.reset_session()
|
| 1205 |
self.is_processing = True
|
| 1206 |
-
|
| 1207 |
-
# TASK 6: Correct orientation
|
| 1208 |
-
video_path = self.correct_video_orientation(video_path)
|
| 1209 |
-
|
| 1210 |
self.reid.set_all_thresholds(reid_threshold)
|
| 1211 |
-
self.reid.use_pose = use_pose # OPTIONAL
|
| 1212 |
-
self.tracker.use_pose = use_pose # OPTIONAL
|
| 1213 |
|
| 1214 |
try:
|
| 1215 |
cap = cv2.VideoCapture(video_path)
|
|
@@ -1219,11 +78,7 @@ class DogDetectionDemo:
|
|
| 1219 |
frame_num = 0
|
| 1220 |
processed_frames = 0
|
| 1221 |
|
| 1222 |
-
|
| 1223 |
-
frame_buffer = []
|
| 1224 |
-
frame_indices = []
|
| 1225 |
-
|
| 1226 |
-
print(f"\nπ₯ Processing: {total_frames} frames")
|
| 1227 |
|
| 1228 |
while cap.isOpened() and self.is_processing:
|
| 1229 |
ret, frame = cap.read()
|
|
@@ -1231,27 +86,55 @@ class DogDetectionDemo:
|
|
| 1231 |
break
|
| 1232 |
|
| 1233 |
if frame_num % sample_rate == 0:
|
| 1234 |
-
|
| 1235 |
-
|
| 1236 |
|
| 1237 |
-
|
| 1238 |
-
|
| 1239 |
-
|
| 1240 |
-
|
| 1241 |
-
|
| 1242 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1243 |
|
| 1244 |
-
|
|
|
|
|
|
|
| 1245 |
|
|
|
|
| 1246 |
if frame_num % 30 == 0:
|
| 1247 |
print(f"Progress: {int((frame_num / total_frames) * 100)}%")
|
| 1248 |
-
|
| 1249 |
-
if frame_num % 100 == 0:
|
| 1250 |
-
gc.collect()
|
| 1251 |
-
|
| 1252 |
-
# Process remaining frames
|
| 1253 |
-
if frame_buffer:
|
| 1254 |
-
self._process_frame_batch(frame_buffer, temp_id_to_crops)
|
| 1255 |
|
| 1256 |
cap.release()
|
| 1257 |
|
|
@@ -1259,72 +142,107 @@ class DogDetectionDemo:
|
|
| 1259 |
if temp_id_to_crops:
|
| 1260 |
self.current_session_data = {
|
| 1261 |
'crops': temp_id_to_crops,
|
| 1262 |
-
'temp_to_permanent': self.reid.temp_to_permanent
|
|
|
|
| 1263 |
}
|
| 1264 |
|
| 1265 |
gallery_html = self._create_gallery_html(temp_id_to_crops)
|
| 1266 |
alerts_html = self._create_alerts_html()
|
|
|
|
| 1267 |
|
| 1268 |
-
|
| 1269 |
-
pose_stats = ""
|
| 1270 |
-
if use_pose:
|
| 1271 |
-
stats = self.reid.get_pose_impact_stats()
|
| 1272 |
-
pose_stats = f"\nπ Pose Impact: Improved {stats['improved']} tracks, Prevented {stats['prevented_false']} false matches"
|
| 1273 |
-
|
| 1274 |
-
stats_msg = f"β
Found {len(temp_id_to_crops)} dogs | {processed_frames} frames{pose_stats}"
|
| 1275 |
|
| 1276 |
-
return gallery_html, stats_msg, alerts_html,
|
| 1277 |
else:
|
| 1278 |
return "<p>No dogs detected</p>", "No dogs detected", None, None
|
| 1279 |
|
| 1280 |
except Exception as e:
|
| 1281 |
print(f"Error: {e}")
|
| 1282 |
-
import traceback
|
| 1283 |
-
traceback.print_exc()
|
| 1284 |
return f"<p>Error: {str(e)}</p>", f"Error: {str(e)}", None, None
|
| 1285 |
finally:
|
| 1286 |
self.is_processing = False
|
| 1287 |
|
| 1288 |
-
def
|
| 1289 |
-
"""
|
| 1290 |
-
|
| 1291 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1292 |
|
| 1293 |
-
|
| 1294 |
-
|
|
|
|
|
|
|
| 1295 |
|
| 1296 |
-
|
| 1297 |
-
|
| 1298 |
-
|
| 1299 |
-
|
| 1300 |
-
|
| 1301 |
-
|
| 1302 |
-
|
| 1303 |
-
|
| 1304 |
-
|
| 1305 |
-
|
| 1306 |
-
|
| 1307 |
-
|
| 1308 |
-
|
| 1309 |
-
|
| 1310 |
-
|
| 1311 |
-
|
| 1312 |
-
|
| 1313 |
-
|
| 1314 |
-
|
| 1315 |
-
|
| 1316 |
-
|
| 1317 |
-
|
| 1318 |
-
|
| 1319 |
-
|
| 1320 |
-
|
| 1321 |
-
|
| 1322 |
-
|
| 1323 |
-
|
| 1324 |
-
|
| 1325 |
-
|
| 1326 |
|
| 1327 |
-
def
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1328 |
if not self.known_dogs_alerts:
|
| 1329 |
return None
|
| 1330 |
|
|
@@ -1332,9 +250,8 @@ class DogDetectionDemo:
|
|
| 1332 |
for dog_name in self.known_dogs_alerts:
|
| 1333 |
dog_data = self.reid.permanent_dogs.get(dog_name, {})
|
| 1334 |
alert_html = f"""
|
| 1335 |
-
<div style='padding: 10px; margin: 10px 0; background: #fff3cd;
|
| 1336 |
-
|
| 1337 |
-
<h4 style='color: #856404; margin: 0;'>β Known Dog: {dog_name}</h4>
|
| 1338 |
"""
|
| 1339 |
|
| 1340 |
if dog_data.get('image_path'):
|
|
@@ -1345,12 +262,11 @@ class DogDetectionDemo:
|
|
| 1345 |
img_base64 = self._img_to_base64(img_rgb)
|
| 1346 |
alert_html += f"""
|
| 1347 |
<img src='data:image/jpeg;base64,{img_base64}'
|
| 1348 |
-
style='width: 100px; height: 100px; object-fit: cover;
|
| 1349 |
-
margin: 10px 0; border-radius: 5px;'>
|
| 1350 |
"""
|
| 1351 |
|
| 1352 |
alert_html += f"""
|
| 1353 |
-
<p style='margin: 5px 0;'>
|
| 1354 |
<p style='margin: 5px 0;'>Total sightings: {dog_data.get('total_sightings', 1)}</p>
|
| 1355 |
</div>
|
| 1356 |
"""
|
|
@@ -1358,8 +274,8 @@ class DogDetectionDemo:
|
|
| 1358 |
|
| 1359 |
return "".join(alerts)
|
| 1360 |
|
| 1361 |
-
def _create_gallery_html(self, temp_id_to_crops):
|
| 1362 |
-
"""
|
| 1363 |
html = """
|
| 1364 |
<div style='padding: 20px;'>
|
| 1365 |
<h2 style='text-align:center;'>π Detected Dogs</h2>
|
|
@@ -1370,6 +286,10 @@ class DogDetectionDemo:
|
|
| 1370 |
crops = temp_id_to_crops[temp_id]
|
| 1371 |
permanent_name = self.reid.temp_to_permanent.get(temp_id)
|
| 1372 |
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1373 |
if permanent_name:
|
| 1374 |
title = f"{permanent_name} (Known)"
|
| 1375 |
border_color = "#ffc107"
|
|
@@ -1377,28 +297,33 @@ class DogDetectionDemo:
|
|
| 1377 |
title = f"New Dog #{temp_id}"
|
| 1378 |
border_color = "#28a745"
|
| 1379 |
|
| 1380 |
-
# TASK 3: Add health score
|
| 1381 |
-
health_html = ""
|
| 1382 |
-
if temp_id in self.health_scores:
|
| 1383 |
-
hs = self.health_scores[temp_id]
|
| 1384 |
-
color_hex = f"#{hs.color[2]:02x}{hs.color[1]:02x}{hs.color[0]:02x}"
|
| 1385 |
-
health_html = f"""
|
| 1386 |
-
<div style='margin: 10px 0; padding: 8px; background: {color_hex}20;
|
| 1387 |
-
border-left: 4px solid {color_hex}; border-radius: 4px;'>
|
| 1388 |
-
<strong>Health: {hs.score_text} - {hs.status}</strong>
|
| 1389 |
-
{' | ' + ', '.join(hs.alerts) if hs.alerts else ''}
|
| 1390 |
-
</div>
|
| 1391 |
-
"""
|
| 1392 |
-
|
| 1393 |
html += f"""
|
| 1394 |
-
<div style='border:
|
| 1395 |
-
|
| 1396 |
-
|
| 1397 |
-
{health_html}
|
| 1398 |
-
<p style='margin: 5px 0; color: #666;'>Images: {len(crops)}</p>
|
| 1399 |
-
<div style='display: grid; grid-template-columns: repeat(5, 1fr); gap: 5px;'>
|
| 1400 |
"""
|
| 1401 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1402 |
for crop in crops:
|
| 1403 |
img_rgb = cv2.cvtColor(crop, cv2.COLOR_BGR2RGB)
|
| 1404 |
img_base64 = self._img_to_base64(img_rgb)
|
|
@@ -1413,51 +338,50 @@ class DogDetectionDemo:
|
|
| 1413 |
return html
|
| 1414 |
|
| 1415 |
def save_to_database(self):
|
|
|
|
| 1416 |
if not self.current_session_data:
|
| 1417 |
return "No data to save", None
|
| 1418 |
|
| 1419 |
saved = self.reid.save_session_to_permanent()
|
| 1420 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1421 |
if saved:
|
| 1422 |
-
msg = f"β
Saved {len(saved)} new dogs:\n"
|
| 1423 |
for temp_id, name in saved.items():
|
| 1424 |
-
|
| 1425 |
-
|
| 1426 |
-
|
| 1427 |
-
|
| 1428 |
-
|
| 1429 |
-
return msg, db_html
|
| 1430 |
-
|
| 1431 |
-
def delete_dog(self, dog_name: str):
|
| 1432 |
-
"""TASK 4: Delete dog from database"""
|
| 1433 |
-
if not dog_name or dog_name.strip() == "":
|
| 1434 |
-
return "Please select a dog", self._show_database()
|
| 1435 |
-
|
| 1436 |
-
success = self.reid.delete_dog_from_database(dog_name)
|
| 1437 |
-
|
| 1438 |
-
if success:
|
| 1439 |
-
msg = f"β
Deleted: {dog_name}"
|
| 1440 |
else:
|
| 1441 |
-
msg =
|
| 1442 |
|
| 1443 |
db_html = self._show_database()
|
| 1444 |
return msg, db_html
|
| 1445 |
|
| 1446 |
-
def _show_database(self):
|
| 1447 |
-
"""Show
|
| 1448 |
if not self.reid.permanent_dogs:
|
| 1449 |
return "<p>Database is empty</p>"
|
| 1450 |
|
| 1451 |
html = """
|
| 1452 |
<div style='padding: 20px;'>
|
| 1453 |
<h2>π Dog Database</h2>
|
| 1454 |
-
<div style='display: grid; grid-template-columns: repeat(auto-fill, minmax(
|
| 1455 |
"""
|
| 1456 |
|
| 1457 |
for dog_name, dog_data in self.reid.permanent_dogs.items():
|
|
|
|
|
|
|
| 1458 |
html += f"""
|
| 1459 |
-
<div style='border: 1px solid #ddd; border-radius: 8px; padding: 10px;
|
| 1460 |
-
text-align: center; background: white;'>
|
| 1461 |
<h4 style='margin: 0 0 10px 0;'>{dog_name}</h4>
|
| 1462 |
"""
|
| 1463 |
|
|
@@ -1473,150 +397,206 @@ class DogDetectionDemo:
|
|
| 1473 |
"""
|
| 1474 |
|
| 1475 |
html += f"""
|
| 1476 |
-
<p style='font-size: 12px; margin: 5px 0;'>
|
| 1477 |
<p style='font-size: 12px; margin: 5px 0;'>Sightings: {dog_data['total_sightings']}</p>
|
| 1478 |
-
<p style='font-size:
|
| 1479 |
</div>
|
| 1480 |
"""
|
| 1481 |
|
| 1482 |
html += "</div></div>"
|
| 1483 |
return html
|
| 1484 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1485 |
def _img_to_base64(self, img):
|
|
|
|
| 1486 |
pil_img = Image.fromarray(img)
|
| 1487 |
buffered = BytesIO()
|
| 1488 |
pil_img.save(buffered, format="JPEG", quality=85)
|
| 1489 |
return base64.b64encode(buffered.getvalue()).decode()
|
| 1490 |
|
| 1491 |
def stop_and_reset(self):
|
|
|
|
| 1492 |
self.reset_session()
|
| 1493 |
return "<p>Ready</p>", "Ready", None, None
|
| 1494 |
|
| 1495 |
def create_interface(self):
|
| 1496 |
-
"""Create Gradio interface
|
| 1497 |
-
with gr.Blocks(title="Dog Tracking System", theme=gr.themes.Soft()) as app:
|
| 1498 |
gr.Markdown("""
|
| 1499 |
-
# π Dog Tracking System
|
| 1500 |
-
### Detect, Track,
|
| 1501 |
""")
|
| 1502 |
|
| 1503 |
-
|
| 1504 |
-
|
| 1505 |
-
|
| 1506 |
-
|
| 1507 |
-
|
| 1508 |
-
|
| 1509 |
-
|
| 1510 |
-
|
| 1511 |
-
|
| 1512 |
-
|
| 1513 |
-
|
| 1514 |
-
|
| 1515 |
-
|
| 1516 |
-
|
| 1517 |
-
|
| 1518 |
-
|
| 1519 |
-
|
| 1520 |
-
|
| 1521 |
-
|
| 1522 |
-
|
| 1523 |
-
use_pose = gr.Checkbox(
|
| 1524 |
-
label="Use Pose Keypoints for Matching",
|
| 1525 |
-
value=True,
|
| 1526 |
-
info="Improves accuracy but slightly slower"
|
| 1527 |
-
)
|
| 1528 |
|
| 1529 |
-
|
| 1530 |
-
|
| 1531 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1532 |
|
| 1533 |
-
|
| 1534 |
-
|
| 1535 |
-
|
| 1536 |
-
|
| 1537 |
-
|
| 1538 |
-
show_db_btn = gr.Button("π Show Database", variant="secondary")
|
| 1539 |
|
| 1540 |
-
|
| 1541 |
-
|
| 1542 |
-
|
| 1543 |
-
|
| 1544 |
-
|
| 1545 |
-
with gr.Row():
|
| 1546 |
-
with gr.Column():
|
| 1547 |
-
delete_input = gr.Textbox(
|
| 1548 |
-
label="Dog Name to Delete",
|
| 1549 |
-
placeholder="Enter exact dog name (e.g., KarabaΕ)"
|
| 1550 |
-
)
|
| 1551 |
-
delete_btn = gr.Button("ποΈ Delete Dog", variant="stop")
|
| 1552 |
-
delete_status = gr.Textbox(label="Delete Status", interactive=False)
|
| 1553 |
-
|
| 1554 |
-
# Process video
|
| 1555 |
-
process_btn.click(
|
| 1556 |
-
self.process_video,
|
| 1557 |
-
inputs=[video_input, reid_threshold, sample_rate, use_pose],
|
| 1558 |
-
outputs=[gallery_output, status_text, alerts_output, database_output]
|
| 1559 |
-
).then(
|
| 1560 |
-
lambda x: gr.update(visible=bool(x)),
|
| 1561 |
-
inputs=[alerts_output],
|
| 1562 |
-
outputs=[alerts_output]
|
| 1563 |
-
)
|
| 1564 |
-
|
| 1565 |
-
# Save to database
|
| 1566 |
-
save_btn.click(
|
| 1567 |
-
self.save_to_database,
|
| 1568 |
-
outputs=[status_text, database_output]
|
| 1569 |
-
).then(
|
| 1570 |
-
lambda: gr.update(visible=True),
|
| 1571 |
-
outputs=[database_output]
|
| 1572 |
-
)
|
| 1573 |
|
| 1574 |
-
#
|
| 1575 |
-
|
| 1576 |
-
|
| 1577 |
-
outputs=[database_output]
|
| 1578 |
-
).then(
|
| 1579 |
-
lambda: gr.update(visible=True),
|
| 1580 |
-
outputs=[database_output]
|
| 1581 |
-
)
|
| 1582 |
|
| 1583 |
-
#
|
| 1584 |
-
|
| 1585 |
-
|
| 1586 |
-
|
| 1587 |
-
|
| 1588 |
-
|
| 1589 |
-
|
| 1590 |
-
|
| 1591 |
-
|
|
|
|
| 1592 |
|
| 1593 |
-
#
|
| 1594 |
-
|
| 1595 |
-
|
| 1596 |
-
|
| 1597 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1598 |
|
| 1599 |
-
|
| 1600 |
-
|
| 1601 |
-
|
| 1602 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1603 |
|
| 1604 |
return app
|
| 1605 |
|
| 1606 |
-
|
| 1607 |
-
# ==================== LAUNCH ====================
|
| 1608 |
-
|
| 1609 |
if __name__ == "__main__":
|
| 1610 |
-
|
| 1611 |
-
print("Dog Tracking System - All 57 Tasks Implemented")
|
| 1612 |
-
print("Optimized for HuggingFace Spaces with T4 GPU")
|
| 1613 |
-
print("="*60)
|
| 1614 |
-
|
| 1615 |
-
demo = DogDetectionDemo()
|
| 1616 |
app = demo.create_interface()
|
| 1617 |
-
|
| 1618 |
-
app.launch(
|
| 1619 |
-
server_name="0.0.0.0",
|
| 1620 |
-
server_port=7860,
|
| 1621 |
-
share=False
|
| 1622 |
-
)
|
|
|
|
| 1 |
"""
|
| 2 |
+
Enhanced Dog Detection Demo with Health Integration and Database Management
|
| 3 |
+
Implements Enhancements 3 and 4
|
|
|
|
| 4 |
"""
|
| 5 |
import gradio as gr
|
| 6 |
import cv2
|
| 7 |
import numpy as np
|
| 8 |
import torch
|
| 9 |
+
from typing import Dict, List, Optional
|
| 10 |
+
import gc
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 11 |
import base64
|
| 12 |
from io import BytesIO
|
| 13 |
from PIL import Image
|
| 14 |
+
from pathlib import Path
|
| 15 |
+
from datetime import datetime
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 16 |
|
| 17 |
+
# Import modules
|
| 18 |
+
from detection import EnhancedDogDetector as DogDetector
|
| 19 |
+
from tracking import EnhancedTracker as SimpleTracker
|
| 20 |
+
from reid import EnhancedMegaDescriptorReID as MegaDescriptorReID
|
| 21 |
+
from health_module import DogHealthAssessment
|
| 22 |
+
from database import DogDatabase
|
| 23 |
|
| 24 |
+
class EnhancedDogDetectionDemo:
|
| 25 |
+
"""Enhanced demo with health assessment and database management"""
|
| 26 |
|
| 27 |
def __init__(self):
|
| 28 |
device = 'cuda' if torch.cuda.is_available() else 'cpu'
|
|
|
|
| 29 |
self.detector = DogDetector(device=device)
|
| 30 |
+
self.tracker = SimpleTracker(match_threshold=0.2)
|
| 31 |
+
self.reid = MegaDescriptorReID(device=device)
|
| 32 |
+
|
| 33 |
+
# ENHANCEMENT 3: Health assessment module
|
| 34 |
+
self.health_assessor = DogHealthAssessment()
|
| 35 |
+
|
| 36 |
+
# ENHANCEMENT 4: Database integration
|
| 37 |
+
self.db = DogDatabase("dog_monitoring.db")
|
| 38 |
|
| 39 |
self.current_session_data = {}
|
|
|
|
| 40 |
self.is_processing = False
|
| 41 |
self.known_dogs_alerts = []
|
| 42 |
|
| 43 |
+
# ENHANCEMENT 3: Health tracking
|
| 44 |
+
self.dog_health_scores = {} # temp_id -> health_score
|
| 45 |
+
|
| 46 |
+
# Set up callback for sleeping tracks
|
| 47 |
+
self.tracker.set_reid_callback(self.reid.move_to_sleeping)
|
| 48 |
+
|
| 49 |
+
print("β
Enhanced Demo initialized with health assessment and database")
|
| 50 |
|
| 51 |
def reset_session(self):
|
| 52 |
+
"""Reset current session"""
|
| 53 |
self.is_processing = False
|
| 54 |
self.current_session_data.clear()
|
|
|
|
| 55 |
self.known_dogs_alerts.clear()
|
| 56 |
+
self.dog_health_scores.clear()
|
| 57 |
self.tracker.reset()
|
| 58 |
self.reid.reset_all()
|
| 59 |
gc.collect()
|
| 60 |
if torch.cuda.is_available():
|
| 61 |
torch.cuda.empty_cache()
|
| 62 |
+
print("π Session reset")
|
| 63 |
|
| 64 |
+
def process_video(self, video_path: str, reid_threshold: float, sample_rate: int):
|
| 65 |
+
"""Process video with health assessment"""
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 66 |
if not video_path:
|
| 67 |
+
return None, "Please upload a video", None, None
|
| 68 |
|
| 69 |
self.reset_session()
|
| 70 |
self.is_processing = True
|
|
|
|
|
|
|
|
|
|
|
|
|
| 71 |
self.reid.set_all_thresholds(reid_threshold)
|
|
|
|
|
|
|
| 72 |
|
| 73 |
try:
|
| 74 |
cap = cv2.VideoCapture(video_path)
|
|
|
|
| 78 |
frame_num = 0
|
| 79 |
processed_frames = 0
|
| 80 |
|
| 81 |
+
print(f"\nπ₯ Processing video: {total_frames} frames")
|
|
|
|
|
|
|
|
|
|
|
|
|
| 82 |
|
| 83 |
while cap.isOpened() and self.is_processing:
|
| 84 |
ret, frame = cap.read()
|
|
|
|
| 86 |
break
|
| 87 |
|
| 88 |
if frame_num % sample_rate == 0:
|
| 89 |
+
detections = self.detector.detect(frame)
|
| 90 |
+
tracks = self.tracker.update(detections)
|
| 91 |
|
| 92 |
+
for track in tracks:
|
| 93 |
+
if not self.is_processing:
|
| 94 |
+
break
|
| 95 |
+
|
| 96 |
+
result = self.reid.match_or_register_all(track)
|
| 97 |
+
temp_id = result['MegaDescriptor']['dog_id']
|
| 98 |
+
permanent_name = result['MegaDescriptor'].get('permanent_name')
|
| 99 |
+
|
| 100 |
+
if temp_id > 0:
|
| 101 |
+
# Store crops
|
| 102 |
+
for det in reversed(track.detections[-3:]):
|
| 103 |
+
if det.image_crop is not None:
|
| 104 |
+
if temp_id not in temp_id_to_crops:
|
| 105 |
+
temp_id_to_crops[temp_id] = []
|
| 106 |
+
|
| 107 |
+
if len(temp_id_to_crops[temp_id]) < 10:
|
| 108 |
+
temp_id_to_crops[temp_id].append(det.image_crop.copy())
|
| 109 |
+
|
| 110 |
+
# ENHANCEMENT 3: Assess health
|
| 111 |
+
if temp_id not in self.dog_health_scores:
|
| 112 |
+
bbox = det.bbox if hasattr(det, 'bbox') else [0,0,100,100]
|
| 113 |
+
position = ((bbox[0]+bbox[2])/2, (bbox[1]+bbox[3])/2)
|
| 114 |
+
|
| 115 |
+
health_score = self.health_assessor.calculate_overall_health(
|
| 116 |
+
dog_id=temp_id,
|
| 117 |
+
keypoints=None, # Can add pose detection later
|
| 118 |
+
dog_crop=det.image_crop,
|
| 119 |
+
bbox=bbox,
|
| 120 |
+
current_pos=position
|
| 121 |
+
)
|
| 122 |
+
|
| 123 |
+
self.dog_health_scores[temp_id] = health_score
|
| 124 |
+
|
| 125 |
+
break
|
| 126 |
+
|
| 127 |
+
# Track known dogs
|
| 128 |
+
if permanent_name and permanent_name not in self.known_dogs_alerts:
|
| 129 |
+
self.known_dogs_alerts.append(permanent_name)
|
| 130 |
|
| 131 |
+
processed_frames += 1
|
| 132 |
+
if frame_num % 100 == 0:
|
| 133 |
+
gc.collect()
|
| 134 |
|
| 135 |
+
frame_num += 1
|
| 136 |
if frame_num % 30 == 0:
|
| 137 |
print(f"Progress: {int((frame_num / total_frames) * 100)}%")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 138 |
|
| 139 |
cap.release()
|
| 140 |
|
|
|
|
| 142 |
if temp_id_to_crops:
|
| 143 |
self.current_session_data = {
|
| 144 |
'crops': temp_id_to_crops,
|
| 145 |
+
'temp_to_permanent': self.reid.temp_to_permanent,
|
| 146 |
+
'health_scores': self.dog_health_scores
|
| 147 |
}
|
| 148 |
|
| 149 |
gallery_html = self._create_gallery_html(temp_id_to_crops)
|
| 150 |
alerts_html = self._create_alerts_html()
|
| 151 |
+
health_html = self._create_health_summary_html()
|
| 152 |
|
| 153 |
+
stats_msg = f"β
Found {len(temp_id_to_crops)} dogs | {processed_frames} frames"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 154 |
|
| 155 |
+
return gallery_html, stats_msg, alerts_html, health_html
|
| 156 |
else:
|
| 157 |
return "<p>No dogs detected</p>", "No dogs detected", None, None
|
| 158 |
|
| 159 |
except Exception as e:
|
| 160 |
print(f"Error: {e}")
|
|
|
|
|
|
|
| 161 |
return f"<p>Error: {str(e)}</p>", f"Error: {str(e)}", None, None
|
| 162 |
finally:
|
| 163 |
self.is_processing = False
|
| 164 |
|
| 165 |
+
def _create_health_summary_html(self) -> str:
|
| 166 |
+
"""ENHANCEMENT 3: Create health summary display"""
|
| 167 |
+
if not self.dog_health_scores:
|
| 168 |
+
return "<p>No health data available</p>"
|
| 169 |
+
|
| 170 |
+
html = """
|
| 171 |
+
<div style='padding: 20px; background: #f0f8ff; border-radius: 10px;'>
|
| 172 |
+
<h2 style='text-align:center; color: #2c3e50;'>π₯ Health Assessment Summary</h2>
|
| 173 |
+
<div style='display: grid; grid-template-columns: repeat(auto-fit, minmax(250px, 1fr)); gap: 15px; margin-top: 20px;'>
|
| 174 |
+
"""
|
| 175 |
+
|
| 176 |
+
# Calculate overall statistics
|
| 177 |
+
all_scores = [hs.score for hs in self.dog_health_scores.values()]
|
| 178 |
+
avg_health = np.mean(all_scores) if all_scores else 0
|
| 179 |
+
|
| 180 |
+
# Overall health indicator
|
| 181 |
+
if avg_health >= 8.0:
|
| 182 |
+
overall_color = "#28a745"
|
| 183 |
+
overall_status = "SaΔlΔ±klΔ±"
|
| 184 |
+
elif avg_health >= 6.0:
|
| 185 |
+
overall_color = "#ffc107"
|
| 186 |
+
overall_status = "Δ°yi"
|
| 187 |
+
elif avg_health >= 4.0:
|
| 188 |
+
overall_color = "#fd7e14"
|
| 189 |
+
overall_status = "Dikkat"
|
| 190 |
+
else:
|
| 191 |
+
overall_color = "#dc3545"
|
| 192 |
+
overall_status = "Kritik"
|
| 193 |
+
|
| 194 |
+
html += f"""
|
| 195 |
+
<div style='background: white; padding: 15px; border-radius: 8px; border-left: 4px solid {overall_color};'>
|
| 196 |
+
<h3 style='margin: 0; color: {overall_color};'>Genel Durum</h3>
|
| 197 |
+
<p style='font-size: 24px; font-weight: bold; margin: 10px 0;'>{avg_health:.1f}/10</p>
|
| 198 |
+
<p style='margin: 0; color: #666;'>{overall_status}</p>
|
| 199 |
+
<p style='margin: 5px 0; font-size: 14px; color: #666;'>Toplam {len(self.dog_health_scores)} kΓΆpek</p>
|
| 200 |
+
</div>
|
| 201 |
+
"""
|
| 202 |
|
| 203 |
+
# Individual dog health cards
|
| 204 |
+
for temp_id, health_score in sorted(self.dog_health_scores.items(), key=lambda x: x[1].score):
|
| 205 |
+
permanent_name = self.reid.temp_to_permanent.get(temp_id)
|
| 206 |
+
dog_name = permanent_name if permanent_name else f"KΓΆpek #{temp_id}"
|
| 207 |
|
| 208 |
+
# Get trend if available
|
| 209 |
+
trend = self.health_assessor.get_health_trend(temp_id)
|
| 210 |
+
|
| 211 |
+
html += f"""
|
| 212 |
+
<div style='background: white; padding: 15px; border-radius: 8px;
|
| 213 |
+
border-left: 4px solid {self._rgb_to_hex(health_score.color)};'>
|
| 214 |
+
<h4 style='margin: 0 0 10px 0; color: #2c3e50;'>{dog_name}</h4>
|
| 215 |
+
<div style='display: flex; align-items: center; gap: 10px; margin-bottom: 10px;'>
|
| 216 |
+
<span style='font-size: 20px; font-weight: bold; color: {self._rgb_to_hex(health_score.color)};'>
|
| 217 |
+
{health_score.score_text}
|
| 218 |
+
</span>
|
| 219 |
+
<span style='background: {self._rgb_to_hex(health_score.color)}20;
|
| 220 |
+
padding: 4px 8px; border-radius: 4px; font-size: 12px;'>
|
| 221 |
+
{health_score.status}
|
| 222 |
+
</span>
|
| 223 |
+
</div>
|
| 224 |
+
<p style='margin: 5px 0; font-size: 13px; color: #666;'>Trend: {trend}</p>
|
| 225 |
+
"""
|
| 226 |
+
|
| 227 |
+
# Add alerts if any
|
| 228 |
+
if health_score.alerts:
|
| 229 |
+
html += "<div style='margin-top: 10px;'>"
|
| 230 |
+
for alert in health_score.alerts[:3]: # Show top 3 alerts
|
| 231 |
+
html += f"<p style='margin: 3px 0; font-size: 12px; color: #856404;'>β οΈ {alert}</p>"
|
| 232 |
+
html += "</div>"
|
| 233 |
+
|
| 234 |
+
html += "</div>"
|
| 235 |
+
|
| 236 |
+
html += "</div></div>"
|
| 237 |
+
return html
|
| 238 |
|
| 239 |
+
def _rgb_to_hex(self, rgb_tuple):
|
| 240 |
+
"""Convert BGR to hex color"""
|
| 241 |
+
b, g, r = rgb_tuple
|
| 242 |
+
return f"#{r:02x}{g:02x}{b:02x}"
|
| 243 |
+
|
| 244 |
+
def _create_alerts_html(self) -> Optional[str]:
|
| 245 |
+
"""Create alerts for known dogs"""
|
| 246 |
if not self.known_dogs_alerts:
|
| 247 |
return None
|
| 248 |
|
|
|
|
| 250 |
for dog_name in self.known_dogs_alerts:
|
| 251 |
dog_data = self.reid.permanent_dogs.get(dog_name, {})
|
| 252 |
alert_html = f"""
|
| 253 |
+
<div style='padding: 10px; margin: 10px 0; background: #fff3cd; border: 2px solid #ffc107; border-radius: 8px;'>
|
| 254 |
+
<h4 style='color: #856404; margin: 0;'>β οΈ Known Dog Detected: {dog_name}</h4>
|
|
|
|
| 255 |
"""
|
| 256 |
|
| 257 |
if dog_data.get('image_path'):
|
|
|
|
| 262 |
img_base64 = self._img_to_base64(img_rgb)
|
| 263 |
alert_html += f"""
|
| 264 |
<img src='data:image/jpeg;base64,{img_base64}'
|
| 265 |
+
style='width: 100px; height: 100px; object-fit: cover; margin: 10px 0; border-radius: 5px;'>
|
|
|
|
| 266 |
"""
|
| 267 |
|
| 268 |
alert_html += f"""
|
| 269 |
+
<p style='margin: 5px 0;'>First seen: {dog_data.get('first_seen', 'Unknown')[:10]}</p>
|
| 270 |
<p style='margin: 5px 0;'>Total sightings: {dog_data.get('total_sightings', 1)}</p>
|
| 271 |
</div>
|
| 272 |
"""
|
|
|
|
| 274 |
|
| 275 |
return "".join(alerts)
|
| 276 |
|
| 277 |
+
def _create_gallery_html(self, temp_id_to_crops: Dict) -> str:
|
| 278 |
+
"""Create HTML gallery with health indicators"""
|
| 279 |
html = """
|
| 280 |
<div style='padding: 20px;'>
|
| 281 |
<h2 style='text-align:center;'>π Detected Dogs</h2>
|
|
|
|
| 286 |
crops = temp_id_to_crops[temp_id]
|
| 287 |
permanent_name = self.reid.temp_to_permanent.get(temp_id)
|
| 288 |
|
| 289 |
+
# Get health score
|
| 290 |
+
health_score = self.dog_health_scores.get(temp_id)
|
| 291 |
+
health_color = self._rgb_to_hex(health_score.color) if health_score else "#28a745"
|
| 292 |
+
|
| 293 |
if permanent_name:
|
| 294 |
title = f"{permanent_name} (Known)"
|
| 295 |
border_color = "#ffc107"
|
|
|
|
| 297 |
title = f"New Dog #{temp_id}"
|
| 298 |
border_color = "#28a745"
|
| 299 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 300 |
html += f"""
|
| 301 |
+
<div style='border: 3px solid {border_color}; border-radius: 10px; padding: 15px; background: #f8f9fa;'>
|
| 302 |
+
<div style='display: flex; justify-content: space-between; align-items: center; margin-bottom: 10px;'>
|
| 303 |
+
<h3 style='color: #333; margin: 0;'>{title}</h3>
|
|
|
|
|
|
|
|
|
|
| 304 |
"""
|
| 305 |
|
| 306 |
+
# Add health indicator
|
| 307 |
+
if health_score:
|
| 308 |
+
html += f"""
|
| 309 |
+
<div style='background: {health_color}; color: white; padding: 5px 15px;
|
| 310 |
+
border-radius: 20px; font-weight: bold; font-size: 14px;'>
|
| 311 |
+
{health_score.score_text} - {health_score.status}
|
| 312 |
+
</div>
|
| 313 |
+
"""
|
| 314 |
+
|
| 315 |
+
html += "</div>"
|
| 316 |
+
html += f"<p style='margin: 5px 0; color: #666;'>Images: {len(crops)}</p>"
|
| 317 |
+
|
| 318 |
+
# Show health alerts if any
|
| 319 |
+
if health_score and health_score.alerts:
|
| 320 |
+
html += "<div style='background: #fff3cd; padding: 8px; border-radius: 5px; margin: 10px 0;'>"
|
| 321 |
+
for alert in health_score.alerts[:2]:
|
| 322 |
+
html += f"<p style='margin: 3px 0; font-size: 12px;'>β οΈ {alert}</p>"
|
| 323 |
+
html += "</div>"
|
| 324 |
+
|
| 325 |
+
html += "<div style='display: grid; grid-template-columns: repeat(5, 1fr); gap: 5px;'>"
|
| 326 |
+
|
| 327 |
for crop in crops:
|
| 328 |
img_rgb = cv2.cvtColor(crop, cv2.COLOR_BGR2RGB)
|
| 329 |
img_base64 = self._img_to_base64(img_rgb)
|
|
|
|
| 338 |
return html
|
| 339 |
|
| 340 |
def save_to_database(self):
|
| 341 |
+
"""Save current session to permanent database with health data"""
|
| 342 |
if not self.current_session_data:
|
| 343 |
return "No data to save", None
|
| 344 |
|
| 345 |
saved = self.reid.save_session_to_permanent()
|
| 346 |
|
| 347 |
+
# ENHANCEMENT 3: Save health assessments to database
|
| 348 |
+
for temp_id, health_score in self.dog_health_scores.items():
|
| 349 |
+
permanent_name = self.reid.temp_to_permanent.get(temp_id)
|
| 350 |
+
if permanent_name and permanent_name in self.reid.permanent_dogs:
|
| 351 |
+
# Find dog_id in database (would need to implement dog_id mapping)
|
| 352 |
+
# For now, just show in message
|
| 353 |
+
pass
|
| 354 |
+
|
| 355 |
if saved:
|
| 356 |
+
msg = f"β
Saved {len(saved)} new dogs to database:\n"
|
| 357 |
for temp_id, name in saved.items():
|
| 358 |
+
health = self.dog_health_scores.get(temp_id)
|
| 359 |
+
if health:
|
| 360 |
+
msg += f" β’ {name} (Health: {health.score_text} - {health.status})\n"
|
| 361 |
+
else:
|
| 362 |
+
msg += f" β’ {name}\n"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 363 |
else:
|
| 364 |
+
msg = "βΉοΈ No new dogs to save (all were already in database)"
|
| 365 |
|
| 366 |
db_html = self._show_database()
|
| 367 |
return msg, db_html
|
| 368 |
|
| 369 |
+
def _show_database(self) -> str:
|
| 370 |
+
"""Show current database"""
|
| 371 |
if not self.reid.permanent_dogs:
|
| 372 |
return "<p>Database is empty</p>"
|
| 373 |
|
| 374 |
html = """
|
| 375 |
<div style='padding: 20px;'>
|
| 376 |
<h2>π Dog Database</h2>
|
| 377 |
+
<div style='display: grid; grid-template-columns: repeat(auto-fill, minmax(200px, 1fr)); gap: 15px;'>
|
| 378 |
"""
|
| 379 |
|
| 380 |
for dog_name, dog_data in self.reid.permanent_dogs.items():
|
| 381 |
+
num_embeddings = len(dog_data.get('embeddings', []))
|
| 382 |
+
|
| 383 |
html += f"""
|
| 384 |
+
<div style='border: 1px solid #ddd; border-radius: 8px; padding: 10px; text-align: center;'>
|
|
|
|
| 385 |
<h4 style='margin: 0 0 10px 0;'>{dog_name}</h4>
|
| 386 |
"""
|
| 387 |
|
|
|
|
| 397 |
"""
|
| 398 |
|
| 399 |
html += f"""
|
| 400 |
+
<p style='font-size: 12px; margin: 5px 0;'>First: {dog_data['first_seen'][:10]}</p>
|
| 401 |
<p style='font-size: 12px; margin: 5px 0;'>Sightings: {dog_data['total_sightings']}</p>
|
| 402 |
+
<p style='font-size: 11px; margin: 5px 0; color: #666;'>{num_embeddings} embeddings</p>
|
| 403 |
</div>
|
| 404 |
"""
|
| 405 |
|
| 406 |
html += "</div></div>"
|
| 407 |
return html
|
| 408 |
|
| 409 |
+
def delete_dog_from_database(self, dog_name: str, hard_delete: bool = False):
|
| 410 |
+
"""ENHANCEMENT 4: Delete dog from database"""
|
| 411 |
+
if dog_name in self.reid.permanent_dogs:
|
| 412 |
+
if hard_delete:
|
| 413 |
+
# Permanently delete
|
| 414 |
+
del self.reid.permanent_dogs[dog_name]
|
| 415 |
+
self.reid.used_names.discard(dog_name)
|
| 416 |
+
self.reid.save_permanent_database()
|
| 417 |
+
return f"ποΈ Permanently deleted {dog_name}", self._show_database()
|
| 418 |
+
else:
|
| 419 |
+
# Soft delete - mark as inactive (can implement status field)
|
| 420 |
+
self.reid.permanent_dogs[dog_name]['status'] = 'deleted'
|
| 421 |
+
self.reid.save_permanent_database()
|
| 422 |
+
return f"π¦ Archived {dog_name} (soft delete)", self._show_database()
|
| 423 |
+
|
| 424 |
+
return f"β Dog {dog_name} not found", self._show_database()
|
| 425 |
+
|
| 426 |
+
def merge_dogs(self, keep_name: str, merge_name: str):
|
| 427 |
+
"""ENHANCEMENT 4: Merge duplicate dogs"""
|
| 428 |
+
if keep_name in self.reid.permanent_dogs and merge_name in self.reid.permanent_dogs:
|
| 429 |
+
# Merge embeddings
|
| 430 |
+
keep_dog = self.reid.permanent_dogs[keep_name]
|
| 431 |
+
merge_dog = self.reid.permanent_dogs[merge_name]
|
| 432 |
+
|
| 433 |
+
# Combine embeddings (keep up to 10 most diverse)
|
| 434 |
+
keep_embeddings = keep_dog.get('embeddings', [])
|
| 435 |
+
merge_embeddings = merge_dog.get('embeddings', [])
|
| 436 |
+
combined = keep_embeddings + merge_embeddings
|
| 437 |
+
keep_dog['embeddings'] = combined[-10:] # Keep last 10
|
| 438 |
+
|
| 439 |
+
# Update sightings
|
| 440 |
+
keep_dog['total_sightings'] += merge_dog.get('total_sightings', 0)
|
| 441 |
+
|
| 442 |
+
# Delete merged dog
|
| 443 |
+
del self.reid.permanent_dogs[merge_name]
|
| 444 |
+
self.reid.used_names.discard(merge_name)
|
| 445 |
+
|
| 446 |
+
self.reid.save_permanent_database()
|
| 447 |
+
|
| 448 |
+
return f"β
Merged {merge_name} into {keep_name}", self._show_database()
|
| 449 |
+
|
| 450 |
+
return "β One or both dogs not found", self._show_database()
|
| 451 |
+
|
| 452 |
def _img_to_base64(self, img):
|
| 453 |
+
"""Convert image to base64"""
|
| 454 |
pil_img = Image.fromarray(img)
|
| 455 |
buffered = BytesIO()
|
| 456 |
pil_img.save(buffered, format="JPEG", quality=85)
|
| 457 |
return base64.b64encode(buffered.getvalue()).decode()
|
| 458 |
|
| 459 |
def stop_and_reset(self):
|
| 460 |
+
"""Stop and reset"""
|
| 461 |
self.reset_session()
|
| 462 |
return "<p>Ready</p>", "Ready", None, None
|
| 463 |
|
| 464 |
def create_interface(self):
|
| 465 |
+
"""Create Gradio interface with management tabs"""
|
| 466 |
+
with gr.Blocks(title="Enhanced Dog Tracking System", theme=gr.themes.Soft()) as app:
|
| 467 |
gr.Markdown("""
|
| 468 |
+
# π Enhanced Dog Tracking System
|
| 469 |
+
### Detect, Track, Remember, and Monitor Dog Health
|
| 470 |
""")
|
| 471 |
|
| 472 |
+
# Main processing tab
|
| 473 |
+
with gr.Tab("π₯ Video Processing"):
|
| 474 |
+
alerts_output = gr.HTML(visible=False)
|
| 475 |
+
health_output = gr.HTML(visible=False)
|
| 476 |
+
|
| 477 |
+
with gr.Row():
|
| 478 |
+
with gr.Column(scale=1):
|
| 479 |
+
video_input = gr.Video(label="Upload Video")
|
| 480 |
+
reid_threshold = gr.Slider(
|
| 481 |
+
0.15, 0.6, 0.35, step=0.05,
|
| 482 |
+
label="ReID Threshold",
|
| 483 |
+
info="Lower = more lenient matching"
|
| 484 |
+
)
|
| 485 |
+
sample_rate = gr.Slider(
|
| 486 |
+
1, 5, 2, step=1,
|
| 487 |
+
label="Frame Sample Rate"
|
| 488 |
+
)
|
| 489 |
+
process_btn = gr.Button("π Process Video", variant="primary", size="lg")
|
| 490 |
+
save_btn = gr.Button("πΎ Save to Database", variant="secondary", size="lg")
|
| 491 |
+
stop_btn = gr.Button("βΉοΈ Stop", variant="stop")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 492 |
|
| 493 |
+
with gr.Column(scale=2):
|
| 494 |
+
status_text = gr.Textbox(label="Status", interactive=False)
|
| 495 |
+
gallery_output = gr.HTML(label="Detected Dogs")
|
| 496 |
+
|
| 497 |
+
# Process video
|
| 498 |
+
process_btn.click(
|
| 499 |
+
self.process_video,
|
| 500 |
+
inputs=[video_input, reid_threshold, sample_rate],
|
| 501 |
+
outputs=[gallery_output, status_text, alerts_output, health_output]
|
| 502 |
+
).then(
|
| 503 |
+
lambda x, y: (gr.update(visible=bool(x)), gr.update(visible=bool(y))),
|
| 504 |
+
inputs=[alerts_output, health_output],
|
| 505 |
+
outputs=[alerts_output, health_output]
|
| 506 |
+
)
|
| 507 |
|
| 508 |
+
# Save to database
|
| 509 |
+
save_btn.click(
|
| 510 |
+
self.save_to_database,
|
| 511 |
+
outputs=[status_text, gr.HTML(visible=False)]
|
| 512 |
+
)
|
|
|
|
| 513 |
|
| 514 |
+
# Stop and reset
|
| 515 |
+
stop_btn.click(
|
| 516 |
+
self.stop_and_reset,
|
| 517 |
+
outputs=[gallery_output, status_text, alerts_output, health_output]
|
| 518 |
+
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 519 |
|
| 520 |
+
# ENHANCEMENT 3: Health Summary Tab
|
| 521 |
+
with gr.Tab("π₯ Health Summary"):
|
| 522 |
+
gr.HTML(value=health_output)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 523 |
|
| 524 |
+
# Database viewing tab
|
| 525 |
+
with gr.Tab("π View Database"):
|
| 526 |
+
with gr.Row():
|
| 527 |
+
show_db_btn = gr.Button("π Refresh Database", variant="secondary")
|
| 528 |
+
database_display = gr.HTML()
|
| 529 |
+
|
| 530 |
+
show_db_btn.click(
|
| 531 |
+
self._show_database,
|
| 532 |
+
outputs=[database_display]
|
| 533 |
+
)
|
| 534 |
|
| 535 |
+
# ENHANCEMENT 4: Database Management Tab
|
| 536 |
+
with gr.Tab("βοΈ Manage Database"):
|
| 537 |
+
gr.Markdown("### Delete or Merge Dogs")
|
| 538 |
+
|
| 539 |
+
with gr.Row():
|
| 540 |
+
with gr.Column():
|
| 541 |
+
gr.Markdown("#### Delete Dog")
|
| 542 |
+
delete_name_input = gr.Textbox(label="Dog Name to Delete")
|
| 543 |
+
hard_delete_check = gr.Checkbox(label="Permanent Delete (irreversible)", value=False)
|
| 544 |
+
delete_btn = gr.Button("ποΈ Delete Dog", variant="stop")
|
| 545 |
+
delete_status = gr.Textbox(label="Delete Status", interactive=False)
|
| 546 |
+
|
| 547 |
+
with gr.Column():
|
| 548 |
+
gr.Markdown("#### Merge Dogs")
|
| 549 |
+
gr.Markdown("Combine duplicate dogs into one entry")
|
| 550 |
+
keep_name_input = gr.Textbox(label="Keep This Dog (name)")
|
| 551 |
+
merge_name_input = gr.Textbox(label="Merge This Dog Into Above")
|
| 552 |
+
merge_btn = gr.Button("π Merge Dogs", variant="secondary")
|
| 553 |
+
merge_status = gr.Textbox(label="Merge Status", interactive=False)
|
| 554 |
+
|
| 555 |
+
manage_db_display = gr.HTML()
|
| 556 |
+
|
| 557 |
+
# Delete action
|
| 558 |
+
delete_btn.click(
|
| 559 |
+
self.delete_dog_from_database,
|
| 560 |
+
inputs=[delete_name_input, hard_delete_check],
|
| 561 |
+
outputs=[delete_status, manage_db_display]
|
| 562 |
+
)
|
| 563 |
+
|
| 564 |
+
# Merge action
|
| 565 |
+
merge_btn.click(
|
| 566 |
+
self.merge_dogs,
|
| 567 |
+
inputs=[keep_name_input, merge_name_input],
|
| 568 |
+
outputs=[merge_status, manage_db_display]
|
| 569 |
+
)
|
| 570 |
+
|
| 571 |
+
# Show database in management view
|
| 572 |
+
gr.Button("π Show Current Database").click(
|
| 573 |
+
self._show_database,
|
| 574 |
+
outputs=[manage_db_display]
|
| 575 |
+
)
|
| 576 |
|
| 577 |
+
# Statistics tab
|
| 578 |
+
with gr.Tab("π Statistics"):
|
| 579 |
+
gr.Markdown("### System Statistics")
|
| 580 |
+
stats_btn = gr.Button("π Get Statistics")
|
| 581 |
+
stats_output = gr.JSON()
|
| 582 |
+
|
| 583 |
+
def get_all_stats():
|
| 584 |
+
return {
|
| 585 |
+
'reid_stats': self.reid.get_statistics(),
|
| 586 |
+
'tracker_stats': self.tracker.get_statistics(),
|
| 587 |
+
'database_dogs': len(self.reid.permanent_dogs),
|
| 588 |
+
'current_session_dogs': len(self.current_session_data.get('crops', {})),
|
| 589 |
+
'health_assessments': len(self.dog_health_scores)
|
| 590 |
+
}
|
| 591 |
+
|
| 592 |
+
stats_btn.click(
|
| 593 |
+
get_all_stats,
|
| 594 |
+
outputs=[stats_output]
|
| 595 |
+
)
|
| 596 |
|
| 597 |
return app
|
| 598 |
|
|
|
|
|
|
|
|
|
|
| 599 |
if __name__ == "__main__":
|
| 600 |
+
demo = EnhancedDogDetectionDemo()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 601 |
app = demo.create_interface()
|
| 602 |
+
app.launch(server_name="0.0.0.0", server_port=7860, share=False)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|