LaurianeMD committed on
Commit
1e9ddab
·
verified ·
1 Parent(s): 3970640

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +89 -89
app.py CHANGED
@@ -1,89 +1,89 @@
1
- import gradio as gr
2
- import cv2
3
- from ultralytics import YOLO
4
- import tempfile
5
- import os
6
-
7
- # Charger le modèle
8
- model = YOLO("yolo12m.pt") # ou yolov8n.pt
9
-
10
- # Classes à détecter
11
- target_classes = ["backpack", "suitcase", "handbag"]
12
- class_name_to_id = {name: idx for idx, name in model.names.items()}
13
- target_ids = [class_name_to_id[c] for c in target_classes]
14
-
15
- def process_video(video_path):
16
- cap = cv2.VideoCapture(video_path)
17
- ret, frame = cap.read()
18
- if not ret:
19
- return "Erreur lecture vidéo"
20
-
21
- H, W = frame.shape[:2]
22
- line_y = int(H * 0.6)
23
- tolerance = 25
24
-
25
- counted_ids = set()
26
- class_counts = {c: 0 for c in target_classes}
27
- total_count = 0
28
-
29
- # Output temporaire
30
- temp_out = tempfile.NamedTemporaryFile(delete=False, suffix=".mp4")
31
- fourcc = cv2.VideoWriter_fourcc(*'mp4v')
32
- out = cv2.VideoWriter(temp_out.name, fourcc, 20, (W, H))
33
-
34
- cap.set(cv2.CAP_PROP_POS_FRAMES, 0)
35
-
36
- while True:
37
- ret, frame = cap.read()
38
- if not ret:
39
- break
40
-
41
- results = model.track(frame, persist=True, classes=target_ids, verbose=False)
42
-
43
- if results[0].boxes.id is not None:
44
- ids = results[0].boxes.id.int().cpu().tolist()
45
- clss = results[0].boxes.cls.int().cpu().tolist()
46
- boxes = results[0].boxes.xyxy.cpu().tolist()
47
-
48
- for obj_id, cls, box in zip(ids, clss, boxes):
49
- x1, y1, x2, y2 = map(int, box)
50
- center_x = (x1 + x2) // 2
51
- center_y = (y1 + y2) // 2
52
- class_name = model.names[cls]
53
-
54
- cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
55
- cv2.putText(frame, f"{class_name} ID:{obj_id}", (x1, y1 - 10),
56
- cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)
57
-
58
- if obj_id not in counted_ids and (line_y - tolerance < center_y < line_y + tolerance):
59
- counted_ids.add(obj_id)
60
- total_count += 1
61
- class_counts[class_name] += 1
62
-
63
- # Affichage des résultats
64
- y_offset = 30
65
- for name in target_classes:
66
- cv2.putText(frame, f"{name}: {class_counts[name]}", (20, y_offset),
67
- cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 255), 2)
68
- y_offset += 30
69
- cv2.putText(frame, f"Total: {total_count}", (20, y_offset + 10),
70
- cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2)
71
- cv2.line(frame, (0, line_y), (W, line_y), (0, 0, 255), 2)
72
-
73
- out.write(frame)
74
-
75
- cap.release()
76
- out.release()
77
- return temp_out.name
78
-
79
- # Interface Gradio
80
- demo = gr.Interface(
81
- fn=process_video,
82
- inputs=gr.Video(label="Importer une vidéo de bagages"),
83
- outputs=gr.Video(label="Vidéo annotée avec comptage"),
84
- title="🎒 Compteur intelligent de bagages",
85
- description="Détecte et compte les sacs, valises, et sacs à dos avec YOLO."
86
- )
87
-
88
- if __name__ == "__main__":
89
- demo.launch()
 
1
+ import gradio as gr
2
+ import cv2
3
+ from ultralytics import YOLO
4
+ import tempfile
5
+ import os
6
+
# Load the detection/tracking model (yolov8n.pt is a lighter alternative)
model = YOLO("./yolo12m.pt")

# Luggage classes we want to detect and count
target_classes = ["backpack", "suitcase", "handbag"]

# Invert the model's id->name mapping, then resolve our class names to ids
class_name_to_id = {label: class_id for class_id, label in model.names.items()}
target_ids = [class_name_to_id[label] for label in target_classes]
+
def process_video(video_path):
    """Detect, track and count luggage items crossing a counting line.

    Runs YOLO tracking on every frame, draws boxes/labels, and counts each
    tracked object once when its center enters a horizontal band around the
    counting line. Writes an annotated copy of the video to a temp file.

    Args:
        video_path: Path to the input video file.

    Returns:
        Path to the annotated .mp4 temp file, or an error message string
        if the video could not be read.
    """
    cap = cv2.VideoCapture(video_path)
    ret, frame = cap.read()
    if not ret:
        cap.release()  # fix: release the capture on the early-exit path too
        return "Erreur lecture vidéo"

    H, W = frame.shape[:2]
    line_y = int(H * 0.6)  # counting line at 60% of frame height
    tolerance = 25         # pixel band around the line where a crossing counts

    counted_ids = set()    # track IDs already counted (count each object once)
    class_counts = {c: 0 for c in target_classes}
    total_count = 0

    # Temporary output file for the annotated video
    temp_out = tempfile.NamedTemporaryFile(delete=False, suffix=".mp4")
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    # fix: use the source video's FPS instead of a hard-coded 20 so the
    # output plays back at the original speed (fall back to 20 if unknown)
    fps = cap.get(cv2.CAP_PROP_FPS) or 20
    out = cv2.VideoWriter(temp_out.name, fourcc, fps, (W, H))

    # Rewind: the first frame was consumed above only to get the dimensions
    cap.set(cv2.CAP_PROP_POS_FRAMES, 0)

    while True:
        ret, frame = cap.read()
        if not ret:
            break

        # persist=True keeps tracker state across frames so IDs are stable
        results = model.track(frame, persist=True, classes=target_ids, verbose=False)

        if results[0].boxes.id is not None:
            ids = results[0].boxes.id.int().cpu().tolist()
            clss = results[0].boxes.cls.int().cpu().tolist()
            boxes = results[0].boxes.xyxy.cpu().tolist()

            for obj_id, cls, box in zip(ids, clss, boxes):
                x1, y1, x2, y2 = map(int, box)
                center_y = (y1 + y2) // 2  # vertical center used for the line test
                class_name = model.names[cls]

                cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
                cv2.putText(frame, f"{class_name} ID:{obj_id}", (x1, y1 - 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)

                # Count each tracked object once, when its center is in the band
                if obj_id not in counted_ids and (line_y - tolerance < center_y < line_y + tolerance):
                    counted_ids.add(obj_id)
                    total_count += 1
                    class_counts[class_name] += 1

        # Overlay per-class counts, the total, and the counting line
        y_offset = 30
        for name in target_classes:
            cv2.putText(frame, f"{name}: {class_counts[name]}", (20, y_offset),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 255), 2)
            y_offset += 30
        cv2.putText(frame, f"Total: {total_count}", (20, y_offset + 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2)
        cv2.line(frame, (0, line_y), (W, line_y), (0, 0, 255), 2)

        out.write(frame)

    cap.release()
    out.release()
    return temp_out.name
78
+
79
+ # Interface Gradio
80
+ demo = gr.Interface(
81
+ fn=process_video,
82
+ inputs=gr.Video(label="Importer une vidéo de bagages"),
83
+ outputs=gr.Video(label="Vidéo annotée avec comptage"),
84
+ title="🎒 Compteur intelligent de bagages",
85
+ description="Détecte et compte les sacs, valises, et sacs à dos avec YOLO."
86
+ )
87
+
88
+ if __name__ == "__main__":
89
+ demo.launch()