ModuMLTECH committed on
Commit
7bb70f6
·
verified ·
1 Parent(s): 14f6022

Upload 3 files

Browse files
Files changed (3) hide show
  1. app.py +88 -0
  2. best.pt +3 -0
  3. requirements.txt +5 -0
app.py ADDED
@@ -0,0 +1,88 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ """Untitled8.ipynb
3
+
4
+ Automatically generated by Colab.
5
+
6
+ Original file is located at
7
+ https://colab.research.google.com/drive/17hKCetX4b9rOVZK9WPeKXBDYONdYYktD
8
+ """
9
+
10
+ import streamlit as st
11
+ import cv2
12
+ import tempfile
13
+ import os
14
+ import time
15
+ import numpy as np
16
+ import pandas as pd
17
+ from collections import defaultdict
18
+ from ultralytics import YOLO
19
+
20
+ # --- FONCTIONS UTILES ---
21
+
22
def get_color_for_id(track_id):
    """Return a deterministic BGR color tuple (3 ints in 0..255) for a track id.

    Uses a *local* RNG seeded with the id, so the same track always gets the
    same color. The original called np.random.seed(track_id), which clobbered
    NumPy's global random state for the entire process on every call, and used
    randint(0, 255), which silently excluded 255 from the range.
    """
    rng = np.random.default_rng(track_id)
    return tuple(int(c) for c in rng.integers(0, 256, size=3))
25
+
26
def draw_text_with_background(image, text, position, font=cv2.FONT_HERSHEY_SIMPLEX,
                              font_scale=1, font_thickness=2, text_color=(255, 255, 255),
                              bg_color=(0, 0, 0), padding=5):
    """Draw *text* onto *image* (modified in place) over a filled background box.

    Args:
        image: BGR image (numpy array) to draw on.
        text: string to render.
        position: (x, y) of the text baseline's left edge.
        font, font_scale, font_thickness: passed through to cv2.putText.
        text_color, bg_color: BGR tuples for the text and the box.
        padding: pixels of background around the text.
    """
    # getTextSize returns ((width, height), baseline); the original discarded
    # baseline, so descenders (g, y, p) spilled below the background box.
    (text_width, text_height), baseline = cv2.getTextSize(text, font, font_scale, font_thickness)

    x, y = position
    top_left = (x, y - text_height - padding)
    # Extend the box below the baseline so descenders stay inside it.
    bottom_right = (x + text_width + padding * 2, y + baseline + padding)

    cv2.rectangle(image, top_left, bottom_right, bg_color, -1)
    cv2.putText(image, text, (x + padding, y), font, font_scale, text_color,
                font_thickness, cv2.LINE_AA)
38
+
39
# --- YOLO PROCESSING CLASS ---
class YOLOVideoProcessor:
    """Runs YOLO tracking over a video file and writes an annotated copy."""

    def __init__(self, model_path, video_path, output_path, tracker_method="bot"):
        """
        Args:
            model_path: path to the YOLO weights (e.g. "best.pt").
            video_path: input video file to process.
            output_path: where the annotated .mp4 is written.
            tracker_method: "bot" selects BoT-SORT; any other value
                selects ByteTrack (same rule as the original).
        """
        self.model = YOLO(model_path, task="detect")
        self.tracker_method = tracker_method
        self.video_path = video_path
        self.output_path = output_path

    def process_video(self):
        """Read the video frame by frame, track objects, write annotated output.

        Fixes over the original:
        * output FPS comes from the source video instead of a hard-coded 30,
          so the result plays at the correct speed (falls back to 30 when the
          container reports no FPS);
        * the tracker config name is computed once instead of every frame;
        * tracking results are actually drawn via results[0].plot() — the
          original computed them and then wrote the raw, unannotated frame;
        * the capture and writer are released in a finally block, so they are
          not leaked if tracking raises mid-video.
        """
        cap = cv2.VideoCapture(self.video_path)
        if not cap.isOpened():
            raise IOError(f"Cannot open video: {self.video_path}")

        frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        fps = cap.get(cv2.CAP_PROP_FPS) or 30  # 0.0 when metadata is missing
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        out = cv2.VideoWriter(self.output_path, fourcc, fps, (frame_width, frame_height))

        # Loop-invariant: pick the tracker config once, not per frame.
        tracker = "botsort.yaml" if self.tracker_method.lower() == "bot" else "bytetrack.yaml"

        try:
            while True:
                success, frame = cap.read()
                if not success:
                    break

                results = self.model.track(frame, persist=True, tracker=tracker, conf=0.25)
                # Render boxes/track ids onto the frame before writing it.
                annotated = results[0].plot()
                draw_text_with_background(annotated, "Processing...", (7, 30))
                out.write(annotated)
        finally:
            cap.release()
            out.release()
            cv2.destroyAllWindows()
69
+
70
# --- STREAMLIT UI ---
st.title("🚗 Détection de Véhicules avec YOLO")

uploaded_file = st.file_uploader("📂 Upload une vidéo", type=["mp4", "avi", "mov"])

if uploaded_file is not None:
    # Persist the upload to disk. Close the handle so every buffered byte is
    # flushed before Streamlit/OpenCV read the file — the original left the
    # NamedTemporaryFile open, which can hand the readers a truncated video.
    tfile = tempfile.NamedTemporaryFile(delete=False, suffix='.mp4')
    tfile.write(uploaded_file.read())
    tfile.close()

    st.video(tfile.name)

    model_path = "best.pt"  # path to the trained model weights
    output_path = "output_video.mp4"

    if st.button("▶️ Lancer l'analyse"):
        processor = YOLOVideoProcessor(model_path, tfile.name, output_path)
        processor.process_video()
        st.success("✅ Traitement terminé !")
        st.video(output_path)
best.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9005aa4c734a5102d3d69e84ee77f4f992be1b025b640357436674d2748b59a2
3
+ size 6239907
requirements.txt ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ streamlit
2
+ ultralytics
3
+ opencv-python
4
+ numpy
5
+ pandas