Spaces:
Sleeping
Sleeping
Deploy TrafficPulse app with Dockerfile and start.sh
Browse files- .dockerignore +10 -0
- Dockerfile +46 -0
- Requirements.txt +7 -0
- app.py +7 -0
- main.py +80 -0
- speed.py +100 -0
- start.sh +20 -0
- streamlit_app.py +40 -0
- track.py +76 -0
- tracker.py +42 -0
.dockerignore
ADDED
|
@@ -0,0 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
__pycache__/
|
| 2 |
+
*.pyc
|
| 3 |
+
.git
|
| 4 |
+
.vscode/
|
| 5 |
+
models/
|
| 6 |
+
weights/
|
| 7 |
+
data/
|
| 8 |
+
*.ckpt
|
| 9 |
+
.DS_Store
|
| 10 |
+
.env
|
Dockerfile
ADDED
|
@@ -0,0 +1,46 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# NOTE: the explanatory prose below was previously committed as bare text at
# the top of this Dockerfile, which makes `docker build` fail (unknown
# instruction). It is preserved here as comments.
#
# Robust, minimal Dockerfile that:
#   - uses a modern slim base,
#   - updates packages,
#   - installs small system libs (ffmpeg, libgl1) for computer-vision tasks,
#   - installs pip deps from Requirements.txt,
#   - copies code and runs app.py.

# Use newer Debian-based Python slim
FROM python:3.11-slim-bookworm

ENV DEBIAN_FRONTEND=noninteractive
WORKDIR /app

# Install system deps (ffmpeg for video, git if needed, libs for OpenCV).
# wget added: start.sh downloads model weights with it but it was not installed.
RUN apt-get update && apt-get upgrade -y \
    && apt-get install -y --no-install-recommends \
    git \
    ffmpeg \
    build-essential \
    libgl1 \
    wget \
    && rm -rf /var/lib/apt/lists/*

# Install Python deps
COPY Requirements.txt /app/Requirements.txt
RUN python -m pip install --upgrade pip \
    && pip install --no-cache-dir -r /app/Requirements.txt

# Copy project files
COPY . /app

# Ensure the startup script is executable inside the image; the CMD below
# tests -x, so a non-executable start.sh would silently be skipped.
RUN if [ -f /app/start.sh ]; then chmod +x /app/start.sh; fi

# Expose port (Streamlit default; Hugging Face will map its port)
ENV PORT=7860
EXPOSE 7860

# Use start.sh if present, otherwise run wrapper app.py
# If you added start.sh earlier, use that; otherwise run app.py directly.
CMD ["bash", "-lc", "if [ -x ./start.sh ]; then ./start.sh; else python app.py; fi"]
|
Requirements.txt
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
streamlit==1.28.0
ultralytics==8.1.4
opencv-python-headless==4.8.0.74
pandas==2.1.1
plotly==6.2.0
numpy==1.26.2
torch==2.3.1
# Added: imported by app.py / track.py but previously missing from this file,
# so the Docker image failed at runtime with ModuleNotFoundError.
gradio
huggingface_hub
|
app.py
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os

import gradio as gr


def greet(name):
    """Return a friendly greeting for *name*."""
    return "Hello " + name + "!!"


demo = gr.Interface(fn=greet, inputs="text", outputs="text")

if __name__ == "__main__":
    # Bind to all interfaces so the server is reachable from outside the
    # container, and honour the PORT env var set in the Dockerfile (7860).
    # The original bare demo.launch() listened on 127.0.0.1 only, which is
    # unreachable through Docker's port mapping.
    demo.launch(server_name="0.0.0.0", server_port=int(os.environ.get("PORT", 7860)))
|
main.py
ADDED
|
@@ -0,0 +1,80 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import cv2
import pandas as pd
from ultralytics import YOLO
# Explicit import (was `from tracker import*`) — consistent with speed.py.
from tracker import Tracker

model = YOLO('yolov8s.pt')


def RGB(event, x, y, flags, param):
    """Mouse callback: print the (x, y) position under the cursor.

    Used as a calibration aid for picking counting-line coordinates.
    """
    if event == cv2.EVENT_MOUSEMOVE:
        colorsBGR = [x, y]
        print(colorsBGR)


cv2.namedWindow('RGB')
cv2.setMouseCallback('RGB', RGB)

cap = cv2.VideoCapture('veh2.mp4')

# COCO class names, one per line; index == YOLO class id.
# `with` ensures the file handle is closed (the original leaked it).
with open("coco.txt", "r") as class_file:
    class_list = class_file.read().split("\n")

count = 0

tracker = Tracker()

# y-coordinates of the two counting lines (drawing currently commented out).
cy1 = 322
cy2 = 368
offset = 6

while True:
    ret, frame = cap.read()
    if not ret:
        break
    count += 1
    if count % 3 != 0:  # process every 3rd frame to save compute
        continue
    frame = cv2.resize(frame, (1020, 500))

    results = model.predict(frame)
    boxes = results[0].boxes.data
    px = pd.DataFrame(boxes).astype("float")

    # Renamed from `list` — the original shadowed the builtin.
    detections = []
    for index, row in px.iterrows():
        x1 = int(row[0])
        y1 = int(row[1])
        x2 = int(row[2])
        y2 = int(row[3])
        d = int(row[5])
        c = class_list[d]
        if 'car' in c:
            detections.append([x1, y1, x2, y2])

    bbox_id = tracker.update(detections)
    for bbox in bbox_id:
        x3, y3, x4, y4, obj_id = bbox
        cx = (x3 + x4) // 2  # int() around ints was redundant
        cy = (y3 + y4) // 2
        cv2.circle(frame, (cx, cy), 4, (0, 0, 255), -1)
        cv2.putText(frame, str(obj_id), (cx, cy), cv2.FONT_HERSHEY_COMPLEX, 0.8, (0, 255, 255), 2)

    # cv2.line(frame,(274,cy1),(814,cy1),(255,255,255),1)
    # cv2.line(frame,(177,cy2),(927,cy2),(255,255,255),1)
    cv2.imshow("RGB", frame)
    if cv2.waitKey(1) & 0xFF == 27:  # Esc quits
        break

cap.release()
cv2.destroyAllWindows()
|
| 80 |
+
|
speed.py
ADDED
|
@@ -0,0 +1,100 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import cv2
|
| 2 |
+
import pandas as pd
|
| 3 |
+
import math
|
| 4 |
+
from ultralytics import YOLO
|
| 5 |
+
from tracker import Tracker
|
| 6 |
+
from datetime import datetime
|
| 7 |
+
|
| 8 |
+
def estimate_speed(p1, p2, fps, ppm=8):
    """Convert the displacement between two pixel positions into km/h.

    Args:
        p1, p2: (x, y) positions of the same object in consecutive samples.
        fps: sampling rate — samples per second between p1 and p2.
        ppm: pixels-per-meter calibration scale (default 8).

    Returns:
        Estimated speed in km/h.
    """
    dx = p2[0] - p1[0]
    dy = p2[1] - p1[1]
    meters_moved = math.hypot(dx, dy) / ppm
    # meters per sample * samples per second -> m/s, then 3.6 -> km/h
    return meters_moved * fps * 3.6
|
| 15 |
+
|
| 16 |
+
def run_speed_estimation(video_path, output_path):
    """Detect and track cars in a video, annotate each with an estimated speed.

    Writes the annotated video to *output_path* and a per-detection CSV log to
    static/vehicle_log.csv. Returns None; prints an error and returns early if
    the video cannot be opened.
    """
    import os  # local import keeps this fix self-contained within the function

    FRAME_STEP = 3  # only every 3rd frame is processed

    model = YOLO('yolov8s.pt')

    with open("coco.txt", "r") as my_file:
        class_list = my_file.read().split("\n")

    cap = cv2.VideoCapture(video_path)
    if not cap.isOpened():
        print(f"Error opening video file: {video_path}")
        return

    fps = int(cap.get(cv2.CAP_PROP_FPS))
    if fps <= 0:
        # Some containers/streams report 0 FPS; fall back to a sane default
        # instead of dividing by zero below.
        fps = 30

    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    out = cv2.VideoWriter(output_path, fourcc, fps, (1020, 500))

    tracker = Tracker()
    count = 0
    log_data = []

    while True:
        ret, frame = cap.read()
        if not ret:
            break

        count += 1
        if count % FRAME_STEP != 0:
            continue

        frame = cv2.resize(frame, (1020, 500))
        timestamp = count / fps

        results = model.predict(frame)
        boxes = results[0].boxes.data
        px = pd.DataFrame(boxes).astype("float")

        det_list = []
        for _, row in px.iterrows():
            x1, y1, x2, y2 = map(int, row[:4])
            class_id = int(row[5])
            label = class_list[class_id]
            if 'car' in label:
                det_list.append([x1, y1, x2, y2])

        bbox_id = tracker.update(det_list)

        for bbox in bbox_id:
            x3, y3, x4, y4, obj_id = bbox
            cx = (x3 + x4) // 2
            cy = (y3 + y4) // 2
            center = (cx, cy)

            # Estimate speed. History points are one *processed* frame apart,
            # i.e. FRAME_STEP source frames, so the effective sampling rate is
            # fps / FRAME_STEP. The original passed the full fps, which
            # overestimated every speed by a factor of FRAME_STEP.
            speed = 0
            history = tracker.track_history[obj_id]
            if len(history) >= 2:
                speed = estimate_speed(history[-2], history[-1], fps / FRAME_STEP)
                speed = round(speed, 2)

            # Draw info
            cv2.circle(frame, center, 4, (0, 0, 255), -1)
            cv2.putText(frame, f"ID {obj_id} | {speed} km/h", (cx, cy),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 255), 2)

            # Save to CSV log
            log_data.append({
                'timestamp_sec': round(timestamp, 2),
                'vehicle_id': obj_id,
                'x': cx,
                'y': cy,
                'speed_kmph': speed
            })

        out.write(frame)

    cap.release()
    out.release()

    # Save CSV log. Create static/ first — track.py does the same, and
    # to_csv raised if the directory did not exist.
    os.makedirs("static", exist_ok=True)
    df = pd.DataFrame(log_data)
    df.to_csv('static/vehicle_log.csv', index=False)

    print(f"Processing complete. Output saved to {output_path}")
|
start.sh
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env bash
# Container entrypoint: fetch model weights once, then launch the app.
#
# NOTE: the previously committed version of this file accidentally contained
# the shell session used to CREATE it (a `cat > start.sh <<'EOF'` heredoc plus
# `chmod` / `git add` / `git commit` lines). Executing that file rewrote
# itself, attempted git operations inside the container, and never launched
# the application. This version is the intended script body itself.
set -e

MODEL_DIR=/app/models
mkdir -p "$MODEL_DIR"

# Download model if not present (replace URL)
MODEL_URL="https://huggingface.co/<user>/<repo>/resolve/main/model.pt"

if [ ! -f "$MODEL_DIR/model.pt" ]; then
    echo "Downloading model..."
    if command -v wget >/dev/null 2>&1; then
        wget --timeout=60 -O "$MODEL_DIR/model.pt" "$MODEL_URL"
    elif command -v curl >/dev/null 2>&1; then
        curl -fL --max-time 300 -o "$MODEL_DIR/model.pt" "$MODEL_URL"
    else
        # Fall back to Python's stdlib, which is always present in this image.
        python -c "import sys, urllib.request; urllib.request.urlretrieve(sys.argv[1], sys.argv[2])" \
            "$MODEL_URL" "$MODEL_DIR/model.pt"
    fi
fi

# Launch app via wrapper (app.py); exec replaces the shell so signals reach Python.
exec python app.py
|
streamlit_app.py
ADDED
|
@@ -0,0 +1,40 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import sys
import os
import importlib.util

# Ensure Python can find track.py in the same folder
sys.path.append(os.path.dirname(__file__))

import streamlit as st

# set_page_config must be the FIRST Streamlit command executed in a script.
# Previously the st.error() in the except branch below could run before it,
# which raises StreamlitAPIException.
st.set_page_config(page_title="🚗 Real-Time Vehicle Counter", layout="wide")

# Try importing run_tracker from track.py in the same directory
try:
    from track import run_tracker
except ModuleNotFoundError:
    st.error("track.py not found in the current directory. Please ensure track.py exists.")
    # st.stop() ends this script run cleanly; sys.exit(1) is the wrong tool
    # inside a Streamlit script.
    st.stop()

st.title("🚗 Real-Time Vehicle Counter")

st.markdown("""
This app uses YOLOv8 to detect and count vehicles in real-time from your webcam or a video file.
""")

# Sidebar for video input
video_source = st.sidebar.radio(
    "Select Video Source:",
    ("Webcam", "Upload Video")
)

video_path = 0  # default to webcam
if video_source == "Upload Video":
    uploaded_file = st.sidebar.file_uploader("Upload a video file", type=["mp4", "mov", "avi"])
    if uploaded_file is not None:
        # Save uploaded file to a temporary file
        with open("temp_video.mp4", "wb") as f:
            f.write(uploaded_file.read())
        video_path = "temp_video.mp4"

# Start tracking
if st.button("Start Tracking"):
    run_tracker(video_path)
|
track.py
ADDED
|
@@ -0,0 +1,76 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import streamlit as st
|
| 3 |
+
import cv2
|
| 4 |
+
from ultralytics import YOLO
|
| 5 |
+
import pandas as pd
|
| 6 |
+
import plotly.express as px
|
| 7 |
+
from datetime import datetime
|
| 8 |
+
import csv
|
| 9 |
+
from collections import defaultdict
|
| 10 |
+
import uuid
|
| 11 |
+
from huggingface_hub import hf_hub_download
|
| 12 |
+
|
| 13 |
+
# Class ID to label mapping.
# Keys are the detector's class ids for the vehicle classes of interest;
# run_tracker looks detections up with CLASS_MAP.get(cls_id) and skips any
# class id not present here.
CLASS_MAP = {
    2: 'car',
    3: 'motorbike',
    5: 'bus',
    7: 'truck'
}
|
| 20 |
+
|
| 21 |
+
# Function to download YOLOv8 weights if not present
def get_model_path():
    """Return a local path to the YOLOv8-L weights, downloading on first use.

    If 'yolov8l.pt' already exists in the working directory that path is
    returned unchanged; otherwise the file is fetched from the existing
    'lkk688/yolov8l-model' repo on the Hugging Face Hub and the cached
    download path is returned.
    """
    local_weights = "yolov8l.pt"
    if os.path.exists(local_weights):
        return local_weights
    # Download from the existing Hugging Face repo
    return hf_hub_download(
        repo_id="lkk688/yolov8l-model",
        filename="yolov8l.pt"
    )
|
| 31 |
+
|
| 32 |
+
@st.cache_resource
def load_model():
    """Build the YOLO model once per session; Streamlit caches the instance."""
    return YOLO(get_model_path())
|
| 36 |
+
|
| 37 |
+
def run_tracker(video_path=0):
    """Run vehicle detection on a video source and render results in Streamlit.

    Args:
        video_path: OpenCV capture source — 0 (default) for the webcam, or a
            path to a video file.

    Side effects: streams annotated frames and a live bar chart into the
    Streamlit page, and appends count snapshots to static/vehicle_log.csv.
    """
    model = load_model()  # Load model safely
    # NOTE(review): incremented for every detection in every frame, so this
    # tallies detection events rather than unique vehicles — confirm intent.
    vehicle_counts = defaultdict(int)

    st.title("🚗 Real-Time Vehicle Counter")
    stframe = st.empty()            # placeholder for the current video frame
    chart_placeholder = st.empty()  # placeholder for the live bar chart

    cap = cv2.VideoCapture(video_path)

    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break

        results = model(frame, stream=True)

        for r in results:
            for box in r.boxes:
                cls_id = int(box.cls[0])
                label = CLASS_MAP.get(cls_id)
                if label:  # only classes listed in CLASS_MAP are counted/drawn
                    vehicle_counts[label] += 1
                    xyxy = box.xyxy[0].cpu().numpy().astype(int)
                    cv2.rectangle(frame, tuple(xyxy[:2]), tuple(xyxy[2:]), (0, 255, 0), 2)
                    cv2.putText(frame, label, tuple(xyxy[:2]), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 255), 2)

        stframe.image(frame, channels='BGR', use_column_width=True)

        # Rebuild the bar chart every frame; the uuid key gives each render a
        # unique element key.
        df = pd.DataFrame(vehicle_counts.items(), columns=['Vehicle', 'Count'])
        fig = px.bar(df, x='Vehicle', y='Count', title='Live Vehicle Count')
        chart_placeholder.plotly_chart(fig, use_container_width=True, key=f"chart_{uuid.uuid4()}")

        # Append a snapshot of the counts to the CSV log when the running
        # total hits a multiple of 50.
        # NOTE(review): this can also fire repeatedly while the total stays at
        # the same multiple across frames — confirm the intended cadence.
        if sum(vehicle_counts.values()) % 50 == 0:
            os.makedirs("static", exist_ok=True)
            with open("static/vehicle_log.csv", "a", newline="") as file:
                writer = csv.writer(file)
                writer.writerow([datetime.now()] + list(vehicle_counts.values()))

    cap.release()
|
tracker.py
ADDED
|
@@ -0,0 +1,42 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from collections import defaultdict
|
| 2 |
+
import math
|
| 3 |
+
|
| 4 |
+
class Tracker:
    """Greedy centroid tracker.

    Matches each incoming bounding box to the existing track whose last
    center is within `max_distance` pixels; unmatched boxes start new tracks.
    Tracks not matched in an update are dropped.
    """

    def __init__(self, max_distance=35, max_history=30):
        # obj_id -> list of (cx, cy) center points, oldest first.
        self.track_history = defaultdict(list)
        self.id_count = 0                # next id to assign
        self.max_distance = max_distance # max center distance to count as the same object
        self.max_history = max_history   # max retained points per track

    def update(self, objects_rect):
        """Assign ids to the given [x1, y1, x2, y2] rects.

        Args:
            objects_rect: list of bounding boxes for the current frame.

        Returns:
            List of [x1, y1, x2, y2, obj_id] for each input box.
        """
        objects_bbs_ids = []
        # FIX: each track may be claimed by at most one detection per update.
        # Previously two nearby detections in the same frame could both match
        # the same track, yielding duplicate ids and a corrupted history.
        claimed = set()

        for x1, y1, x2, y2 in objects_rect:
            cx = (x1 + x2) // 2
            cy = (y1 + y2) // 2

            matched_id = None
            for obj_id, track in self.track_history.items():
                if obj_id in claimed:
                    continue
                prev_cx, prev_cy = track[-1]
                if math.hypot(cx - prev_cx, cy - prev_cy) < self.max_distance:
                    matched_id = obj_id
                    break

            if matched_id is None:
                matched_id = self.id_count
                self.id_count += 1

            claimed.add(matched_id)
            self.track_history[matched_id].append((cx, cy))
            if len(self.track_history[matched_id]) > self.max_history:
                self.track_history[matched_id].pop(0)  # retain only the last max_history points
            objects_bbs_ids.append([x1, y1, x2, y2, matched_id])

        # Keep only tracks that were seen this frame.
        new_track_history = defaultdict(list)
        for *_, obj_id in objects_bbs_ids:
            new_track_history[obj_id] = self.track_history[obj_id]
        self.track_history = new_track_history

        return objects_bbs_ids
|