# Heat-Vision — src/app.py
# (Hugging Face Spaces page header, kept as comments: uploaded by TulkinRB,
#  commit "Add stuff", revision 0bdfe9d — the raw lines were not valid Python.)
# gradio_stream_detect_full.py
import gradio as gr
from pathlib import Path
import tempfile, shutil, time
import cv2
from ultralytics import YOLO
import numpy as np
from detect import detect_for_video
from input_output.video_output import write_video_output
from tracking.tracker import Tracker
# Directory that holds the bundled model weights (<name>.pt) and per-model
# confusion matrices; expected at <repo>/models, i.e. one level above src/.
MODELS_DIR = Path(__file__).parent.parent / "models"

# Class index -> human-readable label, used when rendering tracked boxes.
# NOTE(review): the list order must match the class indices the models were
# trained with. It looks like a COCO-style list with some entries swapped for
# thermal/ADAS classes ("deer", "stroller", "rider", "scooter", "face",
# "other vehicle", "license plate") — confirm against the training config.
CLASS_LABELS = [
    "person",
    "bike",
    "car",
    "motor",
    "airplane",
    "bus",
    "train",
    "truck",
    "boat",
    "light",
    "hydrant",
    "sign",
    "parking meter",
    "bench",
    "bird",
    "cat",
    "dog",
    "deer",
    "sheep",
    "cow",
    "elephant",
    "bear",
    "zebra",
    "giraffe",
    "backpack",
    "umbrella",
    "handbag",
    "tie",
    "suitcase",
    "frisbee",
    "skis",
    "snowboard",
    "sports ball",
    "kite",
    "baseball bat",
    "baseball glove",
    "skateboard",
    "surfboard",
    "tennis racket",
    "bottle",
    "wine glass",
    "cup",
    "fork",
    "knife",
    "spoon",
    "bowl",
    "banana",
    "apple",
    "sandwich",
    "orange",
    "broccoli",
    "carrot",
    "hot dog",
    "pizza",
    "donut",
    "cake",
    "chair",
    "couch",
    "potted plant",
    "bed",
    "dining table",
    "toilet",
    "tv",
    "laptop",
    "mouse",
    "remote",
    "keyboard",
    "cell phone",
    "microwave",
    "oven",
    "toaster",
    "sink",
    "stroller",
    "rider",
    "scooter",
    "vase",
    "scissors",
    "face",
    "other vehicle",
    "license plate",
]
def safe_save_upload(video, tmpdir: Path) -> Path:
    """Save an uploaded video into *tmpdir* and return the saved file's Path.

    Gradio may deliver the upload either as a filesystem path (str — and, in
    some versions, pathlib.Path) or as a file-like object; both are handled.

    Args:
        video: Path string / Path of an on-disk upload, or a readable
            binary file-like object.
        tmpdir: Existing directory to save the copy into.

    Returns:
        Path of the copy inside *tmpdir*.
    """
    # Generalized from str-only: gradio/tempfile code paths may hand us a Path.
    if isinstance(video, (str, Path)):
        src = Path(video)
        dst = tmpdir / src.name
        shutil.copy(src, dst)  # shutil accepts Path objects directly
        return dst
    # File-like object: stream its bytes to disk.
    name = getattr(video, "name", "uploaded.mp4")
    dst = tmpdir / Path(name).name  # .name strips any directory components
    with open(dst, "wb") as f:
        try:
            shutil.copyfileobj(video, f)
        except Exception:
            # Best-effort fallback for wrappers that don't behave like real
            # streams. NOTE(review): if copyfileobj partially consumed the
            # stream before failing, this may write a truncated file.
            f.write(video.read())
    return dst
def draw_box_on_frame(frame, xyxy, label, conf):
    """Draw one labeled detection box onto *frame* in place.

    Args:
        frame: BGR image (numpy array) to draw on.
        xyxy: Box corners (x1, y1, x2, y2); coerced to int.
        label: Class name to render above the box.
        conf: Confidence score, rendered with two decimals.
    """
    left, top, right, bottom = (int(v) for v in xyxy)
    box_color = (0, 255, 0)  # green (BGR)
    cv2.rectangle(frame, (left, top), (right, bottom), box_color, 2)
    caption = f"{label} {conf:.2f}"
    # Anchor the caption just above the box, clamped so it stays on-frame.
    anchor = (left, max(0, top - 6))
    cv2.putText(frame, caption, anchor,
                cv2.FONT_HERSHEY_SIMPLEX, 0.45, box_color, 1, cv2.LINE_AA)
def process_video(model_name, conf, min_match_score, min_appearance_frames, max_missing_frames, video):
    """Run detection + tracking on an uploaded video and render the result.

    Args:
        model_name: Key of a model under MODELS_DIR (weights + confusion matrix).
        conf: Detection confidence threshold passed to the detector.
        min_match_score: Tracker's minimum score to match a detection to a track.
        min_appearance_frames: Minimum frames before a track is kept.
        max_missing_frames: Frames a track may be unmatched before it is dropped.
        video: Upload as delivered by Gradio (path string or file-like).

    Returns:
        str path of the rendered video in the system temp dir, where Gradio
        can serve it.
    """
    model = YOLO(download_and_get_model(model_name))
    confusion_matrix = load_confusion_matrix(model_name)
    tmpdir = Path(tempfile.mkdtemp(prefix="gradio_detect_"))
    try:
        input_video_path = safe_save_upload(video, tmpdir)
        raw_detections = detect_for_video(model, input_video_path, conf)
        tracker = Tracker(confusion_matrix, min_score_for_match=min_match_score, min_frames=min_appearance_frames,
                          max_missing_frames=max_missing_frames)
        tracker.advance_frames(raw_detections)
        tracker.finish()
        output_video_path = tmpdir / f"processed_{input_video_path.stem}.mp4"
        # 'avc1' (H.264) so browsers can play the result inline.
        write_video_output(input_video_path, output_video_path, tracker, CLASS_LABELS, format='avc1')
        # Copy to the system temp folder so Gradio can still serve the file
        # after our working directory is removed below.
        gr_file = Path(tempfile.gettempdir()) / f"gradio_output_{int(time.time())}.mp4"
        shutil.copy(output_video_path, gr_file)
        return str(gr_file)
    finally:
        # Fix: the mkdtemp directory (input + processed copies) was never
        # removed, leaking disk space on every request.
        shutil.rmtree(tmpdir, ignore_errors=True)
def download_and_get_model(model_name):
    """Return the local path to *model_name*'s weights file.

    NOTE(review): despite the name, nothing is downloaded here — the .pt
    file is expected to already exist under MODELS_DIR.
    """
    weights_filename = f"{model_name}.pt"
    return MODELS_DIR / weights_filename
def load_confusion_matrix(model_name):
    """Load the confusion matrix stored alongside *model_name*'s weights.

    NOTE(review): the file uses a ".confusion_matrix" extension but is
    presumably in numpy's .npy format, since np.load reads it — confirm.
    """
    matrix_path = MODELS_DIR / f"{model_name}.confusion_matrix"
    return np.load(str(matrix_path))
with gr.Blocks() as demo:
    gr.Markdown("## Heat Vision Object Detection\nUpload a video and set detection/tracking parameters.")

    # Initial widget values. Also used when the preset dropdown is cleared,
    # so "no preset" restores exactly what the UI started with.
    # (Fixes the old reset path returning 0.01/10/10, which never matched
    # the widgets' initial 0.3/2/15.)
    DEFAULTS = {
        "model": "hypertuned_yolov11xl",
        "conf": 0.5,
        "min_match_score": 0.3,
        "min_appearance_frames": 2,
        "max_missing_frames": 15,
    }

    # Named parameter bundles selectable from the preset dropdown.
    PRESETS = {
        "confidence 0.25": {
            "model": "hypertuned_yolov11xl",
            "conf": 0.25,
            "min_match_score": 0.2,
            "min_appearance_frames": 21,
            "max_missing_frames": 10
        },
        "confidence 0.5": {
            "model": "hypertuned_yolov11xl",
            "conf": 0.5,
            "min_match_score": 0.5,
            "min_appearance_frames": 10,
            "max_missing_frames": 10
        }
    }

    # Row for presets + parameters
    with gr.Row():
        preset_sel = gr.Dropdown(list(PRESETS.keys()), label="Choose Preset", value=None)
        model_sel = gr.Dropdown(
            choices=["hypertuned_yolov11xl", "finetuned_yolov11xl", "finetuned_yolov8xl"],
            value=DEFAULTS["model"],
            label="Model"
        )
        conf_s = gr.Slider(0.0, 1.0, value=DEFAULTS["conf"], label="Detection confidence")
        min_match_score = gr.Slider(0.0, 1.0, value=DEFAULTS["min_match_score"], label="Tracker Match Score")
        min_appearance_frames = gr.Number(value=DEFAULTS["min_appearance_frames"], label="Min Appearance Frames")
        max_missing_frames = gr.Number(value=DEFAULTS["max_missing_frames"], label="Max Missing Frames")

    def apply_preset(preset_name):
        """Return widget values for *preset_name*; defaults when cleared."""
        params = PRESETS.get(preset_name, DEFAULTS)
        return (params["model"], params["conf"], params["min_match_score"],
                params["min_appearance_frames"], params["max_missing_frames"])

    # Push preset values into the parameter widgets whenever one is picked.
    preset_sel.change(
        fn=apply_preset,
        inputs=preset_sel,
        outputs=[model_sel, conf_s, min_match_score, min_appearance_frames, max_missing_frames]
    )

    vid_in = gr.Video(label="Upload video", format="mp4")
    out_vid = gr.Video(label="Processed video")
    btn = gr.Button("Run Detection")
    btn.click(
        process_video,
        inputs=[model_sel, conf_s, min_match_score, min_appearance_frames, max_missing_frames, vid_in],
        outputs=out_vid
    )

if __name__ == "__main__":
    demo.launch()