import os
import tempfile

import cv2
import torch  # noqa: F401 — kept: ultralytics resolves its backend/device via torch
import gradio as gr
from ultralytics import YOLO

# Load the YOLOv8 nano model once at import time (not per request).
model = YOLO("yolov8n.pt")

# COCO class names we treat as "vehicles" when filtering detections.
VEHICLE_CLASSES = {"car", "truck", "bus", "motorcycle"}


def detect_vehicles(input_video):
    """Run YOLOv8 vehicle detection on an uploaded video and annotate it.

    Parameters
    ----------
    input_video : bytes or None
        Raw bytes of the uploaded video (from ``gr.File(type="binary")``).

    Returns
    -------
    str or None
        Path to the annotated MP4, or ``None`` when no upload was given
        or the video could not be opened.
    """
    if not input_video:
        # Nothing uploaded yet — return None so gr.Video stays empty.
        return None

    # Unique temp paths so concurrent requests don't clobber each other
    # (the original used fixed "input_video.mp4"/"output_video.mp4").
    in_fd, input_video_path = tempfile.mkstemp(suffix=".mp4")
    out_fd, output_video_path = tempfile.mkstemp(suffix=".mp4")
    os.close(out_fd)
    with os.fdopen(in_fd, "wb") as f:
        f.write(input_video)

    cap = cv2.VideoCapture(input_video_path)
    if not cap.isOpened():
        cap.release()
        return None

    # Named property constants instead of the magic numbers 3 / 4.
    frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    # Some containers report FPS as 0; fall back to 30 so VideoWriter works.
    fps = int(cap.get(cv2.CAP_PROP_FPS)) or 30

    out = cv2.VideoWriter(
        output_video_path,
        cv2.VideoWriter_fourcc(*'mp4v'),
        fps,
        (frame_width, frame_height),
    )

    try:
        while True:
            ret, frame = cap.read()
            if not ret:
                break

            # Run YOLOv8 inference on the raw BGR frame.
            results = model(frame)

            # Draw bounding boxes for vehicle classes only.
            for result in results:
                for box in result.boxes:
                    x1, y1, x2, y2 = map(int, box.xyxy[0])
                    conf = float(box.conf[0])
                    label = model.names[int(box.cls[0])]
                    if label in VEHICLE_CLASSES:
                        cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
                        cv2.putText(
                            frame,
                            f"{label} {conf:.2f}",
                            (x1, y1 - 10),
                            cv2.FONT_HERSHEY_SIMPLEX,
                            0.5,
                            (0, 255, 0),
                            2,
                        )

            out.write(frame)
    finally:
        # Always release codec handles, even if inference raises mid-loop.
        cap.release()
        out.release()

    return output_video_path


def clear():
    """Reset both the upload and the output components."""
    return None, None


# ---- Gradio UI --------------------------------------------------------------
# Pass CSS at construction time; assigning `demo.css` after the `with` block
# has closed is unreliable across Gradio versions.
_CSS = """
#process_button {background-color: #90EE90; color: black; font-weight: bold;}
#clear_button {background-color: #FF7F7F; color: white; font-weight: bold;}
"""

with gr.Blocks(css=_CSS) as demo:
    gr.Markdown("## 🚗 Vehicle Detection with YOLOv8")
    with gr.Row():
        input_video = gr.File(label="📂 Upload Video", type="binary")
        output_video = gr.Video(label="📹 Processed Video")
    with gr.Row():
        process_button = gr.Button("Detect Vehicles", elem_id="process_button")
        clear_button = gr.Button("Clear", elem_id="clear_button")

    # Event listeners must be registered INSIDE the Blocks context; the
    # original wired them after it had already closed.
    process_button.click(fn=detect_vehicles, inputs=input_video, outputs=output_video)
    clear_button.click(fn=clear, inputs=[], outputs=[input_video, output_video])

# Launch Gradio
demo.launch()