# Rohitsharma15's picture
# Update app.py
# f308ab6 verified
import os

# HACK: install the missing dependency at startup (common Hugging Face
# Spaces workaround); prefer declaring ultralytics in requirements.txt.
os.system("pip install ultralytics")

import cv2
import gradio as gr
import numpy as np
from PIL import Image
from ultralytics import YOLO  # deduplicated: was imported twice
# Load YOLO model (yolov8n.pt: nano checkpoint, auto-downloaded by
# ultralytics on first use if not present locally).
yolo_model = YOLO('yolov8n.pt')
# NOTE(review): the stock yolov8n.pt is trained on COCO, whose 80 class
# names do not include 'parcel' — with these weights `label == TARGET_LABEL`
# can never be true. Confirm a custom-trained weight file is intended here.
TARGET_LABEL = 'parcel' # Define the target object label
def vid_inf(vid_path, contour_thresh):
    """Run motion-gated YOLO detection over a video.

    Combines MOG2 background-subtraction motion contours with YOLOv8
    detections: a detection box is drawn only when the bounding rect of a
    sufficiently large motion contour starts inside it, i.e. the detected
    object is actually moving.

    Args:
        vid_path: Path to the input video file.
        contour_thresh: Minimum contour area (in pixels) for a motion
            region to count as movement.

    Yields:
        (PIL.Image, None) for roughly every 12th annotated frame as a live
        preview, then (None, output_path) once the fully annotated video
        has been written to disk.
    """
    cap = cv2.VideoCapture(vid_path)
    frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    # Some containers report 0 fps; fall back to a sane default so the
    # VideoWriter still produces a playable file.
    fps = int(cap.get(cv2.CAP_PROP_FPS)) or 30
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    output_video = "output_recorded.mp4"
    out = cv2.VideoWriter(output_video, fourcc, fps, (frame_width, frame_height))
    backSub = cv2.createBackgroundSubtractorMOG2(history=200, varThreshold=25, detectShadows=True)
    # Loop-invariant: build the morphology kernel once, not once per frame.
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    count = 0
    try:
        while cap.isOpened():
            ret, frame = cap.read()
            if not ret:
                break
            fg_mask = backSub.apply(frame)
            # Threshold away MOG2 shadow pixels (~127); keep hard foreground.
            _, mask_thresh = cv2.threshold(fg_mask, 200, 255, cv2.THRESH_BINARY)
            mask_eroded = cv2.morphologyEx(mask_thresh, cv2.MORPH_OPEN, kernel)
            contours, _ = cv2.findContours(mask_eroded, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
            # Honor the user-selected threshold only (the original also applied
            # a hard-coded 500px floor that silently overrode smaller values).
            large_contours = [cnt for cnt in contours if cv2.contourArea(cnt) > contour_thresh]

            # Run YOLO detection on the raw frame.
            results = yolo_model(frame)
            frame_out = frame.copy()
            for result in results:
                for det_idx, box in enumerate(result.boxes.xyxy):
                    class_id = int(result.boxes.cls[det_idx])
                    label = yolo_model.names[class_id]
                    conf = result.boxes.conf[det_idx]
                    if label != TARGET_LABEL or conf < 0.5:
                        continue
                    x1, y1, x2, y2 = map(int, box)
                    for cnt in large_contours:
                        x, y, _, _ = cv2.boundingRect(cnt)
                        # Annotate once if any moving region starts inside the box.
                        if x1 < x < x2 and y1 < y < y2:
                            cv2.rectangle(frame_out, (x1, y1), (x2, y2), (0, 255, 255), 2)
                            cv2.putText(frame_out, label, (x1, y1 - 10),
                                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 0), 2)
                            break
            out.write(frame_out)
            if not count % 12:
                # Convert BGR->RGB only for the frames actually previewed.
                yield Image.fromarray(cv2.cvtColor(frame_out, cv2.COLOR_BGR2RGB)), None
            count += 1
    finally:
        # Release resources even if the caller abandons the generator mid-run.
        cap.release()
        out.release()
        cv2.destroyAllWindows()
    yield None, output_video
# Gradio UI Setup: components are declared inline and wired straight into
# the Interface; vid_inf is a generator, so queueing is enabled to stream
# preview frames followed by the final rendered video.
app = gr.Interface(
    fn=vid_inf,
    inputs=[
        gr.Video(label="Input Video"),
        gr.Slider(0, 10000, value=500, label="Contour Threshold"),
    ],
    outputs=[
        gr.Image(label="Output Frames"),
        gr.Video(label="Output Video"),
    ],
    title="YOLO Motion Detection - Parcel Focus",
    description='A video analysis tool using YOLOv8 for parcel detection with motion tracking.',
    allow_flagging="never",
    cache_examples=False,
)
app.queue().launch()