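# app.py — Gradio demo for package detection in video with a trained Ultralytics YOLO model.
# Annotated frames are streamed to the UI while the full annotated video is written to disk.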
import cv2
import gradio as gr
import numpy as np
from ultralytics import YOLO
# Load the trained YOLO model
model = YOLO('best.pt') # Replace with your trained model file
def detect_packages(frame):
    """Run YOLO inference on a frame and draw red boxes around detected packages."""
    results = model(frame)
    for result in results:
        for box in result.boxes:
            x1, y1, x2, y2 = map(int, box.xyxy[0])
            cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 0, 255), 3)
    return frame
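# Optional extension (assumption, not part of the original script): each Ultralytics box also
# exposes box.cls and box.conf, so class/confidence labels could be drawn next to the rectangles:
#   label = f"{model.names[int(box.cls[0])]} {float(box.conf[0]):.2f}"
#   cv2.putText(frame, label, (x1, y1 - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)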
def vid_inf(vid_path, contour_thresh):
    # Note: contour_thresh is exposed in the UI but not used by the YOLO-based pipeline.
    cap = cv2.VideoCapture(vid_path)
    if not cap.isOpened():
        print("Error opening video file")
        return None, None
    frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps = int(cap.get(cv2.CAP_PROP_FPS))
    frame_size = (frame_width, frame_height)
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    output_video = "output_recorded.mp4"
    out = cv2.VideoWriter(output_video, fourcc, fps, frame_size)
    count = 0
    while cap.isOpened():
        ret, frame = cap.read()
        if ret:
            frame_out = detect_packages(frame.copy())
            # Convert BGR (OpenCV) to RGB for display in the Gradio Image component.
            frame_out_final = cv2.cvtColor(frame_out, cv2.COLOR_BGR2RGB)
            out.write(frame_out)
            # Stream every 12th frame to keep the UI responsive.
            if not count % 12:
                yield frame_out_final, None
            count += 1
        else:
            break
    cap.release()
    out.release()
    cv2.destroyAllWindows()
    yield None, output_video
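# vid_inf is a generator, so Gradio streams each yielded (frame, None) pair to the Image
# output and fills the Video output from the final (None, output_video) yield.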
# Gradio Interface
input_video = gr.Video(label="Input Video")
contour_thresh = gr.Slider(0, 10000, value=4, label="Contour Threshold", info="Adjust the Contour Threshold according to the object size that you want to detect.")
output_frames = gr.Image(label="Output Frames")
output_video_file = gr.Video(label="Output Video")
app = gr.Interface(
    fn=vid_inf,
    inputs=[input_video, contour_thresh],
    outputs=[output_frames, output_video_file],
    title="Motion Detection using YOLOv8",
    description="A Gradio app for dynamic video analysis using YOLOv8 to track labeled packages while ignoring other moving objects.",
    allow_flagging="never",
    examples=[["./sample/package_test.mp4", 1000]],
    cache_examples=False,
)
app.queue().launch()
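# Launch locally with `python app.py`; Gradio serves the interface on http://127.0.0.1:7860
# by default. Passing share=True to launch() would expose a temporary public link instead.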