File size: 3,200 Bytes
a1e9d53 43e9b6c eadbada f308ab6 43e9b6c f308ab6 43e9b6c f308ab6 43e9b6c 9f67c7f 43e9b6c 9d003a2 43e9b6c 9f67c7f 43e9b6c f308ab6 9f67c7f 9d003a2 9f67c7f f308ab6 9f67c7f 9d003a2 f308ab6 9d003a2 f308ab6 9d003a2 f308ab6 9d003a2 f308ab6 9f67c7f f308ab6 9f67c7f 43e9b6c f308ab6 43e9b6c 9d003a2 43e9b6c eadbada 43e9b6c f308ab6 43e9b6c bdb20e6 43e9b6c 9d003a2 eadbada 9f67c7f 9d003a2 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 |
import os
import subprocess
import sys

# NOTE(review): installing a dependency at import time is fragile; prefer a
# requirements.txt / packaged environment. Kept for parity with the original
# hosted-demo workflow, but routed through the current interpreter with an
# argument list instead of a bare `os.system` shell string.
subprocess.run([sys.executable, "-m", "pip", "install", "ultralytics"], check=False)

import cv2
import gradio as gr
import numpy as np
from PIL import Image
from ultralytics import YOLO  # deduplicated: the original imported this twice

# Load the YOLOv8-nano detection model (weights auto-download on first use).
yolo_model = YOLO('yolov8n.pt')

# Only detections whose class name equals this label are drawn.
# NOTE(review): 'parcel' is not a stock COCO class for yolov8n.pt — confirm a
# custom-trained checkpoint is intended, otherwise no box will ever match.
TARGET_LABEL = 'parcel'
def vid_inf(vid_path, contour_thresh):
    """Detect moving target-label objects in a video, streaming previews.

    Combines MOG2 background subtraction (motion evidence) with YOLO
    detections: a box labeled ``TARGET_LABEL`` at confidence >= 0.5 is drawn
    only when a sufficiently large moving contour starts inside it.

    Args:
        vid_path: Path to the input video file.
        contour_thresh: Minimum contour area (in pixels) for a foreground
            region to count as motion.

    Yields:
        ``(PIL.Image, None)`` for roughly every 12th annotated frame while
        processing, then one final ``(None, output_video_path)``.
    """
    cap = cv2.VideoCapture(vid_path)
    frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    # Some containers report 0 FPS; fall back so the writer stays valid.
    fps = int(cap.get(cv2.CAP_PROP_FPS)) or 30

    output_video = "output_recorded.mp4"
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    out = cv2.VideoWriter(output_video, fourcc, fps, (frame_width, frame_height))

    backSub = cv2.createBackgroundSubtractorMOG2(history=200, varThreshold=25, detectShadows=True)
    # Hoisted out of the frame loop: the structuring element never changes.
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))

    count = 0
    try:
        while cap.isOpened():
            ret, frame = cap.read()
            if not ret:
                break

            # Motion mask: thresholding at 200 discards MOG2 shadow pixels
            # (marked 127), then a morphological opening removes speckle.
            fg_mask = backSub.apply(frame)
            _, mask_thresh = cv2.threshold(fg_mask, 200, 255, cv2.THRESH_BINARY)
            mask_eroded = cv2.morphologyEx(mask_thresh, cv2.MORPH_OPEN, kernel)
            contours, _ = cv2.findContours(mask_eroded, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
            # The user-set slider is the single source of truth for "large
            # enough". (A second hard-coded 500px check inside the loop used
            # to silently override slider values below 500.)
            large_contours = [c for c in contours if cv2.contourArea(c) > contour_thresh]

            results = yolo_model(frame)
            frame_out = frame.copy()
            for result in results:
                for idx, box in enumerate(result.boxes.xyxy):
                    class_id = int(result.boxes.cls[idx])
                    label = yolo_model.names[class_id]
                    conf = result.boxes.conf[idx]
                    if label != TARGET_LABEL or conf < 0.5:
                        continue
                    x1, y1, x2, y2 = map(int, box)
                    # Draw only when a moving contour's bounding-rect origin
                    # falls strictly inside the YOLO box.
                    for cnt in large_contours:
                        x, y, _, _ = cv2.boundingRect(cnt)
                        if x1 < x < x2 and y1 < y < y2:
                            cv2.rectangle(frame_out, (x1, y1), (x2, y2), (0, 255, 255), 2)
                            cv2.putText(frame_out, label, (x1, y1 - 10),
                                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 0), 2)

            out.write(frame_out)
            if not count % 12:
                # Convert BGR->RGB only for the frames actually previewed.
                yield Image.fromarray(cv2.cvtColor(frame_out, cv2.COLOR_BGR2RGB)), None
            count += 1
    finally:
        # Release capture/writer even if decoding or inference raises.
        cap.release()
        out.release()
        cv2.destroyAllWindows()

    yield None, output_video
# ---- Gradio UI -------------------------------------------------------------
# vid_inf is a generator, so the Interface streams (preview frame, None)
# tuples and finishes with (None, recorded video path).
app = gr.Interface(
    fn=vid_inf,
    inputs=[
        gr.Video(label="Input Video"),
        gr.Slider(0, 10000, value=500, label="Contour Threshold"),
    ],
    outputs=[
        gr.Image(label="Output Frames"),
        gr.Video(label="Output Video"),
    ],
    title="YOLO Motion Detection - Parcel Focus",
    description='A video analysis tool using YOLOv8 for parcel detection with motion tracking.',
    allow_flagging="never",
    cache_examples=False,
)

app.queue().launch()
|