File size: 1,811 Bytes
24a2c51
ad6c417
4da2efa
dbca85f
16de584
38bdda4
1b870e5
24a2c51
1b870e5
501769f
39005b3
817ac03
3d189fc
1b870e5
501769f
 
39005b3
 
 
 
 
 
 
 
 
 
 
 
 
 
501769f
 
 
 
39005b3
06f81f3
39005b3
501769f
39005b3
1b870e5
501769f
39005b3
501769f
39005b3
dbca85f
501769f
 
39005b3
501769f
 
dbca85f
501769f
 
4da2efa
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
import os
import tempfile

import cv2
import gradio as gr
from ultralytics import YOLO

# Load the YOLOv8 model once at import time so both the image and video
# handlers share a single instance (avoids reloading weights per request).
model = YOLO("best.pt")  # Use 'yolov8n.pt' if you want faster but lighter detection

# Image detection function
def predict_image(image):
    """Run YOLOv8 inference on a single image.

    Parameters
    ----------
    image : PIL.Image.Image
        Input image as supplied by the Gradio image component.

    Returns
    -------
    numpy.ndarray
        The input image annotated with detection boxes/labels.
    """
    detections = model.predict(image, imgsz=480, conf=0.5, verbose=False)
    annotated = detections[0].plot()
    return annotated

# Video detection function
def predict_video(video_path):
    """Run YOLOv8 detection on every frame of a video file.

    Frames are downscaled to half resolution before inference to keep
    processing time reasonable; annotated frames are written to a
    temporary ``.mp4`` file.

    Parameters
    ----------
    video_path : str
        Path to the input video file.

    Returns
    -------
    str
        Path to the annotated output video. The caller (Gradio) is
        responsible for eventually removing the temp file.

    Raises
    ------
    ValueError
        If the video cannot be opened.
    """
    cap = cv2.VideoCapture(video_path)
    if not cap.isOpened():
        raise ValueError(f"Could not open video: {video_path}")

    try:
        # Some containers report 0 or NaN fps; fall back to a sane default
        # so VideoWriter does not fail. (NaN != NaN catches the NaN case.)
        fps = cap.get(cv2.CAP_PROP_FPS)
        if not fps or fps != fps:
            fps = 30.0

        # Halve the resolution to speed up inference; clamp to >= 1 px so
        # tiny inputs don't truncate to a zero-sized frame.
        width = max(1, int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) // 2)
        height = max(1, int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) // 2)

        # mkstemp + close instead of NamedTemporaryFile: avoids holding an
        # open handle on the same path cv2 writes to (leaks the descriptor
        # and fails outright on Windows).
        fd, output_path = tempfile.mkstemp(suffix=".mp4")
        os.close(fd)

        out = cv2.VideoWriter(
            output_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (width, height)
        )
        try:
            while True:
                ret, frame = cap.read()
                if not ret:
                    break
                frame = cv2.resize(frame, (width, height))
                results = model.predict(frame, imgsz=480, conf=0.5, verbose=False)
                # NOTE(review): .plot() appears to return a frame-sized BGR
                # array compatible with VideoWriter — confirm against the
                # ultralytics version in use.
                out.write(results[0].plot())
        finally:
            out.release()
    finally:
        cap.release()

    return output_path

# Gradio Interface
with gr.Blocks() as demo:
    gr.Markdown("# 🚀 YOLOv8 Object Detection")
    gr.Markdown("Detect objects in images or uploaded video using your trained YOLOv8 model.")

    # Tab 1: single-image inference.
    with gr.Tab("🖼️ Image Detection"):
        image_in = gr.Image(type="pil")
        image_out = gr.Image(label="Detected Objects")
        run_image = gr.Button("Run Detection")
        run_image.click(predict_image, inputs=image_in, outputs=image_out)

    # Tab 2: per-frame video inference.
    with gr.Tab("🎥 Video Detection"):
        video_in = gr.Video()
        video_out = gr.Video()
        run_video = gr.Button("Run Detection on Video")
        run_video.click(predict_video, inputs=video_in, outputs=video_out)

demo.launch()