File size: 3,500 Bytes
eac2d14
4df2da8
eac2d14
 
de64e44
4df2da8
eac2d14
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4df2da8
eac2d14
 
 
 
 
 
 
 
4df2da8
 
 
eac2d14
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
# app.py (for Gradio)
import gradio as gr
import cv2
import numpy as np
from Yolov8n_train import run_inference_on_video # Import your function

# Define the Gradio interface
def process_uploaded_video(video_file):
    """Run the YOLOv8 inference pipeline on an uploaded video and return
    the first annotated frame.

    Args:
        video_file: The uploaded video. With ``gr.File(type="filepath")``
            Gradio passes a plain ``str`` path; a tempfile-like object
            exposing ``.name`` (other Gradio configurations/versions) is
            also accepted.

    Returns:
        tuple: ``(image_path_or_None, status_message)`` — the path of the
        saved first output frame (or ``None`` on failure) plus a
        human-readable status string for the UI.
    """
    if video_file is None:
        return None, "Please upload a video file."

    # BUG FIX: gr.File(type="filepath") yields a str, which has no .name
    # attribute — the original unconditional `video_file.name` raised
    # AttributeError. Support both the str and file-like shapes.
    video_path = video_file if isinstance(video_file, str) else video_file.name

    # Expected to return a list of numpy (BGR) frames with detections drawn.
    processed_frames = run_inference_on_video(video_path)

    if not processed_frames:
        return None, "No frames processed. Check pipeline configuration or video input."

    # Re-encoding every processed frame back into a video is heavyweight for
    # a demo Space, so we persist only the first annotated frame and let
    # Gradio display it as an image.
    output_image_path = "output_frame.jpg"
    cv2.imwrite(output_image_path, processed_frames[0])
    return output_image_path, "Video processed. Displaying first output frame."


# Gradio UI wiring: build the components up front, then assemble the Interface.
video_input = gr.File(type="filepath", label="Upload Video File")
frame_output = gr.Image(label="Processed Output (First Frame)", type="filepath")
status_output = gr.Textbox(label="Status")

iface = gr.Interface(
    fn=process_uploaded_video,
    inputs=video_input,
    outputs=[frame_output, status_output],
    title="Yolov8n Video Detection and Counting",
    description="Upload a video file to run the detection and counting workflow.",
)

if __name__ == "__main__":
    iface.launch()