Spaces:
Runtime error
Runtime error
# app.py (for Gradio)
import gradio as gr
import cv2
import numpy as np
from Yolov8n_train import run_inference_on_video  # project-local inference entry point
# Define the Gradio interface callback
def process_uploaded_video(video_file):
    """Run the detection pipeline on an uploaded video and return a preview.

    Args:
        video_file: Path to the uploaded video. With ``gr.File(type="filepath")``
            Gradio passes a plain ``str``; older Gradio versions passed a
            NamedTemporaryFile-like object exposing ``.name``. Both are accepted.

    Returns:
        tuple: ``(image_path_or_None, status_message)`` — the first processed
        frame saved as a JPEG (or ``None`` on failure) plus a human-readable
        status string for the UI.
    """
    if video_file is None:
        return None, "Please upload a video file."

    # gr.File(type="filepath") yields a str; fall back to .name for
    # file-object inputs from older Gradio versions.
    video_path = video_file if isinstance(video_file, str) else video_file.name

    # run_inference_on_video returns a list of numpy images (frames).
    processed_frames = run_inference_on_video(video_path)
    if not processed_frames:
        return None, "No frames processed. Check pipeline configuration or video input."

    # Demo output: persist only the first processed frame. Re-encoding all
    # frames into a video would be the production-grade alternative.
    output_image_path = "output_frame.jpg"
    cv2.imwrite(output_image_path, processed_frames[0])
    return output_image_path, "Video processed. Displaying first output frame."
# Gradio interface wiring: one file input, an image + status-text output pair.
upload_input = gr.File(type="filepath", label="Upload Video File")
result_outputs = [
    gr.Image(label="Processed Output (First Frame)", type="filepath"),
    gr.Textbox(label="Status"),
]

iface = gr.Interface(
    fn=process_uploaded_video,
    inputs=upload_input,
    outputs=result_outputs,
    title="Yolov8n Video Detection and Counting",
    description="Upload a video file to run the detection and counting workflow.",
)

if __name__ == "__main__":
    iface.launch()