File size: 1,603 Bytes
37c2f1c
f1c829c
 
 
5f5b974
f1c829c
 
5f5b974
f1c829c
 
 
 
 
 
5f5b974
f1c829c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
# YOLOv8n inference helper for a web UI (Gradio/Streamlit)
import os

import cv2
import numpy as np  # Add numpy if not already implicitly used

from inference import InferencePipeline

# No direct imshow or print statements here in the function,
# as these are handled by the Gradio/Streamlit app.

def run_inference_on_video(video_path):
    """
    Run the Roboflow inference workflow on a video file.

    Args:
        video_path: Path to a local video file; passed straight through
            as the pipeline's ``video_reference``.

    Returns:
        list: Visualization frames (numpy arrays) in the order the
        workflow produced them. May be empty if the workflow emitted
        no ``"output_image"`` results.
    """
    processed_frames = []  # frames with visualizations, filled by the sink

    def my_sink(result, video_frame):
        # A web app cannot use cv2.imshow; collect numpy frames for the UI
        # to render instead.
        # NOTE(review): assumes the workflow exposes its visualization under
        # the "output_image" key — confirm against the
        # "detect-count-and-visualize-5" workflow definition.
        if result.get("output_image"):
            processed_frames.append(result["output_image"].numpy_image)

    # SECURITY: read the API key from the environment (e.g. a Space secret)
    # instead of committing it in source. The hard-coded value remains only
    # as a backward-compatible fallback and should be rotated/removed.
    api_key = os.environ.get("ROBOFLOW_API_KEY", "dxkgGGHSZ3DI8XzVn29U")

    # Initialize the pipeline; workspace/workflow identify the hosted model.
    pipeline = InferencePipeline.init_with_workflow(
        api_key=api_key,
        workspace_name="naveen-kumar-hnmil",
        workflow_id="detect-count-and-visualize-5",
        video_reference=video_path,  # takes a file path
        max_fps=30,  # lower this if the web demo lags
        on_prediction=my_sink,
    )
    pipeline.start()
    pipeline.join()  # block until the whole video has been processed

    return processed_frames