# Newone / inference.py
# Source: HuggingFace Space "NaveenKumar5/Newone", commit 37c2f1c
# ("Update inference.py").
# YOLOv8n inference script (runs a Roboflow workflow over a video file).
import os

import cv2
import numpy as np  # numpy arrays are what the sink collects for the UI

from inference import InferencePipeline
# No direct imshow or print statements here in the function,
# as these are handled by the Gradio/Streamlit app.
def run_inference_on_video(video_path):
    """
    Run the Roboflow inference workflow on a video file.

    Args:
        video_path: Path to a local video file; passed straight through to
            ``InferencePipeline`` as the video source.

    Returns:
        list: Frames (numpy arrays) with the workflow's visualizations
        applied, in the order the pipeline produced them. Empty if the
        workflow never emitted an ``"output_image"``.
    """
    processed_frames = []  # frames with visualizations, returned to the UI

    def my_sink(result, video_frame):
        # Collect the visualized frame instead of cv2.imshow: this runs
        # inside a web app (Gradio/Streamlit), which renders numpy arrays.
        # NOTE(review): assumes `result` is dict-like with an optional
        # "output_image" exposing `.numpy_image` — confirm against the
        # Roboflow workflow sink contract.
        if result.get("output_image"):
            processed_frames.append(result["output_image"].numpy_image)

    # SECURITY: the API key used to be hard-coded in source. Prefer the
    # ROBOFLOW_API_KEY environment variable (e.g. a Space secret); the old
    # literal remains only as a backward-compatible fallback — rotate it
    # and remove the fallback once the secret is configured.
    api_key = os.environ.get("ROBOFLOW_API_KEY", "dxkgGGHSZ3DI8XzVn29U")

    pipeline = InferencePipeline.init_with_workflow(
        api_key=api_key,
        workspace_name="naveen-kumar-hnmil",
        workflow_id="detect-count-and-visualize-5",
        video_reference=video_path,  # a file path is a valid video source
        max_fps=30,  # cap throughput; adjust for web demos
        on_prediction=my_sink,
    )
    pipeline.start()
    pipeline.join()  # block until the whole video has been processed
    return processed_frames