Hugging Face Spaces note: the Space reports "No application file" — save the code below as `app.py` in the Space repository so it can run.
from ultralytics import YOLO
import cv2
import gradio as gr

# Load a pretrained YOLOv8-nano checkpoint once at module import time,
# so the weights are not reloaded on every inference request.
model = YOLO('yolov8n.pt')
def detect_objects(image):
    """Run YOLOv8 object detection on a single webcam frame.

    Args:
        image: RGB frame from the Gradio webcam component as a NumPy
            array (H, W, 3), or None — Gradio's streaming webcam emits
            None before the camera starts and between frames.

    Returns:
        The annotated RGB frame with bounding boxes drawn, or None when
        no frame was received (Gradio simply keeps the previous output).
    """
    # Streaming input can deliver None; cv2.cvtColor would raise on it.
    if image is None:
        return None
    # Gradio hands us RGB; OpenCV/YOLO plotting works in BGR.
    frame = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
    # Run inference; results is a list with one Results object per image.
    results = model(frame)
    # Draw bounding boxes and labels onto a copy of the frame (BGR).
    annotated_frame = results[0].plot()
    # Convert back to RGB for display in the Gradio output component.
    return cv2.cvtColor(annotated_frame, cv2.COLOR_BGR2RGB)
# Create the Gradio interface (this is the entry point Hugging Face
# Spaces runs). NOTE: Gradio 4.x removed the `source=` keyword on
# gr.Image — it is now `sources=[...]`; the old spelling raises a
# TypeError on any current Space.
demo = gr.Interface(
    fn=detect_objects,
    # `sources=["webcam"]` restricts input to the live camera;
    # `streaming=True` sends frames continuously instead of on submit.
    inputs=gr.Image(sources=["webcam"], streaming=True),
    outputs="image",
    # `live=True` is required for the streaming input to actually
    # re-run `fn` as new frames arrive.
    live=True,
    title="🎥 Real-time Object Detection (YOLOv8)",
    description="Detects and labels objects in live camera feed using YOLOv8.",
)

demo.launch()