# fix: update model path (72e7b47)
import gradio as gr
import torch
from PIL import Image
import numpy as np
import cv2
# NOTE(review): an earlier comment said "download from Hugging Face Hub", but
# this is a local checkpoint path bundled with the app (see commit message
# "fix: update model path") — confirm the file ships alongside this script.
model_path = 'local_latest.pt'
# Load the custom YOLOv5 weights via torch.hub; this fetches the
# ultralytics/yolov5 repo code on first run (requires network for that step).
model = torch.hub.load('ultralytics/yolov5', 'custom', path=model_path)
model.line_thickness = 3  # box line thickness (px), read later by detect()
# Log the class-index -> name mapping so a deployment can sanity-check the weights.
print("Model class names:", model.names)
print("Model loaded successfully.")
def detect(image):
    """Run YOLOv5 inference on a PIL image and return an annotated copy.

    Args:
        image: input ``PIL.Image`` (any mode; normalized to RGB below).

    Returns:
        ``PIL.Image`` in RGB mode with blue, anti-aliased bounding boxes
        drawn around every detection. Labels are intentionally omitted.
    """
    # Normalize to RGB so the 3-channel conversions below are always valid
    # (guards against RGBA/grayscale uploads).
    image = image.convert("RGB")
    # OpenCV draws on BGR arrays; keep a BGR working copy for annotation.
    # cv2.cvtColor yields a contiguous array, unlike the [:, :, ::-1] view.
    image_np = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
    # Run inference on the PIL image directly (YOLOv5 accepts PIL input
    # and performs its own preprocessing).
    results = model(image)
    detections = results.xyxy[0]  # one row per box: x1, y1, x2, y2, conf, cls
    if len(detections):
        print(f"Number of detections: {len(detections)}")
        for i, (*xyxy, conf, cls) in enumerate(detections):
            xyxy = [int(coord.item()) for coord in xyxy]  # pixel coordinates
            conf = conf.item()  # confidence score
            cls = int(cls.item())  # class index
            class_name = model.names[cls]  # human-readable class name
            print(f"Detection {i}: Class '{class_name}', Confidence {conf:.2f}, Coordinates {xyxy}")
            # Draw the bounding box only — no label text.
            cv2.rectangle(
                image_np,
                (xyxy[0], xyxy[1]),
                (xyxy[2], xyxy[3]),
                color=(255, 0, 0),  # blue in BGR
                thickness=model.line_thickness,
                lineType=cv2.LINE_AA,
            )
    else:
        print("No detections were made.")
    # Convert back to RGB (contiguous) and wrap as a PIL image for Gradio.
    annotated_image = Image.fromarray(cv2.cvtColor(image_np, cv2.COLOR_BGR2RGB))
    return annotated_image
# Wire the detector into a simple Gradio web UI: one image in, one image out.
image_input = gr.Image(type="pil")
image_output = gr.Image(type="pil")

iface = gr.Interface(
    fn=detect,
    inputs=image_input,
    outputs=image_output,
    title="YOLOv5 Object Detection",
    description="Upload an image to detect objects using the YOLOv5 model. Labels are hidden in the output.",
)

# Start the local web server (blocks until the process is interrupted).
iface.launch()