# --- Page-capture residue (Hugging Face Spaces header / blame gutter), not code.
# --- Commented out so the file remains valid Python:
# Spaces:
# Build error
# Build error
# File size: 2,171 Bytes
# 68c0f15 58cc803 68c0f15 bb622fc 2b8f883 307d600 58cc803 e4777e1 ... (commit-hash blame column)
# 1 2 3 4 5 ... 71 (line-number gutter)
import gradio as gr
import torch
from PIL import Image
import numpy as np
import cv2
# Path to the locally stored YOLOv5 weights file (the 'ultralytics/yolov5'
# hub repo code is fetched/cached by torch.hub; the weights themselves are local).
model_path = 'local_latest.pt'
# Load the YOLOv5 model via torch.hub with a custom checkpoint.
# NOTE(review): this runs at import time and needs network access the first
# time to pull the hub repo — confirm that is acceptable for deployment.
model = torch.hub.load('ultralytics/yolov5', 'custom', path=model_path)
model.line_thickness = 3 # Bounding-box line thickness used by detect() below
# Log class names so a mismatch with the trained checkpoint is visible at startup.
print("Model class names:", model.names)
print("Model loaded successfully.")
def detect(image):
    """Run YOLOv5 inference on a PIL image and return an annotated copy.

    Bounding boxes are drawn for every detection; class labels are
    intentionally NOT rendered on the image (they are only printed).

    Args:
        image: input ``PIL.Image`` in any mode (converted to RGB), or
            ``None`` when Gradio submits an empty input.

    Returns:
        A new RGB ``PIL.Image`` with boxes drawn, or ``None`` if no image
        was provided.
    """
    # Gradio passes None when the user submits without an image.
    if image is None:
        return None
    # Normalize mode so the channel reversal below is always valid —
    # grayscale ("L") or RGBA uploads would otherwise crash the slicing.
    image = image.convert("RGB")
    # Convert PIL Image to NumPy array; OpenCV drawing expects BGR order.
    image_np = np.array(image)[:, :, ::-1].copy()  # RGB -> BGR
    # Run inference on the PIL image itself (YOLOv5 accepts RGB PIL input).
    results = model(image)
    # Rows are [x1, y1, x2, y2, confidence, class_index].
    detections = results.xyxy[0]
    if len(detections) > 0:
        print(f"Number of detections: {len(detections)}")
        for i, (*xyxy, conf, cls) in enumerate(detections):
            # Tensor scalars -> plain Python numbers for drawing/printing.
            xyxy = [int(coord.item()) for coord in xyxy]  # Coordinates
            conf = conf.item()  # Confidence score
            cls = int(cls.item())  # Class index
            class_name = model.names[cls]  # Class name
            print(f"Detection {i}: Class '{class_name}', Confidence {conf:.2f}, Coordinates {xyxy}")
            # Draw bounding box without label. (255, 0, 0) is the first
            # (blue) channel in BGR, so boxes render blue in the output.
            cv2.rectangle(
                image_np,
                (xyxy[0], xyxy[1]),
                (xyxy[2], xyxy[3]),
                color=(255, 0, 0),
                thickness=model.line_thickness,
                lineType=cv2.LINE_AA,
            )
    else:
        print("No detections were made.")
    # Convert BGR back to RGB; .copy() makes the array contiguous so
    # Image.fromarray never sees a negative-stride view.
    image_np = image_np[:, :, ::-1].copy()
    return Image.fromarray(image_np)
# Gradio front-end: one image in, one annotated image out.
_interface_config = dict(
    fn=detect,
    inputs=gr.Image(type="pil"),
    outputs=gr.Image(type="pil"),
    title="YOLOv5 Object Detection",
    description="Upload an image to detect objects using the YOLOv5 model. Labels are hidden in the output.",
)
iface = gr.Interface(**_interface_config)
iface.launch()
# (trailing gutter artifact from page capture removed)