| | import gradio as gr |
| | import cv2 |
| | import numpy as np |
| | from ultralytics import YOLO |
| |
|
def detect_and_crop_spines(image, model_path):
    """Detect book spines in an image with a YOLO model and draw their boxes.

    NOTE(review): despite the name, this function does not return crops —
    it only annotates the image; confirm whether cropping was intended.

    Args:
        image: Either a filesystem path (str) or an RGB ``np.ndarray``
            (H, W, 3) as supplied by Gradio.
        model_path: Path to the YOLO weights file (e.g. ``"yolo11x.pt"``).

    Returns:
        Tuple of (annotated RGB image or ``None`` on detection error,
        human-readable status string).

    Raises:
        ValueError: If the image path could not be read.
    """
    # Lazily load and cache the model on the function object so repeated
    # Gradio calls don't reload the weights from disk.
    if not hasattr(detect_and_crop_spines, 'model'):
        detect_and_crop_spines.model = YOLO(model_path)

    if isinstance(image, str):
        image = cv2.imread(image)
    elif isinstance(image, np.ndarray) and image.ndim == 3 and image.shape[2] == 3:
        # Gradio delivers RGB; OpenCV drawing/colors assume BGR.
        image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)

    if image is None:
        raise ValueError("Failed to load image")

    try:
        results = detect_and_crop_spines.model.predict(image, show=False, conf=0.25)
    except Exception as e:
        return None, f"Error during detection: {str(e)}"

    annotated_image = image.copy()

    # BUG FIX: the original iterated `enumerate(...)`, so `det` was an
    # (index, box) tuple and det[0]..det[3] picked up the loop counter and
    # the array instead of the x1/y1/x2/y2 coordinates. Iterate the boxes
    # directly and unpack the corners explicitly.
    for det in results[0].boxes.xyxy.cpu().numpy():
        x1, y1, x2, y2 = det[:4]
        # Four corners of the axis-aligned detection, clockwise.
        points = np.array([[x1, y1], [x2, y1],
                           [x2, y2], [x1, y2]], dtype=np.float32)

        rect = cv2.minAreaRect(points)
        box = cv2.boxPoints(rect)
        # np.int0 was removed in NumPy 2.0; cast explicitly instead.
        box = box.astype(np.intp)

        cv2.drawContours(annotated_image, [box], 0, (0, 255, 0), 2)

    # Convert back to RGB for display in Gradio.
    annotated_image = cv2.cvtColor(annotated_image, cv2.COLOR_BGR2RGB)

    return annotated_image, f"Detected {len(results[0].boxes)} book spines"
| |
|
| | |
# Gradio UI: image + model-path inputs, annotated image + status outputs.
demo = gr.Interface(
    fn=detect_and_crop_spines,
    inputs=[
        gr.Image(),
        gr.Text(label="YOLO Model Path", value="yolo11x.pt"),
    ],
    outputs=[
        gr.Image(label="Annotated Image"),
        gr.Text(label="Detection Results"),
    ],
    title="Book Spine Detector",
    description="Upload an image of a bookshelf to detect book spines using YOLO",
)

# Guard the launch so importing this module doesn't start a web server.
if __name__ == "__main__":
    demo.launch()