# Hugging Face Spaces app (page capture; status at capture time: Runtime error)
import gradio as gr
import cv2
import numpy as np
from PIL import Image, ImageDraw
from ultralytics import YOLO
from paddleocr import PaddleOCR
import logging
import os
import requests

# Setup logging and a directory for intermediate debug images.
logging.basicConfig(level=logging.INFO)
os.makedirs("debug_outputs", exist_ok=True)

# Path to the YOLOv8 model weights.
model_path = "yolov8-license-plate.pt"  # Replace this with your correct model path or URL
# If the weights are not present locally, download them first.
download_url = "https://your-model-url.com/your-model.pt"  # Replace with actual URL
if not os.path.exists(model_path):
    print(f"Downloading model weights from {download_url}...")
    # Fail fast on a bad URL / HTTP error instead of silently writing an HTML
    # error page into the .pt file (which would make YOLO() crash later), and
    # stream the body so the whole checkpoint is never held in memory at once.
    response = requests.get(download_url, stream=True, timeout=60)
    response.raise_for_status()
    with open(model_path, "wb") as f:
        for chunk in response.iter_content(chunk_size=8192):
            f.write(chunk)

# Load the model (either downloaded or pre-existing).
model = YOLO(model_path)
# PaddleOCR for text recognition; the angle classifier handles rotated text.
ocr = PaddleOCR(use_angle_cls=True, lang='en')
def locate_plate_within_vehicle(vehicle_crop):
    """Heuristically locate a license plate inside a vehicle crop.

    Uses edge detection rather than a learned detector: plates show up as
    small, wide rectangles with strong edges.

    Args:
        vehicle_crop: PIL image of a single vehicle region.

    Returns:
        A PIL crop of the best plate candidate, or None when no plausible
        rectangle is found.
    """
    # Edge map: grayscale -> edge-preserving smoothing -> Canny.
    gray = cv2.cvtColor(np.array(vehicle_crop), cv2.COLOR_RGB2GRAY)
    smoothed = cv2.bilateralFilter(gray, 11, 17, 17)
    edges = cv2.Canny(smoothed, 30, 200)

    found, _ = cv2.findContours(edges.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    # Only the few largest contours are realistic plate candidates.
    candidates = sorted(found, key=cv2.contourArea, reverse=True)[:5]

    for contour in candidates:
        x, y, w, h = cv2.boundingRect(contour)
        ratio = w / float(h)
        # Plates are wide rectangles; demand a plausible aspect ratio and a
        # minimum pixel width so tiny noise boxes are rejected.
        if 2 < ratio < 6 and w > 60:
            return vehicle_crop.crop((x, y, x + w, y + h))
    return None
def detect_vehicles(image):
    """Run the YOLOv8 model on *image* and collect vehicle bounding boxes.

    Args:
        image: PIL image (anything ``np.array`` can convert).

    Returns:
        List of ``(x_min, y_min, x_max, y_max)`` integer boxes, one per
        detected vehicle.
    """
    frame = np.array(image)
    # COCO class ids kept: 2=car, 3=motorcycle, 5=bus, 7=truck.
    vehicle_ids = {2, 3, 5, 7}
    regions = []
    for result in model.predict(frame, conf=0.4):
        detections = result.boxes
        if detections is None:
            continue
        coords = detections.xyxy.cpu().numpy().astype(int)
        labels = detections.cls.cpu().numpy().astype(int)
        for box, cls_id in zip(coords, labels):
            if cls_id in vehicle_ids:
                x_min, y_min, x_max, y_max = box[:4]
                regions.append((x_min, y_min, x_max, y_max))
    return regions
def extract_text_from_plate(plate_image, idx):
    """OCR the text on a cropped plate image.

    Args:
        plate_image: PIL image of the plate region.
        idx: index used to name the debug output file.

    Returns:
        The recognized text, "[No text detected]" when OCR found nothing,
        or "[OCR failed]" on an unexpected error.
    """
    try:
        plate_cv2 = cv2.cvtColor(np.array(plate_image), cv2.COLOR_RGB2BGR)
        cv2.imwrite(f"debug_outputs/preprocessed_plate_{idx}.jpg", plate_cv2)
        result = ocr.ocr(plate_cv2, cls=True)
        # PaddleOCR returns [None] (not an empty list) when nothing is found,
        # so result[0] must be truth-checked; calling len() on it would raise
        # TypeError and misreport a clean "no text" case as "[OCR failed]".
        if result and isinstance(result, list) and result[0]:
            text = " ".join(line[1][0] for line in result[0])
            return text.strip()
        return "[No text detected]"
    except Exception as e:
        logging.warning(f"OCR failed: {e}")
        return "[OCR failed]"
def process_image(image):
    """Detect vehicles, locate their plates, and OCR the plate text.

    Args:
        image: PIL image or numpy array from the Gradio input; may be None
            when the user clicks the button without uploading anything.

    Returns:
        Tuple of (annotated image or None, newline-joined status text).
    """
    try:
        # Gradio passes None when no image was provided; bail out cleanly
        # instead of falling into the generic exception handler below.
        if image is None:
            return None, "No image provided."
        if isinstance(image, np.ndarray):
            image = Image.fromarray(image)
        draw = ImageDraw.Draw(image)
        vehicles = detect_vehicles(image)
        if not vehicles:
            return image, "No vehicles detected."
        all_texts = []
        for idx, (x_min, y_min, x_max, y_max) in enumerate(vehicles):
            draw.rectangle([x_min, y_min, x_max, y_max], outline="blue", width=2)
            vehicle_crop = image.crop((x_min, y_min, x_max, y_max))
            vehicle_crop.save(f"debug_outputs/vehicle_{idx}.jpg")
            plate_crop = locate_plate_within_vehicle(vehicle_crop)
            # Explicit None check: PIL images should not be truth-tested.
            if plate_crop is not None:
                plate_crop.save(f"debug_outputs/plate_{idx}.jpg")
                text = extract_text_from_plate(plate_crop, idx)
                all_texts.append(f"Vehicle {idx + 1}: {text}")
            else:
                all_texts.append(f"Vehicle {idx + 1}: [No plate found]")
        return image, "\n".join(all_texts)
    except Exception as e:
        logging.exception("Unexpected error during image processing:")
        return image, f"Error: {str(e)}"
# Gradio UI: image input on the left, annotated output + recognized text on
# the right, with a single button wiring the pipeline together.
with gr.Blocks() as demo:
    gr.Markdown("## π FREE License Plate Detector (YOLOv8n + OCR)")
    with gr.Row():
        with gr.Column():
            image_input = gr.Image(type="pil", label="Upload or Capture Image")
        with gr.Column():
            image_output = gr.Image(type="pil", label="Detected Vehicles")
            plates_output = gr.Textbox(label="Recognized Number Plates")
    process_button = gr.Button("π Process Image")
    process_button.click(
        process_image,
        inputs=image_input,
        outputs=[image_output, plates_output],
    )

demo.launch()