|
|
|
|
|
import gradio as gr |
|
|
import numpy as np |
|
|
import cv2 |
|
|
import roboflow |
|
|
import os |
|
|
|
|
|
|
|
|
# ---------------------------------------------------------------------------
# Startup: download and load both Roboflow models (runs once at import time).
# Stage 1 finds the plate; stage 2 classifies individual character crops.
# ---------------------------------------------------------------------------
print("Downloading and loading models from Roboflow...")

# SECURITY: read the key from the environment when available. The hard-coded
# fallback preserves the original behavior for local runs, but a key committed
# to source control should be rotated and the fallback removed.
ROBOFLOW_API_KEY = os.environ.get("ROBOFLOW_API_KEY", "YfKCsreNkoXYFD1CfMBY")

# Stage 1: license-plate detector (object-detection project/version).
PLATE_DETECTOR_WORKSPACE_ID = "mylprproject"
PLATE_DETECTOR_PROJECT_ID = "license-plate-yuw1z-kirke"
PLATE_DETECTOR_VERSION = 1

# Stage 2: character reader, applied to each segmented character crop.
CHARACTER_READER_WORKSPACE_ID = "mylprproject"
CHARACTER_READER_PROJECT_ID = "pr-character-reader-nqwhr"
CHARACTER_READER_VERSION = 1

rf = roboflow.Roboflow(api_key=ROBOFLOW_API_KEY)

project_plate = rf.workspace(PLATE_DETECTOR_WORKSPACE_ID).project(PLATE_DETECTOR_PROJECT_ID)
plate_model = project_plate.version(PLATE_DETECTOR_VERSION).model
print("✅ Plate Detector loaded.")

project_char = rf.workspace(CHARACTER_READER_WORKSPACE_ID).project(CHARACTER_READER_PROJECT_ID)
character_model = project_char.version(CHARACTER_READER_VERSION).model
print("✅ Character Reader loaded.")
|
|
|
|
|
|
|
|
|
|
|
def detect_license_plate_and_chars(input_image):
    """Run the two-stage Roboflow LPR pipeline on a single image.

    Stage 1 detects the license plate with ``plate_model``; stage 2 segments
    candidate character contours inside the plate crop (Otsu threshold +
    external contours) and classifies each crop with ``character_model``.

    Args:
        input_image: H x W x 3 numpy image as supplied by Gradio.

    Returns:
        Tuple of (annotated copy of the input image, detected text string).
        Returns ``"No license plate found."`` as the text when stage 1 yields
        nothing usable.
    """
    print("New image received. Starting full pipeline...")
    output_image = input_image.copy()

    # --- Stage 1: locate the plate --------------------------------------
    plate_predictions = plate_model.predict(input_image, confidence=40).json()['predictions']

    if not plate_predictions:
        return output_image, "No license plate found."

    # Pick the highest-confidence detection; the API response order is not
    # guaranteed, so indexing [0] could grab a weaker false positive.
    plate_box = max(plate_predictions, key=lambda p: p.get('confidence', 0))

    # Roboflow boxes are center-based (x, y, width, height). Convert to
    # corner coordinates and clamp to the image so the crop below can never
    # be negative-indexed or out of bounds.
    img_h, img_w = input_image.shape[:2]
    x1 = max(0, int(plate_box['x'] - plate_box['width'] / 2))
    y1 = max(0, int(plate_box['y'] - plate_box['height'] / 2))
    x2 = min(img_w, int(plate_box['x'] + plate_box['width'] / 2))
    y2 = min(img_h, int(plate_box['y'] + plate_box['height'] / 2))

    cv2.rectangle(output_image, (x1, y1), (x2, y2), (0, 0, 255), 2)
    plate_crop = input_image[y1:y2, x1:x2]
    if plate_crop.size == 0:
        # Degenerate box after clamping — nothing to read.
        return output_image, "No license plate found."

    # --- Stage 2: segment and classify characters ------------------------
    # NOTE(review): Gradio delivers RGB arrays while the conversion flag says
    # BGR; the swapped channel weights barely affect Otsu thresholding, so
    # this is kept as-is — confirm if exact grayscale values ever matter.
    gray_plate = cv2.cvtColor(plate_crop, cv2.COLOR_BGR2GRAY)
    _, thresh_plate = cv2.threshold(gray_plate, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)

    contours, _ = cv2.findContours(thresh_plate, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    detections = []
    for contour in contours:
        x_char, y_char, w_char, h_char = cv2.boundingRect(contour)

        # Heuristic filter: a real character is at least 30% of the plate
        # height and wider than a few pixels; skip everything else.
        if not (h_char > plate_crop.shape[0] * 0.3 and w_char > 5):
            continue

        char_image = plate_crop[y_char:y_char + h_char, x_char:x_char + w_char]

        char_prediction = character_model.predict(char_image).json()
        top_class = char_prediction['top']
        confidence = char_prediction['confidence']

        if confidence > 0.5:
            detections.append((x_char, top_class))

            # Draw the character box/label in full-image coordinates.
            abs_x1, abs_y1 = x1 + x_char, y1 + y_char
            cv2.rectangle(output_image, (abs_x1, abs_y1), (abs_x1 + w_char, abs_y1 + h_char), (0, 255, 0), 1)
            cv2.putText(output_image, top_class, (abs_x1, abs_y1 - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)

    # Sort by x-coordinate so characters read left to right.
    detections.sort(key=lambda d: d[0])
    final_text = "".join(char for _, char in detections)

    print(f"Prediction complete. Detected text: {final_text}")
    return output_image, final_text
|
|
|
|
|
|
|
|
|
|
|
# --- Gradio UI -----------------------------------------------------------
# Builds the page layout and wires the button to the two-stage pipeline.
with gr.Blocks() as lpr_app:
    gr.Markdown("# My Custom LPR System")
    gr.Markdown("This system uses my own two Roboflow models to find the plate and read the characters.")

    # Input and annotated output images, side by side.
    with gr.Row():
        source_image = gr.Image(type="numpy", label="Upload License Plate Image")
        annotated_image = gr.Image(type="numpy", label="Result")

    plate_text = gr.Textbox(label="Detected Characters")
    run_button = gr.Button(value="Detect Plate and Characters")

    run_button.click(
        fn=detect_license_plate_and_chars,
        inputs=source_image,
        outputs=[annotated_image, plate_text],
    )

lpr_app.launch()