# LPRor / app.py
# Rhaya03's picture
# Create app.py
# e9aa8e9 verified
# --- 1. Import all the necessary tools ---
import gradio as gr
import numpy as np
import cv2
import roboflow
import os
# --- 2. Load BOTH of your AI models from Roboflow ---
print("Downloading and loading models from Roboflow...")

# SECURITY: prefer the ROBOFLOW_API_KEY environment variable (e.g. a Hugging
# Face Spaces secret) over a key committed to source control. The literal
# below is kept only as a backward-compatible fallback — rotate that key and
# remove it once the environment variable is configured.
ROBOFLOW_API_KEY = os.environ.get("ROBOFLOW_API_KEY", "YfKCsreNkoXYFD1CfMBY")

# --- Model 1: the plate detector ---
PLATE_DETECTOR_WORKSPACE_ID = "mylprproject"
PLATE_DETECTOR_PROJECT_ID = "license-plate-yuw1z-kirke"
PLATE_DETECTOR_VERSION = 1  # The version number of your plate detector

# --- Model 2: the character reader (your specialist) ---
CHARACTER_READER_WORKSPACE_ID = "mylprproject"
CHARACTER_READER_PROJECT_ID = "pr-character-reader-nqwhr"
CHARACTER_READER_VERSION = 1  # The version number of your character reader

# Authenticate once, then resolve both hosted models from their projects.
rf = roboflow.Roboflow(api_key=ROBOFLOW_API_KEY)
project_plate = rf.workspace(PLATE_DETECTOR_WORKSPACE_ID).project(PLATE_DETECTOR_PROJECT_ID)
plate_model = project_plate.version(PLATE_DETECTOR_VERSION).model
print("✅ Plate Detector loaded.")
project_char = rf.workspace(CHARACTER_READER_WORKSPACE_ID).project(CHARACTER_READER_PROJECT_ID)
character_model = project_char.version(CHARACTER_READER_VERSION).model
print("✅ Character Reader loaded.")
# --- 3. Define the main prediction function ---
def detect_license_plate_and_chars(input_image):
    """Run the full two-stage Roboflow LPR pipeline on one image.

    Stage 1 locates the license plate with ``plate_model``; stage 2 segments
    candidate characters from the plate crop using OpenCV contours; stage 3
    classifies each candidate with ``character_model``.

    Args:
        input_image: H x W x 3 numpy array from the Gradio image component,
            or None if the user clicked the button without uploading.
            NOTE(review): Gradio delivers RGB, not BGR — the cv2 color
            tuples below are therefore interpreted in RGB order.

    Returns:
        Tuple of (annotated image, detected text string) for the two
        Gradio outputs.
    """
    if input_image is None:
        return None, "No image provided."

    print("New image received. Starting full pipeline...")
    output_image = input_image.copy()

    # --- STAGE 1: find the license plate ---
    plate_predictions = plate_model.predict(input_image, confidence=40).json()['predictions']
    if not plate_predictions:
        return output_image, "No license plate found."

    # Roboflow boxes are center-x/y plus width/height; convert to corner
    # coordinates and clamp to the image bounds so the crop below is valid
    # even when the detected box spills over an edge.
    plate_box = plate_predictions[0]
    img_h, img_w = input_image.shape[:2]
    x1 = max(0, int(plate_box['x'] - plate_box['width'] / 2))
    y1 = max(0, int(plate_box['y'] - plate_box['height'] / 2))
    x2 = min(img_w, int(plate_box['x'] + plate_box['width'] / 2))
    y2 = min(img_h, int(plate_box['y'] + plate_box['height'] / 2))
    cv2.rectangle(output_image, (x1, y1), (x2, y2), (0, 0, 255), 2)  # Red box for the plate

    plate_crop = input_image[y1:y2, x1:x2]
    if plate_crop.size == 0:
        # Degenerate box (zero width/height after clamping) — nothing to read.
        return output_image, "No license plate found."

    # --- STAGE 2: find candidate characters within the plate ---
    # Inverted Otsu threshold turns dark glyphs white so contour detection
    # picks them up regardless of plate brightness.
    gray_plate = cv2.cvtColor(plate_crop, cv2.COLOR_BGR2GRAY)
    _, thresh_plate = cv2.threshold(gray_plate, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
    contours, _ = cv2.findContours(thresh_plate, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    detections = []
    for contour in contours:
        x_char, y_char, w_char, h_char = cv2.boundingRect(contour)
        # Size filter: real characters span a large fraction of the plate
        # height; tiny contours are noise (screws, dirt, borders).
        if h_char > (plate_crop.shape[0] * 0.3) and w_char > 5:
            char_image = plate_crop[y_char:y_char + h_char, x_char:x_char + w_char]

            # --- STAGE 3: classify the character crop ---
            char_prediction = character_model.predict(char_image).json()
            # Use .get so a malformed/empty classification response is
            # skipped instead of raising KeyError mid-pipeline.
            top_class = char_prediction.get('top')
            confidence = char_prediction.get('confidence', 0)
            if top_class is None or confidence <= 0.5:
                continue  # only accept confident predictions

            detections.append((x_char, top_class))
            # Draw the character box and label on the full-size image.
            abs_x1, abs_y1 = x1 + x_char, y1 + y_char
            cv2.rectangle(output_image, (abs_x1, abs_y1),
                          (abs_x1 + w_char, abs_y1 + h_char), (0, 255, 0), 1)
            cv2.putText(output_image, top_class, (abs_x1, abs_y1 - 5),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)

    # Sort by x-position within the plate for left-to-right reading order.
    detections.sort(key=lambda d: d[0])
    final_text = "".join(char for _, char in detections)
    print(f"Prediction complete. Detected text: {final_text}")
    return output_image, final_text
# --- 4. Create the Gradio Web Interface ---
# Layout: two images side by side, then the text result and the trigger button.
with gr.Blocks() as demo:
    gr.Markdown("# My Custom LPR System")
    gr.Markdown("This system uses my own two Roboflow models to find the plate and read the characters.")
    with gr.Row():
        img_in = gr.Image(type="numpy", label="Upload License Plate Image")
        img_out = gr.Image(type="numpy", label="Result")
    txt_out = gr.Textbox(label="Detected Characters")
    run_btn = gr.Button("Detect Plate and Characters")
    # Wire the button to the two-stage pipeline defined above.
    run_btn.click(fn=detect_license_plate_and_chars, inputs=img_in, outputs=[img_out, txt_out])

# --- 5. Launch the application ---
demo.launch()