Update app.py
app.py CHANGED
Removed (previous version; several lines were truncated in the diff view):

- `np.int = int` sat inline right after `import numpy as np` ("patch for PaddleOCR's old np.int calls"), and cv2, json, tempfile, re were imported on one line.
- PaddleOCR was made recognition-only by omission: "Recognition-only PaddleOCR: no det_model_dir, so ocr.ocr() only runs the recognizer + cls". The new version passes explicit det/rec/cls flags instead.
- `format_turkish_plate()` carried the same normalization regex now in `format_plate()`.
- `parse_ocr()` handled PaddleOCR's per-text-line output, documented as
      [  # list per text-line
          [ <coords>, (<text>, <score>) ],
          ...
      ]
  joining all <text> pieces and taking the min score across them
  (`scores = [line[1][1] for line in lines]`; `return "".join(texts), float(min(scores))`).
  It is replaced by `recognize_plate()` below.
- The confidence slider used bare ints (`gr.Slider(0,1,0.25,0.01, label="YOLO Confidence")`); it is now written with explicit floats.

New version:
# app.py
import re
import json
import cv2
import tempfile
import numpy as np
import gradio as gr
from ultralytics import YOLO
from paddleocr import PaddleOCR

# ─── 0) Patch for old np.int usage ─────────────────────────────────
np.int = int

# ─── 1) Character map (exactly the one used for training) ──────────
# Digits + uppercase letters + space
MY_CHAR_LIST = list("0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ ")

# ─── 2) Load models ────────────────────────────────────────────────
yolo = YOLO("models/best.pt")          # your YOLOv8 detector

ocr = PaddleOCR(
    det=False,                         # disable PaddleOCR detector on crops
    rec=True,                          # enable recognition
    rec_model_dir="models/ocr_model",
    use_angle_cls=True,                # angle classifier
    cls=True,                          # v3.x flag
    use_space_char=True                # allow space in output
)
# Override the internal character list so it matches your training:
ocr.text_recognizer.character = MY_CHAR_LIST
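
# Optional sanity check for the recognizer before wiring up the UI
# (hypothetical sample path, not part of the app flow):
#   sample = cv2.resize(cv2.imread("samples/plate.jpg"), (128, 32))
#   print(ocr.ocr(sample, det=False, cls=True))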

# ─── 3) Plate formatting ───────────────────────────────────────────
def format_plate(s: str) -> str:
    """Normalize raw OCR text to 'DD AAA DDDD' or 'Unknown'."""
    s = re.sub(r'[^A-Z0-9]', '', s.upper())
    m = re.match(r'^(\d{2})([A-Z]{1,3})(\d{2,4})$', s)
    return f"{m.group(1)} {m.group(2)} {m.group(3)}" if m else "Unknown"
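
# Examples, following the regex above:
#   format_plate("34-abc 123")  -> "34 ABC 123"
#   format_plate("O6 BC 1234")  -> "Unknown"  (leading 'O' is a letter, not a digit)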

# ─── 4) OCR helper ─────────────────────────────────────────────────
def recognize_plate(crop):
    """
    Run OCR on a 128×32 crop; return (text, confidence).
    """
    recs = ocr.ocr(crop, det=False, cls=True)
    if not recs or len(recs[0]) < 2:
        return "", 0.0
    text, score = recs[0][1]
    return text, float(score)
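
# Composed with the formatter (values illustrative):
#   text, score = recognize_plate(plate_img)   # e.g. ("34ABC123", 0.97)
#   plate = format_plate(text)                 # -> "34 ABC 123"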

# ─── 5) Single-image inference ─────────────────────────────────────
def run_image(img, conf=0.25):
    # YOLO expects BGR
    bgr = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
    results = yolo(bgr, conf=conf)[0]
    out = bgr.copy()

    for box in results.boxes.xyxy.cpu().numpy().astype(int):
        x1, y1, x2, y2 = box
        crop = out[y1:y2, x1:x2]
        if crop.size == 0:
            continue

        # resize to match your OCR training size
        plate_img = cv2.resize(crop, (128, 32))
        text, score = recognize_plate(plate_img)
        plate = format_plate(text)
        label = f"{plate} ({score:.2f})"

        # draw
        cv2.rectangle(out, (x1, y1), (x2, y2), (0, 255, 0), 2)
        cv2.putText(out, label, (x1, y1 - 8),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)

    # back to RGB
    return cv2.cvtColor(out, cv2.COLOR_BGR2RGB), f"{len(results.boxes)} plate(s) detected"
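
# Standalone use outside the UI (hypothetical file name):
#   rgb = cv2.cvtColor(cv2.imread("car.jpg"), cv2.COLOR_BGR2RGB)
#   annotated, msg = run_image(rgb, conf=0.3)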

# ─── 6) Video inference ────────────────────────────────────────────
def run_video(video_file, conf=0.25):
    cap = cv2.VideoCapture(video_file)
    fps = cap.get(cv2.CAP_PROP_FPS) or 30
    w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

    out_path = tempfile.NamedTemporaryFile(suffix=".mp4", delete=False).name
    writer = cv2.VideoWriter(
        out_path, cv2.VideoWriter_fourcc(*"mp4v"), fps, (w, h)
    )
    records = []
    frame_idx = 0

    while True:
        ret, frame = cap.read()
        if not ret:
            break
        frame_idx += 1
        t = frame_idx / fps

        results = yolo(frame, conf=conf)[0]
        for box in results.boxes.xyxy.cpu().numpy().astype(int):
            x1, y1, x2, y2 = box
            crop = frame[y1:y2, x1:x2]
            if crop.size == 0:
                continue

            plate_img = cv2.resize(crop, (128, 32))
            text, score = recognize_plate(plate_img)
            plate = format_plate(text)

            if plate != "Unknown":
                records.append({
                    "time_s": round(t, 2),
                    "plate": plate,
                    "conf": round(score, 3)
                })

            cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
            cv2.putText(frame, plate, (x1, y1 - 8),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)

        writer.write(frame)

    cap.release()
    writer.release()

    with open("output.json", "w") as f:
        json.dump(records, f, indent=2)

    return out_path, "Done"
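
# output.json ends up as a flat list of timestamped hits, e.g.:
#   [{"time_s": 1.2, "plate": "34 ABC 123", "conf": 0.973}, ...]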

# ─── 7) Gradio UI ──────────────────────────────────────────────────
with gr.Blocks() as demo:
    gr.Markdown("## License Plate Detection + Recognition")

    with gr.Row():
        with gr.Column():
            img_in = gr.Image(type="numpy", label="Upload Image")
            vid_in = gr.File(label="Upload Video (.mp4)")
            conf = gr.Slider(0.0, 1.0, 0.25, 0.01, label="YOLO Confidence")
            btn_i = gr.Button("Run Image")
            btn_v = gr.Button("Run Video")
        with gr.Column():
            img_out = gr.Image(type="numpy", label="Annotated Image")
            vid_out = gr.Video(label="Annotated Video")
            status = gr.Textbox(label="Status / JSON Path")

    btn_i.click(run_image, [img_in, conf], [img_out, status])
    btn_v.click(run_video, [vid_in, conf], [vid_out, status])

if __name__ == "__main__":
    demo.launch()
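
# To run locally (assuming the model files exist under models/):
#   pip install ultralytics paddleocr paddlepaddle gradio opencv-python
#   python app.py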