Sarvamangalak commited on
Commit
3d976a9
·
verified ·
1 Parent(s): f183543

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +155 -0
app.py ADDED
@@ -0,0 +1,155 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import cv2
2
+ import numpy as np
3
+ from ultralytics import YOLO
4
+ import easyocr
5
+ import gradio as gr
6
+ import tempfile
7
+ import os
8
+
9
# Load YOLOv8 plate detection model (weights file expected next to app.py)
model = YOLO("best.pt") # <-- your trained plate model

# Initialize OCR: English-only reader, CPU inference (gpu=False)
reader = easyocr.Reader(['en'], gpu=False)
14
+
15
def preprocess_plate(plate_img):
    """Binarize a cropped plate image to make OCR more reliable.

    Converts the BGR crop to grayscale, smooths it with a 5x5 Gaussian
    blur, then applies adaptive Gaussian thresholding.

    Returns the single-channel binarized image.
    """
    grayscale = cv2.cvtColor(plate_img, cv2.COLOR_BGR2GRAY)
    smoothed = cv2.GaussianBlur(grayscale, (5, 5), 0)
    binarized = cv2.adaptiveThreshold(
        smoothed,
        255,
        cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
        cv2.THRESH_BINARY,
        11,
        2,
    )
    return binarized
24
+
25
def recognize_plate(plate_img, min_conf=0.4):
    """Run OCR on a cropped plate image and return the recognized text.

    Parameters
    ----------
    plate_img : numpy.ndarray
        BGR crop assumed to contain (approximately) one license plate.
    min_conf : float, optional
        Minimum OCR confidence for a text fragment to be kept.
        Defaults to 0.4, matching the previously hard-coded threshold,
        so existing callers are unaffected.

    Returns
    -------
    str
        Space-joined text fragments, or "" when nothing passes the
        confidence filter.
    """
    processed = preprocess_plate(plate_img)
    ocr_result = reader.readtext(processed)

    # Keep only confident fragments; join() avoids the quadratic string
    # concatenation and the trailing-space-then-strip dance.
    return " ".join(
        text for _bbox, text, prob in ocr_result if prob > min_conf
    )
35
+
36
def process_frame(frame):
    """Detect license plates in a BGR frame, OCR them, and annotate in place.

    Runs the YOLO plate detector, crops each detection, OCRs the crop,
    draws the bounding box plus recognized text onto ``frame``, and
    collects the results.

    Returns
    -------
    tuple
        ``(frame, detected_plates)`` where ``detected_plates`` is a list
        of ``{"plate_text": str, "confidence": float}`` entries, one per
        detection (``plate_text`` may be "" when OCR found nothing).
    """
    detected_plates = []
    frame_h, frame_w = frame.shape[:2]

    results = model(frame)

    for r in results:
        if r.boxes is None:
            continue

        boxes = r.boxes.xyxy.cpu().numpy()
        confs = r.boxes.conf.cpu().numpy()

        for box, conf in zip(boxes, confs):
            x1, y1, x2, y2 = map(int, box)

            # Clamp to the frame: detector boxes can extend slightly past
            # the image edge, and a negative index would silently slice
            # from the wrong end of the array instead of the plate region.
            x1, y1 = max(x1, 0), max(y1, 0)
            x2, y2 = min(x2, frame_w), min(y2, frame_h)

            plate_img = frame[y1:y2, x1:x2]
            if plate_img.size == 0:
                continue

            plate_text = recognize_plate(plate_img)

            detected_plates.append({
                "plate_text": plate_text,
                "confidence": float(conf),
            })

            # Draw bounding box
            cv2.rectangle(frame, (x1, y1), (x2, y2),
                          (0, 255, 0), 2)

            # Draw plate text (generic label when OCR returned nothing)
            label = plate_text if plate_text else "Plate"
            cv2.putText(frame, label,
                        (x1, y1 - 10),
                        cv2.FONT_HERSHEY_SIMPLEX,
                        0.8, (255, 0, 0), 2)

    return frame, detected_plates
74
+
75
# =========================
# IMAGE MODE
# =========================
def process_image(image):
    """Detect and read plates on a single RGB image (Gradio image tab).

    Gradio hands over RGB arrays while OpenCV and the detector work in
    BGR, so the image is converted on the way in and back on the way out.

    Returns the annotated RGB image and a newline-joined summary of the
    recognized plate strings (or a "no plates" message).
    """
    bgr = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
    annotated, plates = process_frame(bgr)
    rgb = cv2.cvtColor(annotated, cv2.COLOR_BGR2RGB)

    texts = [p["plate_text"] for p in plates if p["plate_text"]]
    summary = "\n".join(texts) if texts else "No plates detected."

    return rgb, summary
87
+
88
# =========================
# VIDEO MODE
# =========================
def process_video(video_file):
    """Detect and read plates frame-by-frame in an uploaded video.

    Writes an annotated copy of the video to a temporary ``.mp4`` file
    and accumulates every non-empty plate string seen across all frames.

    Parameters
    ----------
    video_file : str
        Path to the uploaded video (as supplied by ``gr.Video``).

    Returns
    -------
    tuple
        ``(out_path, result_text)`` — path to the annotated video and a
        newline-joined, alphabetically sorted list of unique plate
        strings (or a "no plates" message).
    """
    cap = cv2.VideoCapture(video_file)

    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps = cap.get(cv2.CAP_PROP_FPS)
    # Some containers report 0 (or NaN) FPS; VideoWriter needs a positive
    # rate or the output file is unplayable. Fall back to a sane default.
    # (fps != fps is the stdlib-free NaN check.)
    if not fps or fps != fps or fps <= 0:
        fps = 25.0

    # Reserve a temp output path; close immediately so VideoWriter can
    # reopen it by name (required on Windows).
    temp_out = tempfile.NamedTemporaryFile(delete=False, suffix=".mp4")
    out_path = temp_out.name
    temp_out.close()

    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    out = cv2.VideoWriter(out_path, fourcc, fps, (width, height))

    all_detected = set()

    try:
        while cap.isOpened():
            ret, frame = cap.read()
            if not ret:
                break

            annotated_frame, plates = process_frame(frame)

            for p in plates:
                if p["plate_text"]:
                    all_detected.add(p["plate_text"])

            out.write(annotated_frame)
    finally:
        # Always release the capture and writer, even if a frame's
        # detection/OCR raises mid-loop.
        cap.release()
        out.release()

    # Sort for a deterministic display order (sets are unordered).
    result_text = "\n".join(sorted(all_detected)) if all_detected else "No plates detected."

    return out_path, result_text
126
+
127
# =========================
# GRADIO UI
# =========================
with gr.Blocks() as demo:
    gr.Markdown("## Smart Traffic & EV Analytics System")
    gr.Markdown("Upload an image or video to detect multiple vehicle number plates.")

    with gr.Tabs():
        # Single-image workflow: annotated image + recognized text.
        with gr.Tab("Image"):
            image_input = gr.Image(type="numpy", label="Upload Image")
            image_output = gr.Image(label="Detected Plates")
            image_text = gr.Textbox(label="Recognized Plate Numbers")

            image_button = gr.Button("Detect Plates")
            image_button.click(
                fn=process_image,
                inputs=image_input,
                outputs=[image_output, image_text],
            )

        # Video workflow: annotated video + all unique plates seen.
        with gr.Tab("Video"):
            video_input = gr.Video(label="Upload Video")
            video_output = gr.Video(label="Processed Video")
            video_text = gr.Textbox(label="Recognized Plate Numbers")

            video_button = gr.Button("Detect Plates")
            video_button.click(
                fn=process_video,
                inputs=video_input,
                outputs=[video_output, video_text],
            )

demo.launch()