Upload 9 files
Browse files- app.py +54 -0
- best.pt +3 -0
- model1.py +57 -0
- models/large_LP_YOLOm_best.pt +3 -0
- requirements.txt +0 -0
- yolov8m_llp.py +3 -0
- yolov8n.pt +3 -0
app.py
ADDED
|
@@ -0,0 +1,54 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import gradio as gr
import model1 as m1

# Shared state for the UI callbacks: results of the most recent m1.run() call.
cars = []       # cropped car images from the last submitted frame
lps = []        # cropped licence-plate images (one per car)
lp_texts = []   # OCR'd plate strings ("not found" when OCR yields nothing)
counter = 0     # index of the detection currently shown by next/prev paging
|
| 8 |
+
def greet(image):
    """Run the ANPR pipeline on *image* and show the first detection.

    Populates the module-level ``cars``/``lps``/``lp_texts`` lists so the
    next/prev buttons can page through all detections, then returns the
    first (car crop, plate crop, plate text) triple for display.

    Args:
        image: the input frame as provided by the Gradio Image component.

    Returns:
        Tuple of (car image, licence-plate image, plate text). When no car
        is detected, returns (None, None, "no car detected") instead of
        raising IndexError.
    """
    global cars, lps, lp_texts, counter
    # Reset paging: a fresh submission must start at the first detection,
    # not wherever next/prev left the old counter.
    counter = 0
    cars, lps, lp_texts = m1.run([image])
    print(len(cars), len(lps), len(lp_texts))
    if not cars:
        # Guard the empty case — the original crashed on cars[0].
        return None, None, "no car detected"
    return cars[0], lps[0], lp_texts[0]
|
| 13 |
+
|
| 14 |
+
def next_img():
    """Advance to the next detection and return its (car, plate, text) triple.

    Wraps around past the last detection. Safe to press before any image
    has been submitted: returns empty outputs instead of dividing by zero.
    """
    global counter
    if not cars:
        # No detections yet — the original raised ZeroDivisionError on
        # `counter % len(cars)` when len(cars) == 0.
        return None, None, "no detections"
    counter += 1
    index = counter % len(cars)  # % of two ints is already an int
    print(index)
    return cars[index], lps[index], lp_texts[index]
|
| 20 |
+
|
| 21 |
+
def prev_img():
    """Step back to the previous detection and return its (car, plate, text) triple.

    Wraps around before the first detection (Python's % is non-negative for
    a positive divisor). Safe to press before any image has been submitted.
    """
    global counter
    if not cars:
        # No detections yet — the original raised ZeroDivisionError on
        # `counter % len(cars)` when len(cars) == 0.
        return None, None, "no detections"
    counter -= 1
    index = counter % len(cars)  # % of two ints is already an int
    print(index)
    return cars[index], lps[index], lp_texts[index]
|
| 27 |
+
|
| 28 |
+
# --- Gradio UI --------------------------------------------------------------
# Two tabs: "Model 1" is the interactive 3-model pipeline wired to the
# callbacks above; "Model 2" is descriptive only (no inputs wired up here).
with gr.Blocks() as demo:
    gr.Markdown("## ANPR Project")
    with gr.Tab("Model 1"):
        gr.Markdown("Using 3 different ML models")
        # Typo fixed: "dection" -> "detection".
        gr.Markdown("YOLOv8n for car detection + YOLOv8n for LP detection + easy ocr for text detection")
        img = gr.Image(label="Input")
        submit = gr.Button(value="submit")

        with gr.Row():
            car = gr.Image(label="Car")
            lp = gr.Image(label="Licence Plate")
            lp_text = gr.Text(label="Plate Number")

        with gr.Row():
            # Renamed from `next`/`prev`: `next` shadowed the builtin.
            next_btn = gr.Button(value="next")
            prev_btn = gr.Button(value="prev")

        submit.click(greet, inputs=[img], outputs=[car, lp, lp_text])
        next_btn.click(next_img, outputs=[car, lp, lp_text])
        prev_btn.click(prev_img, outputs=[car, lp, lp_text])

    with gr.Tab("Model 2"):
        gr.Markdown("Using 2 different ML models")
        gr.Markdown("YOLOv8m for car detection + easy ocr for text detection")
        gr.Markdown("YOLOv8m for car detection is trained on a large dataset of 25K training images")

demo.launch(share=False)
|
best.pt
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:b8201fecc45f066f02d5653cc587a27fd2d2b543568f08865736137d20b76718
|
| 3 |
+
size 6242670
|
model1.py
ADDED
|
@@ -0,0 +1,57 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from ultralytics import YOLO
import cv2
import easyocr
import numpy as np

# Pretrained YOLOv8-nano (COCO) used to find cars in the full frame.
car_detection = YOLO("yolov8n.pt")
# Custom-trained plate detector (weights tracked via git-lfs as best.pt).
lp_detection = YOLO('best.pt')
# English-only OCR reader; model download happens on first construction.
reader = easyocr.Reader(['en'])

# inputs = [cv2.imread('img2.png')]
|
| 11 |
+
|
| 12 |
+
def run(inputs):
    """Detect cars and their licence plates in the given images.

    Pipeline per image: detect cars (COCO class 2), crop each car, detect
    the licence plate inside the crop, then OCR the plate.

    Args:
        inputs: list of image arrays (as read by cv2 / supplied by Gradio).
            The original implementation only processed ``inputs[0]``; this
            version handles every image, which is backward-compatible for
            single-image callers.

    Returns:
        Tuple ``(cars, lps, lp_texts)`` — three lists of EQUAL length, one
        entry per detected car:
          * ``cars[i]``     — the cropped car image,
          * ``lps[i]``      — the plate crop, or a 100x100 black placeholder
                              when no plate was detected,
          * ``lp_texts[i]`` — the OCR'd text, or "not found".
    """
    cars = []
    lps = []
    lp_texts = []
    for image in inputs:
        # Detect cars only (COCO class id 2) with a 0.6 confidence floor.
        car_results = car_detection.predict(source=image, classes=[2], conf=0.6)
        for car_result in car_results:
            for box in car_result.boxes.xyxy.tolist():
                x1, y1, x2, y2 = (int(v) for v in box)
                car = image[y1:y2, x1:x2]

                # Detect the licence plate inside the car crop.
                lp_results = lp_detection.predict(source=car, conf=0.6)
                lp_boxes = lp_results[0].boxes.xyxy.tolist()

                text = "not found"
                if lp_boxes:
                    # Use only the first plate box. The original appended
                    # every box to `lps` but only one text per car, which
                    # desynchronized the three parallel result lists.
                    lx1, ly1, lx2, ly2 = (int(v) for v in lp_boxes[0])
                    lp = car[ly1:ly2, lx1:lx2]
                    plate = reader.readtext(lp)
                    if plate:
                        text = plate[0][1]
                else:
                    # Placeholder keeps the UI image slot valid when no
                    # plate was found for this car.
                    lp = np.zeros((100, 100, 3), np.uint8)

                cars.append(car)
                lps.append(lp)
                lp_texts.append(text)

    return cars, lps, lp_texts
|
| 57 |
+
|
models/large_LP_YOLOm_best.pt
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:b9600657c811e290bf3c92f0270c8c182d3ff57fa961a5e5bb82e6136ca1cb1b
|
| 3 |
+
size 207513151
|
requirements.txt
ADDED
|
Binary file (5.62 kB). View file
|
|
|
yolov8m_llp.py
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from ultralytics import YOLO

# Larger licence-plate detector (YOLOv8m); weights are stored via git-lfs.
# NOTE(review): loaded here but not imported by app.py — presumably the
# backing model for the "Model 2" tab; confirm intended wiring.
model = YOLO("models/large_LP_YOLOm_best.pt")
|
yolov8n.pt
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:31e20dde3def09e2cf938c7be6fe23d9150bbbe503982af13345706515f2ef95
|
| 3 |
+
size 6534387
|