Update app.py
Browse files
app.py
CHANGED
|
@@ -2,47 +2,53 @@ import streamlit as st
|
|
| 2 |
import cv2
|
| 3 |
import numpy as np
|
| 4 |
import easyocr
|
|
|
|
| 5 |
|
| 6 |
# Title of the app
|
| 7 |
st.title("License Plate Recognition 🚗")
|
| 8 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 9 |
# Load EasyOCR reader
|
| 10 |
@st.cache_resource
|
| 11 |
def load_easyocr_reader():
|
| 12 |
return easyocr.Reader(['en'], gpu=False)
|
| 13 |
|
|
|
|
|
|
|
| 14 |
ocr_reader = load_easyocr_reader()
|
| 15 |
|
| 16 |
-
#
|
| 17 |
-
|
| 18 |
-
|
| 19 |
-
|
| 20 |
-
|
| 21 |
-
|
| 22 |
-
|
| 23 |
-
|
| 24 |
-
|
| 25 |
-
|
| 26 |
-
|
| 27 |
-
|
| 28 |
-
|
| 29 |
-
|
| 30 |
-
|
| 31 |
-
|
| 32 |
-
|
| 33 |
-
|
| 34 |
-
|
| 35 |
-
|
| 36 |
-
|
| 37 |
-
|
| 38 |
-
|
| 39 |
-
st.write(f"**Extracted Text (Plate {i+1}):** {detected_text}")
|
| 40 |
-
|
| 41 |
-
image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
|
| 42 |
-
st.image(image_rgb, caption="Processed Image", use_container_width=True)
|
| 43 |
|
| 44 |
# Function to process video and detect license plates
|
| 45 |
-
def process_video(video_path):
|
| 46 |
cap = cv2.VideoCapture(video_path)
|
| 47 |
|
| 48 |
if not cap.isOpened():
|
|
@@ -54,20 +60,24 @@ def process_video(video_path):
|
|
| 54 |
if not ret:
|
| 55 |
break
|
| 56 |
|
| 57 |
-
|
| 58 |
-
|
| 59 |
-
|
| 60 |
-
|
| 61 |
-
|
| 62 |
-
|
| 63 |
-
|
| 64 |
-
|
| 65 |
-
|
| 66 |
-
|
| 67 |
-
|
| 68 |
-
|
| 69 |
-
|
| 70 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 71 |
|
| 72 |
cap.release()
|
| 73 |
|
|
|
|
| 2 |
import cv2
|
| 3 |
import numpy as np
|
| 4 |
import easyocr
|
| 5 |
+
from ultralytics import YOLO
|
| 6 |
|
| 7 |
# Title of the app
# Rendered once at the top of the Streamlit page on every script rerun.
st.title("License Plate Recognition 🚗")
|
| 9 |
|
| 10 |
+
# Load the YOLO model for license plate detection
# (cached so the weights are loaded once per session, not on every rerun)
@st.cache_resource
def load_yolo_model():
    """Return a cached YOLO detector loaded from the local weights file."""
    weights_file = "best.pt"  # Replace with your model file
    return YOLO(weights_file)
|
| 16 |
+
|
| 17 |
# Load EasyOCR reader
# Cached by Streamlit so the OCR model initializes only once per session.
@st.cache_resource
def load_easyocr_reader():
    """Return a cached English-language EasyOCR reader (CPU mode)."""
    reader = easyocr.Reader(['en'], gpu=False)
    return reader
|
| 21 |
|
| 22 |
+
# Initialize models
# Both factories are decorated with @st.cache_resource, so repeated
# Streamlit reruns reuse the already-loaded detector and OCR reader.
yolo_model = load_yolo_model()
ocr_reader = load_easyocr_reader()
|
| 25 |
|
| 26 |
+
# Function to process the uploaded image
def process_image(image, confidence_threshold=0.5):
    """Detect license plates in a BGR image and OCR each detection.

    Renders into the Streamlit page: the full image annotated with YOLO
    detections, each cropped plate, and the text EasyOCR extracts from it.

    Args:
        image: BGR image array (as produced by cv2.imread/cv2.imdecode).
        confidence_threshold: minimum YOLO confidence for a detection.
    """
    # Perform license plate detection using YOLO
    results = yolo_model(image, conf=confidence_threshold)
    # results[0].plot() returns a BGR annotated image; convert for st.image
    annotated_image = cv2.cvtColor(results[0].plot(), cv2.COLOR_BGR2RGB)
    st.image(annotated_image, caption="Detected License Plate(s)", use_container_width=True)

    height, width = image.shape[:2]

    # Loop through detections and perform OCR
    for result in results:
        boxes = result.boxes.xyxy.cpu().numpy().astype(int)
        if len(boxes) == 0:
            st.warning("No license plate detected!")
            return
        for i, box in enumerate(boxes):
            x1, y1, x2, y2 = box
            # Clamp to image bounds: YOLO boxes may fall slightly outside the
            # frame, and a negative index would silently wrap in numpy slicing,
            # yielding a wrong/empty crop that makes cv2.cvtColor raise.
            x1, y1 = max(0, x1), max(0, y1)
            x2, y2 = min(width, x2), min(height, y2)
            if x2 <= x1 or y2 <= y1:
                continue  # degenerate box — nothing to crop or OCR
            cropped_image = image[y1:y2, x1:x2]
            cropped_image_rgb = cv2.cvtColor(cropped_image, cv2.COLOR_BGR2RGB)
            st.image(cropped_image_rgb, caption=f"Cropped License Plate {i+1}", use_container_width=True)

            # Perform OCR on the cropped image
            text_results = ocr_reader.readtext(cropped_image_rgb, detail=0)
            detected_text = " ".join(text_results)
            st.write(f"**Extracted Text (Plate {i+1}):** {detected_text}")
|
|
|
|
|
|
|
|
|
|
|
|
|
| 49 |
|
| 50 |
# Function to process video and detect license plates
|
| 51 |
+
def process_video(video_path, confidence_threshold=0.5):
|
| 52 |
cap = cv2.VideoCapture(video_path)
|
| 53 |
|
| 54 |
if not cap.isOpened():
|
|
|
|
| 60 |
if not ret:
|
| 61 |
break
|
| 62 |
|
| 63 |
+
results = yolo_model(frame, conf=confidence_threshold)
|
| 64 |
+
annotated_frame = cv2.cvtColor(results[0].plot(), cv2.COLOR_BGR2RGB)
|
| 65 |
+
|
| 66 |
+
st.image(annotated_frame, caption="Processed Video Frame", use_container_width=True)
|
| 67 |
+
|
| 68 |
+
for result in results:
|
| 69 |
+
boxes = result.boxes.xyxy.cpu().numpy().astype(int)
|
| 70 |
+
for i, box in enumerate(boxes):
|
| 71 |
+
x1, y1, x2, y2 = box
|
| 72 |
+
cropped_plate = frame[y1:y2, x1:x2]
|
| 73 |
+
cropped_rgb = cv2.cvtColor(cropped_plate, cv2.COLOR_BGR2RGB)
|
| 74 |
+
|
| 75 |
+
# Perform OCR on the cropped image
|
| 76 |
+
text_results = ocr_reader.readtext(cropped_rgb, detail=0)
|
| 77 |
+
detected_text = " ".join(text_results)
|
| 78 |
+
cv2.putText(annotated_frame, detected_text, (x1, y1-10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 255, 0), 2)
|
| 79 |
+
|
| 80 |
+
st.image(annotated_frame, caption="Video Frame with OCR", use_container_width=True)
|
| 81 |
|
| 82 |
cap.release()
|
| 83 |
|