File size: 4,713 Bytes
4feaf1e
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9283a30
4feaf1e
9283a30
4feaf1e
 
 
 
 
 
 
9283a30
4feaf1e
 
 
9283a30
4feaf1e
 
 
 
 
 
9283a30
4feaf1e
9283a30
 
 
 
 
 
4feaf1e
 
 
 
 
 
 
 
 
 
 
9283a30
 
 
4feaf1e
07be9ea
4feaf1e
 
 
 
 
 
 
 
9283a30
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4feaf1e
9283a30
4feaf1e
 
 
 
 
 
9283a30
 
 
4feaf1e
 
 
 
 
 
 
 
 
 
 
9283a30
4feaf1e
 
 
9283a30
 
 
 
4feaf1e
 
9283a30
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
import streamlit as st
import cv2
from ultralytics import YOLO
import os
import easyocr
from moviepy import ImageSequenceClip

# Page header.
st.title("πŸš— PlateVision πŸ”")
st.caption("AI-powered license plate detection & recognition from images and videos")

# Working directories for uploaded and processed media.
for _dir in ("input", "output"):
    os.makedirs(_dir, exist_ok=True)

@st.cache_resource
def load_yolo_model():
    """Load the plate-detection YOLO weights once and reuse them across reruns."""
    weights_path = "best_model.pt"
    return YOLO(weights_path)

@st.cache_resource
def load_ocr_reader():
    """Build the English EasyOCR reader once and reuse it across reruns."""
    languages = ['en']
    return easyocr.Reader(languages, model_storage_directory='model')

def process_and_find_plate(input_path, output_path):
    """Route the uploaded file to the image or video pipeline by extension.

    Returns the produced output path, or None for unsupported file types.
    """
    ext = os.path.splitext(input_path)[1].lower()
    video_exts = ('.mp4', '.mkv')
    image_exts = ('.jpg', '.jpeg', '.png')

    if ext in video_exts:
        return find_plate_on_video(input_path, output_path)
    if ext in image_exts:
        return find_plate_on_image(input_path, output_path)

    st.error("Unsupported file type")
    return None

def find_plate_on_image(input_path, output_path):
    """Detect license plates on one image, OCR each plate, annotate, and save.

    Each detection gets a red bounding box, the model confidence drawn above
    it, and the recognized plate text drawn below it. The annotated image is
    written to ``output_path``.

    Returns:
        ``output_path`` on success, or None if the image cannot be read.
    """
    model = load_yolo_model()
    reader = load_ocr_reader()

    image = cv2.imread(input_path)
    if image is None:
        # cv2.imread returns None on a corrupt/unreadable file; without this
        # guard cvtColor raises a cryptic assertion error.
        st.error("Could not read the image file")
        return None
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    outputs = model.predict(image, verbose=False)

    h, w = image.shape[:2]
    for output in outputs:
        for box in output.boxes:
            x1, y1, x2, y2 = map(int, box.xyxy[0])
            # YOLO boxes can extend past the frame; clamp so the ROI slice
            # is always valid (negative indices would silently corrupt it).
            x1, y1 = max(0, x1), max(0, y1)
            x2, y2 = min(w, x2), min(h, y2)
            confidence = float(box.conf[0])
            roi = image[y1:y2, x1:x2]
            # Skip OCR on a degenerate (zero-area) crop.
            results = reader.readtext(roi) if roi.size else []
            plate_num = results[0][1].strip() if results else "Not Visible!"
            cv2.rectangle(image, (x1, y1), (x2, y2), (0, 0, 255), 2)
            cv2.putText(image, f'{confidence*100:.2f}%', (x1, y1 - 20),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)
            cv2.putText(image, f'Number: {plate_num}', (x1, y2 + 20),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)

    cv2.imwrite(output_path, cv2.cvtColor(image, cv2.COLOR_RGB2BGR))
    return output_path

def find_plate_on_video(input_path, output_path):
    """Detect and OCR license plates on a video, writing an annotated copy.

    Every ``skip_frame``-th frame is run through YOLO + EasyOCR and annotated
    in place; only the processed frames are kept and re-assembled at a
    proportionally reduced FPS so playback duration stays roughly correct.

    Returns:
        ``output_path`` on success, or None if the video cannot be opened
        or yields no frames.
    """
    model = load_yolo_model()
    reader = load_ocr_reader()

    cap = cv2.VideoCapture(input_path)
    if not cap.isOpened():
        st.error("Error opening the video")
        return None

    fps = int(cap.get(cv2.CAP_PROP_FPS)) or 25          # fall back when metadata is missing
    total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) or 1  # avoid div-by-zero below
    frames = []
    frame_idx = 0
    skip_frame = 3  # process every 3rd frame to keep runtime reasonable

    progress_bar = st.progress(0, text="πŸ” Analyzing video frames...")

    try:
        while cap.isOpened():
            ret, frame = cap.read()
            if not ret:
                break

            if frame_idx % skip_frame == 0:
                rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                outputs = model.predict(rgb_frame, verbose=False)

                h, w = frame.shape[:2]
                for output in outputs:
                    for box in output.boxes:
                        x1, y1, x2, y2 = map(int, box.xyxy[0])
                        # Clamp so the ROI slice is always in-bounds
                        # (negative indices would silently corrupt the crop).
                        x1, y1 = max(0, x1), max(0, y1)
                        x2, y2 = min(w, x2), min(h, y2)
                        confidence = float(box.conf[0])
                        roi = frame[y1:y2, x1:x2]
                        # Skip OCR on a degenerate (zero-area) crop.
                        results = reader.readtext(roi) if roi.size else []
                        plate_num = results[0][1].strip() if results else "Not Visible!"
                        cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 0, 255), 2)
                        cv2.putText(frame, f'{confidence*100:.2f}%', (x1, y1 - 20),
                                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)
                        cv2.putText(frame, f'Number: {plate_num}', (x1, y2 + 20),
                                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)

                frames.append(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))

            frame_idx += 1
            progress = min(frame_idx / total_frames, 1.0)
            progress_bar.progress(progress, text=f"πŸ“Έ Processed {frame_idx}/{total_frames} frames...")
    finally:
        # Release the capture and clear the progress UI even if prediction
        # or OCR raises mid-loop.
        cap.release()
        progress_bar.empty()

    if not frames:
        # An empty/unreadable video would make ImageSequenceClip raise.
        st.error("No frames could be read from the video")
        return None

    # Fewer kept frames -> lower FPS keeps playback duration roughly correct.
    output_fps = max(fps // skip_frame, 1)
    clip = ImageSequenceClip(frames, fps=output_fps)
    clip.write_videofile(output_path, codec='libx264', audio=False, logger=None)

    return output_path

uploaded_file = st.file_uploader("πŸ“€ Upload an image or video", type=['jpg', 'jpeg', 'png', 'mp4', 'mkv'])

if uploaded_file is not None:
    # Persist the upload to disk so OpenCV / moviepy can read it by path.
    input_path = f"input/{uploaded_file.name}"
    output_path = f"output/{uploaded_file.name}"
    with open(input_path, 'wb') as dst:
        dst.write(uploaded_file.getbuffer())

    with st.spinner("🚦 Detecting plates... please fasten your seatbelt!"):
        path = process_and_find_plate(input_path, output_path)

    # Render the result according to what the pipeline produced.
    if not path:
        st.error("Error occurred while processing the file.")
    elif path.endswith(('.mp4', '.mkv')):
        with open(path, 'rb') as video_file:
            st.video(video_file.read())
    elif path.endswith(('.jpg', '.jpeg', '.png')):
        st.image(path)
    else:
        st.error("Error occurred while processing the file.")