"""PlateVision: a Streamlit app for license-plate detection and recognition.

Pipeline: a fine-tuned YOLO model ("best_model.pt") localizes plates,
EasyOCR reads the plate text, and annotated results are written to the
``output/`` folder. Supports still images (.jpg/.jpeg/.png) and videos
(.mp4/.mkv).
"""
import streamlit as st
import cv2
from ultralytics import YOLO
import os
import easyocr
from moviepy import ImageSequenceClip

st.title("🚗 PlateVision 🔍")
st.caption("AI-powered license plate detection & recognition from images and videos")

# Working directories for uploaded files and annotated results.
os.makedirs("input", exist_ok=True)
os.makedirs("output", exist_ok=True)


@st.cache_resource
def load_yolo_model():
    """Load the plate-detection YOLO model once per Streamlit session."""
    return YOLO("best_model.pt")


@st.cache_resource
def load_ocr_reader():
    """Load the EasyOCR English reader once per Streamlit session."""
    return easyocr.Reader(['en'], model_storage_directory='model')


def _read_plate(reader, roi):
    """OCR a plate crop; return the recognized text or a placeholder.

    Guards against zero-area crops (degenerate detection boxes), which
    would otherwise be handed to EasyOCR as an empty array.
    """
    if roi is None or roi.size == 0:
        return "Not Visible!"
    results = reader.readtext(roi)
    # readtext returns (bbox, text, confidence) tuples; take the first hit.
    return results[0][1].strip() if results else "Not Visible!"


def _annotate(canvas, coords, confidence, plate_num, scale):
    """Draw the detection box, confidence, and plate text onto *canvas*.

    ``scale`` is the cv2.putText font scale (the image and video
    pipelines historically use slightly different sizes).
    """
    x1, y1, x2, y2 = coords
    cv2.rectangle(canvas, (x1, y1), (x2, y2), (0, 0, 255), 2)
    cv2.putText(canvas, f'{confidence*100:.2f}%', (x1, y1 - 20),
                cv2.FONT_HERSHEY_SIMPLEX, scale, (0, 255, 0), 2)
    cv2.putText(canvas, f'Number: {plate_num}', (x1, y2 + 20),
                cv2.FONT_HERSHEY_SIMPLEX, scale, (0, 255, 0), 2)


def process_and_find_plate(input_path, output_path):
    """Dispatch to the image or video pipeline based on file extension.

    Returns the annotated output path, or None for unsupported or
    unprocessable input.
    """
    extension = os.path.splitext(input_path)[1].lower()
    if extension in ['.mp4', '.mkv']:
        return find_plate_on_video(input_path, output_path)
    elif extension in ['.jpg', '.jpeg', '.png']:
        return find_plate_on_image(input_path, output_path)
    else:
        st.error("Unsupported file type")
        return None


def find_plate_on_image(input_path, output_path):
    """Detect, OCR, and annotate license plates on a single image.

    Returns ``output_path`` on success, None if the image cannot be read.
    """
    model = load_yolo_model()
    reader = load_ocr_reader()
    image = cv2.imread(input_path)
    if image is None:
        # Fix: cv2.imread returns None (no exception) on unreadable files;
        # previously this crashed inside cv2.cvtColor.
        st.error("Error opening the image")
        return None
    # Work in RGB: YOLO and EasyOCR both receive RGB here.
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    outputs = model.predict(image, verbose=False)
    for output in outputs:
        for box in output.boxes:
            x1, y1, x2, y2 = map(int, box.xyxy[0])
            confidence = float(box.conf[0])
            plate_num = _read_plate(reader, image[y1:y2, x1:x2])
            _annotate(image, (x1, y1, x2, y2), confidence, plate_num, 0.6)
    # imwrite expects BGR, so convert back before saving.
    cv2.imwrite(output_path, cv2.cvtColor(image, cv2.COLOR_RGB2BGR))
    return output_path


def find_plate_on_video(input_path, output_path):
    """Detect, OCR, and annotate plates on every Nth video frame.

    Only every ``skip_frame``-th frame is analyzed and kept; the output
    fps is divided accordingly so the clip's wall-clock duration is
    preserved. Returns ``output_path`` on success, None on failure.
    """
    model = load_yolo_model()
    reader = load_ocr_reader()
    cap = cv2.VideoCapture(input_path)
    if not cap.isOpened():
        st.error("Error opening the video")
        return None
    # Some containers report 0 fps / 0 frames; fall back to sane defaults.
    fps = int(cap.get(cv2.CAP_PROP_FPS)) or 25
    total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) or 1
    frames = []
    frame_idx = 0
    skip_frame = 3  # analyze every 3rd frame: trades temporal resolution for speed
    progress_bar = st.progress(0, text="🔍 Analyzing video frames...")
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        if frame_idx % skip_frame == 0:
            rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            outputs = model.predict(rgb_frame, verbose=False)
            for output in outputs:
                for box in output.boxes:
                    x1, y1, x2, y2 = map(int, box.xyxy[0])
                    confidence = float(box.conf[0])
                    # Fix: OCR the RGB crop, matching the image pipeline
                    # (previously the BGR frame was used here).
                    plate_num = _read_plate(reader, rgb_frame[y1:y2, x1:x2])
                    _annotate(frame, (x1, y1, x2, y2), confidence, plate_num, 0.7)
            frames.append(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
        frame_idx += 1
        progress = min(frame_idx / total_frames, 1.0)
        progress_bar.progress(progress, text=f"📸 Processed {frame_idx}/{total_frames} frames...")
    cap.release()
    progress_bar.empty()
    if not frames:
        # Fix: ImageSequenceClip raises on an empty frame list (e.g. a
        # corrupt video that opens but yields no decodable frames).
        st.error("No frames could be read from the video")
        return None
    output_fps = max(fps // skip_frame, 1)
    clip = ImageSequenceClip(frames, fps=output_fps)
    clip.write_videofile(output_path, codec='libx264', audio=False, logger=None)
    return output_path


uploaded_file = st.file_uploader("📤 Upload an image or video", type=['jpg', 'jpeg', 'png', 'mp4', 'mkv'])
if uploaded_file is not None:
    input_path = f"input/{uploaded_file.name}"
    output_path = f"output/{uploaded_file.name}"
    with open(input_path, 'wb') as f:
        f.write(uploaded_file.getbuffer())
    with st.spinner("🚦 Detecting plates... please fasten your seatbelt!"):
        path = process_and_find_plate(input_path, output_path)
    # Fix: compare extensions case-insensitively — the dispatcher lowercases
    # the extension, so ".MP4" uploads processed fine but previously fell
    # through to the error branch here.
    if path and path.lower().endswith(('.mp4', '.mkv')):
        with open(path, 'rb') as video_file:
            st.video(video_file.read())
    elif path and path.lower().endswith(('.jpg', '.jpeg', '.png')):
        st.image(path)
    else:
        st.error("Error occurred while processing the file.")