File size: 4,253 Bytes
4298472
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
bab00d2
4298472
 
 
eb6df12
4298472
 
 
 
f183543
4298472
 
14a4526
 
 
4298472
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
0318618
 
 
 
 
 
 
 
4298472
eb6df12
4298472
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
0318618
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
import gradio as gr
import torch
import numpy as np
import cv2
import sqlite3
import pandas as pd
import matplotlib.pyplot as plt
from PIL import Image, ImageDraw
from transformers import YolosImageProcessor, YolosForObjectDetection
import easyocr
from datetime import datetime

# -------------------- Database --------------------
# Module-level SQLite connection shared by get_dashboard() and process_image().
# check_same_thread=False is required because Gradio handles requests on
# worker threads distinct from the one that created the connection.
conn = sqlite3.connect("vehicles.db", check_same_thread=False)
cursor = conn.cursor()
# Append-only log of scans; no primary key, duplicates are allowed by design.
cursor.execute("""
CREATE TABLE IF NOT EXISTS vehicles (
    plate TEXT,
    type TEXT,
    time TEXT
)
""")
conn.commit()

# -------------------- Models --------------------
# YOLOS fine-tuned for license-plate detection; weights are downloaded from
# the Hugging Face hub on first run. `processor` handles both preprocessing
# and post-processing of the detections.
processor = YolosImageProcessor.from_pretrained(
    "nickmuchi/yolos-small-finetuned-license-plate-detection"
)
model = YolosForObjectDetection.from_pretrained(
    "nickmuchi/yolos-small-finetuned-license-plate-detection"
)
model.eval()  # inference only — disables dropout/batch-norm updates

# English-only OCR on CPU (gpu=False keeps the app runnable without CUDA).
reader = easyocr.Reader(['en'], gpu=False)

# -------------------- Plate Color Classifier --------------------
def classify_plate_color(plate_img):
    img = np.array(plate_img)
    hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)

    green = np.sum(cv2.inRange(hsv, (35, 40, 40), (85, 255, 255)))
    yellow = np.sum(cv2.inRange(hsv, (15, 50, 50), (35, 255, 255)))
    white = np.sum(cv2.inRange(hsv, (0, 0, 200), (180, 30, 255)))

    if green > yellow and green > white:
        return "EV"
    elif yellow > green and yellow > white:
        return "Commercial"
    else:
        return "Personal"

# -------------------- OCR --------------------
def read_plate(plate_img):
    """OCR a plate crop and return its text, or "UNKNOWN" if nothing is read.

    easyocr frequently splits one plate into several detections (state code,
    number, second line); the original code kept only the first fragment and
    silently dropped the rest, so all fragments are joined here instead.
    """
    results = reader.readtext(np.array(plate_img))
    # Each result is a (bbox, text, confidence) triple; keep just the text.
    text = " ".join(fragment for _, fragment, _ in results).strip()
    return text if text else "UNKNOWN"

# -------------------- Dashboard --------------------
def get_dashboard():
    """Return a matplotlib Figure with a bar chart of vehicle-type counts.

    Builds a standalone Figure instead of calling plt.subplots(): pyplot
    registers every figure it creates in global state and this function runs
    on every scan, so the original version leaked one figure per request in
    the long-running Gradio server. gr.Plot accepts a plain Figure object.
    """
    from matplotlib.figure import Figure

    df = pd.read_sql("SELECT * FROM vehicles", conn)

    fig = Figure(figsize=(8, 5))
    ax = fig.subplots()

    # Placeholder panel until the first vehicle has been scanned.
    if df.empty:
        ax.text(0.5, 0.5, "No vehicles scanned yet",
                ha="center", va="center", fontsize=10)
        ax.axis("off")
        return fig

    counts = df["type"].value_counts()
    counts.plot(kind="bar", ax=ax)

    ax.set_title("Vehicle Classification Dashboard")
    ax.set_xlabel("Vehicle Type")
    ax.set_ylabel("Count")
    ax.grid(axis="y")

    return fig

# -------------------- Main Pipeline --------------------
def process_image(img):
    """Detect, OCR, classify, and log a license plate from a numpy image.

    Returns a 4-tuple for the Gradio outputs:
    (annotated image, plate text, vehicle type, dashboard figure).

    Fixes over the original: guards against a missing input image (Gradio
    passes None when the image component is empty), picks the
    highest-confidence detection instead of an arbitrary first box, clamps
    the crop to the image bounds, and only builds the ImageDraw object once
    a detection exists.
    """
    if img is None:
        return None, "No image provided", "", get_dashboard()

    image = Image.fromarray(img)

    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)

    # target_sizes expects (height, width); PIL's .size is (width, height).
    results = processor.post_process_object_detection(
        outputs,
        threshold=0.3,
        target_sizes=torch.tensor([[image.size[1], image.size[0]]])
    )[0]

    if len(results["boxes"]) == 0:
        return image, "No plate detected", "", get_dashboard()

    # Use the most confident detection rather than whichever box comes first.
    best = int(torch.argmax(results["scores"]))
    x1, y1, x2, y2 = map(int, results["boxes"][best].tolist())

    # Clamp so the crop rectangle is always inside the image.
    w, h = image.size
    x1, y1 = max(0, x1), max(0, y1)
    x2, y2 = min(w, x2), min(h, y2)

    plate_img = image.crop((x1, y1, x2, y2))

    plate_text = read_plate(plate_img)
    vehicle_type = classify_plate_color(plate_img)

    cursor.execute(
        "INSERT INTO vehicles VALUES (?, ?, ?)",
        (plate_text, vehicle_type, datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
    )
    conn.commit()

    draw = ImageDraw.Draw(image)
    draw.rectangle([x1, y1, x2, y2], outline="red", width=3)
    # Keep the label on-canvas even when the box touches the top edge.
    draw.text((x1, max(0, y1 - 12)), f"{plate_text} | {vehicle_type}", fill="red")

    return image, plate_text, vehicle_type, get_dashboard()

# -------------------- Gradio UI --------------------
# Wrap long OCR output instead of forcing a horizontal scrollbar.
css = """
textarea {
    white-space: pre-wrap !important;
    word-break: break-word !important;
    overflow-wrap: break-word !important;
}
"""

# Custom CSS must be passed to the gr.Blocks() constructor — Blocks.launch()
# has no `css` parameter, so the original `demo.launch(css=css)` raised a
# TypeError and the styling was never applied.
with gr.Blocks(css=css) as demo:
    gr.Markdown("## Smart Traffic & EV Analytics System")
    gr.Markdown(
        "Detects license plates, reads number plate text, "
        "classifies EV / Commercial / Personal vehicles, "
        "and shows live analytics."
    )

    with gr.Row():
        input_img = gr.Image(type="numpy", sources=["upload", "webcam"])
        output_img = gr.Image()

    with gr.Row():
        plate_box = gr.Textbox(label="Number Plate")
        type_box = gr.Textbox(label="Vehicle Type")

    dashboard = gr.Plot(label="Live Vehicle Dashboard")

    btn = gr.Button("Scan Vehicle")
    btn.click(
        process_image,
        input_img,
        [output_img, plate_box, type_box, dashboard]
    )

demo.launch()