# ANPR / app.py
# (Hugging Face Space file-viewer residue removed: "gouravs300's picture",
#  "Update app.py", commit 72a51ab)
import streamlit as st
from streamlit_image_select import image_select
import tensorflow as tf
import datetime
from pytz import timezone
import os
import pandas as pd
import numpy as np
import cv2
from PIL import Image
# trained model files and paths
# NOTE(review): 'eported_models' (sic) is presumably the actual on-disk
# directory name — keep in sync with the repo layout before renaming.
files = {
    # label_map.pbtxt for the SSD plate-region detector
    "label_map_ssd" : os.path.join('eported_models','ssd_mobilnet_numberplate_region_detection','label_map.pbtxt'),
    # label_map.pbtxt for the EfficientDet character (OCR) detector
    "label_map_efficientdet" : os.path.join('eported_models','efficientdet_d0_ocr_numberplate','label_map.pbtxt')
}
paths = {
    # SavedModel directories for the two detectors
    "saved_model_path_ssd" : os.path.join('eported_models','ssd_mobilnet_numberplate_region_detection','saved_model'),
    "saved_model_path_efficientdet" : os.path.join('eported_models','efficientdet_d0_ocr_numberplate','saved_model')
}
def read_label_map(label_map_path):
    """Parse a TF Object Detection ``label_map.pbtxt`` file.

    Returns ``{item_id: {"name": item_name}}`` — the exact shape
    ``ExtractBBoxes`` indexes with ``category_index[id]['name']``.

    Fix vs. original: ``line.replace(" ", "")`` discarded its result
    (strings are immutable), so the ``"item{"``/``"}"`` comparisons could
    never match and were dead code; the line is now stripped for real and
    those no-op branches removed.
    """
    item_id = None
    item_name = None
    items = {}
    with open(label_map_path, "r") as file:
        for line in file:
            line = line.strip()
            if "id" in line:
                # e.g. "id: 3"
                item_id = int(line.split(":", 1)[1].strip())
            elif "name" in line:
                # e.g. "name: 'A'"  ->  {"name": "A"}
                key = line.split(":")[0].replace("\"", " ").strip()
                value = line.split(":")[1].replace("'", '').strip()
                item_name = {key: value}
            # Emit an entry once both halves of an item have been seen.
            if item_id is not None and item_name is not None:
                items[item_id] = item_name
                item_id = None
                item_name = None
    return items
#load model
@st.cache(allow_output_mutation = True)
def cache_model(path1, path2):
    """Load and memoise both TF SavedModels (plate-region detector, OCR).

    NOTE(review): ``st.cache`` is deprecated in modern Streamlit
    (``st.cache_resource`` is the replacement for model handles) —
    confirm the pinned Streamlit version before migrating.
    """
    region_detector = tf.saved_model.load(path1)
    ocr_detector = tf.saved_model.load(path2)
    return (region_detector, ocr_detector)
# Load both detectors once; the st.cache decorator keeps them across reruns.
detect_fn_ssd, detect_fn_efficientdet = cache_model(paths["saved_model_path_ssd"], paths["saved_model_path_efficientdet"])
# Creating category index
# Each index maps class-id -> {"name": label} (see read_label_map).
category_index_ssd = read_label_map(files["label_map_ssd"])
category_index_efficientdet = read_label_map(files["label_map_efficientdet"])
def image_resize_with_padding(image):
    """Scale *image* so its longest side is 256 or 512 px, then pad the
    shorter side with white to produce a square canvas for the OCR model.

    Crops whose longest side exceeds 256 px go onto a 512x512 canvas,
    everything else onto 256x256. Channels are reversed via cv2's
    BGR<->RGB conversion first, matching the rest of the pipeline.
    """
    frame = cv2.cvtColor(np.array(image), cv2.COLOR_BGR2RGB)
    height, width = frame.shape[:2]
    longest = max(height, width)
    canvas = 512 if longest > 256 else 256
    scale = float(canvas) / longest
    new_h = int(height * scale)
    new_w = int(width * scale)
    frame = cv2.resize(frame, (new_w, new_h))
    # Split the leftover space evenly; odd pixels go to bottom/right.
    pad_h = canvas - new_h
    pad_w = canvas - new_w
    white = [255, 255, 255]
    return cv2.copyMakeBorder(
        frame,
        pad_h // 2, pad_h - (pad_h // 2),
        pad_w // 2, pad_w - (pad_w // 2),
        cv2.BORDER_CONSTANT, value=white,
    )
def image_resize(image):
    """Normalise a full input photo for the plate-region detector.

    Large images (longest side >= 1080 px) are scaled so the longest side
    is exactly 1080, then padded with white onto a fixed 1080x810
    (portrait) or 810x1080 (landscape) canvas. Smaller images pass
    through unchanged apart from the channel round-trip.

    Fixes vs. original: removed a leftover debug ``print``; clamped the
    padding amounts at zero — for aspect ratios wider/taller than 4:3 the
    computed deltas went negative, which makes ``cv2.copyMakeBorder``
    raise instead of returning an image.
    """
    image = np.array(image)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    old_size = image.shape[:2]  # (height, width)
    if max(old_size) >= 1080:
        ratio = 1080.0 / max(old_size)
        new_size = tuple(int(x * ratio) for x in old_size)
        resized = cv2.resize(image, (new_size[1], new_size[0]))
        # Choose canvas orientation from the scaled image.
        if new_size[0] == 1080:
            height, width = 1080, 810    # portrait
        else:
            height, width = 810, 1080    # landscape
        # max(0, ...): never request a negative border width.
        delta_w = max(0, width - new_size[1])
        delta_h = max(0, height - new_size[0])
        top, bottom = delta_h // 2, delta_h - (delta_h // 2)
        left, right = delta_w // 2, delta_w - (delta_w // 2)
        color = [255, 255, 255]
        image = cv2.copyMakeBorder(resized, top, bottom, left, right,
                                   cv2.BORDER_CONSTANT, value=color)
    # Undo the channel swap so downstream code sees the original order.
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    return image
def ExtractBBoxes(bboxes, bclasses, bscores, im_width, im_height, threshold, category_index):
    """Convert normalised detector output into pixel-space boxes.

    Keeps only detections whose score is at least *threshold* and returns
    ``(boxes, labels)`` where each box is
    ``[x_min, y_min, x_max, y_max, label, score]`` in pixel coordinates.
    Box input order is TF-OD convention: [ymin, xmin, ymax, xmax] in [0,1].
    """
    kept_boxes = []
    kept_labels = []
    for box, cls, score in zip(bboxes, bclasses, bscores):
        if score < threshold:
            continue
        top = int(box[0] * im_height)
        left = int(box[1] * im_width)
        bottom = int(box[2] * im_height)
        right = int(box[3] * im_width)
        label = category_index[int(cls)]['name']
        kept_labels.append(label)
        kept_boxes.append([left, top, right, bottom, label, float(score)])
    return (kept_boxes, kept_labels)
def ocr_predict(img, threshold):
    """Run the EfficientDet OCR model over a cropped plate image and
    assemble the detected characters into a plate string.

    Returns ``[(vehicle_number, day, date, time)]`` stamped with the
    current Asia/Kolkata time.

    Fixes vs. original:
    * ``ExtractBBoxes`` already returns pixel coordinates, but they were
      multiplied by the image size a second time before sorting (harmless
      for ordering, but the stored coordinates were wrong);
    * ``round(score)`` truncated every confidence to 0/1 — keep two
      decimals;
    * characters whose ``ymin`` fell exactly on the row-split threshold
      were dropped by the old ``<`` / ``>`` pair; the split is now
      exhaustive.
    """
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    image_np = tf.convert_to_tensor(img, dtype=tf.uint8)
    input_tensor = np.expand_dims(image_np, 0)
    image_height, image_width, _ = image_np.shape
    detections = detect_fn_efficientdet(input_tensor)
    bboxes = detections['detection_boxes'][0].numpy()
    bclasses = detections['detection_classes'][0].numpy().astype(np.int32)
    bscores = detections['detection_scores'][0].numpy()
    det_boxes, class_labels = ExtractBBoxes(bboxes, bclasses, bscores,
                                            image_width, image_height,
                                            threshold, category_index_efficientdet)
    # det_boxes entries are [x_min, y_min, x_max, y_max, label, score], in pixels.
    output = [(label, x_min, y_min, x_max, y_max, round(score, 2))
              for x_min, y_min, x_max, y_max, label, score in det_boxes]
    df = pd.DataFrame(output, columns=['label', 'xmin', 'ymin', 'xmax', 'ymax', 'score'])
    # Two-row plates: characters within 20% of the topmost ymin form the
    # upper row, everything else the lower row; read each row left->right.
    upper_mask = df.ymin < (df.ymin.min() * 1.2)
    df_up = df[upper_mask].sort_values(by=['xmin'])
    df_down = df[~upper_mask].sort_values(by=['xmin'])
    df = pd.concat([df_up, df_down])
    vehicle_number = "".join(df["label"])
    # Timestamp the reading in IST for display.
    current_date_time = datetime.datetime.now()
    now_asia = current_date_time.astimezone(timezone('Asia/Kolkata'))
    day = now_asia.strftime("%A")
    date = now_asia.strftime("%d/%m/%Y")
    time = now_asia.strftime("%I:%M:%S %p")
    data = [(vehicle_number, day, date, time)]
    return (data)
def predict(img, threshold):
    """Detect number-plate regions in *img*, OCR each region, and return
    an annotated image plus the OCR results.

    Parameters
    ----------
    img : PIL.Image or ndarray
        Input photo (uploads are pre-scaled by ``image_resize``).
    threshold : float
        Minimum detection score used for both the region and OCR models.

    Returns
    -------
    (annotated_rgb, data_list)
        Image with boxes and score labels drawn, and one ``ocr_predict``
        result per detected plate.

    Fixes vs. original: ``round(score)`` collapsed every confidence to
    0/1 so the drawn label always read "0"/"1" — keep two decimals;
    removed the unused ``image_np_with_detections`` copy.
    """
    image_np = np.array(img)
    img = cv2.cvtColor(image_np, cv2.COLOR_BGR2RGB)
    image = tf.convert_to_tensor(img, dtype=tf.uint8)
    input_tensor = np.expand_dims(image, 0)
    image_height, image_width, _ = image.shape
    detections = detect_fn_ssd(input_tensor)
    bboxes = detections['detection_boxes'][0].numpy()
    bclasses = detections['detection_classes'][0].numpy().astype(np.int32)
    bscores = detections['detection_scores'][0].numpy()
    det_boxes, class_labels = ExtractBBoxes(bboxes, bclasses, bscores,
                                            image_width, image_height,
                                            threshold, category_index_ssd)
    # Pixel-space boxes with a 2-decimal confidence for display.
    output = [(label, x_min, y_min, x_max, y_max, round(score, 2))
              for x_min, y_min, x_max, y_max, label, score in det_boxes]
    # Pass 1: OCR each plate crop BEFORE any drawing touches `img`.
    data_list = []
    for _, x_min, y_min, x_max, y_max, _ in output:
        array = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)
        cropped_img = Image.fromarray(array).crop((x_min, y_min, x_max, y_max))
        input_ocr_image = image_resize_with_padding(cropped_img)
        data_list.append(ocr_predict(input_ocr_image, threshold))
    # Pass 2: draw boxes and score labels.
    box_color = (0, 255, 0)
    text_color = (0, 0, 255)
    for l, x1, y1, x2, y2, score in output:
        img = cv2.rectangle(img, (x1, y1), (x2, y2), box_color, 2)
        label = f"{l} : {round(score,2)}"
        # Measure the text so its filled background spans exactly its width.
        (w, h), _ = cv2.getTextSize(
            label, cv2.FONT_HERSHEY_SIMPLEX, 0.6, 1)
        img = cv2.rectangle(img, (x1, y1 - 20), (x1 + w, y1), box_color, -1)
        img = cv2.putText(img, label, (x1, y1 - 5),
                          cv2.FONT_HERSHEY_SIMPLEX, 0.6, text_color, 1)
    im_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    return (im_rgb, data_list)
# ---- Streamlit page: header and input widgets ----
st.write("""
# Automated Number Plate Recognition
"""
)
st.write("""
**For full code implementation and sub modules please visit the [Github link](https://github.com/gourav300/Automated-Number-plate-Recognition-for-Indian-Vehicles)**
""")
### load file
# User-supplied photo; when present it takes priority over the gallery below.
uploaded_file = st.file_uploader("Upload an image file for a vehicle with standard number plate", type=["jpg", "png", "jpeg"])
st.write("Note -: if you have uploaded an image please click X to enable below images")
# Bundled demo images used when nothing has been uploaded.
test_img = image_select(
    label='''Select an image to get number plate data''',
    images=[
        "test_images/test1.jpg",
        "test_images/test2.png",
        "test_images/test3.png",
        "test_images/test4.jpg",
        "test_images/test5.jpg",
        "test_images/test6.jpg",
        "test_images/test7.jpg",
        "test_images/test8.jpg",
    ])
# Run the full ANPR pipeline on whichever image source is active and show
# the annotated result. (The two branches previously duplicated the
# predict/display tail verbatim; only the image selection differs.)
if uploaded_file is not None:
    # Uploaded photos arrive in arbitrary sizes — normalise first.
    image = image_resize(Image.open(uploaded_file))
else:
    # Bundled demo images are used as-is.
    image = Image.open(test_img)
img, data = predict(image, 0.6)
st.image(img, use_column_width=True)
st.write(f'''
Vehicle details :
{data}
''')