# classifier_demo / app.py
# Author: Anas090 (Hugging Face Space) — commit 6ba409a "Update app.py"
import gradio as gr
import cv2
import onnxruntime
import numpy as np
# Exported ONNX classifier checkpoints bundled with the Space.
onnx_model_vgg19_path = "./vgg19-30epochs.onnx"
onnx_model_inceptionv3_path = "./InceptionV3-20epochs.onnx"
onnx_model_resnet101_path = "./Resnet101-30epochs.onnx"
onnx_model_vgg16_path = "./vgg16-20epochs.onnx"

# Class names in the order the models' output logits are indexed.
labels = [
    'Ajloun Castle',
    'Hadrians Arch',
    'Petra-siq',
    'Roman Ruins-Jerash',
    'Roman amphitheater',
    'The Cardo Maximus of Jerash',
    'Wadi Rum',
    'petra-Treasury',
    'umm qais',
]
def predict_image(image_path, model):
    """Classify a landmark photo with the selected ONNX model.

    Args:
        image_path: Filesystem path to the input image (Gradio passes a
            temp-file path because the Image component uses type="filepath").
        model: One of "InceptionV3", "Resnet101", "Vgg19", "Vgg16".

    Returns:
        The predicted label from ``labels``, or a human-readable message
        when the model choice / image is invalid or confidence is low.
    """
    # Each model was trained at its own input resolution: map the UI
    # choice to (onnx_path, (width, height)) so the session is selected
    # in one place instead of two parallel if/elif chains.
    model_configs = {
        "InceptionV3": (onnx_model_inceptionv3_path, (550, 475)),
        "Resnet101": (onnx_model_resnet101_path, (250, 200)),
        "Vgg19": (onnx_model_vgg19_path, (250, 200)),
        "Vgg16": (onnx_model_vgg16_path, (200, 150)),
    }
    if model not in model_configs:
        # Original code raised NameError here; fail gracefully instead.
        return "Please select a model before submitting."
    model_path, img_size = model_configs[model]
    session = onnxruntime.InferenceSession(model_path)

    img = cv2.imread(image_path)
    if img is None:
        # cv2.imread returns None (no exception) for unreadable files;
        # without this guard cv2.resize would crash.
        return "Could not read the image. Please upload a valid image file."
    img = cv2.resize(img, img_size)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # models expect RGB, cv2 loads BGR
    img = np.expand_dims(img, axis=0)  # Add batch dimension
    img = img.astype(np.float32) / 255.0  # Normalize pixel values

    input_name = session.get_inputs()[0].name
    prediction = session.run(None, {input_name: img})

    # Numerically stable softmax (subtracting the max does not change
    # the result but prevents overflow in np.exp for large logits).
    logits = prediction[0][0]
    exps = np.exp(logits - np.max(logits))
    softmax_output = exps / np.sum(exps, axis=-1)
    max_confidence = np.max(softmax_output)
    predicted_class = np.argmax(softmax_output)
    if max_confidence < 0.2:
        return "Hmm, it's a bit tricky. Feel free to add another image, and I'll do my best to make a guess!"
    predicted_label = labels[predicted_class]
    return predicted_label
# --- Gradio UI wiring ---------------------------------------------------
# One image upload plus a radio selector choosing which ONNX model runs.
inputs_image = [
    gr.Image(type="filepath", label="Input Image"),
    gr.Radio(
        choices=["Vgg16", "InceptionV3", "Vgg19", "Resnet101"],
        type="value",
        label="Select_model",
    ),
]
outputs_text = [gr.Textbox(label="Site")]

interface_image = gr.Interface(
    fn=predict_image,
    inputs=inputs_image,
    outputs=outputs_text,
    title="classifier_demo",
)
interface_image.launch()