import gradio as gr
import cv2
import onnxruntime
import numpy as np
# Paths to the pre-exported ONNX model files (filenames encode the backbone
# architecture and how many epochs it was trained for).
onnx_model_vgg19_path = "./vgg19-30epochs.onnx"
onnx_model_inceptionv3_path = "./InceptionV3-20epochs.onnx"
onnx_model_resnet101_path = "./Resnet101-30epochs.onnx"
onnx_model_vgg16_path = "./vgg16-20epochs.onnx"
# Class names in the order of the models' output logits; argmax over the
# network output indexes directly into this list.
labels = ['Ajloun Castle', 'Hadrians Arch', 'Petra-siq', 'Roman Ruins-Jerash', 'Roman amphitheater', 'The Cardo Maximus of Jerash', 'Wadi Rum', 'petra-Treasury', 'umm qais']
# Map each selectable model name to (onnx file path, (width, height)) — the
# size each network expects its input resized to before inference.
_MODEL_SPECS = {
    "InceptionV3": (onnx_model_inceptionv3_path, (550, 475)),
    "Resnet101": (onnx_model_resnet101_path, (250, 200)),
    "Vgg19": (onnx_model_vgg19_path, (250, 200)),
    "Vgg16": (onnx_model_vgg16_path, (200, 150)),
}

# Cache of created inference sessions keyed by model name, so each ONNX file
# is loaded from disk once instead of on every prediction.
_session_cache = {}


def _get_session(model):
    """Return a cached onnxruntime.InferenceSession for *model*, creating it on first use."""
    if model not in _session_cache:
        _session_cache[model] = onnxruntime.InferenceSession(_MODEL_SPECS[model][0])
    return _session_cache[model]


def predict_image(image_path, model):
    """Classify the landmark shown in the image at *image_path*.

    Parameters
    ----------
    image_path : str
        Filesystem path to the input image (Gradio passes a temp-file path).
    model : str
        One of "InceptionV3", "Resnet101", "Vgg19", "Vgg16".

    Returns
    -------
    str
        The predicted label from ``labels``, or a friendly message when the
        top softmax confidence is below 0.2 or the image cannot be read.

    Raises
    ------
    ValueError
        If *model* is not one of the supported names.  (The original code
        raised an opaque NameError here because ``img_size`` was never set.)
    """
    if model not in _MODEL_SPECS:
        raise ValueError(f"Unknown model: {model!r}")
    _, img_size = _MODEL_SPECS[model]

    img = cv2.imread(image_path)
    if img is None:
        # cv2.imread returns None (no exception) for unreadable files; the
        # original code then crashed inside cv2.resize.
        return "Hmm, it's a bit tricky. Feel free to add another image, and I'll do my best to make a guess!"
    img = cv2.resize(img, img_size)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # OpenCV loads BGR; models expect RGB
    img = np.expand_dims(img, axis=0)  # add batch dimension
    img = img.astype(np.float32) / 255.0  # normalize pixel values to [0, 1]

    session = _get_session(model)
    input_name = session.get_inputs()[0].name
    prediction = session.run(None, {input_name: img})

    # Numerically stable softmax over the first (only) batch item: subtracting
    # the max logit prevents np.exp from overflowing on large logits.
    logits = prediction[0][0]
    exps = np.exp(logits - np.max(logits))
    softmax_output = exps / np.sum(exps)

    max_confidence = np.max(softmax_output)
    predicted_class = np.argmax(softmax_output)
    if max_confidence < 0.2:
        return "Hmm, it's a bit tricky. Feel free to add another image, and I'll do my best to make a guess!"
    return labels[predicted_class]
# Gradio front-end: an image upload plus a radio selector choosing the
# backbone model, wired to predict_image; the predicted site name is shown
# in a single textbox.
model_choices = [
    "Vgg16",
    "InceptionV3",
    "Vgg19",
    "Resnet101",
]

image_input = gr.Image(type="filepath", label="Input Image")
model_selector = gr.Radio(choices=model_choices, type="value", label="Select_model")
site_output = gr.Textbox(label="Site")

interface_image = gr.Interface(
    fn=predict_image,
    inputs=[image_input, model_selector],
    outputs=[site_output],
    title="classifier_demo",
)
interface_image.launch()