# Source: HuggingFace Space "Anas090/sites_classification" (page reported: Runtime error)
# Original file size: 1,807 bytes
from transformers import pipeline
import onnxruntime
import numpy as np
from PIL import Image
import gradio as gr
# --- Model setup ----------------------------------------------------------
import os
import urllib.request

# onnxruntime.InferenceSession expects a local file path (or serialized model
# bytes), NOT an HTTP URL.  Passing the URL directly made the constructor fail
# at startup, so download the model file first (skipped if already cached).
_MODEL_URL = "https://huggingface.co/spaces/Anas090/sites_classification/resolve/main/InceptionV3-20epochs.onnx?dl=1"
onnx_model_path = "InceptionV3-20epochs.onnx"
if not os.path.exists(onnx_model_path):
    urllib.request.urlretrieve(_MODEL_URL, onnx_model_path)
session = onnxruntime.InferenceSession(onnx_model_path)

# Class index -> human-readable site name.
# NOTE(review): order must match the model's training label order — confirm.
class_labels = ['Ajloun Castle', 'Hadrians Arch', 'Petra-siq', 'Roman Ruins-Jerash', 'Roman amphitheater', 'The Cardo Maximus of Jerash', 'Wadi Rum', 'petra-Treasury', 'umm qais']
# Reverse lookup (label -> index), derived so it can never drift from
# class_labels.  Currently unused by the rest of the app.
dic = {label: idx for idx, label in enumerate(class_labels)}
def classify_image(image, labels_text=None, model_name=None, hypothesis_template=None):
    """Classify a photo of a Jordanian historic site with the ONNX model.

    Args:
        image: Either a PIL.Image (what ``gr.Image(type='pil')`` delivers) or
            a path/file object openable by ``PIL.Image.open``.
        labels_text, model_name, hypothesis_template: Unused legacy params.
            Defaults added so Gradio's 2-element input list no longer causes a
            TypeError (the original required all four positionally).

    Returns:
        dict mapping the predicted class label to a confidence of 1.0
        (the model's raw scores are not converted to probabilities here).
    """
    # BUG FIX: the Gradio image component already hands us a PIL image;
    # calling Image.open() on it crashed.  Accept both forms.
    img = image if isinstance(image, Image.Image) else Image.open(image)
    # NOTE(review): (475, 550) is (width, height) for PIL.resize — confirm it
    # matches the model's expected input resolution.
    img = img.resize((475, 550))
    img_array = np.array(img).astype(np.float32) / 255.0
    img_array = np.expand_dims(img_array, axis=0)  # add batch dimension
    # Query the session for its real input name instead of hard-coding "input".
    input_name = session.get_inputs()[0].name
    output = session.run(None, {input_name: img_array})
    # output[0] is the first (score) tensor; argmax over it picks the class.
    predicted_class_index = int(np.argmax(output[0]))
    predicted_class = class_labels[predicted_class_index]
    return {predicted_class: 1.0}
# --- Gradio UI ------------------------------------------------------------
# BUG FIX: the gr.inputs.* namespace and the `default=` kwarg were removed in
# Gradio >= 3.x (gr.inputs.Image raised AttributeError).  Use the modern
# top-level components with `value=` instead.
inputs = [
    gr.Image(type='pil', label="Site_image"),
    # Model-size selector (its value is currently ignored by classify_image).
    gr.Radio(
        choices=[
            "ViT/B-16",
            "ViT/L-14",
            "ViT/L-14@336px",
            "ViT/H-14",
        ],
        value="ViT/B-16",
        label="Model 模型规模",
    ),
]
iface = gr.Interface(
    fn=classify_image,
    inputs=inputs,
    outputs="label",
    title="Your Title Here",
)
iface.launch()