"""Gradio app: classify photos of Jordanian heritage sites with an ONNX InceptionV3 model."""

import os
import urllib.request

import gradio as gr
import numpy as np
import onnxruntime
from PIL import Image  # noqa: F401  (kept for parity with original imports / type hints)

# NOTE(review): `from transformers import pipeline` was removed — it was never used
# and pulled in a very heavy dependency for nothing.

MODEL_URL = (
    "https://huggingface.co/spaces/Anas090/sites_classification/"
    "resolve/main/InceptionV3-20epochs.onnx?dl=1"
)
MODEL_PATH = "InceptionV3-20epochs.onnx"

# BUG FIX: onnxruntime.InferenceSession cannot open a URL — it needs a local
# file path (or raw bytes). Download the model once, then load from disk.
if not os.path.exists(MODEL_PATH):
    urllib.request.urlretrieve(MODEL_URL, MODEL_PATH)
session = onnxruntime.InferenceSession(MODEL_PATH)

class_labels = [
    'Ajloun Castle',
    'Hadrians Arch',
    'Petra-siq',
    'Roman Ruins-Jerash',
    'Roman amphitheater',
    'The Cardo Maximus of Jerash',
    'Wadi Rum',
    'petra-Treasury',
    'umm qais',
]

# Kept for backward compatibility (unused by the inference path below).
dic = {label: idx for idx, label in enumerate(class_labels)}


def classify_image(image):
    """Classify a single site photo and return per-class confidences.

    Parameters
    ----------
    image : PIL.Image.Image
        Image supplied by the Gradio ``Image`` component (``type="pil"``).

    Returns
    -------
    dict[str, float]
        Mapping of class label -> softmax probability, as expected by
        a Gradio ``Label`` output.
    """
    # BUG FIX: the Gradio component already delivers a PIL image, so calling
    # Image.open() on it failed. Also force RGB so RGBA/grayscale uploads
    # don't produce a wrong-shaped tensor.
    # NOTE(review): (475, 550) is (width, height) for PIL.resize — assumed to
    # match the model's expected input spatial dims; TODO confirm against the
    # exported ONNX graph.
    img = image.convert("RGB").resize((475, 550))
    arr = np.asarray(img, dtype=np.float32) / 255.0
    arr = np.expand_dims(arr, axis=0)  # add batch dimension -> (1, H, W, 3)

    # BUG FIX: don't hard-code the input tensor name — read it from the model.
    input_name = session.get_inputs()[0].name
    logits = np.asarray(session.run(None, {input_name: arr})[0]).ravel()

    # IMPROVEMENT: report real softmax confidences instead of a hard-coded 1.0.
    # Subtract the max for numerical stability.
    exp = np.exp(logits - np.max(logits))
    probs = exp / exp.sum()
    return {label: float(p) for label, p in zip(class_labels, probs)}


# BUG FIX: the original declared 4 function parameters but wired only 2 input
# components (a TypeError on every call), and the ViT model-size radio was
# never used by the ONNX model — it has been dropped. Also migrated from the
# removed `gr.inputs.*` API to the current top-level Gradio components.
iface = gr.Interface(
    fn=classify_image,
    inputs=gr.Image(type="pil", label="Site_image"),
    outputs=gr.Label(num_top_classes=3),
    title="Your Title Here",
)

if __name__ == "__main__":
    iface.launch()