Spaces:
Runtime error
Runtime error
Upload app.py
Browse files
app.py
ADDED
|
@@ -0,0 +1,53 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import urllib.request

from transformers import pipeline
import onnxruntime
import numpy as np
from PIL import Image
import gradio as gr

# --- Load the ONNX model -------------------------------------------------
# onnxruntime.InferenceSession requires a local file path (or raw model
# bytes) — it cannot load directly from an HTTPS URL, so the model is
# downloaded to a temporary file first. urlretrieve with no filename
# argument returns the path of the temp file it wrote.
onnx_model_url = "https://huggingface.co/spaces/Anas090/sites_classification/resolve/main/InceptionV3-20epochs.onnx?dl=1"
onnx_model_path, _ = urllib.request.urlretrieve(onnx_model_url)
session = onnxruntime.InferenceSession(onnx_model_path)
|
# Ordered list of site names; index i is the model's class i.
class_labels = ['Ajloun Castle', 'Hadrians Arch', 'Petra-siq', 'Roman Ruins-Jerash', 'Roman amphitheater', 'The Cardo Maximus of Jerash', 'Wadi Rum', 'petra-Treasury', 'umm qais']

# Reverse lookup: site name -> class index (mirrors class_labels order).
dic = {name: index for index, name in enumerate(class_labels)}
|
def classify_image(image, labels_text=None, model_name=None, hypothesis_template=None):
    """Classify a tourist-site photo with the ONNX model.

    Parameters
    ----------
    image : PIL.Image.Image or str
        The input photo. Gradio's ``gr.Image(type='pil')`` passes a PIL
        image; a file path is also accepted for direct calls.
    labels_text, model_name, hypothesis_template : optional
        Unused. Given defaults so Gradio, which supplies only the two
        configured inputs, can call this function without a TypeError,
        while the original 4-argument signature stays valid for callers.

    Returns
    -------
    dict
        ``{predicted_label: 1.0}`` — the top class with a placeholder
        confidence (the raw model outputs are not softmaxed here).
    """
    # Gradio hands us a PIL image already; only open() when given a path.
    img = image if isinstance(image, Image.Image) else Image.open(image)
    # convert("RGB") guards against RGBA/grayscale uploads, which would
    # change the channel count the model expects.
    # NOTE(review): (475, 550) is (width, height) for PIL.resize — confirm
    # it matches the input resolution the ONNX graph was exported with.
    img = img.convert("RGB").resize((475, 550))
    img_array = np.asarray(img, dtype=np.float32) / 255.0
    img_array = np.expand_dims(img_array, axis=0)  # add batch dimension

    # Query the model for its actual input tensor name instead of
    # hard-coding "input", which fails when the export used another name.
    input_name = session.get_inputs()[0].name
    output = session.run(None, {input_name: img_array})

    # session.run returns a list of output arrays; argmax over the first.
    predicted_class_index = int(np.argmax(output[0]))
    predicted_class = class_labels[predicted_class_index]

    return {predicted_class: 1.0}
|
# Gradio input components. The ``gr.inputs.*`` namespace and the
# ``default=`` keyword were removed in Gradio 3.x+; the top-level
# components take ``value=`` for the initial selection.
inputs = [
    gr.Image(type='pil', label="Site_image"),
    gr.Radio(
        choices=[
            "ViT/B-16",
            "ViT/L-14",
            "ViT/L-14@336px",
            "ViT/H-14",
        ],
        type="value",
        value="ViT/B-16",
        label="Model 模型规模",
    ),
]
|
# Build the UI: the "label" output shortcut renders the
# {class_name: confidence} dict returned by classify_image.
iface = gr.Interface(
    classify_image,
    inputs,
    "label",
    title="Jordan Tourist Sites Classification",  # was the "Your Title Here" placeholder
)

# Guard the launch so importing this module (e.g. from tests) does not
# start the web server.
if __name__ == "__main__":
    iface.launch()
|