# Hugging Face Spaces page metadata (kept from the original scrape):
# Spaces:
# Runtime error
# Runtime error
# File size: 1,649 Bytes
import torch
import torchvision.transforms as T
from PIL import Image
import joblib
import json
import cv2
import gradio as gr
# Preprocessing pipeline applied to every input image before it is fed
# to DINOv2: tensor conversion, resize, 224x224 center crop, normalize.
_preprocess_steps = [
    T.ToTensor(),               # PIL image -> float tensor in [0, 1]
    T.Resize(244),              # shortest side to 244 px
    # NOTE(review): 244 looks like a typo for the conventional 256;
    # behavior intentionally kept as-is.
    T.CenterCrop(224),          # fixed 224x224 crop for the ViT-S/14 input
    T.Normalize([0.5], [0.5]),  # map [0, 1] -> [-1, 1] on every channel
]
transform_image = T.Compose(_preprocess_steps)
def load_image(img: str) -> torch.Tensor:
    """
    Load an image from disk and return a batched tensor usable as input
    to DINOv2.

    Parameters
    ----------
    img : str
        Path to the image file.

    Returns
    -------
    torch.Tensor
        Float tensor of shape (1, 3, 224, 224): the module-level
        ``transform_image`` resizes, center-crops and normalizes.
    """
    # `with` ensures the underlying file handle is closed (the original
    # left it open until garbage collection).
    with Image.open(img) as pil_img:
        # Bug fix: convert explicitly to RGB. Grayscale ("L") or palette
        # ("P") images would otherwise produce a 1-channel tensor, which
        # the `[:3]` slice cannot repair and which the 3-channel model
        # rejects. RGBA is also flattened to 3 channels here, leaving
        # `[:3]` as a harmless safety net.
        rgb = pil_img.convert("RGB")
        transformed_img = transform_image(rgb)[:3].unsqueeze(0)
    return transformed_img
# Load models for inference.
# Fetch the small DINOv2 ViT-S/14 backbone from torch hub; this downloads
# the weights on first run and uses the local hub cache afterwards.
dinov2_vits14 = torch.hub.load("facebookresearch/dinov2", "dinov2_vits14")
device = torch.device('cuda' if torch.cuda.is_available() else "cpu")
dinov2_vits14.to(device)
dinov2_vits14.eval()  # Inference only: disable dropout/batch-norm updates.
# Load the classifier: a pre-trained scikit-learn SVM that maps DINOv2
# embeddings to class labels.
# NOTE(review): joblib.load deserializes arbitrary Python objects — only
# load model files from a trusted source.
clf = joblib.load('svm_model.joblib')
# Load the embeddings.
# NOTE(review): `embeddings` is read here but never used in this file —
# confirm whether it is needed elsewhere or can be dropped.
with open('all_embeddings.json', 'r') as f:
    embeddings = json.load(f)
def predict_image_class(image_path):
    """
    Predict the class of an image by extracting its DINOv2 embedding and
    running the pre-trained SVM classifier on it.

    Parameters
    ----------
    image_path : str
        Path to the image file to classify.

    Returns
    -------
    The single class label the SVM predicts for this image.
    """
    tensor = load_image(image_path).to(device)
    # Inference only: skip autograd bookkeeping for the forward pass.
    with torch.no_grad():
        features = dinov2_vits14(tensor)
    # Move to host memory and flatten into the (1, n_features) row shape
    # that scikit-learn estimators expect.
    feature_row = features.cpu().numpy().reshape(1, -1)
    return clf.predict(feature_row)[0]
def classify_image(image):
    """
    Gradio callback: classify the uploaded image and format the result.

    Parameters
    ----------
    image : str
        Filepath of the uploaded image (the interface uses
        ``gr.Image(type="filepath")``).

    Returns
    -------
    str
        Human-readable prediction message.
    """
    label = predict_image_class(image)
    return f"Predicted class: {label}"
# Define the Gradio interface.
# Single image input, text output; type="filepath" means the component
# hands classify_image a temporary file path rather than a numpy array.
iface = gr.Interface(
    fn=classify_image,
    inputs=gr.Image(type="filepath"),
    outputs="text",
    title="Currency Classifier",
    description="Upload an image of currency to classify."
)
# Launch the Gradio interface.
# Blocking call: starts the local web server hosting the demo.
iface.launch()