import torch
import torch.nn as nn
import torchvision
import gradio as gr
# Load EfficientNet-B2 with its default pretrained weights and the matching
# preprocessing transforms ("agirliklar" = weights, "eff_don" = transform, in Turkish)
agirliklar = torchvision.models.EfficientNet_B2_Weights.DEFAULT
eff_don = agirliklar.transforms()
model = torchvision.models.efficientnet_b2(weights=agirliklar)

# Replace the classifier head: EfficientNet-B2 outputs 1408 features, and there are 5 target classes
model.classifier = nn.Sequential(nn.Dropout(p=0.2), nn.Linear(1408, 5))

# map_location so weights saved on GPU also load on CPU-only hosts
# (an assumption about the deployment environment, not confirmed by the source)
model.load_state_dict(torch.load("model.pth", map_location=torch.device("cpu")))

class_names = ['a_bir', 'b_iki', 'c_üç', 'd_dört', 'e_beş']
def predict(img):
    """Transforms img, runs it through the model, and returns a dictionary
    mapping each class name to its predicted probability."""
    # Transform the target image and add a batch dimension
    img = eff_don(img).unsqueeze(0)

    # Put the model into evaluation mode and run the forward pass under inference mode
    model.eval()
    with torch.inference_mode():
        # Turn the prediction logits into prediction probabilities
        pred_probs = torch.softmax(model(img), dim=1)

    # Map each class name to its probability
    # (this is the format Gradio's Label output expects)
    pred_labels_and_probs = {class_names[i]: float(pred_probs[0][i]) for i in range(len(class_names))}
    return pred_labels_and_probs
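
# --- The scraped page cuts off here (the listing shows 36 lines). ---
# Since gradio is imported but otherwise unused, the original file almost certainly
# wired predict() into a gr.Interface. A minimal sketch of that missing glue,
# assuming a single PIL image input and a label output (layout details are guesses):
demo = gr.Interface(
    fn=predict,
    inputs=gr.Image(type="pil"),
    outputs=gr.Label(num_top_classes=5),
)

if __name__ == "__main__":
    demo.launch()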