# FoodVision_Mini / app.py
# (Hugging Face Spaces page residue converted to comments: author "kdallash",
#  commit message "Update app.py", commit hash f472d67 verified.)
### 1 Imports and class names setup###
import gradio as gr
import os
import torch
from model import create_effnetb2_model
from timeit import default_timer as timer
from typing import List, Dict,Tuple
# Output classes, in the exact order the model's final classifier layer was trained on.
class_names = ["pizza", "steak", "sushi"]
### 2 Model and transform preparation ###
# Build an EfficientNet-B2 model plus its matching eval-time image transforms.
effnetb2_loaded, effnet_transform = create_effnetb2_model(num_classes=len(class_names))
# Load the saved fine-tuned weights onto CPU (Spaces runs on CPU hardware).
effnetb2_loaded.load_state_dict(torch.load("11-model_deployment_effnetb2.pth", map_location="cpu"))
effnetb2_loaded.to("cpu")
### 3 Predict function ###
def predict(img) -> Tuple[Dict, float]:
    """Classify *img* as pizza, steak, or sushi and time the inference.

    Args:
        img: A PIL image (Gradio supplies it via ``gr.Image(type="pil")``).

    Returns:
        A tuple of:
        - dict mapping each class name to its predicted probability, and
        - inference time in seconds, rounded to 4 decimal places.
    """
    # Start the inference timer.
    start_time = timer()
    # Apply the EffNetB2 eval transforms and add a batch dimension (1, C, H, W).
    transformed_image = effnet_transform(img).unsqueeze(0)
    # Put the model in eval mode and make the prediction without tracking gradients.
    effnetb2_loaded.eval()
    with torch.inference_mode():
        logit = effnetb2_loaded(transformed_image)
        probs = torch.softmax(logit, dim=1)
    # Map each class name to its predicted probability (plain floats for Gradio).
    pred_label_dict = {class_names[i]: probs[0][i].item() for i in range(len(class_names))}
    # Stop the timer and compute elapsed inference time.
    end_time = timer()
    inference_time = round(end_time - start_time, 4)
    return pred_label_dict, inference_time
### 4 Gradio app ###
title = "FoodVision mini models 🍕,🥩,🍣"
# NOTE: fixed typo "classifay" -> "classify" in the user-facing description.
description = "An EfficientnetB2 feature extraction model is used to classify images as pizza, steak, sushi"
# Build [[path], ...] example lists from the bundled examples/ directory.
example_list = [[os.path.join("examples", example)] for example in os.listdir("examples")]

# Create the Gradio demo: image in -> (top-3 label dict, inference time) out.
demo = gr.Interface(
    fn=predict,
    inputs=gr.Image(type="pil"),
    outputs=[
        gr.Label(num_top_classes=3, label="prediction"),
        gr.Number(label="Prediction time in seconds"),
    ],
    examples=example_list,
    title=title,
    description=description,
    cache_examples=False,  # examples are recomputed on click, not precomputed
)

# Launch locally; Spaces serves the app, so no public share link is needed.
demo.launch(share=False)