### 1. Imports and class names setup ###
import gradio as gr
import os
import torch

from model import create_effnetb2_model
from timeit import default_timer as timer
from typing import Tuple, Dict

# Open and read the class names from class_names.txt
with open("class_names.txt", "r") as f:
    class_names = [food_name.strip() for food_name in f.readlines()]

### 2. Model and transforms prep ###
effnetb2, effnetb2_transforms = create_effnetb2_model()

# Load saved weights
effnetb2.load_state_dict(
    torch.load(
        f="pretrained_effnetb2_foodvision.pth",
        map_location=torch.device("cpu"),  # load the weights onto the CPU
    )
)

### 3. Predict function ###
def predict(img) -> Tuple[Dict, float]:
    """Transforms the input image, predicts with EffNetB2 and returns a
    class name to probability dictionary plus the time taken."""
    # Start a timer
    start_time = timer()

    # Transform the input image for use with EffNetB2 (and add a batch dimension)
    img = effnetb2_transforms(img).unsqueeze(0)

    # Put the model into eval mode, make prediction
    effnetb2.eval()
    with torch.inference_mode():
        pred_probs = torch.softmax(effnetb2(img), dim=1)
        pred_label = torch.argmax(pred_probs, dim=1)

    # Create a prediction label and prediction probability dictionary
    pred_labels_and_probs = {class_names[i]: float(pred_probs[0][i]) for i in range(len(class_names))}

    # Calculate pred time
    end_time = timer()
    pred_time = end_time - start_time

    # Return pred dict and pred time
    return pred_labels_and_probs, pred_time

### 4. Gradio app ###
# Create a list of example inputs from the "examples/" directory
example_list = [["examples/" + example] for example in os.listdir("examples")]

title = "Computer Vision for Food Processing"
description = "An [EfficientNetB2](https://pytorch.org/vision/main/models/generated/torchvision.models.efficientnet_b2.html?highlight=efficientnet_b2#torchvision.models.efficientnet_b2) feature extractor computer vision model to classify images of 101 foods (taken from PyTorch's [Food101](https://pytorch.org/vision/main/generated/torchvision.datasets.Food101.html) dataset). View the foods that this model can classify [here](https://github.com/alpapado/food-101/blob/master/data/meta/classes.txt)."

# Create the Gradio interface
demo = gr.Interface(
    fn=predict,
    inputs=gr.Image(type="pil"),
    outputs=[
        gr.Label(num_top_classes=5, label="Predictions"),
        gr.Number(label="Prediction time (s)"),
    ],
    examples=example_list,
    title=title,
    description=description,
)

# Launch the demo
demo.launch()
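
# NOTE: `create_effnetb2_model` is imported from model.py, which isn't shown in
# this file. A minimal sketch of what such a helper might look like is below;
# it is an assumption (names, defaults and dropout value are illustrative), not
# the exact implementation: a torchvision EfficientNet-B2 feature extractor
# with its classifier head swapped for the 101 Food101 classes.
#
#     import torch
#     import torchvision
#     from torch import nn
#
#     def create_effnetb2_model(num_classes: int = 101, seed: int = 42):
#         # Pretrained weights and their matching inference transforms
#         weights = torchvision.models.EfficientNet_B2_Weights.DEFAULT
#         transforms = weights.transforms()
#         model = torchvision.models.efficientnet_b2(weights=weights)
#
#         # Freeze the feature extractor layers
#         for param in model.parameters():
#             param.requires_grad = False
#
#         # Replace the classifier head (EffNetB2's classifier takes 1408 features)
#         torch.manual_seed(seed)
#         model.classifier = nn.Sequential(
#             nn.Dropout(p=0.3, inplace=True),
#             nn.Linear(in_features=1408, out_features=num_classes),
#         )
#         return model, transforms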