Upload 7 files
- app.py +75 -0
- effnetb2_feature_extractor_food101_mini.pth +3 -0
- examples/1523026.jpg +0 -0
- examples/46797.jpg +0 -0
- examples/482858.jpg +0 -0
- model.py +20 -0
- requirements.txt +5 -0
app.py
ADDED
@@ -0,0 +1,75 @@
+### 1. Imports and class names setup ###
+import gradio as gr
+import os
+import torch
+
+from model import create_effnetb2_model
+from timeit import default_timer as timer
+from typing import Tuple, Dict
+
+# Setup class names
+class_names = ["pizza", "steak", "sushi"]
+
+### 2. Model and transforms preparation ###
+effnetb2, effnetb2_transforms = create_effnetb2_model(num_classes=len(class_names))
+
+# Load saved weights
+effnetb2.load_state_dict(
+    torch.load(
+        f="effnetb2_feature_extractor_food101_mini.pth",
+        map_location=torch.device("cpu")  # load the model onto the CPU
+    )
+)
+
+### 3. Predict function ###
+def predict(img) -> Tuple[Dict, float]:
+    # Start a timer
+    start_time = timer()
+
+    # Transform the input image for use with EffNetB2
+    img = effnetb2_transforms(img).unsqueeze(0)  # unsqueeze = add batch dimension on 0th index
+
+    # Put model into eval mode, make prediction
+    effnetb2.eval()
+    with torch.inference_mode():
+        # Pass transformed image through the model and turn the prediction logits into probabilities
+        pred_probs = torch.softmax(effnetb2(img), dim=1)
+
+    # Create a prediction label and prediction probability dictionary
+    pred_labels_and_probs = {}
+
+    # Equivalent dict comprehension, if preferred over the loop below:
+    # pred_labels_and_probs = {class_names[i]: float(pred_probs[0][i]) for i in range(len(class_names))}
+    for i, class_name in enumerate(class_names):
+        pred_labels_and_probs[class_name] = float(pred_probs[0][i])
+
+    # Calculate pred time
+    end_time = timer()
+    pred_time = round(end_time - start_time, 4)
+
+    # Return pred dict and pred time
+    return pred_labels_and_probs, pred_time
+
+### 4. Gradio app ###
+
+# Create title, description and article
+title = "Food101 Mini Classification"
+description = "An [EfficientNetB2 feature extractor](https://pytorch.org/vision/stable/models/generated/torchvision.models.efficientnet_b2.html#torchvision.models.efficientnet_b2) computer vision model to classify images as pizza, steak or sushi."
+article = "Created at [Food101 Mini Classification](https://github.com/MRameezU/Food-101-Mini-Classification.git)."
+
+# Create example list
+example_list = [["examples/" + example] for example in os.listdir("examples")]
+
+# Create the Gradio demo
+demo = gr.Interface(fn=predict,  # maps inputs to outputs
+                    inputs=gr.Image(type="pil"),
+                    outputs=[gr.Label(num_top_classes=3, label="Predictions"),
+                             gr.Number(label="Prediction time (s)")],
+                    examples=example_list,
+                    title=title,
+                    description=description,
+                    article=article)
+
+# Launch the demo!
+demo.launch()
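
Note: because demo.launch() sits at module level, importing app from another script will also start the Gradio server. Below is a minimal sanity-check sketch for predict(), assuming the launch call has been wrapped in an if __name__ == "__main__": guard (not part of the file above) and that the weights file and examples/ folder are present in the working directory; Image comes from Pillow.

# Hypothetical local check of predict(); run from the repo root.
from PIL import Image

from app import predict  # assumes demo.launch() is guarded, so importing does not start the server

img = Image.open("examples/482858.jpg")   # one of the bundled example photos
labels_and_probs, pred_time = predict(img)

print(labels_and_probs)                   # e.g. {'pizza': ..., 'steak': ..., 'sushi': ...}
print(f"Prediction took {pred_time} s")
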
effnetb2_feature_extractor_food101_mini.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:90f73ef943cd559fb486e7fd7a5512587a1589bd98f37da098604cd7aebcacb6
+size 31300410

examples/1523026.jpg
ADDED

examples/46797.jpg
ADDED

examples/482858.jpg
ADDED

model.py
ADDED
@@ -0,0 +1,20 @@
+import torch
+import torchvision
+
+from torch import nn
+
+def create_effnetb2_model(num_classes: int = 3,
+                          seed: int = 42):
+    # 1. Create EffNetB2 pretrained weights, transforms and model
+    weights = torchvision.models.EfficientNet_B2_Weights.DEFAULT
+    transforms = weights.transforms()
+    model = torchvision.models.efficientnet_b2(weights=weights)
+
+    # 2. Freeze the base layers so only the new classifier head is trainable
+    for param in model.parameters():
+        param.requires_grad = False
+
+    # 3. Seed the RNG and replace the classifier head for reproducible initialisation
+    torch.manual_seed(seed)
+    model.classifier = nn.Sequential(
+        nn.Dropout(p=0.3, inplace=True),
+        nn.Linear(in_features=1408, out_features=num_classes)
+    )
+    return model, transforms
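
create_effnetb2_model() returns an EfficientNet-B2 whose base layers are frozen and whose classifier has been swapped for a new 3-class head, together with the matching preprocessing transforms. A small sketch to confirm the feature-extractor setup, assuming model.py is importable from the current directory:

# Rough check that only the new classifier head is trainable.
from model import create_effnetb2_model

model, transforms = create_effnetb2_model(num_classes=3, seed=42)

trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
total = sum(p.numel() for p in model.parameters())

print(f"{trainable:,} trainable / {total:,} total parameters")
print(transforms)  # the EfficientNet_B2_Weights.DEFAULT preprocessing pipeline
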
requirements.txt
ADDED
@@ -0,0 +1,5 @@
+torch==1.12.0
+torchvision==0.13.0
+gradio==4.38.1
+numpy<2