Upload 9 files

- 11-model_deployment_effnetb2.pth +3 -0
- FoodVision_Mini/.gitattributes +35 -0
- FoodVision_Mini/README.md +14 -0
- app.py +54 -0
- examples/1555015.jpg +0 -0
- examples/2716791.jpg +0 -0
- examples/720302.jpg +0 -0
- model.py +36 -0
- requirements.txt +3 -0
11-model_deployment_effnetb2.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d6ecd08208539742ad16732887698bed25ce873a0981c11d01aedd641c9f0ecc
+size 31285489
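Note: this pointer file stands in for the real ~31 MB checkpoint, which Git LFS stores out of band. A minimal sketch of how such a checkpoint is typically produced (assuming a trained model instance from create_effnetb2_model below; not part of the commit itself):

    import torch
    from model import create_effnetb2_model

    # Build the model (in practice, this would be the trained instance)
    effnetb2, _ = create_effnetb2_model(num_classes=3)

    # Saving only the state_dict yields the .pth file that LFS tracks
    torch.save(obj=effnetb2.state_dict(), f="11-model_deployment_effnetb2.pth")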
FoodVision_Mini/.gitattributes ADDED
@@ -0,0 +1,35 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
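These rules route large binaries (including the *.pth checkpoint above) through Git LFS. A small sketch, not part of the commit, for checking from Python whether a cloned file came down as real binary content or as an un-smudged LFS pointer:

    from pathlib import Path

    def is_lfs_pointer(path: str) -> bool:
        # LFS pointer files are tiny text files that start with the spec URL
        head = Path(path).read_bytes()[:64]
        return head.startswith(b"version https://git-lfs.github.com/spec/v1")

    print(is_lfs_pointer("11-model_deployment_effnetb2.pth"))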
FoodVision_Mini/README.md ADDED
@@ -0,0 +1,14 @@
+---
+title: FoodVision Mini
+emoji: 🐠
+colorFrom: pink
+colorTo: purple
+sdk: gradio
+sdk_version: 5.49.1
+app_file: app.py
+pinned: false
+license: mit
+short_description: classify pizza, steak and sushi
+---
+
+Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,54 @@
+### 1. Imports and class names setup ###
+import gradio as gr
+import os
+import torch
+
+from model import create_effnetb2_model
+from timeit import default_timer as timer
+from typing import Tuple, Dict
+
+class_names = ["pizza", "steak", "sushi"]
+
+### 2. Model and transforms preparation ###
+effnetb2, effnetb2_transforms = create_effnetb2_model(num_classes=len(class_names))
+
+# Load the saved weights onto the CPU (Spaces run without a GPU by default)
+effnetb2.load_state_dict(
+    torch.load(f="11-model_deployment_effnetb2.pth", map_location=torch.device("cpu"))
+)
+
+### 3. Predict function ###
+def predict(img) -> Tuple[Dict, float]:
+    # Start a timer
+    start_time = timer()
+
+    # Transform the input image and add a batch dimension
+    transformed_image = effnetb2_transforms(img).unsqueeze(0)
+
+    # Put the model in eval mode and make the prediction
+    effnetb2.eval()
+    with torch.inference_mode():
+        logits = effnetb2(transformed_image)
+        probs = torch.softmax(logits, dim=1)
+
+    # Create a {prediction label: prediction probability} dictionary
+    pred_label_dict = {class_names[i]: probs[0][i].item() for i in range(len(class_names))}
+
+    # Calculate the prediction time
+    end_time = timer()
+    inference_time = round(end_time - start_time, 4)
+
+    # Return the label dict and the inference time
+    return pred_label_dict, inference_time
+
+### 4. Gradio interface ###
+title = "FoodVision Mini 🍕🥩🍣"
+description = "An EfficientNetB2 feature extractor model that classifies images as pizza, steak or sushi."
+
+example_list = [["examples/" + example] for example in os.listdir("examples")]
+
+# Create the Gradio demo
+demo = gr.Interface(
+    fn=predict,
+    inputs=gr.Image(type="pil"),
+    outputs=[gr.Label(num_top_classes=3, label="Predictions"),
+             gr.Number(label="Prediction time (s)")],
+    examples=example_list,
+    title=title,
+    description=description,
+)
+demo.launch(share=False)
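As a sanity check, the prediction pipeline can be exercised outside Gradio. A minimal sketch, assuming the checkpoint and the examples/ directory sit alongside the script as in this commit:

    import torch
    from PIL import Image
    from model import create_effnetb2_model

    # Rebuild the architecture and load the uploaded checkpoint on CPU
    model, transforms = create_effnetb2_model(num_classes=3)
    model.load_state_dict(torch.load("11-model_deployment_effnetb2.pth", map_location="cpu"))
    model.eval()

    # Run one of the committed example images through the model
    img = transforms(Image.open("examples/1555015.jpg")).unsqueeze(0)
    with torch.inference_mode():
        probs = torch.softmax(model(img), dim=1)
    print(probs)  # tensor of shape [1, 3]: [[p_pizza, p_steak, p_sushi]]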
examples/1555015.jpg ADDED
examples/2716791.jpg ADDED
examples/720302.jpg ADDED
model.py ADDED
@@ -0,0 +1,36 @@
+import torch
+import torchvision
+
+from torch import nn
+
+
+def create_effnetb2_model(num_classes: int = 3,
+                          seed: int = 42):
+    """Creates an EfficientNetB2 feature extractor model and transforms.
+
+    Args:
+        num_classes (int, optional): number of classes in the classifier head.
+            Defaults to 3.
+        seed (int, optional): random seed value. Defaults to 42.
+
+    Returns:
+        model (torch.nn.Module): EffNetB2 feature extractor model.
+        transforms (torchvision.transforms): EffNetB2 image transforms.
+    """
+    # Create EffNetB2 pretrained weights, transforms and model
+    weights = torchvision.models.EfficientNet_B2_Weights.DEFAULT
+    transforms = weights.transforms()
+    model = torchvision.models.efficientnet_b2(weights=weights)
+
+    # Freeze all layers in the base model
+    for param in model.parameters():
+        param.requires_grad = False
+
+    # Change the classifier head (with a random seed for reproducibility)
+    torch.manual_seed(seed)
+    model.classifier = nn.Sequential(
+        nn.Dropout(p=0.3, inplace=True),
+        nn.Linear(in_features=1408, out_features=num_classes),
+    )
+
+    return model, transforms
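A quick check (a sketch, not part of the commit) that the freeze worked as intended, i.e. only the new classifier head remains trainable:

    from model import create_effnetb2_model

    model, _ = create_effnetb2_model(num_classes=3)
    trainable = [name for name, p in model.named_parameters() if p.requires_grad]
    print(trainable)  # expect only ['classifier.1.weight', 'classifier.1.bias']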
requirements.txt ADDED
@@ -0,0 +1,3 @@
+torch==2.7.1
+torchvision==0.22.1
+gradio==5.49.0
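Note: torchvision 0.22.1 is the release paired with torch 2.7.1; the originally pinned 0.8.2 belongs to the torch 1.7 era and would fail to install alongside it. A one-line sketch to confirm the environment matches the pins after installation:

    import gradio, torch, torchvision

    # Versions should match requirements.txt; the app loads the model on CPU
    print(torch.__version__, torchvision.__version__, gradio.__version__)
    print(torch.cuda.is_available())  # False on a default (CPU) Space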