sgshiva25 committed
Commit aee3729 · verified · 1 Parent(s): 265ea0a

First Commit

09_pretrained_effnetb2_feature_extractor_pizza_steak_sushi_20_percent.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2a562ae6c3fb899b49658e4e0a2e6cf8145e65265ea9252e99228cf4cea9afd4
+ size 31307450
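For context (not part of the commit): the pointer's oid and size describe the checkpoint stored in Git LFS. A minimal sketch like the one below, assuming the real .pth file has been pulled locally, can confirm the two match:

# Hypothetical local check that the pulled LFS file matches the pointer metadata above.
import hashlib
import os

path = "09_pretrained_effnetb2_feature_extractor_pizza_steak_sushi_20_percent.pth"
expected_oid = "2a562ae6c3fb899b49658e4e0a2e6cf8145e65265ea9252e99228cf4cea9afd4"
expected_size = 31307450

print(os.path.getsize(path) == expected_size)  # size in bytes
with open(path, "rb") as f:
    print(hashlib.sha256(f.read()).hexdigest() == expected_oid)  # sha256 oid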
README.md CHANGED
@@ -1,10 +1,10 @@
  ---
  title: FoodVision Mini
  emoji: 🚀
- colorFrom: green
- colorTo: red
  sdk: gradio
- sdk_version: 5.25.2
  app_file: app.py
  pinned: false
  license: mit

  ---
  title: FoodVision Mini
  emoji: 🚀
+ colorFrom: red
+ colorTo: gray
  sdk: gradio
+ sdk_version: 5.24.0
  app_file: app.py
  pinned: false
  license: mit
app.py ADDED
@@ -0,0 +1,67 @@
+
+ import gradio as gr
+ import os
+ import torch
+
+ from model import create_effnetb2_model
+ from timeit import default_timer as timer
+ from typing import Tuple, Dict
+
+ class_names = ['pizza', 'steak', 'sushi']
+
+ effnetb2, effnetb2_transforms = create_effnetb2_model(num_classes = 3)
+
+ effnetb2.load_state_dict(
+     torch.load(
+         f="09_pretrained_effnetb2_feature_extractor_pizza_steak_sushi_20_percent.pth",
+         map_location=torch.device("cpu"),  # load to CPU
+     )
+ )
+
+ ### 3. Predict function ###
+
+ # Create predict function
+ def predict(img) -> Tuple[Dict, float]:
+     """Transforms and performs a prediction on img and returns prediction and time taken.
+     """
+     # Start the timer
+     start_time = timer()
+
+     # Transform the target image and add a batch dimension
+     img = effnetb2_transforms(img).unsqueeze(0)
+
+     # Put model into evaluation mode and turn on inference mode
+     effnetb2.eval()
+     with torch.inference_mode():
+         # Pass the transformed image through the model and turn the prediction logits into prediction probabilities
+         pred_probs = torch.softmax(effnetb2(img), dim=1)
+
+     # Create a prediction label and prediction probability dictionary for each prediction class (this is the required format for Gradio's output parameter)
+     pred_labels_and_probs = {class_names[i]: float(pred_probs[0][i]) for i in range(len(class_names))}
+
+     # Calculate the prediction time
+     pred_time = round(timer() - start_time, 5)
+
+     # Return the prediction dictionary and prediction time
+     return pred_labels_and_probs, pred_time
+
+ title = "FoodVision Mini 🍕🥩🍣"
+ description = "An EfficientNetB2 feature extractor computer vision model to classify images of food as pizza, steak or sushi."
+ article = "Created at [09. PyTorch Model Deployment](https://www.learnpytorch.io/09_pytorch_model_deployment/)."
+
+ # Create examples list from "examples/" directory
+ example_list = [["examples/" + example] for example in os.listdir("examples")]
+
+ # Create the Gradio demo
+ demo = gr.Interface(fn=predict,  # mapping function from input to output
+                     inputs=gr.Image(type="pil"),  # what are the inputs?
+                     outputs=[gr.Label(num_top_classes=3, label="Predictions"),  # what are the outputs?
+                              gr.Number(label="Prediction time (s)")],  # our fn has two outputs, therefore we have two outputs
+                     # Create examples list from "examples/" directory
+                     examples=example_list,
+                     title=title,
+                     description=description,
+                     article=article)
+
+ # Launch the demo!
+ demo.launch(share=False)
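As a quick offline sanity check (a minimal sketch, not part of the commit; the image path below is a placeholder), the same inference path can be exercised without launching Gradio:

# Standalone check mirroring app.py's inference path (placeholder image path).
import torch
from PIL import Image
from model import create_effnetb2_model

class_names = ["pizza", "steak", "sushi"]
model, transforms = create_effnetb2_model(num_classes=3)
model.load_state_dict(
    torch.load(
        "09_pretrained_effnetb2_feature_extractor_pizza_steak_sushi_20_percent.pth",
        map_location=torch.device("cpu"),
    )
)
model.eval()

img = Image.open("examples/sample.jpg")  # placeholder: any pizza/steak/sushi image
batch = transforms(img).unsqueeze(0)     # preprocess and add a batch dimension
with torch.inference_mode():
    probs = torch.softmax(model(batch), dim=1)
print({name: float(probs[0][i]) for i, name in enumerate(class_names)})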
gitattributes ADDED
@@ -0,0 +1,36 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
+ 09_pretrained_effnetb2_feature_extractor_food101_20_percent.pth filter=lfs diff=lfs merge=lfs -text
model.py ADDED
@@ -0,0 +1,36 @@
+ import torch
+ import torchvision
+
+ from torch import nn
+
+
+ def create_effnetb2_model(num_classes:int=3,
+                           seed:int=42):
+     """Creates an EfficientNetB2 feature extractor model and transforms.
+
+     Args:
+         num_classes (int, optional): number of classes in the classifier head.
+             Defaults to 3.
+         seed (int, optional): random seed value. Defaults to 42.
+
+     Returns:
+         model (torch.nn.Module): EffNetB2 feature extractor model.
+         transforms (torchvision.transforms): EffNetB2 image transforms.
+     """
+     # Create EffNetB2 pretrained weights, transforms and model
+     weights = torchvision.models.EfficientNet_B2_Weights.DEFAULT
+     transforms = weights.transforms()
+     model = torchvision.models.efficientnet_b2(weights=weights)
+
+     # Freeze all layers in base model
+     for param in model.parameters():
+         param.requires_grad = False
+
+     # Change classifier head with random seed for reproducibility
+     torch.manual_seed(seed)
+     model.classifier = nn.Sequential(
+         nn.Dropout(p=0.3, inplace=True),
+         nn.Linear(in_features=1408, out_features=num_classes),
+     )
+
+     return model, transforms
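A minimal shape check for create_effnetb2_model (illustrative only, not part of the commit):

# Verify the rebuilt classifier head produces one logit per class.
import torch
from model import create_effnetb2_model

model, transforms = create_effnetb2_model(num_classes=3, seed=42)
model.eval()
with torch.inference_mode():
    logits = model(torch.randn(1, 3, 288, 288))  # 288x288 matches the EffNetB2 preset transforms
print(logits.shape)  # expected: torch.Size([1, 3])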
requirements.txt ADDED
@@ -0,0 +1,5 @@
+ torch == 1.12.0
+ torchvision == 0.13.0
+ gradio==3.50.2
+ httpx==0.24.1
+ httpcore==0.17.3