Chukwuka committed on
Commit
2d0730a
·
1 Parent(s): d8489b6

First commit

Browse files
07_effnetb2_data_50_percent_10_epochs.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9dc265ff27b5ff26522a292f8039b4c910a49f4a8754ef4b90aafc7d1f00f9f6
3
+ size 31273033
09_pretrained_effnetb2_feature_extractor_pizza_steak_sushi_20_percent.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e65879e63a119f1dcc76e8928790aac49373e4bc1d29c0208b7e7c6f88dee2bb
3
+ size 31273033
app.py ADDED
@@ -0,0 +1,91 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
### 1. Imports and class names setup ###
import os
from timeit import default_timer as timer
from typing import Dict, Tuple

import gradio as gr
import torch
import torchvision.transforms as T

from model import create_effnet_b2

# Setup class names
class_names = ['pizza', 'steak', 'sushi']

### 2. Model and transforms preparation ###
# ImageNet normalisation stats — these must match the statistics the
# pretrained EffNetB2 backbone was trained with.
test_tsfm = T.Compose([
    T.Resize((224, 224)),
    T.ToTensor(),
    T.Normalize(mean=[0.485, 0.456, 0.406],
                std=[0.229, 0.224, 0.225]),
])

# Create EffNetB2 model. The second return value is the transform we passed
# in, returned unchanged, so we keep using `test_tsfm` directly below.
effnetb2, _ = create_effnet_b2(num_of_class=len(class_names),
                               transform=test_tsfm,
                               seed=42)

saved_path = '07_effnetb2_data_50_percent_10_epochs.pth'

print('Loading Model State Dictionary')
# Load saved weights onto CPU (the demo host has no GPU by default).
effnetb2.load_state_dict(
    torch.load(f=saved_path,
               map_location=torch.device('cpu'))
)
print('Model Loaded ...')

### 3. Predict function ###
def predict(img) -> Tuple[Dict, float]:
    """Transform a PIL image, run inference, and time the prediction.

    Args:
        img: input image (PIL), as supplied by the Gradio Image component.

    Returns:
        Tuple of (labels_and_probs, pred_time) where labels_and_probs maps
        each class name to its predicted probability (the format Gradio's
        Label output expects) and pred_time is the inference time in seconds.
    """
    # Start the timer
    start_time = timer()

    # Transform the target image and add a batch dimension
    img = test_tsfm(img).unsqueeze(0)

    # Put model into evaluation mode and turn on inference mode
    effnetb2.eval()
    with torch.inference_mode():
        # Turn the prediction logits into prediction probabilities
        pred_probs = torch.softmax(effnetb2(img), dim=1)

    # {class_name: probability} — the required format for gr.Label
    pred_labels_and_probs = {class_names[i]: float(pred_probs[0][i])
                             for i in range(len(class_names))}

    # Calculate the prediction time
    pred_time = round(timer() - start_time, 5)

    # Return the prediction dictionary and prediction time
    return pred_labels_and_probs, pred_time

### 4. Gradio App ###

# Create title, description and article strings
title = 'FoodVision Mini 🍕🥩🍣'
description = "An EfficientNetB2 feature extractor computer vision model to classify images of food as pizza, steak or sushi."
article = "Created at [09. PyTorch Model Deployment](https://www.learnpytorch.io/09_pytorch_model_deployment/)."

# Create examples list from "examples/" directory
example_list = [["examples/" + example] for example in os.listdir("examples")]

# Create the Gradio demo. Use gr.Image directly — the gr.inputs.* namespace
# is deprecated in gradio 3.x (and removed in 4.x); this also keeps the call
# consistent with the new-style gr.Label / gr.Number outputs below.
demo = gr.Interface(fn=predict,                # mapping function from input to output
                    inputs=gr.Image(type='pil'),
                    outputs=[gr.Label(num_top_classes=3, label="Predictions"),
                             gr.Number(label='Prediction time (s)')],  # fn has two outputs
                    examples=example_list,
                    title=title,
                    description=description,
                    article=article)

# Launch the demo
print('Gradio Demo Launched')
demo.launch()
examples/140016.jpg ADDED
examples/647683.jpg ADDED
examples/715227.jpg ADDED
model.py ADDED
@@ -0,0 +1,44 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import torch
import torch.nn as nn
import torchvision


# Create an EffNetB2 feature extractor
def create_effnet_b2(num_of_class: int = 3,
                     transform=None,
                     seed: int = 42):
    """Creates an EfficientNetB2 feature extractor model and transforms.

    Args:
        num_of_class (int, optional): number of classes in the classifier
            head. Defaults to 3.
        transform (optional): image transforms to return alongside the
            model; passed through unchanged. Defaults to None.
        seed (int, optional): random seed used when initialising the new
            classifier head, for reproducible weights. Defaults to 42.

    Returns:
        model (torch.nn.Module): EffNetB2 feature extractor model with a
            frozen backbone and a freshly initialised classifier head.
        transform: the `transform` argument, returned unchanged.
    """
    # 1. Get the base model with pretrained ImageNet weights.
    #    (`pretrained=` is the correct API for the pinned torchvision
    #    0.11; newer torchvision versions use `weights=` instead.)
    model = torchvision.models.efficientnet_b2(pretrained=True)

    # 2. Freeze the base model layers so only the new head will train
    for param in model.parameters():
        param.requires_grad = False

    # 3. Set the seed before building the head for reproducible init
    torch.manual_seed(seed)

    # 4. Replace the classifier head (EffNetB2's feature dimension is 1408)
    model.classifier = nn.Sequential(
        nn.Dropout(p=0.3, inplace=True),
        nn.Linear(in_features=1408, out_features=num_of_class, bias=True),
    )

    return model, transform
requirements.txt ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+
2
+ torch==1.10.0
3
+ torchvision==0.11.0
4
+ gradio==3.16.2