ayseakbaba commited on
Commit
32d5eef
·
verified ·
1 Parent(s): ca7152b

Upload 3 files

Browse files
Files changed (3) hide show
  1. app.py +62 -0
  2. best_model_transfer.pth +3 -0
  3. requirements.txt +4 -0
app.py ADDED
@@ -0,0 +1,62 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import torch.nn as nn
3
+ from PIL import Image
4
+ from torchvision import transforms
5
+ import torchvision
6
+ import gradio as gr
7
+
8
# Pretrained GoogLeNet weights and their matching preprocessing pipeline.
weights = torchvision.models.GoogLeNet_Weights.DEFAULT
transfer_model_transformer = weights.transforms()

transfer_model = torchvision.models.googlenet(weights=weights)

# BUG FIX: GoogLeNet's forward() sends pooled features through the attribute
# named `fc`. The original code assigned the new head to `.classifier`, which
# GoogLeNet never uses -- inference would silently keep the pretrained
# 1000-class `fc` layer. Attach the 2-class head where the network reads it.
transfer_model.fc = nn.Sequential(
    nn.Dropout(p=0.2),
    nn.Linear(in_features=1024, out_features=512), nn.ReLU(),
    nn.Linear(in_features=512, out_features=256), nn.ReLU(),
    nn.Linear(in_features=256, out_features=128), nn.ReLU(),
    nn.Linear(in_features=128, out_features=64), nn.ReLU(),
    nn.Linear(in_features=64, out_features=32), nn.ReLU(),
    nn.Linear(in_features=32, out_features=16), nn.ReLU(),
    nn.Linear(in_features=16, out_features=8), nn.ReLU(),
    nn.Linear(in_features=8, out_features=4), nn.ReLU(),
    nn.Linear(in_features=4, out_features=2),
)

# Load the fine-tuned checkpoint on CPU (Spaces typically have no GPU, and
# a GPU-saved checkpoint would otherwise fail to deserialize here).
state_dict = torch.load("best_model_transfer.pth", map_location="cpu")
# NOTE(review): if the checkpoint was saved from the original (buggy)
# architecture, its head lives under "classifier.*"; remap those keys to
# "fc.*" and drop the stale 1000-class fc weights so loading succeeds.
# Confirm against the training script that produced best_model_transfer.pth.
if any(key.startswith("classifier.") for key in state_dict):
    state_dict = {
        ("fc." + key[len("classifier."):] if key.startswith("classifier.") else key): value
        for key, value in state_dict.items()
        if key not in ("fc.weight", "fc.bias")
    }
transfer_model.load_state_dict(state_dict)

# Output class labels, in the order of the model's logits.
class_names = ['Tere', 'Roka']
25
def predict(img):
    """Classify a PIL image and return per-class probabilities.

    The returned value is a dict mapping each class name to its predicted
    probability, which is the format gradio's Label component expects.
    """
    # Preprocess with the GoogLeNet transform pipeline and prepend a
    # batch dimension so the model sees shape (1, C, H, W).
    batch = transfer_model_transformer(img).unsqueeze(0)

    # Run on CPU in evaluation/inference mode (no autograd bookkeeping).
    transfer_model.to("cpu")
    transfer_model.eval()
    with torch.inference_mode():
        logits = transfer_model(batch)
        probs = torch.softmax(logits, dim=1)

    # Pair every class label with its probability from the single batch row.
    return {name: float(probs[0][idx]) for idx, name in enumerate(class_names)}
47
# Text shown in the gradio UI header.
title = "CRESS ARUGULA DISTINCTIVE"
description = "An artificial intelligence application that recognizes whether the photo uploaded to the system is cress or arugula."

# Wire the classifier into a gradio interface: one PIL image in,
# one probability-per-class label out.
demo = gr.Interface(
    fn=predict,
    inputs=gr.Image(type="pil"),
    outputs=[gr.Label(num_top_classes=len(class_names), label="Predictions")],
    title=title,
    description=description,
)

# Start the app; share=True generates a publicly shareable URL.
demo.launch(debug=False, share=True)
best_model_transfer.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0550657fe32036d8fd70443167cdd954bc04231b15aa1ed9e6e8df0de3a2410f
3
+ size 29488774
requirements.txt ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ torch
2
+ gradio
3
+ torchvision
4
+ pillow