ayseakbaba committed on
Commit
1c93353
·
verified ·
1 Parent(s): 63ad173

Upload 3 files

Browse files
Files changed (3) hide show
  1. app.py +35 -0
  2. model.pth +3 -0
  3. requirements.txt +4 -0
app.py ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import torch.nn as nn
3
+ from PIL import Image
4
+ from torchvision import transforms
5
+ import torchvision
6
+ import gradio as gr
7
+
8
# Pretrained EfficientNet-B2 weights and their matching preprocessing
# transform (resize/crop/normalize) from torchvision.
agirliklar = torchvision.models.EfficientNet_B2_Weights.DEFAULT
eff_don = agirliklar.transforms()

# Build the backbone and replace the classifier head for 5 classes.
# 1408 is EfficientNet-B2's feature dimension; p=0.2 matches the
# original classifier's dropout.
model = torchvision.models.efficientnet_b2(weights=agirliklar)
model.classifier = nn.Sequential(nn.Dropout(p=0.2), nn.Linear(1408, 5))
# map_location="cpu" so a checkpoint saved on GPU still loads on a
# CPU-only host (the usual deployment target for this app).
model.load_state_dict(torch.load("model.pth", map_location="cpu"))

# Output labels, in the index order the classifier head was trained with.
class_names = ['a_bir', 'b_iki', 'c_üç', 'd_dört', 'e_beş']
16
def predict(img):
    """Classify an image and return per-class probabilities.

    Args:
        img: input image (e.g. a PIL.Image, or anything the torchvision
            preprocessing transform ``eff_don`` accepts).

    Returns:
        dict mapping each class name to its predicted probability as a
        float — the format Gradio's ``Label`` output component expects.
    """
    # Apply the EfficientNet-B2 preprocessing and add a batch dimension.
    batch = eff_don(img).unsqueeze(0)

    # Inference only: disable dropout and gradient tracking.
    model.eval()
    with torch.inference_mode():
        # Logits -> probabilities over the class axis.
        pred_probs = torch.softmax(model(batch), dim=1)

    # One probability per class, keyed by class name.
    return {name: float(prob) for name, prob in zip(class_names, pred_probs[0])}
model.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:243ccb95ae6eed067f0529729734b86535d21b85c0affcb433d0e9d4cffa4efe
3
+ size 31253178
requirements.txt ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ torch
2
+ gradio
3
+ torchvision
4
+ pillow