Spaces:
Runtime error
Runtime error
Commit
·
55286c8
1
Parent(s):
eb7c7e7
Added some imports back so the app runs.
Browse files
- app.py +5 -1
- predictor.py +10 -5
app.py
CHANGED
|
@@ -34,7 +34,11 @@ def header_white_bg(text, fontsize = 40, bold = True):
|
|
| 34 |
|
| 35 |
def diagnose_health(file):
|
| 36 |
prediction = predict(file)
|
| 37 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 38 |
|
| 39 |
def app():
|
| 40 |
add_bg_from_local('assets/background.png')
|
|
|
|
| 34 |
|
| 35 |
def diagnose_health(file):
    """Run the disease classifier on an uploaded image and format the results.

    Parameters
    ----------
    file : image file-like object, passed straight through to ``predict``.

    Returns
    -------
    list[str]
        One ``"<label>, Probability: <p>"`` line per top-k prediction,
        with the probability rendered to two decimal places.
    """
    prediction = predict(file)
    # Each prediction entry is a dict with 'predicted' and 'probability' keys;
    # 'probability' may arrive as a string, so coerce to float before formatting.
    return [
        f"{p['predicted']}, Probability: {float(p['probability']):.2f}"
        for p in prediction
    ]
|
| 42 |
|
| 43 |
def app():
|
| 44 |
add_bg_from_local('assets/background.png')
|
predictor.py
CHANGED
|
@@ -1,11 +1,16 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
def predict(image_file):
|
| 2 |
|
| 3 |
#load model with params
|
| 4 |
model = models.efficientnet_b0(weights=None)
|
| 5 |
-
model.load_state_dict(torch.load('best_model.pth', map_location=torch.device('cpu')))
|
| 6 |
device = torch.device('cpu')
|
| 7 |
|
| 8 |
-
|
| 9 |
"Apple___Apple_scab",
|
| 10 |
"Apple___Black_rot",
|
| 11 |
"Apple___Cedar_apple_rust",
|
|
@@ -57,7 +62,7 @@ def predict(image_file):
|
|
| 57 |
transforms.Normalize(mean=[0.485, 0.456, 0.406],
|
| 58 |
std=[0.229, 0.224, 0.225])])
|
| 59 |
img_normalized = transform(image).unsqueeze(0)
|
| 60 |
-
|
| 61 |
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
|
| 62 |
img_normalized = img_normalized.to(device)
|
| 63 |
|
|
@@ -67,8 +72,8 @@ def predict(image_file):
|
|
| 67 |
probs, indices = torch.topk(torch.softmax(output, dim=1), topk)
|
| 68 |
# index = output.data.cpu().numpy().argmax()
|
| 69 |
tmp_lst = []
|
| 70 |
-
|
| 71 |
-
|
| 72 |
for j in range(topk):
|
| 73 |
tmp_dct = {}
|
| 74 |
label_indx = indices[0][j]
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
import torchvision.models as models
|
| 3 |
+
from PIL import Image
|
| 4 |
+
from torchvision import transforms
|
| 5 |
+
|
| 6 |
def predict(image_file):
|
| 7 |
|
| 8 |
#load model with params
|
| 9 |
model = models.efficientnet_b0(weights=None)
|
| 10 |
+
model.load_state_dict(torch.load('best_model.pth', map_location=torch.device('cpu')), strict=False)
|
| 11 |
device = torch.device('cpu')
|
| 12 |
|
| 13 |
+
class_names = [
|
| 14 |
"Apple___Apple_scab",
|
| 15 |
"Apple___Black_rot",
|
| 16 |
"Apple___Cedar_apple_rust",
|
|
|
|
| 62 |
transforms.Normalize(mean=[0.485, 0.456, 0.406],
|
| 63 |
std=[0.229, 0.224, 0.225])])
|
| 64 |
img_normalized = transform(image).unsqueeze(0)
|
| 65 |
+
|
| 66 |
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
|
| 67 |
img_normalized = img_normalized.to(device)
|
| 68 |
|
|
|
|
| 72 |
probs, indices = torch.topk(torch.softmax(output, dim=1), topk)
|
| 73 |
# index = output.data.cpu().numpy().argmax()
|
| 74 |
tmp_lst = []
|
| 75 |
+
print(indices)
|
| 76 |
+
print(probs)
|
| 77 |
for j in range(topk):
|
| 78 |
tmp_dct = {}
|
| 79 |
label_indx = indices[0][j]
|