NifulIslam committed on
Commit
9e9ccc9
·
1 Parent(s): 43c265e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +21 -7
app.py CHANGED
@@ -7,6 +7,8 @@ import torchvision.transforms as transforms
7
  import pickle
8
  from torchvision import transforms, models
9
 
 
 
10
  class FineTunedVGG(nn.Module):
11
  def __init__(self, num_classes, input_size=224):
12
  super(FineTunedVGG, self).__init__()
@@ -50,7 +52,7 @@ model = torch.load("model.pth",map_location ='cpu')
50
  with open("encoder.pkl", "rb") as encoder_file:
51
  label_encoder = pickle.load(encoder_file)
52
 
53
- def preprocess_image(image_path):
54
  transform = transforms.Compose([
55
  transforms.Resize((224, 224)),
56
  transforms.ToTensor(),
@@ -63,11 +65,23 @@ def preprocess_image(image_path):
63
 
64
 
65
  def recognize_image(image):
66
- output = model.predict(image)
67
- probs = torch.softmax(output, dim=1)[0].tolist()
68
- class_labels = label_encoder.classes_
69
- output_dict = dict(zip(class_labels, map(float, probs)))
70
- return output_dict
 
 
 
 
 
 
 
 
 
 
 
 
71
 
72
  image = gr.inputs.Image(shape=(224,224))
73
  label = gr.outputs.Label(num_top_classes=10)
@@ -81,4 +95,4 @@ examples = [
81
 
82
 
83
  iface = gr.Interface(fn=recognize_image, inputs=image, outputs=label, examples=examples)
84
- iface.launch(inline=False)
 
7
  import pickle
8
  from torchvision import transforms, models
9
 
10
+ device = torch.device('cpu')
11
+
12
  class FineTunedVGG(nn.Module):
13
  def __init__(self, num_classes, input_size=224):
14
  super(FineTunedVGG, self).__init__()
 
52
  with open("encoder.pkl", "rb") as encoder_file:
53
  label_encoder = pickle.load(encoder_file)
54
 
55
+ def preprocess_image(image):
56
  transform = transforms.Compose([
57
  transforms.Resize((224, 224)),
58
  transforms.ToTensor(),
 
65
 
66
 
67
def recognize_image(image):
    """Classify *image* and return a ``{class_name: probability}`` mapping.

    Relies on module-level globals defined elsewhere in app.py:
    ``model`` (the fine-tuned VGG), ``device`` (cpu), and
    ``label_encoder`` (a fitted encoder exposing ``classes_``).
    """
    # NOTE(review): mean 0 / std 1 makes Normalize an identity op — a
    # fine-tuned VGG usually expects the ImageNet statistics; confirm
    # against how the model was trained before changing it.
    pipeline = transforms.Compose([
        transforms.ToTensor(),
        transforms.Resize((224, 224)),
        transforms.Normalize([0.0, 0.0, 0.0], [1.0, 1.0, 1.0]),
    ])

    # Build a 1-image batch on the target device.
    batch = pipeline(image).float().unsqueeze_(0).to(device)

    model.eval()
    with torch.no_grad():
        logits = model(batch)

    probabilities = torch.softmax(logits, dim=1)[0].tolist()
    return {
        label: float(p)
        for label, p in zip(label_encoder.classes_, probabilities)
    }
85
 
86
  image = gr.inputs.Image(shape=(224,224))
87
  label = gr.outputs.Label(num_top_classes=10)
 
95
 
96
 
97
  iface = gr.Interface(fn=recognize_image, inputs=image, outputs=label, examples=examples)
98
+ iface.launch(inline=False)