Shilpaj committed on
Commit
bb98138
·
verified ·
1 Parent(s): b024e46

Fix: Inference issue

Browse files
Files changed (1) hide show
  1. inference.py +13 -18
inference.py CHANGED
@@ -16,29 +16,24 @@ from pytorch_grad_cam.utils.image import show_cam_on_image
16
  from pytorch_grad_cam.utils.model_targets import ClassifierOutputTarget
17
 
18
 
19
- def inference(input_img,
20
- model,
21
- classes,
22
- transparency=0.5,
23
- number_of_top_classes=3,
24
- target_layer_number=4):
25
  """
26
  Function to run inference on the input image
27
- :param input_img: Image provided by the user
 
 
 
28
  :param model: Model to use for inference
29
  :param classes: Classes to use for inference
30
- :param transparency: Percentage of cam overlap over the input image
31
- :param number_of_top_classes: Number of top predictions for the input image
32
- :param target_layer_number: Layer for which GradCam to be shown
33
  """
34
  # Save a copy of input img
35
- org_img = input_img.copy()
36
 
37
  # Calculate mean over each channel of input image
38
- mean_r, mean_g, mean_b = np.mean(input_img[:, :, 0]/255.), np.mean(input_img[:, :, 1]/255.), np.mean(input_img[:, :, 2]/255.)
39
 
40
  # Calculate Standard deviation over each channel
41
- std_r, std_g, std_b = np.std(input_img[:, :, 0]/255.), np.std(input_img[:, :, 1]/255.), np.std(input_img[:, :, 2]/255.)
42
 
43
  # Convert img to tensor and normalize it
44
  _transform = transforms.Compose([
@@ -47,7 +42,7 @@ def inference(input_img,
47
  ])
48
 
49
  # Preprocess the input image
50
- input_tensor = _transform(input_img)
51
 
52
  # Create a mini-batch as expected by the model
53
  input_tensor = input_tensor.unsqueeze(0)
@@ -66,7 +61,7 @@ def inference(input_img,
66
 
67
  # Select the top classes based on user input
68
  sorted_confidences = sorted(confidences.items(), key=lambda val: val[1], reverse=True)
69
- show_confidences = OrderedDict(sorted_confidences[:number_of_top_classes])
70
 
71
  # Map layer numbers to meaningful parts of the ResNet architecture
72
  _layers = {
@@ -79,8 +74,8 @@ def inference(input_img,
79
  }
80
 
81
  # Ensure valid layer selection
82
- target_layer_number = min(max(target_layer_number, 1), 6)
83
- target_layers = [_layers[target_layer_number]]
84
 
85
  # Get the class activations from the selected layer
86
  cam = GradCAM(model=model, target_layers=target_layers)
@@ -98,5 +93,5 @@ def inference(input_img,
98
  grayscale_cam = grayscale_cam[0, :]
99
 
100
  # Overlay input image with Class activations
101
- visualization = show_cam_on_image(org_img/255., grayscale_cam, use_rgb=True, image_weight=transparency)
102
  return show_confidences, visualization
 
16
  from pytorch_grad_cam.utils.model_targets import ClassifierOutputTarget
17
 
18
 
19
+ def inference(image, alpha, top_k, target_layer, model=None, classes=None):
 
 
 
 
 
20
  """
21
  Function to run inference on the input image
22
+ :param image: Image provided by the user
23
+ :param alpha: Percentage of cam overlap over the input image
24
+ :param top_k: Number of top predictions for the input image
25
+ :param target_layer: Layer for which GradCam to be shown
26
  :param model: Model to use for inference
27
  :param classes: Classes to use for inference
 
 
 
28
  """
29
  # Save a copy of input img
30
+ org_img = image.copy()
31
 
32
  # Calculate mean over each channel of input image
33
+ mean_r, mean_g, mean_b = np.mean(image[:, :, 0]/255.), np.mean(image[:, :, 1]/255.), np.mean(image[:, :, 2]/255.)
34
 
35
  # Calculate Standard deviation over each channel
36
+ std_r, std_g, std_b = np.std(image[:, :, 0]/255.), np.std(image[:, :, 1]/255.), np.std(image[:, :, 2]/255.)
37
 
38
  # Convert img to tensor and normalize it
39
  _transform = transforms.Compose([
 
42
  ])
43
 
44
  # Preprocess the input image
45
+ input_tensor = _transform(image)
46
 
47
  # Create a mini-batch as expected by the model
48
  input_tensor = input_tensor.unsqueeze(0)
 
61
 
62
  # Select the top classes based on user input
63
  sorted_confidences = sorted(confidences.items(), key=lambda val: val[1], reverse=True)
64
+ show_confidences = OrderedDict(sorted_confidences[:top_k])
65
 
66
  # Map layer numbers to meaningful parts of the ResNet architecture
67
  _layers = {
 
74
  }
75
 
76
  # Ensure valid layer selection
77
+ target_layer = min(max(target_layer, 1), 6)
78
+ target_layers = [_layers[target_layer]]
79
 
80
  # Get the class activations from the selected layer
81
  cam = GradCAM(model=model, target_layers=target_layers)
 
93
  grayscale_cam = grayscale_cam[0, :]
94
 
95
  # Overlay input image with Class activations
96
+ visualization = show_cam_on_image(org_img/255., grayscale_cam, use_rgb=True, image_weight=alpha)
97
  return show_confidences, visualization