PrarthanaTS committed on
Commit
1fa98f0
·
1 Parent(s): 92af33a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +24 -15
app.py CHANGED
@@ -63,12 +63,12 @@ scaled_anchors = (
63
  * torch.tensor(config.S).unsqueeze(1).unsqueeze(1).repeat(1, 3, 2)
64
  )
65
 
66
- def process_image_and_plot(image, model, scaled_anchors):
67
 
68
  transformed_image = transforms(image=image)["image"].unsqueeze(0)
69
  output = model(transformed_image)
70
  bboxes = [[] for _ in range(1)]
71
-
72
  for i in range(3):
73
  batch_size, A, S, _, _ = output[i].shape
74
  anchor = scaled_anchors[i]
@@ -77,17 +77,25 @@ def process_image_and_plot(image, model, scaled_anchors):
77
  bboxes[idx] += box
78
 
79
  nms_boxes = non_max_suppression(
80
- bboxes[0], iou_threshold=0.5, threshold=0.4, box_format="midpoint",
81
  )
82
  fig = plot_image(transformed_image[0].permute(1, 2, 0), nms_boxes)
83
 
84
- cam = YoloCAM(model=model, target_layers=[model.model.layers[-2]], use_cuda=False)
85
- grayscale_cam = cam(transformed_image, scaled_anchors)[0, :, :]
86
- img = cv2.resize(image, (416, 416))
87
- img = np.float32(img) / 255
88
- cam_image = show_cam_on_image(img, grayscale_cam, use_rgb=True)
89
-
90
- return fig,cam_image
 
 
 
 
 
 
 
 
91
 
92
 
93
  examples = [
@@ -96,9 +104,6 @@ examples = [
96
  ["images/automobile.jpg"],
97
  ]
98
 
99
- def processed_image(image):
100
- figure,gradcam = process_image_and_plot(image, model, scaled_anchors)
101
- return figure,gradcam
102
 
103
  icon_html = '<i class="fas fa-chart-bar"></i>'
104
  title_with_icon = f"""
@@ -123,8 +128,12 @@ description_with_icon = f"""
123
  """
124
 
125
 
126
- demo = gr.Interface(processed_image,
127
- inputs=[gr.Image(label="Input Image"),],
 
 
 
 
128
  outputs=[
129
  gr.Plot(label="Output with Classes",),
130
  gr.Image(shape=(32, 32), label="GradCAM Output"),
 
63
  * torch.tensor(config.S).unsqueeze(1).unsqueeze(1).repeat(1, 3, 2)
64
  )
65
 
66
+ def process_image_and_plot(image,iou_threshold=0.5, threshold=0.4,show_cam="No"):
67
 
68
  transformed_image = transforms(image=image)["image"].unsqueeze(0)
69
  output = model(transformed_image)
70
  bboxes = [[] for _ in range(1)]
71
+
72
  for i in range(3):
73
  batch_size, A, S, _, _ = output[i].shape
74
  anchor = scaled_anchors[i]
 
77
  bboxes[idx] += box
78
 
79
  nms_boxes = non_max_suppression(
80
+ bboxes[0], iou_threshold=iou_threshold, threshold=threshold, box_format="midpoint",
81
  )
82
  fig = plot_image(transformed_image[0].permute(1, 2, 0), nms_boxes)
83
 
84
+ if target_layer == -2:
85
+ layer = [model.model.layers[-2]]
86
+ else:
87
+ layer = [model.model.layers[-1]]
88
+
89
+ cam = YoloCAM(model=model, target_layers=layer, use_cuda=False)
90
+ if show_cam == "No":
91
+ return fig
92
+ else:
93
+ grayscale_cam = cam(transformed_image, scaled_anchors)[0, :, :]
94
+ img = cv2.resize(image, (416, 416))
95
+ img = np.float32(img) / 255
96
+ cam_image = show_cam_on_image(img, grayscale_cam, use_rgb=True)
97
+
98
+ return fig,cam_image
99
 
100
 
101
  examples = [
 
104
  ["images/automobile.jpg"],
105
  ]
106
 
 
 
 
107
 
108
  icon_html = '<i class="fas fa-chart-bar"></i>'
109
  title_with_icon = f"""
 
128
  """
129
 
130
 
131
+ demo = gr.Interface(process_image_and_plot,
132
+ inputs=[gr.Image(label="Input Image"),
133
+ gr.Slider(0, 1, value=0.5, label="Intersection over Union (IOU) Threshold",info="Determines how much overlap between two boxes, Set it low to filter out weaker predicts"),
134
+ gr.Slider(0, 1, value=0.4, label="Threshold"),
135
+ gr.Radio(["Yes", "No"], value="No" , label="Show GradCAM outputs"),
136
+ gr.Slider(-2, -1, value=-1, step=1, label="Which Layer?"),],
137
  outputs=[
138
  gr.Plot(label="Output with Classes",),
139
  gr.Image(shape=(32, 32), label="GradCAM Output"),