Update mask_adapter/sam_maskadapter.py
mask_adapter/sam_maskadapter.py (CHANGED)
```diff
@@ -131,7 +131,7 @@ class SAMVisualizationDemo(object):
         pred_masks = np.row_stack(pred_masks)
         pred_masks = BitMasks(pred_masks)
 
-        image = torch.as_tensor(image.astype("
+        image = torch.as_tensor(image.astype("float32").transpose(2, 0, 1))
 
         pixel_mean = torch.tensor(PIXEL_MEAN).view(-1, 1, 1)
         pixel_std = torch.tensor(PIXEL_STD).view(-1, 1, 1)
```
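The new line converts the raw HWC uint8 image into a CHW float32 tensor before the per-channel mean/std normalization that follows it. A minimal, self-contained sketch of that preprocessing, using illustrative PIXEL_MEAN/PIXEL_STD values (the repo's actual constants may differ):

```python
import numpy as np
import torch

# Assumed values for illustration only; the repo defines its own constants.
PIXEL_MEAN = [122.7709, 116.7460, 104.0937]
PIXEL_STD = [68.5005, 66.6322, 70.3232]

# A stand-in HWC uint8 image, as produced by a typical image loader.
image = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)

# HWC uint8 -> CHW float32 tensor, matching the '+' line in this hunk.
image_t = torch.as_tensor(image.astype("float32").transpose(2, 0, 1))

pixel_mean = torch.tensor(PIXEL_MEAN).view(-1, 1, 1)
pixel_std = torch.tensor(PIXEL_STD).view(-1, 1, 1)
image_t = (image_t - pixel_mean) / pixel_std  # per-channel normalization

print(image_t.shape, image_t.dtype)  # torch.Size([3, 480, 640]) torch.float32
```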
```diff
@@ -151,13 +151,13 @@ class SAMVisualizationDemo(object):
         # text_features = self.clip_model.encode_text(text)
         # text_features /= text_features.norm(dim=-1, keepdim=True)
 
-        features = self.extract_features_convnext(image.
+        features = self.extract_features_convnext(image.float())
 
         clip_feature = features['clip_vis_dense']
 
         clip_vis_dense = self.visual_prediction_forward_convnext_2d(clip_feature)
 
-        semantic_activation_maps = self.mask_adapter(clip_vis_dense, pred_masks.tensor.unsqueeze(0).to(text_features).
+        semantic_activation_maps = self.mask_adapter(clip_vis_dense, pred_masks.tensor.unsqueeze(0).to(text_features).float())
 
         maps_for_pooling = F.interpolate(semantic_activation_maps, size=clip_feature.shape[-2:],
                                          mode='bilinear', align_corners=False)
```
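Both edits in this hunk append an explicit `.float()` cast: `image.float()` before feature extraction, and a trailing `.float()` after `.to(text_features)`, which would otherwise match the masks to `text_features`' device and dtype (possibly half precision). The failure this most plausibly fixes is a half/float dtype mismatch inside the model, since a PyTorch module and its input must share a dtype. A minimal reproduction, independent of the Mask-Adapter code:

```python
import torch
import torch.nn as nn

conv = nn.Conv2d(3, 8, 3).float()                       # float32 weights
x_half = torch.randn(1, 3, 32, 32, dtype=torch.float16)  # float16 input

try:
    conv(x_half)  # mixing dtypes raises a RuntimeError
except RuntimeError as e:
    print("dtype mismatch:", e)

# Casting the input to the module's dtype, as the commit does, fixes it.
out = conv(x_half.float())
print(out.dtype)  # torch.float32
```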
```diff
@@ -312,12 +312,12 @@ class SAMPointVisualizationDemo(object):
         # text_features /= text_features.norm(dim=-1, keepdim=True)
         #np.save("/home/yongkangli/Mask-Adapter/text_embedding/lvis_coco_text_embedding.npy", text_features.cpu().numpy())
         #text_features = self.text_embedding.to(self.clip_model.device)
-        features = self.extract_features_convnext(image.to(self.clip_model.device).
+        features = self.extract_features_convnext(image.to(self.clip_model.device).float())
         clip_feature = features['clip_vis_dense']
 
         clip_vis_dense = self.visual_prediction_forward_convnext_2d(clip_feature)
 
-        semantic_activation_maps = self.mask_adapter(clip_vis_dense, pred_masks.tensor.unsqueeze(0).to(self.clip_model.device).
+        semantic_activation_maps = self.mask_adapter(clip_vis_dense, pred_masks.tensor.unsqueeze(0).to(self.clip_model.device).float())
         maps_for_pooling = F.interpolate(semantic_activation_maps, size=clip_feature.shape[-2:], mode='bilinear', align_corners=False)
 
         B, C = clip_feature.size(0), clip_feature.size(1)
```
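The interpolated `maps_for_pooling` resize the adapter's per-mask activation maps to the CLIP dense feature map's spatial size, so each map can act as spatial weights that pool one CLIP embedding per mask. A hypothetical sketch of that pooling step, with assumed shapes and a softmax-weighted einsum standing in for the repo's actual pooling code:

```python
import torch
import torch.nn.functional as F

B, C, H, W = 1, 768, 24, 24   # assumed CLIP dense-feature shape
N = 5                         # assumed number of predicted masks
clip_feature = torch.randn(B, C, H, W)
semantic_activation_maps = torch.randn(B, N, 96, 96)  # adapter output (assumed size)

# Resize activation maps to the feature map's resolution, as in the hunk above.
maps_for_pooling = F.interpolate(semantic_activation_maps,
                                 size=clip_feature.shape[-2:],
                                 mode='bilinear', align_corners=False)

# Normalize each map over its spatial positions, then pool one embedding per mask.
weights = maps_for_pooling.flatten(2).softmax(dim=-1)       # (B, N, H*W)
feats = clip_feature.flatten(2)                             # (B, C, H*W)
mask_embeds = torch.einsum('bnl,bcl->bnc', weights, feats)  # (B, N, C)

print(mask_embeds.shape)  # torch.Size([1, 5, 768])
```

These per-mask embeddings are what would then be scored against the text embeddings for open-vocabulary classification.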