Shengxiao0709 committed on
Commit a84ae56 · verified · 1 Parent(s): 6b4656b

Upload 29 files

Files changed (29)
  1. segment_anything/__init__.py +16 -0
  2. segment_anything/__pycache__/__init__.cpython-310.pyc +0 -0
  3. segment_anything/__pycache__/automatic_mask_generator.cpython-310.pyc +0 -0
  4. segment_anything/__pycache__/build_sam.cpython-310.pyc +0 -0
  5. segment_anything/__pycache__/predictor.cpython-310.pyc +0 -0
  6. segment_anything/automatic_mask_generator.py +383 -0
  7. segment_anything/build_sam.py +146 -0
  8. segment_anything/modeling/__init__.py +12 -0
  9. segment_anything/modeling/__pycache__/__init__.cpython-310.pyc +0 -0
  10. segment_anything/modeling/__pycache__/common.cpython-310.pyc +0 -0
  11. segment_anything/modeling/__pycache__/image_encoder.cpython-310.pyc +0 -0
  12. segment_anything/modeling/__pycache__/mask_decoder.cpython-310.pyc +0 -0
  13. segment_anything/modeling/__pycache__/prompt_encoder.cpython-310.pyc +0 -0
  14. segment_anything/modeling/__pycache__/sam.cpython-310.pyc +0 -0
  15. segment_anything/modeling/__pycache__/transformer.cpython-310.pyc +0 -0
  16. segment_anything/modeling/common.py +44 -0
  17. segment_anything/modeling/image_encoder.py +420 -0
  18. segment_anything/modeling/mask_decoder.py +190 -0
  19. segment_anything/modeling/prompt_encoder.py +226 -0
  20. segment_anything/modeling/sam.py +181 -0
  21. segment_anything/modeling/transformer.py +243 -0
  22. segment_anything/predictor.py +286 -0
  23. segment_anything/utils/__init__.py +6 -0
  24. segment_anything/utils/__pycache__/__init__.cpython-310.pyc +0 -0
  25. segment_anything/utils/__pycache__/amg.cpython-310.pyc +0 -0
  26. segment_anything/utils/__pycache__/transforms.cpython-310.pyc +0 -0
  27. segment_anything/utils/amg.py +347 -0
  28. segment_anything/utils/onnx.py +158 -0
  29. segment_anything/utils/transforms.py +111 -0
segment_anything/__init__.py ADDED
@@ -0,0 +1,16 @@
# -*- coding: utf-8 -*-
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.

# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

from .build_sam import (
    build_sam,
    build_sam_vit_h,
    build_sam_vit_l,
    build_sam_vit_b,
    sam_model_registry,
)
from .predictor import SamPredictor
from .automatic_mask_generator import SamAutomaticMaskGenerator
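For reference, a minimal sketch of how these package-level exports are typically used together. The checkpoint filename matches the ViT-B name expected by build_sam.py below; the placeholder image and prompt point are assumptions for illustration, and the predictor API is the upstream SAM SamPredictor interface uploaded in this commit.

# Usage sketch (assumes the ViT-B checkpoint is available locally).
import numpy as np
from segment_anything import sam_model_registry, SamPredictor

sam = sam_model_registry["vit_b"](checkpoint="sam_vit_b_01ec64.pth")
predictor = SamPredictor(sam)

image = np.zeros((480, 640, 3), dtype=np.uint8)  # placeholder HWC uint8 RGB image
predictor.set_image(image)
masks, scores, logits = predictor.predict(
    point_coords=np.array([[320, 240]]),  # one foreground click
    point_labels=np.array([1]),
    multimask_output=True,
)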
segment_anything/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (423 Bytes).
 
segment_anything/__pycache__/automatic_mask_generator.cpython-310.pyc ADDED
Binary file (11.5 kB).
 
segment_anything/__pycache__/build_sam.cpython-310.pyc ADDED
Binary file (3.36 kB).
 
segment_anything/__pycache__/predictor.cpython-310.pyc ADDED
Binary file (10 kB).
 
segment_anything/automatic_mask_generator.py ADDED
@@ -0,0 +1,383 @@
# -*- coding: utf-8 -*-
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.

# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

import numpy as np
import torch
from torchvision.ops.boxes import batched_nms, box_area  # type: ignore

from typing import Any, Dict, List, Optional, Tuple

from .modeling import Sam
from .predictor import SamPredictor
from .utils.amg import (
    MaskData,
    area_from_rle,
    batch_iterator,
    batched_mask_to_box,
    box_xyxy_to_xywh,
    build_all_layer_point_grids,
    calculate_stability_score,
    coco_encode_rle,
    generate_crop_boxes,
    is_box_near_crop_edge,
    mask_to_rle_pytorch,
    remove_small_regions,
    rle_to_mask,
    uncrop_boxes_xyxy,
    uncrop_masks,
    uncrop_points,
)


class SamAutomaticMaskGenerator:
    def __init__(
        self,
        model: Sam,
        points_per_side: Optional[int] = 32,
        points_per_batch: int = 64,
        pred_iou_thresh: float = 0.88,
        stability_score_thresh: float = 0.95,
        stability_score_offset: float = 1.0,
        box_nms_thresh: float = 0.7,
        crop_n_layers: int = 0,
        crop_nms_thresh: float = 0.7,
        crop_overlap_ratio: float = 512 / 1500,
        crop_n_points_downscale_factor: int = 1,
        point_grids: Optional[List[np.ndarray]] = None,
        min_mask_region_area: int = 0,
        output_mode: str = "binary_mask",
    ) -> None:
        """
        Using a SAM model, generates masks for the entire image.
        Generates a grid of point prompts over the image, then filters
        low quality and duplicate masks. The default settings are chosen
        for SAM with a ViT-H backbone.

        Arguments:
          model (Sam): The SAM model to use for mask prediction.
          points_per_side (int or None): The number of points to be sampled
            along one side of the image. The total number of points is
            points_per_side**2. If None, 'point_grids' must provide explicit
            point sampling.
          points_per_batch (int): Sets the number of points run simultaneously
            by the model. Higher numbers may be faster but use more GPU memory.
          pred_iou_thresh (float): A filtering threshold in [0,1], using the
            model's predicted mask quality.
          stability_score_thresh (float): A filtering threshold in [0,1], using
            the stability of the mask under changes to the cutoff used to binarize
            the model's mask predictions.
          stability_score_offset (float): The amount to shift the cutoff when
            calculating the stability score.
          box_nms_thresh (float): The box IoU cutoff used by non-maximal
            suppression to filter duplicate masks.
          crop_n_layers (int): If >0, mask prediction will be run again on
            crops of the image. Sets the number of layers to run, where each
            layer has 2**i_layer number of image crops.
          crop_nms_thresh (float): The box IoU cutoff used by non-maximal
            suppression to filter duplicate masks between different crops.
          crop_overlap_ratio (float): Sets the degree to which crops overlap.
            In the first crop layer, crops will overlap by this fraction of
            the image length. Later layers with more crops scale down this overlap.
          crop_n_points_downscale_factor (int): The number of points-per-side
            sampled in layer n is scaled down by crop_n_points_downscale_factor**n.
          point_grids (list(np.ndarray) or None): A list over explicit grids
            of points used for sampling, normalized to [0,1]. The nth grid in the
            list is used in the nth crop layer. Exclusive with points_per_side.
          min_mask_region_area (int): If >0, postprocessing will be applied
            to remove disconnected regions and holes in masks with area smaller
            than min_mask_region_area. Requires opencv.
          output_mode (str): The form masks are returned in. Can be 'binary_mask',
            'uncompressed_rle', or 'coco_rle'. 'coco_rle' requires pycocotools.
            For large resolutions, 'binary_mask' may consume large amounts of
            memory.
        """

        assert (points_per_side is None) != (
            point_grids is None
        ), "Exactly one of points_per_side or point_grid must be provided."
        if points_per_side is not None:
            self.point_grids = build_all_layer_point_grids(
                points_per_side,
                crop_n_layers,
                crop_n_points_downscale_factor,
            )
        elif point_grids is not None:
            self.point_grids = point_grids
        else:
            raise ValueError("Can't have both points_per_side and point_grid be None.")

        assert output_mode in [
            "binary_mask",
            "uncompressed_rle",
            "coco_rle",
        ], f"Unknown output_mode {output_mode}."
        if output_mode == "coco_rle":
            from pycocotools import mask as mask_utils  # type: ignore # noqa: F401

        if min_mask_region_area > 0:
            import cv2  # type: ignore # noqa: F401

        self.predictor = SamPredictor(model)
        self.points_per_batch = points_per_batch
        self.pred_iou_thresh = pred_iou_thresh
        self.stability_score_thresh = stability_score_thresh
        self.stability_score_offset = stability_score_offset
        self.box_nms_thresh = box_nms_thresh
        self.crop_n_layers = crop_n_layers
        self.crop_nms_thresh = crop_nms_thresh
        self.crop_overlap_ratio = crop_overlap_ratio
        self.crop_n_points_downscale_factor = crop_n_points_downscale_factor
        self.min_mask_region_area = min_mask_region_area
        self.output_mode = output_mode

    @torch.no_grad()
    def generate(self, image: np.ndarray) -> List[Dict[str, Any]]:
        """
        Generates masks for the given image.

        Arguments:
          image (np.ndarray): The image to generate masks for, in HWC uint8 format.

        Returns:
          list(dict(str, any)): A list over records for masks. Each record is
            a dict containing the following keys:
              segmentation (dict(str, any) or np.ndarray): The mask. If
                output_mode='binary_mask', is an array of shape HW. Otherwise,
                is a dictionary containing the RLE.
              bbox (list(float)): The box around the mask, in XYWH format.
              area (int): The area in pixels of the mask.
              predicted_iou (float): The model's own prediction of the mask's
                quality. This is filtered by the pred_iou_thresh parameter.
              point_coords (list(list(float))): The point coordinates input
                to the model to generate this mask.
              stability_score (float): A measure of the mask's quality. This
                is filtered on using the stability_score_thresh parameter.
              crop_box (list(float)): The crop of the image used to generate
                the mask, given in XYWH format.
        """

        # Generate masks
        mask_data = self._generate_masks(image)

        # Filter small disconnected regions and holes in masks
        if self.min_mask_region_area > 0:
            mask_data = self.postprocess_small_regions(
                mask_data,
                self.min_mask_region_area,
                max(self.box_nms_thresh, self.crop_nms_thresh),
            )

        # Encode masks
        if self.output_mode == "coco_rle":
            mask_data["segmentations"] = [
                coco_encode_rle(rle) for rle in mask_data["rles"]
            ]
        elif self.output_mode == "binary_mask":
            mask_data["segmentations"] = [rle_to_mask(rle) for rle in mask_data["rles"]]
        else:
            mask_data["segmentations"] = mask_data["rles"]

        # Write mask records
        curr_anns = []
        for idx in range(len(mask_data["segmentations"])):
            ann = {
                "segmentation": mask_data["segmentations"][idx],
                "area": area_from_rle(mask_data["rles"][idx]),
                "bbox": box_xyxy_to_xywh(mask_data["boxes"][idx]).tolist(),
                "predicted_iou": mask_data["iou_preds"][idx].item(),
                "point_coords": [mask_data["points"][idx].tolist()],
                "stability_score": mask_data["stability_score"][idx].item(),
                "crop_box": box_xyxy_to_xywh(mask_data["crop_boxes"][idx]).tolist(),
            }
            curr_anns.append(ann)

        return curr_anns

    def _generate_masks(self, image: np.ndarray) -> MaskData:
        orig_size = image.shape[:2]
        crop_boxes, layer_idxs = generate_crop_boxes(
            orig_size, self.crop_n_layers, self.crop_overlap_ratio
        )

        # Iterate over image crops
        data = MaskData()
        for crop_box, layer_idx in zip(crop_boxes, layer_idxs):
            crop_data = self._process_crop(image, crop_box, layer_idx, orig_size)
            data.cat(crop_data)

        # Remove duplicate masks between crops
        if len(crop_boxes) > 1:
            # Prefer masks from smaller crops
            scores = 1 / box_area(data["crop_boxes"])
            scores = scores.to(data["boxes"].device)
            keep_by_nms = batched_nms(
                data["boxes"].float(),
                scores,
                torch.zeros_like(data["boxes"][:, 0]),  # categories
                iou_threshold=self.crop_nms_thresh,
            )
            data.filter(keep_by_nms)

        data.to_numpy()
        return data

    def _process_crop(
        self,
        image: np.ndarray,
        crop_box: List[int],
        crop_layer_idx: int,
        orig_size: Tuple[int, ...],
    ) -> MaskData:
        # Crop the image and calculate embeddings
        x0, y0, x1, y1 = crop_box
        cropped_im = image[y0:y1, x0:x1, :]
        cropped_im_size = cropped_im.shape[:2]
        self.predictor.set_image(cropped_im)

        # Get points for this crop
        points_scale = np.array(cropped_im_size)[None, ::-1]
        points_for_image = self.point_grids[crop_layer_idx] * points_scale

        # Generate masks for this crop in batches
        data = MaskData()
        for (points,) in batch_iterator(self.points_per_batch, points_for_image):
            batch_data = self._process_batch(
                points, cropped_im_size, crop_box, orig_size
            )
            data.cat(batch_data)
            del batch_data
        self.predictor.reset_image()

        # Remove duplicates within this crop.
        keep_by_nms = batched_nms(
            data["boxes"].float(),
            data["iou_preds"],
            torch.zeros_like(data["boxes"][:, 0]),  # categories
            iou_threshold=self.box_nms_thresh,
        )
        data.filter(keep_by_nms)

        # Return to the original image frame
        data["boxes"] = uncrop_boxes_xyxy(data["boxes"], crop_box)
        data["points"] = uncrop_points(data["points"], crop_box)
        data["crop_boxes"] = torch.tensor([crop_box for _ in range(len(data["rles"]))])

        return data

    def _process_batch(
        self,
        points: np.ndarray,
        im_size: Tuple[int, ...],
        crop_box: List[int],
        orig_size: Tuple[int, ...],
    ) -> MaskData:
        orig_h, orig_w = orig_size

        # Run model on this batch
        transformed_points = self.predictor.transform.apply_coords(points, im_size)
        in_points = torch.as_tensor(transformed_points, device=self.predictor.device)
        in_labels = torch.ones(
            in_points.shape[0], dtype=torch.int, device=in_points.device
        )
        masks, iou_preds, _ = self.predictor.predict_torch(
            in_points[:, None, :],
            in_labels[:, None],
            multimask_output=True,
            return_logits=True,
        )

        # Serialize predictions and store in MaskData
        data = MaskData(
            masks=masks.flatten(0, 1),
            iou_preds=iou_preds.flatten(0, 1),
            points=torch.as_tensor(points.repeat(masks.shape[1], axis=0)),
        )
        del masks

        # Filter by predicted IoU
        if self.pred_iou_thresh > 0.0:
            keep_mask = data["iou_preds"] > self.pred_iou_thresh
            data.filter(keep_mask)

        # Calculate stability score
        data["stability_score"] = calculate_stability_score(
            data["masks"],
            self.predictor.model.mask_threshold,
            self.stability_score_offset,
        )
        if self.stability_score_thresh > 0.0:
            keep_mask = data["stability_score"] >= self.stability_score_thresh
            data.filter(keep_mask)

        # Threshold masks and calculate boxes
        data["masks"] = data["masks"] > self.predictor.model.mask_threshold
        data["boxes"] = batched_mask_to_box(data["masks"])

        # Filter boxes that touch crop boundaries
        keep_mask = ~is_box_near_crop_edge(
            data["boxes"], crop_box, [0, 0, orig_w, orig_h]
        )
        if not torch.all(keep_mask):
            data.filter(keep_mask)

        # Compress to RLE
        data["masks"] = uncrop_masks(data["masks"], crop_box, orig_h, orig_w)
        data["rles"] = mask_to_rle_pytorch(data["masks"])
        del data["masks"]

        return data

    @staticmethod
    def postprocess_small_regions(
        mask_data: MaskData, min_area: int, nms_thresh: float
    ) -> MaskData:
        """
        Removes small disconnected regions and holes in masks, then reruns
        box NMS to remove any new duplicates.

        Edits mask_data in place.

        Requires open-cv as a dependency.
        """
        if len(mask_data["rles"]) == 0:
            return mask_data

        # Filter small disconnected regions and holes
        new_masks = []
        scores = []
        for rle in mask_data["rles"]:
            mask = rle_to_mask(rle)

            mask, changed = remove_small_regions(mask, min_area, mode="holes")
            unchanged = not changed
            mask, changed = remove_small_regions(mask, min_area, mode="islands")
            unchanged = unchanged and not changed

            new_masks.append(torch.as_tensor(mask).unsqueeze(0))
            # Give score=0 to changed masks and score=1 to unchanged masks
            # so NMS will prefer ones that didn't need postprocessing
            scores.append(float(unchanged))

        # Recalculate boxes and remove any new duplicates
        masks = torch.cat(new_masks, dim=0)
        boxes = batched_mask_to_box(masks)
        keep_by_nms = batched_nms(
            boxes.float(),
            torch.as_tensor(scores),
            torch.zeros_like(boxes[:, 0]),  # categories
            iou_threshold=nms_thresh,
        )

        # Only recalculate RLEs for masks that have changed
        for i_mask in keep_by_nms:
            if scores[i_mask] == 0.0:
                mask_torch = masks[i_mask].unsqueeze(0)
                mask_data["rles"][i_mask] = mask_to_rle_pytorch(mask_torch)[0]
                mask_data["boxes"][i_mask] = boxes[i_mask]  # update res directly
        mask_data.filter(keep_by_nms)

        return mask_data
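A minimal usage sketch of the class above, driving the whole-image pipeline (point grid, batched prediction, filtering, NMS). The checkpoint filename follows the ViT-H name used in build_sam.py; the image path "example.jpg" is a placeholder assumption. min_mask_region_area > 0 triggers the opencv-based postprocessing described in the docstring.

# Automatic mask generation sketch (requires opencv for the postprocessing step).
import cv2
from segment_anything import sam_model_registry, SamAutomaticMaskGenerator

sam = sam_model_registry["vit_h"](checkpoint="sam_vit_h_4b8939.pth")
mask_generator = SamAutomaticMaskGenerator(
    sam,
    points_per_side=32,
    pred_iou_thresh=0.88,
    stability_score_thresh=0.95,
    min_mask_region_area=100,
)

image = cv2.cvtColor(cv2.imread("example.jpg"), cv2.COLOR_BGR2RGB)  # HWC uint8, RGB
masks = mask_generator.generate(image)
print(len(masks), sorted(masks[0].keys()))  # records with 'segmentation', 'bbox', 'area', ...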
segment_anything/build_sam.py ADDED
@@ -0,0 +1,146 @@
# -*- coding: utf-8 -*-
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.

# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from functools import partial
from pathlib import Path
import urllib.request
import torch

from .modeling import (
    ImageEncoderViT,
    MaskDecoder,
    PromptEncoder,
    Sam,
    TwoWayTransformer,
)


def build_sam_vit_h(checkpoint=None):
    return _build_sam(
        encoder_embed_dim=1280,
        encoder_depth=32,
        encoder_num_heads=16,
        encoder_global_attn_indexes=[7, 15, 23, 31],
        checkpoint=checkpoint,
    )


build_sam = build_sam_vit_h


def build_sam_vit_l(checkpoint=None):
    return _build_sam(
        encoder_embed_dim=1024,
        encoder_depth=24,
        encoder_num_heads=16,
        encoder_global_attn_indexes=[5, 11, 17, 23],
        checkpoint=checkpoint,
    )


def build_sam_vit_b(checkpoint=None):
    return _build_sam(
        encoder_embed_dim=768,
        encoder_depth=12,
        encoder_num_heads=12,
        encoder_global_attn_indexes=[2, 5, 8, 11],
        checkpoint=checkpoint,
    )


sam_model_registry = {
    "default": build_sam_vit_h,
    "vit_h": build_sam_vit_h,
    "vit_l": build_sam_vit_l,
    "vit_b": build_sam_vit_b,
}


def _build_sam(
    encoder_embed_dim,
    encoder_depth,
    encoder_num_heads,
    encoder_global_attn_indexes,
    checkpoint=None,
):
    prompt_embed_dim = 256
    image_size = 1024
    vit_patch_size = 16
    image_embedding_size = image_size // vit_patch_size
    sam = Sam(
        image_encoder=ImageEncoderViT(
            depth=encoder_depth,
            embed_dim=encoder_embed_dim,
            img_size=image_size,
            mlp_ratio=4,
            norm_layer=partial(torch.nn.LayerNorm, eps=1e-6),
            num_heads=encoder_num_heads,
            patch_size=vit_patch_size,
            qkv_bias=True,
            use_rel_pos=True,
            global_attn_indexes=encoder_global_attn_indexes,
            window_size=14,
            out_chans=prompt_embed_dim,
        ),
        prompt_encoder=PromptEncoder(
            embed_dim=prompt_embed_dim,
            image_embedding_size=(image_embedding_size, image_embedding_size),
            input_image_size=(image_size, image_size),
            mask_in_chans=16,
        ),
        mask_decoder=MaskDecoder(
            num_multimask_outputs=3,
            transformer=TwoWayTransformer(
                depth=2,
                embedding_dim=prompt_embed_dim,
                mlp_dim=2048,
                num_heads=8,
            ),
            transformer_dim=prompt_embed_dim,
            iou_head_depth=3,
            iou_head_hidden_dim=256,
        ),
        pixel_mean=[123.675, 116.28, 103.53],
        pixel_std=[58.395, 57.12, 57.375],
    )
    sam.eval()
    checkpoint = Path(checkpoint)
    if checkpoint.name == "sam_vit_b_01ec64.pth" and not checkpoint.exists():
        cmd = input("Download sam_vit_b_01ec64.pth from facebook AI? [y]/n: ")
        if len(cmd) == 0 or cmd.lower() == "y":
            checkpoint.parent.mkdir(parents=True, exist_ok=True)
            print("Downloading SAM ViT-B checkpoint...")
            urllib.request.urlretrieve(
                "https://dl.fbaipublicfiles.com/segment_anything/sam_vit_b_01ec64.pth",
                checkpoint,
            )
            print(checkpoint.name, " is downloaded!")
    elif checkpoint.name == "sam_vit_h_4b8939.pth" and not checkpoint.exists():
        cmd = input("Download sam_vit_h_4b8939.pth from facebook AI? [y]/n: ")
        if len(cmd) == 0 or cmd.lower() == "y":
            checkpoint.parent.mkdir(parents=True, exist_ok=True)
            print("Downloading SAM ViT-H checkpoint...")
            urllib.request.urlretrieve(
                "https://dl.fbaipublicfiles.com/segment_anything/sam_vit_h_4b8939.pth",
                checkpoint,
            )
            print(checkpoint.name, " is downloaded!")
    elif checkpoint.name == "sam_vit_l_0b3195.pth" and not checkpoint.exists():
        cmd = input("Download sam_vit_l_0b3195.pth from facebook AI? [y]/n: ")
        if len(cmd) == 0 or cmd.lower() == "y":
            checkpoint.parent.mkdir(parents=True, exist_ok=True)
            print("Downloading SAM ViT-L checkpoint...")
            urllib.request.urlretrieve(
                "https://dl.fbaipublicfiles.com/segment_anything/sam_vit_l_0b3195.pth",
                checkpoint,
            )
            print(checkpoint.name, " is downloaded!")

    if checkpoint is not None:
        with open(checkpoint, "rb") as f:
            state_dict = torch.load(f, map_location=torch.device('cpu'))
        sam.load_state_dict(state_dict)
    return sam
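A small sketch of the registry defined above. Note that, as written, _build_sam calls Path(checkpoint) before the None check, so in practice a checkpoint path is expected; the official release filenames are the ones the download logic recognizes. Moving the model to GPU is an assumption about the caller's environment, not something this file does.

# Registry usage sketch.
import torch
from segment_anything.build_sam import sam_model_registry

device = "cuda" if torch.cuda.is_available() else "cpu"
sam = sam_model_registry["vit_b"](checkpoint="sam_vit_b_01ec64.pth")  # or "vit_l" / "vit_h" / "default"
sam.to(device)
print(sum(p.numel() for p in sam.parameters()) / 1e6, "M parameters")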
segment_anything/modeling/__init__.py ADDED
@@ -0,0 +1,12 @@
# -*- coding: utf-8 -*-
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.

# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

from .sam import Sam
from .image_encoder import ImageEncoderViT
from .mask_decoder import MaskDecoder
from .prompt_encoder import PromptEncoder
from .transformer import TwoWayTransformer
segment_anything/modeling/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (410 Bytes).
 
segment_anything/modeling/__pycache__/common.cpython-310.pyc ADDED
Binary file (1.77 kB).
 
segment_anything/modeling/__pycache__/image_encoder.cpython-310.pyc ADDED
Binary file (12.7 kB).
 
segment_anything/modeling/__pycache__/mask_decoder.cpython-310.pyc ADDED
Binary file (5.54 kB).
 
segment_anything/modeling/__pycache__/prompt_encoder.cpython-310.pyc ADDED
Binary file (7.72 kB).
 
segment_anything/modeling/__pycache__/sam.cpython-310.pyc ADDED
Binary file (6.69 kB).
 
segment_anything/modeling/__pycache__/transformer.cpython-310.pyc ADDED
Binary file (6.62 kB).
 
segment_anything/modeling/common.py ADDED
@@ -0,0 +1,44 @@
# -*- coding: utf-8 -*-
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.

# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

import torch
import torch.nn as nn

from typing import Type


class MLPBlock(nn.Module):
    def __init__(
        self,
        embedding_dim: int,
        mlp_dim: int,
        act: Type[nn.Module] = nn.GELU,
    ) -> None:
        super().__init__()
        self.lin1 = nn.Linear(embedding_dim, mlp_dim)
        self.lin2 = nn.Linear(mlp_dim, embedding_dim)
        self.act = act()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.lin2(self.act(self.lin1(x)))


# From https://github.com/facebookresearch/detectron2/blob/main/detectron2/layers/batch_norm.py # noqa
# Itself from https://github.com/facebookresearch/ConvNeXt/blob/d1fa8f6fef0a165b27399986cc2bdacc92777e40/models/convnext.py#L119 # noqa
class LayerNorm2d(nn.Module):
    def __init__(self, num_channels: int, eps: float = 1e-6) -> None:
        super().__init__()
        self.weight = nn.Parameter(torch.ones(num_channels))
        self.bias = nn.Parameter(torch.zeros(num_channels))
        self.eps = eps

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        u = x.mean(1, keepdim=True)
        s = (x - u).pow(2).mean(1, keepdim=True)
        x = (x - u) / torch.sqrt(s + self.eps)
        x = self.weight[:, None, None] * x + self.bias[:, None, None]
        return x
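A quick sanity sketch for the two helpers above: LayerNorm2d normalizes over the channel dimension of an NCHW tensor (unlike nn.LayerNorm, which expects channels-last input), and MLPBlock preserves the embedding dimension. The tensor shapes here are illustrative only.

# Sanity-check sketch for common.py.
import torch
from segment_anything.modeling.common import LayerNorm2d, MLPBlock

x = torch.randn(2, 256, 64, 64)            # N, C, H, W
y = LayerNorm2d(256)(x)
print(y.shape)                              # torch.Size([2, 256, 64, 64])
print(bool(y.mean(dim=1).abs().max() < 1e-4))  # per-pixel channel mean is ~0

mlp = MLPBlock(embedding_dim=256, mlp_dim=1024)
print(mlp(torch.randn(2, 64, 256)).shape)   # torch.Size([2, 64, 256])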
segment_anything/modeling/image_encoder.py ADDED
@@ -0,0 +1,420 @@
# -*- coding: utf-8 -*-
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.

# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

import torch
import torch.nn as nn
import torch.nn.functional as F

from typing import Optional, Tuple, Type

from .common import LayerNorm2d, MLPBlock


# This class and its supporting functions below lightly adapted from the ViTDet backbone available at: https://github.com/facebookresearch/detectron2/blob/main/detectron2/modeling/backbone/vit.py # noqa
class ImageEncoderViT(nn.Module):
    def __init__(
        self,
        img_size: int = 1024,
        patch_size: int = 16,
        in_chans: int = 3,
        embed_dim: int = 768,
        depth: int = 12,
        num_heads: int = 12,
        mlp_ratio: float = 4.0,
        out_chans: int = 256,
        qkv_bias: bool = True,
        norm_layer: Type[nn.Module] = nn.LayerNorm,
        act_layer: Type[nn.Module] = nn.GELU,
        use_abs_pos: bool = True,
        use_rel_pos: bool = False,
        rel_pos_zero_init: bool = True,
        window_size: int = 0,
        global_attn_indexes: Tuple[int, ...] = (),
    ) -> None:
        """
        Args:
            img_size (int): Input image size.
            patch_size (int): Patch size.
            in_chans (int): Number of input image channels.
            embed_dim (int): Patch embedding dimension.
            depth (int): Depth of ViT.
            num_heads (int): Number of attention heads in each ViT block.
            mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
            qkv_bias (bool): If True, add a learnable bias to query, key, value.
            norm_layer (nn.Module): Normalization layer.
            act_layer (nn.Module): Activation layer.
            use_abs_pos (bool): If True, use absolute positional embeddings.
            use_rel_pos (bool): If True, add relative positional embeddings to the attention map.
            rel_pos_zero_init (bool): If True, zero initialize relative positional parameters.
            window_size (int): Window size for window attention blocks.
            global_attn_indexes (list): Indexes for blocks using global attention.
        """
        super().__init__()
        self.img_size = img_size

        self.patch_embed = PatchEmbed(
            kernel_size=(patch_size, patch_size),
            stride=(patch_size, patch_size),
            in_chans=in_chans,
            embed_dim=embed_dim,
        )

        self.pos_embed: Optional[nn.Parameter] = None
        if use_abs_pos:
            # Initialize absolute positional embedding with pretrain image size.
            self.pos_embed = nn.Parameter(
                torch.zeros(
                    1, img_size // patch_size, img_size // patch_size, embed_dim
                )
            )

        self.blocks = nn.ModuleList()
        for i in range(depth):
            block = Block(
                dim=embed_dim,
                num_heads=num_heads,
                mlp_ratio=mlp_ratio,
                qkv_bias=qkv_bias,
                norm_layer=norm_layer,
                act_layer=act_layer,
                use_rel_pos=use_rel_pos,
                rel_pos_zero_init=rel_pos_zero_init,
                window_size=window_size if i not in global_attn_indexes else 0,
                input_size=(img_size // patch_size, img_size // patch_size),
            )
            self.blocks.append(block)

        self.neck = nn.Sequential(
            nn.Conv2d(
                embed_dim,
                out_chans,
                kernel_size=1,
                bias=False,
            ),
            LayerNorm2d(out_chans),
            nn.Conv2d(
                out_chans,
                out_chans,
                kernel_size=3,
                padding=1,
                bias=False,
            ),
            LayerNorm2d(out_chans),
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = self.patch_embed(x)
        if self.pos_embed is not None:
            x = x + self.pos_embed

        for blk in self.blocks:
            x = blk(x)

        x = self.neck(x.permute(0, 3, 1, 2))

        return x


class Block(nn.Module):
    """Transformer blocks with support of window attention and residual propagation blocks"""

    def __init__(
        self,
        dim: int,
        num_heads: int,
        mlp_ratio: float = 4.0,
        qkv_bias: bool = True,
        norm_layer: Type[nn.Module] = nn.LayerNorm,
        act_layer: Type[nn.Module] = nn.GELU,
        use_rel_pos: bool = False,
        rel_pos_zero_init: bool = True,
        window_size: int = 0,
        input_size: Optional[Tuple[int, int]] = None,
    ) -> None:
        """
        Args:
            dim (int): Number of input channels.
            num_heads (int): Number of attention heads in each ViT block.
            mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
            qkv_bias (bool): If True, add a learnable bias to query, key, value.
            norm_layer (nn.Module): Normalization layer.
            act_layer (nn.Module): Activation layer.
            use_rel_pos (bool): If True, add relative positional embeddings to the attention map.
            rel_pos_zero_init (bool): If True, zero initialize relative positional parameters.
            window_size (int): Window size for window attention blocks. If it equals 0, then
                use global attention.
            input_size (tuple(int, int) or None): Input resolution for calculating the relative
                positional parameter size.
        """
        super().__init__()
        self.norm1 = norm_layer(dim)
        self.attn = Attention(
            dim,
            num_heads=num_heads,
            qkv_bias=qkv_bias,
            use_rel_pos=use_rel_pos,
            rel_pos_zero_init=rel_pos_zero_init,
            input_size=input_size if window_size == 0 else (window_size, window_size),
        )

        self.norm2 = norm_layer(dim)
        self.mlp = MLPBlock(
            embedding_dim=dim, mlp_dim=int(dim * mlp_ratio), act=act_layer
        )

        self.window_size = window_size

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        shortcut = x
        x = self.norm1(x)
        # Window partition
        if self.window_size > 0:
            H, W = x.shape[1], x.shape[2]
            x, pad_hw = window_partition(x, self.window_size)

        x = self.attn(x)
        # Reverse window partition
        if self.window_size > 0:
            x = window_unpartition(x, self.window_size, pad_hw, (H, W))

        x = shortcut + x
        x = x + self.mlp(self.norm2(x))

        return x


class Attention(nn.Module):
    """Multi-head Attention block with relative position embeddings."""

    def __init__(
        self,
        dim: int,
        num_heads: int = 8,
        qkv_bias: bool = True,
        use_rel_pos: bool = False,
        rel_pos_zero_init: bool = True,
        input_size: Optional[Tuple[int, int]] = None,
    ) -> None:
        """
        Args:
            dim (int): Number of input channels.
            num_heads (int): Number of attention heads.
            qkv_bias (bool): If True, add a learnable bias to query, key, value.
            rel_pos (bool): If True, add relative positional embeddings to the attention map.
            rel_pos_zero_init (bool): If True, zero initialize relative positional parameters.
            input_size (tuple(int, int) or None): Input resolution for calculating the relative
                positional parameter size.
        """
        super().__init__()
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = head_dim**-0.5

        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.proj = nn.Linear(dim, dim)

        self.use_rel_pos = use_rel_pos
        if self.use_rel_pos:
            assert (
                input_size is not None
            ), "Input size must be provided if using relative positional encoding."
            # initialize relative positional embeddings
            self.rel_pos_h = nn.Parameter(torch.zeros(2 * input_size[0] - 1, head_dim))
            self.rel_pos_w = nn.Parameter(torch.zeros(2 * input_size[1] - 1, head_dim))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        B, H, W, _ = x.shape
        # qkv with shape (3, B, nHead, H * W, C)
        qkv = (
            self.qkv(x).reshape(B, H * W, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4)
        )
        # q, k, v with shape (B * nHead, H * W, C)
        q, k, v = qkv.reshape(3, B * self.num_heads, H * W, -1).unbind(0)

        attn = (q * self.scale) @ k.transpose(-2, -1)

        if self.use_rel_pos:
            attn = add_decomposed_rel_pos(
                attn, q, self.rel_pos_h, self.rel_pos_w, (H, W), (H, W)
            )

        attn = attn.softmax(dim=-1)
        x = (
            (attn @ v)
            .view(B, self.num_heads, H, W, -1)
            .permute(0, 2, 3, 1, 4)
            .reshape(B, H, W, -1)
        )
        x = self.proj(x)

        return x


def window_partition(
    x: torch.Tensor, window_size: int
) -> Tuple[torch.Tensor, Tuple[int, int]]:
    """
    Partition into non-overlapping windows with padding if needed.
    Args:
        x (tensor): input tokens with [B, H, W, C].
        window_size (int): window size.

    Returns:
        windows: windows after partition with [B * num_windows, window_size, window_size, C].
        (Hp, Wp): padded height and width before partition
    """
    B, H, W, C = x.shape

    pad_h = (window_size - H % window_size) % window_size
    pad_w = (window_size - W % window_size) % window_size
    if pad_h > 0 or pad_w > 0:
        x = F.pad(x, (0, 0, 0, pad_w, 0, pad_h))
    Hp, Wp = H + pad_h, W + pad_w

    x = x.view(B, Hp // window_size, window_size, Wp // window_size, window_size, C)
    windows = (
        x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
    )
    return windows, (Hp, Wp)


def window_unpartition(
    windows: torch.Tensor,
    window_size: int,
    pad_hw: Tuple[int, int],
    hw: Tuple[int, int],
) -> torch.Tensor:
    """
    Window unpartition into original sequences and removing padding.
    Args:
        windows (tensor): input tokens with [B * num_windows, window_size, window_size, C].
        window_size (int): window size.
        pad_hw (Tuple): padded height and width (Hp, Wp).
        hw (Tuple): original height and width (H, W) before padding.

    Returns:
        x: unpartitioned sequences with [B, H, W, C].
    """
    Hp, Wp = pad_hw
    H, W = hw
    B = windows.shape[0] // (Hp * Wp // window_size // window_size)
    x = windows.view(
        B, Hp // window_size, Wp // window_size, window_size, window_size, -1
    )
    x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, Hp, Wp, -1)

    if Hp > H or Wp > W:
        x = x[:, :H, :W, :].contiguous()
    return x


def get_rel_pos(q_size: int, k_size: int, rel_pos: torch.Tensor) -> torch.Tensor:
    """
    Get relative positional embeddings according to the relative positions of
    query and key sizes.
    Args:
        q_size (int): size of query q.
        k_size (int): size of key k.
        rel_pos (Tensor): relative position embeddings (L, C).

    Returns:
        Extracted positional embeddings according to relative positions.
    """
    max_rel_dist = int(2 * max(q_size, k_size) - 1)
    # Interpolate rel pos if needed.
    if rel_pos.shape[0] != max_rel_dist:
        # Interpolate rel pos.
        rel_pos_resized = F.interpolate(
            rel_pos.reshape(1, rel_pos.shape[0], -1).permute(0, 2, 1),
            size=max_rel_dist,
            mode="linear",
        )
        rel_pos_resized = rel_pos_resized.reshape(-1, max_rel_dist).permute(1, 0)
    else:
        rel_pos_resized = rel_pos

    # Scale the coords with short length if shapes for q and k are different.
    q_coords = torch.arange(q_size)[:, None] * max(k_size / q_size, 1.0)
    k_coords = torch.arange(k_size)[None, :] * max(q_size / k_size, 1.0)
    relative_coords = (q_coords - k_coords) + (k_size - 1) * max(q_size / k_size, 1.0)

    return rel_pos_resized[relative_coords.long()]


def add_decomposed_rel_pos(
    attn: torch.Tensor,
    q: torch.Tensor,
    rel_pos_h: torch.Tensor,
    rel_pos_w: torch.Tensor,
    q_size: Tuple[int, int],
    k_size: Tuple[int, int],
) -> torch.Tensor:
    """
    Calculate decomposed Relative Positional Embeddings from :paper:`mvitv2`.
    https://github.com/facebookresearch/mvit/blob/19786631e330df9f3622e5402b4a419a263a2c80/mvit/models/attention.py # noqa B950
    Args:
        attn (Tensor): attention map.
        q (Tensor): query q in the attention layer with shape (B, q_h * q_w, C).
        rel_pos_h (Tensor): relative position embeddings (Lh, C) for height axis.
        rel_pos_w (Tensor): relative position embeddings (Lw, C) for width axis.
        q_size (Tuple): spatial sequence size of query q with (q_h, q_w).
        k_size (Tuple): spatial sequence size of key k with (k_h, k_w).

    Returns:
        attn (Tensor): attention map with added relative positional embeddings.
    """
    q_h, q_w = q_size
    k_h, k_w = k_size
    Rh = get_rel_pos(q_h, k_h, rel_pos_h)
    Rw = get_rel_pos(q_w, k_w, rel_pos_w)

    B, _, dim = q.shape
    r_q = q.reshape(B, q_h, q_w, dim)
    rel_h = torch.einsum("bhwc,hkc->bhwk", r_q, Rh)
    rel_w = torch.einsum("bhwc,wkc->bhwk", r_q, Rw)

    attn = (
        attn.view(B, q_h, q_w, k_h, k_w)
        + rel_h[:, :, :, :, None]
        + rel_w[:, :, :, None, :]
    ).view(B, q_h * q_w, k_h * k_w)

    return attn


class PatchEmbed(nn.Module):
    """
    Image to Patch Embedding.
    """

    def __init__(
        self,
        kernel_size: Tuple[int, int] = (16, 16),
        stride: Tuple[int, int] = (16, 16),
        padding: Tuple[int, int] = (0, 0),
        in_chans: int = 3,
        embed_dim: int = 768,
    ) -> None:
        """
        Args:
            kernel_size (Tuple): kernel size of the projection layer.
            stride (Tuple): stride of the projection layer.
            padding (Tuple): padding size of the projection layer.
            in_chans (int): Number of input image channels.
            embed_dim (int): Patch embedding dimension.
        """
        super().__init__()

        self.proj = nn.Conv2d(
            in_chans, embed_dim, kernel_size=kernel_size, stride=stride, padding=padding
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = self.proj(x)
        # B C H W -> B H W C
        x = x.permute(0, 2, 3, 1)
        return x
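A shape sketch for the encoder above. The tiny depth/width and the 256-pixel input used here are assumptions chosen only to keep the example light and are not a released SAM configuration; with SAM's default 1024x1024 input and 16x16 patches the grid is 64x64 and the neck outputs 1x256x64x64.

# Encoder shape sketch with a deliberately small configuration.
import torch
from segment_anything.modeling.image_encoder import ImageEncoderViT

encoder = ImageEncoderViT(
    img_size=256,
    patch_size=16,
    embed_dim=64,
    depth=2,
    num_heads=2,
    out_chans=256,
    use_rel_pos=True,
    window_size=8,                # block 0 uses windowed attention
    global_attn_indexes=(1,),     # block 1 uses global attention
)
with torch.no_grad():
    feats = encoder(torch.randn(1, 3, 256, 256))
print(feats.shape)  # torch.Size([1, 256, 16, 16])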
segment_anything/modeling/mask_decoder.py ADDED
@@ -0,0 +1,190 @@
# -*- coding: utf-8 -*-
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.

# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

import torch
from torch import nn
from torch.nn import functional as F

from typing import List, Tuple, Type

from .common import LayerNorm2d


class MaskDecoder(nn.Module):
    def __init__(
        self,
        *,
        transformer_dim: int,
        transformer: nn.Module,
        num_multimask_outputs: int = 3,
        activation: Type[nn.Module] = nn.GELU,
        iou_head_depth: int = 3,
        iou_head_hidden_dim: int = 256,
    ) -> None:
        """
        Predicts masks given an image and prompt embeddings, using a
        transformer architecture.

        Arguments:
          transformer_dim (int): the channel dimension of the transformer
          transformer (nn.Module): the transformer used to predict masks
          num_multimask_outputs (int): the number of masks to predict
            when disambiguating masks
          activation (nn.Module): the type of activation to use when
            upscaling masks
          iou_head_depth (int): the depth of the MLP used to predict
            mask quality
          iou_head_hidden_dim (int): the hidden dimension of the MLP
            used to predict mask quality
        """
        super().__init__()
        self.transformer_dim = transformer_dim
        self.transformer = transformer

        self.num_multimask_outputs = num_multimask_outputs

        self.iou_token = nn.Embedding(1, transformer_dim)
        self.num_mask_tokens = num_multimask_outputs + 1
        self.mask_tokens = nn.Embedding(self.num_mask_tokens, transformer_dim)

        self.output_upscaling = nn.Sequential(
            nn.ConvTranspose2d(
                transformer_dim, transformer_dim // 4, kernel_size=2, stride=2
            ),
            LayerNorm2d(transformer_dim // 4),
            activation(),
            nn.ConvTranspose2d(
                transformer_dim // 4, transformer_dim // 8, kernel_size=2, stride=2
            ),
            activation(),
        )
        self.output_hypernetworks_mlps = nn.ModuleList(
            [
                MLP(transformer_dim, transformer_dim, transformer_dim // 8, 3)
                for i in range(self.num_mask_tokens)
            ]
        )

        self.iou_prediction_head = MLP(
            transformer_dim, iou_head_hidden_dim, self.num_mask_tokens, iou_head_depth
        )

    def forward(
        self,
        image_embeddings: torch.Tensor,
        image_pe: torch.Tensor,
        sparse_prompt_embeddings: torch.Tensor,
        dense_prompt_embeddings: torch.Tensor,
        multimask_output: bool,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Predict masks given image and prompt embeddings.

        Arguments:
          image_embeddings (torch.Tensor): the embeddings from the image encoder
          image_pe (torch.Tensor): positional encoding with the shape of image_embeddings
          sparse_prompt_embeddings (torch.Tensor): the embeddings of the points and boxes
          dense_prompt_embeddings (torch.Tensor): the embeddings of the mask inputs
          multimask_output (bool): Whether to return multiple masks or a single
            mask.

        Returns:
          torch.Tensor: batched predicted masks
          torch.Tensor: batched predictions of mask quality
        """
        masks, iou_pred = self.predict_masks(
            image_embeddings=image_embeddings,
            image_pe=image_pe,
            sparse_prompt_embeddings=sparse_prompt_embeddings,
            dense_prompt_embeddings=dense_prompt_embeddings,
        )

        # Select the correct mask or masks for output
        if multimask_output:
            mask_slice = slice(1, None)
        else:
            mask_slice = slice(0, 1)
        masks = masks[:, mask_slice, :, :]
        iou_pred = iou_pred[:, mask_slice]

        # Prepare output
        return masks, iou_pred

    def predict_masks(
        self,
        image_embeddings: torch.Tensor,
        image_pe: torch.Tensor,
        sparse_prompt_embeddings: torch.Tensor,
        dense_prompt_embeddings: torch.Tensor,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Predicts masks. See 'forward' for more details."""
        # Concatenate output tokens
        output_tokens = torch.cat(
            [self.iou_token.weight, self.mask_tokens.weight], dim=0
        )
        output_tokens = output_tokens.unsqueeze(0).expand(
            sparse_prompt_embeddings.size(0), -1, -1
        )
        tokens = torch.cat((output_tokens, sparse_prompt_embeddings), dim=1)

        # Expand per-image data in batch direction to be per-mask
        if image_embeddings.shape[0] != tokens.shape[0]:
            src = torch.repeat_interleave(image_embeddings, tokens.shape[0], dim=0)
        else:
            src = image_embeddings
        src = src + dense_prompt_embeddings
        pos_src = torch.repeat_interleave(image_pe, tokens.shape[0], dim=0)
        b, c, h, w = src.shape

        # Run the transformer
        hs, src = self.transformer(src, pos_src, tokens)
        iou_token_out = hs[:, 0, :]
        mask_tokens_out = hs[:, 1 : (1 + self.num_mask_tokens), :]

        # Upscale mask embeddings and predict masks using the mask tokens
        src = src.transpose(1, 2).view(b, c, h, w)
        upscaled_embedding = self.output_upscaling(src)
        hyper_in_list: List[torch.Tensor] = []
        for i in range(self.num_mask_tokens):
            hyper_in_list.append(
                self.output_hypernetworks_mlps[i](mask_tokens_out[:, i, :])
            )
        hyper_in = torch.stack(hyper_in_list, dim=1)
        b, c, h, w = upscaled_embedding.shape
        masks = (hyper_in @ upscaled_embedding.view(b, c, h * w)).view(b, -1, h, w)

        # Generate mask quality predictions
        iou_pred = self.iou_prediction_head(iou_token_out)

        return masks, iou_pred


# Lightly adapted from
# https://github.com/facebookresearch/MaskFormer/blob/main/mask_former/modeling/transformer/transformer_predictor.py # noqa
class MLP(nn.Module):
    def __init__(
        self,
        input_dim: int,
        hidden_dim: int,
        output_dim: int,
        num_layers: int,
        sigmoid_output: bool = False,
    ) -> None:
        super().__init__()
        self.num_layers = num_layers
        h = [hidden_dim] * (num_layers - 1)
        self.layers = nn.ModuleList(
            nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim])
        )
        self.sigmoid_output = sigmoid_output

    def forward(self, x):
        for i, layer in enumerate(self.layers):
            x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)
        if self.sigmoid_output:
            x = F.sigmoid(x)
        return x
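A shape sketch for MaskDecoder, wiring in the TwoWayTransformer uploaded alongside it in this commit (transformer.py). The embedding sizes follow the SAM defaults used in build_sam.py (256-dim tokens, 64x64 embedding grid); the random tensors stand in for real image and prompt embeddings.

# MaskDecoder shape sketch with random inputs.
import torch
from segment_anything.modeling.mask_decoder import MaskDecoder
from segment_anything.modeling.transformer import TwoWayTransformer

decoder = MaskDecoder(
    transformer_dim=256,
    transformer=TwoWayTransformer(depth=2, embedding_dim=256, mlp_dim=2048, num_heads=8),
)
image_embeddings = torch.randn(1, 256, 64, 64)
image_pe = torch.randn(1, 256, 64, 64)
sparse = torch.randn(1, 2, 256)        # e.g. one point embedding plus padding token
dense = torch.randn(1, 256, 64, 64)
masks, iou_pred = decoder(image_embeddings, image_pe, sparse, dense, multimask_output=True)
print(masks.shape, iou_pred.shape)     # torch.Size([1, 3, 256, 256]) torch.Size([1, 3])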
segment_anything/modeling/prompt_encoder.py ADDED
@@ -0,0 +1,226 @@
# -*- coding: utf-8 -*-
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.

# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

import numpy as np
import torch
from torch import nn

from typing import Any, Optional, Tuple, Type

from .common import LayerNorm2d


class PromptEncoder(nn.Module):
    def __init__(
        self,
        embed_dim: int,
        image_embedding_size: Tuple[int, int],
        input_image_size: Tuple[int, int],
        mask_in_chans: int,
        activation: Type[nn.Module] = nn.GELU,
    ) -> None:
        """
        Encodes prompts for input to SAM's mask decoder.

        Arguments:
          embed_dim (int): The prompts' embedding dimension
          image_embedding_size (tuple(int, int)): The spatial size of the
            image embedding, as (H, W).
          input_image_size (int): The padded size of the image as input
            to the image encoder, as (H, W).
          mask_in_chans (int): The number of hidden channels used for
            encoding input masks.
          activation (nn.Module): The activation to use when encoding
            input masks.
        """
        super().__init__()
        self.embed_dim = embed_dim
        self.input_image_size = input_image_size
        self.image_embedding_size = image_embedding_size
        self.pe_layer = PositionEmbeddingRandom(embed_dim // 2)

        self.num_point_embeddings: int = 4  # pos/neg point + 2 box corners
        point_embeddings = [
            nn.Embedding(1, embed_dim) for i in range(self.num_point_embeddings)
        ]
        self.point_embeddings = nn.ModuleList(point_embeddings)
        self.not_a_point_embed = nn.Embedding(1, embed_dim)

        self.mask_input_size = (
            4 * image_embedding_size[0],
            4 * image_embedding_size[1],
        )
        self.mask_downscaling = nn.Sequential(
            nn.Conv2d(1, mask_in_chans // 4, kernel_size=2, stride=2),
            LayerNorm2d(mask_in_chans // 4),
            activation(),
            nn.Conv2d(mask_in_chans // 4, mask_in_chans, kernel_size=2, stride=2),
            LayerNorm2d(mask_in_chans),
            activation(),
            nn.Conv2d(mask_in_chans, embed_dim, kernel_size=1),
        )
        self.no_mask_embed = nn.Embedding(1, embed_dim)

    def get_dense_pe(self) -> torch.Tensor:
        """
        Returns the positional encoding used to encode point prompts,
        applied to a dense set of points the shape of the image encoding.

        Returns:
          torch.Tensor: Positional encoding with shape
            1x(embed_dim)x(embedding_h)x(embedding_w)
        """
        return self.pe_layer(self.image_embedding_size).unsqueeze(0)

    def _embed_points(
        self,
        points: torch.Tensor,
        labels: torch.Tensor,
        pad: bool,
    ) -> torch.Tensor:
        """Embeds point prompts."""
        points = points + 0.5  # Shift to center of pixel
        if pad:
            padding_point = torch.zeros((points.shape[0], 1, 2), device=points.device)
            padding_label = -torch.ones((labels.shape[0], 1), device=labels.device)
            points = torch.cat([points, padding_point], dim=1)
            labels = torch.cat([labels, padding_label], dim=1)
        point_embedding = self.pe_layer.forward_with_coords(
            points, self.input_image_size
        )
        point_embedding[labels == -1] = 0.0
        point_embedding[labels == -1] += self.not_a_point_embed.weight
        point_embedding[labels == 0] += self.point_embeddings[0].weight
        point_embedding[labels == 1] += self.point_embeddings[1].weight
        return point_embedding

    def _embed_boxes(self, boxes: torch.Tensor) -> torch.Tensor:
        """Embeds box prompts."""
        boxes = boxes + 0.5  # Shift to center of pixel
        coords = boxes.reshape(-1, 2, 2)
        corner_embedding = self.pe_layer.forward_with_coords(
            coords, self.input_image_size
        )
        corner_embedding[:, 0, :] += self.point_embeddings[2].weight
        corner_embedding[:, 1, :] += self.point_embeddings[3].weight
        return corner_embedding

    def _embed_masks(self, masks: torch.Tensor) -> torch.Tensor:
        """Embeds mask inputs."""
        mask_embedding = self.mask_downscaling(masks)
        return mask_embedding

    def _get_batch_size(
        self,
        points: Optional[Tuple[torch.Tensor, torch.Tensor]],
        boxes: Optional[torch.Tensor],
        masks: Optional[torch.Tensor],
    ) -> int:
        """
        Gets the batch size of the output given the batch size of the input prompts.
        """
        if points is not None:
            return points[0].shape[0]
        elif boxes is not None:
            return boxes.shape[0]
        elif masks is not None:
            return masks.shape[0]
        else:
            return 1

    def _get_device(self) -> torch.device:
        return self.point_embeddings[0].weight.device

    def forward(
        self,
        points: Optional[Tuple[torch.Tensor, torch.Tensor]],
        boxes: Optional[torch.Tensor],
        masks: Optional[torch.Tensor],
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Embeds different types of prompts, returning both sparse and dense
        embeddings.

        Arguments:
          points (tuple(torch.Tensor, torch.Tensor) or none): point coordinates
            and labels to embed.
          boxes (torch.Tensor or none): boxes to embed
          masks (torch.Tensor or none): masks to embed

        Returns:
          torch.Tensor: sparse embeddings for the points and boxes, with shape
            BxNx(embed_dim), where N is determined by the number of input points
            and boxes.
          torch.Tensor: dense embeddings for the masks, in the shape
            Bx(embed_dim)x(embed_H)x(embed_W)
        """
        bs = self._get_batch_size(points, boxes, masks)
        sparse_embeddings = torch.empty(
            (bs, 0, self.embed_dim), device=self._get_device()
        )
        if points is not None:
            coords, labels = points
            point_embeddings = self._embed_points(coords, labels, pad=(boxes is None))
            sparse_embeddings = torch.cat([sparse_embeddings, point_embeddings], dim=1)
        if boxes is not None:
            box_embeddings = self._embed_boxes(boxes)
            sparse_embeddings = torch.cat([sparse_embeddings, box_embeddings], dim=1)

        if masks is not None:
            dense_embeddings = self._embed_masks(masks)
        else:
            dense_embeddings = self.no_mask_embed.weight.reshape(1, -1, 1, 1).expand(
                bs, -1, self.image_embedding_size[0], self.image_embedding_size[1]
            )

        return sparse_embeddings, dense_embeddings


class PositionEmbeddingRandom(nn.Module):
    """
    Positional encoding using random spatial frequencies.
    """

    def __init__(self, num_pos_feats: int = 64, scale: Optional[float] = None) -> None:
        super().__init__()
        if scale is None or scale <= 0.0:
            scale = 1.0
        self.register_buffer(
            "positional_encoding_gaussian_matrix",
            scale * torch.randn((2, num_pos_feats)),
        )

    def _pe_encoding(self, coords: torch.Tensor) -> torch.Tensor:
        """Positionally encode points that are normalized to [0,1]."""
        # assuming coords are in [0, 1]^2 square and have d_1 x ... x d_n x 2 shape
        coords = 2 * coords - 1
        coords = coords @ self.positional_encoding_gaussian_matrix
        coords = 2 * np.pi * coords
        # outputs d_1 x ... x d_n x C shape
        return torch.cat([torch.sin(coords), torch.cos(coords)], dim=-1)

    def forward(self, size: Tuple[int, int]) -> torch.Tensor:
        """Generate positional encoding for a grid of the specified size."""
        h, w = size
        device: Any = self.positional_encoding_gaussian_matrix.device
        grid = torch.ones((h, w), device=device, dtype=torch.float32)
        y_embed = grid.cumsum(dim=0) - 0.5
        x_embed = grid.cumsum(dim=1) - 0.5
        y_embed = y_embed / h
        x_embed = x_embed / w

        pe = self._pe_encoding(torch.stack([x_embed, y_embed], dim=-1))
        return pe.permute(2, 0, 1)  # C x H x W

    def forward_with_coords(
        self, coords_input: torch.Tensor, image_size: Tuple[int, int]
    ) -> torch.Tensor:
        """Positionally encode points that are not normalized to [0,1]."""
        coords = coords_input.clone()
        coords[:, :, 0] = coords[:, :, 0] / image_size[1]
        coords[:, :, 1] = coords[:, :, 1] / image_size[0]
        return self._pe_encoding(coords.to(torch.float))  # B x N x C
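A shape sketch for PromptEncoder with the default SAM geometry assumed by build_sam.py (1024x1024 input images, 64x64 embedding grid). The single click at (512, 512) is an assumption for illustration; with no box prompt, a padding point is appended, so the sparse output has two tokens per image.

# PromptEncoder shape sketch for a single point prompt.
import torch
from segment_anything.modeling.prompt_encoder import PromptEncoder

prompt_encoder = PromptEncoder(
    embed_dim=256,
    image_embedding_size=(64, 64),
    input_image_size=(1024, 1024),
    mask_in_chans=16,
)
coords = torch.tensor([[[512.0, 512.0]]])   # B x N x 2, in input-image pixels
labels = torch.tensor([[1]])                # 1 = foreground click
sparse, dense = prompt_encoder(points=(coords, labels), boxes=None, masks=None)
print(sparse.shape, dense.shape)            # torch.Size([1, 2, 256]) torch.Size([1, 256, 64, 64])
print(prompt_encoder.get_dense_pe().shape)  # torch.Size([1, 256, 64, 64])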
segment_anything/modeling/sam.py ADDED
@@ -0,0 +1,181 @@
# -*- coding: utf-8 -*-
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.

# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

import torch
from torch import nn
from torch.nn import functional as F

from typing import Any, Dict, List, Tuple

from .image_encoder import ImageEncoderViT
from .mask_decoder import MaskDecoder
from .prompt_encoder import PromptEncoder


class Sam(nn.Module):
    mask_threshold: float = 0.0
    image_format: str = "RGB"

    def __init__(
        self,
        image_encoder: ImageEncoderViT,
        prompt_encoder: PromptEncoder,
        mask_decoder: MaskDecoder,
        pixel_mean: List[float] = [123.675, 116.28, 103.53],
        pixel_std: List[float] = [58.395, 57.12, 57.375],
    ) -> None:
        """
        SAM predicts object masks from an image and input prompts.

        Arguments:
          image_encoder (ImageEncoderViT): The backbone used to encode the
            image into image embeddings that allow for efficient mask prediction.
          prompt_encoder (PromptEncoder): Encodes various types of input prompts.
          mask_decoder (MaskDecoder): Predicts masks from the image embeddings
            and encoded prompts.
          pixel_mean (list(float)): Mean values for normalizing pixels in the input image.
          pixel_std (list(float)): Std values for normalizing pixels in the input image.
        """
        super().__init__()
        self.image_encoder = image_encoder
        self.prompt_encoder = prompt_encoder
        self.mask_decoder = mask_decoder
        self.register_buffer(
            "pixel_mean", torch.Tensor(pixel_mean).view(-1, 1, 1), False
        )
        self.register_buffer("pixel_std", torch.Tensor(pixel_std).view(-1, 1, 1), False)

    @property
    def device(self) -> Any:
        return self.pixel_mean.device

    @torch.no_grad()
    def forward(
        self,
        batched_input: List[Dict[str, Any]],
        multimask_output: bool,
    ) -> List[Dict[str, torch.Tensor]]:
        """
        Predicts masks end-to-end from provided images and prompts.
        If prompts are not known in advance, using SamPredictor is
        recommended over calling the model directly.

        Arguments:
          batched_input (list(dict)): A list over input images, each a
            dictionary with the following keys. A prompt key can be
            excluded if it is not present.
              'image': The image as a torch tensor in 3xHxW format,
                already transformed for input to the model.
              'original_size': (tuple(int, int)) The original size of
                the image before transformation, as (H, W).
              'point_coords': (torch.Tensor) Batched point prompts for
                this image, with shape BxNx2. Already transformed to the
                input frame of the model.
              'point_labels': (torch.Tensor) Batched labels for point prompts,
                with shape BxN.
              'boxes': (torch.Tensor) Batched box inputs, with shape Bx4.
                Already transformed to the input frame of the model.
              'mask_inputs': (torch.Tensor) Batched mask inputs to the model,
                in the form Bx1xHxW.
          multimask_output (bool): Whether the model should predict multiple
            disambiguating masks, or return a single mask.

        Returns:
88
+ (list(dict)): A list over input images, where each element is
89
+ a dictionary with the following keys.
90
+ 'masks': (torch.Tensor) Batched binary mask predictions,
91
+ with shape BxCxHxW, where B is the number of input prompts,
92
+ C is determined by multimask_output, and (H, W) is the
93
+ original size of the image.
94
+ 'iou_predictions': (torch.Tensor) The model's predictions
95
+ of mask quality, in shape BxC.
96
+ 'low_res_logits': (torch.Tensor) Low resolution logits with
97
+ shape BxCxHxW, where H=W=256. Can be passed as mask input
98
+ to subsequent iterations of prediction.
99
+ """
100
+ input_images = torch.stack(
101
+ [self.preprocess(x["image"]) for x in batched_input], dim=0
102
+ )
103
+ image_embeddings = self.image_encoder(input_images)
104
+
105
+ outputs = []
106
+ for image_record, curr_embedding in zip(batched_input, image_embeddings):
107
+ if "point_coords" in image_record:
108
+ points = (image_record["point_coords"], image_record["point_labels"])
109
+ else:
110
+ points = None
111
+ sparse_embeddings, dense_embeddings = self.prompt_encoder(
112
+ points=points,
113
+ boxes=image_record.get("boxes", None),
114
+ masks=image_record.get("mask_inputs", None),
115
+ )
116
+ low_res_masks, iou_predictions = self.mask_decoder(
117
+ image_embeddings=curr_embedding.unsqueeze(0),
118
+ image_pe=self.prompt_encoder.get_dense_pe(),
119
+ sparse_prompt_embeddings=sparse_embeddings,
120
+ dense_prompt_embeddings=dense_embeddings,
121
+ multimask_output=multimask_output,
122
+ )
123
+ masks = self.postprocess_masks(
124
+ low_res_masks,
125
+ input_size=image_record["image"].shape[-2:],
126
+ original_size=image_record["original_size"],
127
+ )
128
+ masks = masks > self.mask_threshold
129
+ outputs.append(
130
+ {
131
+ "masks": masks,
132
+ "iou_predictions": iou_predictions,
133
+ "low_res_logits": low_res_masks,
134
+ }
135
+ )
136
+ return outputs
137
+
138
+ def postprocess_masks(
139
+ self,
140
+ masks: torch.Tensor,
141
+ input_size: Tuple[int, ...],
142
+ original_size: Tuple[int, ...],
143
+ ) -> torch.Tensor:
144
+ """
145
+ Remove padding and upscale masks to the original image size.
146
+
147
+ Arguments:
148
+ masks (torch.Tensor): Batched masks from the mask_decoder,
149
+ in BxCxHxW format.
150
+ input_size (tuple(int, int)): The size of the image input to the
151
+ model, in (H, W) format. Used to remove padding.
152
+ original_size (tuple(int, int)): The original size of the image
153
+ before resizing for input to the model, in (H, W) format.
154
+
155
+ Returns:
156
+ (torch.Tensor): Batched masks in BxCxHxW format, where (H, W)
157
+ is given by original_size.
158
+ """
159
+ masks = F.interpolate(
160
+ masks,
161
+ (self.image_encoder.img_size, self.image_encoder.img_size),
162
+ mode="bilinear",
163
+ align_corners=False,
164
+ )
165
+ masks = masks[..., : input_size[0], : input_size[1]]
166
+ masks = F.interpolate(
167
+ masks, original_size, mode="bilinear", align_corners=False
168
+ )
169
+ return masks
170
+
171
+ def preprocess(self, x: torch.Tensor) -> torch.Tensor:
172
+ """Normalize pixel values and pad to a square input."""
173
+ # Normalize colors
174
+ x = (x - self.pixel_mean) / self.pixel_std
175
+
176
+ # Pad
177
+ h, w = x.shape[-2:]
178
+ padh = self.image_encoder.img_size - h
179
+ padw = self.image_encoder.img_size - w
180
+ x = F.pad(x, (0, padw, 0, padh))
181
+ return x
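A minimal sketch of the batched_input format documented in forward() above; `sam` is assumed to be an already constructed Sam instance with a 1024-pixel input frame, and the image and prompt values are placeholders rather than real data.

import torch

image = torch.zeros(3, 1024, 1024)  # already resized to the model's input frame
batched_input = [
    {
        "image": image,
        "original_size": (768, 1024),                      # (H, W) before resizing
        "point_coords": torch.tensor([[[500.0, 375.0]]]),  # B x N x 2, input-frame pixels
        "point_labels": torch.tensor([[1]]),                # B x N, 1 = foreground
    }
]
# outputs = sam(batched_input, multimask_output=True)
# outputs[0]["masks"]           -> B x C x H x W boolean masks at original_size
# outputs[0]["iou_predictions"] -> B x C predicted mask quality scores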
segment_anything/modeling/transformer.py ADDED
@@ -0,0 +1,243 @@
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
3
+ # All rights reserved.
4
+
5
+ # This source code is licensed under the license found in the
6
+ # LICENSE file in the root directory of this source tree.
7
+
8
+ import torch
9
+ from torch import Tensor, nn
10
+
11
+ import math
12
+ from typing import Tuple, Type
13
+
14
+ from .common import MLPBlock
15
+
16
+
17
+ class TwoWayTransformer(nn.Module):
18
+ def __init__(
19
+ self,
20
+ depth: int,
21
+ embedding_dim: int,
22
+ num_heads: int,
23
+ mlp_dim: int,
24
+ activation: Type[nn.Module] = nn.ReLU,
25
+ attention_downsample_rate: int = 2,
26
+ ) -> None:
27
+ """
28
+ A transformer decoder that attends to an input image using
29
+ queries whose positional embedding is supplied.
30
+
31
+ Args:
32
+ depth (int): number of layers in the transformer
33
+ embedding_dim (int): the channel dimension for the input embeddings
34
+ num_heads (int): the number of heads for multihead attention. Must
35
+ divide embedding_dim
36
+ mlp_dim (int): the channel dimension internal to the MLP block
37
+ activation (nn.Module): the activation to use in the MLP block
38
+ """
39
+ super().__init__()
40
+ self.depth = depth
41
+ self.embedding_dim = embedding_dim
42
+ self.num_heads = num_heads
43
+ self.mlp_dim = mlp_dim
44
+ self.layers = nn.ModuleList()
45
+
46
+ for i in range(depth):
47
+ self.layers.append(
48
+ TwoWayAttentionBlock(
49
+ embedding_dim=embedding_dim,
50
+ num_heads=num_heads,
51
+ mlp_dim=mlp_dim,
52
+ activation=activation,
53
+ attention_downsample_rate=attention_downsample_rate,
54
+ skip_first_layer_pe=(i == 0),
55
+ )
56
+ )
57
+
58
+ self.final_attn_token_to_image = Attention(
59
+ embedding_dim, num_heads, downsample_rate=attention_downsample_rate
60
+ )
61
+ self.norm_final_attn = nn.LayerNorm(embedding_dim)
62
+
63
+ def forward(
64
+ self,
65
+ image_embedding: Tensor,
66
+ image_pe: Tensor,
67
+ point_embedding: Tensor,
68
+ ) -> Tuple[Tensor, Tensor]:
69
+ """
70
+ Args:
71
+ image_embedding (torch.Tensor): image to attend to. Should be shape
72
+ B x embedding_dim x h x w for any h and w.
73
+ image_pe (torch.Tensor): the positional encoding to add to the image. Must
74
+ have the same shape as image_embedding.
75
+ point_embedding (torch.Tensor): the embedding to add to the query points.
76
+ Must have shape B x N_points x embedding_dim for any N_points.
77
+
78
+ Returns:
79
+ torch.Tensor: the processed point_embedding
80
+ torch.Tensor: the processed image_embedding
81
+ """
82
+ # BxCxHxW -> BxHWxC == B x N_image_tokens x C
83
+ bs, c, h, w = image_embedding.shape
84
+ image_embedding = image_embedding.flatten(2).permute(0, 2, 1)
85
+ image_pe = image_pe.flatten(2).permute(0, 2, 1)
86
+
87
+ # Prepare queries
88
+ queries = point_embedding
89
+ keys = image_embedding
90
+
91
+ # Apply transformer blocks and final layernorm
92
+ for layer in self.layers:
93
+ queries, keys = layer(
94
+ queries=queries,
95
+ keys=keys,
96
+ query_pe=point_embedding,
97
+ key_pe=image_pe,
98
+ )
99
+
100
+ # Apply the final attention layer from the points to the image
101
+ q = queries + point_embedding
102
+ k = keys + image_pe
103
+ attn_out = self.final_attn_token_to_image(q=q, k=k, v=keys)
104
+ queries = queries + attn_out
105
+ queries = self.norm_final_attn(queries)
106
+
107
+ return queries, keys
108
+
109
+
110
+ class TwoWayAttentionBlock(nn.Module):
111
+ def __init__(
112
+ self,
113
+ embedding_dim: int,
114
+ num_heads: int,
115
+ mlp_dim: int = 2048,
116
+ activation: Type[nn.Module] = nn.ReLU,
117
+ attention_downsample_rate: int = 2,
118
+ skip_first_layer_pe: bool = False,
119
+ ) -> None:
120
+ """
121
+ A transformer block with four layers: (1) self-attention of sparse
122
+ inputs, (2) cross attention of sparse inputs to dense inputs, (3) mlp
123
+ block on sparse inputs, and (4) cross attention of dense inputs to sparse
124
+ inputs.
125
+
126
+ Arguments:
127
+ embedding_dim (int): the channel dimension of the embeddings
128
+ num_heads (int): the number of heads in the attention layers
129
+ mlp_dim (int): the hidden dimension of the mlp block
130
+ activation (nn.Module): the activation of the mlp block
131
+ skip_first_layer_pe (bool): skip the PE on the first layer
132
+ """
133
+ super().__init__()
134
+ self.self_attn = Attention(embedding_dim, num_heads)
135
+ self.norm1 = nn.LayerNorm(embedding_dim)
136
+
137
+ self.cross_attn_token_to_image = Attention(
138
+ embedding_dim, num_heads, downsample_rate=attention_downsample_rate
139
+ )
140
+ self.norm2 = nn.LayerNorm(embedding_dim)
141
+
142
+ self.mlp = MLPBlock(embedding_dim, mlp_dim, activation)
143
+ self.norm3 = nn.LayerNorm(embedding_dim)
144
+
145
+ self.norm4 = nn.LayerNorm(embedding_dim)
146
+ self.cross_attn_image_to_token = Attention(
147
+ embedding_dim, num_heads, downsample_rate=attention_downsample_rate
148
+ )
149
+
150
+ self.skip_first_layer_pe = skip_first_layer_pe
151
+
152
+ def forward(
153
+ self, queries: Tensor, keys: Tensor, query_pe: Tensor, key_pe: Tensor
154
+ ) -> Tuple[Tensor, Tensor]:
155
+ # Self attention block
156
+ if self.skip_first_layer_pe:
157
+ queries = self.self_attn(q=queries, k=queries, v=queries)
158
+ else:
159
+ q = queries + query_pe
160
+ attn_out = self.self_attn(q=q, k=q, v=queries)
161
+ queries = queries + attn_out
162
+ queries = self.norm1(queries)
163
+
164
+ # Cross attention block, tokens attending to image embedding
165
+ q = queries + query_pe
166
+ k = keys + key_pe
167
+ attn_out = self.cross_attn_token_to_image(q=q, k=k, v=keys)
168
+ queries = queries + attn_out
169
+ queries = self.norm2(queries)
170
+
171
+ # MLP block
172
+ mlp_out = self.mlp(queries)
173
+ queries = queries + mlp_out
174
+ queries = self.norm3(queries)
175
+
176
+ # Cross attention block, image embedding attending to tokens
177
+ q = queries + query_pe
178
+ k = keys + key_pe
179
+ attn_out = self.cross_attn_image_to_token(q=k, k=q, v=queries)
180
+ keys = keys + attn_out
181
+ keys = self.norm4(keys)
182
+
183
+ return queries, keys
184
+
185
+
186
+ class Attention(nn.Module):
187
+ """
188
+ An attention layer that allows for downscaling the size of the embedding
189
+ after projection to queries, keys, and values.
190
+ """
191
+
192
+ def __init__(
193
+ self,
194
+ embedding_dim: int,
195
+ num_heads: int,
196
+ downsample_rate: int = 1,
197
+ ) -> None:
198
+ super().__init__()
199
+ self.embedding_dim = embedding_dim
200
+ self.internal_dim = embedding_dim // downsample_rate
201
+ self.num_heads = num_heads
202
+ assert (
203
+ self.internal_dim % num_heads == 0
204
+ ), "num_heads must divide embedding_dim."
205
+
206
+ self.q_proj = nn.Linear(embedding_dim, self.internal_dim)
207
+ self.k_proj = nn.Linear(embedding_dim, self.internal_dim)
208
+ self.v_proj = nn.Linear(embedding_dim, self.internal_dim)
209
+ self.out_proj = nn.Linear(self.internal_dim, embedding_dim)
210
+
211
+ def _separate_heads(self, x: Tensor, num_heads: int) -> Tensor:
212
+ b, n, c = x.shape
213
+ x = x.reshape(b, n, num_heads, c // num_heads)
214
+ return x.transpose(1, 2) # B x N_heads x N_tokens x C_per_head
215
+
216
+ def _recombine_heads(self, x: Tensor) -> Tensor:
217
+ b, n_heads, n_tokens, c_per_head = x.shape
218
+ x = x.transpose(1, 2)
219
+ return x.reshape(b, n_tokens, n_heads * c_per_head) # B x N_tokens x C
220
+
221
+ def forward(self, q: Tensor, k: Tensor, v: Tensor) -> Tensor:
222
+ # Input projections
223
+ q = self.q_proj(q)
224
+ k = self.k_proj(k)
225
+ v = self.v_proj(v)
226
+
227
+ # Separate into heads
228
+ q = self._separate_heads(q, self.num_heads)
229
+ k = self._separate_heads(k, self.num_heads)
230
+ v = self._separate_heads(v, self.num_heads)
231
+
232
+ # Attention
233
+ _, _, _, c_per_head = q.shape
234
+ attn = q @ k.permute(0, 1, 3, 2) # B x N_heads x N_tokens x N_tokens
235
+ attn = attn / math.sqrt(c_per_head)
236
+ attn = torch.softmax(attn, dim=-1)
237
+
238
+ # Get output
239
+ out = attn @ v
240
+ out = self._recombine_heads(out)
241
+ out = self.out_proj(out)
242
+
243
+ return out
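For orientation, a shape sketch of the TwoWayTransformer defined above, using typical SAM dimensions purely as illustrative values (the class itself does not fix them); it assumes MLPBlock from .common is importable as in this file.

import torch

transformer = TwoWayTransformer(depth=2, embedding_dim=256, num_heads=8, mlp_dim=2048)
image_embedding = torch.randn(1, 256, 64, 64)  # B x C x H x W image tokens
image_pe = torch.randn(1, 256, 64, 64)         # positional encoding, same shape
point_embedding = torch.randn(1, 7, 256)       # B x N_points x C query tokens

queries, keys = transformer(image_embedding, image_pe, point_embedding)
assert queries.shape == (1, 7, 256)     # processed point embeddings
assert keys.shape == (1, 64 * 64, 256)  # processed (flattened) image embedding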
segment_anything/predictor.py ADDED
@@ -0,0 +1,286 @@
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
3
+ # All rights reserved.
4
+
5
+ # This source code is licensed under the license found in the
6
+ # LICENSE file in the root directory of this source tree.
7
+
8
+ import numpy as np
9
+ import torch
10
+
11
+ from segment_anything.modeling import Sam
12
+
13
+ from typing import Optional, Tuple
14
+
15
+ from .utils.transforms import ResizeLongestSide
16
+
17
+
18
+ class SamPredictor:
19
+ def __init__(
20
+ self,
21
+ sam_model: Sam,
22
+ ) -> None:
23
+ """
24
+ Uses SAM to calculate the image embedding for an image, and then
25
+ allow repeated, efficient mask prediction given prompts.
26
+
27
+ Arguments:
28
+ sam_model (Sam): The model to use for mask prediction.
29
+ """
30
+ super().__init__()
31
+ self.model = sam_model
32
+ self.transform = ResizeLongestSide(sam_model.image_encoder.img_size)
33
+ self.reset_image()
34
+
35
+ def set_image(
36
+ self,
37
+ image: np.ndarray,
38
+ image_format: str = "RGB",
39
+ ) -> None:
40
+ """
41
+ Calculates the image embeddings for the provided image, allowing
42
+ masks to be predicted with the 'predict' method.
43
+
44
+ Arguments:
45
+ image (np.ndarray): The image for calculating masks. Expects an
46
+ image in HWC uint8 format, with pixel values in [0, 255].
47
+ image_format (str): The color format of the image, in ['RGB', 'BGR'].
48
+ """
49
+ assert image_format in [
50
+ "RGB",
51
+ "BGR",
52
+ ], f"image_format must be in ['RGB', 'BGR'], is {image_format}."
53
+ if image_format != self.model.image_format:
54
+ image = image[..., ::-1]
55
+
56
+ # Transform the image to the form expected by the model
57
+ input_image = self.transform.apply_image(image)
58
+ input_image_torch = torch.as_tensor(input_image, device=self.device)
59
+ input_image_torch = input_image_torch.permute(2, 0, 1).contiguous()[
60
+ None, :, :, :
61
+ ]
62
+
63
+ self.set_torch_image(input_image_torch, image.shape[:2])
64
+
65
+ @torch.no_grad()
66
+ def set_torch_image(
67
+ self,
68
+ transformed_image: torch.Tensor,
69
+ original_image_size: Tuple[int, ...],
70
+ ) -> None:
71
+ """
72
+ Calculates the image embeddings for the provided image, allowing
73
+ masks to be predicted with the 'predict' method. Expects the input
74
+ image to be already transformed to the format expected by the model.
75
+
76
+ Arguments:
77
+ transformed_image (torch.Tensor): The input image, with shape
78
+ 1x3xHxW, which has been transformed with ResizeLongestSide.
79
+ original_image_size (tuple(int, int)): The size of the image
80
+ before transformation, in (H, W) format.
81
+ """
82
+ assert (
83
+ len(transformed_image.shape) == 4
84
+ and transformed_image.shape[1] == 3
85
+ and max(*transformed_image.shape[2:]) == self.model.image_encoder.img_size
86
+ ), f"set_torch_image input must be BCHW with long side {self.model.image_encoder.img_size}."
87
+ self.reset_image()
88
+
89
+ self.original_size = original_image_size
90
+ self.input_size = tuple(transformed_image.shape[-2:])
91
+ input_image = self.model.preprocess(transformed_image)
92
+ self.features = self.model.image_encoder(input_image)
93
+ self.is_image_set = True
94
+
95
+ def predict(
96
+ self,
97
+ point_coords: Optional[np.ndarray] = None,
98
+ point_labels: Optional[np.ndarray] = None,
99
+ box: Optional[np.ndarray] = None,
100
+ mask_input: Optional[np.ndarray] = None,
101
+ multimask_output: bool = True,
102
+ return_logits: bool = False,
103
+ ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
104
+ """
105
+ Predict masks for the given input prompts, using the currently set image.
106
+
107
+ Arguments:
108
+ point_coords (np.ndarray or None): A Nx2 array of point prompts to the
109
+ model. Each point is in (X,Y) in pixels.
110
+ point_labels (np.ndarray or None): A length N array of labels for the
111
+ point prompts. 1 indicates a foreground point and 0 indicates a
112
+ background point.
113
+ box (np.ndarray or None): A length 4 array giving a box prompt to the
114
+ model, in XYXY format.
115
+ mask_input (np.ndarray or None): A low resolution mask input to the model, typically
116
+ coming from a previous prediction iteration. Has form 1xHxW, where
117
+ for SAM, H=W=256.
118
+ multimask_output (bool): If true, the model will return three masks.
119
+ For ambiguous input prompts (such as a single click), this will often
120
+ produce better masks than a single prediction. If only a single
121
+ mask is needed, the model's predicted quality score can be used
122
+ to select the best mask. For non-ambiguous prompts, such as multiple
123
+ input prompts, multimask_output=False can give better results.
124
+ return_logits (bool): If true, returns un-thresholded mask logits
125
+ instead of a binary mask.
126
+
127
+ Returns:
128
+ (np.ndarray): The output masks in CxHxW format, where C is the
129
+ number of masks, and (H, W) is the original image size.
130
+ (np.ndarray): An array of length C containing the model's
131
+ predictions for the quality of each mask.
132
+ (np.ndarray): An array of shape CxHxW, where C is the number
133
+ of masks and H=W=256. These low resolution logits can be passed to
134
+ a subsequent iteration as mask input.
135
+ """
136
+ if not self.is_image_set:
137
+ raise RuntimeError(
138
+ "An image must be set with .set_image(...) before mask prediction."
139
+ )
140
+
141
+ # Transform input prompts
142
+ coords_torch, labels_torch, box_torch, mask_input_torch = None, None, None, None
143
+ if point_coords is not None:
144
+ assert (
145
+ point_labels is not None
146
+ ), "point_labels must be supplied if point_coords is supplied."
147
+ point_coords = self.transform.apply_coords(point_coords, self.original_size)
148
+ coords_torch = torch.as_tensor(
149
+ point_coords, dtype=torch.float, device=self.device
150
+ )
151
+ labels_torch = torch.as_tensor(
152
+ point_labels, dtype=torch.int, device=self.device
153
+ )
154
+ coords_torch, labels_torch = coords_torch[None, :, :], labels_torch[None, :]
155
+ if box is not None:
156
+ box = self.transform.apply_boxes(box, self.original_size)
157
+ box_torch = torch.as_tensor(box, dtype=torch.float, device=self.device)
158
+ box_torch = box_torch[None, :]
159
+ if mask_input is not None:
160
+ mask_input_torch = torch.as_tensor(
161
+ mask_input, dtype=torch.float, device=self.device
162
+ )
163
+ mask_input_torch = mask_input_torch[None, :, :, :]
164
+
165
+ masks, iou_predictions, low_res_masks = self.predict_torch(
166
+ coords_torch,
167
+ labels_torch,
168
+ box_torch,
169
+ mask_input_torch,
170
+ multimask_output,
171
+ return_logits=return_logits,
172
+ )
173
+
174
+ masks_np = masks[0].detach().cpu().numpy()
175
+ iou_predictions_np = iou_predictions[0].detach().cpu().numpy()
176
+ low_res_masks_np = low_res_masks[0].detach().cpu().numpy()
177
+ return masks_np, iou_predictions_np, low_res_masks_np
178
+
179
+ @torch.no_grad()
180
+ def predict_torch(
181
+ self,
182
+ point_coords: Optional[torch.Tensor],
183
+ point_labels: Optional[torch.Tensor],
184
+ boxes: Optional[torch.Tensor] = None,
185
+ mask_input: Optional[torch.Tensor] = None,
186
+ multimask_output: bool = True,
187
+ return_logits: bool = False,
188
+ ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
189
+ """
190
+ Predict masks for the given input prompts, using the currently set image.
191
+ Input prompts are batched torch tensors and are expected to already be
192
+ transformed to the input frame using ResizeLongestSide.
193
+
194
+ Arguments:
195
+ point_coords (torch.Tensor or None): A BxNx2 array of point prompts to the
196
+ model. Each point is in (X,Y) in pixels.
197
+ point_labels (torch.Tensor or None): A BxN array of labels for the
198
+ point prompts. 1 indicates a foreground point and 0 indicates a
199
+ background point.
200
+ boxes (torch.Tensor or None): A Bx4 array giving a box prompt to the
201
+ model, in XYXY format.
202
+ mask_input (torch.Tensor or None): A low resolution mask input to the model, typically
203
+ coming from a previous prediction iteration. Has form Bx1xHxW, where
204
+ for SAM, H=W=256. Masks returned by a previous iteration of the
205
+ predict method do not need further transformation.
206
+ multimask_output (bool): If true, the model will return three masks.
207
+ For ambiguous input prompts (such as a single click), this will often
208
+ produce better masks than a single prediction. If only a single
209
+ mask is needed, the model's predicted quality score can be used
210
+ to select the best mask. For non-ambiguous prompts, such as multiple
211
+ input prompts, multimask_output=False can give better results.
212
+ return_logits (bool): If true, returns un-thresholded mask logits
213
+ instead of a binary mask.
214
+
215
+ Returns:
216
+ (torch.Tensor): The output masks in BxCxHxW format, where C is the
217
+ number of masks, and (H, W) is the original image size.
218
+ (torch.Tensor): An array of shape BxC containing the model's
219
+ predictions for the quality of each mask.
220
+ (torch.Tensor): An array of shape BxCxHxW, where C is the number
221
+ of masks and H=W=256. These low res logits can be passed to
222
+ a subsequent iteration as mask input.
223
+ """
224
+ if not self.is_image_set:
225
+ raise RuntimeError(
226
+ "An image must be set with .set_image(...) before mask prediction."
227
+ )
228
+
229
+ if point_coords is not None:
230
+ points = (point_coords, point_labels)
231
+ else:
232
+ points = None
233
+
234
+ # Embed prompts
235
+ sparse_embeddings, dense_embeddings = self.model.prompt_encoder(
236
+ points=points,
237
+ boxes=boxes,
238
+ masks=mask_input,
239
+ )
240
+
241
+ # Predict masks
242
+ low_res_masks, iou_predictions = self.model.mask_decoder(
243
+ image_embeddings=self.features,
244
+ image_pe=self.model.prompt_encoder.get_dense_pe(),
245
+ sparse_prompt_embeddings=sparse_embeddings,
246
+ dense_prompt_embeddings=dense_embeddings,
247
+ multimask_output=multimask_output,
248
+ )
249
+
250
+ # Upscale the masks to the original image resolution
251
+ masks = self.model.postprocess_masks(
252
+ low_res_masks, self.input_size, self.original_size
253
+ )
254
+
255
+ if not return_logits:
256
+ masks = masks > self.model.mask_threshold
257
+
258
+ return masks, iou_predictions, low_res_masks
259
+
260
+ def get_image_embedding(self) -> torch.Tensor:
261
+ """
262
+ Returns the image embeddings for the currently set image, with
263
+ shape 1xCxHxW, where C is the embedding dimension and (H,W) are
264
+ the embedding spatial dimensions of SAM (typically C=256, H=W=64).
265
+ """
266
+ if not self.is_image_set:
267
+ raise RuntimeError(
268
+ "An image must be set with .set_image(...) to generate an embedding."
269
+ )
270
+ assert (
271
+ self.features is not None
272
+ ), "Features must exist if an image has been set."
273
+ return self.features
274
+
275
+ @property
276
+ def device(self) -> torch.device:
277
+ return self.model.device
278
+
279
+ def reset_image(self) -> None:
280
+ """Resets the currently set image."""
281
+ self.is_image_set = False
282
+ self.features = None
283
+ self.orig_h = None
284
+ self.orig_w = None
285
+ self.input_h = None
286
+ self.input_w = None
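A typical prompt-to-mask flow with SamPredictor is sketched below; `sam` is assumed to be an already loaded Sam model, `image` an HWC uint8 RGB numpy array, and the point coordinates are chosen arbitrarily.

import numpy as np

predictor = SamPredictor(sam)
predictor.set_image(image)            # the image embedding is computed once here

input_point = np.array([[500, 375]])  # N x 2, (X, Y) in original-image pixels
input_label = np.array([1])           # 1 = foreground, 0 = background
masks, scores, low_res_logits = predictor.predict(
    point_coords=input_point,
    point_labels=input_label,
    multimask_output=True,
)
# masks: C x H x W booleans at the original image size; scores: length-C quality estimates.
# A selected 1 x 256 x 256 slice of low_res_logits can be passed back as mask_input
# on a later call to refine the prediction.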
segment_anything/utils/__init__.py ADDED
@@ -0,0 +1,6 @@
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
3
+ # All rights reserved.
4
+
5
+ # This source code is licensed under the license found in the
6
+ # LICENSE file in the root directory of this source tree.
segment_anything/utils/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (170 Bytes). View file
 
segment_anything/utils/__pycache__/amg.cpython-310.pyc ADDED
Binary file (12.1 kB). View file
 
segment_anything/utils/__pycache__/transforms.cpython-310.pyc ADDED
Binary file (4.01 kB). View file
 
segment_anything/utils/amg.py ADDED
@@ -0,0 +1,347 @@
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
3
+ # All rights reserved.
4
+
5
+ # This source code is licensed under the license found in the
6
+ # LICENSE file in the root directory of this source tree.
7
+
8
+ import numpy as np
9
+ import torch
10
+
11
+ import math
12
+ from copy import deepcopy
13
+ from itertools import product
14
+ from typing import Any, Dict, Generator, ItemsView, List, Tuple
15
+
16
+
17
+ class MaskData:
18
+ """
19
+ A structure for storing masks and their related data in batched format.
20
+ Implements basic filtering and concatenation.
21
+ """
22
+
23
+ def __init__(self, **kwargs) -> None:
24
+ for v in kwargs.values():
25
+ assert isinstance(
26
+ v, (list, np.ndarray, torch.Tensor)
27
+ ), "MaskData only supports list, numpy arrays, and torch tensors."
28
+ self._stats = dict(**kwargs)
29
+
30
+ def __setitem__(self, key: str, item: Any) -> None:
31
+ assert isinstance(
32
+ item, (list, np.ndarray, torch.Tensor)
33
+ ), "MaskData only supports list, numpy arrays, and torch tensors."
34
+ self._stats[key] = item
35
+
36
+ def __delitem__(self, key: str) -> None:
37
+ del self._stats[key]
38
+
39
+ def __getitem__(self, key: str) -> Any:
40
+ return self._stats[key]
41
+
42
+ def items(self) -> ItemsView[str, Any]:
43
+ return self._stats.items()
44
+
45
+ def filter(self, keep: torch.Tensor) -> None:
46
+ for k, v in self._stats.items():
47
+ if v is None:
48
+ self._stats[k] = None
49
+ elif isinstance(v, torch.Tensor):
50
+ self._stats[k] = v[torch.as_tensor(keep, device=v.device)]
51
+ elif isinstance(v, np.ndarray):
52
+ self._stats[k] = v[keep.detach().cpu().numpy()]
53
+ elif isinstance(v, list) and keep.dtype == torch.bool:
54
+ self._stats[k] = [a for i, a in enumerate(v) if keep[i]]
55
+ elif isinstance(v, list):
56
+ self._stats[k] = [v[i] for i in keep]
57
+ else:
58
+ raise TypeError(f"MaskData key {k} has an unsupported type {type(v)}.")
59
+
60
+ def cat(self, new_stats: "MaskData") -> None:
61
+ for k, v in new_stats.items():
62
+ if k not in self._stats or self._stats[k] is None:
63
+ self._stats[k] = deepcopy(v)
64
+ elif isinstance(v, torch.Tensor):
65
+ self._stats[k] = torch.cat([self._stats[k], v], dim=0)
66
+ elif isinstance(v, np.ndarray):
67
+ self._stats[k] = np.concatenate([self._stats[k], v], axis=0)
68
+ elif isinstance(v, list):
69
+ self._stats[k] = self._stats[k] + deepcopy(v)
70
+ else:
71
+ raise TypeError(f"MaskData key {k} has an unsupported type {type(v)}.")
72
+
73
+ def to_numpy(self) -> None:
74
+ for k, v in self._stats.items():
75
+ if isinstance(v, torch.Tensor):
76
+ self._stats[k] = v.detach().cpu().numpy()
77
+
78
+
79
+ def is_box_near_crop_edge(
80
+ boxes: torch.Tensor, crop_box: List[int], orig_box: List[int], atol: float = 20.0
81
+ ) -> torch.Tensor:
82
+ """Filter masks at the edge of a crop, but not at the edge of the original image."""
83
+ crop_box_torch = torch.as_tensor(crop_box, dtype=torch.float, device=boxes.device)
84
+ orig_box_torch = torch.as_tensor(orig_box, dtype=torch.float, device=boxes.device)
85
+ boxes = uncrop_boxes_xyxy(boxes, crop_box).float()
86
+ near_crop_edge = torch.isclose(boxes, crop_box_torch[None, :], atol=atol, rtol=0)
87
+ near_image_edge = torch.isclose(boxes, orig_box_torch[None, :], atol=atol, rtol=0)
88
+ near_crop_edge = torch.logical_and(near_crop_edge, ~near_image_edge)
89
+ return torch.any(near_crop_edge, dim=1)
90
+
91
+
92
+ def box_xyxy_to_xywh(box_xyxy: torch.Tensor) -> torch.Tensor:
93
+ box_xywh = deepcopy(box_xyxy)
94
+ box_xywh[2] = box_xywh[2] - box_xywh[0]
95
+ box_xywh[3] = box_xywh[3] - box_xywh[1]
96
+ return box_xywh
97
+
98
+
99
+ def batch_iterator(batch_size: int, *args) -> Generator[List[Any], None, None]:
100
+ assert len(args) > 0 and all(
101
+ len(a) == len(args[0]) for a in args
102
+ ), "Batched iteration must have inputs of all the same size."
103
+ n_batches = len(args[0]) // batch_size + int(len(args[0]) % batch_size != 0)
104
+ for b in range(n_batches):
105
+ yield [arg[b * batch_size : (b + 1) * batch_size] for arg in args]
106
+
107
+
108
+ def mask_to_rle_pytorch(tensor: torch.Tensor) -> List[Dict[str, Any]]:
109
+ """
110
+ Encodes masks to an uncompressed RLE, in the format expected by
111
+ pycocotools.
112
+ """
113
+ # Put in fortran order and flatten h,w
114
+ b, h, w = tensor.shape
115
+ tensor = tensor.permute(0, 2, 1).flatten(1)
116
+
117
+ # Compute change indices
118
+ diff = tensor[:, 1:] ^ tensor[:, :-1]
119
+ change_indices = diff.nonzero()
120
+
121
+ # Encode run length
122
+ out = []
123
+ for i in range(b):
124
+ cur_idxs = change_indices[change_indices[:, 0] == i, 1]
125
+ cur_idxs = torch.cat(
126
+ [
127
+ torch.tensor([0], dtype=cur_idxs.dtype, device=cur_idxs.device),
128
+ cur_idxs + 1,
129
+ torch.tensor([h * w], dtype=cur_idxs.dtype, device=cur_idxs.device),
130
+ ]
131
+ )
132
+ btw_idxs = cur_idxs[1:] - cur_idxs[:-1]
133
+ counts = [] if tensor[i, 0] == 0 else [0]
134
+ counts.extend(btw_idxs.detach().cpu().tolist())
135
+ out.append({"size": [h, w], "counts": counts})
136
+ return out
137
+
138
+
139
+ def rle_to_mask(rle: Dict[str, Any]) -> np.ndarray:
140
+ """Compute a binary mask from an uncompressed RLE."""
141
+ h, w = rle["size"]
142
+ mask = np.empty(h * w, dtype=bool)
143
+ idx = 0
144
+ parity = False
145
+ for count in rle["counts"]:
146
+ mask[idx : idx + count] = parity
147
+ idx += count
148
+ parity ^= True
149
+ mask = mask.reshape(w, h)
150
+ return mask.transpose() # Put in C order
151
+
152
+
153
+ def area_from_rle(rle: Dict[str, Any]) -> int:
154
+ return sum(rle["counts"][1::2])
155
+
156
+
157
+ def calculate_stability_score(
158
+ masks: torch.Tensor, mask_threshold: float, threshold_offset: float
159
+ ) -> torch.Tensor:
160
+ """
161
+ Computes the stability score for a batch of masks. The stability
162
+ score is the IoU between the binary masks obtained by thresholding
163
+ the predicted mask logits at high and low values.
164
+ """
165
+ # One mask is always contained inside the other.
166
+ # Save memory by preventing unnecessary cast to torch.int64
167
+ intersections = (
168
+ (masks > (mask_threshold + threshold_offset))
169
+ .sum(-1, dtype=torch.int16)
170
+ .sum(-1, dtype=torch.int32)
171
+ )
172
+ unions = (
173
+ (masks > (mask_threshold - threshold_offset))
174
+ .sum(-1, dtype=torch.int16)
175
+ .sum(-1, dtype=torch.int32)
176
+ )
177
+ return intersections / unions
178
+
179
+
180
+ def build_point_grid(n_per_side: int) -> np.ndarray:
181
+ """Generates a 2D grid of points evenly spaced in [0,1]x[0,1]."""
182
+ offset = 1 / (2 * n_per_side)
183
+ points_one_side = np.linspace(offset, 1 - offset, n_per_side)
184
+ points_x = np.tile(points_one_side[None, :], (n_per_side, 1))
185
+ points_y = np.tile(points_one_side[:, None], (1, n_per_side))
186
+ points = np.stack([points_x, points_y], axis=-1).reshape(-1, 2)
187
+ return points
188
+
189
+
190
+ def build_all_layer_point_grids(
191
+ n_per_side: int, n_layers: int, scale_per_layer: int
192
+ ) -> List[np.ndarray]:
193
+ """Generates point grids for all crop layers."""
194
+ points_by_layer = []
195
+ for i in range(n_layers + 1):
196
+ n_points = int(n_per_side / (scale_per_layer**i))
197
+ points_by_layer.append(build_point_grid(n_points))
198
+ return points_by_layer
199
+
200
+
201
+ def generate_crop_boxes(
202
+ im_size: Tuple[int, ...], n_layers: int, overlap_ratio: float
203
+ ) -> Tuple[List[List[int]], List[int]]:
204
+ """
205
+ Generates a list of crop boxes of different sizes. The ith layer
206
+ has (2**i)**2 boxes.
207
+ """
208
+ crop_boxes, layer_idxs = [], []
209
+ im_h, im_w = im_size
210
+ short_side = min(im_h, im_w)
211
+
212
+ # Original image
213
+ crop_boxes.append([0, 0, im_w, im_h])
214
+ layer_idxs.append(0)
215
+
216
+ def crop_len(orig_len, n_crops, overlap):
217
+ return int(math.ceil((overlap * (n_crops - 1) + orig_len) / n_crops))
218
+
219
+ for i_layer in range(n_layers):
220
+ n_crops_per_side = 2 ** (i_layer + 1)
221
+ overlap = int(overlap_ratio * short_side * (2 / n_crops_per_side))
222
+
223
+ crop_w = crop_len(im_w, n_crops_per_side, overlap)
224
+ crop_h = crop_len(im_h, n_crops_per_side, overlap)
225
+
226
+ crop_box_x0 = [int((crop_w - overlap) * i) for i in range(n_crops_per_side)]
227
+ crop_box_y0 = [int((crop_h - overlap) * i) for i in range(n_crops_per_side)]
228
+
229
+ # Crops in XYWH format
230
+ for x0, y0 in product(crop_box_x0, crop_box_y0):
231
+ box = [x0, y0, min(x0 + crop_w, im_w), min(y0 + crop_h, im_h)]
232
+ crop_boxes.append(box)
233
+ layer_idxs.append(i_layer + 1)
234
+
235
+ return crop_boxes, layer_idxs
236
+
237
+
238
+ def uncrop_boxes_xyxy(boxes: torch.Tensor, crop_box: List[int]) -> torch.Tensor:
239
+ x0, y0, _, _ = crop_box
240
+ offset = torch.tensor([[x0, y0, x0, y0]], device=boxes.device)
241
+ # Check if boxes has a channel dimension
242
+ if len(boxes.shape) == 3:
243
+ offset = offset.unsqueeze(1)
244
+ return boxes + offset
245
+
246
+
247
+ def uncrop_points(points: torch.Tensor, crop_box: List[int]) -> torch.Tensor:
248
+ x0, y0, _, _ = crop_box
249
+ offset = torch.tensor([[x0, y0]], device=points.device)
250
+ # Check if points has a channel dimension
251
+ if len(points.shape) == 3:
252
+ offset = offset.unsqueeze(1)
253
+ return points + offset
254
+
255
+
256
+ def uncrop_masks(
257
+ masks: torch.Tensor, crop_box: List[int], orig_h: int, orig_w: int
258
+ ) -> torch.Tensor:
259
+ x0, y0, x1, y1 = crop_box
260
+ if x0 == 0 and y0 == 0 and x1 == orig_w and y1 == orig_h:
261
+ return masks
262
+ # Coordinate transform masks
263
+ pad_x, pad_y = orig_w - (x1 - x0), orig_h - (y1 - y0)
264
+ pad = (x0, pad_x - x0, y0, pad_y - y0)
265
+ return torch.nn.functional.pad(masks, pad, value=0)
266
+
267
+
268
+ def remove_small_regions(
269
+ mask: np.ndarray, area_thresh: float, mode: str
270
+ ) -> Tuple[np.ndarray, bool]:
271
+ """
272
+ Removes small disconnected regions and holes in a mask. Returns the
273
+ mask and an indicator of whether the mask has been modified.
274
+ """
275
+ import cv2 # type: ignore
276
+
277
+ assert mode in ["holes", "islands"]
278
+ correct_holes = mode == "holes"
279
+ working_mask = (correct_holes ^ mask).astype(np.uint8)
280
+ n_labels, regions, stats, _ = cv2.connectedComponentsWithStats(working_mask, 8)
281
+ sizes = stats[:, -1][1:] # Row 0 is background label
282
+ small_regions = [i + 1 for i, s in enumerate(sizes) if s < area_thresh]
283
+ if len(small_regions) == 0:
284
+ return mask, False
285
+ fill_labels = [0] + small_regions
286
+ if not correct_holes:
287
+ fill_labels = [i for i in range(n_labels) if i not in fill_labels]
288
+ # If every region is below threshold, keep largest
289
+ if len(fill_labels) == 0:
290
+ fill_labels = [int(np.argmax(sizes)) + 1]
291
+ mask = np.isin(regions, fill_labels)
292
+ return mask, True
293
+
294
+
295
+ def coco_encode_rle(uncompressed_rle: Dict[str, Any]) -> Dict[str, Any]:
296
+ from pycocotools import mask as mask_utils # type: ignore
297
+
298
+ h, w = uncompressed_rle["size"]
299
+ rle = mask_utils.frPyObjects(uncompressed_rle, h, w)
300
+ rle["counts"] = rle["counts"].decode("utf-8") # Necessary to serialize with json
301
+ return rle
302
+
303
+
304
+ def batched_mask_to_box(masks: torch.Tensor) -> torch.Tensor:
305
+ """
306
+ Calculates boxes in XYXY format around masks. Return [0,0,0,0] for
307
+ an empty mask. For input shape C1xC2x...xHxW, the output shape is C1xC2x...x4.
308
+ """
309
+ # torch.max below raises an error on empty inputs, just skip in this case
310
+ if torch.numel(masks) == 0:
311
+ return torch.zeros(*masks.shape[:-2], 4, device=masks.device)
312
+
313
+ # Normalize shape to CxHxW
314
+ shape = masks.shape
315
+ h, w = shape[-2:]
316
+ if len(shape) > 2:
317
+ masks = masks.flatten(0, -3)
318
+ else:
319
+ masks = masks.unsqueeze(0)
320
+
321
+ # Get top and bottom edges
322
+ in_height, _ = torch.max(masks, dim=-1)
323
+ in_height_coords = in_height * torch.arange(h, device=in_height.device)[None, :]
324
+ bottom_edges, _ = torch.max(in_height_coords, dim=-1)
325
+ in_height_coords = in_height_coords + h * (~in_height)
326
+ top_edges, _ = torch.min(in_height_coords, dim=-1)
327
+
328
+ # Get left and right edges
329
+ in_width, _ = torch.max(masks, dim=-2)
330
+ in_width_coords = in_width * torch.arange(w, device=in_width.device)[None, :]
331
+ right_edges, _ = torch.max(in_width_coords, dim=-1)
332
+ in_width_coords = in_width_coords + w * (~in_width)
333
+ left_edges, _ = torch.min(in_width_coords, dim=-1)
334
+
335
+ # If the mask is empty the right edge will be to the left of the left edge.
336
+ # Replace these boxes with [0, 0, 0, 0]
337
+ empty_filter = (right_edges < left_edges) | (bottom_edges < top_edges)
338
+ out = torch.stack([left_edges, top_edges, right_edges, bottom_edges], dim=-1)
339
+ out = out * (~empty_filter).unsqueeze(-1)
340
+
341
+ # Return to original shape
342
+ if len(shape) > 2:
343
+ out = out.reshape(*shape[:-2], 4)
344
+ else:
345
+ out = out[0]
346
+
347
+ return out
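The RLE helpers above can be exercised with a tiny synthetic mask; the 4x4 example below is purely illustrative.

import torch

masks = torch.zeros(1, 4, 4, dtype=torch.bool)  # B x H x W binary masks
masks[0, 1:3, 1:3] = True                       # a 2x2 square of foreground

rles = mask_to_rle_pytorch(masks)               # uncompressed, pycocotools-style RLE
assert area_from_rle(rles[0]) == 4              # foreground pixel count

recovered = rle_to_mask(rles[0])                # back to an H x W numpy bool array
assert (recovered == masks[0].numpy()).all()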
segment_anything/utils/onnx.py ADDED
@@ -0,0 +1,158 @@
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
3
+ # All rights reserved.
4
+
5
+ # This source code is licensed under the license found in the
6
+ # LICENSE file in the root directory of this source tree.
7
+
8
+ import torch
9
+ import torch.nn as nn
10
+ from torch.nn import functional as F
11
+
12
+ from typing import Tuple
13
+
14
+ from ..modeling import Sam
15
+ from .amg import calculate_stability_score
16
+
17
+
18
+ class SamOnnxModel(nn.Module):
19
+ """
20
+ This model should not be called directly, but is used in ONNX export.
21
+ It combines the prompt encoder, mask decoder, and mask postprocessing of Sam,
22
+ with some functions modified to enable model tracing. Also supports extra
23
+ options controlling what information. See the ONNX export script for details.
24
+ """
25
+
26
+ def __init__(
27
+ self,
28
+ model: Sam,
29
+ return_single_mask: bool,
30
+ use_stability_score: bool = False,
31
+ return_extra_metrics: bool = False,
32
+ ) -> None:
33
+ super().__init__()
34
+ self.mask_decoder = model.mask_decoder
35
+ self.model = model
36
+ self.img_size = model.image_encoder.img_size
37
+ self.return_single_mask = return_single_mask
38
+ self.use_stability_score = use_stability_score
39
+ self.stability_score_offset = 1.0
40
+ self.return_extra_metrics = return_extra_metrics
41
+
42
+ @staticmethod
43
+ def resize_longest_image_size(
44
+ input_image_size: torch.Tensor, longest_side: int
45
+ ) -> torch.Tensor:
46
+ input_image_size = input_image_size.to(torch.float32)
47
+ scale = longest_side / torch.max(input_image_size)
48
+ transformed_size = scale * input_image_size
49
+ transformed_size = torch.floor(transformed_size + 0.5).to(torch.int64)
50
+ return transformed_size
51
+
52
+ def _embed_points(
53
+ self, point_coords: torch.Tensor, point_labels: torch.Tensor
54
+ ) -> torch.Tensor:
55
+ point_coords = point_coords + 0.5
56
+ point_coords = point_coords / self.img_size
57
+ point_embedding = self.model.prompt_encoder.pe_layer._pe_encoding(point_coords)
58
+ point_labels = point_labels.unsqueeze(-1).expand_as(point_embedding)
59
+
60
+ point_embedding = point_embedding * (point_labels != -1)
61
+ point_embedding = (
62
+ point_embedding
63
+ + self.model.prompt_encoder.not_a_point_embed.weight * (point_labels == -1)
64
+ )
65
+
66
+ for i in range(self.model.prompt_encoder.num_point_embeddings):
67
+ point_embedding = (
68
+ point_embedding
69
+ + self.model.prompt_encoder.point_embeddings[i].weight
70
+ * (point_labels == i)
71
+ )
72
+
73
+ return point_embedding
74
+
75
+ def _embed_masks(
76
+ self, input_mask: torch.Tensor, has_mask_input: torch.Tensor
77
+ ) -> torch.Tensor:
78
+ mask_embedding = has_mask_input * self.model.prompt_encoder.mask_downscaling(
79
+ input_mask
80
+ )
81
+ mask_embedding = mask_embedding + (
82
+ 1 - has_mask_input
83
+ ) * self.model.prompt_encoder.no_mask_embed.weight.reshape(1, -1, 1, 1)
84
+ return mask_embedding
85
+
86
+ def mask_postprocessing(
87
+ self, masks: torch.Tensor, orig_im_size: torch.Tensor
88
+ ) -> torch.Tensor:
89
+ masks = F.interpolate(
90
+ masks,
91
+ size=(self.img_size, self.img_size),
92
+ mode="bilinear",
93
+ align_corners=False,
94
+ )
95
+
96
+ prepadded_size = self.resize_longest_image_size(orig_im_size, self.img_size).to(
97
+ torch.int64
98
+ )
99
+ masks = masks[..., : prepadded_size[0], : prepadded_size[1]] # type: ignore
100
+
101
+ orig_im_size = orig_im_size.to(torch.int64)
102
+ h, w = orig_im_size[0], orig_im_size[1]
103
+ masks = F.interpolate(masks, size=(h, w), mode="bilinear", align_corners=False)
104
+ return masks
105
+
106
+ def select_masks(
107
+ self, masks: torch.Tensor, iou_preds: torch.Tensor, num_points: int
108
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
109
+ # Determine if we should return the multiclick mask or not from the number of points.
110
+ # The reweighting is used to avoid control flow.
111
+ score_reweight = torch.tensor(
112
+ [[1000] + [0] * (self.model.mask_decoder.num_mask_tokens - 1)]
113
+ ).to(iou_preds.device)
114
+ score = iou_preds + (num_points - 2.5) * score_reweight
115
+ best_idx = torch.argmax(score, dim=1)
116
+ masks = masks[torch.arange(masks.shape[0]), best_idx, :, :].unsqueeze(1)
117
+ iou_preds = iou_preds[torch.arange(masks.shape[0]), best_idx].unsqueeze(1)
118
+
119
+ return masks, iou_preds
120
+
121
+ @torch.no_grad()
122
+ def forward(
123
+ self,
124
+ image_embeddings: torch.Tensor,
125
+ point_coords: torch.Tensor,
126
+ point_labels: torch.Tensor,
127
+ mask_input: torch.Tensor,
128
+ has_mask_input: torch.Tensor,
129
+ orig_im_size: torch.Tensor,
130
+ ):
131
+ sparse_embedding = self._embed_points(point_coords, point_labels)
132
+ dense_embedding = self._embed_masks(mask_input, has_mask_input)
133
+
134
+ masks, scores = self.model.mask_decoder.predict_masks(
135
+ image_embeddings=image_embeddings,
136
+ image_pe=self.model.prompt_encoder.get_dense_pe(),
137
+ sparse_prompt_embeddings=sparse_embedding,
138
+ dense_prompt_embeddings=dense_embedding,
139
+ )
140
+
141
+ if self.use_stability_score:
142
+ scores = calculate_stability_score(
143
+ masks, self.model.mask_threshold, self.stability_score_offset
144
+ )
145
+
146
+ if self.return_single_mask:
147
+ masks, scores = self.select_masks(masks, scores, point_coords.shape[1])
148
+
149
+ upscaled_masks = self.mask_postprocessing(masks, orig_im_size)
150
+
151
+ if self.return_extra_metrics:
152
+ stability_scores = calculate_stability_score(
153
+ upscaled_masks, self.model.mask_threshold, self.stability_score_offset
154
+ )
155
+ areas = (upscaled_masks > self.model.mask_threshold).sum(-1).sum(-1)
156
+ return upscaled_masks, scores, stability_scores, areas, masks
157
+
158
+ return upscaled_masks, scores, masks
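A hedged sketch of how SamOnnxModel might be driven for export: the dummy shapes follow the forward() signature above, `sam` is an assumed, already loaded Sam model, and the commented-out torch.onnx.export call (including its file name and output names) is illustrative rather than prescriptive.

import torch

onnx_model = SamOnnxModel(model=sam, return_single_mask=True)

embed_dim = sam.prompt_encoder.embed_dim
embed_h, embed_w = sam.prompt_encoder.image_embedding_size
dummy_inputs = {
    "image_embeddings": torch.randn(1, embed_dim, embed_h, embed_w),
    "point_coords": torch.randint(0, 1024, (1, 5, 2)).float(),
    "point_labels": torch.randint(0, 4, (1, 5)).float(),
    "mask_input": torch.randn(1, 1, 256, 256),
    "has_mask_input": torch.tensor([1.0]),
    "orig_im_size": torch.tensor([1500.0, 2250.0]),
}
# torch.onnx.export(onnx_model, tuple(dummy_inputs.values()), "sam_decoder.onnx",
#                   input_names=list(dummy_inputs.keys()),
#                   output_names=["masks", "iou_predictions", "low_res_masks"])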
segment_anything/utils/transforms.py ADDED
@@ -0,0 +1,111 @@
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
3
+ # All rights reserved.
4
+
5
+ # This source code is licensed under the license found in the
6
+ # LICENSE file in the root directory of this source tree.
7
+
8
+ import numpy as np
9
+ import torch
10
+ from torch.nn import functional as F
11
+ from torchvision.transforms.functional import resize, to_pil_image # type: ignore
12
+
13
+ from copy import deepcopy
14
+ from typing import Tuple
15
+
16
+
17
+ class ResizeLongestSide:
18
+ """
19
+ Resizes images to the longest side 'target_length', as well as provides
20
+ methods for resizing coordinates and boxes. Provides methods for
21
+ transforming both numpy array and batched torch tensors.
22
+ """
23
+
24
+ def __init__(self, target_length: int) -> None:
25
+ self.target_length = target_length
26
+
27
+ def apply_image(self, image: np.ndarray) -> np.ndarray:
28
+ """
29
+ Expects a numpy array with shape HxWxC in uint8 format.
30
+ """
31
+ target_size = self.get_preprocess_shape(
32
+ image.shape[0], image.shape[1], self.target_length
33
+ )
34
+ return np.array(resize(to_pil_image(image), target_size))
35
+
36
+ def apply_coords(
37
+ self, coords: np.ndarray, original_size: Tuple[int, ...]
38
+ ) -> np.ndarray:
39
+ """
40
+ Expects a numpy array of length 2 in the final dimension. Requires the
41
+ original image size in (H, W) format.
42
+ """
43
+ old_h, old_w = original_size
44
+ new_h, new_w = self.get_preprocess_shape(old_h, old_w, self.target_length)
45
+ new_coords = np.empty_like(coords)
46
+ new_coords[..., 0] = coords[..., 0] * (new_w / old_w)
47
+ new_coords[..., 1] = coords[..., 1] * (new_h / old_h)
48
+ return new_coords
49
+
50
+ def apply_boxes(
51
+ self, boxes: np.ndarray, original_size: Tuple[int, ...]
52
+ ) -> np.ndarray:
53
+ """
54
+ Expects a numpy array shape Bx4. Requires the original image size
55
+ in (H, W) format.
56
+ """
57
+ boxes = self.apply_coords(boxes.reshape(-1, 2, 2), original_size)
58
+ return boxes.reshape(-1, 4)
59
+
60
+ def apply_image_torch(self, image: torch.Tensor) -> torch.Tensor:
61
+ """
62
+ Expects batched images with shape BxCxHxW and float format. This
63
+ transformation may not exactly match apply_image. apply_image is
64
+ the transformation expected by the model.
65
+ """
66
+ # Expects an image in BCHW format. May not exactly match apply_image.
67
+ target_size = self.get_preprocess_shape(
68
+ image.shape[2], image.shape[3], self.target_length
69
+ )
70
+ return F.interpolate(
71
+ image, target_size, mode="bilinear", align_corners=False, antialias=True
72
+ )
73
+
74
+ def apply_coords_torch(
75
+ self, coords: torch.Tensor, original_size: Tuple[int, ...]
76
+ ) -> torch.Tensor:
77
+ """
78
+ Expects a torch tensor with length 2 in the last dimension. Requires the
79
+ original image size in (H, W) format.
80
+ """
81
+ old_h, old_w = original_size
82
+ new_h, new_w = self.get_preprocess_shape(
83
+ original_size[0], original_size[1], self.target_length
84
+ )
85
+ coords = deepcopy(coords).to(torch.float)
86
+ coords[..., 0] = coords[..., 0] * (new_w / old_w)
87
+ coords[..., 1] = coords[..., 1] * (new_h / old_h)
88
+ return coords
89
+
90
+ def apply_boxes_torch(
91
+ self, boxes: torch.Tensor, original_size: Tuple[int, ...]
92
+ ) -> torch.Tensor:
93
+ """
94
+ Expects a torch tensor with shape Bx4. Requires the original image
95
+ size in (H, W) format.
96
+ """
97
+ boxes = self.apply_coords_torch(boxes.reshape(-1, 2, 2), original_size)
98
+ return boxes.reshape(-1, 4)
99
+
100
+ @staticmethod
101
+ def get_preprocess_shape(
102
+ oldh: int, oldw: int, long_side_length: int
103
+ ) -> Tuple[int, int]:
104
+ """
105
+ Compute the output size given input size and target long side length.
106
+ """
107
+ scale = long_side_length * 1.0 / max(oldh, oldw)
108
+ newh, neww = oldh * scale, oldw * scale
109
+ neww = int(neww + 0.5)
110
+ newh = int(newh + 0.5)
111
+ return (newh, neww)
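A small worked example of the resizing arithmetic above, using an arbitrary 768x1200 image and point as illustrative values.

import numpy as np

transform = ResizeLongestSide(target_length=1024)

# A 768x1200 (H x W) image is scaled so that its longest side becomes 1024.
assert transform.get_preprocess_shape(768, 1200, 1024) == (655, 1024)

# Point prompts in original-image pixels map into the resized frame.
coords = np.array([[600.0, 384.0]])  # (X, Y)
new_coords = transform.apply_coords(coords, original_size=(768, 1200))
# approximately (512.0, 327.5): x scales by 1024/1200, y by 655/768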