yeliudev committed
Commit f880dff · 1 parent: 0debef5
This view is limited to 50 files because the commit contains too many changes; see the raw diff for the full changeset.
Files changed (50)
  1. .gitattributes +0 -35
  2. .gitignore +9 -0
  3. README.md +7 -7
  4. app.py +435 -0
  5. requirements.txt +33 -0
  6. sam2/__init__.py +11 -0
  7. sam2/automatic_mask_generator.py +416 -0
  8. sam2/build_sam.py +172 -0
  9. sam2/configs/sam2.1/sam2.1_hiera_b+.yaml +116 -0
  10. sam2/configs/sam2.1/sam2.1_hiera_l.yaml +120 -0
  11. sam2/configs/sam2.1/sam2.1_hiera_s.yaml +119 -0
  12. sam2/configs/sam2.1/sam2.1_hiera_t.yaml +121 -0
  13. sam2/configs/sam2.1_hiera_b+.yaml +137 -0
  14. sam2/configs/sam2.1_hiera_l.yaml +141 -0
  15. sam2/configs/sam2.1_hiera_s.yaml +140 -0
  16. sam2/configs/sam2.1_hiera_t.yaml +142 -0
  17. sam2/configs/sam2.1_training/sam2.1_hiera_b+_MOSE_finetune.yaml +339 -0
  18. sam2/configs/sam2/sam2_hiera_b+.yaml +113 -0
  19. sam2/configs/sam2/sam2_hiera_l.yaml +117 -0
  20. sam2/configs/sam2/sam2_hiera_s.yaml +116 -0
  21. sam2/configs/sam2/sam2_hiera_t.yaml +118 -0
  22. sam2/csrc/connected_components.cu +289 -0
  23. sam2/loss_fns.py +288 -0
  24. sam2/modeling/__init__.py +5 -0
  25. sam2/modeling/backbones/__init__.py +5 -0
  26. sam2/modeling/backbones/hieradet.py +312 -0
  27. sam2/modeling/backbones/image_encoder.py +145 -0
  28. sam2/modeling/backbones/utils.py +88 -0
  29. sam2/modeling/memory_attention.py +168 -0
  30. sam2/modeling/memory_encoder.py +180 -0
  31. sam2/modeling/position_encoding.py +312 -0
  32. sam2/modeling/sam/__init__.py +5 -0
  33. sam2/modeling/sam/mask_decoder.py +274 -0
  34. sam2/modeling/sam/prompt_encoder.py +188 -0
  35. sam2/modeling/sam/transformer.py +303 -0
  36. sam2/modeling/sam2_base.py +882 -0
  37. sam2/modeling/sam2_utils.py +320 -0
  38. sam2/sam2_image_predictor.py +428 -0
  39. sam2/sam2_train.py +575 -0
  40. sam2/sam2_video_predictor.py +1272 -0
  41. sam2/utils/__init__.py +5 -0
  42. sam2/utils/amg.py +328 -0
  43. sam2/utils/misc.py +340 -0
  44. sam2/utils/transforms.py +108 -0
  45. setup.cfg +16 -0
  46. unipixel/constants.py +7 -0
  47. unipixel/conversation.py +49 -0
  48. unipixel/dataset/utils.py +531 -0
  49. unipixel/model/__init__.py +5 -0
  50. unipixel/model/builder.py +109 -0
.gitattributes DELETED
@@ -1,35 +0,0 @@
- *.7z filter=lfs diff=lfs merge=lfs -text
- *.arrow filter=lfs diff=lfs merge=lfs -text
- *.bin filter=lfs diff=lfs merge=lfs -text
- *.bz2 filter=lfs diff=lfs merge=lfs -text
- *.ckpt filter=lfs diff=lfs merge=lfs -text
- *.ftz filter=lfs diff=lfs merge=lfs -text
- *.gz filter=lfs diff=lfs merge=lfs -text
- *.h5 filter=lfs diff=lfs merge=lfs -text
- *.joblib filter=lfs diff=lfs merge=lfs -text
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
- *.model filter=lfs diff=lfs merge=lfs -text
- *.msgpack filter=lfs diff=lfs merge=lfs -text
- *.npy filter=lfs diff=lfs merge=lfs -text
- *.npz filter=lfs diff=lfs merge=lfs -text
- *.onnx filter=lfs diff=lfs merge=lfs -text
- *.ot filter=lfs diff=lfs merge=lfs -text
- *.parquet filter=lfs diff=lfs merge=lfs -text
- *.pb filter=lfs diff=lfs merge=lfs -text
- *.pickle filter=lfs diff=lfs merge=lfs -text
- *.pkl filter=lfs diff=lfs merge=lfs -text
- *.pt filter=lfs diff=lfs merge=lfs -text
- *.pth filter=lfs diff=lfs merge=lfs -text
- *.rar filter=lfs diff=lfs merge=lfs -text
- *.safetensors filter=lfs diff=lfs merge=lfs -text
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
- *.tar.* filter=lfs diff=lfs merge=lfs -text
- *.tar filter=lfs diff=lfs merge=lfs -text
- *.tflite filter=lfs diff=lfs merge=lfs -text
- *.tgz filter=lfs diff=lfs merge=lfs -text
- *.wasm filter=lfs diff=lfs merge=lfs -text
- *.xz filter=lfs diff=lfs merge=lfs -text
- *.zip filter=lfs diff=lfs merge=lfs -text
- *.zst filter=lfs diff=lfs merge=lfs -text
- *tfevents* filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,9 @@
+ # Byte-compiled / optimized / DLL files
+ __pycache__
+ *.egg-info
+ *.py[cod]
+ *$py.class
+
+ # Temporary data
+ .DS_Store
+ ._*
README.md CHANGED
@@ -1,12 +1,12 @@
  ---
  title: UniPixel
- emoji: 📉
- colorFrom: red
- colorTo: purple
+ emoji: 🔮
+ colorFrom: purple
+ colorTo: yellow
  sdk: gradio
  sdk_version: 5.48.0
  app_file: app.py
- pinned: false
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+ pinned: true
+ license: bsd-3-clause
+ short_description: Unified Object Referring and Segmentation for Pixel-Level Visual Reasoning
+ ---
app.py ADDED
@@ -0,0 +1,435 @@
1
+ # Copyright (c) 2025 Ye Liu. Licensed under the BSD-3-Clause license.
2
+
3
+ import os
4
+ import re
5
+ import uuid
6
+ from functools import partial
7
+
8
+ import gradio as gr
9
+ import imageio.v3 as iio
10
+ import spaces
11
+ import torch
12
+ import torch.nn.functional as F
13
+ import torchvision.transforms.functional as T
14
+ from PIL import Image
15
+
16
+ from unipixel.constants import MEM_TOKEN, SEG_TOKEN
17
+ from unipixel.dataset.utils import process_vision_info
18
+ from unipixel.model.builder import build_model
19
+ from unipixel.utils.io import load_image, load_video
20
+ from unipixel.utils.transforms import get_sam2_transform
21
+ from unipixel.utils.visualizer import draw_mask, sample_color
22
+
23
+ PATH = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
24
+
25
+ MODEL = 'PolyU-ChenLab/UniPixel-3B'
26
+
27
+ TITLE = 'UniPixel: Unified Object Referring and Segmentation for Pixel-Level Visual Reasoning'
28
+
29
+ HEADER = """
30
+ <p align="center" style="margin: 1em 0 2em;"><img width="280" src="https://raw.githubusercontent.com/PolyU-ChenLab/UniPixel/refs/heads/main/.github/logo.png"></p>
31
+ <h3 align="center">Unified Object Referring and Segmentation for Pixel-Level Visual Reasoning</h3>
32
+ <div style="display: flex; justify-content: center; gap: 5px;">
33
+ <a href="https://arxiv.org/abs/2509.18094" target="_blank"><img src="https://img.shields.io/badge/arXiv-2509.18094-red"></a>
34
+ <a href="https://polyu-chenlab.github.io/unipixel/" target="_blank"><img src="https://img.shields.io/badge/Project-Page-brightgreen"></a>
35
+ <a href="https://huggingface.co/collections/PolyU-ChenLab/unipixel-68cf7137013455e5b15962e8" target="_blank"><img src="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Model-blue"></a>
36
+ <a href="https://huggingface.co/datasets/PolyU-ChenLab/UniPixel-SFT-1M" target="_blank"><img src="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Dataset-orange"></a>
37
+ <a href="https://github.com/PolyU-ChenLab/UniPixel/blob/main/README.md" target="_blank"><img src="https://img.shields.io/badge/License-BSD--3--Clause-purple"></a>
38
+ <a href="https://github.com/PolyU-ChenLab/UniPixel" target="_blank"><img src="https://img.shields.io/github/stars/PolyU-ChenLab/UniPixel"></a>
39
+ </div>
40
+ <p style="margin-top: 1em;">UniPixel is a unified MLLM for pixel-level vision-language understanding. It flexibly supports a variety of fine-grained tasks, including image/video segmentation, regional understanding, and a novel PixelQA task that jointly requires object-centric referring, segmentation, and question-answering in videos. Please open an <a href="https://github.com/PolyU-ChenLab/UniPixel/issues/new" target="_blank">issue</a> if you meet any problems.</p>
41
+ """
42
+
43
+ # https://github.com/gradio-app/gradio/pull/10552
44
+ JS = """
45
+ function init() {
46
+ if (window.innerWidth >= 1536) {
47
+ document.querySelector('main').style.maxWidth = '1536px'
48
+ }
49
+ }
50
+ """
51
+
52
+ model, processor = build_model(MODEL)
53
+ device = next(model.parameters()).device
54
+
55
+ sam2_transform = get_sam2_transform(model.config.sam2_image_size)
56
+
57
+ colors = sample_color()
58
+ color_map = {f'Target {i + 1}': f'#{int(c[0]):02x}{int(c[1]):02x}{int(c[2]):02x}' for i, c in enumerate(colors * 255)}
59
+ color_map_light = {
60
+ f'Target {i + 1}': f'#{int(c[0] * 127.5 + 127.5):02x}{int(c[1] * 127.5 + 127.5):02x}{int(c[2] * 127.5 + 127.5):02x}'
61
+ for i, c in enumerate(colors)
62
+ }
63
+
64
+
65
+ def enable_btns():
66
+ return (gr.Button(interactive=True), ) * 4
67
+
68
+
69
+ def disable_btns():
70
+ return (gr.Button(interactive=False), ) * 4
71
+
72
+
73
+ def reset_seg():
74
+ return 16, gr.Button(interactive=False)
75
+
76
+
77
+ def reset_reg():
78
+ return 1, gr.Button(interactive=False)
79
+
80
+
81
+ def update_region(blob):
82
+ if blob['background'] is None or not blob['layers'][0].any():
83
+ return
84
+
85
+ region = blob['background'].copy()
86
+ region[blob['layers'][0][:, :, -1] == 0] = [0, 0, 0, 0]
87
+
88
+ return region
89
+
90
+
91
+ def update_video(video, prompt_idx):
92
+ if video is None:
93
+ return
94
+
95
+ _, images = load_video(video, sample_frames=16)
96
+ path = images[prompt_idx - 1]
97
+
98
+ return path
99
+
100
+
101
+ @spaces.GPU
102
+ def infer_seg(media, query, sample_frames=16, media_type=None):
103
+ if not media:
104
+ gr.Warning('Please upload an image or a video.')
105
+ return None, None, None
106
+
107
+ if not query:
108
+ gr.Warning('Please provide a text prompt.')
109
+ return None, None, None
110
+
111
+ if any(media.endswith(k) for k in ('jpg', 'png')):
112
+ frames, images = load_image(media), [media]
113
+ else:
114
+ frames, images = load_video(media, sample_frames=sample_frames)
115
+
116
+ messages = [{
117
+ 'role':
118
+ 'user',
119
+ 'content': [{
120
+ 'type': 'video',
121
+ 'video': images,
122
+ 'min_pixels': 128 * 28 * 28,
123
+ 'max_pixels': 256 * 28 * 28 * int(sample_frames / len(images))
124
+ }, {
125
+ 'type': 'text',
126
+ 'text': query
127
+ }]
128
+ }]
129
+
130
+ text = processor.apply_chat_template(messages, add_generation_prompt=True)
131
+
132
+ images, videos, kwargs = process_vision_info(messages, return_video_kwargs=True)
133
+
134
+ data = processor(text=[text], images=images, videos=videos, return_tensors='pt', **kwargs)
135
+
136
+ data['frames'] = [sam2_transform(frames).to(model.sam2.dtype)]
137
+ data['frame_size'] = [frames.shape[1:3]]
138
+
139
+ output_ids = model.generate(
140
+ **data.to(device),
141
+ do_sample=False,
142
+ temperature=None,
143
+ top_k=None,
144
+ top_p=None,
145
+ repetition_penalty=None,
146
+ max_new_tokens=512)
147
+
148
+ assert data.input_ids.size(0) == output_ids.size(0) == 1
149
+ output_ids = output_ids[0, data.input_ids.size(1):]
150
+
151
+ if output_ids[-1] == processor.tokenizer.eos_token_id:
152
+ output_ids = output_ids[:-1]
153
+
154
+ response = processor.decode(output_ids, clean_up_tokenization_spaces=False)
155
+ response = response.replace(f' {SEG_TOKEN}', SEG_TOKEN).replace(f'{SEG_TOKEN} ', SEG_TOKEN)
156
+
157
+ entities = []
158
+ for i, m in enumerate(re.finditer(re.escape(SEG_TOKEN), response)):
159
+ entities.append(dict(entity=f'Target {i + 1}', start=m.start(), end=m.end()))
160
+
161
+ answer = dict(text=response, entities=entities)
162
+
163
+ imgs = draw_mask(frames, model.seg, colors=colors)
164
+
165
+ path = f"/tmp/{uuid.uuid4().hex}.{'gif' if len(imgs) > 1 else 'png'}"
166
+ iio.imwrite(path, imgs, duration=100, loop=0)
167
+
168
+ if media_type == 'image':
169
+ if len(model.seg) >= 1:
170
+ masks = media, [(m[0, 0].numpy(), f'Target {i + 1}') for i, m in enumerate(model.seg)]
171
+ else:
172
+ masks = None
173
+ else:
174
+ masks = path
175
+
176
+ return answer, masks, path
177
+
178
+
179
+ infer_seg_image = partial(infer_seg, media_type='image')
180
+ infer_seg_video = partial(infer_seg, media_type='video')
181
+
182
+
183
+ @spaces.GPU
184
+ def infer_reg(blob, query, prompt_idx=1, video=None):
185
+ if blob['background'] is None:
186
+ gr.Warning('Please upload an image or a video.')
187
+ return
188
+
189
+ if not blob['layers'][0].any():
190
+ gr.Warning('Please provide a mask prompt.')
191
+ return
192
+
193
+ if not query:
194
+ gr.Warning('Please provide a text prompt.')
195
+ return
196
+
197
+ if video is None:
198
+ frames = torch.from_numpy(blob['background'][:, :, :3]).unsqueeze(0)
199
+ images = [Image.fromarray(blob['background'], mode='RGBA')]
200
+ else:
201
+ frames, images = load_video(video, sample_frames=16)
202
+
203
+ frame_size = frames.shape[1:3]
204
+
205
+ mask = torch.from_numpy(blob['layers'][0][:, :, -1]).unsqueeze(0) > 0
206
+
207
+ refer_mask = torch.zeros(frames.size(0), 1, *frame_size)
208
+ refer_mask[prompt_idx - 1] = mask
209
+
210
+ if refer_mask.size(0) % 2 != 0:
211
+ refer_mask = torch.cat((refer_mask, refer_mask[-1, None]))
212
+ refer_mask = refer_mask.flatten(1)
213
+ refer_mask = F.max_pool1d(refer_mask.transpose(-1, -2), kernel_size=2, stride=2).transpose(-1, -2)
214
+ refer_mask = refer_mask.view(-1, 1, *frame_size)
215
+
216
+ if video is None:
217
+ prefix = f'Here is an image with the following highlighted regions:\n[0]: <{prompt_idx}> {MEM_TOKEN}\n'
218
+ else:
219
+ prefix = f'Here is a video with {len(images)} frames denoted as <1> to <{len(images)}>. The highlighted regions are as follows:\n[0]: <{prompt_idx}>-<{prompt_idx + 1}> {MEM_TOKEN}\n'
220
+
221
+ messages = [{
222
+ 'role':
223
+ 'user',
224
+ 'content': [{
225
+ 'type': 'video',
226
+ 'video': images,
227
+ 'min_pixels': 128 * 28 * 28,
228
+ 'max_pixels': 256 * 28 * 28 * int(16 / len(images))
229
+ }, {
230
+ 'type': 'text',
231
+ 'text': prefix + query
232
+ }]
233
+ }]
234
+
235
+ text = processor.apply_chat_template(messages, add_generation_prompt=True)
236
+
237
+ images, videos, kwargs = process_vision_info(messages, return_video_kwargs=True)
238
+
239
+ data = processor(text=[text], images=images, videos=videos, return_tensors='pt', **kwargs)
240
+
241
+ refer_mask = T.resize(refer_mask, (data['video_grid_thw'][0][1] * 14, data['video_grid_thw'][0][2] * 14))
242
+ refer_mask = F.max_pool2d(refer_mask, kernel_size=28, stride=28)
243
+ refer_mask = refer_mask > 0
244
+
245
+ data['frames'] = [sam2_transform(frames).to(model.sam2.dtype)]
246
+ data['frame_size'] = [frames.shape[1:3]]
247
+ data['refer_mask'] = [refer_mask]
248
+
249
+ output_ids = model.generate(
250
+ **data.to(device),
251
+ do_sample=False,
252
+ temperature=None,
253
+ top_k=None,
254
+ top_p=None,
255
+ repetition_penalty=None,
256
+ max_new_tokens=512)
257
+
258
+ assert data.input_ids.size(0) == output_ids.size(0) == 1
259
+ output_ids = output_ids[0, data.input_ids.size(1):]
260
+
261
+ if output_ids[-1] == processor.tokenizer.eos_token_id:
262
+ output_ids = output_ids[:-1]
263
+
264
+ response = processor.decode(output_ids, clean_up_tokenization_spaces=False)
265
+ response = response.replace(' [0]', '[0]').replace('[0] ', '[0]').replace('[0]', '<REGION>')
266
+
267
+ entities = []
268
+ for m in re.finditer(re.escape('<REGION>'), response):
269
+ entities.append(dict(entity='region', start=m.start(), end=m.end(), color="#f85050"))
270
+
271
+ answer = dict(text=response, entities=entities)
272
+
273
+ return answer
274
+
275
+
276
+ def build_demo():
277
+ with gr.Blocks(title=TITLE, js=JS) as demo:
278
+ gr.HTML(HEADER)
279
+
280
+ with gr.Tab('Image Segmentation'):
281
+ download_btn_1 = gr.DownloadButton(label='📦 Download', interactive=False, render=False)
282
+ msk_1 = gr.AnnotatedImage(label='Segmentation Results', color_map=color_map, render=False)
283
+ ans_1 = gr.HighlightedText(
284
+ label='Model Response', color_map=color_map_light, show_inline_category=False, render=False)
285
+
286
+ with gr.Row():
287
+ with gr.Column():
288
+ media_1 = gr.Image(type='filepath')
289
+
290
+ sample_frames_1 = gr.Slider(1, 32, value=16, step=1, visible=False)
291
+
292
+ query_1 = gr.Textbox(label='Text Prompt', placeholder='Please segment the...')
293
+
294
+ with gr.Row():
295
+ random_btn_1 = gr.Button(value='🔮 Random', visible=False)
296
+
297
+ reset_btn_1 = gr.ClearButton([media_1, query_1, msk_1, ans_1], value='🗑️ Reset')
298
+ reset_btn_1.click(reset_seg, None, [sample_frames_1, download_btn_1])
299
+
300
+ download_btn_1.render()
301
+
302
+ submit_btn_1 = gr.Button(value='🚀 Submit', variant='primary')
303
+ with gr.Column():
304
+ msk_1.render()
305
+ ans_1.render()
306
+
307
+ ctx_1 = submit_btn_1.click(disable_btns, None, [random_btn_1, reset_btn_1, download_btn_1, submit_btn_1])
308
+ ctx_1 = ctx_1.then(infer_seg_image, [media_1, query_1, sample_frames_1], [ans_1, msk_1, download_btn_1])
309
+ ctx_1.then(enable_btns, None, [random_btn_1, reset_btn_1, download_btn_1, submit_btn_1])
310
+
311
+ with gr.Tab('Video Segmentation'):
312
+ download_btn_2 = gr.DownloadButton(label='📦 Download', interactive=False, render=False)
313
+ msk_2 = gr.Image(label='Segmentation Results', render=False)
314
+ ans_2 = gr.HighlightedText(
315
+ label='Model Response', color_map=color_map_light, show_inline_category=False, render=False)
316
+
317
+ with gr.Row():
318
+ with gr.Column():
319
+ media_2 = gr.Video()
320
+
321
+ with gr.Accordion(label='Hyperparameters', open=False):
322
+ sample_frames_2 = gr.Slider(
323
+ 1,
324
+ 32,
325
+ value=16,
326
+ step=1,
327
+ interactive=True,
328
+ label='Sample Frames',
329
+ info='The number of frames to sample from a video (Default: 16)')
330
+
331
+ query_2 = gr.Textbox(label='Text Prompt', placeholder='Please segment the...')
332
+
333
+ with gr.Row():
334
+ random_btn_2 = gr.Button(value='🔮 Random', visible=False)
335
+
336
+ reset_btn_2 = gr.ClearButton([media_2, query_2, msk_2, ans_2], value='🗑️ Reset')
337
+ reset_btn_2.click(reset_seg, None, [sample_frames_2, download_btn_2])
338
+
339
+ download_btn_2.render()
340
+
341
+ submit_btn_2 = gr.Button(value='🚀 Submit', variant='primary')
342
+ with gr.Column():
343
+ msk_2.render()
344
+ ans_2.render()
345
+
346
+ ctx_2 = submit_btn_2.click(disable_btns, None, [random_btn_2, reset_btn_2, download_btn_2, submit_btn_2])
347
+ ctx_2 = ctx_2.then(infer_seg_video, [media_2, query_2, sample_frames_2], [ans_2, msk_2, download_btn_2])
348
+ ctx_2.then(enable_btns, None, [random_btn_2, reset_btn_2, download_btn_2, submit_btn_2])
349
+
350
+ with gr.Tab('Image Regional Understanding'):
351
+ download_btn_3 = gr.DownloadButton(visible=False)
352
+ msk_3 = gr.Image(label='Highlighted Region', render=False)
353
+ ans_3 = gr.HighlightedText(label='Model Response', show_inline_category=False, render=False)
354
+
355
+ with gr.Row():
356
+ with gr.Column():
357
+ media_3 = gr.ImageEditor(
358
+ label='Image & Mask Prompt',
359
+ brush=gr.Brush(colors=["#ff000080"], color_mode='fixed'),
360
+ transforms=None,
361
+ layers=False)
362
+ media_3.change(update_region, media_3, msk_3)
363
+
364
+ prompt_frame_index_3 = gr.Slider(1, 16, value=1, step=1, visible=False)
365
+
366
+ query_3 = gr.Textbox(label='Text Prompt', placeholder='Please describe the highlighted region...')
367
+
368
+ with gr.Row():
369
+ random_btn_3 = gr.Button(value='🔮 Random', visible=False)
370
+
371
+ reset_btn_3 = gr.ClearButton([media_3, query_3, msk_3, ans_3], value='🗑️ Reset')
372
+ reset_btn_3.click(reset_reg, None, [prompt_frame_index_3, download_btn_3])
373
+
374
+ submit_btn_3 = gr.Button(value='🚀 Submit', variant='primary')
375
+ with gr.Column():
376
+ msk_3.render()
377
+ ans_3.render()
378
+
379
+ ctx_3 = submit_btn_3.click(disable_btns, None, [random_btn_3, reset_btn_3, download_btn_3, submit_btn_3])
380
+ ctx_3 = ctx_3.then(infer_reg, [media_3, query_3, prompt_frame_index_3], ans_3)
381
+ ctx_3.then(enable_btns, None, [random_btn_3, reset_btn_3, download_btn_3, submit_btn_3])
382
+
383
+ with gr.Tab('Video Regional Understanding'):
384
+ download_btn_4 = gr.DownloadButton(visible=False)
385
+ prompt_frame_index_4 = gr.Slider(
386
+ 1,
387
+ 16,
388
+ value=1,
389
+ step=1,
390
+ interactive=True,
391
+ label='Prompt Frame Index',
392
+ info='The index of the frame that includes mask prompts (Default: 1)',
393
+ render=False)
394
+ msk_4 = gr.ImageEditor(
395
+ label='Mask Prompt',
396
+ brush=gr.Brush(colors=['#ff000080'], color_mode='fixed'),
397
+ transforms=None,
398
+ layers=False,
399
+ render=False)
400
+ ans_4 = gr.HighlightedText(label='Model Response', show_inline_category=False, render=False)
401
+
402
+ with gr.Row():
403
+ with gr.Column():
404
+ media_4 = gr.Video()
405
+ media_4.change(update_video, [media_4, prompt_frame_index_4], msk_4)
406
+
407
+ with gr.Accordion(label='Hyperparameters', open=False):
408
+ prompt_frame_index_4.render()
409
+ prompt_frame_index_4.change(update_video, [media_4, prompt_frame_index_4], msk_4)
410
+
411
+ query_4 = gr.Textbox(label='Text Prompt', placeholder='Please describe the highlighted region...')
412
+
413
+ with gr.Row():
414
+ random_btn_4 = gr.Button(value='🔮 Random', visible=False)
415
+
416
+ reset_btn_4 = gr.ClearButton([media_4, query_4, msk_4, ans_4], value='🗑️ Reset')
417
+ reset_btn_4.click(reset_reg, None, [prompt_frame_index_4, download_btn_4])
418
+
419
+ submit_btn_4 = gr.Button(value='🚀 Submit', variant='primary')
420
+ with gr.Column():
421
+ msk_4.render()
422
+ ans_4.render()
423
+
424
+ ctx_4 = submit_btn_4.click(disable_btns, None, [random_btn_4, reset_btn_4, download_btn_4, submit_btn_4])
425
+ ctx_4 = ctx_4.then(infer_reg, [msk_4, query_4, prompt_frame_index_4, media_4], ans_4)
426
+ ctx_4.then(enable_btns, None, [random_btn_4, reset_btn_4, download_btn_4, submit_btn_4])
427
+
428
+ return demo
429
+
430
+
431
+ if __name__ == '__main__':
432
+ demo = build_demo()
433
+
434
+ demo.queue()
435
+ demo.launch(server_name='0.0.0.0')
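For reference, a minimal local-run sketch of the demo above, assuming the pinned packages from requirements.txt (next file) are installed and a CUDA GPU is available; the share flag is optional and only creates a temporary public Gradio link:

from app import build_demo

demo = build_demo()
demo.queue()
demo.launch(server_name='0.0.0.0', share=True)  # equivalent to the __main__ block above, plus a share link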
requirements.txt ADDED
@@ -0,0 +1,33 @@
+ accelerate==1.9.0
+ decord==0.6.0
+ deepspeed==0.17.4
+ gradio==5.48.0
+ hydra-core==1.3.2
+ imageio==2.37.0
+ iopath==0.1.10
+ matplotlib==3.10.5
+ nncore==0.4.7
+ numpy==2.1.2
+ openai==1.99.1
+ pandas==2.3.1
+ peft==0.17.0
+ pycocotools==2.0.10
+ pydantic==2.11.7
+ pysrt==1.1.2
+ scikit-image==0.25.2
+ scikit-learn==1.7.1
+ sentencepiece==0.2.0
+ spaces==0.42.1
+ tensordict==0.9.1
+ termplotlib==0.3.9
+ transformers==4.53.3
+ triton==3.3.1
+ wandb==0.21.0
+
+ # torch==2.7.1+cu128
+ # torchvision==0.22.1+cu128
+
+ # https://github.com/Dao-AILab/flash-attention/pull/1751
+ # flash_attn==2.8.2
+
+ # sam2 modified from https://github.com/facebookresearch/sam2/tree/722d1d15111c689908aeeb82d49a57780aac5153
sam2/__init__.py ADDED
@@ -0,0 +1,11 @@
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
+ # All rights reserved.
+
+ # This source code is licensed under the license found in the
+ # LICENSE file in the root directory of this source tree.
+
+ from hydra import initialize_config_module
+ from hydra.core.global_hydra import GlobalHydra
+
+ if not GlobalHydra.instance().is_initialized():
+     initialize_config_module("sam2", version_base="1.2")
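The initialize_config_module call above registers the sam2 package as a Hydra config module; this is what lets build_sam.py (added below) resolve config names such as configs/sam2.1/sam2.1_hiera_b+.yaml. A minimal sketch of that interaction, assuming the package is importable and the YAML configs from this commit are present:

import sam2  # importing the package runs the Hydra initialization shown above
from hydra import compose
from hydra.utils import instantiate
from omegaconf import OmegaConf

cfg = compose(config_name="configs/sam2.1/sam2.1_hiera_b+.yaml")
OmegaConf.resolve(cfg)
model = instantiate(cfg.model, _recursive_=True)  # builds a SAM2Base from the YAML's _target_ entries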
sam2/automatic_mask_generator.py ADDED
@@ -0,0 +1,416 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+
4
+ # This source code is licensed under the license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ # Adapted from https://github.com/facebookresearch/segment-anything/blob/main/segment_anything/automatic_mask_generator.py
8
+ from typing import Any, Dict, List, Optional, Tuple
9
+
10
+ import numpy as np
11
+ import torch
12
+ from torchvision.ops.boxes import batched_nms, box_area # type: ignore
13
+
14
+ from sam2.modeling.sam2_base import SAM2Base
15
+ from sam2.sam2_image_predictor import SAM2ImagePredictor
16
+ from sam2.utils.amg import (MaskData, area_from_rle, batch_iterator, batched_mask_to_box, box_xyxy_to_xywh,
17
+ build_all_layer_point_grids, calculate_stability_score, coco_encode_rle,
18
+ generate_crop_boxes, is_box_near_crop_edge, mask_to_rle_pytorch, remove_small_regions,
19
+ rle_to_mask, uncrop_boxes_xyxy, uncrop_masks, uncrop_points)
20
+
21
+
22
+ class SAM2AutomaticMaskGenerator:
23
+
24
+ def __init__(
25
+ self,
26
+ model: SAM2Base,
27
+ points_per_side: Optional[int] = 32,
28
+ points_per_batch: int = 64,
29
+ pred_iou_thresh: float = 0.8,
30
+ stability_score_thresh: float = 0.95,
31
+ stability_score_offset: float = 1.0,
32
+ mask_threshold: float = 0.0,
33
+ box_nms_thresh: float = 0.7,
34
+ crop_n_layers: int = 0,
35
+ crop_nms_thresh: float = 0.7,
36
+ crop_overlap_ratio: float = 512 / 1500,
37
+ crop_n_points_downscale_factor: int = 1,
38
+ point_grids: Optional[List[np.ndarray]] = None,
39
+ min_mask_region_area: int = 0,
40
+ output_mode: str = "binary_mask",
41
+ use_m2m: bool = False,
42
+ multimask_output: bool = True,
43
+ **kwargs,
44
+ ) -> None:
45
+ """
46
+ Using a SAM 2 model, generates masks for the entire image.
47
+ Generates a grid of point prompts over the image, then filters
48
+ low quality and duplicate masks. The default settings are chosen
49
+ for SAM 2 with a HieraL backbone.
50
+
51
+ Arguments:
52
+ model (Sam): The SAM 2 model to use for mask prediction.
53
+ points_per_side (int or None): The number of points to be sampled
54
+ along one side of the image. The total number of points is
55
+ points_per_side**2. If None, 'point_grids' must provide explicit
56
+ point sampling.
57
+ points_per_batch (int): Sets the number of points run simultaneously
58
+ by the model. Higher numbers may be faster but use more GPU memory.
59
+ pred_iou_thresh (float): A filtering threshold in [0,1], using the
60
+ model's predicted mask quality.
61
+ stability_score_thresh (float): A filtering threshold in [0,1], using
62
+ the stability of the mask under changes to the cutoff used to binarize
63
+ the model's mask predictions.
64
+ stability_score_offset (float): The amount to shift the cutoff when
65
+ calculating the stability score.
66
+ mask_threshold (float): Threshold for binarizing the mask logits
67
+ box_nms_thresh (float): The box IoU cutoff used by non-maximal
68
+ suppression to filter duplicate masks.
69
+ crop_n_layers (int): If >0, mask prediction will be run again on
70
+ crops of the image. Sets the number of layers to run, where each
71
+ layer has 2**i_layer number of image crops.
72
+ crop_nms_thresh (float): The box IoU cutoff used by non-maximal
73
+ suppression to filter duplicate masks between different crops.
74
+ crop_overlap_ratio (float): Sets the degree to which crops overlap.
75
+ In the first crop layer, crops will overlap by this fraction of
76
+ the image length. Later layers with more crops scale down this overlap.
77
+ crop_n_points_downscale_factor (int): The number of points-per-side
78
+ sampled in layer n is scaled down by crop_n_points_downscale_factor**n.
79
+ point_grids (list(np.ndarray) or None): A list over explicit grids
80
+ of points used for sampling, normalized to [0,1]. The nth grid in the
81
+ list is used in the nth crop layer. Exclusive with points_per_side.
82
+ min_mask_region_area (int): If >0, postprocessing will be applied
83
+ to remove disconnected regions and holes in masks with area smaller
84
+ than min_mask_region_area. Requires opencv.
85
+ output_mode (str): The form masks are returned in. Can be 'binary_mask',
86
+ 'uncompressed_rle', or 'coco_rle'. 'coco_rle' requires pycocotools.
87
+ For large resolutions, 'binary_mask' may consume large amounts of
88
+ memory.
89
+ use_m2m (bool): Whether to add a one step refinement using previous mask predictions.
90
+ multimask_output (bool): Whether to output multimask at each point of the grid.
91
+ """
92
+
93
+ assert (points_per_side is None) != (point_grids
94
+ is None), "Exactly one of points_per_side or point_grid must be provided."
95
+ if points_per_side is not None:
96
+ self.point_grids = build_all_layer_point_grids(
97
+ points_per_side,
98
+ crop_n_layers,
99
+ crop_n_points_downscale_factor,
100
+ )
101
+ elif point_grids is not None:
102
+ self.point_grids = point_grids
103
+ else:
104
+ raise ValueError("Can't have both points_per_side and point_grid be None.")
105
+
106
+ assert output_mode in [
107
+ "binary_mask",
108
+ "uncompressed_rle",
109
+ "coco_rle",
110
+ ], f"Unknown output_mode {output_mode}."
111
+ if output_mode == "coco_rle":
112
+ try:
113
+ from pycocotools import mask as mask_utils # type: ignore # noqa: F401
114
+ except ImportError as e:
115
+ print("Please install pycocotools")
116
+ raise e
117
+
118
+ self.predictor = SAM2ImagePredictor(
119
+ model,
120
+ max_hole_area=min_mask_region_area,
121
+ max_sprinkle_area=min_mask_region_area,
122
+ )
123
+ self.points_per_batch = points_per_batch
124
+ self.pred_iou_thresh = pred_iou_thresh
125
+ self.stability_score_thresh = stability_score_thresh
126
+ self.stability_score_offset = stability_score_offset
127
+ self.mask_threshold = mask_threshold
128
+ self.box_nms_thresh = box_nms_thresh
129
+ self.crop_n_layers = crop_n_layers
130
+ self.crop_nms_thresh = crop_nms_thresh
131
+ self.crop_overlap_ratio = crop_overlap_ratio
132
+ self.crop_n_points_downscale_factor = crop_n_points_downscale_factor
133
+ self.min_mask_region_area = min_mask_region_area
134
+ self.output_mode = output_mode
135
+ self.use_m2m = use_m2m
136
+ self.multimask_output = multimask_output
137
+
138
+ @classmethod
139
+ def from_pretrained(cls, model_id: str, **kwargs) -> "SAM2AutomaticMaskGenerator":
140
+ """
141
+ Load a pretrained model from the Hugging Face hub.
142
+
143
+ Arguments:
144
+ model_id (str): The Hugging Face repository ID.
145
+ **kwargs: Additional arguments to pass to the model constructor.
146
+
147
+ Returns:
148
+ (SAM2AutomaticMaskGenerator): The loaded model.
149
+ """
150
+ from sam2.build_sam import build_sam2_hf
151
+
152
+ sam_model = build_sam2_hf(model_id, **kwargs)
153
+ return cls(sam_model, **kwargs)
154
+
155
+ @torch.no_grad()
156
+ def generate(self, image: np.ndarray) -> List[Dict[str, Any]]:
157
+ """
158
+ Generates masks for the given image.
159
+
160
+ Arguments:
161
+ image (np.ndarray): The image to generate masks for, in HWC uint8 format.
162
+
163
+ Returns:
164
+ list(dict(str, any)): A list over records for masks. Each record is
165
+ a dict containing the following keys:
166
+ segmentation (dict(str, any) or np.ndarray): The mask. If
167
+ output_mode='binary_mask', is an array of shape HW. Otherwise,
168
+ is a dictionary containing the RLE.
169
+ bbox (list(float)): The box around the mask, in XYWH format.
170
+ area (int): The area in pixels of the mask.
171
+ predicted_iou (float): The model's own prediction of the mask's
172
+ quality. This is filtered by the pred_iou_thresh parameter.
173
+ point_coords (list(list(float))): The point coordinates input
174
+ to the model to generate this mask.
175
+ stability_score (float): A measure of the mask's quality. This
176
+ is filtered on using the stability_score_thresh parameter.
177
+ crop_box (list(float)): The crop of the image used to generate
178
+ the mask, given in XYWH format.
179
+ """
180
+
181
+ # Generate masks
182
+ mask_data = self._generate_masks(image)
183
+
184
+ # Encode masks
185
+ if self.output_mode == "coco_rle":
186
+ mask_data["segmentations"] = [coco_encode_rle(rle) for rle in mask_data["rles"]]
187
+ elif self.output_mode == "binary_mask":
188
+ mask_data["segmentations"] = [rle_to_mask(rle) for rle in mask_data["rles"]]
189
+ else:
190
+ mask_data["segmentations"] = mask_data["rles"]
191
+
192
+ # Write mask records
193
+ curr_anns = []
194
+ for idx in range(len(mask_data["segmentations"])):
195
+ ann = {
196
+ "segmentation": mask_data["segmentations"][idx],
197
+ "area": area_from_rle(mask_data["rles"][idx]),
198
+ "bbox": box_xyxy_to_xywh(mask_data["boxes"][idx]).tolist(),
199
+ "predicted_iou": mask_data["iou_preds"][idx].item(),
200
+ "point_coords": [mask_data["points"][idx].tolist()],
201
+ "stability_score": mask_data["stability_score"][idx].item(),
202
+ "crop_box": box_xyxy_to_xywh(mask_data["crop_boxes"][idx]).tolist(),
203
+ }
204
+ curr_anns.append(ann)
205
+
206
+ return curr_anns
207
+
208
+ def _generate_masks(self, image: np.ndarray) -> MaskData:
209
+ orig_size = image.shape[:2]
210
+ crop_boxes, layer_idxs = generate_crop_boxes(orig_size, self.crop_n_layers, self.crop_overlap_ratio)
211
+
212
+ # Iterate over image crops
213
+ data = MaskData()
214
+ for crop_box, layer_idx in zip(crop_boxes, layer_idxs):
215
+ crop_data = self._process_crop(image, crop_box, layer_idx, orig_size)
216
+ data.cat(crop_data)
217
+
218
+ # Remove duplicate masks between crops
219
+ if len(crop_boxes) > 1:
220
+ # Prefer masks from smaller crops
221
+ scores = 1 / box_area(data["crop_boxes"])
222
+ scores = scores.to(data["boxes"].device)
223
+ keep_by_nms = batched_nms(
224
+ data["boxes"].float(),
225
+ scores,
226
+ torch.zeros_like(data["boxes"][:, 0]), # categories
227
+ iou_threshold=self.crop_nms_thresh,
228
+ )
229
+ data.filter(keep_by_nms)
230
+ data.to_numpy()
231
+ return data
232
+
233
+ def _process_crop(
234
+ self,
235
+ image: np.ndarray,
236
+ crop_box: List[int],
237
+ crop_layer_idx: int,
238
+ orig_size: Tuple[int, ...],
239
+ ) -> MaskData:
240
+ # Crop the image and calculate embeddings
241
+ x0, y0, x1, y1 = crop_box
242
+ cropped_im = image[y0:y1, x0:x1, :]
243
+ cropped_im_size = cropped_im.shape[:2]
244
+ self.predictor.set_image(cropped_im)
245
+
246
+ # Get points for this crop
247
+ points_scale = np.array(cropped_im_size)[None, ::-1]
248
+ points_for_image = self.point_grids[crop_layer_idx] * points_scale
249
+
250
+ # Generate masks for this crop in batches
251
+ data = MaskData()
252
+ for (points, ) in batch_iterator(self.points_per_batch, points_for_image):
253
+ batch_data = self._process_batch(points, cropped_im_size, crop_box, orig_size, normalize=True)
254
+ data.cat(batch_data)
255
+ del batch_data
256
+ self.predictor.reset_predictor()
257
+
258
+ # Remove duplicates within this crop.
259
+ keep_by_nms = batched_nms(
260
+ data["boxes"].float(),
261
+ data["iou_preds"],
262
+ torch.zeros_like(data["boxes"][:, 0]), # categories
263
+ iou_threshold=self.box_nms_thresh,
264
+ )
265
+ data.filter(keep_by_nms)
266
+
267
+ # Return to the original image frame
268
+ data["boxes"] = uncrop_boxes_xyxy(data["boxes"], crop_box)
269
+ data["points"] = uncrop_points(data["points"], crop_box)
270
+ data["crop_boxes"] = torch.tensor([crop_box for _ in range(len(data["rles"]))])
271
+
272
+ return data
273
+
274
+ def _process_batch(
275
+ self,
276
+ points: np.ndarray,
277
+ im_size: Tuple[int, ...],
278
+ crop_box: List[int],
279
+ orig_size: Tuple[int, ...],
280
+ normalize=False,
281
+ ) -> MaskData:
282
+ orig_h, orig_w = orig_size
283
+
284
+ # Run model on this batch
285
+ points = torch.as_tensor(points, dtype=torch.float32, device=self.predictor.device)
286
+ in_points = self.predictor._transforms.transform_coords(points, normalize=normalize, orig_hw=im_size)
287
+ in_labels = torch.ones(in_points.shape[0], dtype=torch.int, device=in_points.device)
288
+ masks, iou_preds, low_res_masks = self.predictor._predict(
289
+ in_points[:, None, :],
290
+ in_labels[:, None],
291
+ multimask_output=self.multimask_output,
292
+ return_logits=True,
293
+ )
294
+
295
+ # Serialize predictions and store in MaskData
296
+ data = MaskData(
297
+ masks=masks.flatten(0, 1),
298
+ iou_preds=iou_preds.flatten(0, 1),
299
+ points=points.repeat_interleave(masks.shape[1], dim=0),
300
+ low_res_masks=low_res_masks.flatten(0, 1),
301
+ )
302
+ del masks
303
+
304
+ if not self.use_m2m:
305
+ # Filter by predicted IoU
306
+ if self.pred_iou_thresh > 0.0:
307
+ keep_mask = data["iou_preds"] > self.pred_iou_thresh
308
+ data.filter(keep_mask)
309
+
310
+ # Calculate and filter by stability score
311
+ data["stability_score"] = calculate_stability_score(data["masks"], self.mask_threshold,
312
+ self.stability_score_offset)
313
+ if self.stability_score_thresh > 0.0:
314
+ keep_mask = data["stability_score"] >= self.stability_score_thresh
315
+ data.filter(keep_mask)
316
+ else:
317
+ # One step refinement using previous mask predictions
318
+ in_points = self.predictor._transforms.transform_coords(
319
+ data["points"], normalize=normalize, orig_hw=im_size)
320
+ labels = torch.ones(in_points.shape[0], dtype=torch.int, device=in_points.device)
321
+ masks, ious = self.refine_with_m2m(in_points, labels, data["low_res_masks"], self.points_per_batch)
322
+ data["masks"] = masks.squeeze(1)
323
+ data["iou_preds"] = ious.squeeze(1)
324
+
325
+ if self.pred_iou_thresh > 0.0:
326
+ keep_mask = data["iou_preds"] > self.pred_iou_thresh
327
+ data.filter(keep_mask)
328
+
329
+ data["stability_score"] = calculate_stability_score(data["masks"], self.mask_threshold,
330
+ self.stability_score_offset)
331
+ if self.stability_score_thresh > 0.0:
332
+ keep_mask = data["stability_score"] >= self.stability_score_thresh
333
+ data.filter(keep_mask)
334
+
335
+ # Threshold masks and calculate boxes
336
+ data["masks"] = data["masks"] > self.mask_threshold
337
+ data["boxes"] = batched_mask_to_box(data["masks"])
338
+
339
+ # Filter boxes that touch crop boundaries
340
+ keep_mask = ~is_box_near_crop_edge(data["boxes"], crop_box, [0, 0, orig_w, orig_h])
341
+ if not torch.all(keep_mask):
342
+ data.filter(keep_mask)
343
+
344
+ # Compress to RLE
345
+ data["masks"] = uncrop_masks(data["masks"], crop_box, orig_h, orig_w)
346
+ data["rles"] = mask_to_rle_pytorch(data["masks"])
347
+ del data["masks"]
348
+
349
+ return data
350
+
351
+ @staticmethod
352
+ def postprocess_small_regions(mask_data: MaskData, min_area: int, nms_thresh: float) -> MaskData:
353
+ """
354
+ Removes small disconnected regions and holes in masks, then reruns
355
+ box NMS to remove any new duplicates.
356
+
357
+ Edits mask_data in place.
358
+
359
+ Requires open-cv as a dependency.
360
+ """
361
+ if len(mask_data["rles"]) == 0:
362
+ return mask_data
363
+
364
+ # Filter small disconnected regions and holes
365
+ new_masks = []
366
+ scores = []
367
+ for rle in mask_data["rles"]:
368
+ mask = rle_to_mask(rle)
369
+
370
+ mask, changed = remove_small_regions(mask, min_area, mode="holes")
371
+ unchanged = not changed
372
+ mask, changed = remove_small_regions(mask, min_area, mode="islands")
373
+ unchanged = unchanged and not changed
374
+
375
+ new_masks.append(torch.as_tensor(mask).unsqueeze(0))
376
+ # Give score=0 to changed masks and score=1 to unchanged masks
377
+ # so NMS will prefer ones that didn't need postprocessing
378
+ scores.append(float(unchanged))
379
+
380
+ # Recalculate boxes and remove any new duplicates
381
+ masks = torch.cat(new_masks, dim=0)
382
+ boxes = batched_mask_to_box(masks)
383
+ keep_by_nms = batched_nms(
384
+ boxes.float(),
385
+ torch.as_tensor(scores),
386
+ torch.zeros_like(boxes[:, 0]), # categories
387
+ iou_threshold=nms_thresh,
388
+ )
389
+
390
+ # Only recalculate RLEs for masks that have changed
391
+ for i_mask in keep_by_nms:
392
+ if scores[i_mask] == 0.0:
393
+ mask_torch = masks[i_mask].unsqueeze(0)
394
+ mask_data["rles"][i_mask] = mask_to_rle_pytorch(mask_torch)[0]
395
+ mask_data["boxes"][i_mask] = boxes[i_mask] # update res directly
396
+ mask_data.filter(keep_by_nms)
397
+
398
+ return mask_data
399
+
400
+ def refine_with_m2m(self, points, point_labels, low_res_masks, points_per_batch):
401
+ new_masks = []
402
+ new_iou_preds = []
403
+
404
+ for cur_points, cur_point_labels, low_res_mask in batch_iterator(points_per_batch, points, point_labels,
405
+ low_res_masks):
406
+ best_masks, best_iou_preds, _ = self.predictor._predict(
407
+ cur_points[:, None, :],
408
+ cur_point_labels[:, None],
409
+ mask_input=low_res_mask[:, None, :],
410
+ multimask_output=False,
411
+ return_logits=True,
412
+ )
413
+ new_masks.append(best_masks)
414
+ new_iou_preds.append(best_iou_preds)
415
+ masks = torch.cat(new_masks, dim=0)
416
+ return masks, torch.cat(new_iou_preds, dim=0)
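A usage sketch for the generator above, assuming a SAM 2 checkpoint can be fetched from the Hugging Face hub via from_pretrained; the image path is illustrative, and generate() expects an HWC uint8 array as documented in its docstring:

import numpy as np
from PIL import Image

from sam2.automatic_mask_generator import SAM2AutomaticMaskGenerator

image = np.array(Image.open("example.jpg").convert("RGB"))  # hypothetical input image

generator = SAM2AutomaticMaskGenerator.from_pretrained("facebook/sam2.1-hiera-large", points_per_side=16)
masks = generator.generate(image)  # list of dicts with "segmentation", "bbox", "area", ...
print(len(masks))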
sam2/build_sam.py ADDED
@@ -0,0 +1,172 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+
4
+ # This source code is licensed under the license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ import logging
8
+ import os
9
+
10
+ import torch
11
+ from hydra import compose
12
+ from hydra.utils import instantiate
13
+ from omegaconf import OmegaConf
14
+
15
+ import sam2
16
+
17
+ # Check if the user is running Python from the parent directory of the sam2 repo
18
+ # (i.e. the directory where this repo is cloned into) -- this is not supported since
19
+ # it could shadow the sam2 package and cause issues.
20
+ if os.path.isdir(os.path.join(sam2.__path__[0], "sam2")):
21
+ # If the user has "sam2/sam2" in their path, they are likey importing the repo itself
22
+ # as "sam2" rather than importing the "sam2" python package (i.e. "sam2/sam2" directory).
23
+ # This typically happens because the user is running Python from the parent directory
24
+ # that contains the sam2 repo they cloned.
25
+ raise RuntimeError("You're likely running Python from the parent directory of the sam2 repository "
26
+ "(i.e. the directory where https://github.com/facebookresearch/sam2 is cloned into). "
27
+ "This is not supported since the `sam2` Python package could be shadowed by the "
28
+ "repository name (the repository is also named `sam2` and contains the Python package "
29
+ "in `sam2/sam2`). Please run Python from another directory (e.g. from the repo dir "
30
+ "rather than its parent dir, or from your home directory) after installing SAM 2.")
31
+
32
+ HF_MODEL_ID_TO_FILENAMES = {
33
+ "facebook/sam2-hiera-tiny": (
34
+ "configs/sam2/sam2_hiera_t.yaml",
35
+ "sam2_hiera_tiny.pt",
36
+ ),
37
+ "facebook/sam2-hiera-small": (
38
+ "configs/sam2/sam2_hiera_s.yaml",
39
+ "sam2_hiera_small.pt",
40
+ ),
41
+ "facebook/sam2-hiera-base-plus": (
42
+ "configs/sam2/sam2_hiera_b+.yaml",
43
+ "sam2_hiera_base_plus.pt",
44
+ ),
45
+ "facebook/sam2-hiera-large": (
46
+ "configs/sam2/sam2_hiera_l.yaml",
47
+ "sam2_hiera_large.pt",
48
+ ),
49
+ "facebook/sam2.1-hiera-tiny": (
50
+ "configs/sam2.1/sam2.1_hiera_t.yaml",
51
+ "sam2.1_hiera_tiny.pt",
52
+ ),
53
+ "facebook/sam2.1-hiera-small": (
54
+ "configs/sam2.1/sam2.1_hiera_s.yaml",
55
+ "sam2.1_hiera_small.pt",
56
+ ),
57
+ "facebook/sam2.1-hiera-base-plus": (
58
+ "configs/sam2.1/sam2.1_hiera_b+.yaml",
59
+ "sam2.1_hiera_base_plus.pt",
60
+ ),
61
+ "facebook/sam2.1-hiera-large": (
62
+ "configs/sam2.1/sam2.1_hiera_l.yaml",
63
+ "sam2.1_hiera_large.pt",
64
+ ),
65
+ }
66
+
67
+
68
+ def build_sam2(
69
+ config_file,
70
+ ckpt_path=None,
71
+ device="cuda",
72
+ mode="eval",
73
+ hydra_overrides_extra=[],
74
+ apply_postprocessing=True,
75
+ **kwargs,
76
+ ):
77
+
78
+ if apply_postprocessing:
79
+ hydra_overrides_extra = hydra_overrides_extra.copy()
80
+ hydra_overrides_extra += [
81
+ # dynamically fall back to multi-mask if the single mask is not stable
82
+ "++model.sam_mask_decoder_extra_args.dynamic_multimask_via_stability=true",
83
+ "++model.sam_mask_decoder_extra_args.dynamic_multimask_stability_delta=0.05",
84
+ "++model.sam_mask_decoder_extra_args.dynamic_multimask_stability_thresh=0.98",
85
+ ]
86
+ # Read config and init model
87
+ cfg = compose(config_name=config_file, overrides=hydra_overrides_extra)
88
+ OmegaConf.resolve(cfg)
89
+ model = instantiate(cfg.model, _recursive_=True)
90
+ _load_checkpoint(model, ckpt_path)
91
+ model = model.to(device)
92
+ if mode == "eval":
93
+ model.eval()
94
+ return model
95
+
96
+
97
+ def build_sam2_video_predictor(
98
+ config_file,
99
+ ckpt_path=None,
100
+ device="cuda",
101
+ mode="eval",
102
+ hydra_overrides_extra=[],
103
+ apply_postprocessing=True,
104
+ vos_optimized=False,
105
+ **kwargs,
106
+ ):
107
+ hydra_overrides = [
108
+ "++model._target_=sam2.sam2_video_predictor.SAM2VideoPredictor",
109
+ ]
110
+ if vos_optimized:
111
+ hydra_overrides = [
112
+ "++model._target_=sam2.sam2_video_predictor.SAM2VideoPredictorVOS",
113
+ "++model.compile_image_encoder=True", # Let sam2_base handle this
114
+ ]
115
+
116
+ if apply_postprocessing:
117
+ hydra_overrides_extra = hydra_overrides_extra.copy()
118
+ hydra_overrides_extra += [
119
+ # dynamically fall back to multi-mask if the single mask is not stable
120
+ "++model.sam_mask_decoder_extra_args.dynamic_multimask_via_stability=true",
121
+ "++model.sam_mask_decoder_extra_args.dynamic_multimask_stability_delta=0.05",
122
+ "++model.sam_mask_decoder_extra_args.dynamic_multimask_stability_thresh=0.98",
123
+ # binarize the sigmoid mask logits on interacted frames with clicks in the memory encoder so that the encoded masks are exactly what users see from clicking
124
+ "++model.binarize_mask_from_pts_for_mem_enc=true",
125
+ # fill small holes in the low-res masks up to `fill_hole_area` (before resizing them to the original video resolution)
126
+ "++model.fill_hole_area=8",
127
+ ]
128
+ hydra_overrides.extend(hydra_overrides_extra)
129
+
130
+ # Read config and init model
131
+ cfg = compose(config_name=config_file, overrides=hydra_overrides)
132
+ OmegaConf.resolve(cfg)
133
+ model = instantiate(cfg.model, _recursive_=True)
134
+ _load_checkpoint(model, ckpt_path)
135
+ model = model.to(device)
136
+ if mode == "eval":
137
+ model.eval()
138
+ return model
139
+
140
+
141
+ def _hf_download(model_id):
142
+ from huggingface_hub import hf_hub_download
143
+
144
+ config_name, checkpoint_name = HF_MODEL_ID_TO_FILENAMES[model_id]
145
+ ckpt_path = hf_hub_download(repo_id=model_id, filename=checkpoint_name)
146
+ return config_name, ckpt_path
147
+
148
+
149
+ def build_sam2_hf(model_id, **kwargs):
150
+ config_name, ckpt_path = _hf_download(model_id)
151
+ return build_sam2(config_file=config_name, ckpt_path=ckpt_path, **kwargs)
152
+
153
+
154
+ def build_sam2_video_predictor_hf(model_id, **kwargs):
155
+ config_name, ckpt_path = _hf_download(model_id)
156
+ return build_sam2_video_predictor(config_file=config_name, ckpt_path=ckpt_path, **kwargs)
157
+
158
+
159
+ def _load_checkpoint(model, ckpt_path):
160
+ if ckpt_path is not None:
161
+ sd = torch.load(ckpt_path, map_location="cpu", weights_only=True)["model"]
162
+ # https://github.com/huggingface/transformers/issues/29554
163
+ sd['memory_encoder.fuser.layers.0.weight'] = sd.pop('memory_encoder.fuser.layers.0.gamma')
164
+ sd['memory_encoder.fuser.layers.1.weight'] = sd.pop('memory_encoder.fuser.layers.1.gamma')
165
+ missing_keys, unexpected_keys = model.load_state_dict(sd)
166
+ if missing_keys:
167
+ logging.error(missing_keys)
168
+ raise RuntimeError()
169
+ if unexpected_keys:
170
+ logging.error(unexpected_keys)
171
+ raise RuntimeError()
172
+ logging.info("Loaded checkpoint successfully")
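A sketch of how these helpers could be called, assuming network access to the Hugging Face hub for the checkpoint download (model IDs follow HF_MODEL_ID_TO_FILENAMES above; device and mode default to "cuda" and "eval"):

from sam2.build_sam import build_sam2_hf, build_sam2_video_predictor_hf

image_model = build_sam2_hf("facebook/sam2.1-hiera-base-plus")
video_predictor = build_sam2_video_predictor_hf("facebook/sam2.1-hiera-tiny", device="cuda")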
sam2/configs/sam2.1/sam2.1_hiera_b+.yaml ADDED
@@ -0,0 +1,116 @@
1
+ # @package _global_
2
+
3
+ # Model
4
+ model:
5
+ _target_: sam2.modeling.sam2_base.SAM2Base
6
+ image_encoder:
7
+ _target_: sam2.modeling.backbones.image_encoder.ImageEncoder
8
+ scalp: 1
9
+ trunk:
10
+ _target_: sam2.modeling.backbones.hieradet.Hiera
11
+ embed_dim: 112
12
+ num_heads: 2
13
+ neck:
14
+ _target_: sam2.modeling.backbones.image_encoder.FpnNeck
15
+ position_encoding:
16
+ _target_: sam2.modeling.position_encoding.PositionEmbeddingSine
17
+ num_pos_feats: 256
18
+ normalize: true
19
+ scale: null
20
+ temperature: 10000
21
+ d_model: 256
22
+ backbone_channel_list: [896, 448, 224, 112]
23
+ fpn_top_down_levels: [2, 3] # output level 0 and 1 directly use the backbone features
24
+ fpn_interp_model: nearest
25
+
26
+ memory_attention:
27
+ _target_: sam2.modeling.memory_attention.MemoryAttention
28
+ d_model: 256
29
+ pos_enc_at_input: true
30
+ layer:
31
+ _target_: sam2.modeling.memory_attention.MemoryAttentionLayer
32
+ activation: relu
33
+ dim_feedforward: 2048
34
+ dropout: 0.1
35
+ pos_enc_at_attn: false
36
+ self_attention:
37
+ _target_: sam2.modeling.sam.transformer.RoPEAttention
38
+ rope_theta: 10000.0
39
+ feat_sizes: [64, 64]
40
+ embedding_dim: 256
41
+ num_heads: 1
42
+ downsample_rate: 1
43
+ dropout: 0.1
44
+ d_model: 256
45
+ pos_enc_at_cross_attn_keys: true
46
+ pos_enc_at_cross_attn_queries: false
47
+ cross_attention:
48
+ _target_: sam2.modeling.sam.transformer.RoPEAttention
49
+ rope_theta: 10000.0
50
+ feat_sizes: [64, 64]
51
+ rope_k_repeat: True
52
+ embedding_dim: 256
53
+ num_heads: 1
54
+ downsample_rate: 1
55
+ dropout: 0.1
56
+ kv_in_dim: 64
57
+ num_layers: 4
58
+
59
+ memory_encoder:
60
+ _target_: sam2.modeling.memory_encoder.MemoryEncoder
61
+ out_dim: 64
62
+ position_encoding:
63
+ _target_: sam2.modeling.position_encoding.PositionEmbeddingSine
64
+ num_pos_feats: 64
65
+ normalize: true
66
+ scale: null
67
+ temperature: 10000
68
+ mask_downsampler:
69
+ _target_: sam2.modeling.memory_encoder.MaskDownSampler
70
+ kernel_size: 3
71
+ stride: 2
72
+ padding: 1
73
+ fuser:
74
+ _target_: sam2.modeling.memory_encoder.Fuser
75
+ layer:
76
+ _target_: sam2.modeling.memory_encoder.CXBlock
77
+ dim: 256
78
+ kernel_size: 7
79
+ padding: 3
80
+ layer_scale_init_value: 1e-6
81
+ use_dwconv: True # depth-wise convs
82
+ num_layers: 2
83
+
84
+ num_maskmem: 7
85
+ image_size: 1024
86
+ # apply scaled sigmoid on mask logits for memory encoder, and directly feed input mask as output mask
87
+ sigmoid_scale_for_mem_enc: 20.0
88
+ sigmoid_bias_for_mem_enc: -10.0
89
+ use_mask_input_as_output_without_sam: true
90
+ # Memory
91
+ directly_add_no_mem_embed: true
92
+ no_obj_embed_spatial: true
93
+ # use high-resolution feature map in the SAM mask decoder
94
+ use_high_res_features_in_sam: true
95
+ # output 3 masks on the first click on initial conditioning frames
96
+ multimask_output_in_sam: true
97
+ # SAM heads
98
+ iou_prediction_use_sigmoid: True
99
+ # cross-attend to object pointers from other frames (based on SAM output tokens) in the encoder
100
+ use_obj_ptrs_in_encoder: true
101
+ add_tpos_enc_to_obj_ptrs: true
102
+ proj_tpos_enc_in_obj_ptrs: true
103
+ use_signed_tpos_enc_to_obj_ptrs: true
104
+ only_obj_ptrs_in_the_past_for_eval: true
105
+ # object occlusion prediction
106
+ pred_obj_scores: true
107
+ pred_obj_scores_mlp: true
108
+ fixed_no_obj_ptr: true
109
+ # multimask tracking settings
110
+ multimask_output_for_tracking: true
111
+ use_multimask_token_for_obj_ptr: true
112
+ multimask_min_pt_num: 0
113
+ multimask_max_pt_num: 1
114
+ use_mlp_for_obj_ptr_proj: true
115
+ # Compilation flag
116
+ compile_image_encoder: False
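This YAML is consumed by build_sam2 above through Hydra, so any of its fields can be overridden at build time via the hydra_overrides_extra argument. A hedged sketch (the checkpoint path is hypothetical; the ++ override syntax mirrors the overrides already used in build_sam.py):

from sam2.build_sam import build_sam2

model = build_sam2(
    config_file="configs/sam2.1/sam2.1_hiera_b+.yaml",
    ckpt_path="checkpoints/sam2.1_hiera_base_plus.pt",  # hypothetical local path
    hydra_overrides_extra=["++model.image_size=512"],  # overrides image_size: 1024 above
    apply_postprocessing=False,
)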
sam2/configs/sam2.1/sam2.1_hiera_l.yaml ADDED
@@ -0,0 +1,120 @@
1
+ # @package _global_
2
+
3
+ # Model
4
+ model:
5
+ _target_: sam2.modeling.sam2_base.SAM2Base
6
+ image_encoder:
7
+ _target_: sam2.modeling.backbones.image_encoder.ImageEncoder
8
+     scalp: 1
+     trunk:
+       _target_: sam2.modeling.backbones.hieradet.Hiera
+       embed_dim: 144
+       num_heads: 2
+       stages: [2, 6, 36, 4]
+       global_att_blocks: [23, 33, 43]
+       window_pos_embed_bkg_spatial_size: [7, 7]
+       window_spec: [8, 4, 16, 8]
+     neck:
+       _target_: sam2.modeling.backbones.image_encoder.FpnNeck
+       position_encoding:
+         _target_: sam2.modeling.position_encoding.PositionEmbeddingSine
+         num_pos_feats: 256
+         normalize: true
+         scale: null
+         temperature: 10000
+       d_model: 256
+       backbone_channel_list: [1152, 576, 288, 144]
+       fpn_top_down_levels: [2, 3] # output level 0 and 1 directly use the backbone features
+       fpn_interp_model: nearest
+
+   memory_attention:
+     _target_: sam2.modeling.memory_attention.MemoryAttention
+     d_model: 256
+     pos_enc_at_input: true
+     layer:
+       _target_: sam2.modeling.memory_attention.MemoryAttentionLayer
+       activation: relu
+       dim_feedforward: 2048
+       dropout: 0.1
+       pos_enc_at_attn: false
+       self_attention:
+         _target_: sam2.modeling.sam.transformer.RoPEAttention
+         rope_theta: 10000.0
+         feat_sizes: [64, 64]
+         embedding_dim: 256
+         num_heads: 1
+         downsample_rate: 1
+         dropout: 0.1
+       d_model: 256
+       pos_enc_at_cross_attn_keys: true
+       pos_enc_at_cross_attn_queries: false
+       cross_attention:
+         _target_: sam2.modeling.sam.transformer.RoPEAttention
+         rope_theta: 10000.0
+         feat_sizes: [64, 64]
+         rope_k_repeat: True
+         embedding_dim: 256
+         num_heads: 1
+         downsample_rate: 1
+         dropout: 0.1
+         kv_in_dim: 64
+     num_layers: 4
+
+   memory_encoder:
+     _target_: sam2.modeling.memory_encoder.MemoryEncoder
+     out_dim: 64
+     position_encoding:
+       _target_: sam2.modeling.position_encoding.PositionEmbeddingSine
+       num_pos_feats: 64
+       normalize: true
+       scale: null
+       temperature: 10000
+     mask_downsampler:
+       _target_: sam2.modeling.memory_encoder.MaskDownSampler
+       kernel_size: 3
+       stride: 2
+       padding: 1
+     fuser:
+       _target_: sam2.modeling.memory_encoder.Fuser
+       layer:
+         _target_: sam2.modeling.memory_encoder.CXBlock
+         dim: 256
+         kernel_size: 7
+         padding: 3
+         layer_scale_init_value: 1e-6
+         use_dwconv: True # depth-wise convs
+       num_layers: 2
+
+   num_maskmem: 7
+   image_size: 1024
+   # apply scaled sigmoid on mask logits for memory encoder, and directly feed input mask as output mask
+   sigmoid_scale_for_mem_enc: 20.0
+   sigmoid_bias_for_mem_enc: -10.0
+   use_mask_input_as_output_without_sam: true
+   # Memory
+   directly_add_no_mem_embed: true
+   no_obj_embed_spatial: true
+   # use high-resolution feature map in the SAM mask decoder
+   use_high_res_features_in_sam: true
+   # output 3 masks on the first click on initial conditioning frames
+   multimask_output_in_sam: true
+   # SAM heads
+   iou_prediction_use_sigmoid: True
+   # cross-attend to object pointers from other frames (based on SAM output tokens) in the encoder
+   use_obj_ptrs_in_encoder: true
+   add_tpos_enc_to_obj_ptrs: true
+   proj_tpos_enc_in_obj_ptrs: true
+   use_signed_tpos_enc_to_obj_ptrs: true
+   only_obj_ptrs_in_the_past_for_eval: true
+   # object occlusion prediction
+   pred_obj_scores: true
+   pred_obj_scores_mlp: true
+   fixed_no_obj_ptr: true
+   # multimask tracking settings
+   multimask_output_for_tracking: true
+   use_multimask_token_for_obj_ptr: true
+   multimask_min_pt_num: 0
+   multimask_max_pt_num: 1
+   use_mlp_for_obj_ptr_proj: true
+   # Compilation flag
+   compile_image_encoder: False
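Note on the Hydra configs in this commit: every `_target_` key names a class that Hydra instantiates recursively, with the sibling keys passed as constructor arguments. A minimal sketch of that mechanism (not part of this commit; it mirrors how sam2/build_sam.py composes configs, and assumes hydra-core and omegaconf are installed):

    # Hypothetical loader sketch: compose a config from the `sam2` package
    # and recursively instantiate every `_target_` node into live objects.
    from hydra import compose, initialize_config_module
    from hydra.utils import instantiate

    with initialize_config_module(config_module="sam2", version_base="1.2"):
        cfg = compose(config_name="configs/sam2.1/sam2.1_hiera_l.yaml")
    model = instantiate(cfg.model, _recursive_=True)  # builds a SAM2Base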
sam2/configs/sam2.1/sam2.1_hiera_s.yaml ADDED
@@ -0,0 +1,119 @@
+ # @package _global_
+
+ # Model
+ model:
+   _target_: sam2.modeling.sam2_base.SAM2Base
+   image_encoder:
+     _target_: sam2.modeling.backbones.image_encoder.ImageEncoder
+     scalp: 1
+     trunk:
+       _target_: sam2.modeling.backbones.hieradet.Hiera
+       embed_dim: 96
+       num_heads: 1
+       stages: [1, 2, 11, 2]
+       global_att_blocks: [7, 10, 13]
+       window_pos_embed_bkg_spatial_size: [7, 7]
+     neck:
+       _target_: sam2.modeling.backbones.image_encoder.FpnNeck
+       position_encoding:
+         _target_: sam2.modeling.position_encoding.PositionEmbeddingSine
+         num_pos_feats: 256
+         normalize: true
+         scale: null
+         temperature: 10000
+       d_model: 256
+       backbone_channel_list: [768, 384, 192, 96]
+       fpn_top_down_levels: [2, 3] # output level 0 and 1 directly use the backbone features
+       fpn_interp_model: nearest
+
+   memory_attention:
+     _target_: sam2.modeling.memory_attention.MemoryAttention
+     d_model: 256
+     pos_enc_at_input: true
+     layer:
+       _target_: sam2.modeling.memory_attention.MemoryAttentionLayer
+       activation: relu
+       dim_feedforward: 2048
+       dropout: 0.1
+       pos_enc_at_attn: false
+       self_attention:
+         _target_: sam2.modeling.sam.transformer.RoPEAttention
+         rope_theta: 10000.0
+         feat_sizes: [64, 64]
+         embedding_dim: 256
+         num_heads: 1
+         downsample_rate: 1
+         dropout: 0.1
+       d_model: 256
+       pos_enc_at_cross_attn_keys: true
+       pos_enc_at_cross_attn_queries: false
+       cross_attention:
+         _target_: sam2.modeling.sam.transformer.RoPEAttention
+         rope_theta: 10000.0
+         feat_sizes: [64, 64]
+         rope_k_repeat: True
+         embedding_dim: 256
+         num_heads: 1
+         downsample_rate: 1
+         dropout: 0.1
+         kv_in_dim: 64
+     num_layers: 4
+
+   memory_encoder:
+     _target_: sam2.modeling.memory_encoder.MemoryEncoder
+     out_dim: 64
+     position_encoding:
+       _target_: sam2.modeling.position_encoding.PositionEmbeddingSine
+       num_pos_feats: 64
+       normalize: true
+       scale: null
+       temperature: 10000
+     mask_downsampler:
+       _target_: sam2.modeling.memory_encoder.MaskDownSampler
+       kernel_size: 3
+       stride: 2
+       padding: 1
+     fuser:
+       _target_: sam2.modeling.memory_encoder.Fuser
+       layer:
+         _target_: sam2.modeling.memory_encoder.CXBlock
+         dim: 256
+         kernel_size: 7
+         padding: 3
+         layer_scale_init_value: 1e-6
+         use_dwconv: True # depth-wise convs
+       num_layers: 2
+
+   num_maskmem: 7
+   image_size: 1024
+   # apply scaled sigmoid on mask logits for memory encoder, and directly feed input mask as output mask
+   sigmoid_scale_for_mem_enc: 20.0
+   sigmoid_bias_for_mem_enc: -10.0
+   use_mask_input_as_output_without_sam: true
+   # Memory
+   directly_add_no_mem_embed: true
+   no_obj_embed_spatial: true
+   # use high-resolution feature map in the SAM mask decoder
+   use_high_res_features_in_sam: true
+   # output 3 masks on the first click on initial conditioning frames
+   multimask_output_in_sam: true
+   # SAM heads
+   iou_prediction_use_sigmoid: True
+   # cross-attend to object pointers from other frames (based on SAM output tokens) in the encoder
+   use_obj_ptrs_in_encoder: true
+   add_tpos_enc_to_obj_ptrs: true
+   proj_tpos_enc_in_obj_ptrs: true
+   use_signed_tpos_enc_to_obj_ptrs: true
+   only_obj_ptrs_in_the_past_for_eval: true
+   # object occlusion prediction
+   pred_obj_scores: true
+   pred_obj_scores_mlp: true
+   fixed_no_obj_ptr: true
+   # multimask tracking settings
+   multimask_output_for_tracking: true
+   use_multimask_token_for_obj_ptr: true
+   multimask_min_pt_num: 0
+   multimask_max_pt_num: 1
+   use_mlp_for_obj_ptr_proj: true
+   # Compilation flag
+   compile_image_encoder: False
sam2/configs/sam2.1/sam2.1_hiera_t.yaml ADDED
@@ -0,0 +1,121 @@
+ # @package _global_
+
+ # Model
+ model:
+   _target_: sam2.modeling.sam2_base.SAM2Base
+   image_encoder:
+     _target_: sam2.modeling.backbones.image_encoder.ImageEncoder
+     scalp: 1
+     trunk:
+       _target_: sam2.modeling.backbones.hieradet.Hiera
+       embed_dim: 96
+       num_heads: 1
+       stages: [1, 2, 7, 2]
+       global_att_blocks: [5, 7, 9]
+       window_pos_embed_bkg_spatial_size: [7, 7]
+     neck:
+       _target_: sam2.modeling.backbones.image_encoder.FpnNeck
+       position_encoding:
+         _target_: sam2.modeling.position_encoding.PositionEmbeddingSine
+         num_pos_feats: 256
+         normalize: true
+         scale: null
+         temperature: 10000
+       d_model: 256
+       backbone_channel_list: [768, 384, 192, 96]
+       fpn_top_down_levels: [2, 3] # output level 0 and 1 directly use the backbone features
+       fpn_interp_model: nearest
+
+   memory_attention:
+     _target_: sam2.modeling.memory_attention.MemoryAttention
+     d_model: 256
+     pos_enc_at_input: true
+     layer:
+       _target_: sam2.modeling.memory_attention.MemoryAttentionLayer
+       activation: relu
+       dim_feedforward: 2048
+       dropout: 0.1
+       pos_enc_at_attn: false
+       self_attention:
+         _target_: sam2.modeling.sam.transformer.RoPEAttention
+         rope_theta: 10000.0
+         feat_sizes: [64, 64]
+         embedding_dim: 256
+         num_heads: 1
+         downsample_rate: 1
+         dropout: 0.1
+       d_model: 256
+       pos_enc_at_cross_attn_keys: true
+       pos_enc_at_cross_attn_queries: false
+       cross_attention:
+         _target_: sam2.modeling.sam.transformer.RoPEAttention
+         rope_theta: 10000.0
+         feat_sizes: [64, 64]
+         rope_k_repeat: True
+         embedding_dim: 256
+         num_heads: 1
+         downsample_rate: 1
+         dropout: 0.1
+         kv_in_dim: 64
+     num_layers: 4
+
+   memory_encoder:
+     _target_: sam2.modeling.memory_encoder.MemoryEncoder
+     out_dim: 64
+     position_encoding:
+       _target_: sam2.modeling.position_encoding.PositionEmbeddingSine
+       num_pos_feats: 64
+       normalize: true
+       scale: null
+       temperature: 10000
+     mask_downsampler:
+       _target_: sam2.modeling.memory_encoder.MaskDownSampler
+       kernel_size: 3
+       stride: 2
+       padding: 1
+     fuser:
+       _target_: sam2.modeling.memory_encoder.Fuser
+       layer:
+         _target_: sam2.modeling.memory_encoder.CXBlock
+         dim: 256
+         kernel_size: 7
+         padding: 3
+         layer_scale_init_value: 1e-6
+         use_dwconv: True # depth-wise convs
+       num_layers: 2
+
+   num_maskmem: 7
+   image_size: 1024
+   # apply scaled sigmoid on mask logits for memory encoder, and directly feed input mask as output mask
+   # SAM decoder
+   sigmoid_scale_for_mem_enc: 20.0
+   sigmoid_bias_for_mem_enc: -10.0
+   use_mask_input_as_output_without_sam: true
+   # Memory
+   directly_add_no_mem_embed: true
+   no_obj_embed_spatial: true
+   # use high-resolution feature map in the SAM mask decoder
+   use_high_res_features_in_sam: true
+   # output 3 masks on the first click on initial conditioning frames
+   multimask_output_in_sam: true
+   # SAM heads
+   iou_prediction_use_sigmoid: True
+   # cross-attend to object pointers from other frames (based on SAM output tokens) in the encoder
+   use_obj_ptrs_in_encoder: true
+   add_tpos_enc_to_obj_ptrs: true
+   proj_tpos_enc_in_obj_ptrs: true
+   use_signed_tpos_enc_to_obj_ptrs: true
+   only_obj_ptrs_in_the_past_for_eval: true
+   # object occlusion prediction
+   pred_obj_scores: true
+   pred_obj_scores_mlp: true
+   fixed_no_obj_ptr: true
+   # multimask tracking settings
+   multimask_output_for_tracking: true
+   use_multimask_token_for_obj_ptr: true
+   multimask_min_pt_num: 0
+   multimask_max_pt_num: 1
+   use_mlp_for_obj_ptr_proj: true
+   # Compilation flag
+   # HieraT does not currently support compilation, should always be set to False
+   compile_image_encoder: False
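The four sam2.1 inference configs above differ only in the Hiera trunk (embed_dim, stages, global_att_blocks) and the matching backbone_channel_list. A hedged usage sketch, assuming the checkpoint path below exists locally (build_sam2 is defined in sam2/build_sam.py in this commit):

    import torch
    from sam2.build_sam import build_sam2
    from sam2.sam2_image_predictor import SAM2ImagePredictor

    # Any of the four variants works here; the tiny config pairs with the
    # tiny checkpoint (the path is an assumption for illustration).
    model = build_sam2(
        config_file="configs/sam2.1/sam2.1_hiera_t.yaml",
        ckpt_path="checkpoints/sam2.1_hiera_tiny.pt",
        device="cuda" if torch.cuda.is_available() else "cpu",
    )
    predictor = SAM2ImagePredictor(model)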
sam2/configs/sam2.1_hiera_b+.yaml ADDED
@@ -0,0 +1,137 @@
+ # @package _global_
+
+ # Model
+ model:
+   _target_: sam2.sam2_train.SAM2Train
+   image_encoder:
+     _target_: sam2.modeling.backbones.image_encoder.ImageEncoder
+     scalp: 1
+     trunk:
+       _target_: sam2.modeling.backbones.hieradet.Hiera
+       embed_dim: 112
+       num_heads: 2
+       drop_path_rate: 0.1
+     neck:
+       _target_: sam2.modeling.backbones.image_encoder.FpnNeck
+       position_encoding:
+         _target_: sam2.modeling.position_encoding.PositionEmbeddingSine
+         num_pos_feats: 256
+         normalize: true
+         scale: null
+         temperature: 10000
+       d_model: 256
+       backbone_channel_list: [896, 448, 224, 112]
+       fpn_top_down_levels: [2, 3] # output level 0 and 1 directly use the backbone features
+       fpn_interp_model: nearest
+
+   memory_attention:
+     _target_: sam2.modeling.memory_attention.MemoryAttention
+     d_model: 256
+     pos_enc_at_input: true
+     layer:
+       _target_: sam2.modeling.memory_attention.MemoryAttentionLayer
+       activation: relu
+       dim_feedforward: 2048
+       dropout: 0.1
+       pos_enc_at_attn: false
+       self_attention:
+         _target_: sam2.modeling.sam.transformer.RoPEAttention
+         rope_theta: 10000.0
+         feat_sizes: [64, 64]
+         embedding_dim: 256
+         num_heads: 1
+         downsample_rate: 1
+         dropout: 0.1
+       d_model: 256
+       pos_enc_at_cross_attn_keys: true
+       pos_enc_at_cross_attn_queries: false
+       cross_attention:
+         _target_: sam2.modeling.sam.transformer.RoPEAttention
+         rope_theta: 10000.0
+         feat_sizes: [64, 64]
+         rope_k_repeat: True
+         embedding_dim: 256
+         num_heads: 1
+         downsample_rate: 1
+         dropout: 0.1
+         kv_in_dim: 64
+     num_layers: 4
+
+   memory_encoder:
+     _target_: sam2.modeling.memory_encoder.MemoryEncoder
+     out_dim: 64
+     position_encoding:
+       _target_: sam2.modeling.position_encoding.PositionEmbeddingSine
+       num_pos_feats: 64
+       normalize: true
+       scale: null
+       temperature: 10000
+     mask_downsampler:
+       _target_: sam2.modeling.memory_encoder.MaskDownSampler
+       kernel_size: 3
+       stride: 2
+       padding: 1
+     fuser:
+       _target_: sam2.modeling.memory_encoder.Fuser
+       layer:
+         _target_: sam2.modeling.memory_encoder.CXBlock
+         dim: 256
+         kernel_size: 7
+         padding: 3
+         layer_scale_init_value: 1e-6
+         use_dwconv: True # depth-wise convs
+       num_layers: 2
+
+   num_maskmem: 7
+   image_size: 1024
+   # apply scaled sigmoid on mask logits for memory encoder, and directly feed input mask as output mask
+   sigmoid_scale_for_mem_enc: 20.0
+   sigmoid_bias_for_mem_enc: -10.0
+   use_mask_input_as_output_without_sam: true
+   # Memory
+   directly_add_no_mem_embed: true
+   no_obj_embed_spatial: true
+   # use high-resolution feature map in the SAM mask decoder
+   use_high_res_features_in_sam: true
+   # output 3 masks on the first click on initial conditioning frames
+   multimask_output_in_sam: true
+   # SAM heads
+   iou_prediction_use_sigmoid: True
+   # cross-attend to object pointers from other frames (based on SAM output tokens) in the encoder
+   use_obj_ptrs_in_encoder: true
+   add_tpos_enc_to_obj_ptrs: true
+   proj_tpos_enc_in_obj_ptrs: true
+   use_signed_tpos_enc_to_obj_ptrs: true
+   only_obj_ptrs_in_the_past_for_eval: true
+   # object occlusion prediction
+   pred_obj_scores: true
+   pred_obj_scores_mlp: true
+   fixed_no_obj_ptr: true
+   # multimask tracking settings
+   multimask_output_for_tracking: true
+   use_multimask_token_for_obj_ptr: true
+   multimask_min_pt_num: 0
+   multimask_max_pt_num: 1
+   use_mlp_for_obj_ptr_proj: true
+   # Compilation flag
+   compile_image_encoder: False
+
+   ####### Training specific params #######
+   # box/point input and corrections
+   prob_to_use_pt_input_for_train: 0.5
+   prob_to_use_pt_input_for_eval: 0.0
+   prob_to_use_box_input_for_train: 0.5 # 0.5*0.5 = 0.25 prob to use box instead of points
+   prob_to_use_box_input_for_eval: 0.0
+   prob_to_sample_from_gt_for_train: 0.1 # with a small prob, sampling correction points from GT mask instead of prediction errors
+   num_frames_to_correct_for_train: 2 # iteratively sample on random 1~2 frames (always include the first frame)
+   num_frames_to_correct_for_eval: 1 # only iteratively sample on first frame
+   rand_frames_to_correct_for_train: true # random #init-cond-frame ~ 2
+   add_all_frames_to_correct_as_cond: true # when a frame receives a correction click, it becomes a conditioning frame (even if it's not initially a conditioning frame)
+   # maximum 2 initial conditioning frames
+   num_init_cond_frames_for_train: 2
+   rand_init_cond_frames_for_train: true # random 1~2
+   num_correction_pt_per_frame: 7
+   use_act_ckpt_iterative_pt_sampling: false
+
+   num_init_cond_frames_for_eval: 1 # only mask on the first frame
+   forward_backbone_per_frame_for_eval: true
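The training-specific block above composes two coin flips: with prob_to_use_pt_input_for_train a clip is prompted with clicks/boxes rather than a mask, and within that branch prob_to_use_box_input_for_train picks a box, giving the 0.5*0.5 = 0.25 net box probability noted in the comment. An illustrative paraphrase of that sampling (not the repo's actual implementation in sam2/sam2_train.py):

    import random

    def sample_prompt_type(p_pt: float = 0.5, p_box: float = 0.5) -> str:
        # With prob p_pt, prompt with a point/box instead of a ground-truth
        # mask; inside that branch, with prob p_box, use a box.
        # Net box probability = p_pt * p_box.
        if random.random() < p_pt:
            return "box" if random.random() < p_box else "point"
        return "mask"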
sam2/configs/sam2.1_hiera_l.yaml ADDED
@@ -0,0 +1,141 @@
+ # @package _global_
+
+ # Model
+ model:
+   _target_: sam2.sam2_train.SAM2Train
+   image_encoder:
+     _target_: sam2.modeling.backbones.image_encoder.ImageEncoder
+     scalp: 1
+     trunk:
+       _target_: sam2.modeling.backbones.hieradet.Hiera
+       embed_dim: 144
+       num_heads: 2
+       stages: [2, 6, 36, 4]
+       global_att_blocks: [23, 33, 43]
+       window_pos_embed_bkg_spatial_size: [7, 7]
+       window_spec: [8, 4, 16, 8]
+       drop_path_rate: 0.1
+     neck:
+       _target_: sam2.modeling.backbones.image_encoder.FpnNeck
+       position_encoding:
+         _target_: sam2.modeling.position_encoding.PositionEmbeddingSine
+         num_pos_feats: 256
+         normalize: true
+         scale: null
+         temperature: 10000
+       d_model: 256
+       backbone_channel_list: [1152, 576, 288, 144]
+       fpn_top_down_levels: [2, 3] # output level 0 and 1 directly use the backbone features
+       fpn_interp_model: nearest
+
+   memory_attention:
+     _target_: sam2.modeling.memory_attention.MemoryAttention
+     d_model: 256
+     pos_enc_at_input: true
+     layer:
+       _target_: sam2.modeling.memory_attention.MemoryAttentionLayer
+       activation: relu
+       dim_feedforward: 2048
+       dropout: 0.1
+       pos_enc_at_attn: false
+       self_attention:
+         _target_: sam2.modeling.sam.transformer.RoPEAttention
+         rope_theta: 10000.0
+         feat_sizes: [64, 64]
+         embedding_dim: 256
+         num_heads: 1
+         downsample_rate: 1
+         dropout: 0.1
+       d_model: 256
+       pos_enc_at_cross_attn_keys: true
+       pos_enc_at_cross_attn_queries: false
+       cross_attention:
+         _target_: sam2.modeling.sam.transformer.RoPEAttention
+         rope_theta: 10000.0
+         feat_sizes: [64, 64]
+         rope_k_repeat: True
+         embedding_dim: 256
+         num_heads: 1
+         downsample_rate: 1
+         dropout: 0.1
+         kv_in_dim: 64
+     num_layers: 4
+
+   memory_encoder:
+     _target_: sam2.modeling.memory_encoder.MemoryEncoder
+     out_dim: 64
+     position_encoding:
+       _target_: sam2.modeling.position_encoding.PositionEmbeddingSine
+       num_pos_feats: 64
+       normalize: true
+       scale: null
+       temperature: 10000
+     mask_downsampler:
+       _target_: sam2.modeling.memory_encoder.MaskDownSampler
+       kernel_size: 3
+       stride: 2
+       padding: 1
+     fuser:
+       _target_: sam2.modeling.memory_encoder.Fuser
+       layer:
+         _target_: sam2.modeling.memory_encoder.CXBlock
+         dim: 256
+         kernel_size: 7
+         padding: 3
+         layer_scale_init_value: 1e-6
+         use_dwconv: True # depth-wise convs
+       num_layers: 2
+
+   num_maskmem: 7
+   image_size: 1024
+   # apply scaled sigmoid on mask logits for memory encoder, and directly feed input mask as output mask
+   sigmoid_scale_for_mem_enc: 20.0
+   sigmoid_bias_for_mem_enc: -10.0
+   use_mask_input_as_output_without_sam: true
+   # Memory
+   directly_add_no_mem_embed: true
+   no_obj_embed_spatial: true
+   # use high-resolution feature map in the SAM mask decoder
+   use_high_res_features_in_sam: true
+   # output 3 masks on the first click on initial conditioning frames
+   multimask_output_in_sam: true
+   # SAM heads
+   iou_prediction_use_sigmoid: True
+   # cross-attend to object pointers from other frames (based on SAM output tokens) in the encoder
+   use_obj_ptrs_in_encoder: true
+   add_tpos_enc_to_obj_ptrs: true
+   proj_tpos_enc_in_obj_ptrs: true
+   use_signed_tpos_enc_to_obj_ptrs: true
+   only_obj_ptrs_in_the_past_for_eval: true
+   # object occlusion prediction
+   pred_obj_scores: true
+   pred_obj_scores_mlp: true
+   fixed_no_obj_ptr: true
+   # multimask tracking settings
+   multimask_output_for_tracking: true
+   use_multimask_token_for_obj_ptr: true
+   multimask_min_pt_num: 0
+   multimask_max_pt_num: 1
+   use_mlp_for_obj_ptr_proj: true
+   # Compilation flag
+   compile_image_encoder: False
+
+   ####### Training specific params #######
+   # box/point input and corrections
+   prob_to_use_pt_input_for_train: 0.5
+   prob_to_use_pt_input_for_eval: 0.0
+   prob_to_use_box_input_for_train: 0.5 # 0.5*0.5 = 0.25 prob to use box instead of points
+   prob_to_use_box_input_for_eval: 0.0
+   prob_to_sample_from_gt_for_train: 0.1 # with a small prob, sampling correction points from GT mask instead of prediction errors
+   num_frames_to_correct_for_train: 2 # iteratively sample on random 1~2 frames (always include the first frame)
+   num_frames_to_correct_for_eval: 1 # only iteratively sample on first frame
+   rand_frames_to_correct_for_train: true # random #init-cond-frame ~ 2
+   add_all_frames_to_correct_as_cond: true # when a frame receives a correction click, it becomes a conditioning frame (even if it's not initially a conditioning frame)
+   # maximum 2 initial conditioning frames
+   num_init_cond_frames_for_train: 2
+   rand_init_cond_frames_for_train: true # random 1~2
+   num_correction_pt_per_frame: 7
+   use_act_ckpt_iterative_pt_sampling: false
+
+   num_init_cond_frames_for_eval: 1 # only mask on the first frame
+   forward_backbone_per_frame_for_eval: true
sam2/configs/sam2.1_hiera_s.yaml ADDED
@@ -0,0 +1,140 @@
+ # @package _global_
+
+ # Model
+ model:
+   _target_: sam2.sam2_train.SAM2Train
+   image_encoder:
+     _target_: sam2.modeling.backbones.image_encoder.ImageEncoder
+     scalp: 1
+     trunk:
+       _target_: sam2.modeling.backbones.hieradet.Hiera
+       embed_dim: 96
+       num_heads: 1
+       stages: [1, 2, 11, 2]
+       global_att_blocks: [7, 10, 13]
+       window_pos_embed_bkg_spatial_size: [7, 7]
+       drop_path_rate: 0.1
+     neck:
+       _target_: sam2.modeling.backbones.image_encoder.FpnNeck
+       position_encoding:
+         _target_: sam2.modeling.position_encoding.PositionEmbeddingSine
+         num_pos_feats: 256
+         normalize: true
+         scale: null
+         temperature: 10000
+       d_model: 256
+       backbone_channel_list: [768, 384, 192, 96]
+       fpn_top_down_levels: [2, 3] # output level 0 and 1 directly use the backbone features
+       fpn_interp_model: nearest
+
+   memory_attention:
+     _target_: sam2.modeling.memory_attention.MemoryAttention
+     d_model: 256
+     pos_enc_at_input: true
+     layer:
+       _target_: sam2.modeling.memory_attention.MemoryAttentionLayer
+       activation: relu
+       dim_feedforward: 2048
+       dropout: 0.1
+       pos_enc_at_attn: false
+       self_attention:
+         _target_: sam2.modeling.sam.transformer.RoPEAttention
+         rope_theta: 10000.0
+         feat_sizes: [64, 64]
+         embedding_dim: 256
+         num_heads: 1
+         downsample_rate: 1
+         dropout: 0.1
+       d_model: 256
+       pos_enc_at_cross_attn_keys: true
+       pos_enc_at_cross_attn_queries: false
+       cross_attention:
+         _target_: sam2.modeling.sam.transformer.RoPEAttention
+         rope_theta: 10000.0
+         feat_sizes: [64, 64]
+         rope_k_repeat: True
+         embedding_dim: 256
+         num_heads: 1
+         downsample_rate: 1
+         dropout: 0.1
+         kv_in_dim: 64
+     num_layers: 4
+
+   memory_encoder:
+     _target_: sam2.modeling.memory_encoder.MemoryEncoder
+     out_dim: 64
+     position_encoding:
+       _target_: sam2.modeling.position_encoding.PositionEmbeddingSine
+       num_pos_feats: 64
+       normalize: true
+       scale: null
+       temperature: 10000
+     mask_downsampler:
+       _target_: sam2.modeling.memory_encoder.MaskDownSampler
+       kernel_size: 3
+       stride: 2
+       padding: 1
+     fuser:
+       _target_: sam2.modeling.memory_encoder.Fuser
+       layer:
+         _target_: sam2.modeling.memory_encoder.CXBlock
+         dim: 256
+         kernel_size: 7
+         padding: 3
+         layer_scale_init_value: 1e-6
+         use_dwconv: True # depth-wise convs
+       num_layers: 2
+
+   num_maskmem: 7
+   image_size: 1024
+   # apply scaled sigmoid on mask logits for memory encoder, and directly feed input mask as output mask
+   sigmoid_scale_for_mem_enc: 20.0
+   sigmoid_bias_for_mem_enc: -10.0
+   use_mask_input_as_output_without_sam: true
+   # Memory
+   directly_add_no_mem_embed: true
+   no_obj_embed_spatial: true
+   # use high-resolution feature map in the SAM mask decoder
+   use_high_res_features_in_sam: true
+   # output 3 masks on the first click on initial conditioning frames
+   multimask_output_in_sam: true
+   # SAM heads
+   iou_prediction_use_sigmoid: True
+   # cross-attend to object pointers from other frames (based on SAM output tokens) in the encoder
+   use_obj_ptrs_in_encoder: true
+   add_tpos_enc_to_obj_ptrs: true
+   proj_tpos_enc_in_obj_ptrs: true
+   use_signed_tpos_enc_to_obj_ptrs: true
+   only_obj_ptrs_in_the_past_for_eval: true
+   # object occlusion prediction
+   pred_obj_scores: true
+   pred_obj_scores_mlp: true
+   fixed_no_obj_ptr: true
+   # multimask tracking settings
+   multimask_output_for_tracking: true
+   use_multimask_token_for_obj_ptr: true
+   multimask_min_pt_num: 0
+   multimask_max_pt_num: 1
+   use_mlp_for_obj_ptr_proj: true
+   # Compilation flag
+   compile_image_encoder: False
+
+   ####### Training specific params #######
+   # box/point input and corrections
+   prob_to_use_pt_input_for_train: 0.5
+   prob_to_use_pt_input_for_eval: 0.0
+   prob_to_use_box_input_for_train: 0.5 # 0.5*0.5 = 0.25 prob to use box instead of points
+   prob_to_use_box_input_for_eval: 0.0
+   prob_to_sample_from_gt_for_train: 0.1 # with a small prob, sampling correction points from GT mask instead of prediction errors
+   num_frames_to_correct_for_train: 2 # iteratively sample on random 1~2 frames (always include the first frame)
+   num_frames_to_correct_for_eval: 1 # only iteratively sample on first frame
+   rand_frames_to_correct_for_train: true # random #init-cond-frame ~ 2
+   add_all_frames_to_correct_as_cond: true # when a frame receives a correction click, it becomes a conditioning frame (even if it's not initially a conditioning frame)
+   # maximum 2 initial conditioning frames
+   num_init_cond_frames_for_train: 2
+   rand_init_cond_frames_for_train: true # random 1~2
+   num_correction_pt_per_frame: 7
+   use_act_ckpt_iterative_pt_sampling: false
+
+   num_init_cond_frames_for_eval: 1 # only mask on the first frame
+   forward_backbone_per_frame_for_eval: true
sam2/configs/sam2.1_hiera_t.yaml ADDED
@@ -0,0 +1,142 @@
+ # @package _global_
+
+ # Model
+ model:
+   _target_: sam2.sam2_train.SAM2Train
+   image_encoder:
+     _target_: sam2.modeling.backbones.image_encoder.ImageEncoder
+     scalp: 1
+     trunk:
+       _target_: sam2.modeling.backbones.hieradet.Hiera
+       embed_dim: 96
+       num_heads: 1
+       stages: [1, 2, 7, 2]
+       global_att_blocks: [5, 7, 9]
+       window_pos_embed_bkg_spatial_size: [7, 7]
+       drop_path_rate: 0.1
+     neck:
+       _target_: sam2.modeling.backbones.image_encoder.FpnNeck
+       position_encoding:
+         _target_: sam2.modeling.position_encoding.PositionEmbeddingSine
+         num_pos_feats: 256
+         normalize: true
+         scale: null
+         temperature: 10000
+       d_model: 256
+       backbone_channel_list: [768, 384, 192, 96]
+       fpn_top_down_levels: [2, 3] # output level 0 and 1 directly use the backbone features
+       fpn_interp_model: nearest
+
+   memory_attention:
+     _target_: sam2.modeling.memory_attention.MemoryAttention
+     d_model: 256
+     pos_enc_at_input: true
+     layer:
+       _target_: sam2.modeling.memory_attention.MemoryAttentionLayer
+       activation: relu
+       dim_feedforward: 2048
+       dropout: 0.1
+       pos_enc_at_attn: false
+       self_attention:
+         _target_: sam2.modeling.sam.transformer.RoPEAttention
+         rope_theta: 10000.0
+         feat_sizes: [64, 64]
+         embedding_dim: 256
+         num_heads: 1
+         downsample_rate: 1
+         dropout: 0.1
+       d_model: 256
+       pos_enc_at_cross_attn_keys: true
+       pos_enc_at_cross_attn_queries: false
+       cross_attention:
+         _target_: sam2.modeling.sam.transformer.RoPEAttention
+         rope_theta: 10000.0
+         feat_sizes: [64, 64]
+         rope_k_repeat: true
+         embedding_dim: 256
+         num_heads: 1
+         downsample_rate: 1
+         dropout: 0.1
+         kv_in_dim: 64
+     num_layers: 4
+
+   memory_encoder:
+     _target_: sam2.modeling.memory_encoder.MemoryEncoder
+     out_dim: 64
+     position_encoding:
+       _target_: sam2.modeling.position_encoding.PositionEmbeddingSine
+       num_pos_feats: 64
+       normalize: true
+       scale: null
+       temperature: 10000
+     mask_downsampler:
+       _target_: sam2.modeling.memory_encoder.MaskDownSampler
+       kernel_size: 3
+       stride: 2
+       padding: 1
+     fuser:
+       _target_: sam2.modeling.memory_encoder.Fuser
+       layer:
+         _target_: sam2.modeling.memory_encoder.CXBlock
+         dim: 256
+         kernel_size: 7
+         padding: 3
+         layer_scale_init_value: 1e-6
+         use_dwconv: true # depth-wise convs
+       num_layers: 2
+
+   num_maskmem: 7
+   image_size: 1024
+   # apply scaled sigmoid on mask logits for memory encoder, and directly feed input mask as output mask
+   # SAM decoder
+   sigmoid_scale_for_mem_enc: 20.0
+   sigmoid_bias_for_mem_enc: -10.0
+   use_mask_input_as_output_without_sam: true
+   # Memory
+   directly_add_no_mem_embed: true
+   no_obj_embed_spatial: true
+   # use high-resolution feature map in the SAM mask decoder
+   use_high_res_features_in_sam: true
+   # output 3 masks on the first click on initial conditioning frames
+   multimask_output_in_sam: true
+   # SAM heads
+   iou_prediction_use_sigmoid: true
+   # cross-attend to object pointers from other frames (based on SAM output tokens) in the encoder
+   use_obj_ptrs_in_encoder: true
+   add_tpos_enc_to_obj_ptrs: true
+   proj_tpos_enc_in_obj_ptrs: true
+   use_signed_tpos_enc_to_obj_ptrs: true
+   only_obj_ptrs_in_the_past_for_eval: true
+   # object occlusion prediction
+   pred_obj_scores: true
+   pred_obj_scores_mlp: true
+   fixed_no_obj_ptr: true
+   # multimask tracking settings
+   multimask_output_for_tracking: true
+   use_multimask_token_for_obj_ptr: true
+   multimask_min_pt_num: 0
+   multimask_max_pt_num: 1
+   use_mlp_for_obj_ptr_proj: true
+   # Compilation flag
+   # HieraT does not currently support compilation, should always be set to false
+   compile_image_encoder: false
+
+   ####### Training specific params #######
+   # box/point input and corrections
+   prob_to_use_pt_input_for_train: 0.5
+   prob_to_use_pt_input_for_eval: 0.0
+   prob_to_use_box_input_for_train: 0.5 # 0.5*0.5 = 0.25 prob to use box instead of points
+   prob_to_use_box_input_for_eval: 0.0
+   prob_to_sample_from_gt_for_train: 0.1 # with a small prob, sampling correction points from GT mask instead of prediction errors
+   num_frames_to_correct_for_train: 2 # iteratively sample on random 1~2 frames (always include the first frame)
+   num_frames_to_correct_for_eval: 1 # only iteratively sample on first frame
+   rand_frames_to_correct_for_train: true # random #init-cond-frame ~ 2
+   add_all_frames_to_correct_as_cond: true # when a frame receives a correction click, it becomes a conditioning frame (even if it's not initially a conditioning frame)
+   # maximum 2 initial conditioning frames
+   num_init_cond_frames_for_train: 2
+   rand_init_cond_frames_for_train: true # random 1~2
+   num_correction_pt_per_frame: 7
+   use_act_ckpt_iterative_pt_sampling: false
+
+   num_init_cond_frames_for_eval: 1 # only mask on the first frame
+   forward_backbone_per_frame_for_eval: true
sam2/configs/sam2.1_training/sam2.1_hiera_b+_MOSE_finetune.yaml ADDED
@@ -0,0 +1,339 @@
+ # @package _global_
+
+ scratch:
+   resolution: 1024
+   train_batch_size: 1
+   num_train_workers: 10
+   num_frames: 8
+   max_num_objects: 3
+   base_lr: 5.0e-6
+   vision_lr: 3.0e-06
+   phases_per_epoch: 1
+   num_epochs: 40
+
+ dataset:
+   # PATHS to Dataset
+   img_folder: null # PATH to MOSE JPEGImages folder
+   gt_folder: null # PATH to MOSE Annotations folder
+   file_list_txt: training/assets/MOSE_sample_train_list.txt # Optional PATH to filelist containing a subset of videos to be used for training
+   multiplier: 2
+
+ # Video transforms
+ vos:
+   train_transforms:
+     - _target_: training.dataset.transforms.ComposeAPI
+       transforms:
+         - _target_: training.dataset.transforms.RandomHorizontalFlip
+           consistent_transform: True
+         - _target_: training.dataset.transforms.RandomAffine
+           degrees: 25
+           shear: 20
+           image_interpolation: bilinear
+           consistent_transform: True
+         - _target_: training.dataset.transforms.RandomResizeAPI
+           sizes: ${scratch.resolution}
+           square: true
+           consistent_transform: True
+         - _target_: training.dataset.transforms.ColorJitter
+           consistent_transform: True
+           brightness: 0.1
+           contrast: 0.03
+           saturation: 0.03
+           hue: null
+         - _target_: training.dataset.transforms.RandomGrayscale
+           p: 0.05
+           consistent_transform: True
+         - _target_: training.dataset.transforms.ColorJitter
+           consistent_transform: False
+           brightness: 0.1
+           contrast: 0.05
+           saturation: 0.05
+           hue: null
+         - _target_: training.dataset.transforms.ToTensorAPI
+         - _target_: training.dataset.transforms.NormalizeAPI
+           mean: [0.485, 0.456, 0.406]
+           std: [0.229, 0.224, 0.225]
+
+ trainer:
+   _target_: training.trainer.Trainer
+   mode: train_only
+   max_epochs: ${times:${scratch.num_epochs},${scratch.phases_per_epoch}}
+   accelerator: cuda
+   seed_value: 123
+
+   model:
+     _target_: training.model.sam2.SAM2Train
+     image_encoder:
+       _target_: sam2.modeling.backbones.image_encoder.ImageEncoder
+       scalp: 1
+       trunk:
+         _target_: sam2.modeling.backbones.hieradet.Hiera
+         embed_dim: 112
+         num_heads: 2
+         drop_path_rate: 0.1
+       neck:
+         _target_: sam2.modeling.backbones.image_encoder.FpnNeck
+         position_encoding:
+           _target_: sam2.modeling.position_encoding.PositionEmbeddingSine
+           num_pos_feats: 256
+           normalize: true
+           scale: null
+           temperature: 10000
+         d_model: 256
+         backbone_channel_list: [896, 448, 224, 112]
+         fpn_top_down_levels: [2, 3] # output level 0 and 1 directly use the backbone features
+         fpn_interp_model: nearest
+
+     memory_attention:
+       _target_: sam2.modeling.memory_attention.MemoryAttention
+       d_model: 256
+       pos_enc_at_input: true
+       layer:
+         _target_: sam2.modeling.memory_attention.MemoryAttentionLayer
+         activation: relu
+         dim_feedforward: 2048
+         dropout: 0.1
+         pos_enc_at_attn: false
+         self_attention:
+           _target_: sam2.modeling.sam.transformer.RoPEAttention
+           rope_theta: 10000.0
+           feat_sizes: [64, 64]
+           embedding_dim: 256
+           num_heads: 1
+           downsample_rate: 1
+           dropout: 0.1
+         d_model: 256
+         pos_enc_at_cross_attn_keys: true
+         pos_enc_at_cross_attn_queries: false
+         cross_attention:
+           _target_: sam2.modeling.sam.transformer.RoPEAttention
+           rope_theta: 10000.0
+           feat_sizes: [64, 64]
+           rope_k_repeat: True
+           embedding_dim: 256
+           num_heads: 1
+           downsample_rate: 1
+           dropout: 0.1
+           kv_in_dim: 64
+       num_layers: 4
+
+     memory_encoder:
+       _target_: sam2.modeling.memory_encoder.MemoryEncoder
+       out_dim: 64
+       position_encoding:
+         _target_: sam2.modeling.position_encoding.PositionEmbeddingSine
+         num_pos_feats: 64
+         normalize: true
+         scale: null
+         temperature: 10000
+       mask_downsampler:
+         _target_: sam2.modeling.memory_encoder.MaskDownSampler
+         kernel_size: 3
+         stride: 2
+         padding: 1
+       fuser:
+         _target_: sam2.modeling.memory_encoder.Fuser
+         layer:
+           _target_: sam2.modeling.memory_encoder.CXBlock
+           dim: 256
+           kernel_size: 7
+           padding: 3
+           layer_scale_init_value: 1e-6
+           use_dwconv: True # depth-wise convs
+         num_layers: 2
+
+     num_maskmem: 7
+     image_size: ${scratch.resolution}
+     # apply scaled sigmoid on mask logits for memory encoder, and directly feed input mask as output mask
+     sigmoid_scale_for_mem_enc: 20.0
+     sigmoid_bias_for_mem_enc: -10.0
+     use_mask_input_as_output_without_sam: true
+     # Memory
+     directly_add_no_mem_embed: true
+     no_obj_embed_spatial: true
+     # use high-resolution feature map in the SAM mask decoder
+     use_high_res_features_in_sam: true
+     # output 3 masks on the first click on initial conditioning frames
+     multimask_output_in_sam: true
+     # SAM heads
+     iou_prediction_use_sigmoid: True
+     # cross-attend to object pointers from other frames (based on SAM output tokens) in the encoder
+     use_obj_ptrs_in_encoder: true
+     add_tpos_enc_to_obj_ptrs: true
+     proj_tpos_enc_in_obj_ptrs: true
+     use_signed_tpos_enc_to_obj_ptrs: true
+     only_obj_ptrs_in_the_past_for_eval: true
+     # object occlusion prediction
+     pred_obj_scores: true
+     pred_obj_scores_mlp: true
+     fixed_no_obj_ptr: true
+     # multimask tracking settings
+     multimask_output_for_tracking: true
+     use_multimask_token_for_obj_ptr: true
+     multimask_min_pt_num: 0
+     multimask_max_pt_num: 1
+     use_mlp_for_obj_ptr_proj: true
+     # Compilation flag
+     # compile_image_encoder: False
+
+     ####### Training specific params #######
+     # box/point input and corrections
+     prob_to_use_pt_input_for_train: 0.5
+     prob_to_use_pt_input_for_eval: 0.0
+     prob_to_use_box_input_for_train: 0.5 # 0.5*0.5 = 0.25 prob to use box instead of points
+     prob_to_use_box_input_for_eval: 0.0
+     prob_to_sample_from_gt_for_train: 0.1 # with a small prob, sampling correction points from GT mask instead of prediction errors
+     num_frames_to_correct_for_train: 2 # iteratively sample on random 1~2 frames (always include the first frame)
+     num_frames_to_correct_for_eval: 1 # only iteratively sample on first frame
+     rand_frames_to_correct_for_train: True # random #init-cond-frame ~ 2
+     add_all_frames_to_correct_as_cond: True # when a frame receives a correction click, it becomes a conditioning frame (even if it's not initially a conditioning frame)
+     # maximum 2 initial conditioning frames
+     num_init_cond_frames_for_train: 2
+     rand_init_cond_frames_for_train: True # random 1~2
+     num_correction_pt_per_frame: 7
+     use_act_ckpt_iterative_pt_sampling: false
+
+     num_init_cond_frames_for_eval: 1 # only mask on the first frame
+     forward_backbone_per_frame_for_eval: True
+
+   data:
+     train:
+       _target_: training.dataset.sam2_datasets.TorchTrainMixedDataset
+       phases_per_epoch: ${scratch.phases_per_epoch}
+       batch_sizes:
+         - ${scratch.train_batch_size}
+
+       datasets:
+         - _target_: training.dataset.utils.RepeatFactorWrapper
+           dataset:
+             _target_: training.dataset.utils.ConcatDataset
+             datasets:
+               - _target_: training.dataset.vos_dataset.VOSDataset
+                 transforms: ${vos.train_transforms}
+                 training: true
+                 video_dataset:
+                   _target_: training.dataset.vos_raw_dataset.PNGRawDataset
+                   img_folder: ${dataset.img_folder}
+                   gt_folder: ${dataset.gt_folder}
+                   file_list_txt: ${dataset.file_list_txt}
+                 sampler:
+                   _target_: training.dataset.vos_sampler.RandomUniformSampler
+                   num_frames: ${scratch.num_frames}
+                   max_num_objects: ${scratch.max_num_objects}
+                 multiplier: ${dataset.multiplier}
+       shuffle: True
+       num_workers: ${scratch.num_train_workers}
+       pin_memory: True
+       drop_last: True
+       collate_fn:
+         _target_: training.utils.data_utils.collate_fn
+         _partial_: true
+         dict_key: all
+
+   optim:
+     amp:
+       enabled: True
+       amp_dtype: bfloat16
+
+     optimizer:
+       _target_: torch.optim.AdamW
+
+     gradient_clip:
+       _target_: training.optimizer.GradientClipper
+       max_norm: 0.1
+       norm_type: 2
+
+     param_group_modifiers:
+       - _target_: training.optimizer.layer_decay_param_modifier
+         _partial_: True
+         layer_decay_value: 0.9
+         apply_to: 'image_encoder.trunk'
+         overrides:
+           - pattern: '*pos_embed*'
+             value: 1.0
+
+     options:
+       lr:
+         - scheduler:
+             _target_: fvcore.common.param_scheduler.CosineParamScheduler
+             start_value: ${scratch.base_lr}
+             end_value: ${divide:${scratch.base_lr},10}
+         - scheduler:
+             _target_: fvcore.common.param_scheduler.CosineParamScheduler
+             start_value: ${scratch.vision_lr}
+             end_value: ${divide:${scratch.vision_lr},10}
+           param_names:
+             - 'image_encoder.*'
+       weight_decay:
+         - scheduler:
+             _target_: fvcore.common.param_scheduler.ConstantParamScheduler
+             value: 0.1
+         - scheduler:
+             _target_: fvcore.common.param_scheduler.ConstantParamScheduler
+             value: 0.0
+           param_names:
+             - '*bias*'
+           module_cls_names: ['torch.nn.LayerNorm']
+
+   loss:
+     all:
+       _target_: training.loss_fns.MultiStepMultiMasksAndIous
+       weight_dict:
+         loss_mask: 20
+         loss_dice: 1
+         loss_iou: 1
+         loss_class: 1
+       supervise_all_iou: true
+       iou_use_l1_loss: true
+       pred_obj_scores: true
+       focal_gamma_obj_score: 0.0
+       focal_alpha_obj_score: -1.0
+
+   distributed:
+     backend: nccl
+     find_unused_parameters: True
+
+   logging:
+     tensorboard_writer:
+       _target_: training.utils.logger.make_tensorboard_logger
+       log_dir: ${launcher.experiment_log_dir}/tensorboard
+       flush_secs: 120
+       should_log: True
+     log_dir: ${launcher.experiment_log_dir}/logs
+     log_freq: 10
+
+   # initialize from a SAM 2 checkpoint
+   checkpoint:
+     save_dir: ${launcher.experiment_log_dir}/checkpoints
+     save_freq: 0 # 0 only last checkpoint is saved.
+     model_weight_initializer:
+       _partial_: True
+       _target_: training.utils.checkpoint_utils.load_state_dict_into_model
+       strict: True
+       ignore_unexpected_keys: null
+       ignore_missing_keys: null
+
+       state_dict:
+         _target_: training.utils.checkpoint_utils.load_checkpoint_and_apply_kernels
+         checkpoint_path: ./checkpoints/sam2.1_hiera_base_plus.pt # PATH to SAM 2.1 checkpoint
+         ckpt_state_dict_keys: ['model']
+
+ launcher:
+   num_nodes: 1
+   gpus_per_node: 8
+   experiment_log_dir: null # Path to log directory, defaults to ./sam2_logs/${config_name}
+
+ # SLURM args if running on a cluster
+ submitit:
+   partition: null
+   account: null
+   qos: null
+   cpus_per_task: 10
+   use_cluster: false
+   timeout_hour: 24
+   name: null
+   port_range: [10000, 65000]
+
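The `${times:...}` and `${divide:...}` interpolations in this training config (max_epochs and the LR schedule end values) are custom OmegaConf resolvers, not built-ins. A minimal sketch of how such resolvers are registered before composing the config (the exact registration site in the training code is an assumption):

    from omegaconf import OmegaConf

    OmegaConf.register_new_resolver("times", lambda a, b: a * b)
    OmegaConf.register_new_resolver("divide", lambda a, b: a / b)
    # After registration, max_epochs resolves to num_epochs * phases_per_epoch,
    # and each cosine LR schedule ends at one tenth of its start value.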
sam2/configs/sam2/sam2_hiera_b+.yaml ADDED
@@ -0,0 +1,113 @@
+ # @package _global_
+
+ # Model
+ model:
+   _target_: sam2.modeling.sam2_base.SAM2Base
+   image_encoder:
+     _target_: sam2.modeling.backbones.image_encoder.ImageEncoder
+     scalp: 1
+     trunk:
+       _target_: sam2.modeling.backbones.hieradet.Hiera
+       embed_dim: 112
+       num_heads: 2
+     neck:
+       _target_: sam2.modeling.backbones.image_encoder.FpnNeck
+       position_encoding:
+         _target_: sam2.modeling.position_encoding.PositionEmbeddingSine
+         num_pos_feats: 256
+         normalize: true
+         scale: null
+         temperature: 10000
+       d_model: 256
+       backbone_channel_list: [896, 448, 224, 112]
+       fpn_top_down_levels: [2, 3] # output level 0 and 1 directly use the backbone features
+       fpn_interp_model: nearest
+
+   memory_attention:
+     _target_: sam2.modeling.memory_attention.MemoryAttention
+     d_model: 256
+     pos_enc_at_input: true
+     layer:
+       _target_: sam2.modeling.memory_attention.MemoryAttentionLayer
+       activation: relu
+       dim_feedforward: 2048
+       dropout: 0.1
+       pos_enc_at_attn: false
+       self_attention:
+         _target_: sam2.modeling.sam.transformer.RoPEAttention
+         rope_theta: 10000.0
+         feat_sizes: [64, 64]
+         embedding_dim: 256
+         num_heads: 1
+         downsample_rate: 1
+         dropout: 0.1
+       d_model: 256
+       pos_enc_at_cross_attn_keys: true
+       pos_enc_at_cross_attn_queries: false
+       cross_attention:
+         _target_: sam2.modeling.sam.transformer.RoPEAttention
+         rope_theta: 10000.0
+         feat_sizes: [64, 64]
+         rope_k_repeat: True
+         embedding_dim: 256
+         num_heads: 1
+         downsample_rate: 1
+         dropout: 0.1
+         kv_in_dim: 64
+     num_layers: 4
+
+   memory_encoder:
+     _target_: sam2.modeling.memory_encoder.MemoryEncoder
+     out_dim: 64
+     position_encoding:
+       _target_: sam2.modeling.position_encoding.PositionEmbeddingSine
+       num_pos_feats: 64
+       normalize: true
+       scale: null
+       temperature: 10000
+     mask_downsampler:
+       _target_: sam2.modeling.memory_encoder.MaskDownSampler
+       kernel_size: 3
+       stride: 2
+       padding: 1
+     fuser:
+       _target_: sam2.modeling.memory_encoder.Fuser
+       layer:
+         _target_: sam2.modeling.memory_encoder.CXBlock
+         dim: 256
+         kernel_size: 7
+         padding: 3
+         layer_scale_init_value: 1e-6
+         use_dwconv: True # depth-wise convs
+       num_layers: 2
+
+   num_maskmem: 7
+   image_size: 1024
+   # apply scaled sigmoid on mask logits for memory encoder, and directly feed input mask as output mask
+   sigmoid_scale_for_mem_enc: 20.0
+   sigmoid_bias_for_mem_enc: -10.0
+   use_mask_input_as_output_without_sam: true
+   # Memory
+   directly_add_no_mem_embed: true
+   # use high-resolution feature map in the SAM mask decoder
+   use_high_res_features_in_sam: true
+   # output 3 masks on the first click on initial conditioning frames
+   multimask_output_in_sam: true
+   # SAM heads
+   iou_prediction_use_sigmoid: True
+   # cross-attend to object pointers from other frames (based on SAM output tokens) in the encoder
+   use_obj_ptrs_in_encoder: true
+   add_tpos_enc_to_obj_ptrs: false
+   only_obj_ptrs_in_the_past_for_eval: true
+   # object occlusion prediction
+   pred_obj_scores: true
+   pred_obj_scores_mlp: true
+   fixed_no_obj_ptr: true
+   # multimask tracking settings
+   multimask_output_for_tracking: true
+   use_multimask_token_for_obj_ptr: true
+   multimask_min_pt_num: 0
+   multimask_max_pt_num: 1
+   use_mlp_for_obj_ptr_proj: true
+   # Compilation flag
+   compile_image_encoder: False
sam2/configs/sam2/sam2_hiera_l.yaml ADDED
@@ -0,0 +1,117 @@
+ # @package _global_
+
+ # Model
+ model:
+   _target_: sam2.modeling.sam2_base.SAM2Base
+   image_encoder:
+     _target_: sam2.modeling.backbones.image_encoder.ImageEncoder
+     scalp: 1
+     trunk:
+       _target_: sam2.modeling.backbones.hieradet.Hiera
+       embed_dim: 144
+       num_heads: 2
+       stages: [2, 6, 36, 4]
+       global_att_blocks: [23, 33, 43]
+       window_pos_embed_bkg_spatial_size: [7, 7]
+       window_spec: [8, 4, 16, 8]
+     neck:
+       _target_: sam2.modeling.backbones.image_encoder.FpnNeck
+       position_encoding:
+         _target_: sam2.modeling.position_encoding.PositionEmbeddingSine
+         num_pos_feats: 256
+         normalize: true
+         scale: null
+         temperature: 10000
+       d_model: 256
+       backbone_channel_list: [1152, 576, 288, 144]
+       fpn_top_down_levels: [2, 3] # output level 0 and 1 directly use the backbone features
+       fpn_interp_model: nearest
+
+   memory_attention:
+     _target_: sam2.modeling.memory_attention.MemoryAttention
+     d_model: 256
+     pos_enc_at_input: true
+     layer:
+       _target_: sam2.modeling.memory_attention.MemoryAttentionLayer
+       activation: relu
+       dim_feedforward: 2048
+       dropout: 0.1
+       pos_enc_at_attn: false
+       self_attention:
+         _target_: sam2.modeling.sam.transformer.RoPEAttention
+         rope_theta: 10000.0
+         feat_sizes: [64, 64]
+         embedding_dim: 256
+         num_heads: 1
+         downsample_rate: 1
+         dropout: 0.1
+       d_model: 256
+       pos_enc_at_cross_attn_keys: true
+       pos_enc_at_cross_attn_queries: false
+       cross_attention:
+         _target_: sam2.modeling.sam.transformer.RoPEAttention
+         rope_theta: 10000.0
+         feat_sizes: [64, 64]
+         rope_k_repeat: True
+         embedding_dim: 256
+         num_heads: 1
+         downsample_rate: 1
+         dropout: 0.1
+         kv_in_dim: 64
+     num_layers: 4
+
+   memory_encoder:
+     _target_: sam2.modeling.memory_encoder.MemoryEncoder
+     out_dim: 64
+     position_encoding:
+       _target_: sam2.modeling.position_encoding.PositionEmbeddingSine
+       num_pos_feats: 64
+       normalize: true
+       scale: null
+       temperature: 10000
+     mask_downsampler:
+       _target_: sam2.modeling.memory_encoder.MaskDownSampler
+       kernel_size: 3
+       stride: 2
+       padding: 1
+     fuser:
+       _target_: sam2.modeling.memory_encoder.Fuser
+       layer:
+         _target_: sam2.modeling.memory_encoder.CXBlock
+         dim: 256
+         kernel_size: 7
+         padding: 3
+         layer_scale_init_value: 1e-6
+         use_dwconv: True # depth-wise convs
+       num_layers: 2
+
+   num_maskmem: 7
+   image_size: 1024
+   # apply scaled sigmoid on mask logits for memory encoder, and directly feed input mask as output mask
+   sigmoid_scale_for_mem_enc: 20.0
+   sigmoid_bias_for_mem_enc: -10.0
+   use_mask_input_as_output_without_sam: true
+   # Memory
+   directly_add_no_mem_embed: true
+   # use high-resolution feature map in the SAM mask decoder
+   use_high_res_features_in_sam: true
+   # output 3 masks on the first click on initial conditioning frames
+   multimask_output_in_sam: true
+   # SAM heads
+   iou_prediction_use_sigmoid: True
+   # cross-attend to object pointers from other frames (based on SAM output tokens) in the encoder
+   use_obj_ptrs_in_encoder: true
+   add_tpos_enc_to_obj_ptrs: false
+   only_obj_ptrs_in_the_past_for_eval: true
+   # object occlusion prediction
+   pred_obj_scores: true
+   pred_obj_scores_mlp: true
+   fixed_no_obj_ptr: true
+   # multimask tracking settings
+   multimask_output_for_tracking: true
+   use_multimask_token_for_obj_ptr: true
+   multimask_min_pt_num: 0
+   multimask_max_pt_num: 1
+   use_mlp_for_obj_ptr_proj: true
+   # Compilation flag
+   compile_image_encoder: False
sam2/configs/sam2/sam2_hiera_s.yaml ADDED
@@ -0,0 +1,116 @@
+ # @package _global_
+
+ # Model
+ model:
+   _target_: sam2.modeling.sam2_base.SAM2Base
+   image_encoder:
+     _target_: sam2.modeling.backbones.image_encoder.ImageEncoder
+     scalp: 1
+     trunk:
+       _target_: sam2.modeling.backbones.hieradet.Hiera
+       embed_dim: 96
+       num_heads: 1
+       stages: [1, 2, 11, 2]
+       global_att_blocks: [7, 10, 13]
+       window_pos_embed_bkg_spatial_size: [7, 7]
+     neck:
+       _target_: sam2.modeling.backbones.image_encoder.FpnNeck
+       position_encoding:
+         _target_: sam2.modeling.position_encoding.PositionEmbeddingSine
+         num_pos_feats: 256
+         normalize: true
+         scale: null
+         temperature: 10000
+       d_model: 256
+       backbone_channel_list: [768, 384, 192, 96]
+       fpn_top_down_levels: [2, 3] # output level 0 and 1 directly use the backbone features
+       fpn_interp_model: nearest
+
+   memory_attention:
+     _target_: sam2.modeling.memory_attention.MemoryAttention
+     d_model: 256
+     pos_enc_at_input: true
+     layer:
+       _target_: sam2.modeling.memory_attention.MemoryAttentionLayer
+       activation: relu
+       dim_feedforward: 2048
+       dropout: 0.1
+       pos_enc_at_attn: false
+       self_attention:
+         _target_: sam2.modeling.sam.transformer.RoPEAttention
+         rope_theta: 10000.0
+         feat_sizes: [64, 64]
+         embedding_dim: 256
+         num_heads: 1
+         downsample_rate: 1
+         dropout: 0.1
+       d_model: 256
+       pos_enc_at_cross_attn_keys: true
+       pos_enc_at_cross_attn_queries: false
+       cross_attention:
+         _target_: sam2.modeling.sam.transformer.RoPEAttention
+         rope_theta: 10000.0
+         feat_sizes: [64, 64]
+         rope_k_repeat: True
+         embedding_dim: 256
+         num_heads: 1
+         downsample_rate: 1
+         dropout: 0.1
+         kv_in_dim: 64
+     num_layers: 4
+
+   memory_encoder:
+     _target_: sam2.modeling.memory_encoder.MemoryEncoder
+     out_dim: 64
+     position_encoding:
+       _target_: sam2.modeling.position_encoding.PositionEmbeddingSine
+       num_pos_feats: 64
+       normalize: true
+       scale: null
+       temperature: 10000
+     mask_downsampler:
+       _target_: sam2.modeling.memory_encoder.MaskDownSampler
+       kernel_size: 3
+       stride: 2
+       padding: 1
+     fuser:
+       _target_: sam2.modeling.memory_encoder.Fuser
+       layer:
+         _target_: sam2.modeling.memory_encoder.CXBlock
+         dim: 256
+         kernel_size: 7
+         padding: 3
+         layer_scale_init_value: 1e-6
+         use_dwconv: True # depth-wise convs
+       num_layers: 2
+
+   num_maskmem: 7
+   image_size: 1024
+   # apply scaled sigmoid on mask logits for memory encoder, and directly feed input mask as output mask
+   sigmoid_scale_for_mem_enc: 20.0
+   sigmoid_bias_for_mem_enc: -10.0
+   use_mask_input_as_output_without_sam: true
+   # Memory
+   directly_add_no_mem_embed: true
+   # use high-resolution feature map in the SAM mask decoder
+   use_high_res_features_in_sam: true
+   # output 3 masks on the first click on initial conditioning frames
+   multimask_output_in_sam: true
+   # SAM heads
+   iou_prediction_use_sigmoid: True
+   # cross-attend to object pointers from other frames (based on SAM output tokens) in the encoder
+   use_obj_ptrs_in_encoder: true
+   add_tpos_enc_to_obj_ptrs: false
+   only_obj_ptrs_in_the_past_for_eval: true
+   # object occlusion prediction
+   pred_obj_scores: true
+   pred_obj_scores_mlp: true
+   fixed_no_obj_ptr: true
+   # multimask tracking settings
+   multimask_output_for_tracking: true
+   use_multimask_token_for_obj_ptr: true
+   multimask_min_pt_num: 0
+   multimask_max_pt_num: 1
+   use_mlp_for_obj_ptr_proj: true
+   # Compilation flag
+   compile_image_encoder: False
sam2/configs/sam2/sam2_hiera_t.yaml ADDED
@@ -0,0 +1,118 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # @package _global_
2
+
3
+ # Model
4
+ model:
5
+ _target_: sam2.modeling.sam2_base.SAM2Base
6
+ image_encoder:
7
+ _target_: sam2.modeling.backbones.image_encoder.ImageEncoder
8
+ scalp: 1
9
+ trunk:
10
+ _target_: sam2.modeling.backbones.hieradet.Hiera
11
+ embed_dim: 96
12
+ num_heads: 1
13
+ stages: [1, 2, 7, 2]
14
+ global_att_blocks: [5, 7, 9]
15
+ window_pos_embed_bkg_spatial_size: [7, 7]
16
+ neck:
17
+ _target_: sam2.modeling.backbones.image_encoder.FpnNeck
18
+ position_encoding:
19
+ _target_: sam2.modeling.position_encoding.PositionEmbeddingSine
20
+ num_pos_feats: 256
21
+ normalize: true
22
+ scale: null
23
+ temperature: 10000
24
+ d_model: 256
25
+ backbone_channel_list: [768, 384, 192, 96]
26
+ fpn_top_down_levels: [2, 3] # output level 0 and 1 directly use the backbone features
27
+ fpn_interp_model: nearest
28
+
29
+ memory_attention:
30
+ _target_: sam2.modeling.memory_attention.MemoryAttention
31
+ d_model: 256
32
+ pos_enc_at_input: true
33
+ layer:
34
+ _target_: sam2.modeling.memory_attention.MemoryAttentionLayer
35
+ activation: relu
36
+ dim_feedforward: 2048
37
+ dropout: 0.1
38
+ pos_enc_at_attn: false
39
+ self_attention:
40
+ _target_: sam2.modeling.sam.transformer.RoPEAttention
41
+ rope_theta: 10000.0
42
+ feat_sizes: [64, 64]
43
+ embedding_dim: 256
44
+ num_heads: 1
45
+ downsample_rate: 1
46
+ dropout: 0.1
47
+ d_model: 256
48
+ pos_enc_at_cross_attn_keys: true
49
+ pos_enc_at_cross_attn_queries: false
50
+ cross_attention:
51
+ _target_: sam2.modeling.sam.transformer.RoPEAttention
52
+ rope_theta: 10000.0
53
+ feat_sizes: [64, 64]
54
+ rope_k_repeat: True
55
+ embedding_dim: 256
56
+ num_heads: 1
57
+ downsample_rate: 1
58
+ dropout: 0.1
59
+ kv_in_dim: 64
60
+ num_layers: 4
61
+
62
+ memory_encoder:
63
+ _target_: sam2.modeling.memory_encoder.MemoryEncoder
64
+ out_dim: 64
65
+ position_encoding:
66
+ _target_: sam2.modeling.position_encoding.PositionEmbeddingSine
67
+ num_pos_feats: 64
68
+ normalize: true
69
+ scale: null
70
+ temperature: 10000
71
+ mask_downsampler:
72
+ _target_: sam2.modeling.memory_encoder.MaskDownSampler
73
+ kernel_size: 3
74
+ stride: 2
75
+ padding: 1
76
+ fuser:
77
+ _target_: sam2.modeling.memory_encoder.Fuser
78
+ layer:
79
+ _target_: sam2.modeling.memory_encoder.CXBlock
80
+ dim: 256
81
+ kernel_size: 7
82
+ padding: 3
83
+ layer_scale_init_value: 1e-6
84
+ use_dwconv: True # depth-wise convs
85
+ num_layers: 2
86
+
87
+ num_maskmem: 7
88
+ image_size: 1024
89
+ # apply scaled sigmoid on mask logits for memory encoder, and directly feed input mask as output mask
90
+ # SAM decoder
91
+ sigmoid_scale_for_mem_enc: 20.0
92
+ sigmoid_bias_for_mem_enc: -10.0
93
+ use_mask_input_as_output_without_sam: true
94
+ # Memory
95
+ directly_add_no_mem_embed: true
96
+ # use high-resolution feature map in the SAM mask decoder
97
+ use_high_res_features_in_sam: true
98
+ # output 3 masks on the first click on initial conditioning frames
99
+ multimask_output_in_sam: true
100
+ # SAM heads
101
+ iou_prediction_use_sigmoid: True
102
+ # cross-attend to object pointers from other frames (based on SAM output tokens) in the encoder
103
+ use_obj_ptrs_in_encoder: true
104
+ add_tpos_enc_to_obj_ptrs: false
105
+ only_obj_ptrs_in_the_past_for_eval: true
106
+ # object occlusion prediction
107
+ pred_obj_scores: true
108
+ pred_obj_scores_mlp: true
109
+ fixed_no_obj_ptr: true
110
+ # multimask tracking settings
111
+ multimask_output_for_tracking: true
112
+ use_multimask_token_for_obj_ptr: true
113
+ multimask_min_pt_num: 0
114
+ multimask_max_pt_num: 1
115
+ use_mlp_for_obj_ptr_proj: true
116
+ # Compilation flag
117
+ # HieraT does not currently support compilation, should always be set to False
118
+ compile_image_encoder: False
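For orientation, a minimal sketch of how a config like this is typically consumed: Hydra resolves the `_target_` entries into a `SAM2Base` model. The `build_sam2` helper, the exact config path, and the checkpoint filename below are assumptions based on the other files in this commit, not something the config itself defines.

import torch
from sam2.build_sam import build_sam2
from sam2.sam2_image_predictor import SAM2ImagePredictor

# Hypothetical checkpoint path; the config name may need to match the Hydra search path.
model = build_sam2(
    config_file="configs/sam2/sam2_hiera_t.yaml",
    ckpt_path="checkpoints/sam2_hiera_tiny.pt",
    device="cuda" if torch.cuda.is_available() else "cpu",
)
predictor = SAM2ImagePredictor(model)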
sam2/csrc/connected_components.cu ADDED
@@ -0,0 +1,289 @@
1
+ // Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ // All rights reserved.
3
+
4
+ // This source code is licensed under the license found in the
5
+ // LICENSE file in the root directory of this source tree.
6
+
7
+ // adapted from https://github.com/zsef123/Connected_components_PyTorch
8
+ // with license found in the LICENSE_cctorch file in the root directory.
9
+ #include <ATen/cuda/CUDAContext.h>
10
+ #include <cuda.h>
11
+ #include <cuda_runtime.h>
12
+ #include <torch/extension.h>
13
+ #include <torch/script.h>
14
+ #include <vector>
15
+
16
+ // 2d
17
+ #define BLOCK_ROWS 16
18
+ #define BLOCK_COLS 16
19
+
20
+ namespace cc2d {
21
+
22
+ template <typename T>
23
+ __device__ __forceinline__ unsigned char hasBit(T bitmap, unsigned char pos) {
24
+ return (bitmap >> pos) & 1;
25
+ }
26
+
27
+ __device__ int32_t find(const int32_t* s_buf, int32_t n) {
28
+ while (s_buf[n] != n)
29
+ n = s_buf[n];
30
+ return n;
31
+ }
32
+
33
+ __device__ int32_t find_n_compress(int32_t* s_buf, int32_t n) {
34
+ const int32_t id = n;
35
+ while (s_buf[n] != n) {
36
+ n = s_buf[n];
37
+ s_buf[id] = n;
38
+ }
39
+ return n;
40
+ }
41
+
42
+ __device__ void union_(int32_t* s_buf, int32_t a, int32_t b) {
43
+ bool done;
44
+ do {
45
+ a = find(s_buf, a);
46
+ b = find(s_buf, b);
47
+
48
+ if (a < b) {
49
+ int32_t old = atomicMin(s_buf + b, a);
50
+ done = (old == b);
51
+ b = old;
52
+ } else if (b < a) {
53
+ int32_t old = atomicMin(s_buf + a, b);
54
+ done = (old == a);
55
+ a = old;
56
+ } else
57
+ done = true;
58
+
59
+ } while (!done);
60
+ }
61
+
62
+ __global__ void
63
+ init_labeling(int32_t* label, const uint32_t W, const uint32_t H) {
64
+ const uint32_t row = (blockIdx.y * blockDim.y + threadIdx.y) * 2;
65
+ const uint32_t col = (blockIdx.x * blockDim.x + threadIdx.x) * 2;
66
+ const uint32_t idx = row * W + col;
67
+
68
+ if (row < H && col < W)
69
+ label[idx] = idx;
70
+ }
71
+
72
+ __global__ void
73
+ merge(uint8_t* img, int32_t* label, const uint32_t W, const uint32_t H) {
74
+ const uint32_t row = (blockIdx.y * blockDim.y + threadIdx.y) * 2;
75
+ const uint32_t col = (blockIdx.x * blockDim.x + threadIdx.x) * 2;
76
+ const uint32_t idx = row * W + col;
77
+
78
+ if (row >= H || col >= W)
79
+ return;
80
+
81
+ uint32_t P = 0;
82
+
83
+ if (img[idx])
84
+ P |= 0x777;
85
+ if (row + 1 < H && img[idx + W])
86
+ P |= 0x777 << 4;
87
+ if (col + 1 < W && img[idx + 1])
88
+ P |= 0x777 << 1;
89
+
90
+ if (col == 0)
91
+ P &= 0xEEEE;
92
+ if (col + 1 >= W)
93
+ P &= 0x3333;
94
+ else if (col + 2 >= W)
95
+ P &= 0x7777;
96
+
97
+ if (row == 0)
98
+ P &= 0xFFF0;
99
+ if (row + 1 >= H)
100
+ P &= 0xFF;
101
+
102
+ if (P > 0) {
103
+ // If the top-left pixel needs checking (first bit of P is set) and that
104
+ // pixel is set in the image
105
+ if (hasBit(P, 0) && img[idx - W - 1]) {
106
+ union_(label, idx, idx - 2 * W - 2); // top left block
107
+ }
108
+
109
+ if ((hasBit(P, 1) && img[idx - W]) || (hasBit(P, 2) && img[idx - W + 1]))
110
+ union_(label, idx, idx - 2 * W); // top bottom block
111
+
112
+ if (hasBit(P, 3) && img[idx + 2 - W])
113
+ union_(label, idx, idx - 2 * W + 2); // top right block
114
+
115
+ if ((hasBit(P, 4) && img[idx - 1]) || (hasBit(P, 8) && img[idx + W - 1]))
116
+ union_(label, idx, idx - 2); // just left block
117
+ }
118
+ }
119
+
120
+ __global__ void compression(int32_t* label, const int32_t W, const int32_t H) {
121
+ const uint32_t row = (blockIdx.y * blockDim.y + threadIdx.y) * 2;
122
+ const uint32_t col = (blockIdx.x * blockDim.x + threadIdx.x) * 2;
123
+ const uint32_t idx = row * W + col;
124
+
125
+ if (row < H && col < W)
126
+ find_n_compress(label, idx);
127
+ }
128
+
129
+ __global__ void final_labeling(
130
+ const uint8_t* img,
131
+ int32_t* label,
132
+ const int32_t W,
133
+ const int32_t H) {
134
+ const uint32_t row = (blockIdx.y * blockDim.y + threadIdx.y) * 2;
135
+ const uint32_t col = (blockIdx.x * blockDim.x + threadIdx.x) * 2;
136
+ const uint32_t idx = row * W + col;
137
+
138
+ if (row >= H || col >= W)
139
+ return;
140
+
141
+ int32_t y = label[idx] + 1;
142
+
143
+ if (img[idx])
144
+ label[idx] = y;
145
+ else
146
+ label[idx] = 0;
147
+
148
+ if (col + 1 < W) {
149
+ if (img[idx + 1])
150
+ label[idx + 1] = y;
151
+ else
152
+ label[idx + 1] = 0;
153
+
154
+ if (row + 1 < H) {
155
+ if (img[idx + W + 1])
156
+ label[idx + W + 1] = y;
157
+ else
158
+ label[idx + W + 1] = 0;
159
+ }
160
+ }
161
+
162
+ if (row + 1 < H) {
163
+ if (img[idx + W])
164
+ label[idx + W] = y;
165
+ else
166
+ label[idx + W] = 0;
167
+ }
168
+ }
169
+
170
+ __global__ void init_counting(
171
+ const int32_t* label,
172
+ int32_t* count_init,
173
+ const int32_t W,
174
+ const int32_t H) {
175
+ const uint32_t row = (blockIdx.y * blockDim.y + threadIdx.y);
176
+ const uint32_t col = (blockIdx.x * blockDim.x + threadIdx.x);
177
+ const uint32_t idx = row * W + col;
178
+
179
+ if (row >= H || col >= W)
180
+ return;
181
+
182
+ int32_t y = label[idx];
183
+ if (y > 0) {
184
+ int32_t count_idx = y - 1;
185
+ atomicAdd(count_init + count_idx, 1);
186
+ }
187
+ }
188
+
189
+ __global__ void final_counting(
190
+ const int32_t* label,
191
+ const int32_t* count_init,
192
+ int32_t* count_final,
193
+ const int32_t W,
194
+ const int32_t H) {
195
+ const uint32_t row = (blockIdx.y * blockDim.y + threadIdx.y);
196
+ const uint32_t col = (blockIdx.x * blockDim.x + threadIdx.x);
197
+ const uint32_t idx = row * W + col;
198
+
199
+ if (row >= H || col >= W)
200
+ return;
201
+
202
+ int32_t y = label[idx];
203
+ if (y > 0) {
204
+ int32_t count_idx = y - 1;
205
+ count_final[idx] = count_init[count_idx];
206
+ } else {
207
+ count_final[idx] = 0;
208
+ }
209
+ }
210
+
211
+ } // namespace cc2d
212
+
213
+ std::vector<torch::Tensor> get_connected_componnets(
214
+ const torch::Tensor& inputs) {
215
+ AT_ASSERTM(inputs.is_cuda(), "inputs must be a CUDA tensor");
216
+ AT_ASSERTM(inputs.ndimension() == 4, "inputs must be [N, 1, H, W] shape");
217
+ AT_ASSERTM(
218
+ inputs.scalar_type() == torch::kUInt8, "inputs must be a uint8 type");
219
+
220
+ const uint32_t N = inputs.size(0);
221
+ const uint32_t C = inputs.size(1);
222
+ const uint32_t H = inputs.size(2);
223
+ const uint32_t W = inputs.size(3);
224
+
225
+ AT_ASSERTM(C == 1, "inputs must be [N, 1, H, W] shape");
226
+ AT_ASSERTM((H % 2) == 0, "height must be an even number");
227
+ AT_ASSERTM((W % 2) == 0, "width must be an even number");
228
+
229
+ // labels are stored as int32
230
+ auto label_options =
231
+ torch::TensorOptions().dtype(torch::kInt32).device(inputs.device());
232
+ torch::Tensor labels = torch::zeros({N, C, H, W}, label_options);
233
+ torch::Tensor counts_init = torch::zeros({N, C, H, W}, label_options);
234
+ torch::Tensor counts_final = torch::zeros({N, C, H, W}, label_options);
235
+
236
+ dim3 grid = dim3(
237
+ ((W + 1) / 2 + BLOCK_COLS - 1) / BLOCK_COLS,
238
+ ((H + 1) / 2 + BLOCK_ROWS - 1) / BLOCK_ROWS);
239
+ dim3 block = dim3(BLOCK_COLS, BLOCK_ROWS);
240
+ dim3 grid_count =
241
+ dim3((W + BLOCK_COLS) / BLOCK_COLS, (H + BLOCK_ROWS) / BLOCK_ROWS);
242
+ dim3 block_count = dim3(BLOCK_COLS, BLOCK_ROWS);
243
+ cudaStream_t stream = at::cuda::getCurrentCUDAStream();
244
+
245
+ for (int n = 0; n < N; n++) {
246
+ uint32_t offset = n * H * W;
247
+
248
+ cc2d::init_labeling<<<grid, block, 0, stream>>>(
249
+ labels.data_ptr<int32_t>() + offset, W, H);
250
+ cc2d::merge<<<grid, block, 0, stream>>>(
251
+ inputs.data_ptr<uint8_t>() + offset,
252
+ labels.data_ptr<int32_t>() + offset,
253
+ W,
254
+ H);
255
+ cc2d::compression<<<grid, block, 0, stream>>>(
256
+ labels.data_ptr<int32_t>() + offset, W, H);
257
+ cc2d::final_labeling<<<grid, block, 0, stream>>>(
258
+ inputs.data_ptr<uint8_t>() + offset,
259
+ labels.data_ptr<int32_t>() + offset,
260
+ W,
261
+ H);
262
+
263
+ // get the counting of each pixel
264
+ cc2d::init_counting<<<grid_count, block_count, 0, stream>>>(
265
+ labels.data_ptr<int32_t>() + offset,
266
+ counts_init.data_ptr<int32_t>() + offset,
267
+ W,
268
+ H);
269
+ cc2d::final_counting<<<grid_count, block_count, 0, stream>>>(
270
+ labels.data_ptr<int32_t>() + offset,
271
+ counts_init.data_ptr<int32_t>() + offset,
272
+ counts_final.data_ptr<int32_t>() + offset,
273
+ W,
274
+ H);
275
+ }
276
+
277
+ // returned values are [labels, counts]
278
+ std::vector<torch::Tensor> outputs;
279
+ outputs.push_back(labels);
280
+ outputs.push_back(counts_final);
281
+ return outputs;
282
+ }
283
+
284
+ PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
285
+ m.def(
286
+ "get_connected_componnets",
287
+ &get_connected_componnets,
288
+ "get_connected_componnets");
289
+ }
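A hedged sketch of the calling contract implied by the asserts above (a uint8 CUDA tensor of shape [N, 1, H, W] with even H and W). Building the file as a standalone extension with torch.utils.cpp_extension.load is an assumption made for illustration; the repository may wire it up differently.

import torch
from torch.utils.cpp_extension import load

# Hypothetical ad-hoc build of this .cu file as a Python extension.
cc2d = load(name="sam2_cc2d", sources=["sam2/csrc/connected_components.cu"])

masks = (torch.rand(2, 1, 64, 64, device="cuda") > 0.5).to(torch.uint8)
labels, counts = cc2d.get_connected_componnets(masks)
# labels: per-pixel component id (0 for background); counts: per-pixel area of its component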
sam2/loss_fns.py ADDED
@@ -0,0 +1,288 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+
4
+ # This source code is licensed under the license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ from collections import defaultdict
8
+ from typing import Dict, List
9
+
10
+ import torch
11
+ import torch.distributed
12
+ import torch.nn as nn
13
+ import torch.nn.functional as F
14
+ from nncore.engine import comm
15
+
16
+
17
+ def dice_loss(inputs, targets, num_objects, loss_on_multimask=False):
18
+ """
19
+ Compute the DICE loss, similar to generalized IOU for masks
20
+ Args:
21
+ inputs: A float tensor of arbitrary shape.
22
+ The predictions for each example.
23
+ targets: A float tensor with the same shape as inputs. Stores the binary
24
+ classification label for each element in inputs
25
+ (0 for the negative class and 1 for the positive class).
26
+ num_objects: Number of objects in the batch
27
+ loss_on_multimask: True if multimask prediction is enabled
28
+ Returns:
29
+ Dice loss tensor
30
+ """
31
+ inputs = inputs.sigmoid()
32
+ if loss_on_multimask:
33
+ # inputs and targets are [N, M, H, W] where M corresponds to multiple predicted masks
34
+ assert inputs.dim() == 4 and targets.dim() == 4
35
+ # flatten spatial dimension while keeping multimask channel dimension
36
+ inputs = inputs.flatten(2)
37
+ targets = targets.flatten(2)
38
+ numerator = 2 * (inputs * targets).sum(-1)
39
+ else:
40
+ inputs = inputs.flatten(1)
41
+ numerator = 2 * (inputs * targets).sum(1)
42
+ denominator = inputs.sum(-1) + targets.sum(-1)
43
+ loss = 1 - (numerator + 1) / (denominator + 1)
44
+ if loss_on_multimask:
45
+ return loss / num_objects
46
+ return loss.sum() / num_objects
47
+
48
+
49
+ def sigmoid_focal_loss(
50
+ inputs,
51
+ targets,
52
+ num_objects,
53
+ alpha: float = 0.25,
54
+ gamma: float = 2,
55
+ loss_on_multimask=False,
56
+ ):
57
+ """
58
+ Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002.
59
+ Args:
60
+ inputs: A float tensor of arbitrary shape.
61
+ The predictions for each example.
62
+ targets: A float tensor with the same shape as inputs. Stores the binary
63
+ classification label for each element in inputs
64
+ (0 for the negative class and 1 for the positive class).
65
+ num_objects: Number of objects in the batch
66
+ alpha: (optional) Weighting factor in range (0,1) to balance
67
+ positive vs negative examples. Default = 0.25; a value of -1 disables weighting.
68
+ gamma: Exponent of the modulating factor (1 - p_t) to
69
+ balance easy vs hard examples.
70
+ loss_on_multimask: True if multimask prediction is enabled
71
+ Returns:
72
+ focal loss tensor
73
+ """
74
+ prob = inputs.sigmoid()
75
+ ce_loss = F.binary_cross_entropy_with_logits(inputs, targets, reduction="none")
76
+ p_t = prob * targets + (1 - prob) * (1 - targets)
77
+ loss = ce_loss * ((1 - p_t)**gamma)
78
+
79
+ if alpha >= 0:
80
+ alpha_t = alpha * targets + (1 - alpha) * (1 - targets)
81
+ loss = alpha_t * loss
82
+
83
+ if loss_on_multimask:
84
+ # loss is [N, M, H, W] where M corresponds to multiple predicted masks
85
+ assert loss.dim() == 4
86
+ return loss.flatten(2).mean(-1) / num_objects # average over spatial dims
87
+ return loss.mean(1).sum() / num_objects
88
+
89
+
90
+ def iou_loss(inputs, targets, pred_ious, num_objects, loss_on_multimask=False, use_l1_loss=False):
91
+ """
92
+ Args:
93
+ inputs: A float tensor of arbitrary shape.
94
+ The predictions for each example.
95
+ targets: A float tensor with the same shape as inputs. Stores the binary
96
+ classification label for each element in inputs
97
+ (0 for the negative class and 1 for the positive class).
98
+ pred_ious: A float tensor containing the predicted IoUs scores per mask
99
+ num_objects: Number of objects in the batch
100
+ loss_on_multimask: True if multimask prediction is enabled
101
+ use_l1_loss: Whether to use L1 loss instead of MSE loss
102
+ Returns:
103
+ IoU loss tensor
104
+ """
105
+ assert inputs.dim() == 4 and targets.dim() == 4
106
+ pred_mask = inputs.flatten(2) > 0
107
+ gt_mask = targets.flatten(2) > 0
108
+ area_i = torch.sum(pred_mask & gt_mask, dim=-1).float()
109
+ area_u = torch.sum(pred_mask | gt_mask, dim=-1).float()
110
+ actual_ious = area_i / torch.clamp(area_u, min=1.0)
111
+
112
+ if use_l1_loss:
113
+ loss = F.l1_loss(pred_ious, actual_ious, reduction="none")
114
+ else:
115
+ loss = F.mse_loss(pred_ious, actual_ious, reduction="none")
116
+ if loss_on_multimask:
117
+ return loss / num_objects
118
+ return loss.sum() / num_objects
119
+
120
+
121
+ class MultiStepMultiMasksAndIous(nn.Module):
122
+
123
+ def __init__(
124
+ self,
125
+ weight_dict,
126
+ focal_alpha=0.25,
127
+ focal_gamma=2,
128
+ supervise_all_iou=False,
129
+ iou_use_l1_loss=False,
130
+ pred_obj_scores=False,
131
+ focal_gamma_obj_score=0.0,
132
+ focal_alpha_obj_score=-1,
133
+ ):
134
+ """
135
+ This class computes the multi-step multi-mask and IoU losses.
136
+ Args:
137
+ weight_dict: dict containing weights for focal, dice, iou losses
138
+ focal_alpha: alpha for sigmoid focal loss
139
+ focal_gamma: gamma for sigmoid focal loss
140
+ supervise_all_iou: if True, back-prop iou losses for all predicted masks
141
+ iou_use_l1_loss: use L1 loss instead of MSE loss for iou
142
+ pred_obj_scores: if True, compute loss for object scores
143
+ focal_gamma_obj_score: gamma for sigmoid focal loss on object scores
144
+ focal_alpha_obj_score: alpha for sigmoid focal loss on object scores
145
+ """
146
+
147
+ super().__init__()
148
+ self.weight_dict = weight_dict
149
+ self.focal_alpha = focal_alpha
150
+ self.focal_gamma = focal_gamma
151
+ assert "loss_mask" in self.weight_dict
152
+ assert "loss_dice" in self.weight_dict
153
+ assert "loss_iou" in self.weight_dict
154
+ if "loss_class" not in self.weight_dict:
155
+ self.weight_dict["loss_class"] = 0.0
156
+
157
+ self.focal_alpha_obj_score = focal_alpha_obj_score
158
+ self.focal_gamma_obj_score = focal_gamma_obj_score
159
+ self.supervise_all_iou = supervise_all_iou
160
+ self.iou_use_l1_loss = iou_use_l1_loss
161
+ self.pred_obj_scores = pred_obj_scores
162
+
163
+ def forward(self, outs_batch: List[Dict], targets_batch: torch.Tensor):
164
+ assert len(outs_batch) == len(targets_batch)
165
+ num_objects = torch.tensor((targets_batch.shape[1]), device=targets_batch.device,
166
+ dtype=torch.float) # Number of objects is fixed within a batch
167
+ if comm.is_distributed():
168
+ torch.distributed.all_reduce(num_objects)
169
+ num_objects = torch.clamp(num_objects / comm.get_world_size(), min=1).item()
170
+
171
+ losses = defaultdict(int)
172
+ for outs, targets in zip(outs_batch, targets_batch):
173
+ cur_losses = self._forward(outs, targets, num_objects)
174
+ for k, v in cur_losses.items():
175
+ losses[k] += v
176
+
177
+ return losses
178
+
179
+ def _forward(self, outputs: Dict, targets: torch.Tensor, num_objects):
180
+ """
181
+ Compute the losses related to the masks: the focal loss and the dice loss,
182
+ and also the MAE or MSE loss between predicted IoUs and actual IoUs.
183
+
184
+ Here "multistep_pred_multimasks_high_res" is a list of multimasks (tensors
185
+ of shape [N, M, H, W], where M could be 1 or larger, corresponding to
186
+ one or multiple predicted masks from a click).
187
+
188
+ We back-propagate focal, dice losses only on the prediction channel
189
+ with the lowest focal+dice loss between predicted mask and ground-truth.
190
+ If `supervise_all_iou` is True, we back-propagate IoU losses for all predicted masks.
191
+ """
192
+
193
+ target_masks = targets.unsqueeze(1).float()
194
+ assert target_masks.dim() == 4 # [N, 1, H, W]
195
+ src_masks_list = outputs["multistep_pred_multimasks_high_res"]
196
+ ious_list = outputs["multistep_pred_ious"]
197
+ object_score_logits_list = outputs["multistep_object_score_logits"]
198
+
199
+ assert len(src_masks_list) == len(ious_list)
200
+ assert len(object_score_logits_list) == len(ious_list)
201
+
202
+ # accumulate the loss over prediction steps
203
+ losses = {"loss_mask": 0, "loss_dice": 0, "loss_iou": 0, "loss_class": 0}
204
+ for src_masks, ious, object_score_logits in zip(src_masks_list, ious_list, object_score_logits_list):
205
+ self._update_losses(losses, src_masks, target_masks, ious, num_objects, object_score_logits)
206
+ losses["core_loss"] = self.reduce_loss(losses)
207
+ return losses
208
+
209
+ def _update_losses(self, losses, src_masks, target_masks, ious, num_objects, object_score_logits):
210
+ target_masks = target_masks.expand_as(src_masks)
211
+ # get focal, dice and iou loss on all output masks in a prediction step
212
+ loss_multimask = sigmoid_focal_loss(
213
+ src_masks,
214
+ target_masks,
215
+ num_objects,
216
+ alpha=self.focal_alpha,
217
+ gamma=self.focal_gamma,
218
+ loss_on_multimask=True,
219
+ )
220
+ loss_multidice = dice_loss(src_masks, target_masks, num_objects, loss_on_multimask=True)
221
+ if not self.pred_obj_scores:
222
+ loss_class = torch.tensor(0.0, dtype=loss_multimask.dtype, device=loss_multimask.device)
223
+ target_obj = torch.ones(
224
+ loss_multimask.shape[0],
225
+ 1,
226
+ dtype=loss_multimask.dtype,
227
+ device=loss_multimask.device,
228
+ )
229
+ else:
230
+ target_obj = torch.any((target_masks[:, 0] > 0).flatten(1), dim=-1)[..., None].float()
231
+ loss_class = sigmoid_focal_loss(
232
+ object_score_logits,
233
+ target_obj,
234
+ num_objects,
235
+ alpha=self.focal_alpha_obj_score,
236
+ gamma=self.focal_gamma_obj_score,
237
+ )
238
+
239
+ loss_multiiou = iou_loss(
240
+ src_masks,
241
+ target_masks,
242
+ ious,
243
+ num_objects,
244
+ loss_on_multimask=True,
245
+ use_l1_loss=self.iou_use_l1_loss,
246
+ )
247
+ assert loss_multimask.dim() == 2
248
+ assert loss_multidice.dim() == 2
249
+ assert loss_multiiou.dim() == 2
250
+ if loss_multimask.size(1) > 1:
251
+ # take the mask indices with the smallest focal + dice loss for back propagation
252
+ loss_combo = (
253
+ loss_multimask * self.weight_dict["loss_mask"] + loss_multidice * self.weight_dict["loss_dice"])
254
+ best_loss_inds = torch.argmin(loss_combo, dim=-1)
255
+ batch_inds = torch.arange(loss_combo.size(0), device=loss_combo.device)
256
+ loss_mask = loss_multimask[batch_inds, best_loss_inds].unsqueeze(1)
257
+ loss_dice = loss_multidice[batch_inds, best_loss_inds].unsqueeze(1)
258
+ # calculate the iou prediction and slot losses only in the index
259
+ # with the minimum loss for each mask (to be consistent w/ SAM)
260
+ if self.supervise_all_iou:
261
+ loss_iou = loss_multiiou.mean(dim=-1).unsqueeze(1)
262
+ else:
263
+ loss_iou = loss_multiiou[batch_inds, best_loss_inds].unsqueeze(1)
264
+ else:
265
+ loss_mask = loss_multimask
266
+ loss_dice = loss_multidice
267
+ loss_iou = loss_multiiou
268
+
269
+ # backprop focal, dice and iou loss only if obj present
270
+ loss_mask = loss_mask * target_obj
271
+ loss_dice = loss_dice * target_obj
272
+ loss_iou = loss_iou * target_obj
273
+
274
+ # sum over batch dimension (note that the losses are already divided by num_objects)
275
+ losses["loss_mask"] += loss_mask.sum()
276
+ losses["loss_dice"] += loss_dice.sum()
277
+ losses["loss_iou"] += loss_iou.sum()
278
+ losses["loss_class"] += loss_class
279
+
280
+ def reduce_loss(self, losses):
281
+ reduced_loss = 0.0
282
+ for loss_key, weight in self.weight_dict.items():
283
+ if loss_key not in losses:
284
+ raise ValueError(f"{type(self)} doesn't compute {loss_key}")
285
+ if weight != 0:
286
+ reduced_loss += losses[loss_key] * weight
287
+
288
+ return reduced_loss
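A hedged sketch (not part of the commit) of the dice term defined above on toy logits, and of how the combined criterion is configured; the example weights are illustrative, not values this repository necessarily uses.

import torch
from sam2.loss_fns import dice_loss, MultiStepMultiMasksAndIous

logits = torch.full((2, 1, 8, 8), 10.0)   # confident foreground predictions, [N, M, H, W]
targets = torch.ones(2, 1, 8, 8)          # matching ground truth
loss = dice_loss(logits, targets, num_objects=2, loss_on_multimask=True)
print(loss.shape)                         # torch.Size([2, 1]), values close to 0 for a perfect match

criterion = MultiStepMultiMasksAndIous(
    weight_dict={"loss_mask": 20.0, "loss_dice": 1.0, "loss_iou": 1.0, "loss_class": 1.0},
)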
sam2/modeling/__init__.py ADDED
@@ -0,0 +1,5 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+
4
+ # This source code is licensed under the license found in the
5
+ # LICENSE file in the root directory of this source tree.
sam2/modeling/backbones/__init__.py ADDED
@@ -0,0 +1,5 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+
4
+ # This source code is licensed under the license found in the
5
+ # LICENSE file in the root directory of this source tree.
sam2/modeling/backbones/hieradet.py ADDED
@@ -0,0 +1,312 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+
4
+ # This source code is licensed under the license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ import logging
8
+ from functools import partial
9
+ from typing import List, Tuple, Union
10
+
11
+ import torch
12
+ import torch.nn as nn
13
+ import torch.nn.functional as F
14
+ from iopath.common.file_io import g_pathmgr
15
+
16
+ from sam2.modeling.backbones.utils import (
17
+ PatchEmbed,
18
+ window_partition,
19
+ window_unpartition,
20
+ )
21
+
22
+ from sam2.modeling.sam2_utils import DropPath, MLP
23
+
24
+
25
+ def do_pool(x: torch.Tensor, pool: nn.Module, norm: nn.Module = None) -> torch.Tensor:
26
+ if pool is None:
27
+ return x
28
+ # (B, H, W, C) -> (B, C, H, W)
29
+ x = x.permute(0, 3, 1, 2)
30
+ x = pool(x.float()).to(x.dtype)
31
+ # (B, C, H', W') -> (B, H', W', C)
32
+ x = x.permute(0, 2, 3, 1)
33
+ if norm:
34
+ x = norm(x)
35
+
36
+ return x
37
+
38
+
39
+ class MultiScaleAttention(nn.Module):
40
+
41
+ def __init__(
42
+ self,
43
+ dim: int,
44
+ dim_out: int,
45
+ num_heads: int,
46
+ q_pool: nn.Module = None,
47
+ ):
48
+ super().__init__()
49
+
50
+ self.dim = dim
51
+ self.dim_out = dim_out
52
+ self.num_heads = num_heads
53
+ self.q_pool = q_pool
54
+ self.qkv = nn.Linear(dim, dim_out * 3)
55
+ self.proj = nn.Linear(dim_out, dim_out)
56
+
57
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
58
+ B, H, W, _ = x.shape
59
+ # qkv with shape (B, H * W, 3, nHead, C)
60
+ qkv = self.qkv(x).reshape(B, H * W, 3, self.num_heads, -1)
61
+ # q, k, v with shape (B, H * W, nheads, C)
62
+ q, k, v = torch.unbind(qkv, 2)
63
+
64
+ # Q pooling (for downsample at stage changes)
65
+ if self.q_pool:
66
+ q = do_pool(q.reshape(B, H, W, -1), self.q_pool)
67
+ H, W = q.shape[1:3] # downsampled shape
68
+ q = q.reshape(B, H * W, self.num_heads, -1)
69
+
70
+ # Torch's SDPA expects [B, nheads, H*W, C] so we transpose
71
+ x = F.scaled_dot_product_attention(
72
+ q.transpose(1, 2),
73
+ k.transpose(1, 2),
74
+ v.transpose(1, 2),
75
+ )
76
+ # Transpose back
77
+ x = x.transpose(1, 2)
78
+ x = x.reshape(B, H, W, -1)
79
+
80
+ x = self.proj(x)
81
+
82
+ return x
83
+
84
+
85
+ class MultiScaleBlock(nn.Module):
86
+
87
+ def __init__(
88
+ self,
89
+ dim: int,
90
+ dim_out: int,
91
+ num_heads: int,
92
+ mlp_ratio: float = 4.0,
93
+ drop_path: float = 0.0,
94
+ norm_layer: Union[nn.Module, str] = "LayerNorm",
95
+ q_stride: Tuple[int, int] = None,
96
+ act_layer: nn.Module = nn.GELU,
97
+ window_size: int = 0,
98
+ ):
99
+ super().__init__()
100
+
101
+ if isinstance(norm_layer, str):
102
+ norm_layer = partial(getattr(nn, norm_layer), eps=1e-6)
103
+
104
+ self.dim = dim
105
+ self.dim_out = dim_out
106
+ self.norm1 = norm_layer(dim)
107
+
108
+ self.window_size = window_size
109
+
110
+ self.pool, self.q_stride = None, q_stride
111
+ if self.q_stride:
112
+ self.pool = nn.MaxPool2d(kernel_size=q_stride, stride=q_stride, ceil_mode=False)
113
+
114
+ self.attn = MultiScaleAttention(
115
+ dim,
116
+ dim_out,
117
+ num_heads=num_heads,
118
+ q_pool=self.pool,
119
+ )
120
+ self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
121
+
122
+ self.norm2 = norm_layer(dim_out)
123
+ self.mlp = MLP(
124
+ dim_out,
125
+ int(dim_out * mlp_ratio),
126
+ dim_out,
127
+ num_layers=2,
128
+ activation=act_layer,
129
+ )
130
+
131
+ if dim != dim_out:
132
+ self.proj = nn.Linear(dim, dim_out)
133
+
134
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
135
+ shortcut = x # B, H, W, C
136
+ x = self.norm1(x)
137
+
138
+ # Skip connection
139
+ if self.dim != self.dim_out:
140
+ shortcut = do_pool(self.proj(x), self.pool)
141
+
142
+ # Window partition
143
+ window_size = self.window_size
144
+ if window_size > 0:
145
+ H, W = x.shape[1], x.shape[2]
146
+ x, pad_hw = window_partition(x, window_size)
147
+
148
+ # Window Attention + Q Pooling (if stage change)
149
+ # Apply chunks to reduce memory
150
+ CHUNK_SIZE, batch_size = 64, x.size(0)
151
+ if batch_size > CHUNK_SIZE:
152
+ chunks = []
153
+ for i in range(0, batch_size, CHUNK_SIZE):
154
+ chunks.append(self.attn(x[i:i + CHUNK_SIZE]))
155
+ x = torch.cat(chunks)
156
+ assert x.size(0) == batch_size
157
+ else:
158
+ x = self.attn(x)
159
+
160
+ if self.q_stride:
161
+ # Shapes have changed due to Q pooling
162
+ window_size = self.window_size // self.q_stride[0]
163
+ H, W = shortcut.shape[1:3]
164
+
165
+ pad_h = (window_size - H % window_size) % window_size
166
+ pad_w = (window_size - W % window_size) % window_size
167
+ pad_hw = (H + pad_h, W + pad_w)
168
+
169
+ # Reverse window partition
170
+ if self.window_size > 0:
171
+ x = window_unpartition(x, window_size, pad_hw, (H, W))
172
+
173
+ x = shortcut + self.drop_path(x)
174
+ # MLP
175
+ x = x + self.drop_path(self.mlp(self.norm2(x)))
176
+ return x
177
+
178
+
179
+ class Hiera(nn.Module):
180
+ """
181
+ Reference: https://arxiv.org/abs/2306.00989
182
+ """
183
+
184
+ def __init__(
185
+ self,
186
+ embed_dim: int = 96, # initial embed dim
187
+ num_heads: int = 1, # initial number of heads
188
+ drop_path_rate: float = 0.0, # stochastic depth
189
+ q_pool: int = 3, # number of q_pool stages
190
+ q_stride: Tuple[int, int] = (2, 2), # downsample stride bet. stages
191
+ stages: Tuple[int, ...] = (2, 3, 16, 3), # blocks per stage
192
+ dim_mul: float = 2.0, # dim_mul factor at stage shift
193
+ head_mul: float = 2.0, # head_mul factor at stage shift
194
+ window_pos_embed_bkg_spatial_size: Tuple[int, int] = (14, 14),
195
+ # window size per stage, when not using global att.
196
+ window_spec: Tuple[int, ...] = (
197
+ 8,
198
+ 4,
199
+ 14,
200
+ 7,
201
+ ),
202
+ # global attn in these blocks
203
+ global_att_blocks: Tuple[int, ...] = (
204
+ 12,
205
+ 16,
206
+ 20,
207
+ ),
208
+ weights_path=None,
209
+ return_interm_layers=True, # return feats from every stage
210
+ ):
211
+ super().__init__()
212
+
213
+ assert len(stages) == len(window_spec)
214
+ self.window_spec = window_spec
215
+
216
+ depth = sum(stages)
217
+ self.q_stride = q_stride
218
+ self.stage_ends = [sum(stages[:i]) - 1 for i in range(1, len(stages) + 1)]
219
+ assert 0 <= q_pool <= len(self.stage_ends[:-1])
220
+ self.q_pool_blocks = [x + 1 for x in self.stage_ends[:-1]][:q_pool]
221
+ self.return_interm_layers = return_interm_layers
222
+
223
+ self.patch_embed = PatchEmbed(embed_dim=embed_dim)
224
+ # Which blocks have global att?
225
+ self.global_att_blocks = global_att_blocks
226
+
227
+ # Windowed positional embedding (https://arxiv.org/abs/2311.05613)
228
+ self.window_pos_embed_bkg_spatial_size = window_pos_embed_bkg_spatial_size
229
+ self.pos_embed = nn.Parameter(torch.zeros(1, embed_dim, *self.window_pos_embed_bkg_spatial_size))
230
+ self.pos_embed_window = nn.Parameter(torch.zeros(1, embed_dim, self.window_spec[0], self.window_spec[0]))
231
+
232
+ dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule
233
+
234
+ cur_stage = 1
235
+ self.blocks = nn.ModuleList()
236
+
237
+ for i in range(depth):
238
+ dim_out = embed_dim
239
+ # The window size lags the stage change by one block: the first block
240
+ # of each new stage still uses the previous stage's window size before
241
+ # switching to the window size of the current stage
242
+ window_size = self.window_spec[cur_stage - 1]
243
+
244
+ if self.global_att_blocks is not None:
245
+ window_size = 0 if i in self.global_att_blocks else window_size
246
+
247
+ if i - 1 in self.stage_ends:
248
+ dim_out = int(embed_dim * dim_mul)
249
+ num_heads = int(num_heads * head_mul)
250
+ cur_stage += 1
251
+
252
+ block = MultiScaleBlock(
253
+ dim=embed_dim,
254
+ dim_out=dim_out,
255
+ num_heads=num_heads,
256
+ drop_path=dpr[i],
257
+ q_stride=self.q_stride if i in self.q_pool_blocks else None,
258
+ window_size=window_size,
259
+ )
260
+
261
+ embed_dim = dim_out
262
+ self.blocks.append(block)
263
+
264
+ self.channel_list = ([self.blocks[i].dim_out
265
+ for i in self.stage_ends[::-1]] if return_interm_layers else [self.blocks[-1].dim_out])
266
+
267
+ if weights_path is not None:
268
+ with g_pathmgr.open(weights_path, "rb") as f:
269
+ chkpt = torch.load(f, map_location="cpu")
270
+ logging.info("loading Hiera", self.load_state_dict(chkpt, strict=False))
271
+
272
+ def _get_pos_embed(self, hw: Tuple[int, int]) -> torch.Tensor:
273
+ h, w = hw
274
+ window_embed = self.pos_embed_window
275
+ pos_embed = F.interpolate(self.pos_embed.float(), size=(h, w), mode="bicubic").to(self.pos_embed.dtype)
276
+ pos_embed = pos_embed + window_embed.tile([x // y for x, y in zip(pos_embed.shape, window_embed.shape)])
277
+ pos_embed = pos_embed.permute(0, 2, 3, 1)
278
+ return pos_embed
279
+
280
+ def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
281
+ x = self.patch_embed(x)
282
+ # x: (B, H, W, C)
283
+
284
+ # Add pos embed
285
+ x = x + self._get_pos_embed(x.shape[1:3])
286
+
287
+ outputs = []
288
+ for i, blk in enumerate(self.blocks):
289
+ x = blk(x)
290
+ if (i == self.stage_ends[-1]) or (i in self.stage_ends and self.return_interm_layers):
291
+ feats = x.permute(0, 3, 1, 2)
292
+ outputs.append(feats)
293
+
294
+ return outputs
295
+
296
+ def get_layer_id(self, layer_name):
297
+ # https://github.com/microsoft/unilm/blob/master/beit/optim_factory.py#L33
298
+ num_layers = self.get_num_layers()
299
+
300
+ if layer_name.find("rel_pos") != -1:
301
+ return num_layers + 1
302
+ elif layer_name.find("pos_embed") != -1:
303
+ return 0
304
+ elif layer_name.find("patch_embed") != -1:
305
+ return 0
306
+ elif layer_name.find("blocks") != -1:
307
+ return int(layer_name.split("blocks")[1].split(".")[1]) + 1
308
+ else:
309
+ return num_layers + 1
310
+
311
+ def get_num_layers(self) -> int:
312
+ return len(self.blocks)
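A hedged sketch (not part of the commit) of the tiny trunk as configured in sam2_hiera_t.yaml above, run on a dummy 1024x1024 image; the expected shapes follow from the stage and stride arithmetic in this file.

import torch
from sam2.modeling.backbones.hieradet import Hiera

trunk = Hiera(
    embed_dim=96, num_heads=1, stages=(1, 2, 7, 2),
    global_att_blocks=(5, 7, 9), window_pos_embed_bkg_spatial_size=(7, 7),
)
feats = trunk(torch.randn(1, 3, 1024, 1024))
print([f.shape for f in feats])
# Strides 4/8/16/32 -> [1, 96, 256, 256], [1, 192, 128, 128], [1, 384, 64, 64], [1, 768, 32, 32]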
sam2/modeling/backbones/image_encoder.py ADDED
@@ -0,0 +1,145 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+
4
+ # This source code is licensed under the license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ from typing import List, Optional
8
+
9
+ import torch
10
+ import torch.nn as nn
11
+ import torch.nn.functional as F
12
+
13
+
14
+ class ImageEncoder(nn.Module):
15
+
16
+ def __init__(
17
+ self,
18
+ trunk: nn.Module,
19
+ neck: nn.Module,
20
+ scalp: int = 0,
21
+ ):
22
+ super().__init__()
23
+ self.trunk = trunk
24
+ self.neck = neck
25
+ self.scalp = scalp
26
+ assert (
27
+ self.trunk.channel_list == self.neck.backbone_channel_list
28
+ ), f"Channel dims of trunk and neck do not match. Trunk: {self.trunk.channel_list}, neck: {self.neck.backbone_channel_list}"
29
+
30
+ def forward(self, sample: torch.Tensor):
31
+ # Forward through backbone
32
+ # features, pos = self.neck(self.trunk(sample))
33
+
34
+ # NOTE: use chunk to reduce memory ------------------------------
35
+ features, pos, chunk_size = [], [], 16
36
+ for base_idx in range(0, sample.size(0), chunk_size):
37
+ chunk_features, chunk_pos = self.neck(self.trunk(sample[base_idx:base_idx + chunk_size]))
38
+ features.append(chunk_features)
39
+ pos.append(chunk_pos)
40
+ features = [torch.cat([e[i] for e in features]) for i in range(len(features[0]))]
41
+ pos = [torch.cat([e[i] for e in pos]) for i in range(len(pos[0]))]
42
+ assert features[0].size(0) == pos[0].size(0) == sample.size(0)
43
+ # ---------------------------------------------------------------
44
+
45
+ if self.scalp > 0:
46
+ # Discard the lowest resolution features
47
+ features, pos = features[:-self.scalp], pos[:-self.scalp]
48
+
49
+ src = features[-1]
50
+ output = {
51
+ "vision_features": src,
52
+ "vision_pos_enc": pos,
53
+ "backbone_fpn": features,
54
+ }
55
+ return output
56
+
57
+
58
+ class FpnNeck(nn.Module):
59
+ """
60
+ A modified variant of Feature Pyramid Network (FPN) neck
61
+ (we remove output conv and also do bicubic interpolation similar to ViT
62
+ pos embed interpolation)
63
+ """
64
+
65
+ def __init__(
66
+ self,
67
+ position_encoding: nn.Module,
68
+ d_model: int,
69
+ backbone_channel_list: List[int],
70
+ kernel_size: int = 1,
71
+ stride: int = 1,
72
+ padding: int = 0,
73
+ fpn_interp_model: str = "bilinear",
74
+ fuse_type: str = "sum",
75
+ fpn_top_down_levels: Optional[List[int]] = None,
76
+ ):
77
+ """Initialize the neck
78
+ :param position_encoding: the positional encoding to use
79
+ :param d_model: the dimension of the model
80
+ :param backbone_channel_list: the channel dimensions of the backbone feature maps
81
+ :param fpn_interp_model: the interpolation mode used for top-down features
82
+ """
83
+ super().__init__()
84
+ self.position_encoding = position_encoding
85
+ self.convs = nn.ModuleList()
86
+ self.backbone_channel_list = backbone_channel_list
87
+ self.d_model = d_model
88
+ for dim in backbone_channel_list:
89
+ current = nn.Sequential()
90
+ current.add_module(
91
+ "conv",
92
+ nn.Conv2d(
93
+ in_channels=dim,
94
+ out_channels=d_model,
95
+ kernel_size=kernel_size,
96
+ stride=stride,
97
+ padding=padding,
98
+ ),
99
+ )
100
+
101
+ self.convs.append(current)
102
+ self.fpn_interp_model = fpn_interp_model
103
+ assert fuse_type in ["sum", "avg"]
104
+ self.fuse_type = fuse_type
105
+
106
+ # levels to have top-down features in its outputs
107
+ # e.g. if fpn_top_down_levels is [2, 3], then only outputs of level 2 and 3
108
+ # have top-down propagation, while outputs of level 0 and level 1 have only
109
+ # lateral features from the same backbone level.
110
+ if fpn_top_down_levels is None:
111
+ # default is to have top-down features on all levels
112
+ fpn_top_down_levels = range(len(self.convs))
113
+ self.fpn_top_down_levels = list(fpn_top_down_levels)
114
+
115
+ def forward(self, xs: List[torch.Tensor]):
116
+
117
+ out = [None] * len(self.convs)
118
+ pos = [None] * len(self.convs)
119
+ assert len(xs) == len(self.convs)
120
+ # fpn forward pass
121
+ # see https://github.com/facebookresearch/detectron2/blob/main/detectron2/modeling/backbone/fpn.py
122
+ prev_features = None
123
+ # forward in top-down order (from low to high resolution)
124
+ n = len(self.convs) - 1
125
+ for i in range(n, -1, -1):
126
+ x = xs[i]
127
+ lateral_features = self.convs[n - i](x)
128
+ if i in self.fpn_top_down_levels and prev_features is not None:
129
+ top_down_features = F.interpolate(
130
+ prev_features.float(),
131
+ scale_factor=2.0,
132
+ mode=self.fpn_interp_model,
133
+ align_corners=(None if self.fpn_interp_model == "nearest" else False),
134
+ antialias=False,
135
+ ).to(prev_features.dtype)
136
+ prev_features = lateral_features + top_down_features
137
+ if self.fuse_type == "avg":
138
+ prev_features /= 2
139
+ else:
140
+ prev_features = lateral_features
141
+ x_out = prev_features
142
+ out[i] = x_out
143
+ pos[i] = self.position_encoding(x_out).to(x_out.dtype)
144
+
145
+ return out, pos
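A hedged sketch (not part of the commit) of the neck as configured in the YAML above: lateral 1x1 convs project each backbone level to d_model, and only the levels listed in fpn_top_down_levels receive top-down fusion. The input order (highest resolution first) mirrors the trunk's output order.

import torch
from sam2.modeling.backbones.image_encoder import FpnNeck
from sam2.modeling.position_encoding import PositionEmbeddingSine

neck = FpnNeck(
    position_encoding=PositionEmbeddingSine(num_pos_feats=256),
    d_model=256,
    backbone_channel_list=[768, 384, 192, 96],
    fpn_top_down_levels=[2, 3],
    fpn_interp_model="nearest",
)
xs = [torch.randn(1, c, s, s) for c, s in zip([96, 192, 384, 768], [256, 128, 64, 32])]
out, pos = neck(xs)
print([o.shape for o in out])  # every level projected to 256 channels at its own resolution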
sam2/modeling/backbones/utils.py ADDED
@@ -0,0 +1,88 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+
4
+ # This source code is licensed under the license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+ """Some utilities for backbones, in particular for windowing"""
7
+
8
+ from typing import Tuple
9
+
10
+ import torch
11
+ import torch.nn as nn
12
+ import torch.nn.functional as F
13
+
14
+
15
+ def window_partition(x, window_size):
16
+ """
17
+ Partition into non-overlapping windows with padding if needed.
18
+ Args:
19
+ x (tensor): input tokens with [B, H, W, C].
20
+ window_size (int): window size.
21
+ Returns:
22
+ windows: windows after partition with [B * num_windows, window_size, window_size, C].
23
+ (Hp, Wp): padded height and width before partition
24
+ """
25
+ B, H, W, C = x.shape
26
+
27
+ pad_h = (window_size - H % window_size) % window_size
28
+ pad_w = (window_size - W % window_size) % window_size
29
+ if pad_h > 0 or pad_w > 0:
30
+ x = F.pad(x, (0, 0, 0, pad_w, 0, pad_h))
31
+ Hp, Wp = H + pad_h, W + pad_w
32
+
33
+ x = x.view(B, Hp // window_size, window_size, Wp // window_size, window_size, C)
34
+ windows = x.permute(0, 1, 3, 2, 4, 5).reshape(-1, window_size, window_size, C)
35
+ return windows, (Hp, Wp)
36
+
37
+
38
+ def window_unpartition(windows, window_size, pad_hw, hw):
39
+ """
40
+ Window unpartition into original sequences and remove padding.
41
+ Args:
42
+ windows (tensor): input tokens with [B * num_windows, window_size, window_size, C].
43
+ window_size (int): window size.
44
+ pad_hw (Tuple): padded height and width (Hp, Wp).
45
+ hw (Tuple): original height and width (H, W) before padding.
46
+ Returns:
47
+ x: unpartitioned sequences with [B, H, W, C].
48
+ """
49
+ Hp, Wp = pad_hw
50
+ H, W = hw
51
+ B = windows.shape[0] // (Hp * Wp // window_size // window_size)
52
+ x = windows.reshape(B, Hp // window_size, Wp // window_size, window_size, window_size, -1)
53
+ x = x.permute(0, 1, 3, 2, 4, 5).reshape(B, Hp, Wp, -1)
54
+
55
+ if Hp > H or Wp > W:
56
+ x = x[:, :H, :W, :]
57
+ return x
58
+
59
+
60
+ class PatchEmbed(nn.Module):
61
+ """
62
+ Image to Patch Embedding.
63
+ """
64
+
65
+ def __init__(
66
+ self,
67
+ kernel_size: Tuple[int, ...] = (7, 7),
68
+ stride: Tuple[int, ...] = (4, 4),
69
+ padding: Tuple[int, ...] = (3, 3),
70
+ in_chans: int = 3,
71
+ embed_dim: int = 768,
72
+ ):
73
+ """
74
+ Args:
75
+ kernel_size (Tuple): kernel size of the projection layer.
76
+ stride (Tuple): stride of the projection layer.
77
+ padding (Tuple): padding size of the projection layer.
78
+ in_chans (int): Number of input image channels.
79
+ embed_dim (int): Patch embedding dimension.
80
+ """
81
+ super().__init__()
82
+ self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=kernel_size, stride=stride, padding=padding)
83
+
84
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
85
+ x = self.proj(x)
86
+ # B C H W -> B H W C
87
+ x = x.permute(0, 2, 3, 1)
88
+ return x
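A hedged round-trip sketch (not part of the commit): partition pads H and W up to a multiple of the window size, and unpartition crops the padding back off, so the pair is lossless.

import torch
from sam2.modeling.backbones.utils import window_partition, window_unpartition

x = torch.randn(2, 50, 50, 96)                        # [B, H, W, C], not divisible by 8
windows, pad_hw = window_partition(x, window_size=8)  # [2 * 7 * 7, 8, 8, 96], pad_hw == (56, 56)
y = window_unpartition(windows, 8, pad_hw, (50, 50))
assert torch.equal(x, y)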
sam2/modeling/memory_attention.py ADDED
@@ -0,0 +1,168 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+
4
+ # This source code is licensed under the license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ from typing import Optional
8
+
9
+ import torch
10
+ from torch import nn, Tensor
11
+
12
+ from sam2.modeling.sam.transformer import RoPEAttention
13
+
14
+ from sam2.modeling.sam2_utils import get_activation_fn, get_clones
15
+
16
+
17
+ class MemoryAttentionLayer(nn.Module):
18
+
19
+ def __init__(
20
+ self,
21
+ activation: str,
22
+ cross_attention: nn.Module,
23
+ d_model: int,
24
+ dim_feedforward: int,
25
+ dropout: float,
26
+ pos_enc_at_attn: bool,
27
+ pos_enc_at_cross_attn_keys: bool,
28
+ pos_enc_at_cross_attn_queries: bool,
29
+ self_attention: nn.Module,
30
+ ):
31
+ super().__init__()
32
+ self.d_model = d_model
33
+ self.dim_feedforward = dim_feedforward
34
+ self.dropout_value = dropout
35
+ self.self_attn = self_attention
36
+ self.cross_attn_image = cross_attention
37
+
38
+ # Implementation of Feedforward model
39
+ self.linear1 = nn.Linear(d_model, dim_feedforward)
40
+ self.dropout = nn.Dropout(dropout)
41
+ self.linear2 = nn.Linear(dim_feedforward, d_model)
42
+
43
+ self.norm1 = nn.LayerNorm(d_model)
44
+ self.norm2 = nn.LayerNorm(d_model)
45
+ self.norm3 = nn.LayerNorm(d_model)
46
+ self.dropout1 = nn.Dropout(dropout)
47
+ self.dropout2 = nn.Dropout(dropout)
48
+ self.dropout3 = nn.Dropout(dropout)
49
+
50
+ self.activation_str = activation
51
+ self.activation = get_activation_fn(activation)
52
+
53
+ # Where to add pos enc
54
+ self.pos_enc_at_attn = pos_enc_at_attn
55
+ self.pos_enc_at_cross_attn_queries = pos_enc_at_cross_attn_queries
56
+ self.pos_enc_at_cross_attn_keys = pos_enc_at_cross_attn_keys
57
+
58
+ def _forward_sa(self, tgt, query_pos):
59
+ # Self-Attention
60
+ tgt2 = self.norm1(tgt)
61
+ q = k = tgt2 + query_pos if self.pos_enc_at_attn else tgt2
62
+ tgt2 = self.self_attn(q, k, v=tgt2)
63
+ tgt = tgt + self.dropout1(tgt2)
64
+ return tgt
65
+
66
+ def _forward_ca(self, tgt, memory, query_pos, pos, num_k_exclude_rope=0):
67
+ kwds = {}
68
+ if num_k_exclude_rope > 0:
69
+ assert isinstance(self.cross_attn_image, RoPEAttention)
70
+ kwds = {"num_k_exclude_rope": num_k_exclude_rope}
71
+
72
+ # Cross-Attention
73
+ tgt2 = self.norm2(tgt)
74
+ tgt2 = self.cross_attn_image(
75
+ q=tgt2 + query_pos if self.pos_enc_at_cross_attn_queries else tgt2,
76
+ k=memory + pos if self.pos_enc_at_cross_attn_keys else memory,
77
+ v=memory,
78
+ **kwds,
79
+ )
80
+ tgt = tgt + self.dropout2(tgt2)
81
+ return tgt
82
+
83
+ def forward(
84
+ self,
85
+ tgt,
86
+ memory,
87
+ pos: Optional[Tensor] = None,
88
+ query_pos: Optional[Tensor] = None,
89
+ num_k_exclude_rope: int = 0,
90
+ ) -> torch.Tensor:
91
+
92
+ # Self-Attn, Cross-Attn
93
+ tgt = self._forward_sa(tgt, query_pos)
94
+ tgt = self._forward_ca(tgt, memory, query_pos, pos, num_k_exclude_rope)
95
+ # MLP
96
+ tgt2 = self.norm3(tgt)
97
+ tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2))))
98
+ tgt = tgt + self.dropout3(tgt2)
99
+ return tgt
100
+
101
+
102
+ class MemoryAttention(nn.Module):
103
+
104
+ def __init__(
105
+ self,
106
+ d_model: int,
107
+ pos_enc_at_input: bool,
108
+ layer: nn.Module,
109
+ num_layers: int,
110
+ batch_first: bool = True, # Do layers expect batch first input?
111
+ ):
112
+ super().__init__()
113
+ self.d_model = d_model
114
+ self.layers = get_clones(layer, num_layers)
115
+ self.num_layers = num_layers
116
+ self.norm = nn.LayerNorm(d_model)
117
+ self.pos_enc_at_input = pos_enc_at_input
118
+ self.batch_first = batch_first
119
+
120
+ def forward(
121
+ self,
122
+ curr: torch.Tensor, # self-attention inputs
123
+ memory: torch.Tensor, # cross-attention inputs
124
+ curr_pos: Optional[Tensor] = None, # pos_enc for self-attention inputs
125
+ memory_pos: Optional[Tensor] = None, # pos_enc for cross-attention inputs
126
+ num_obj_ptr_tokens: int = 0, # number of object pointer *tokens*
127
+ ):
128
+ if isinstance(curr, list):
129
+ assert isinstance(curr_pos, list)
130
+ assert len(curr) == len(curr_pos) == 1
131
+ curr, curr_pos = (
132
+ curr[0],
133
+ curr_pos[0],
134
+ )
135
+
136
+ assert (curr.shape[1] == memory.shape[1]), "Batch size must be the same for curr and memory"
137
+
138
+ output = curr
139
+ if self.pos_enc_at_input and curr_pos is not None:
140
+ output = output + 0.1 * curr_pos
141
+
142
+ if self.batch_first:
143
+ # Convert to batch first
144
+ output = output.transpose(0, 1)
145
+ curr_pos = curr_pos.transpose(0, 1)
146
+ memory = memory.transpose(0, 1)
147
+ memory_pos = memory_pos.transpose(0, 1)
148
+
149
+ for layer in self.layers:
150
+ kwds = {}
151
+ if isinstance(layer.cross_attn_image, RoPEAttention):
152
+ kwds = {"num_k_exclude_rope": num_obj_ptr_tokens}
153
+
154
+ output = layer(
155
+ tgt=output,
156
+ memory=memory,
157
+ pos=memory_pos,
158
+ query_pos=curr_pos,
159
+ **kwds,
160
+ )
161
+ normed_output = self.norm(output)
162
+
163
+ if self.batch_first:
164
+ # Convert back to seq first
165
+ normed_output = normed_output.transpose(0, 1)
166
+ curr_pos = curr_pos.transpose(0, 1)
167
+
168
+ return normed_output
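A hedged shape sketch (not part of the commit). To keep it small, plain Attention modules from sam2.modeling.sam.transformer (assumed to be defined alongside RoPEAttention) stand in for the RoPEAttention used in the real configs; the point is only the tensor layout: sequence-first inputs, 256-d current-frame tokens, 64-d memory tokens.

import torch
from sam2.modeling.memory_attention import MemoryAttention, MemoryAttentionLayer
from sam2.modeling.sam.transformer import Attention

layer = MemoryAttentionLayer(
    activation="relu",
    cross_attention=Attention(embedding_dim=256, num_heads=1, kv_in_dim=64),
    d_model=256,
    dim_feedforward=2048,
    dropout=0.1,
    pos_enc_at_attn=False,
    pos_enc_at_cross_attn_keys=True,
    pos_enc_at_cross_attn_queries=False,
    self_attention=Attention(embedding_dim=256, num_heads=1),
)
mem_attn = MemoryAttention(d_model=256, pos_enc_at_input=True, layer=layer, num_layers=2)

curr = torch.randn(4096, 1, 256)        # current-frame tokens: [seq, batch, d_model]
curr_pos = torch.randn(4096, 1, 256)
memory = torch.randn(7 * 4096, 1, 64)   # memory tokens: [mem_seq, batch, mem_dim]
memory_pos = torch.randn(7 * 4096, 1, 64)
out = mem_attn(curr, memory, curr_pos=curr_pos, memory_pos=memory_pos)
print(out.shape)                        # torch.Size([4096, 1, 256])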
sam2/modeling/memory_encoder.py ADDED
@@ -0,0 +1,180 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+
4
+ # This source code is licensed under the license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ import math
8
+ from typing import Tuple
9
+
10
+ import torch
11
+ import torch.nn as nn
12
+ import torch.nn.functional as F
13
+
14
+ from sam2.modeling.sam2_utils import DropPath, get_clones, LayerNorm2d
15
+
16
+
17
+ class MaskDownSampler(nn.Module):
18
+ """
19
+ Progressively downsample a mask by total_stride, each time by stride.
20
+ Note that LayerNorm is applied per *token*, like in ViT.
21
+
22
+ With each downsample (by a factor stride**2), channel capacity increases by the same factor.
23
+ In the end, we linearly project to embed_dim channels.
24
+ """
25
+
26
+ def __init__(
27
+ self,
28
+ embed_dim=256,
29
+ kernel_size=4,
30
+ stride=4,
31
+ padding=0,
32
+ total_stride=16,
33
+ activation=nn.GELU,
34
+ ):
35
+ super().__init__()
36
+ num_layers = int(math.log2(total_stride) // math.log2(stride))
37
+ assert stride**num_layers == total_stride
38
+ self.encoder = nn.Sequential()
39
+ mask_in_chans, mask_out_chans = 1, 1
40
+ for _ in range(num_layers):
41
+ mask_out_chans = mask_in_chans * (stride**2)
42
+ self.encoder.append(
43
+ nn.Conv2d(
44
+ mask_in_chans,
45
+ mask_out_chans,
46
+ kernel_size=kernel_size,
47
+ stride=stride,
48
+ padding=padding,
49
+ ))
50
+ self.encoder.append(LayerNorm2d(mask_out_chans))
51
+ self.encoder.append(activation())
52
+ mask_in_chans = mask_out_chans
53
+
54
+ self.encoder.append(nn.Conv2d(mask_out_chans, embed_dim, kernel_size=1))
55
+
56
+ def forward(self, x):
57
+ return self.encoder(x)
58
+
59
+
60
+ # Lightly adapted from ConvNext (https://github.com/facebookresearch/ConvNeXt)
61
+ class CXBlock(nn.Module):
62
+ r"""ConvNeXt Block. There are two equivalent implementations:
63
+ (1) DwConv -> LayerNorm (channels_first) -> 1x1 Conv -> GELU -> 1x1 Conv; all in (N, C, H, W)
64
+ (2) DwConv -> Permute to (N, H, W, C); LayerNorm (channels_last) -> Linear -> GELU -> Linear; Permute back
65
+ We use (2) as we find it slightly faster in PyTorch
66
+
67
+ Args:
68
+ dim (int): Number of input channels.
69
+ drop_path (float): Stochastic depth rate. Default: 0.0
70
+ layer_scale_init_value (float): Init value for Layer Scale. Default: 1e-6.
71
+ """
72
+
73
+ def __init__(
74
+ self,
75
+ dim,
76
+ kernel_size=7,
77
+ padding=3,
78
+ drop_path=0.0,
79
+ layer_scale_init_value=1e-6,
80
+ use_dwconv=True,
81
+ ):
82
+ super().__init__()
83
+ self.dwconv = nn.Conv2d(
84
+ dim,
85
+ dim,
86
+ kernel_size=kernel_size,
87
+ padding=padding,
88
+ groups=dim if use_dwconv else 1,
89
+ ) # depthwise conv
90
+ self.norm = LayerNorm2d(dim, eps=1e-6)
91
+ self.pwconv1 = nn.Linear(dim, 4 * dim) # pointwise/1x1 convs, implemented with linear layers
92
+ self.act = nn.GELU()
93
+ self.pwconv2 = nn.Linear(4 * dim, dim)
94
+ # NOTE: changed from gamma to weight
95
+ # https://github.com/huggingface/transformers/issues/29554
96
+ self.weight = (
97
+ nn.Parameter(layer_scale_init_value * torch.ones(
98
+ (dim)), requires_grad=True) if layer_scale_init_value > 0 else None)
99
+ self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
100
+
101
+ def forward(self, x):
102
+ input = x
103
+ x = self.dwconv(x)
104
+ x = self.norm(x)
105
+ x = x.permute(0, 2, 3, 1) # (N, C, H, W) -> (N, H, W, C)
106
+ x = self.pwconv1(x)
107
+ x = self.act(x)
108
+ x = self.pwconv2(x)
109
+ if self.weight is not None:
110
+ x = self.weight * x
111
+ x = x.permute(0, 3, 1, 2) # (N, H, W, C) -> (N, C, H, W)
112
+
113
+ x = input + self.drop_path(x)
114
+ return x
115
+
116
+
117
+ class Fuser(nn.Module):
118
+
119
+ def __init__(self, layer, num_layers, dim=None, input_projection=False):
120
+ super().__init__()
121
+ self.proj = nn.Identity()
122
+ self.layers = get_clones(layer, num_layers)
123
+
124
+ if input_projection:
125
+ assert dim is not None
126
+ self.proj = nn.Conv2d(dim, dim, kernel_size=1)
127
+
128
+ def forward(self, x):
129
+ # normally x: (N, C, H, W)
130
+ x = self.proj(x)
131
+ for layer in self.layers:
132
+ x = layer(x)
133
+ return x
134
+
135
+
136
+ class MemoryEncoder(nn.Module):
137
+
138
+ def __init__(
139
+ self,
140
+ out_dim,
141
+ mask_downsampler,
142
+ fuser,
143
+ position_encoding,
144
+ in_dim=256, # in_dim of pix_feats
145
+ ):
146
+ super().__init__()
147
+
148
+ self.mask_downsampler = mask_downsampler
149
+
150
+ self.pix_feat_proj = nn.Conv2d(in_dim, in_dim, kernel_size=1)
151
+ self.fuser = fuser
152
+ self.position_encoding = position_encoding
153
+ self.out_proj = nn.Identity()
154
+ if out_dim != in_dim:
155
+ self.out_proj = nn.Conv2d(in_dim, out_dim, kernel_size=1)
156
+
157
+ def forward(
158
+ self,
159
+ pix_feat: torch.Tensor,
160
+ masks: torch.Tensor,
161
+ skip_mask_sigmoid: bool = False,
162
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
163
+ # Process masks
164
+ # apply sigmoid so that there is less domain shift from GT masks, which are boolean
165
+ if not skip_mask_sigmoid:
166
+ masks = F.sigmoid(masks)
167
+ masks = self.mask_downsampler(masks)
168
+
169
+ # Fuse pix_feats and downsampled masks
170
+ # in case the visual features are on CPU, move them to the masks' device
171
+ pix_feat = pix_feat.to(masks.device)
172
+
173
+ x = self.pix_feat_proj(pix_feat)
174
+ x = x + masks
175
+ x = self.fuser(x)
176
+ x = self.out_proj(x)
177
+
178
+ pos = self.position_encoding(x).to(x.dtype)
179
+
180
+ return {"vision_features": x, "vision_pos_enc": [pos]}
sam2/modeling/position_encoding.py ADDED
@@ -0,0 +1,312 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+
4
+ # This source code is licensed under the license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ import math
8
+ from typing import Optional, Tuple
9
+
10
+ import numpy as np
11
+ import torch
12
+ from torch import nn
13
+
14
+
15
+ class PositionEmbeddingSine(nn.Module):
16
+ """
17
+ This is a more standard version of the position embedding, very similar to the one
18
+ used by the Attention Is All You Need paper, generalized to work on images.
19
+ """
20
+
21
+ def __init__(
22
+ self,
23
+ num_pos_feats,
24
+ temperature: int = 10000,
25
+ normalize: bool = True,
26
+ scale: Optional[float] = None,
27
+ # Following settings only relevant
28
+ # for warming up the cache for compilation
29
+ warmup_cache: bool = True,
30
+ image_size: int = 1024,
31
+ strides: Tuple[int] = (4, 8, 16, 32),
32
+ ):
33
+ super().__init__()
34
+ assert num_pos_feats % 2 == 0, "Expecting even model width"
35
+ self.num_pos_feats = num_pos_feats // 2
36
+ self.temperature = temperature
37
+ self.normalize = normalize
38
+ if scale is not None and normalize is False:
39
+ raise ValueError("normalize should be True if scale is passed")
40
+ if scale is None:
41
+ scale = 2 * math.pi
42
+ self.scale = scale
43
+
44
+ self.cache = {}
45
+ if warmup_cache:
46
+ # Warmup cache for cuda and npu, to help with compilation
47
+ try:
48
+ import torch_npu
49
+ has_npu = torch_npu.npu.is_available()
50
+ except ImportError:
51
+ has_npu = False
52
+ if torch.cuda.is_available() or has_npu:
53
+ device = torch.device("cuda" if torch.cuda.is_available() else "npu")
54
+ for stride in strides:
55
+ cache_key = (image_size // stride, image_size // stride)
56
+ self._pe(1, device, None, *cache_key)
57
+
58
+ def _encode_xy(self, x, y):
59
+ # NOTE: disable autocasting here
60
+ raise NotImplementedError
61
+ # The positions are expected to be normalized
62
+ assert len(x) == len(y) and x.ndim == y.ndim == 1
63
+ x_embed = x * self.scale
64
+ y_embed = y * self.scale
65
+
66
+ dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device)
67
+ dim_t = self.temperature**(2 * (dim_t // 2) / self.num_pos_feats)
68
+
69
+ pos_x = x_embed[:, None] / dim_t
70
+ pos_y = y_embed[:, None] / dim_t
71
+ pos_x = torch.stack((pos_x[:, 0::2].sin(), pos_x[:, 1::2].cos()), dim=2).flatten(1)
72
+ pos_y = torch.stack((pos_y[:, 0::2].sin(), pos_y[:, 1::2].cos()), dim=2).flatten(1)
73
+ return pos_x, pos_y
74
+
75
+ @torch.no_grad()
76
+ def encode_boxes(self, x, y, w, h):
77
+ # NOTE: disable autocasting here
78
+ raise NotImplementedError
79
+ pos_x, pos_y = self._encode_xy(x, y)
80
+ pos = torch.cat((pos_y, pos_x, h[:, None], w[:, None]), dim=1)
81
+ return pos
82
+
83
+ encode = encode_boxes # Backwards compatibility
84
+
85
+ @torch.no_grad()
86
+ def encode_points(self, x, y, labels):
87
+ # NOTE: disable autocasting here
88
+ raise NotImplementedError
89
+ (bx, nx), (by, ny), (bl, nl) = x.shape, y.shape, labels.shape
90
+ assert bx == by and nx == ny and bx == bl and nx == nl
91
+ pos_x, pos_y = self._encode_xy(x.flatten(), y.flatten())
92
+ pos_x, pos_y = pos_x.reshape(bx, nx, -1), pos_y.reshape(by, ny, -1)
93
+ pos = torch.cat((pos_y, pos_x, labels[:, :, None]), dim=2)
94
+ return pos
95
+
96
+ @torch.no_grad()
97
+ def _pe(self, B, device, dtype, *cache_key):
98
+ H, W = cache_key
99
+ if cache_key in self.cache:
100
+ return self.cache[cache_key].to(device)[None].repeat(B, 1, 1, 1)
101
+
102
+ # Force fp32 (https://github.com/huggingface/transformers/pull/29285)
103
+ with torch.autocast(device_type=device.type, enabled=False):
104
+ y_embed = torch.arange(1, H + 1, dtype=torch.float32, device=device).view(1, -1, 1).repeat(B, 1, W)
105
+ x_embed = torch.arange(1, W + 1, dtype=torch.float32, device=device).view(1, 1, -1).repeat(B, H, 1)
106
+
107
+ if self.normalize:
108
+ eps = 1e-6
109
+ y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale
110
+ x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale
111
+
112
+ dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=device)
113
+ dim_t = self.temperature**(2 * (dim_t // 2) / self.num_pos_feats)
114
+
115
+ pos_x = x_embed[:, :, :, None] / dim_t
116
+ pos_y = y_embed[:, :, :, None] / dim_t
117
+ pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3)
118
+ pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3)
119
+ pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
120
+
121
+ if dtype is not None:
122
+ pos = pos.to(dtype)
123
+
124
+ self.cache[cache_key] = pos[0]
125
+ return pos
126
+
127
+ @torch.no_grad()
128
+ def forward(self, x: torch.Tensor):
129
+ B = x.shape[0]
130
+ cache_key = (x.shape[-2], x.shape[-1])
131
+ return self._pe(B, x.device, x.dtype, *cache_key)
132
+
133
+
134
+ class PositionEmbeddingRandom(nn.Module):
135
+ """
136
+ Positional encoding using random spatial frequencies.
137
+ """
138
+
139
+ def __init__(self, num_pos_feats: int = 64, scale: Optional[float] = None) -> None:
140
+ super().__init__()
141
+ if scale is None or scale <= 0.0:
142
+ scale = 1.0
143
+ self.register_buffer(
144
+ "positional_encoding_gaussian_matrix",
145
+ scale * torch.randn((2, num_pos_feats)),
146
+ )
147
+
148
+ @torch.no_grad()
149
+ def _pe_encoding(self, coords: torch.Tensor) -> torch.Tensor:
150
+ """Positionally encode points that are normalized to [0,1]."""
151
+ # assuming coords are in [0, 1]^2 square and have d_1 x ... x d_n x 2 shape
152
+ coords = 2 * coords - 1
153
+ coords = coords @ self.positional_encoding_gaussian_matrix.to(coords.dtype)
154
+ coords = 2 * np.pi * coords
155
+ # outputs d_1 x ... x d_n x C shape
156
+ return torch.cat([torch.sin(coords), torch.cos(coords)], dim=-1)
157
+
158
+ @torch.no_grad()
159
+ def forward(self, size: Tuple[int, int]) -> torch.Tensor:
160
+ """Generate positional encoding for a grid of the specified size."""
161
+ h, w = size
162
+ device = self.positional_encoding_gaussian_matrix.device
163
+
164
+ # Force fp32 (https://github.com/huggingface/transformers/pull/29285)
165
+ with torch.autocast(device_type=device.type, enabled=False):
166
+ grid = torch.ones((h, w), device=device, dtype=torch.float32)
167
+ y_embed = grid.cumsum(dim=0) - 0.5
168
+ x_embed = grid.cumsum(dim=1) - 0.5
169
+ y_embed = y_embed / h
170
+ x_embed = x_embed / w
171
+ pe = self._pe_encoding(torch.stack([x_embed, y_embed], dim=-1))
172
+
173
+ pe = pe.to(self.positional_encoding_gaussian_matrix.dtype)
174
+ return pe.permute(2, 0, 1) # C x H x W
175
+
176
+ @torch.no_grad()
177
+ def forward_with_coords(self, coords_input: torch.Tensor, image_size: Tuple[int, int]) -> torch.Tensor:
178
+ """Positionally encode points that are not normalized to [0,1]."""
179
+ assert coords_input.dtype == torch.float, 'coords_input must be in float32'
180
+
181
+ # Force fp32 (https://github.com/huggingface/transformers/pull/29285)
182
+ with torch.autocast(device_type=coords_input.device.type, enabled=False):
183
+ coords = coords_input.clone()
184
+ coords[:, :, 0] = coords[:, :, 0] / image_size[1]
185
+ coords[:, :, 1] = coords[:, :, 1] / image_size[0]
186
+ pe = self._pe_encoding(coords.to(torch.float)) # B x N x C
187
+
188
+ pe = pe.to(self.positional_encoding_gaussian_matrix.dtype)
189
+ return pe
190
+
191
+
192
+ class PositionEmbedding1DRandom(nn.Module):
193
+ """
194
+ Positional encoding using random frequencies for 1D inputs.
195
+ """
196
+
197
+ def __init__(self, num_pos_feats: int = 64, scale: Optional[float] = None) -> None:
198
+ super().__init__()
199
+ if scale is None or scale <= 0.0:
200
+ scale = 1.0
201
+ self.register_buffer(
202
+ "positional_encoding_gaussian_matrix",
203
+ scale * torch.randn((1, num_pos_feats)),
204
+ )
205
+
206
+ @torch.no_grad()
207
+ def _pe_encoding(self, coords: torch.Tensor) -> torch.Tensor:
208
+ """Positionally encode points that are normalized to [0,1]."""
209
+ coords = 2 * coords - 1
210
+ coords = coords @ self.positional_encoding_gaussian_matrix.to(coords.dtype)
211
+ coords = 2 * np.pi * coords
212
+ return torch.cat([torch.sin(coords), torch.cos(coords)], dim=-1)
213
+
214
+ @torch.no_grad()
215
+ def forward(self, size: int) -> torch.Tensor:
216
+ """Generate positional encoding for a sequence of the specified length."""
217
+ device = self.positional_encoding_gaussian_matrix.device
218
+
219
+ # Force fp32 (https://github.com/huggingface/transformers/pull/29285)
220
+ with torch.autocast(device_type=device.type, enabled=False):
221
+ positions = torch.arange(size, device=device, dtype=torch.float32)
222
+ positions = positions / (size - 1)
223
+ positions = positions.unsqueeze(-1)
224
+ pe = self._pe_encoding(positions)
225
+
226
+ pe = pe.to(self.positional_encoding_gaussian_matrix.dtype)
227
+ return pe.permute(1, 0) # C x L
228
+
229
+ @torch.no_grad()
230
+ def forward_with_coords(self, coords_input: torch.Tensor, seq_length: int) -> torch.Tensor:
231
+ """Positionally encode raw coordinates by normalizing to [0,1]."""
232
+ assert coords_input.dtype == torch.float, 'coords_input must be in float32'
233
+
234
+ # Force fp32 (https://github.com/huggingface/transformers/pull/29285)
235
+ with torch.autocast(device_type=coords_input.device.type, enabled=False):
236
+ coords = coords_input.clone()
237
+ coords = coords / (seq_length - 1)
238
+ if coords.dim() == 2:
239
+ coords = coords.unsqueeze(-1)
240
+ pe = self._pe_encoding(coords.to(torch.float)) # B x N x C
241
+
242
+ pe = pe.to(self.positional_encoding_gaussian_matrix.dtype)
243
+ return pe
244
+
245
+
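A minimal usage sketch of the random position encodings above (the 2D variant; the 1D class behaves analogously). The 256-channel, 64x64 setup is an assumption matching the defaults in this file; the snippet is illustrative only and not part of this diff:

import torch

pe = PositionEmbeddingRandom(num_pos_feats=128)   # 2 * 128 = 256 channels
grid_pe = pe((64, 64))                            # dense C x H x W encoding
assert grid_pe.shape == (256, 64, 64)

coords = torch.tensor([[[512.0, 256.0]]])         # (B, N, 2) pixel coords
point_pe = pe.forward_with_coords(coords, image_size=(1024, 1024))
assert point_pe.shape == (1, 1, 256)              # B x N x C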
246
+ # Rotary Positional Encoding, adapted from:
247
+ # 1. https://github.com/meta-llama/codellama/blob/main/llama/model.py
248
+ # 2. https://github.com/naver-ai/rope-vit
249
+ # 3. https://github.com/lucidrains/rotary-embedding-torch
250
+
251
+
252
+ @torch.no_grad()
253
+ def init_t_xy(end_x: int, end_y: int):
254
+ t = torch.arange(end_x * end_y, dtype=torch.float32)
255
+ t_x = (t % end_x).float()
256
+ t_y = torch.div(t, end_x, rounding_mode="floor").float()
257
+ return t_x, t_y
258
+
259
+
260
+ @torch.no_grad()
261
+ def compute_axial_cis(dim: int, end_x: int, end_y: int, theta: float = 10000.0):
262
+ # Force fp32 on CPU (see https://github.com/huggingface/transformers/pull/29285)
263
+ with torch.autocast(device_type='cpu', enabled=False):
264
+ freqs_x = 1.0 / (theta**(torch.arange(0, dim, 4)[:(dim // 4)].float() / dim))
265
+ freqs_y = 1.0 / (theta**(torch.arange(0, dim, 4)[:(dim // 4)].float() / dim))
266
+
267
+ t_x, t_y = init_t_xy(end_x, end_y)
268
+ freqs_x = torch.outer(t_x, freqs_x)
269
+ freqs_y = torch.outer(t_y, freqs_y)
270
+ freqs_cis_x = torch.polar(torch.ones_like(freqs_x), freqs_x)
271
+ freqs_cis_y = torch.polar(torch.ones_like(freqs_y), freqs_y)
272
+
273
+ return torch.cat([freqs_cis_x, freqs_cis_y], dim=-1)
274
+
275
+
276
+ @torch.no_grad()
277
+ def reshape_for_broadcast(freqs_cis: torch.Tensor, x: torch.Tensor):
278
+ ndim = x.ndim
279
+ assert ndim > 1
280
+ assert freqs_cis.shape == (x.shape[-2], x.shape[-1])
281
+ shape = [d if i >= ndim - 2 else 1 for i, d in enumerate(x.shape)]
282
+ return freqs_cis.view(*shape)
283
+
284
+
285
+ @torch.no_grad()
286
+ def apply_rotary_enc(
287
+ xq: torch.Tensor,
288
+ xk: torch.Tensor,
289
+ freqs_cis: torch.Tensor,
290
+ repeat_freqs_k: bool = False,
291
+ ):
292
+ # Force fp32 (https://github.com/huggingface/transformers/pull/29285)
293
+ with torch.autocast(device_type=freqs_cis.device.type, enabled=False):
294
+ xq_ = torch.view_as_complex(xq.float().reshape(*xq.shape[:-1], -1, 2))
295
+ xk_ = (torch.view_as_complex(xk.float().reshape(*xk.shape[:-1], -1, 2)) if xk.shape[-2] != 0 else None)
296
+ freqs_cis = reshape_for_broadcast(freqs_cis, xq_)
297
+ xq_out = torch.view_as_real(xq_ * freqs_cis).flatten(3)
298
+ if xk_ is None:
299
+ # no keys to rotate, due to dropout
300
+ return xq_out.type_as(xq).to(xq.device), xk
301
+ # repeat freqs along seq_len dim to match k seq_len
302
+ if repeat_freqs_k:
303
+ r = xk_.shape[-2] // xq_.shape[-2]
304
+ if freqs_cis.is_cuda:
305
+ freqs_cis = freqs_cis.repeat(*([1] * (freqs_cis.ndim - 2)), r, 1)
306
+ else:
307
+ # torch.repeat on complex numbers may not be supported on non-CUDA devices
308
+ # (freqs_cis has 4 dims and we repeat on dim 2) so we use expand + flatten
309
+ freqs_cis = freqs_cis.unsqueeze(2).expand(-1, -1, r, -1, -1).flatten(2, 3)
310
+ xk_out = torch.view_as_real(xk_ * freqs_cis).flatten(3)
311
+
312
+ return xq_out.type_as(xq).to(xq.device), xk_out.type_as(xk).to(xk.device)
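A minimal smoke test for the sine embedding and the rotary helpers above. The shapes follow the stride-16 defaults in this file (64x64 features at 1024 resolution) and are assumptions for illustration only, not part of this diff:

import torch

pe_layer = PositionEmbeddingSine(num_pos_feats=256, warmup_cache=False)
feats = torch.zeros(2, 256, 64, 64)
pos = pe_layer(feats)                  # sine/cosine embedding, same shape as input
assert pos.shape == feats.shape

freqs_cis = compute_axial_cis(dim=64, end_x=64, end_y=64)   # (4096, 32) complex
q = torch.randn(2, 8, 64 * 64, 64)     # (B, heads, tokens, head_dim)
k = torch.randn(2, 8, 64 * 64, 64)
q_rot, k_rot = apply_rotary_enc(q, k, freqs_cis=freqs_cis)
assert q_rot.shape == q.shape and k_rot.shape == k.shape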
sam2/modeling/sam/__init__.py ADDED
@@ -0,0 +1,5 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+
4
+ # This source code is licensed under the license found in the
5
+ # LICENSE file in the root directory of this source tree.
sam2/modeling/sam/mask_decoder.py ADDED
@@ -0,0 +1,274 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+
4
+ # This source code is licensed under the license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ from typing import List, Optional, Tuple, Type
8
+
9
+ import torch
10
+ from torch import nn
11
+
12
+ from sam2.modeling.sam2_utils import LayerNorm2d, MLP
13
+
14
+
15
+ class MaskDecoder(nn.Module):
16
+
17
+ def __init__(
18
+ self,
19
+ *,
20
+ transformer_dim: int,
21
+ transformer: nn.Module,
22
+ num_multimask_outputs: int = 3,
23
+ activation: Type[nn.Module] = nn.GELU,
24
+ iou_head_depth: int = 3,
25
+ iou_head_hidden_dim: int = 256,
26
+ use_high_res_features: bool = False,
27
+ iou_prediction_use_sigmoid=False,
28
+ dynamic_multimask_via_stability=False,
29
+ dynamic_multimask_stability_delta=0.05,
30
+ dynamic_multimask_stability_thresh=0.98,
31
+ pred_obj_scores: bool = False,
32
+ pred_obj_scores_mlp: bool = False,
33
+ use_multimask_token_for_obj_ptr: bool = False,
34
+ ) -> None:
35
+ """
36
+ Predicts masks given an image and prompt embeddings, using a
37
+ transformer architecture.
38
+
39
+ Arguments:
40
+ transformer_dim (int): the channel dimension of the transformer
41
+ transformer (nn.Module): the transformer used to predict masks
42
+ num_multimask_outputs (int): the number of masks to predict
43
+ when disambiguating masks
44
+ activation (nn.Module): the type of activation to use when
45
+ upscaling masks
46
+ iou_head_depth (int): the depth of the MLP used to predict
47
+ mask quality
48
+ iou_head_hidden_dim (int): the hidden dimension of the MLP
49
+ used to predict mask quality
50
+ """
51
+ super().__init__()
52
+ self.transformer_dim = transformer_dim
53
+ self.transformer = transformer
54
+
55
+ self.num_multimask_outputs = num_multimask_outputs
56
+
57
+ self.iou_token = nn.Embedding(1, transformer_dim)
58
+ self.num_mask_tokens = num_multimask_outputs + 1
59
+ self.mask_tokens = nn.Embedding(self.num_mask_tokens, transformer_dim)
60
+
61
+ self.pred_obj_scores = pred_obj_scores
62
+ if self.pred_obj_scores:
63
+ self.obj_score_token = nn.Embedding(1, transformer_dim)
64
+ self.use_multimask_token_for_obj_ptr = use_multimask_token_for_obj_ptr
65
+
66
+ self.output_upscaling = nn.Sequential(
67
+ nn.ConvTranspose2d(transformer_dim, transformer_dim // 4, kernel_size=2, stride=2),
68
+ LayerNorm2d(transformer_dim // 4),
69
+ activation(),
70
+ nn.ConvTranspose2d(transformer_dim // 4, transformer_dim // 8, kernel_size=2, stride=2),
71
+ activation(),
72
+ )
73
+ self.use_high_res_features = use_high_res_features
74
+ if use_high_res_features:
75
+ self.conv_s0 = nn.Conv2d(transformer_dim, transformer_dim // 8, kernel_size=1, stride=1)
76
+ self.conv_s1 = nn.Conv2d(transformer_dim, transformer_dim // 4, kernel_size=1, stride=1)
77
+
78
+ self.output_hypernetworks_mlps = nn.ModuleList(
79
+ [MLP(transformer_dim, transformer_dim, transformer_dim // 8, 3) for i in range(self.num_mask_tokens)])
80
+
81
+ self.iou_prediction_head = MLP(
82
+ transformer_dim,
83
+ iou_head_hidden_dim,
84
+ self.num_mask_tokens,
85
+ iou_head_depth,
86
+ sigmoid_output=iou_prediction_use_sigmoid,
87
+ )
88
+ if self.pred_obj_scores:
89
+ self.pred_obj_score_head = nn.Linear(transformer_dim, 1)
90
+ if pred_obj_scores_mlp:
91
+ self.pred_obj_score_head = MLP(transformer_dim, transformer_dim, 1, 3)
92
+
93
+ # When outputting a single mask, optionally we can dynamically fall back to the best
94
+ # multimask output token if the single mask output token gives low stability scores.
95
+ self.dynamic_multimask_via_stability = dynamic_multimask_via_stability
96
+ self.dynamic_multimask_stability_delta = dynamic_multimask_stability_delta
97
+ self.dynamic_multimask_stability_thresh = dynamic_multimask_stability_thresh
98
+
99
+ def forward(
100
+ self,
101
+ image_embeddings: torch.Tensor,
102
+ image_pe: torch.Tensor,
103
+ sparse_prompt_embeddings: torch.Tensor,
104
+ dense_prompt_embeddings: torch.Tensor,
105
+ multimask_output: bool,
106
+ repeat_image: bool,
107
+ high_res_features: Optional[List[torch.Tensor]] = None,
108
+ ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
109
+ """
110
+ Predict masks given image and prompt embeddings.
111
+
112
+ Arguments:
113
+ image_embeddings (torch.Tensor): the embeddings from the image encoder
114
+ image_pe (torch.Tensor): positional encoding with the shape of image_embeddings
115
+ sparse_prompt_embeddings (torch.Tensor): the embeddings of the points and boxes
116
+ dense_prompt_embeddings (torch.Tensor): the embeddings of the mask inputs
117
+ multimask_output (bool): Whether to return multiple masks or a single
119
+ mask.
+ repeat_image (bool): whether to repeat the image embeddings along the
+ batch dimension to match the number of prompt sets.
+ high_res_features (list(torch.Tensor) or none): optional high-resolution
+ feature maps for the mask decoder.
119
+
120
+ Returns:
121
+ torch.Tensor: batched predicted masks
122
+ torch.Tensor: batched predictions of mask quality
123
+ torch.Tensor: batched SAM token for mask output
+ torch.Tensor: batched object score logits
124
+ """
125
+ masks, iou_pred, mask_tokens_out, object_score_logits = self.predict_masks(
126
+ image_embeddings=image_embeddings,
127
+ image_pe=image_pe,
128
+ sparse_prompt_embeddings=sparse_prompt_embeddings,
129
+ dense_prompt_embeddings=dense_prompt_embeddings,
130
+ repeat_image=repeat_image,
131
+ high_res_features=high_res_features,
132
+ )
133
+
134
+ # Select the correct mask or masks for output
135
+ if multimask_output:
136
+ masks = masks[:, 1:, :, :]
137
+ iou_pred = iou_pred[:, 1:]
138
+ elif self.dynamic_multimask_via_stability and not self.training:
139
+ masks, iou_pred = self._dynamic_multimask_via_stability(masks, iou_pred)
140
+ else:
141
+ masks = masks[:, 0:1, :, :]
142
+ iou_pred = iou_pred[:, 0:1]
143
+
144
+ if multimask_output and self.use_multimask_token_for_obj_ptr:
145
+ sam_tokens_out = mask_tokens_out[:, 1:] # [b, 3, c] shape
146
+ else:
147
+ # Take the mask output token. Here we *always* use the token for single mask output.
148
+ # At test time, even if we track after 1-click (and using multimask_output=True),
149
+ # we still take the single mask token here. The rationale is that we always track
150
+ # after multiple clicks during training, so the past tokens seen during training
151
+ # are always the single mask token (and we'll let it be the object-memory token).
152
+ sam_tokens_out = mask_tokens_out[:, 0:1] # [b, 1, c] shape
153
+
154
+ # Prepare output
155
+ return masks, iou_pred, sam_tokens_out, object_score_logits
156
+
157
+ def predict_masks(
158
+ self,
159
+ image_embeddings: torch.Tensor,
160
+ image_pe: torch.Tensor,
161
+ sparse_prompt_embeddings: torch.Tensor,
162
+ dense_prompt_embeddings: torch.Tensor,
163
+ repeat_image: bool,
164
+ high_res_features: Optional[List[torch.Tensor]] = None,
165
+ ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
166
+ """Predicts masks. See 'forward' for more details."""
167
+ # Concatenate output tokens
168
+ s = 0
169
+ if self.pred_obj_scores:
170
+ output_tokens = torch.cat(
171
+ [
172
+ self.obj_score_token.weight,
173
+ self.iou_token.weight,
174
+ self.mask_tokens.weight,
175
+ ],
176
+ dim=0,
177
+ )
178
+ s = 1
179
+ else:
180
+ output_tokens = torch.cat([self.iou_token.weight, self.mask_tokens.weight], dim=0)
181
+ output_tokens = output_tokens.unsqueeze(0).expand(sparse_prompt_embeddings.size(0), -1, -1)
182
+ tokens = torch.cat((output_tokens, sparse_prompt_embeddings), dim=1)
183
+
184
+ # Expand per-image data in batch direction to be per-mask
185
+ if repeat_image:
186
+ src = torch.repeat_interleave(image_embeddings, tokens.shape[0], dim=0)
187
+ else:
188
+ assert image_embeddings.shape[0] == tokens.shape[0]
189
+ src = image_embeddings
190
+ src = src + dense_prompt_embeddings
191
+ assert (image_pe.size(0) == 1), "image_pe should have size 1 in batch dim (from `get_dense_pe()`)"
192
+ pos_src = torch.repeat_interleave(image_pe, tokens.shape[0], dim=0)
193
+ b, c, h, w = src.shape
194
+
195
+ # Run the transformer
196
+ hs, src = self.transformer(src, pos_src, tokens)
197
+ iou_token_out = hs[:, s, :]
198
+ mask_tokens_out = hs[:, s + 1:(s + 1 + self.num_mask_tokens), :]
199
+
200
+ # Upscale mask embeddings and predict masks using the mask tokens
201
+ src = src.transpose(1, 2).view(b, c, h, w)
202
+ if not self.use_high_res_features:
203
+ upscaled_embedding = self.output_upscaling(src)
204
+ else:
205
+ dc1, ln1, act1, dc2, act2 = self.output_upscaling
206
+ feat_s0, feat_s1 = high_res_features
207
+ upscaled_embedding = act1(ln1(dc1(src) + feat_s1))
208
+ upscaled_embedding = act2(dc2(upscaled_embedding) + feat_s0)
209
+
210
+ hyper_in_list: List[torch.Tensor] = []
211
+ for i in range(self.num_mask_tokens):
212
+ hyper_in_list.append(self.output_hypernetworks_mlps[i](mask_tokens_out[:, i, :]))
213
+ hyper_in = torch.stack(hyper_in_list, dim=1)
214
+ b, c, h, w = upscaled_embedding.shape
215
+ masks = (hyper_in @ upscaled_embedding.view(b, c, h * w)).view(b, -1, h, w)
216
+
217
+ # Generate mask quality predictions
218
+ iou_pred = self.iou_prediction_head(iou_token_out)
219
+ if self.pred_obj_scores:
220
+ assert s == 1
221
+ object_score_logits = self.pred_obj_score_head(hs[:, 0, :])
222
+ else:
223
+ # Obj scores logits - default to 10.0, i.e. assuming the object is present, sigmoid(10)=1
224
+ object_score_logits = 10.0 * iou_pred.new_ones(iou_pred.shape[0], 1)
225
+
226
+ return masks, iou_pred, mask_tokens_out, object_score_logits
227
+
228
+ def _get_stability_scores(self, mask_logits):
229
+ """
230
+ Compute stability scores of the mask logits based on the IoU between upper and
231
+ lower thresholds.
232
+ """
233
+ mask_logits = mask_logits.flatten(-2)
234
+ stability_delta = self.dynamic_multimask_stability_delta
235
+ area_i = torch.sum(mask_logits > stability_delta, dim=-1).float()
236
+ area_u = torch.sum(mask_logits > -stability_delta, dim=-1).float()
237
+ stability_scores = torch.where(area_u > 0, area_i / area_u, 1.0)
238
+ return stability_scores
239
+
240
+ def _dynamic_multimask_via_stability(self, all_mask_logits, all_iou_scores):
241
+ """
242
+ When outputting a single mask, if the stability score from the current single-mask
243
+ output (based on output token 0) falls below a threshold, we instead select from
244
+ multi-mask outputs (based on output token 1~3) the mask with the highest predicted
245
+ IoU score. This is intended to ensure a valid mask for both clicking and tracking.
246
+ """
247
+ # The best mask from multimask output tokens (1~3)
248
+ multimask_logits = all_mask_logits[:, 1:, :, :]
249
+ multimask_iou_scores = all_iou_scores[:, 1:]
250
+ best_scores_inds = torch.argmax(multimask_iou_scores, dim=-1)
251
+ batch_inds = torch.arange(multimask_iou_scores.size(0), device=all_iou_scores.device)
252
+ best_multimask_logits = multimask_logits[batch_inds, best_scores_inds]
253
+ best_multimask_logits = best_multimask_logits.unsqueeze(1)
254
+ best_multimask_iou_scores = multimask_iou_scores[batch_inds, best_scores_inds]
255
+ best_multimask_iou_scores = best_multimask_iou_scores.unsqueeze(1)
256
+
257
+ # The mask from singlemask output token 0 and its stability score
258
+ singlemask_logits = all_mask_logits[:, 0:1, :, :]
259
+ singlemask_iou_scores = all_iou_scores[:, 0:1]
260
+ stability_scores = self._get_stability_scores(singlemask_logits)
261
+ is_stable = stability_scores >= self.dynamic_multimask_stability_thresh
262
+
263
+ # Dynamically fall back to best multimask output upon low stability scores.
264
+ mask_logits_out = torch.where(
265
+ is_stable[..., None, None].expand_as(singlemask_logits),
266
+ singlemask_logits,
267
+ best_multimask_logits,
268
+ )
269
+ iou_scores_out = torch.where(
270
+ is_stable.expand_as(singlemask_iou_scores),
271
+ singlemask_iou_scores,
272
+ best_multimask_iou_scores,
273
+ )
274
+ return mask_logits_out, iou_scores_out
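A small numeric sketch of the stability score used above: the IoU between the mask binarized at the +delta and -delta logit thresholds. The values below are illustrative only, not part of this diff:

import torch

logits = torch.tensor([[[[-2.0, 0.02], [0.5, 3.0]]]])    # (B, 1, 2, 2) mask logits
delta = 0.05
area_i = (logits.flatten(-2) > delta).sum(-1).float()     # 2 pixels above +delta
area_u = (logits.flatten(-2) > -delta).sum(-1).float()    # 3 pixels above -delta
stability = torch.where(area_u > 0, area_i / area_u, torch.ones_like(area_u))
print(stability)  # tensor([[0.6667]]) -- below the 0.98 default threshold,
                  # so the decoder would fall back to the best multimask output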
sam2/modeling/sam/prompt_encoder.py ADDED
@@ -0,0 +1,188 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+
4
+ # This source code is licensed under the license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ from typing import Optional, Tuple, Type
8
+
9
+ import torch
10
+ from torch import nn
11
+
12
+ from sam2.modeling.position_encoding import PositionEmbeddingRandom
13
+ from sam2.modeling.sam2_utils import LayerNorm2d
14
+
15
+
16
+ class PromptEncoder(nn.Module):
17
+
18
+ def __init__(
19
+ self,
20
+ embed_dim: int,
21
+ image_embedding_size: Tuple[int, int],
22
+ input_image_size: Tuple[int, int],
23
+ mask_in_chans: int,
24
+ activation: Type[nn.Module] = nn.GELU,
25
+ ) -> None:
26
+ """
27
+ Encodes prompts for input to SAM's mask decoder.
28
+
29
+ Arguments:
30
+ embed_dim (int): The prompts' embedding dimension
31
+ image_embedding_size (tuple(int, int)): The spatial size of the
32
+ image embedding, as (H, W).
33
+ input_image_size (tuple(int, int)): The padded size of the image as input
34
+ to the image encoder, as (H, W).
35
+ mask_in_chans (int): The number of hidden channels used for
36
+ encoding input masks.
37
+ activation (nn.Module): The activation to use when encoding
38
+ input masks.
39
+ """
40
+ super().__init__()
41
+ self.embed_dim = embed_dim
42
+ self.input_image_size = input_image_size
43
+ self.image_embedding_size = image_embedding_size
44
+ self.pe_layer = PositionEmbeddingRandom(embed_dim // 2)
45
+
46
+ self.num_point_embeddings: int = 4 # pos/neg point + 2 box corners
47
+ point_embeddings = [nn.Embedding(1, embed_dim) for i in range(self.num_point_embeddings)]
48
+ self.point_embeddings = nn.ModuleList(point_embeddings)
49
+ self.not_a_point_embed = nn.Embedding(1, embed_dim)
50
+
51
+ self.mask_input_size = (
52
+ 4 * image_embedding_size[0],
53
+ 4 * image_embedding_size[1],
54
+ )
55
+ self.mask_downscaling = nn.Sequential(
56
+ nn.Conv2d(1, mask_in_chans // 4, kernel_size=2, stride=2),
57
+ LayerNorm2d(mask_in_chans // 4),
58
+ activation(),
59
+ nn.Conv2d(mask_in_chans // 4, mask_in_chans, kernel_size=2, stride=2),
60
+ LayerNorm2d(mask_in_chans),
61
+ activation(),
62
+ nn.Conv2d(mask_in_chans, embed_dim, kernel_size=1),
63
+ )
64
+ self.no_mask_embed = nn.Embedding(1, embed_dim)
65
+
66
+ def get_dense_pe(self) -> torch.Tensor:
67
+ """
68
+ Returns the positional encoding used to encode point prompts,
69
+ applied to a dense set of points the shape of the image encoding.
70
+
71
+ Returns:
72
+ torch.Tensor: Positional encoding with shape
73
+ 1x(embed_dim)x(embedding_h)x(embedding_w)
74
+ """
75
+ return self.pe_layer(self.image_embedding_size).unsqueeze(0)
76
+
77
+ def _embed_points(
78
+ self,
79
+ points: torch.Tensor,
80
+ labels: torch.Tensor,
81
+ pad: bool,
82
+ ) -> torch.Tensor:
83
+ """Embeds point prompts."""
84
+ points = points + 0.5 # Shift to center of pixel
85
+ if pad:
86
+ padding_point = torch.zeros((points.shape[0], 1, 2), device=points.device)
87
+ padding_label = -torch.ones((labels.shape[0], 1), device=labels.device)
88
+ points = torch.cat([points, padding_point], dim=1)
89
+ labels = torch.cat([labels, padding_label], dim=1)
90
+ point_embedding = self.pe_layer.forward_with_coords(points, self.input_image_size)
91
+ point_embedding = torch.where((labels == -1).unsqueeze(-1),
92
+ torch.zeros_like(point_embedding) + self.not_a_point_embed.weight,
93
+ point_embedding)
94
+ point_embedding = torch.where((labels == 0).unsqueeze(-1), point_embedding + self.point_embeddings[0].weight,
95
+ point_embedding)
96
+ point_embedding = torch.where((labels == 1).unsqueeze(-1), point_embedding + self.point_embeddings[1].weight,
97
+ point_embedding)
98
+ point_embedding = torch.where((labels == 2).unsqueeze(-1), point_embedding + self.point_embeddings[2].weight,
99
+ point_embedding)
100
+ point_embedding = torch.where((labels == 3).unsqueeze(-1), point_embedding + self.point_embeddings[3].weight,
101
+ point_embedding)
102
+ return point_embedding
103
+
104
+ def _embed_boxes(self, boxes: torch.Tensor) -> torch.Tensor:
105
+ """Embeds box prompts."""
106
+ boxes = boxes + 0.5 # Shift to center of pixel
107
+ coords = boxes.reshape(-1, 2, 2)
108
+ corner_embedding = self.pe_layer.forward_with_coords(coords, self.input_image_size)
109
+ corner_embedding[:, 0, :] += self.point_embeddings[2].weight
110
+ corner_embedding[:, 1, :] += self.point_embeddings[3].weight
111
+ return corner_embedding
112
+
113
+ def _embed_masks(self, masks: torch.Tensor) -> torch.Tensor:
114
+ """Embeds mask inputs."""
115
+ mask_embedding = self.mask_downscaling(masks)
116
+ return mask_embedding
117
+
118
+ def _get_batch_size(
119
+ self,
120
+ points: Optional[Tuple[torch.Tensor, torch.Tensor]],
121
+ boxes: Optional[torch.Tensor],
122
+ masks: Optional[torch.Tensor],
123
+ hidden: Optional[torch.Tensor],
124
+ ) -> int:
125
+ """
126
+ Gets the batch size of the output given the batch size of the input prompts.
127
+ """
128
+ if points is not None:
129
+ return points[0].shape[0]
130
+ elif boxes is not None:
131
+ return boxes.shape[0]
132
+ elif masks is not None:
133
+ return masks.shape[0]
134
+ elif hidden is not None:
135
+ return hidden.shape[0]
136
+ else:
137
+ return 1
138
+
139
+ def _get_device(self) -> torch.device:
140
+ return self.point_embeddings[0].weight.device
141
+
142
+ def forward(
143
+ self,
144
+ points: Optional[Tuple[torch.Tensor, torch.Tensor]],
145
+ boxes: Optional[torch.Tensor],
146
+ masks: Optional[torch.Tensor],
147
+ hidden: Optional[torch.Tensor] = None,
148
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
149
+ """
150
+ Embeds different types of prompts, returning both sparse and dense
151
+ embeddings.
152
+
153
+ Arguments:
154
+ points (tuple(torch.Tensor, torch.Tensor) or none): point coordinates
155
+ and labels to embed.
156
+ boxes (torch.Tensor or none): boxes to embed
157
+ masks (torch.Tensor or none): masks to embed
+ hidden (torch.Tensor or none): extra sparse prompt embeddings to append
158
+
159
+ Returns:
160
+ torch.Tensor: sparse embeddings for the points and boxes, with shape
161
+ BxNx(embed_dim), where N is determined by the number of input points
162
+ and boxes.
163
+ torch.Tensor: dense embeddings for the masks, in the shape
164
+ Bx(embed_dim)x(embed_H)x(embed_W)
165
+ """
166
+ bs = self._get_batch_size(points, boxes, masks, hidden)
167
+ sparse_embeddings = torch.empty((bs, 0, self.embed_dim),
168
+ dtype=self.no_mask_embed.weight.dtype,
169
+ device=self._get_device())
170
+ if points is not None:
171
+ coords, labels = points
172
+ point_embeddings = self._embed_points(coords, labels, pad=(boxes is None))
173
+ sparse_embeddings = torch.cat([sparse_embeddings, point_embeddings], dim=1)
174
+ if boxes is not None:
175
+ box_embeddings = self._embed_boxes(boxes)
176
+ sparse_embeddings = torch.cat([sparse_embeddings, box_embeddings], dim=1)
177
+
178
+ if hidden is not None:
179
+ sparse_embeddings = torch.cat([sparse_embeddings, hidden], dim=1)
180
+
181
+ if masks is not None:
182
+ dense_embeddings = self._embed_masks(masks)
183
+ else:
184
+ dense_embeddings = self.no_mask_embed.weight.reshape(1, -1, 1, 1).expand(
185
+ bs, -1, self.image_embedding_size[0],
186
+ self.image_embedding_size[1])
187
+
188
+ return sparse_embeddings, dense_embeddings
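A minimal usage sketch of the prompt encoder above with a single positive click. The sizes assume 1024-pixel inputs with 64x64 image embeddings (the SAM2 defaults); the snippet is illustrative only, not part of this diff:

import torch

enc = PromptEncoder(
    embed_dim=256,
    image_embedding_size=(64, 64),
    input_image_size=(1024, 1024),
    mask_in_chans=16,
)
points = torch.tensor([[[500.0, 375.0]]])    # (B=1, P=1, 2) pixel coords
labels = torch.tensor([[1.0]])               # 1 = positive click
sparse, dense = enc(points=(points, labels), boxes=None, masks=None)
assert sparse.shape == (1, 2, 256)           # the click + one padding point
assert dense.shape == (1, 256, 64, 64)       # no-mask dense embedding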
sam2/modeling/sam/transformer.py ADDED
@@ -0,0 +1,303 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+
4
+ # This source code is licensed under the license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ import math
8
+ from functools import partial
9
+ from typing import Optional, Tuple, Type
10
+
11
+ import torch
12
+ import torch.nn.functional as F
13
+ from torch import Tensor, nn
14
+
15
+ from sam2.modeling.position_encoding import apply_rotary_enc, compute_axial_cis
16
+ from sam2.modeling.sam2_utils import MLP
17
+
18
+
19
+ class TwoWayTransformer(nn.Module):
20
+
21
+ def __init__(
22
+ self,
23
+ depth: int,
24
+ embedding_dim: int,
25
+ num_heads: int,
26
+ mlp_dim: int,
27
+ activation: Type[nn.Module] = nn.ReLU,
28
+ attention_downsample_rate: int = 2,
29
+ ) -> None:
30
+ """
31
+ A transformer decoder that attends to an input image using
32
+ queries whose positional embedding is supplied.
33
+
34
+ Args:
35
+ depth (int): number of layers in the transformer
36
+ embedding_dim (int): the channel dimension for the input embeddings
37
+ num_heads (int): the number of heads for multihead attention. Must
38
+ divide embedding_dim
39
+ mlp_dim (int): the channel dimension internal to the MLP block
40
+ activation (nn.Module): the activation to use in the MLP block
41
+ """
42
+ super().__init__()
43
+ self.depth = depth
44
+ self.embedding_dim = embedding_dim
45
+ self.num_heads = num_heads
46
+ self.mlp_dim = mlp_dim
47
+ self.layers = nn.ModuleList()
48
+
49
+ for i in range(depth):
50
+ self.layers.append(
51
+ TwoWayAttentionBlock(
52
+ embedding_dim=embedding_dim,
53
+ num_heads=num_heads,
54
+ mlp_dim=mlp_dim,
55
+ activation=activation,
56
+ attention_downsample_rate=attention_downsample_rate,
57
+ skip_first_layer_pe=(i == 0),
58
+ ))
59
+
60
+ self.final_attn_token_to_image = Attention(embedding_dim, num_heads, downsample_rate=attention_downsample_rate)
61
+ self.norm_final_attn = nn.LayerNorm(embedding_dim)
62
+
63
+ def forward(
64
+ self,
65
+ image_embedding: Tensor,
66
+ image_pe: Tensor,
67
+ point_embedding: Tensor,
68
+ ) -> Tuple[Tensor, Tensor]:
69
+ """
70
+ Args:
71
+ image_embedding (torch.Tensor): image to attend to. Should be shape
72
+ B x embedding_dim x h x w for any h and w.
73
+ image_pe (torch.Tensor): the positional encoding to add to the image. Must
74
+ have the same shape as image_embedding.
75
+ point_embedding (torch.Tensor): the embedding to add to the query points.
76
+ Must have shape B x N_points x embedding_dim for any N_points.
77
+
78
+ Returns:
79
+ torch.Tensor: the processed point_embedding
80
+ torch.Tensor: the processed image_embedding
81
+ """
82
+ # BxCxHxW -> BxHWxC == B x N_image_tokens x C
83
+ bs, c, h, w = image_embedding.shape
84
+ image_embedding = image_embedding.flatten(2).permute(0, 2, 1)
85
+ image_pe = image_pe.flatten(2).permute(0, 2, 1)
86
+
87
+ # Prepare queries
88
+ queries = point_embedding
89
+ keys = image_embedding
90
+
91
+ # Apply transformer blocks and final layernorm
92
+ for layer in self.layers:
93
+ queries, keys = layer(
94
+ queries=queries,
95
+ keys=keys,
96
+ query_pe=point_embedding,
97
+ key_pe=image_pe,
98
+ )
99
+
100
+ # Apply the final attention layer from the points to the image
101
+ q = queries + point_embedding
102
+ k = keys + image_pe
103
+ attn_out = self.final_attn_token_to_image(q=q, k=k, v=keys)
104
+ queries = queries + attn_out
105
+ queries = self.norm_final_attn(queries)
106
+
107
+ return queries, keys
108
+
109
+
110
+ class TwoWayAttentionBlock(nn.Module):
111
+
112
+ def __init__(
113
+ self,
114
+ embedding_dim: int,
115
+ num_heads: int,
116
+ mlp_dim: int = 2048,
117
+ activation: Type[nn.Module] = nn.ReLU,
118
+ attention_downsample_rate: int = 2,
119
+ skip_first_layer_pe: bool = False,
120
+ ) -> None:
121
+ """
122
+ A transformer block with four layers: (1) self-attention of sparse
123
+ inputs, (2) cross attention of sparse inputs to dense inputs, (3) mlp
124
+ block on sparse inputs, and (4) cross attention of dense inputs to sparse
125
+ inputs.
126
+
127
+ Arguments:
128
+ embedding_dim (int): the channel dimension of the embeddings
129
+ num_heads (int): the number of heads in the attention layers
130
+ mlp_dim (int): the hidden dimension of the mlp block
131
+ activation (nn.Module): the activation of the mlp block
132
+ skip_first_layer_pe (bool): skip the PE on the first layer
133
+ """
134
+ super().__init__()
135
+ self.self_attn = Attention(embedding_dim, num_heads)
136
+ self.norm1 = nn.LayerNorm(embedding_dim)
137
+
138
+ self.cross_attn_token_to_image = Attention(embedding_dim, num_heads, downsample_rate=attention_downsample_rate)
139
+ self.norm2 = nn.LayerNorm(embedding_dim)
140
+
141
+ self.mlp = MLP(embedding_dim, mlp_dim, embedding_dim, num_layers=2, activation=activation)
142
+ self.norm3 = nn.LayerNorm(embedding_dim)
143
+
144
+ self.norm4 = nn.LayerNorm(embedding_dim)
145
+ self.cross_attn_image_to_token = Attention(embedding_dim, num_heads, downsample_rate=attention_downsample_rate)
146
+
147
+ self.skip_first_layer_pe = skip_first_layer_pe
148
+
149
+ def forward(self, queries: Tensor, keys: Tensor, query_pe: Tensor, key_pe: Tensor) -> Tuple[Tensor, Tensor]:
150
+ # Self attention block
151
+ if self.skip_first_layer_pe:
152
+ queries = self.self_attn(q=queries, k=queries, v=queries)
153
+ else:
154
+ q = queries + query_pe
155
+ attn_out = self.self_attn(q=q, k=q, v=queries)
156
+ queries = queries + attn_out
157
+ queries = self.norm1(queries)
158
+
159
+ # Cross attention block, tokens attending to image embedding
160
+ q = queries + query_pe
161
+ k = keys + key_pe
162
+ attn_out = self.cross_attn_token_to_image(q=q, k=k, v=keys)
163
+ queries = queries + attn_out
164
+ queries = self.norm2(queries)
165
+
166
+ # MLP block
167
+ mlp_out = self.mlp(queries)
168
+ queries = queries + mlp_out
169
+ queries = self.norm3(queries)
170
+
171
+ # Cross attention block, image embedding attending to tokens
172
+ q = queries + query_pe
173
+ k = keys + key_pe
174
+ attn_out = self.cross_attn_image_to_token(q=k, k=q, v=queries)
175
+ keys = keys + attn_out
176
+ keys = self.norm4(keys)
177
+
178
+ return queries, keys
179
+
180
+
181
+ class Attention(nn.Module):
182
+ """
183
+ An attention layer that allows for downscaling the size of the embedding
184
+ after projection to queries, keys, and values.
185
+ """
186
+
187
+ def __init__(
188
+ self,
189
+ embedding_dim: int,
190
+ num_heads: int,
191
+ downsample_rate: int = 1,
192
+ dropout: float = 0.0,
193
+ kv_in_dim: Optional[int] = None,
194
+ ) -> None:
195
+ super().__init__()
196
+ self.embedding_dim = embedding_dim
197
+ self.kv_in_dim = kv_in_dim if kv_in_dim is not None else embedding_dim
198
+ self.internal_dim = embedding_dim // downsample_rate
199
+ self.num_heads = num_heads
200
+ assert (self.internal_dim % num_heads == 0), "num_heads must divide embedding_dim."
201
+
202
+ self.q_proj = nn.Linear(embedding_dim, self.internal_dim)
203
+ self.k_proj = nn.Linear(self.kv_in_dim, self.internal_dim)
204
+ self.v_proj = nn.Linear(self.kv_in_dim, self.internal_dim)
205
+ self.out_proj = nn.Linear(self.internal_dim, embedding_dim)
206
+
207
+ self.dropout_p = dropout
208
+
209
+ def _separate_heads(self, x: Tensor, num_heads: int) -> Tensor:
210
+ b, n, c = x.shape
211
+ x = x.reshape(b, n, num_heads, c // num_heads)
212
+ return x.transpose(1, 2) # B x N_heads x N_tokens x C_per_head
213
+
214
+ def _recombine_heads(self, x: Tensor) -> Tensor:
215
+ b, n_heads, n_tokens, c_per_head = x.shape
216
+ x = x.transpose(1, 2)
217
+ return x.reshape(b, n_tokens, n_heads * c_per_head) # B x N_tokens x C
218
+
219
+ def forward(self, q: Tensor, k: Tensor, v: Tensor) -> Tensor:
220
+ # Input projections
221
+ q = self.q_proj(q)
222
+ k = self.k_proj(k)
223
+ v = self.v_proj(v)
224
+
225
+ # Separate into heads
226
+ q = self._separate_heads(q, self.num_heads)
227
+ k = self._separate_heads(k, self.num_heads)
228
+ v = self._separate_heads(v, self.num_heads)
229
+
230
+ dropout_p = self.dropout_p if self.training else 0.0
231
+ # Attention
232
+ out = F.scaled_dot_product_attention(q, k, v, dropout_p=dropout_p)
233
+
234
+ out = self._recombine_heads(out)
235
+ out = self.out_proj(out)
236
+
237
+ return out
238
+
239
+
240
+ class RoPEAttention(Attention):
241
+ """Attention with rotary position encoding."""
242
+
243
+ def __init__(
244
+ self,
245
+ *args,
246
+ rope_theta=10000.0,
247
+ # whether to repeat q rope to match k length
248
+ # this is needed for cross-attention to memories
249
+ rope_k_repeat=False,
250
+ feat_sizes=(64, 64), # [w, h] for stride 16 feats at 1024 resolution
251
+ **kwargs,
252
+ ):
253
+ super().__init__(*args, **kwargs)
254
+
255
+ self.compute_cis = partial(compute_axial_cis, dim=self.internal_dim // self.num_heads, theta=rope_theta)
256
+ freqs_cis = self.compute_cis(end_x=feat_sizes[0], end_y=feat_sizes[1])
257
+ try:
258
+ import torch_npu
259
+ has_npu = torch_npu.npu.is_available()
260
+ except ImportError:
261
+ has_npu = False
262
+ if torch.cuda.is_available():
263
+ freqs_cis = freqs_cis.to("cuda")
264
+ elif has_npu:
265
+ freqs_cis = freqs_cis.to("npu")
266
+ self.freqs_cis = freqs_cis
267
+ self.rope_k_repeat = rope_k_repeat
268
+
269
+ def forward(self, q: Tensor, k: Tensor, v: Tensor, num_k_exclude_rope: int = 0) -> Tensor:
270
+ # Input projections
271
+ q = self.q_proj(q)
272
+ k = self.k_proj(k)
273
+ v = self.v_proj(v)
274
+
275
+ # Separate into heads
276
+ q = self._separate_heads(q, self.num_heads)
277
+ k = self._separate_heads(k, self.num_heads)
278
+ v = self._separate_heads(v, self.num_heads)
279
+
280
+ # Apply rotary position encoding
281
+ w = h = math.sqrt(q.shape[-2])
282
+ self.freqs_cis = self.freqs_cis.to(q.device)
283
+ if self.freqs_cis.shape[0] != q.shape[-2]:
284
+ self.freqs_cis = self.compute_cis(end_x=w, end_y=h).to(q.device)
285
+ if q.shape[-2] != k.shape[-2]:
286
+ assert self.rope_k_repeat
287
+
288
+ num_k_rope = k.size(-2) - num_k_exclude_rope
289
+ q, k[:, :, :num_k_rope] = apply_rotary_enc(
290
+ q,
291
+ k[:, :, :num_k_rope],
292
+ freqs_cis=self.freqs_cis,
293
+ repeat_freqs_k=self.rope_k_repeat,
294
+ )
295
+
296
+ dropout_p = self.dropout_p if self.training else 0.0
297
+ # Attention
298
+ out = F.scaled_dot_product_attention(q, k, v, dropout_p=dropout_p)
299
+
300
+ out = self._recombine_heads(out)
301
+ out = self.out_proj(out)
302
+
303
+ return out
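A short shape walk-through for the Attention block above with downsample_rate=2: channels are projected from 256 down to 128 internally, then back up on output. Illustrative only, not part of this diff:

import torch

attn = Attention(embedding_dim=256, num_heads=8, downsample_rate=2)
q = torch.randn(1, 5, 256)       # 5 query tokens
kv = torch.randn(1, 100, 256)    # 100 image tokens
out = attn(q=q, k=kv, v=kv)
assert out.shape == (1, 5, 256)  # heads recombined, projected back to 256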
sam2/modeling/sam2_base.py ADDED
@@ -0,0 +1,882 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+
4
+ # This source code is licensed under the license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ import torch
8
+ import torch.distributed
9
+ import torch.nn.functional as F
10
+ from torch.nn.init import trunc_normal_
11
+
12
+ from sam2.modeling.sam2_utils import MLP, get_1d_sine_pe, select_closest_cond_frames
13
+ from sam2.modeling.sam.mask_decoder import MaskDecoder
14
+ from sam2.modeling.sam.prompt_encoder import PromptEncoder
15
+ from sam2.modeling.sam.transformer import TwoWayTransformer
16
+
17
+ # a large negative value as a placeholder score for missing objects
18
+ NO_OBJ_SCORE = -1024.0
19
+
20
+
21
+ class SAM2Base(torch.nn.Module):
22
+
23
+ def __init__(
24
+ self,
25
+ image_encoder,
26
+ memory_attention,
27
+ memory_encoder,
28
+ num_maskmem=7, # default 1 input frame + 6 previous frames
29
+ image_size=512,
30
+ backbone_stride=16, # stride of the image backbone output
31
+ sigmoid_scale_for_mem_enc=1.0, # scale factor for mask sigmoid prob
32
+ sigmoid_bias_for_mem_enc=0.0, # bias factor for mask sigmoid prob
33
+ # During evaluation, whether to binarize the sigmoid mask logits on interacted frames with clicks
34
+ binarize_mask_from_pts_for_mem_enc=False,
35
+ use_mask_input_as_output_without_sam=False, # on frames with mask input, whether to directly output the input mask without using a SAM prompt encoder + mask decoder
36
+ # The maximum number of conditioning frames to participate in the memory attention (-1 means no limit; if there are more conditioning frames than this limit,
37
+ # we only cross-attend to the temporally closest `max_cond_frames_in_attn` conditioning frames in the encoder when tracking each frame). This gives the model
38
+ # a temporal locality when handling a large number of annotated frames (since closer frames should be more important) and also avoids GPU OOM.
39
+ max_cond_frames_in_attn=-1,
40
+ # on the first frame, whether to directly add the no-memory embedding to the image feature
41
+ # (instead of using the transformer encoder)
42
+ directly_add_no_mem_embed=False,
43
+ # whether to use high-resolution feature maps in the SAM mask decoder
44
+ use_high_res_features_in_sam=False,
45
+ # whether to output multiple (3) masks for the first click on initial conditioning frames
46
+ multimask_output_in_sam=False,
47
+ # the minimum and maximum number of clicks to use multimask_output_in_sam (only relevant when `multimask_output_in_sam=True`;
48
+ # default is 1 for both, meaning that only the first click gives multimask output; also note that a box counts as two points)
49
+ multimask_min_pt_num=1,
50
+ multimask_max_pt_num=1,
51
+ # whether to also use multimask output for tracking (not just for the first click on initial conditioning frames; only relevant when `multimask_output_in_sam=True`)
52
+ multimask_output_for_tracking=False,
53
+ # Whether to use multimask tokens for obj ptr; Only relevant when both
54
+ # use_obj_ptrs_in_encoder=True and multimask_output_for_tracking=True
55
+ use_multimask_token_for_obj_ptr: bool = False,
56
+ # whether to use sigmoid to restrict ious prediction to [0-1]
57
+ iou_prediction_use_sigmoid=False,
58
+ # The memory bank's temporal stride during evaluation (i.e. the `r` parameter in XMem and Cutie; XMem and Cutie use r=5).
59
+ # For r>1, the (self.num_maskmem - 1) non-conditioning memory frames consist of
60
+ # (self.num_maskmem - 2) nearest frames from every r-th frames, plus the last frame.
61
+ memory_temporal_stride_for_eval=1,
62
+ # whether to apply non-overlapping constraints on the object masks in the memory encoder during evaluation (to avoid/alleviate superposing masks)
63
+ non_overlap_masks_for_mem_enc=False,
64
+ # whether to cross-attend to object pointers from other frames (based on SAM output tokens) in the encoder
65
+ use_obj_ptrs_in_encoder=False,
66
+ # the maximum number of object pointers from other frames in encoder cross attention (only relevant when `use_obj_ptrs_in_encoder=True`)
67
+ max_obj_ptrs_in_encoder=16,
68
+ # whether to add temporal positional encoding to the object pointers in the encoder (only relevant when `use_obj_ptrs_in_encoder=True`)
69
+ add_tpos_enc_to_obj_ptrs=True,
70
+ # whether to add an extra linear projection layer for the temporal positional encoding in the object pointers to avoid potential interference
71
+ # with spatial positional encoding (only relevant when both `use_obj_ptrs_in_encoder=True` and `add_tpos_enc_to_obj_ptrs=True`)
72
+ proj_tpos_enc_in_obj_ptrs=False,
73
+ # whether to use signed distance (instead of unsigned absolute distance) in the temporal positional encoding in the object pointers
74
+ # (only relevant when both `use_obj_ptrs_in_encoder=True` and `add_tpos_enc_to_obj_ptrs=True`)
75
+ use_signed_tpos_enc_to_obj_ptrs=False,
76
+ # whether to only attend to object pointers in the past (before the current frame) in the encoder during evaluation
77
+ # (only relevant when `use_obj_ptrs_in_encoder=True`; this might avoid pointer information too far in the future to distract the initial tracking)
78
+ only_obj_ptrs_in_the_past_for_eval=False,
79
+ # Whether to predict if there is an object in the frame
80
+ pred_obj_scores: bool = False,
81
+ # Whether to use an MLP to predict object scores
82
+ pred_obj_scores_mlp: bool = False,
83
+ # Only relevant if pred_obj_scores=True and use_obj_ptrs_in_encoder=True;
84
+ # Whether to have a fixed no obj pointer when there is no object present
85
+ # or to use it as an additive embedding with obj_ptr produced by decoder
86
+ fixed_no_obj_ptr: bool = False,
87
+ # Soft no object, i.e. mix in no_obj_ptr softly,
88
+ # hope to make recovery easier if there is a mistake and mitigate accumulation of errors
89
+ soft_no_obj_ptr: bool = False,
90
+ use_mlp_for_obj_ptr_proj: bool = False,
91
+ # add no obj embedding to spatial frames
92
+ no_obj_embed_spatial: bool = False,
93
+ # extra arguments used to construct the SAM mask decoder; if not None, it should be a dict of kwargs to be passed into `MaskDecoder` class.
94
+ sam_mask_decoder_extra_args=None,
95
+ compile_image_encoder: bool = False,
96
+ **kwargs,
97
+ ):
98
+ super().__init__()
99
+
100
+ # Part 1: the image backbone
101
+ self.image_encoder = image_encoder
102
+ # Use level 0, 1, 2 for high-res setting, or just level 2 for the default setting
103
+ self.use_high_res_features_in_sam = use_high_res_features_in_sam
104
+ self.num_feature_levels = 3 if use_high_res_features_in_sam else 1
105
+ self.use_obj_ptrs_in_encoder = use_obj_ptrs_in_encoder
106
+ self.max_obj_ptrs_in_encoder = max_obj_ptrs_in_encoder
107
+ if use_obj_ptrs_in_encoder:
108
+ # A conv layer to downsample the mask prompt to stride 4 (the same stride as
109
+ # low-res SAM mask logits) and to change its scales from 0~1 to SAM logit scale,
110
+ # so that it can be fed into the SAM mask decoder to generate a pointer.
111
+ self.mask_downsample = torch.nn.Conv2d(1, 1, kernel_size=4, stride=4)
112
+ self.add_tpos_enc_to_obj_ptrs = add_tpos_enc_to_obj_ptrs
113
+ if proj_tpos_enc_in_obj_ptrs:
114
+ assert add_tpos_enc_to_obj_ptrs # these options need to be used together
115
+ self.proj_tpos_enc_in_obj_ptrs = proj_tpos_enc_in_obj_ptrs
116
+ self.use_signed_tpos_enc_to_obj_ptrs = use_signed_tpos_enc_to_obj_ptrs
117
+ self.only_obj_ptrs_in_the_past_for_eval = only_obj_ptrs_in_the_past_for_eval
118
+
119
+ # Part 2: memory attention to condition current frame's visual features
120
+ # with memories (and obj ptrs) from past frames
121
+ self.memory_attention = memory_attention
122
+ self.hidden_dim = image_encoder.neck.d_model
123
+
124
+ # Part 3: memory encoder for the previous frame's outputs
125
+ self.memory_encoder = memory_encoder
126
+ self.mem_dim = self.hidden_dim
127
+ if hasattr(self.memory_encoder, "out_proj") and hasattr(self.memory_encoder.out_proj, "weight"):
128
+ # if there is compression of memories along channel dim
129
+ self.mem_dim = self.memory_encoder.out_proj.weight.shape[0]
130
+ self.num_maskmem = num_maskmem # Number of memories accessible
131
+ # Temporal encoding of the memories
132
+ self.maskmem_tpos_enc = torch.nn.Parameter(torch.zeros(num_maskmem, 1, 1, self.mem_dim))
133
+ trunc_normal_(self.maskmem_tpos_enc, std=0.02)
134
+ # a single token to indicate no memory embedding from previous frames
135
+ self.no_mem_embed = torch.nn.Parameter(torch.zeros(1, 1, self.hidden_dim))
136
+ self.no_mem_pos_enc = torch.nn.Parameter(torch.zeros(1, 1, self.hidden_dim))
137
+ trunc_normal_(self.no_mem_embed, std=0.02)
138
+ trunc_normal_(self.no_mem_pos_enc, std=0.02)
139
+ self.directly_add_no_mem_embed = directly_add_no_mem_embed
140
+ # Apply sigmoid to the output raw mask logits (to turn them from
141
+ # range (-inf, +inf) to range (0, 1)) before feeding them into the memory encoder
142
+ self.sigmoid_scale_for_mem_enc = sigmoid_scale_for_mem_enc
143
+ self.sigmoid_bias_for_mem_enc = sigmoid_bias_for_mem_enc
144
+ self.binarize_mask_from_pts_for_mem_enc = binarize_mask_from_pts_for_mem_enc
145
+ self.non_overlap_masks_for_mem_enc = non_overlap_masks_for_mem_enc
146
+ self.memory_temporal_stride_for_eval = memory_temporal_stride_for_eval
147
+ # On frames with mask input, whether to directly output the input mask without
148
+ # using a SAM prompt encoder + mask decoder
149
+ self.use_mask_input_as_output_without_sam = use_mask_input_as_output_without_sam
150
+ self.multimask_output_in_sam = multimask_output_in_sam
151
+ self.multimask_min_pt_num = multimask_min_pt_num
152
+ self.multimask_max_pt_num = multimask_max_pt_num
153
+ self.multimask_output_for_tracking = multimask_output_for_tracking
154
+ self.use_multimask_token_for_obj_ptr = use_multimask_token_for_obj_ptr
155
+ self.iou_prediction_use_sigmoid = iou_prediction_use_sigmoid
156
+
157
+ # Part 4: SAM-style prompt encoder (for both mask and point inputs)
158
+ # and SAM-style mask decoder for the final mask output
159
+ self.image_size = image_size
160
+ self.backbone_stride = backbone_stride
161
+ self.sam_mask_decoder_extra_args = sam_mask_decoder_extra_args
162
+ self.pred_obj_scores = pred_obj_scores
163
+ self.pred_obj_scores_mlp = pred_obj_scores_mlp
164
+ self.fixed_no_obj_ptr = fixed_no_obj_ptr
165
+ self.soft_no_obj_ptr = soft_no_obj_ptr
166
+ if self.fixed_no_obj_ptr:
167
+ assert self.pred_obj_scores
168
+ assert self.use_obj_ptrs_in_encoder
169
+ if self.pred_obj_scores and self.use_obj_ptrs_in_encoder:
170
+ self.no_obj_ptr = torch.nn.Parameter(torch.zeros(1, self.hidden_dim))
171
+ trunc_normal_(self.no_obj_ptr, std=0.02)
172
+ self.use_mlp_for_obj_ptr_proj = use_mlp_for_obj_ptr_proj
173
+ self.no_obj_embed_spatial = None
174
+ if no_obj_embed_spatial:
175
+ self.no_obj_embed_spatial = torch.nn.Parameter(torch.zeros(1, self.mem_dim))
176
+ trunc_normal_(self.no_obj_embed_spatial, std=0.02)
177
+
178
+ self._build_sam_heads()
179
+ self.max_cond_frames_in_attn = max_cond_frames_in_attn
180
+
181
+ # Model compilation
182
+ if compile_image_encoder:
183
+ # Compile the forward function (not the full module) to allow loading checkpoints.
184
+ print("Image encoder compilation is enabled. First forward pass will be slow.")
185
+ self.image_encoder.forward = torch.compile(
186
+ self.image_encoder.forward,
187
+ mode="max-autotune",
188
+ fullgraph=True,
189
+ dynamic=False,
190
+ )
191
+
192
+ @property
193
+ def device(self):
194
+ return next(self.parameters()).device
195
+
196
+ def forward(self, *args, **kwargs):
197
+ raise NotImplementedError(
198
+ "Please use the corresponding methods in SAM2VideoPredictor for inference or SAM2Train for training/fine-tuning"
199
+ "See notebooks/video_predictor_example.ipynb for an inference example.")
200
+
201
+ def _build_sam_heads(self):
202
+ """Build SAM-style prompt encoder and mask decoder."""
203
+ self.sam_prompt_embed_dim = self.hidden_dim
204
+ self.sam_image_embedding_size = self.image_size // self.backbone_stride
205
+
206
+ # build PromptEncoder and MaskDecoder from SAM
207
+ # (their hyperparameters like `mask_in_chans=16` are from SAM code)
208
+ self.sam_prompt_encoder = PromptEncoder(
209
+ embed_dim=self.sam_prompt_embed_dim,
210
+ image_embedding_size=(
211
+ self.sam_image_embedding_size,
212
+ self.sam_image_embedding_size,
213
+ ),
214
+ input_image_size=(self.image_size, self.image_size),
215
+ mask_in_chans=16,
216
+ )
217
+ self.sam_mask_decoder = MaskDecoder(
218
+ num_multimask_outputs=3,
219
+ transformer=TwoWayTransformer(
220
+ depth=2,
221
+ embedding_dim=self.sam_prompt_embed_dim,
222
+ mlp_dim=2048,
223
+ num_heads=8,
224
+ ),
225
+ transformer_dim=self.sam_prompt_embed_dim,
226
+ iou_head_depth=3,
227
+ iou_head_hidden_dim=256,
228
+ use_high_res_features=self.use_high_res_features_in_sam,
229
+ iou_prediction_use_sigmoid=self.iou_prediction_use_sigmoid,
230
+ pred_obj_scores=self.pred_obj_scores,
231
+ pred_obj_scores_mlp=self.pred_obj_scores_mlp,
232
+ use_multimask_token_for_obj_ptr=self.use_multimask_token_for_obj_ptr,
233
+ **(self.sam_mask_decoder_extra_args or {}),
234
+ )
235
+ if self.use_obj_ptrs_in_encoder:
236
+ # a linear projection on SAM output tokens to turn them into object pointers
237
+ self.obj_ptr_proj = torch.nn.Linear(self.hidden_dim, self.hidden_dim)
238
+ if self.use_mlp_for_obj_ptr_proj:
239
+ self.obj_ptr_proj = MLP(self.hidden_dim, self.hidden_dim, self.hidden_dim, 3)
240
+ else:
241
+ self.obj_ptr_proj = torch.nn.Identity()
242
+ if self.proj_tpos_enc_in_obj_ptrs:
243
+ # a linear projection on temporal positional encoding in object pointers to
244
+ # avoid potential interference with spatial positional encoding
245
+ self.obj_ptr_tpos_proj = torch.nn.Linear(self.hidden_dim, self.mem_dim)
246
+ else:
247
+ self.obj_ptr_tpos_proj = torch.nn.Identity()
248
+
249
+ def _forward_sam_heads(
250
+ self,
251
+ backbone_features,
252
+ point_inputs=None,
253
+ mask_inputs=None,
254
+ hidden_inputs=None,
255
+ high_res_features=None,
256
+ multimask_output=False,
257
+ ):
258
+ """
259
+ Forward SAM prompt encoders and mask heads.
260
+
261
+ Inputs:
262
+ - backbone_features: image features of [B, C, H, W] shape
263
+ - point_inputs: a dictionary with "point_coords" and "point_labels", where
264
+ 1) "point_coords" has [B, P, 2] shape and float32 dtype and contains the
265
+ absolute pixel-unit coordinate in (x, y) format of the P input points
266
+ 2) "point_labels" has shape [B, P] and int32 dtype, where 1 means
267
+ positive clicks, 0 means negative clicks, and -1 means padding
268
+ - mask_inputs: a mask of [B, 1, H*16, W*16] shape, float or bool, with the
269
+ same spatial size as the image.
270
+ - high_res_features: either 1) None or 2) a list of length 2 containing
271
+ two feature maps of [B, C, 4*H, 4*W] and [B, C, 2*H, 2*W] shapes respectively,
272
+ which will be used as high-resolution feature maps for SAM decoder.
273
+ - multimask_output: if it's True, we output 3 candidate masks and their 3
274
+ corresponding IoU estimates, and if it's False, we output only 1 mask and
275
+ its corresponding IoU estimate.
276
+
277
+ Outputs:
278
+ - low_res_multimasks: [B, M, H*4, W*4] shape (where M = 3 if
279
+ `multimask_output=True` and M = 1 if `multimask_output=False`), the SAM
280
+ output mask logits (before sigmoid) for the low-resolution masks, with 4x
281
+ the resolution (1/4 stride) of the input backbone_features.
282
+ - high_res_multimasks: [B, M, H*16, W*16] shape (where M = 3
283
+ if `multimask_output=True` and M = 1 if `multimask_output=False`),
284
+ upsampled from the low-resolution masks, with the same spatial size as the image
285
+ (stride is 1 pixel).
286
+ - ious: [B, M] shape (where M = 3 if `multimask_output=True` and M = 1
287
+ if `multimask_output=False`), the estimated IoU of each output mask.
288
+ - low_res_masks: [B, 1, H*4, W*4] shape, the best mask in `low_res_multimasks`.
289
+ If `multimask_output=True`, it's the mask with the highest IoU estimate.
290
+ If `multimask_output=False`, it's the same as `low_res_multimasks`.
291
+ - high_res_masks: [B, 1, H*16, W*16] shape, the best mask in `high_res_multimasks`.
292
+ If `multimask_output=True`, it's the mask with the highest IoU estimate.
293
+ If `multimask_output=False`, it's the same as `high_res_multimasks`.
294
+ - obj_ptr: [B, C] shape, the object pointer vector for the output mask, extracted
295
+ based on the output token from the SAM mask decoder.
296
+ """
297
+ B = backbone_features.size(0)
298
+ device = backbone_features.device
299
+ assert backbone_features.size(1) == self.sam_prompt_embed_dim
300
+ assert backbone_features.size(2) == self.sam_image_embedding_size
301
+ assert backbone_features.size(3) == self.sam_image_embedding_size
302
+
303
+ # a) Handle point prompts
304
+ if point_inputs is not None:
305
+ sam_point_coords = point_inputs["point_coords"]
306
+ sam_point_labels = point_inputs["point_labels"]
307
+ assert sam_point_coords.size(0) == B and sam_point_labels.size(0) == B
308
+ else:
309
+ # If no points are provided, pad with an empty point (with label -1)
310
+ sam_point_coords = torch.zeros(B, 1, 2, device=device)
311
+ sam_point_labels = -torch.ones(B, 1, dtype=torch.int32, device=device)
312
+
313
+ # b) Handle mask prompts
314
+ if mask_inputs is not None:
315
+ # If mask_inputs is provided, downsize it into low-res mask input if needed
316
+ # and feed it as a dense mask prompt into the SAM mask encoder
317
+ assert len(mask_inputs.shape) == 4 and mask_inputs.shape[:2] == (B, 1)
318
+ if mask_inputs.shape[-2:] != self.sam_prompt_encoder.mask_input_size:
319
+ sam_mask_prompt = F.interpolate(
320
+ mask_inputs.float(),
321
+ size=self.sam_prompt_encoder.mask_input_size,
322
+ align_corners=False,
323
+ mode="bilinear",
324
+ antialias=True, # use antialias for downsampling
325
+ )
326
+ else:
327
+ sam_mask_prompt = mask_inputs
328
+ else:
329
+ # Otherwise, simply feed None (and SAM's prompt encoder will add
330
+ # a learned `no_mask_embed` to indicate no mask input in this case).
331
+ sam_mask_prompt = None
332
+
333
+ sparse_embeddings, dense_embeddings = self.sam_prompt_encoder(
334
+ points=(sam_point_coords, sam_point_labels),
335
+ boxes=None,
336
+ masks=sam_mask_prompt,
337
+ hidden=hidden_inputs,
338
+ )
339
+ (
340
+ low_res_multimasks,
341
+ ious,
342
+ sam_output_tokens,
343
+ object_score_logits,
344
+ ) = self.sam_mask_decoder(
345
+ image_embeddings=backbone_features,
346
+ image_pe=self.sam_prompt_encoder.get_dense_pe(),
347
+ sparse_prompt_embeddings=sparse_embeddings,
348
+ dense_prompt_embeddings=dense_embeddings,
349
+ multimask_output=multimask_output,
350
+ repeat_image=False, # the image is already batched
351
+ high_res_features=high_res_features,
352
+ )
353
+ if self.pred_obj_scores:
354
+ is_obj_appearing = object_score_logits > 0
355
+
356
+ # Mask used for spatial memories is always a *hard* choice between obj and no obj,
357
+ # consistent with the actual mask prediction
358
+ # NOTE: whether to mask here during inference?
359
+ if getattr(self, 'inference_mode', False):
360
+ low_res_multimasks = torch.where(
361
+ is_obj_appearing[:, None, None],
362
+ low_res_multimasks,
363
+ NO_OBJ_SCORE,
364
+ )
365
+
366
+ # convert masks from possibly bfloat16 (or float16) to float32
367
+ # low_res_multimasks = low_res_multimasks.float()
368
+ high_res_multimasks = F.interpolate(
369
+ low_res_multimasks.float(),
370
+ size=(self.image_size, self.image_size),
371
+ mode="bilinear",
372
+ align_corners=False,
373
+ ).to(low_res_multimasks.dtype)
374
+
375
+ sam_output_token = sam_output_tokens[:, 0]
376
+ if multimask_output:
377
+ # take the best mask prediction (with the highest IoU estimation)
378
+ best_iou_inds = torch.argmax(ious, dim=-1)
379
+ batch_inds = torch.arange(B, device=device)
380
+ low_res_masks = low_res_multimasks[batch_inds, best_iou_inds].unsqueeze(1)
381
+ high_res_masks = high_res_multimasks[batch_inds, best_iou_inds].unsqueeze(1)
382
+ if sam_output_tokens.size(1) > 1:
383
+ sam_output_token = sam_output_tokens[batch_inds, best_iou_inds]
384
+ else:
385
+ low_res_masks, high_res_masks = low_res_multimasks, high_res_multimasks
386
+
387
+ # Extract object pointer from the SAM output token (with occlusion handling)
388
+ obj_ptr = self.obj_ptr_proj(sam_output_token)
389
+ if self.pred_obj_scores:
390
+ # Allow *soft* no obj ptr, unlike for masks
391
+ if self.soft_no_obj_ptr:
392
+ lambda_is_obj_appearing = object_score_logits.sigmoid()
393
+ else:
394
+ lambda_is_obj_appearing = is_obj_appearing.to(object_score_logits.dtype)
395
+
396
+ if self.fixed_no_obj_ptr:
397
+ obj_ptr = lambda_is_obj_appearing * obj_ptr
398
+ obj_ptr = obj_ptr + (1 - lambda_is_obj_appearing) * self.no_obj_ptr
399
+
400
+ return (
401
+ low_res_multimasks,
402
+ high_res_multimasks,
403
+ ious,
404
+ low_res_masks,
405
+ high_res_masks,
406
+ obj_ptr,
407
+ object_score_logits,
408
+ )
409
+
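A minimal standalone sketch (hypothetical shapes) of the best-mask selection above: when `multimask_output=True`, the kept mask is simply the candidate with the highest estimated IoU per batch element.

import torch

B, M = 2, 3                                    # hypothetical: 2 prompts, 3 candidate masks
low_res_multimasks = torch.randn(B, M, 64, 64)
ious = torch.rand(B, M)

best_iou_inds = torch.argmax(ious, dim=-1)     # [B]
batch_inds = torch.arange(B)
low_res_masks = low_res_multimasks[batch_inds, best_iou_inds].unsqueeze(1)  # [B, 1, 64, 64]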
410
+ def _use_mask_as_output(self, backbone_features, high_res_features, mask_inputs):
411
+ """
412
+ Directly turn binary `mask_inputs` into output mask logits without using SAM.
413
+ (same input and output shapes as in _forward_sam_heads above).
414
+ """
415
+ # Use -10/+10 as logits for neg/pos pixels (very close to 0/1 in prob after sigmoid).
416
+ out_scale, out_bias = 20.0, -10.0 # sigmoid(-10.0)=4.5398e-05
417
+ mask_inputs_float = mask_inputs.float()
418
+ high_res_masks = mask_inputs_float * out_scale + out_bias
419
+ low_res_masks = F.interpolate(
420
+ high_res_masks,
421
+ size=(high_res_masks.size(-2) // 4, high_res_masks.size(-1) // 4),
422
+ align_corners=False,
423
+ mode="bilinear",
424
+ antialias=True, # use antialias for downsampling
425
+ )
426
+ # a dummy IoU prediction of all 1's under mask input
427
+ ious = mask_inputs.new_ones(mask_inputs.size(0), 1).float()
428
+ if not self.use_obj_ptrs_in_encoder:
429
+ # all zeros as a dummy object pointer (of shape [B, C])
430
+ obj_ptr = torch.zeros(mask_inputs.size(0), self.hidden_dim, device=mask_inputs.device)
431
+ else:
432
+ # produce an object pointer using the SAM decoder from the mask input
433
+ _, _, _, _, _, obj_ptr, _ = self._forward_sam_heads(
434
+ backbone_features=backbone_features,
435
+ mask_inputs=self.mask_downsample(mask_inputs_float),
436
+ high_res_features=high_res_features,
437
+ )
438
+ # In this method, we are treating mask_input as output, e.g. using it directly to create spatial mem;
439
+ # Below, we follow the same design axiom to use mask_input to decide if obj appears or not instead of relying
440
+ # on the object_scores from the SAM decoder.
441
+ is_obj_appearing = torch.any(mask_inputs.flatten(1).float() > 0.0, dim=1)
442
+ is_obj_appearing = is_obj_appearing[..., None]
443
+ lambda_is_obj_appearing = is_obj_appearing.float()
444
+ object_score_logits = out_scale * lambda_is_obj_appearing + out_bias
445
+ if self.pred_obj_scores:
446
+ if self.fixed_no_obj_ptr:
447
+ obj_ptr = lambda_is_obj_appearing * obj_ptr
448
+ obj_ptr = obj_ptr + (1 - lambda_is_obj_appearing) * self.no_obj_ptr
449
+
450
+ return (
451
+ low_res_masks,
452
+ high_res_masks,
453
+ ious,
454
+ low_res_masks,
455
+ high_res_masks,
456
+ obj_ptr,
457
+ object_score_logits,
458
+ )
459
+
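A quick standalone check of the -10/+10 logit trick above: binary mask values map to logits whose sigmoid probabilities are within ~4.5e-05 of 0 and 1.

import torch

out_scale, out_bias = 20.0, -10.0
binary_mask = torch.tensor([0.0, 1.0])
logits = binary_mask * out_scale + out_bias    # [-10., 10.]
probs = torch.sigmoid(logits)                  # ~[4.54e-05, 0.99995]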
460
+ def forward_image(self, img_batch: torch.Tensor):
461
+ """Get the image feature on the input batch."""
462
+ backbone_out = self.image_encoder(img_batch)
463
+ if self.use_high_res_features_in_sam:
464
+ # precompute projected level 0 and level 1 features in SAM decoder
465
+ # to avoid running it again on every SAM click
466
+ backbone_out["backbone_fpn"][0] = self.sam_mask_decoder.conv_s0(backbone_out["backbone_fpn"][0])
467
+ backbone_out["backbone_fpn"][1] = self.sam_mask_decoder.conv_s1(backbone_out["backbone_fpn"][1])
468
+ return backbone_out
469
+
470
+ def _prepare_backbone_features(self, backbone_out):
471
+ """Prepare and flatten visual features."""
472
+ backbone_out = backbone_out.copy()
473
+ assert len(backbone_out["backbone_fpn"]) == len(backbone_out["vision_pos_enc"])
474
+ assert len(backbone_out["backbone_fpn"]) >= self.num_feature_levels
475
+
476
+ feature_maps = backbone_out["backbone_fpn"][-self.num_feature_levels:]
477
+ vision_pos_embeds = backbone_out["vision_pos_enc"][-self.num_feature_levels:]
478
+
479
+ feat_sizes = [(x.shape[-2], x.shape[-1]) for x in vision_pos_embeds]
480
+ # flatten NxCxHxW to HWxNxC
481
+ vision_feats = [x.flatten(2).permute(2, 0, 1) for x in feature_maps]
482
+ vision_pos_embeds = [x.flatten(2).permute(2, 0, 1) for x in vision_pos_embeds]
483
+
484
+ return backbone_out, vision_feats, vision_pos_embeds, feat_sizes
485
+
486
+ def _prepare_memory_conditioned_features(
487
+ self,
488
+ frame_idx,
489
+ is_init_cond_frame,
490
+ current_vision_feats,
491
+ current_vision_pos_embeds,
492
+ feat_sizes,
493
+ output_dict,
494
+ num_frames,
495
+ track_in_reverse=False, # tracking in reverse time order (for demo usage)
496
+ ):
497
+ """Fuse the current frame's visual feature map with previous memory."""
498
+ B = current_vision_feats[-1].size(1) # batch size on this frame
499
+ C = self.hidden_dim
500
+ H, W = feat_sizes[-1] # top-level (lowest-resolution) feature size
501
+ device = current_vision_feats[-1].device
502
+ # The case of `self.num_maskmem == 0` below is primarily used for reproducing SAM on images.
503
+ # In this case, we skip the fusion with any memory.
504
+ if self.num_maskmem == 0: # Disable memory and skip fusion
505
+ pix_feat = current_vision_feats[-1].permute(1, 2, 0).view(B, C, H, W)
506
+ return pix_feat
507
+
508
+ num_obj_ptr_tokens = 0
509
+ tpos_sign_mul = -1 if track_in_reverse else 1
510
+ # Step 1: condition the visual features of the current frame on previous memories
511
+ if not is_init_cond_frame:
512
+ # Retrieve the memories encoded with the maskmem backbone
513
+ to_cat_memory, to_cat_memory_pos_embed = [], []
514
+ # Add conditioning frames' outputs first (all cond frames have t_pos=0 when
515
+ # getting the temporal positional embedding below)
516
+ assert len(output_dict["cond_frame_outputs"]) > 0
517
+ # Select a maximum number of temporally closest cond frames for cross attention
518
+ cond_outputs = output_dict["cond_frame_outputs"]
519
+ selected_cond_outputs, unselected_cond_outputs = select_closest_cond_frames(
520
+ frame_idx, cond_outputs, self.max_cond_frames_in_attn)
521
+ t_pos_and_prevs = [(0, out) for out in selected_cond_outputs.values()]
522
+ # Add the last (self.num_maskmem - 1) frames before the current frame as non-conditioning memory;
523
+ # the earliest one has t_pos=1 and the latest one has t_pos=self.num_maskmem-1
524
+ # We also allow taking the memory frame non-consecutively (with stride>1), in which case
525
+ # we take (self.num_maskmem - 2) frames among every stride-th frame plus the last frame.
526
+ stride = 1 if self.training else self.memory_temporal_stride_for_eval
527
+ for t_pos in range(1, self.num_maskmem):
528
+ t_rel = self.num_maskmem - t_pos # how many frames before current frame
529
+ if t_rel == 1:
530
+ # for t_rel == 1, we take the last frame (regardless of r)
531
+ if not track_in_reverse:
532
+ # the frame immediately before this frame (i.e. frame_idx - 1)
533
+ prev_frame_idx = frame_idx - t_rel
534
+ else:
535
+ # the frame immediately after this frame (i.e. frame_idx + 1)
536
+ prev_frame_idx = frame_idx + t_rel
537
+ else:
538
+ # for t_rel >= 2, we take the memory frame from every r-th frame
539
+ if not track_in_reverse:
540
+ # first find the nearest frame among every r-th frames before this frame
541
+ # for r=1, this would be (frame_idx - 2)
542
+ prev_frame_idx = ((frame_idx - 2) // stride) * stride
543
+ # then seek further among every r-th frames
544
+ prev_frame_idx = prev_frame_idx - (t_rel - 2) * stride
545
+ else:
546
+ # first find the nearest frame among every r-th frames after this frame
547
+ # for r=1, this would be (frame_idx + 2)
548
+ prev_frame_idx = -(-(frame_idx + 2) // stride) * stride
549
+ # then seek further among every r-th frames
550
+ prev_frame_idx = prev_frame_idx + (t_rel - 2) * stride
551
+ out = output_dict["non_cond_frame_outputs"].get(prev_frame_idx, None)
552
+ if out is None:
553
+ # If an unselected conditioning frame is among the last (self.num_maskmem - 1)
554
+ # frames, we still attend to it as if it's a non-conditioning frame.
555
+ out = unselected_cond_outputs.get(prev_frame_idx, None)
556
+ t_pos_and_prevs.append((t_pos, out))
557
+
558
+ for t_pos, prev in t_pos_and_prevs:
559
+ if prev is None:
560
+ continue # skip padding frames
561
+ # "maskmem_features" might have been offloaded to CPU in demo use cases,
562
+ # so we load it back to GPU (it's a no-op if it's already on GPU).
563
+ feats = prev["maskmem_features"].to(device, non_blocking=True)
564
+ to_cat_memory.append(feats.flatten(2).permute(2, 0, 1))
565
+ # Spatial positional encoding (it might have been offloaded to CPU in eval)
566
+ maskmem_enc = prev["maskmem_pos_enc"][-1].to(device)
567
+ maskmem_enc = maskmem_enc.flatten(2).permute(2, 0, 1)
568
+ # Temporal positional encoding
569
+ maskmem_enc = (maskmem_enc + self.maskmem_tpos_enc[self.num_maskmem - t_pos - 1])
570
+ to_cat_memory_pos_embed.append(maskmem_enc)
571
+
572
+ # Construct the list of past object pointers
573
+ if self.use_obj_ptrs_in_encoder:
574
+ max_obj_ptrs_in_encoder = min(num_frames, self.max_obj_ptrs_in_encoder)
575
+ # First add those object pointers from selected conditioning frames
576
+ # (optionally, only include object pointers in the past during evaluation)
577
+ if not self.training and self.only_obj_ptrs_in_the_past_for_eval:
578
+ ptr_cond_outputs = {
579
+ t: out
580
+ for t, out in selected_cond_outputs.items()
581
+ if (t >= frame_idx if track_in_reverse else t <= frame_idx)
582
+ }
583
+ else:
584
+ ptr_cond_outputs = selected_cond_outputs
585
+ pos_and_ptrs = [
586
+ # Temporal pos encoding contains how far away each pointer is from current frame
587
+ (
588
+ ((frame_idx - t) * tpos_sign_mul if self.use_signed_tpos_enc_to_obj_ptrs else abs(frame_idx -
589
+ t)),
590
+ out["obj_ptr"],
591
+ ) for t, out in ptr_cond_outputs.items()
592
+ ]
593
+ # Add up to (max_obj_ptrs_in_encoder - 1) non-conditioning frames before current frame
594
+ for t_diff in range(1, max_obj_ptrs_in_encoder):
595
+ t = frame_idx + t_diff if track_in_reverse else frame_idx - t_diff
596
+ if t < 0 or (num_frames is not None and t >= num_frames):
597
+ break
598
+ out = output_dict["non_cond_frame_outputs"].get(t, unselected_cond_outputs.get(t, None))
599
+ if out is not None:
600
+ pos_and_ptrs.append((t_diff, out["obj_ptr"]))
601
+ # If we have at least one object pointer, add them to the cross attention
602
+ if len(pos_and_ptrs) > 0:
603
+ pos_list, ptrs_list = zip(*pos_and_ptrs)
604
+ # stack object pointers along dim=0 into [ptr_seq_len, B, C] shape
605
+ obj_ptrs = torch.stack(ptrs_list, dim=0)
606
+ # a temporal positional embedding based on how far each object pointer is from
607
+ # the current frame (sine embedding normalized by the max pointer num).
608
+ if self.add_tpos_enc_to_obj_ptrs:
609
+ t_diff_max = max_obj_ptrs_in_encoder - 1
610
+ tpos_dim = C if self.proj_tpos_enc_in_obj_ptrs else self.mem_dim
611
+ obj_pos = torch.tensor(pos_list).to(device=device, non_blocking=True)
612
+ obj_pos = get_1d_sine_pe(obj_pos / t_diff_max, dim=tpos_dim)
613
+ obj_pos = self.obj_ptr_tpos_proj(obj_pos.to(self.obj_ptr_tpos_proj.weight.dtype))
614
+ obj_pos = obj_pos.unsqueeze(1).expand(-1, B, self.mem_dim)
615
+ else:
616
+ obj_pos = obj_ptrs.new_zeros(len(pos_list), B, self.mem_dim)
617
+ if self.mem_dim < C:
618
+ # split a pointer into (C // self.mem_dim) tokens for self.mem_dim < C
619
+ obj_ptrs = obj_ptrs.reshape(-1, B, C // self.mem_dim, self.mem_dim)
620
+ obj_ptrs = obj_ptrs.permute(0, 2, 1, 3).flatten(0, 1)
621
+ obj_pos = obj_pos.repeat_interleave(C // self.mem_dim, dim=0)
622
+ to_cat_memory.append(obj_ptrs)
623
+ to_cat_memory_pos_embed.append(obj_pos)
624
+ num_obj_ptr_tokens = obj_ptrs.shape[0]
625
+ else:
626
+ num_obj_ptr_tokens = 0
627
+ else:
628
+ # for initial conditioning frames, encode them without using any previous memory
629
+ if self.directly_add_no_mem_embed:
630
+ # directly add no-mem embedding (instead of using the transformer encoder)
631
+ pix_feat_with_mem = current_vision_feats[-1] + self.no_mem_embed
632
+ pix_feat_with_mem = pix_feat_with_mem.permute(1, 2, 0).view(B, C, H, W)
633
+ return pix_feat_with_mem
634
+
635
+ # Use a dummy token on the first frame (to avoid empty memory input to transformer encoder)
636
+ to_cat_memory = [self.no_mem_embed.expand(1, B, self.mem_dim)]
637
+ to_cat_memory_pos_embed = [self.no_mem_pos_enc.expand(1, B, self.mem_dim)]
638
+
639
+ # Step 2: Concatenate the memories and forward through the transformer encoder
640
+ memory = torch.cat(to_cat_memory, dim=0)
641
+ memory_pos_embed = torch.cat(to_cat_memory_pos_embed, dim=0)
642
+
643
+ pix_feat_with_mem = self.memory_attention(
644
+ curr=current_vision_feats,
645
+ curr_pos=current_vision_pos_embeds,
646
+ memory=memory,
647
+ memory_pos=memory_pos_embed,
648
+ num_obj_ptr_tokens=num_obj_ptr_tokens,
649
+ )
650
+ # reshape the output (HW)BC => BCHW
651
+ pix_feat_with_mem = pix_feat_with_mem.permute(1, 2, 0).view(B, C, H, W)
652
+ return pix_feat_with_mem
653
+
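To make the stride arithmetic above concrete, here is a small standalone sketch (hypothetical values for frame_idx, num_maskmem, and the eval stride) reproducing the forward-tracking branch:

frame_idx, num_maskmem, stride = 20, 4, 3   # hypothetical values
prev_idxs = []
for t_pos in range(1, num_maskmem):
    t_rel = num_maskmem - t_pos
    if t_rel == 1:
        prev_idxs.append(frame_idx - 1)                # always take the immediately previous frame
    else:
        base = ((frame_idx - 2) // stride) * stride    # nearest stride multiple at or before frame_idx - 2
        prev_idxs.append(base - (t_rel - 2) * stride)  # step further back in stride-sized hops
print(prev_idxs)  # [15, 18, 19]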
654
+ def _encode_new_memory(
655
+ self,
656
+ current_vision_feats,
657
+ feat_sizes,
658
+ pred_masks_high_res,
659
+ object_score_logits,
660
+ is_mask_from_pts,
661
+ ):
662
+ """Encode the current image and its prediction into a memory feature."""
663
+ B = current_vision_feats[-1].size(1) # batch size on this frame
664
+ C = self.hidden_dim
665
+ H, W = feat_sizes[-1] # top-level (lowest-resolution) feature size
666
+ # top-level feature, (HW)BC => BCHW
667
+ pix_feat = current_vision_feats[-1].permute(1, 2, 0).view(B, C, H, W)
668
+ if self.non_overlap_masks_for_mem_enc and not self.training:
669
+ # optionally, apply non-overlapping constraints to the masks (it's applied
670
+ # in the batch dimension and should only be used during eval, where all
671
+ # the objects come from the same video under batch size 1).
672
+ pred_masks_high_res = self._apply_non_overlapping_constraints(pred_masks_high_res)
673
+ # scale the raw mask logits with a temperature before applying sigmoid
674
+ binarize = self.binarize_mask_from_pts_for_mem_enc and is_mask_from_pts
675
+ if binarize and not self.training:
676
+ mask_for_mem = (pred_masks_high_res > 0).to(pred_masks_high_res.dtype)
677
+ else:
678
+ # apply sigmoid on the raw mask logits to turn them into range (0, 1)
679
+ mask_for_mem = torch.sigmoid(pred_masks_high_res)
680
+ # apply scale and bias terms to the sigmoid probabilities
681
+ if self.sigmoid_scale_for_mem_enc != 1.0:
682
+ mask_for_mem = mask_for_mem * self.sigmoid_scale_for_mem_enc
683
+ if self.sigmoid_bias_for_mem_enc != 0.0:
684
+ mask_for_mem = mask_for_mem + self.sigmoid_bias_for_mem_enc
685
+ maskmem_out = self.memory_encoder(
686
+ pix_feat,
687
+ mask_for_mem,
688
+ skip_mask_sigmoid=True # sigmoid already applied
689
+ )
690
+ maskmem_features = maskmem_out["vision_features"]
691
+ maskmem_pos_enc = maskmem_out["vision_pos_enc"]
692
+ # add a no-object embedding to the spatial memory to indicate that the frame
693
+ # is predicted to be occluded (i.e. no object is appearing in the frame)
694
+ if self.no_obj_embed_spatial is not None:
695
+ is_obj_appearing = (object_score_logits > 0).to(object_score_logits.dtype)
696
+ maskmem_features += (1 - is_obj_appearing[..., None, None]
697
+ ) * self.no_obj_embed_spatial[..., None, None].expand(*maskmem_features.shape)
698
+
699
+ return maskmem_features, maskmem_pos_enc
700
+
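A standalone sketch of the mask preprocessing above (the scale/bias values here are hypothetical; the actual ones come from the model config): the sigmoid probabilities are affinely rescaled before entering the memory encoder.

import torch

sigmoid_scale, sigmoid_bias = 20.0, -10.0          # hypothetical config values
pred_masks_high_res = torch.randn(1, 1, 512, 512)  # raw mask logits
mask_for_mem = torch.sigmoid(pred_masks_high_res) * sigmoid_scale + sigmoid_bias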
701
+ def _track_step(
702
+ self,
703
+ frame_idx,
704
+ is_init_cond_frame,
705
+ current_vision_feats,
706
+ current_vision_pos_embeds,
707
+ feat_sizes,
708
+ point_inputs,
709
+ mask_inputs,
710
+ hidden_inputs,
711
+ output_dict,
712
+ num_frames,
713
+ track_in_reverse,
714
+ prev_sam_mask_logits,
715
+ ):
716
+ current_out = {"point_inputs": point_inputs, "mask_inputs": mask_inputs, "hidden_inputs": hidden_inputs}
717
+ # High-resolution feature maps for the SAM head, reshape (HW)BC => BCHW
718
+ if len(current_vision_feats) > 1:
719
+ high_res_features = [
720
+ x.permute(1, 2, 0).view(x.size(1), x.size(2), *s)
721
+ for x, s in zip(current_vision_feats[:-1], feat_sizes[:-1])
722
+ ]
723
+ else:
724
+ high_res_features = None
725
+ if mask_inputs is not None and self.use_mask_input_as_output_without_sam:
726
+ # When use_mask_input_as_output_without_sam=True, we directly output the mask input
727
+ # (see it as a GT mask) without using a SAM prompt encoder + mask decoder.
728
+ pix_feat = current_vision_feats[-1].permute(1, 2, 0)
729
+ pix_feat = pix_feat.view(-1, self.hidden_dim, *feat_sizes[-1])
730
+ sam_outputs = self._use_mask_as_output(pix_feat, high_res_features, mask_inputs)
731
+ else:
732
+ # fuse the visual features with previous memory features in the memory bank
733
+ pix_feat = self._prepare_memory_conditioned_features(
734
+ frame_idx=frame_idx,
735
+ is_init_cond_frame=is_init_cond_frame,
736
+ current_vision_feats=current_vision_feats[-1:],
737
+ current_vision_pos_embeds=current_vision_pos_embeds[-1:],
738
+ feat_sizes=feat_sizes[-1:],
739
+ output_dict=output_dict,
740
+ num_frames=num_frames,
741
+ track_in_reverse=track_in_reverse,
742
+ )
743
+ # apply SAM-style segmentation head
744
+ # here we might feed previously predicted low-res SAM mask logits into the SAM mask decoder,
745
+ # e.g. in demo where such logits come from earlier interaction instead of correction sampling
746
+ # (in this case, any `mask_inputs` shouldn't reach here as they are sent to _use_mask_as_output instead)
747
+ if prev_sam_mask_logits is not None:
748
+ assert point_inputs is not None and mask_inputs is None
749
+ mask_inputs = prev_sam_mask_logits
750
+ multimask_output = self._use_multimask(is_init_cond_frame, point_inputs)
751
+ sam_outputs = self._forward_sam_heads(
752
+ backbone_features=pix_feat,
753
+ point_inputs=point_inputs,
754
+ mask_inputs=mask_inputs,
755
+ hidden_inputs=hidden_inputs,
756
+ high_res_features=high_res_features,
757
+ multimask_output=multimask_output,
758
+ )
759
+
760
+ return current_out, sam_outputs, high_res_features, pix_feat
761
+
762
+ def _encode_memory_in_output(
763
+ self,
764
+ current_vision_feats,
765
+ feat_sizes,
766
+ point_inputs,
767
+ run_mem_encoder,
768
+ high_res_masks,
769
+ object_score_logits,
770
+ current_out,
771
+ ):
772
+ if run_mem_encoder and self.num_maskmem > 0:
773
+ high_res_masks_for_mem_enc = high_res_masks
774
+ maskmem_features, maskmem_pos_enc = self._encode_new_memory(
775
+ current_vision_feats=current_vision_feats,
776
+ feat_sizes=feat_sizes,
777
+ pred_masks_high_res=high_res_masks_for_mem_enc,
778
+ object_score_logits=object_score_logits,
779
+ is_mask_from_pts=(point_inputs is not None),
780
+ )
781
+ current_out["maskmem_features"] = maskmem_features
782
+ current_out["maskmem_pos_enc"] = maskmem_pos_enc
783
+ else:
784
+ current_out["maskmem_features"] = None
785
+ current_out["maskmem_pos_enc"] = None
786
+
787
+ def track_step(
788
+ self,
789
+ frame_idx,
790
+ is_init_cond_frame,
791
+ current_vision_feats,
792
+ current_vision_pos_embeds,
793
+ feat_sizes,
794
+ point_inputs,
795
+ mask_inputs,
796
+ hidden_inputs,
797
+ output_dict,
798
+ num_frames,
799
+ track_in_reverse=False, # tracking in reverse time order (for demo usage)
800
+ # Whether to run the memory encoder on the predicted masks. Sometimes we might want
801
+ # to skip the memory encoder with `run_mem_encoder=False`. For example,
802
+ # in demo we might call `track_step` multiple times for each user click,
803
+ # and only encode the memory when the user finalizes their clicks. And in ablation
804
+ # settings like SAM training on static images, we don't need the memory encoder.
805
+ run_mem_encoder=True,
806
+ # The previously predicted SAM mask logits (which can be fed together with new clicks in demo).
807
+ prev_sam_mask_logits=None,
808
+ ):
809
+ current_out, sam_outputs, _, _ = self._track_step(
810
+ frame_idx,
811
+ is_init_cond_frame,
812
+ current_vision_feats,
813
+ current_vision_pos_embeds,
814
+ feat_sizes,
815
+ point_inputs,
816
+ mask_inputs,
817
+ hidden_inputs,
818
+ output_dict,
819
+ num_frames,
820
+ track_in_reverse,
821
+ prev_sam_mask_logits,
822
+ )
823
+
824
+ (
825
+ _,
826
+ _,
827
+ _,
828
+ low_res_masks,
829
+ high_res_masks,
830
+ obj_ptr,
831
+ object_score_logits,
832
+ ) = sam_outputs
833
+
834
+ current_out["pred_masks"] = low_res_masks
835
+ current_out["pred_masks_high_res"] = high_res_masks
836
+ current_out["obj_ptr"] = obj_ptr
837
+ if not self.training:
838
+ # Only add this in inference (to avoid unused param in activation checkpointing;
839
+ # it's mainly used in the demo to encode spatial memories w/ consolidated masks)
840
+ current_out["object_score_logits"] = object_score_logits
841
+
842
+ # Finally run the memory encoder on the predicted mask to encode
843
+ # it into a new memory feature (that can be used in future frames)
844
+ self._encode_memory_in_output(
845
+ current_vision_feats,
846
+ feat_sizes,
847
+ point_inputs,
848
+ run_mem_encoder,
849
+ high_res_masks,
850
+ object_score_logits,
851
+ current_out,
852
+ )
853
+
854
+ return current_out
855
+
856
+ def _use_multimask(self, is_init_cond_frame, point_inputs):
857
+ """Whether to use multimask output in the SAM head."""
858
+ num_pts = 0 if point_inputs is None else point_inputs["point_labels"].size(1)
859
+ multimask_output = (
860
+ self.multimask_output_in_sam and (is_init_cond_frame or self.multimask_output_for_tracking)
861
+ and (self.multimask_min_pt_num <= num_pts <= self.multimask_max_pt_num))
862
+ return multimask_output
863
+
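A self-contained sketch of this gating (hypothetical config values): multimask output is enabled only on click-prompted init frames (or during tracking if configured) with a point count inside the configured range.

multimask_output_in_sam = True          # hypothetical config
multimask_output_for_tracking = False
multimask_min_pt_num, multimask_max_pt_num = 0, 1

def use_multimask(is_init_cond_frame, num_pts):
    return (multimask_output_in_sam
            and (is_init_cond_frame or multimask_output_for_tracking)
            and multimask_min_pt_num <= num_pts <= multimask_max_pt_num)

assert use_multimask(True, 1) and not use_multimask(False, 1)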
864
+ def _apply_non_overlapping_constraints(self, pred_masks):
865
+ """
866
+ Apply non-overlapping constraints to the object scores in pred_masks. Here we
867
+ keep only the highest scoring object at each spatial location in pred_masks.
868
+ """
869
+ batch_size = pred_masks.size(0)
870
+ if batch_size == 1:
871
+ return pred_masks
872
+
873
+ device = pred_masks.device
874
+ # "max_obj_inds": object index of the object with the highest score at each location
875
+ max_obj_inds = torch.argmax(pred_masks, dim=0, keepdim=True)
876
+ # "batch_obj_inds": object index of each object slice (along dim 0) in `pred_masks`
877
+ batch_obj_inds = torch.arange(batch_size, device=device)[:, None, None, None]
878
+ keep = max_obj_inds == batch_obj_inds
879
+ # suppress overlapping regions' scores below -10.0 so that the foreground regions
880
+ # don't overlap (here sigmoid(-10.0)=4.5398e-05)
881
+ pred_masks = torch.where(keep, pred_masks, torch.clamp(pred_masks, max=-10.0))
882
+ return pred_masks
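A tiny worked example of the suppression above (toy 2x2 score maps for two objects): only the per-pixel winner keeps its score; the loser is clamped to at most -10.

import torch

pred_masks = torch.tensor([[[[5.0, -1.0], [2.0, 3.0]]],
                           [[[4.0,  1.0], [6.0, 3.0]]]])  # [2 objects, 1, 2, 2]
max_obj_inds = torch.argmax(pred_masks, dim=0, keepdim=True)
batch_obj_inds = torch.arange(2)[:, None, None, None]
keep = max_obj_inds == batch_obj_inds
suppressed = torch.where(keep, pred_masks, torch.clamp(pred_masks, max=-10.0))
# object 0 keeps (0,0) and the (1,1) tie; object 1 keeps (0,1) and (1,0)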
sam2/modeling/sam2_utils.py ADDED
@@ -0,0 +1,320 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+
4
+ # This source code is licensed under the license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ import copy
8
+ from typing import Tuple
9
+
10
+ import numpy as np
11
+ import torch
12
+ import torch.nn as nn
13
+ import torch.nn.functional as F
14
+
15
+ from sam2.utils.misc import mask_to_box
16
+
17
+
18
+ def select_closest_cond_frames(frame_idx, cond_frame_outputs, max_cond_frame_num):
19
+ """
20
+ Select up to `max_cond_frame_num` conditioning frames from `cond_frame_outputs`
21
+ that are temporally closest to the current frame at `frame_idx`. Here, we take
22
+ - a) the closest conditioning frame before `frame_idx` (if any);
23
+ - b) the closest conditioning frame after `frame_idx` (if any);
24
+ - c) any other temporally closest conditioning frames until reaching a total
25
+ of `max_cond_frame_num` conditioning frames.
26
+
27
+ Outputs:
28
+ - selected_outputs: selected items (keys & values) from `cond_frame_outputs`.
29
+ - unselected_outputs: items (keys & values) not selected in `cond_frame_outputs`.
30
+ """
31
+ if max_cond_frame_num == -1 or len(cond_frame_outputs) <= max_cond_frame_num:
32
+ selected_outputs = cond_frame_outputs
33
+ unselected_outputs = {}
34
+ else:
35
+ assert max_cond_frame_num >= 2, "we should allow using 2+ conditioning frames"
36
+ selected_outputs = {}
37
+
38
+ # the closest conditioning frame before `frame_idx` (if any)
39
+ idx_before = max((t for t in cond_frame_outputs if t < frame_idx), default=None)
40
+ if idx_before is not None:
41
+ selected_outputs[idx_before] = cond_frame_outputs[idx_before]
42
+
43
+ # the closest conditioning frame after `frame_idx` (if any)
44
+ idx_after = min((t for t in cond_frame_outputs if t >= frame_idx), default=None)
45
+ if idx_after is not None:
46
+ selected_outputs[idx_after] = cond_frame_outputs[idx_after]
47
+
48
+ # add other temporally closest conditioning frames until reaching a total
49
+ # of `max_cond_frame_num` conditioning frames.
50
+ num_remain = max_cond_frame_num - len(selected_outputs)
51
+ inds_remain = sorted(
52
+ (t for t in cond_frame_outputs if t not in selected_outputs),
53
+ key=lambda x: abs(x - frame_idx),
54
+ )[:num_remain]
55
+ selected_outputs.update((t, cond_frame_outputs[t]) for t in inds_remain)
56
+ unselected_outputs = {t: v for t, v in cond_frame_outputs.items() if t not in selected_outputs}
57
+
58
+ return selected_outputs, unselected_outputs
59
+
60
+
61
+ def get_1d_sine_pe(pos_inds, dim, temperature=10000):
62
+ """
63
+ Get 1D sine positional embedding as in the original Transformer paper.
64
+ """
65
+ pe_dim = dim // 2
66
+ dim_t = torch.arange(pe_dim, dtype=torch.float32, device=pos_inds.device)
67
+ dim_t = temperature**(2 * (dim_t // 2) / pe_dim)
68
+
69
+ pos_embed = pos_inds.unsqueeze(-1) / dim_t
70
+ pos_embed = torch.cat([pos_embed.sin(), pos_embed.cos()], dim=-1)
71
+ return pos_embed
72
+
73
+
74
+ def get_activation_fn(activation):
75
+ """Return an activation function given a string"""
76
+ if activation == "relu":
77
+ return F.relu
78
+ if activation == "gelu":
79
+ return F.gelu
80
+ if activation == "glu":
81
+ return F.glu
82
+ raise RuntimeError(f"activation should be relu/gelu, not {activation}.")
83
+
84
+
85
+ def get_clones(module, N):
86
+ return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
87
+
88
+
89
+ class DropPath(nn.Module):
90
+ # adapted from https://github.com/huggingface/pytorch-image-models/blob/main/timm/layers/drop.py
91
+ def __init__(self, drop_prob=0.0, scale_by_keep=True):
92
+ super(DropPath, self).__init__()
93
+ self.drop_prob = drop_prob
94
+ self.scale_by_keep = scale_by_keep
95
+
96
+ def forward(self, x):
97
+ if self.drop_prob == 0.0 or not self.training:
98
+ return x
99
+ keep_prob = 1 - self.drop_prob
100
+ shape = (x.shape[0], ) + (1, ) * (x.ndim - 1)
101
+ random_tensor = x.new_empty(shape).bernoulli_(keep_prob)
102
+ if keep_prob > 0.0 and self.scale_by_keep:
103
+ random_tensor.div_(keep_prob)
104
+ return x * random_tensor
105
+
106
+
107
+ # Lightly adapted from
108
+ # https://github.com/facebookresearch/MaskFormer/blob/main/mask_former/modeling/transformer/transformer_predictor.py # noqa
109
+ class MLP(nn.Module):
110
+
111
+ def __init__(
112
+ self,
113
+ input_dim: int,
114
+ hidden_dim: int,
115
+ output_dim: int,
116
+ num_layers: int,
117
+ activation: nn.Module = nn.ReLU,
118
+ sigmoid_output: bool = False,
119
+ ) -> None:
120
+ super().__init__()
121
+ self.num_layers = num_layers
122
+ h = [hidden_dim] * (num_layers - 1)
123
+ self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim]))
124
+ self.sigmoid_output = sigmoid_output
125
+ self.act = activation()
126
+
127
+ def forward(self, x):
128
+ for i, layer in enumerate(self.layers):
129
+ x = self.act(layer(x)) if i < self.num_layers - 1 else layer(x)
130
+ if self.sigmoid_output:
131
+ x = F.sigmoid(x)
132
+ return x
133
+
134
+
135
+ # From https://github.com/facebookresearch/detectron2/blob/main/detectron2/layers/batch_norm.py # noqa
136
+ # Itself from https://github.com/facebookresearch/ConvNeXt/blob/d1fa8f6fef0a165b27399986cc2bdacc92777e40/models/convnext.py#L119 # noqa
137
+ class LayerNorm2d(nn.Module):
138
+
139
+ def __init__(self, num_channels: int, eps: float = 1e-6) -> None:
140
+ super().__init__()
141
+ self.weight = nn.Parameter(torch.ones(num_channels))
142
+ self.bias = nn.Parameter(torch.zeros(num_channels))
143
+ self.eps = eps
144
+
145
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
146
+ u = x.mean(1, keepdim=True)
147
+ s = (x - u).pow(2).mean(1, keepdim=True)
148
+ x = (x - u) / torch.sqrt(s + self.eps)
149
+ x = self.weight[:, None, None] * x + self.bias[:, None, None]
150
+ return x
151
+
152
+
153
+ def sample_box_points(
154
+ masks: torch.Tensor,
155
+ noise: float = 0.1, # SAM default
156
+ noise_bound: int = 20, # SAM default
157
+ top_left_label: int = 2,
158
+ bottom_right_label: int = 3,
159
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
160
+ """
161
+ Sample a noised version of the top left and bottom right corners of the bounding box derived from `masks`
162
+
163
+ Inputs:
164
+ - masks: [B, 1, H, W] masks, dtype=torch.Tensor
165
+ - noise: noise as a fraction of box width and height, dtype=float
166
+ - noise_bound: maximum amount of noise (in pixels), dtype=int
167
+
168
+ Returns:
169
+ - box_coords: [B, num_pt, 2], contains (x, y) coordinates of top left and bottom right box corners, dtype=torch.float
170
+ - box_labels: [B, num_pt], label 2 is reserved for top left and 3 for bottom right corners, dtype=torch.int32
171
+ """
172
+ device = masks.device
173
+ box_coords = mask_to_box(masks)
174
+ B, _, H, W = masks.shape
175
+ box_labels = torch.tensor([top_left_label, bottom_right_label], dtype=torch.int, device=device).repeat(B)
176
+ if noise > 0.0:
177
+ if not isinstance(noise_bound, torch.Tensor):
178
+ noise_bound = torch.tensor(noise_bound, device=device)
179
+ bbox_w = box_coords[..., 2] - box_coords[..., 0]
180
+ bbox_h = box_coords[..., 3] - box_coords[..., 1]
181
+ max_dx = torch.min(bbox_w * noise, noise_bound)
182
+ max_dy = torch.min(bbox_h * noise, noise_bound)
183
+ box_noise = 2 * torch.rand(B, 1, 4, device=device) - 1
184
+ box_noise = box_noise * torch.stack((max_dx, max_dy, max_dx, max_dy), dim=-1)
185
+
186
+ box_coords = box_coords + box_noise
187
+ img_bounds = (torch.tensor([W, H, W, H], device=device) - 1) # uncentered pixel coords
188
+ box_coords.clamp_(torch.zeros_like(img_bounds), img_bounds) # In place clamping
189
+
190
+ box_coords = box_coords.reshape(-1, 2, 2) # always 2 points
191
+ box_labels = box_labels.reshape(-1, 2)
192
+ return box_coords, box_labels
193
+
194
+
195
+ def sample_random_points_from_errors(gt_masks, pred_masks, num_pt=1, positive_only=False):
196
+ """
197
+ Sample `num_pt` random points (along with their labels) independently from the error regions.
198
+
199
+ Inputs:
200
+ - gt_masks: [B, 1, H_im, W_im] masks, dtype=torch.bool
201
+ - pred_masks: [B, 1, H_im, W_im] masks, dtype=torch.bool or None
202
+ - num_pt: int, number of points to sample independently for each of the B error maps
203
+
204
+ Outputs:
205
+ - points: [B, num_pt, 2], dtype=torch.float, contains (x, y) coordinates of each sampled point
206
+ - labels: [B, num_pt], dtype=torch.int32, where 1 means positive clicks and 0 means
207
+ negative clicks
208
+ """
209
+ if pred_masks is None: # if pred_masks is not provided, treat it as empty
210
+ pred_masks = torch.zeros_like(gt_masks)
211
+ assert gt_masks.dtype == torch.bool and gt_masks.size(1) == 1
212
+ assert pred_masks.dtype == torch.bool and pred_masks.shape == gt_masks.shape
213
+ assert num_pt >= 0
214
+
215
+ B, _, H_im, W_im = gt_masks.shape
216
+ device = gt_masks.device
217
+
218
+ # false positive region, a new point sampled in this region should have
219
+ # negative label to correct the FP error
220
+ fp_masks = ~gt_masks & pred_masks
221
+ # false negative region, a new point sampled in this region should have
222
+ # positive label to correct the FN error
223
+ fn_masks = gt_masks & ~pred_masks
224
+ # whether the prediction completely match the ground-truth on each mask
225
+ all_correct = torch.all((gt_masks == pred_masks).flatten(2), dim=2)
226
+ all_correct = all_correct[..., None, None]
227
+
228
+ # channel 0 is FP map, while channel 1 is FN map
229
+ pts_noise = torch.rand(B, num_pt, H_im, W_im, 2, device=device)
230
+ # sample a negative new click from FP region or a positive new click
231
+ # from FN region, depend on where the maximum falls,
232
+ # and in case the predictions are all correct (no FP or FN), we just
233
+ # sample a negative click from the background region
234
+ pts_noise[..., 0] *= fp_masks | (all_correct & ~gt_masks)
235
+ if positive_only:
236
+ pts_noise[..., 0] = -1
237
+ pts_noise[..., 1] *= fn_masks
238
+ pts_idx = pts_noise.flatten(2).argmax(dim=2)
239
+ labels = (pts_idx % 2).to(torch.int32)
240
+ pts_idx = pts_idx // 2
241
+ pts_x = pts_idx % W_im
242
+ pts_y = pts_idx // W_im
243
+ points = torch.stack([pts_x, pts_y], dim=2).to(torch.float)
244
+ return points, labels
245
+
246
+
247
+ def sample_one_point_from_error_center(gt_masks, pred_masks, padding=True, positive_only=False):
248
+ """
249
+ Sample 1 random point (along with its label) from the center of each error region,
250
+ that is, the point with the largest distance to the boundary of each error region.
251
+ This is the RITM sampling method from https://github.com/saic-vul/ritm_interactive_segmentation/blob/master/isegm/inference/clicker.py
252
+
253
+ Inputs:
254
+ - gt_masks: [B, 1, H_im, W_im] masks, dtype=torch.bool
255
+ - pred_masks: [B, 1, H_im, W_im] masks, dtype=torch.bool or None
256
+ - padding: if True, pad with boundary of 1 px for distance transform
257
+
258
+ Outputs:
259
+ - points: [B, 1, 2], dtype=torch.float, contains (x, y) coordinates of each sampled point
260
+ - labels: [B, 1], dtype=torch.int32, where 1 means positive clicks and 0 means negative clicks
261
+ """
262
+ import cv2
263
+
264
+ if pred_masks is None:
265
+ pred_masks = torch.zeros_like(gt_masks)
266
+ assert gt_masks.dtype == torch.bool and gt_masks.size(1) == 1
267
+ assert pred_masks.dtype == torch.bool and pred_masks.shape == gt_masks.shape
268
+
269
+ B, _, _, W_im = gt_masks.shape
270
+ device = gt_masks.device
271
+
272
+ # false positive region, a new point sampled in this region should have
273
+ # negative label to correct the FP error
274
+ fp_masks = ~gt_masks & pred_masks
275
+ # false negative region, a new point sampled in this region should have
276
+ # positive label to correct the FN error
277
+ fn_masks = gt_masks & ~pred_masks
278
+
279
+ fp_masks = fp_masks.cpu().numpy()
280
+ fn_masks = fn_masks.cpu().numpy()
281
+ points = torch.zeros(B, 1, 2, dtype=torch.float)
282
+ labels = torch.ones(B, 1, dtype=torch.int32)
283
+ for b in range(B):
284
+ fn_mask = fn_masks[b, 0]
285
+ fp_mask = fp_masks[b, 0]
286
+ if padding:
287
+ fn_mask = np.pad(fn_mask, ((1, 1), (1, 1)), "constant")
288
+ fp_mask = np.pad(fp_mask, ((1, 1), (1, 1)), "constant")
289
+ # compute the distance of each point in FN/FP region to its boundary
290
+ fn_mask_dt = cv2.distanceTransform(fn_mask.astype(np.uint8), cv2.DIST_L2, 0)
291
+ fp_mask_dt = cv2.distanceTransform(fp_mask.astype(np.uint8), cv2.DIST_L2, 0)
292
+ if padding:
293
+ fn_mask_dt = fn_mask_dt[1:-1, 1:-1]
294
+ fp_mask_dt = fp_mask_dt[1:-1, 1:-1]
295
+
296
+ # take the point in FN/FP region with the largest distance to its boundary
297
+ fn_mask_dt_flat = fn_mask_dt.reshape(-1)
298
+ fp_mask_dt_flat = fp_mask_dt.reshape(-1)
299
+ fn_argmax = np.argmax(fn_mask_dt_flat)
300
+ fp_argmax = np.argmax(fp_mask_dt_flat)
301
+ is_positive = fn_mask_dt_flat[fn_argmax] > fp_mask_dt_flat[fp_argmax]
302
+ if positive_only:
303
+ is_positive = True
304
+ pt_idx = fn_argmax if is_positive else fp_argmax
305
+ points[b, 0, 0] = pt_idx % W_im # x
306
+ points[b, 0, 1] = pt_idx // W_im # y
307
+ labels[b, 0] = int(is_positive)
308
+
309
+ points = points.to(device)
310
+ labels = labels.to(device)
311
+ return points, labels
312
+
313
+
314
+ def get_next_point(gt_masks, pred_masks, method, positive_only=True):
315
+ if method == "uniform":
316
+ return sample_random_points_from_errors(gt_masks, pred_masks, positive_only=positive_only)
317
+ elif method == "center":
318
+ return sample_one_point_from_error_center(gt_masks, pred_masks, positive_only=positive_only)
319
+ else:
320
+ raise ValueError(f"unknown sampling method {method}")
sam2/sam2_image_predictor.py ADDED
@@ -0,0 +1,428 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+
4
+ # This source code is licensed under the license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ import logging
8
+
9
+ from typing import List, Optional, Tuple, Union
10
+
11
+ import numpy as np
12
+ import torch
13
+ from PIL.Image import Image
14
+
15
+ from sam2.modeling.sam2_base import SAM2Base
16
+
17
+ from sam2.utils.transforms import SAM2Transforms
18
+
19
+
20
+ class SAM2ImagePredictor:
21
+
22
+ def __init__(
23
+ self,
24
+ sam_model: SAM2Base,
25
+ mask_threshold=0.0,
26
+ max_hole_area=0.0,
27
+ max_sprinkle_area=0.0,
28
+ **kwargs,
29
+ ) -> None:
30
+ """
31
+ Uses SAM-2 to calculate the image embedding for an image, and then
32
+ allow repeated, efficient mask prediction given prompts.
33
+
34
+ Arguments:
35
+ sam_model (Sam-2): The model to use for mask prediction.
36
+ mask_threshold (float): The threshold to use when converting mask logits
37
+ to binary masks. Masks are thresholded at 0 by default.
38
+ max_hole_area (int): If max_hole_area > 0, we fill small holes in up to
39
+ the maximum area of max_hole_area in low_res_masks.
40
+ max_sprinkle_area (int): If max_sprinkle_area > 0, we remove small sprinkles up to
41
+ the maximum area of max_sprinkle_area in low_res_masks.
42
+ """
43
+ super().__init__()
44
+ self.model = sam_model
45
+ self._transforms = SAM2Transforms(
46
+ resolution=self.model.image_size,
47
+ mask_threshold=mask_threshold,
48
+ max_hole_area=max_hole_area,
49
+ max_sprinkle_area=max_sprinkle_area,
50
+ )
51
+
52
+ # Predictor state
53
+ self._is_image_set = False
54
+ self._features = None
55
+ self._orig_hw = None
56
+ # Whether the predictor is set for single image or a batch of images
57
+ self._is_batch = False
58
+
59
+ # Predictor config
60
+ self.mask_threshold = mask_threshold
61
+
62
+ # Spatial dim for backbone feature maps
63
+ self._bb_feat_sizes = [
64
+ (256, 256),
65
+ (128, 128),
66
+ (64, 64),
67
+ ]
68
+
69
+ @classmethod
70
+ def from_pretrained(cls, model_id: str, **kwargs) -> "SAM2ImagePredictor":
71
+ """
72
+ Load a pretrained model from the Hugging Face hub.
73
+
74
+ Arguments:
75
+ model_id (str): The Hugging Face repository ID.
76
+ **kwargs: Additional arguments to pass to the model constructor.
77
+
78
+ Returns:
79
+ (SAM2ImagePredictor): The loaded model.
80
+ """
81
+ from sam2.build_sam import build_sam2_hf
82
+
83
+ sam_model = build_sam2_hf(model_id, **kwargs)
84
+ return cls(sam_model, **kwargs)
85
+
86
+ @torch.no_grad()
87
+ def set_image(
88
+ self,
89
+ image: Union[np.ndarray, Image],
90
+ ) -> None:
91
+ """
92
+ Calculates the image embeddings for the provided image, allowing
93
+ masks to be predicted with the 'predict' method.
94
+
95
+ Arguments:
96
+ image (np.ndarray or PIL Image): The input image to embed in RGB format. The image should be in HWC format if np.ndarray, or WHC format if PIL Image
97
+ with pixel values in [0, 255].
99
+ """
100
+ self.reset_predictor()
101
+ # Transform the image to the form expected by the model
102
+ if isinstance(image, np.ndarray):
103
+ logging.info("For numpy array image, we assume (HxWxC) format")
104
+ self._orig_hw = [image.shape[:2]]
105
+ elif isinstance(image, Image):
106
+ w, h = image.size
107
+ self._orig_hw = [(h, w)]
108
+ else:
109
+ raise NotImplementedError("Image format not supported")
110
+
111
+ input_image = self._transforms(image)
112
+ input_image = input_image[None, ...].to(self.device)
113
+
114
+ assert (len(input_image.shape) == 4
115
+ and input_image.shape[1] == 3), f"input_image must be of size 1x3xHxW, got {input_image.shape}"
116
+ logging.info("Computing image embeddings for the provided image...")
117
+ backbone_out = self.model.forward_image(input_image)
118
+ _, vision_feats, _, _ = self.model._prepare_backbone_features(backbone_out)
119
+ # Add no_mem_embed, which is added to the lowest-resolution feature map during training on videos
120
+ if self.model.directly_add_no_mem_embed:
121
+ vision_feats[-1] = vision_feats[-1] + self.model.no_mem_embed
122
+
123
+ feats = [
124
+ feat.permute(1, 2, 0).view(1, -1, *feat_size)
125
+ for feat, feat_size in zip(vision_feats[::-1], self._bb_feat_sizes[::-1])
126
+ ][::-1]
127
+ self._features = {"image_embed": feats[-1], "high_res_feats": feats[:-1]}
128
+ self._is_image_set = True
129
+ logging.info("Image embeddings computed.")
130
+
131
+ @torch.no_grad()
132
+ def set_image_batch(
133
+ self,
134
+ image_list: List[np.ndarray],
135
+ ) -> None:
136
+ """
137
+ Calculates the image embeddings for the provided image batch, allowing
138
+ masks to be predicted with the 'predict_batch' method.
139
+
140
+ Arguments:
141
+ image_list (List[np.ndarray]): The input images to embed in RGB format. The image should be in HWC format if np.ndarray
142
+ with pixel values in [0, 255].
143
+ """
144
+ self.reset_predictor()
145
+ assert isinstance(image_list, list)
146
+ self._orig_hw = []
147
+ for image in image_list:
148
+ assert isinstance(image,
149
+ np.ndarray), "Images are expected to be an np.ndarray in RGB format, and of shape HWC"
150
+ self._orig_hw.append(image.shape[:2])
151
+ # Transform the image to the form expected by the model
152
+ img_batch = self._transforms.forward_batch(image_list)
153
+ img_batch = img_batch.to(self.device)
154
+ batch_size = img_batch.shape[0]
155
+ assert (len(img_batch.shape) == 4
156
+ and img_batch.shape[1] == 3), f"img_batch must be of size Bx3xHxW, got {img_batch.shape}"
157
+ logging.info("Computing image embeddings for the provided images...")
158
+ backbone_out = self.model.forward_image(img_batch)
159
+ _, vision_feats, _, _ = self.model._prepare_backbone_features(backbone_out)
160
+ # Add no_mem_embed, which is added to the lowest-resolution feature map during training on videos
161
+ if self.model.directly_add_no_mem_embed:
162
+ vision_feats[-1] = vision_feats[-1] + self.model.no_mem_embed
163
+
164
+ feats = [
165
+ feat.permute(1, 2, 0).view(batch_size, -1, *feat_size)
166
+ for feat, feat_size in zip(vision_feats[::-1], self._bb_feat_sizes[::-1])
167
+ ][::-1]
168
+ self._features = {"image_embed": feats[-1], "high_res_feats": feats[:-1]}
169
+ self._is_image_set = True
170
+ self._is_batch = True
171
+ logging.info("Image embeddings computed.")
172
+
173
+ def predict_batch(
174
+ self,
175
+ point_coords_batch: List[np.ndarray] = None,
176
+ point_labels_batch: List[np.ndarray] = None,
177
+ box_batch: List[np.ndarray] = None,
178
+ mask_input_batch: List[np.ndarray] = None,
179
+ multimask_output: bool = True,
180
+ return_logits: bool = False,
181
+ normalize_coords=True,
182
+ ) -> Tuple[List[np.ndarray], List[np.ndarray], List[np.ndarray]]:
183
+ """This function is very similar to predict(...), however it is used for batched mode, when the model is expected to generate predictions on multiple images.
184
+ It returns a tuple of lists of masks, ious, and low_res_masks_logits.
185
+ """
186
+ assert self._is_batch, "This function should only be used when in batched mode"
187
+ if not self._is_image_set:
188
+ raise RuntimeError("An image must be set with .set_image_batch(...) before mask prediction.")
189
+ num_images = len(self._features["image_embed"])
190
+ all_masks = []
191
+ all_ious = []
192
+ all_low_res_masks = []
193
+ for img_idx in range(num_images):
194
+ # Transform input prompts
195
+ point_coords = (point_coords_batch[img_idx] if point_coords_batch is not None else None)
196
+ point_labels = (point_labels_batch[img_idx] if point_labels_batch is not None else None)
197
+ box = box_batch[img_idx] if box_batch is not None else None
198
+ mask_input = (mask_input_batch[img_idx] if mask_input_batch is not None else None)
199
+ mask_input, unnorm_coords, labels, unnorm_box = self._prep_prompts(
200
+ point_coords,
201
+ point_labels,
202
+ box,
203
+ mask_input,
204
+ normalize_coords,
205
+ img_idx=img_idx,
206
+ )
207
+ masks, iou_predictions, low_res_masks = self._predict(
208
+ unnorm_coords,
209
+ labels,
210
+ unnorm_box,
211
+ mask_input,
212
+ multimask_output,
213
+ return_logits=return_logits,
214
+ img_idx=img_idx,
215
+ )
216
+ masks_np = masks.squeeze(0).float().detach().cpu().numpy()
217
+ iou_predictions_np = (iou_predictions.squeeze(0).float().detach().cpu().numpy())
218
+ low_res_masks_np = low_res_masks.squeeze(0).float().detach().cpu().numpy()
219
+ all_masks.append(masks_np)
220
+ all_ious.append(iou_predictions_np)
221
+ all_low_res_masks.append(low_res_masks_np)
222
+
223
+ return all_masks, all_ious, all_low_res_masks
224
+
225
+ def predict(
226
+ self,
227
+ point_coords: Optional[np.ndarray] = None,
228
+ point_labels: Optional[np.ndarray] = None,
229
+ box: Optional[np.ndarray] = None,
230
+ mask_input: Optional[np.ndarray] = None,
231
+ multimask_output: bool = True,
232
+ return_logits: bool = False,
233
+ normalize_coords=True,
234
+ ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
235
+ """
236
+ Predict masks for the given input prompts, using the currently set image.
237
+
238
+ Arguments:
239
+ point_coords (np.ndarray or None): A Nx2 array of point prompts to the
240
+ model. Each point is in (X,Y) in pixels.
241
+ point_labels (np.ndarray or None): A length N array of labels for the
242
+ point prompts. 1 indicates a foreground point and 0 indicates a
243
+ background point.
244
+ box (np.ndarray or None): A length 4 array given a box prompt to the
245
+ model, in XYXY format.
246
+ mask_input (np.ndarray): A low resolution mask input to the model, typically
247
+ coming from a previous prediction iteration. Has form 1xHxW, where
248
+ for SAM, H=W=256.
249
+ multimask_output (bool): If true, the model will return three masks.
250
+ For ambiguous input prompts (such as a single click), this will often
251
+ produce better masks than a single prediction. If only a single
252
+ mask is needed, the model's predicted quality score can be used
253
+ to select the best mask. For non-ambiguous prompts, such as multiple
254
+ input prompts, multimask_output=False can give better results.
255
+ return_logits (bool): If true, returns un-thresholded masks logits
256
+ instead of a binary mask.
257
+ normalize_coords (bool): If true, the point coordinates will be normalized to the range [0,1], and point_coords is expected to be given with respect to the image dimensions.
258
+
259
+ Returns:
260
+ (np.ndarray): The output masks in CxHxW format, where C is the
261
+ number of masks, and (H, W) is the original image size.
262
+ (np.ndarray): An array of length C containing the model's
263
+ predictions for the quality of each mask.
264
+ (np.ndarray): An array of shape CxHxW, where C is the number
265
+ of masks and H=W=256. These low resolution logits can be passed to
266
+ a subsequent iteration as mask input.
267
+ """
268
+ if not self._is_image_set:
269
+ raise RuntimeError("An image must be set with .set_image(...) before mask prediction.")
270
+
271
+ # Transform input prompts
272
+
273
+ mask_input, unnorm_coords, labels, unnorm_box = self._prep_prompts(point_coords, point_labels, box, mask_input,
274
+ normalize_coords)
275
+
276
+ masks, iou_predictions, low_res_masks = self._predict(
277
+ unnorm_coords,
278
+ labels,
279
+ unnorm_box,
280
+ mask_input,
281
+ multimask_output,
282
+ return_logits=return_logits,
283
+ )
284
+
285
+ masks_np = masks.squeeze(0).float().detach().cpu().numpy()
286
+ iou_predictions_np = iou_predictions.squeeze(0).float().detach().cpu().numpy()
287
+ low_res_masks_np = low_res_masks.squeeze(0).float().detach().cpu().numpy()
288
+ return masks_np, iou_predictions_np, low_res_masks_np
289
+
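End-to-end usage sketch of the documented flow (hypothetical: `predictor` is an already-constructed SAM2ImagePredictor):

import numpy as np

image = np.zeros((480, 640, 3), dtype=np.uint8)   # HWC, RGB, values in [0, 255]
predictor.set_image(image)
masks, ious, low_res = predictor.predict(
    point_coords=np.array([[320, 240]]),          # one (x, y) click
    point_labels=np.array([1]),                   # 1 = foreground
    multimask_output=True,
)
# masks: [3, 480, 640]; ious: [3]; low_res: [3, 256, 256]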
290
+ def _prep_prompts(self, point_coords, point_labels, box, mask_logits, normalize_coords, img_idx=-1):
291
+
292
+ unnorm_coords, labels, unnorm_box, mask_input = None, None, None, None
293
+ if point_coords is not None:
294
+ assert (point_labels is not None), "point_labels must be supplied if point_coords is supplied."
295
+ point_coords = torch.as_tensor(point_coords, dtype=torch.float, device=self.device)
296
+ unnorm_coords = self._transforms.transform_coords(
297
+ point_coords, normalize=normalize_coords, orig_hw=self._orig_hw[img_idx])
298
+ labels = torch.as_tensor(point_labels, dtype=torch.int, device=self.device)
299
+ if len(unnorm_coords.shape) == 2:
300
+ unnorm_coords, labels = unnorm_coords[None, ...], labels[None, ...]
301
+ if box is not None:
302
+ box = torch.as_tensor(box, dtype=torch.float, device=self.device)
303
+ unnorm_box = self._transforms.transform_boxes(
304
+ box, normalize=normalize_coords, orig_hw=self._orig_hw[img_idx]) # Bx2x2
305
+ if mask_logits is not None:
306
+ mask_input = torch.as_tensor(mask_logits, dtype=torch.float, device=self.device)
307
+ if len(mask_input.shape) == 3:
308
+ mask_input = mask_input[None, :, :, :]
309
+ return mask_input, unnorm_coords, labels, unnorm_box
310
+
311
+ @torch.no_grad()
312
+ def _predict(
313
+ self,
314
+ point_coords: Optional[torch.Tensor],
315
+ point_labels: Optional[torch.Tensor],
316
+ boxes: Optional[torch.Tensor] = None,
317
+ mask_input: Optional[torch.Tensor] = None,
318
+ multimask_output: bool = True,
319
+ return_logits: bool = False,
320
+ img_idx: int = -1,
321
+ ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
322
+ """
323
+ Predict masks for the given input prompts, using the currently set image.
324
+ Input prompts are batched torch tensors and are expected to already be
325
+ transformed to the input frame using SAM2Transforms.
326
+
327
+ Arguments:
328
+ point_coords (torch.Tensor or None): A BxNx2 array of point prompts to the
329
+ model. Each point is in (X,Y) in pixels.
330
+ point_labels (torch.Tensor or None): A BxN array of labels for the
331
+ point prompts. 1 indicates a foreground point and 0 indicates a
332
+ background point.
333
+ boxes (np.ndarray or None): A Bx4 array given a box prompt to the
334
+ model, in XYXY format.
335
+ mask_input (np.ndarray): A low resolution mask input to the model, typically
336
+ coming from a previous prediction iteration. Has form Bx1xHxW, where
337
+ for SAM, H=W=256. Masks returned by a previous iteration of the
338
+ predict method do not need further transformation.
339
+ multimask_output (bool): If true, the model will return three masks.
340
+ For ambiguous input prompts (such as a single click), this will often
341
+ produce better masks than a single prediction. If only a single
342
+ mask is needed, the model's predicted quality score can be used
343
+ to select the best mask. For non-ambiguous prompts, such as multiple
344
+ input prompts, multimask_output=False can give better results.
345
+ return_logits (bool): If true, returns un-thresholded mask logits
346
+ instead of a binary mask.
347
+
348
+ Returns:
349
+ (torch.Tensor): The output masks in BxCxHxW format, where C is the
350
+ number of masks, and (H, W) is the original image size.
351
+ (torch.Tensor): An array of shape BxC containing the model's
352
+ predictions for the quality of each mask.
353
+ (torch.Tensor): An array of shape BxCxHxW, where C is the number
354
+ of masks and H=W=256. These low res logits can be passed to
355
+ a subsequent iteration as mask input.
356
+ """
357
+ if not self._is_image_set:
358
+ raise RuntimeError("An image must be set with .set_image(...) before mask prediction.")
359
+
360
+ if point_coords is not None:
361
+ concat_points = (point_coords, point_labels)
362
+ else:
363
+ concat_points = None
364
+
365
+ # Embed prompts
366
+ if boxes is not None:
367
+ box_coords = boxes.reshape(-1, 2, 2)
368
+ box_labels = torch.tensor([[2, 3]], dtype=torch.int, device=boxes.device)
369
+ box_labels = box_labels.repeat(boxes.size(0), 1)
370
+ # we merge "boxes" and "points" into a single "concat_points" input (where
371
+ # boxes are added at the beginning) to sam_prompt_encoder
372
+ if concat_points is not None:
373
+ concat_coords = torch.cat([box_coords, concat_points[0]], dim=1)
374
+ concat_labels = torch.cat([box_labels, concat_points[1]], dim=1)
375
+ concat_points = (concat_coords, concat_labels)
376
+ else:
377
+ concat_points = (box_coords, box_labels)
378
+
379
+ sparse_embeddings, dense_embeddings = self.model.sam_prompt_encoder(
380
+ points=concat_points,
381
+ boxes=None,
382
+ masks=mask_input,
383
+ )
384
+
385
+ # Predict masks
386
+ batched_mode = (concat_points is not None and concat_points[0].shape[0] > 1) # multi object prediction
387
+ high_res_features = [feat_level[img_idx].unsqueeze(0) for feat_level in self._features["high_res_feats"]]
388
+ low_res_masks, iou_predictions, _, _ = self.model.sam_mask_decoder(
389
+ image_embeddings=self._features["image_embed"][img_idx].unsqueeze(0),
390
+ image_pe=self.model.sam_prompt_encoder.get_dense_pe(),
391
+ sparse_prompt_embeddings=sparse_embeddings,
392
+ dense_prompt_embeddings=dense_embeddings,
393
+ multimask_output=multimask_output,
394
+ repeat_image=batched_mode,
395
+ high_res_features=high_res_features,
396
+ )
397
+
398
+ # Upscale the masks to the original image resolution
399
+ masks = self._transforms.postprocess_masks(low_res_masks, self._orig_hw[img_idx])
400
+ low_res_masks = torch.clamp(low_res_masks, -32.0, 32.0)
401
+ if not return_logits:
402
+ masks = masks > self.mask_threshold
403
+
404
+ return masks, iou_predictions, low_res_masks
405
+
406
+ def get_image_embedding(self) -> torch.Tensor:
407
+ """
408
+ Returns the image embeddings for the currently set image, with
409
+ shape 1xCxHxW, where C is the embedding dimension and (H,W) are
410
+ the embedding spatial dimension of SAM (typically C=256, H=W=64).
411
+ """
412
+ if not self._is_image_set:
413
+ raise RuntimeError("An image must be set with .set_image(...) to generate an embedding.")
414
+ assert (self._features is not None), "Features must exist if an image has been set."
415
+ return self._features["image_embed"]
416
+
417
+ @property
418
+ def device(self) -> torch.device:
419
+ return self.model.device
420
+
421
+ def reset_predictor(self) -> None:
422
+ """
423
+ Resets the image embeddings and other state variables.
424
+ """
425
+ self._is_image_set = False
426
+ self._features = None
427
+ self._orig_hw = None
428
+ self._is_batch = False
sam2/sam2_train.py ADDED
@@ -0,0 +1,575 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+
4
+ # This source code is licensed under the license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ import logging
8
+
9
+ import numpy as np
10
+ import torch
11
+ import torch.distributed
12
+ from tensordict import tensorclass
13
+
14
+ from sam2.modeling.sam2_base import SAM2Base
15
+ from sam2.modeling.sam2_utils import get_next_point, sample_box_points
16
+ from sam2.utils.misc import concat_points
17
+
18
+
19
+ @tensorclass
20
+ class BatchedVideoDatapoint:
21
+ """
22
+ This class represents a batch of videos with associated annotations.
23
+ Attributes:
24
+ img_batch: A [TxBxCxHxW] tensor containing the image data for each frame in the batch, where T is the number of frames per video, and B is the number of videos in the batch.
25
+ obj_to_frame_idx: A [TxOx2] tensor containing the (frame_idx, video_idx) pair in img_batch to which each object belongs. O is the number of objects in the batch.
26
+ masks: A [TxOxHxW] tensor containing binary masks for each object in the batch.
27
+ """
28
+
29
+ img_batch: torch.FloatTensor
30
+ obj_to_frame_idx: torch.IntTensor
31
+ masks: torch.BoolTensor
32
+
33
+ @property
34
+ def num_frames(self) -> int:
35
+ """
36
+ Returns the number of frames per video.
37
+ """
38
+ return self.img_batch.shape[0]
39
+
40
+ @property
41
+ def num_videos(self) -> int:
42
+ """
43
+ Returns the number of videos in the batch.
44
+ """
45
+ return self.img_batch.shape[1]
46
+
47
+ @property
48
+ def flat_obj_to_img_idx(self) -> torch.IntTensor:
49
+ """
50
+ Returns a flattened tensor containing the object to img index.
51
+ The flat index can be used to access a flattened img_batch of shape [(T*B)xCxHxW]
52
+ """
53
+ frame_idx, video_idx = self.obj_to_frame_idx.unbind(dim=-1)
54
+ flat_idx = video_idx * self.num_frames + frame_idx
55
+ return flat_idx
56
+
57
+ @property
58
+ def flat_img_batch(self) -> torch.FloatTensor:
59
+ """
60
+ Returns a flattened img_batch_tensor of shape [(B*T)xCxHxW]
61
+ """
62
+ return self.img_batch.transpose(0, 1).flatten(0, 1)
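A small self-contained sketch of how the two flattened views above line up (sizes are illustrative, not from the file): `flat_obj_to_img_idx` computes `video_idx * T + frame_idx`, which indexes the same frame in the `flat_img_batch` produced by `transpose(0, 1).flatten(0, 1)`.

import torch

T, B, C, H, W = 3, 2, 3, 4, 4  # tiny illustrative sizes
img_batch = torch.arange(T * B, dtype=torch.float32).view(T, B, 1, 1, 1).expand(T, B, C, H, W)
flat = img_batch.transpose(0, 1).flatten(0, 1)  # shape (B*T, C, H, W)
# An object on frame 2 of video 1 maps to flat index 1 * T + 2 == 5.
assert torch.equal(flat[1 * T + 2], img_batch[2, 1])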
63
+
64
+
65
+ class SAM2Train(SAM2Base):
66
+
67
+ def __init__(
68
+ self,
69
+ image_encoder,
70
+ memory_attention=None,
71
+ memory_encoder=None,
72
+ prob_to_use_pt_input_for_train=0.0,
73
+ prob_to_use_pt_input_for_eval=0.0,
74
+ prob_to_use_box_input_for_train=0.0,
75
+ prob_to_use_box_input_for_eval=0.0,
76
+ # if it is greater than 1, we apply interactive point sampling on the 1st frame and other randomly selected frames
77
+ num_frames_to_correct_for_train=1, # default: only iteratively sample on first frame
78
+ num_frames_to_correct_for_eval=1, # default: only iteratively sample on first frame
79
+ rand_frames_to_correct_for_train=False,
80
+ rand_frames_to_correct_for_eval=False,
81
+ # how many frames to use as initial conditioning frames (for both point input and mask input; the first frame is always used as an initial conditioning frame)
82
+ # - if `rand_init_cond_frames` below is True, we randomly sample 1~num_init_cond_frames initial conditioning frames
83
+ # - otherwise we sample a fixed number of num_init_cond_frames initial conditioning frames
84
+ # note: for point input, we sample correction points on all such initial conditioning frames, and we require that `num_frames_to_correct` >= `num_init_cond_frames`;
85
+ # these are initial conditioning frames because as we track the video, more conditioning frames might be added
86
+ # when a frame receives correction clicks under point input if `add_all_frames_to_correct_as_cond=True`
87
+ num_init_cond_frames_for_train=1, # default: only use the first frame as initial conditioning frame
88
+ num_init_cond_frames_for_eval=1, # default: only use the first frame as initial conditioning frame
89
+ rand_init_cond_frames_for_train=True, # default: random 1~num_init_cond_frames_for_train cond frames (to be consistent w/ previous TA data loader)
90
+ rand_init_cond_frames_for_eval=False,
91
+ # if `add_all_frames_to_correct_as_cond` is True, we also append to the conditioning frame list any frame that receives a later correction click
92
+ # if `add_all_frames_to_correct_as_cond` is False, we restrict the conditioning frame list to only the initial conditioning frames
93
+ add_all_frames_to_correct_as_cond=False,
94
+ # how many additional correction points to sample (on each frame selected to be corrected)
95
+ # note that the first frame receives an initial input click (in addition to any correction clicks)
96
+ num_correction_pt_per_frame=7,
97
+ # method for point sampling during evaluation
98
+ # "uniform" (sample uniformly from error region) or "center" (use the point with the largest distance to error region boundary)
99
+ # default to "center" to be consistent with evaluation in the SAM paper
100
+ pt_sampling_for_eval="center",
101
+ # During training, we optionally allow sampling the correction points from GT regions
102
+ # instead of the prediction error regions with a small probability. This might allow the
103
+ # model to overfit less to the error regions in training datasets
104
+ prob_to_sample_from_gt_for_train=0.0,
105
+ use_act_ckpt_iterative_pt_sampling=False,
106
+ # whether to forward image features per frame (as it's being tracked) during evaluation, instead of forwarding image features
107
+ # of all frames at once. This avoids backbone OOM errors on very long videos in evaluation, but could be slightly slower.
108
+ forward_backbone_per_frame_for_eval=False,
109
+ freeze_image_encoder=False,
110
+ **kwargs,
111
+ ):
112
+ super().__init__(image_encoder, memory_attention, memory_encoder, **kwargs)
113
+ self.use_act_ckpt_iterative_pt_sampling = use_act_ckpt_iterative_pt_sampling
114
+ self.forward_backbone_per_frame_for_eval = forward_backbone_per_frame_for_eval
115
+
116
+ # Point sampler and conditioning frames
117
+ self.prob_to_use_pt_input_for_train = prob_to_use_pt_input_for_train
118
+ self.prob_to_use_box_input_for_train = prob_to_use_box_input_for_train
119
+ self.prob_to_use_pt_input_for_eval = prob_to_use_pt_input_for_eval
120
+ self.prob_to_use_box_input_for_eval = prob_to_use_box_input_for_eval
121
+ if prob_to_use_pt_input_for_train > 0 or prob_to_use_pt_input_for_eval > 0:
122
+ logging.info(f"Training with points (sampled from masks) as inputs with p={prob_to_use_pt_input_for_train}")
123
+ assert num_frames_to_correct_for_train >= num_init_cond_frames_for_train
124
+ assert num_frames_to_correct_for_eval >= num_init_cond_frames_for_eval
125
+
126
+ self.num_frames_to_correct_for_train = num_frames_to_correct_for_train
127
+ self.num_frames_to_correct_for_eval = num_frames_to_correct_for_eval
128
+ self.rand_frames_to_correct_for_train = rand_frames_to_correct_for_train
129
+ self.rand_frames_to_correct_for_eval = rand_frames_to_correct_for_eval
130
+ # Initial multi-conditioning frames
131
+ self.num_init_cond_frames_for_train = num_init_cond_frames_for_train
132
+ self.num_init_cond_frames_for_eval = num_init_cond_frames_for_eval
133
+ self.rand_init_cond_frames_for_train = rand_init_cond_frames_for_train
134
+ self.rand_init_cond_frames_for_eval = rand_init_cond_frames_for_eval
135
+ self.add_all_frames_to_correct_as_cond = add_all_frames_to_correct_as_cond
136
+ self.num_correction_pt_per_frame = num_correction_pt_per_frame
137
+ self.pt_sampling_for_eval = pt_sampling_for_eval
138
+ self.prob_to_sample_from_gt_for_train = prob_to_sample_from_gt_for_train
139
+ # A random number generator with a fixed initial seed across GPUs
140
+ self.rng = np.random.default_rng(seed=42)
141
+
142
+ if freeze_image_encoder:
143
+ for p in self.image_encoder.parameters():
144
+ p.requires_grad = False
145
+
146
+ def forward(self, input: BatchedVideoDatapoint, hidden):
147
+ if self.training or not self.forward_backbone_per_frame_for_eval:
148
+ # precompute image features on all frames before tracking
149
+ backbone_out = self.forward_image(input.flat_img_batch)
150
+ else:
151
+ # defer image feature computation on a frame until it's being tracked
152
+ backbone_out = {"backbone_fpn": None, "vision_pos_enc": None}
153
+ # NOTE: backbone_out = self.prepare_prompt_inputs(backbone_out, input)
154
+ previous_stages_out = self.forward_tracking(backbone_out, input, hidden)
155
+
156
+ return previous_stages_out
157
+
158
+ def _prepare_backbone_features_per_frame(self, img_batch, img_ids):
159
+ """Compute the image backbone features on the fly for the given img_ids."""
160
+ # Only forward backbone on unique image ids to avoid repetitive computation
161
+ # (if `img_ids` has only one element, it's already unique so we skip this step).
162
+ if img_ids.numel() > 1:
163
+ unique_img_ids, inv_ids = torch.unique(img_ids, return_inverse=True)
164
+ else:
165
+ unique_img_ids, inv_ids = img_ids, None
166
+
167
+ # Compute the image features on those unique image ids
168
+ image = img_batch[unique_img_ids]
169
+ backbone_out = self.forward_image(image)
170
+ (
171
+ _,
172
+ vision_feats,
173
+ vision_pos_embeds,
174
+ feat_sizes,
175
+ ) = self._prepare_backbone_features(backbone_out)
176
+ # Inverse-map image features for `unique_img_ids` to the final image features
177
+ # for the original input `img_ids`.
178
+ if inv_ids is not None:
179
+ image = image[inv_ids]
180
+ vision_feats = [x[:, inv_ids] for x in vision_feats]
181
+ vision_pos_embeds = [x[:, inv_ids] for x in vision_pos_embeds]
182
+
183
+ return image, vision_feats, vision_pos_embeds, feat_sizes
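A standalone illustration of the deduplication step above (values are made up): `torch.unique(..., return_inverse=True)` lets the backbone run once per distinct frame, and `inv_ids` scatters the results back to the original order.

import torch

img_ids = torch.tensor([3, 1, 3])
unique_img_ids, inv_ids = torch.unique(img_ids, return_inverse=True)
# unique_img_ids == tensor([1, 3]); inv_ids == tensor([1, 0, 1])
feats = unique_img_ids.float() * 10  # stand-in for per-frame backbone features
assert torch.equal(feats[inv_ids], torch.tensor([30.0, 10.0, 30.0]))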
184
+
185
+ def prepare_prompt_inputs(self, backbone_out, input, start_frame_idx=0):
186
+ """
187
+ Prepare input mask, point or box prompts. Optionally, we allow tracking from
188
+ a custom `start_frame_idx` to the end of the video (for evaluation purposes).
189
+ """
190
+ # Load the ground-truth masks on all frames (so that we can later
191
+ # sample correction points from them)
192
+ # gt_masks_per_frame = {
193
+ # stage_id: targets.segments.unsqueeze(1) # [B, 1, H_im, W_im]
194
+ # for stage_id, targets in enumerate(input.find_targets)
195
+ # }
196
+ gt_masks_per_frame = {
197
+ stage_id: masks.unsqueeze(1) # [B, 1, H_im, W_im]
198
+ for stage_id, masks in enumerate(input.masks)
199
+ }
200
+ # gt_masks_per_frame = input.masks.unsqueeze(2) # [T,B,1,H_im,W_im] keep everything in tensor form
201
+ backbone_out["gt_masks_per_frame"] = gt_masks_per_frame
202
+ num_frames = input.num_frames
203
+ backbone_out["num_frames"] = num_frames
204
+
205
+ # Randomly decide whether to use point inputs or mask inputs
206
+ if self.training:
207
+ prob_to_use_pt_input = self.prob_to_use_pt_input_for_train
208
+ prob_to_use_box_input = self.prob_to_use_box_input_for_train
209
+ num_frames_to_correct = self.num_frames_to_correct_for_train
210
+ rand_frames_to_correct = self.rand_frames_to_correct_for_train
211
+ num_init_cond_frames = self.num_init_cond_frames_for_train
212
+ rand_init_cond_frames = self.rand_init_cond_frames_for_train
213
+ else:
214
+ prob_to_use_pt_input = self.prob_to_use_pt_input_for_eval
215
+ prob_to_use_box_input = self.prob_to_use_box_input_for_eval
216
+ num_frames_to_correct = self.num_frames_to_correct_for_eval
217
+ rand_frames_to_correct = self.rand_frames_to_correct_for_eval
218
+ num_init_cond_frames = self.num_init_cond_frames_for_eval
219
+ rand_init_cond_frames = self.rand_init_cond_frames_for_eval
220
+ if num_frames == 1:
221
+ # here we handle a special case for mixing video + SAM on image training,
222
+ # where we force using point input for the SAM task on static images
223
+ prob_to_use_pt_input = 1.0
224
+ num_frames_to_correct = 1
225
+ num_init_cond_frames = 1
226
+ assert num_init_cond_frames >= 1
227
+ # (here `self.rng.random()` returns value in range 0.0 <= X < 1.0)
228
+ use_pt_input = self.rng.random() < prob_to_use_pt_input
229
+ if rand_init_cond_frames and num_init_cond_frames > 1:
230
+ # randomly select 1 to `num_init_cond_frames` frames as initial conditioning frames
231
+ num_init_cond_frames = self.rng.integers(1, num_init_cond_frames, endpoint=True)
232
+ if (use_pt_input and rand_frames_to_correct and num_frames_to_correct > num_init_cond_frames):
233
+ # randomly select `num_init_cond_frames` to `num_frames_to_correct` frames to sample
234
+ # correction clicks (only for the case of point input)
235
+ num_frames_to_correct = self.rng.integers(num_init_cond_frames, num_frames_to_correct, endpoint=True)
236
+ backbone_out["use_pt_input"] = use_pt_input
237
+
238
+ # Sample initial conditioning frames
239
+ if num_init_cond_frames == 1:
240
+ init_cond_frames = [start_frame_idx] # starting frame
241
+ else:
242
+ # starting frame + randomly selected remaining frames (without replacement)
243
+ init_cond_frames = [start_frame_idx] + self.rng.choice(
244
+ range(start_frame_idx + 1, num_frames),
245
+ num_init_cond_frames - 1,
246
+ replace=False,
247
+ ).tolist()
248
+ backbone_out["init_cond_frames"] = init_cond_frames
249
+ backbone_out["frames_not_in_init_cond"] = [
250
+ t for t in range(start_frame_idx, num_frames) if t not in init_cond_frames
251
+ ]
252
+ # Prepare mask or point inputs on initial conditioning frames
253
+ backbone_out["mask_inputs_per_frame"] = {} # {frame_idx: <input_masks>}
254
+ backbone_out["point_inputs_per_frame"] = {} # {frame_idx: <input_points>}
255
+ for t in init_cond_frames:
256
+ if not use_pt_input:
257
+ backbone_out["mask_inputs_per_frame"][t] = gt_masks_per_frame[t]
258
+ else:
259
+ # During training, P(box) = prob_to_use_pt_input * prob_to_use_box_input
260
+ use_box_input = self.rng.random() < prob_to_use_box_input
261
+ if use_box_input:
262
+ points, labels = sample_box_points(gt_masks_per_frame[t])
263
+ else:
264
+ # (here we only sample **one initial point** on initial conditioning frames from the
265
+ # ground-truth mask; we may sample more correction points on the fly)
266
+ points, labels = get_next_point(
267
+ gt_masks=gt_masks_per_frame[t],
268
+ pred_masks=None,
269
+ method=("uniform" if self.training else self.pt_sampling_for_eval),
270
+ )
271
+
272
+ point_inputs = {"point_coords": points, "point_labels": labels}
273
+ backbone_out["point_inputs_per_frame"][t] = point_inputs
274
+
275
+ # Sample frames where we will add correction clicks on the fly
276
+ # based on the error between prediction and ground-truth masks
277
+ if not use_pt_input:
278
+ # no correction points will be sampled when using mask inputs
279
+ frames_to_add_correction_pt = []
280
+ elif num_frames_to_correct == num_init_cond_frames:
281
+ frames_to_add_correction_pt = init_cond_frames
282
+ else:
283
+ assert num_frames_to_correct > num_init_cond_frames
284
+ # initial cond frame + randomly selected remaining frames (without replacement)
285
+ extra_num = num_frames_to_correct - num_init_cond_frames
286
+ frames_to_add_correction_pt = (
287
+ init_cond_frames +
288
+ self.rng.choice(backbone_out["frames_not_in_init_cond"], extra_num, replace=False).tolist())
289
+ backbone_out["frames_to_add_correction_pt"] = frames_to_add_correction_pt
290
+
291
+ return backbone_out
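As a worked example of how these prompt-sampling knobs interact (numbers are illustrative, not defaults from this file): with prob_to_use_pt_input_for_train=0.5 and prob_to_use_box_input_for_train=0.5, a training clip receives mask prompts with probability 0.5, a single initial click with probability 0.5 * (1 - 0.5) = 0.25, and a box prompt with probability 0.5 * 0.5 = 0.25, matching the P(box) = prob_to_use_pt_input * prob_to_use_box_input note above.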
292
+
293
+ def forward_tracking(self, backbone_out, input: BatchedVideoDatapoint, hidden, return_dict=False):
294
+ """Forward video tracking on each frame (and sample correction clicks)."""
295
+ img_feats_already_computed = backbone_out["backbone_fpn"] is not None
296
+ if img_feats_already_computed:
297
+ # Prepare the backbone features
298
+ # - vision_feats and vision_pos_embeds are in (HW)BC format
299
+ (
300
+ _,
301
+ vision_feats,
302
+ vision_pos_embeds,
303
+ feat_sizes,
304
+ ) = self._prepare_backbone_features(backbone_out)
305
+
306
+ # Starting the stage loop
307
+ # NOTE: num_frames = backbone_out["num_frames"] =========================================
308
+ num_frames = input.num_frames
309
+ # =======================================================================================
310
+ # NOTE: init_cond_frames = backbone_out["init_cond_frames"] =============================
311
+ # init_cond_frames = list(range(num_frames))
312
+ init_cond_frames = [0]
313
+ # =======================================================================================
314
+ # NOTE: frames_to_add_correction_pt = backbone_out["frames_to_add_correction_pt"] =======
315
+ frames_to_add_correction_pt = []
316
+ # =======================================================================================
317
+ # first process all the initial conditioning frames to encode them as memory,
318
+ # and then condition on them to track the remaining frames
319
+ # NOTE: processing_order = init_cond_frames + backbone_out["frames_not_in_init_cond"] ===
320
+ frames_not_in_init_cond = [t for t in range(num_frames) if t not in init_cond_frames]
321
+ processing_order = init_cond_frames + frames_not_in_init_cond
322
+ # =======================================================================================
323
+ backbone_out["point_inputs_per_frame"] = {}
324
+ backbone_out["mask_inputs_per_frame"] = {}
325
+ # backbone_out["hidden_inputs_per_frame"] = {stage_id: hidden for stage_id in processing_order}
326
+ backbone_out["hidden_inputs_per_frame"] = {0: hidden}
327
+ backbone_out["gt_masks_per_frame"] = {
328
+ stage_id: masks.unsqueeze(1) # [B, 1, H_im, W_im]
329
+ for stage_id, masks in enumerate(input.masks)
330
+ }
331
+ # =======================================================================================
332
+ output_dict = {
333
+ "cond_frame_outputs": {}, # dict containing {frame_idx: <out>}
334
+ "non_cond_frame_outputs": {}, # dict containing {frame_idx: <out>}
335
+ }
336
+ for stage_id in processing_order:
337
+ # Get the image features for the current frames
338
+ # img_ids = input.find_inputs[stage_id].img_ids
339
+ img_ids = input.flat_obj_to_img_idx[stage_id]
340
+ if img_feats_already_computed:
341
+ # Retrieve image features according to img_ids (if they are already computed).
342
+ current_vision_feats = [x[:, img_ids] for x in vision_feats]
343
+ current_vision_pos_embeds = [x[:, img_ids] for x in vision_pos_embeds]
344
+ else:
345
+ # Otherwise, compute the image features on the fly for the given img_ids
346
+ # (this might be used for evaluation on long videos to avoid backbone OOM).
347
+ (
348
+ _,
349
+ current_vision_feats,
350
+ current_vision_pos_embeds,
351
+ feat_sizes,
352
+ ) = self._prepare_backbone_features_per_frame(input.flat_img_batch, img_ids)
353
+
354
+ # Get output masks based on this frame's prompts and previous memory
355
+ current_out = self.track_step(
356
+ frame_idx=stage_id,
357
+ is_init_cond_frame=stage_id in init_cond_frames,
358
+ current_vision_feats=current_vision_feats,
359
+ current_vision_pos_embeds=current_vision_pos_embeds,
360
+ feat_sizes=feat_sizes,
361
+ point_inputs=backbone_out["point_inputs_per_frame"].get(stage_id, None),
362
+ mask_inputs=backbone_out["mask_inputs_per_frame"].get(stage_id, None),
363
+ hidden_inputs=backbone_out["hidden_inputs_per_frame"].get(stage_id, None),
364
+ gt_masks=backbone_out["gt_masks_per_frame"].get(stage_id, None),
365
+ frames_to_add_correction_pt=frames_to_add_correction_pt,
366
+ output_dict=output_dict,
367
+ num_frames=num_frames,
368
+ )
369
+ # Append the output, depending on whether it's a conditioning frame
370
+ add_output_as_cond_frame = stage_id in init_cond_frames or (self.add_all_frames_to_correct_as_cond
371
+ and stage_id in frames_to_add_correction_pt)
372
+ if add_output_as_cond_frame:
373
+ output_dict["cond_frame_outputs"][stage_id] = current_out
374
+ else:
375
+ output_dict["non_cond_frame_outputs"][stage_id] = current_out
376
+
377
+ if return_dict:
378
+ return output_dict
379
+ # turn `output_dict` into a list for loss function
380
+ all_frame_outputs = {}
381
+ all_frame_outputs.update(output_dict["cond_frame_outputs"])
382
+ all_frame_outputs.update(output_dict["non_cond_frame_outputs"])
383
+ all_frame_outputs = [all_frame_outputs[t] for t in range(num_frames)]
384
+ # Make DDP happy with activation checkpointing by removing unused keys
385
+ all_frame_outputs = [{k: v for k, v in d.items() if k != "obj_ptr"} for d in all_frame_outputs]
386
+
387
+ return all_frame_outputs
388
+
389
+ def track_step(
390
+ self,
391
+ frame_idx,
392
+ is_init_cond_frame,
393
+ current_vision_feats,
394
+ current_vision_pos_embeds,
395
+ feat_sizes,
396
+ point_inputs,
397
+ mask_inputs,
398
+ hidden_inputs,
399
+ output_dict,
400
+ num_frames,
401
+ track_in_reverse=False, # tracking in reverse time order (for demo usage)
402
+ run_mem_encoder=True, # Whether to run the memory encoder on the predicted masks.
403
+ prev_sam_mask_logits=None, # The previously predicted SAM mask logits.
404
+ frames_to_add_correction_pt=None,
405
+ gt_masks=None,
406
+ ):
407
+ if frames_to_add_correction_pt is None:
408
+ frames_to_add_correction_pt = []
409
+ current_out, sam_outputs, high_res_features, pix_feat = self._track_step(
410
+ frame_idx,
411
+ is_init_cond_frame,
412
+ current_vision_feats,
413
+ current_vision_pos_embeds,
414
+ feat_sizes,
415
+ point_inputs,
416
+ mask_inputs,
417
+ hidden_inputs,
418
+ output_dict,
419
+ num_frames,
420
+ track_in_reverse,
421
+ prev_sam_mask_logits,
422
+ )
423
+
424
+ (
425
+ low_res_multimasks,
426
+ high_res_multimasks,
427
+ ious,
428
+ low_res_masks,
429
+ high_res_masks,
430
+ obj_ptr,
431
+ object_score_logits,
432
+ ) = sam_outputs
433
+
434
+ current_out["multistep_pred_masks"] = low_res_masks
435
+ current_out["multistep_pred_masks_high_res"] = high_res_masks
436
+ current_out["multistep_pred_multimasks"] = [low_res_multimasks]
437
+ current_out["multistep_pred_multimasks_high_res"] = [high_res_multimasks]
438
+ current_out["multistep_pred_ious"] = [ious]
439
+ current_out["multistep_point_inputs"] = [point_inputs]
440
+ current_out["multistep_object_score_logits"] = [object_score_logits]
441
+
442
+ # Optionally, sample correction points iteratively to correct the mask
443
+ if frame_idx in frames_to_add_correction_pt:
444
+ point_inputs, final_sam_outputs = self._iter_correct_pt_sampling(
445
+ is_init_cond_frame,
446
+ point_inputs,
447
+ gt_masks,
448
+ high_res_features,
449
+ pix_feat,
450
+ low_res_multimasks,
451
+ high_res_multimasks,
452
+ ious,
453
+ low_res_masks,
454
+ high_res_masks,
455
+ object_score_logits,
456
+ current_out,
457
+ )
458
+ (
459
+ _,
460
+ _,
461
+ _,
462
+ low_res_masks,
463
+ high_res_masks,
464
+ obj_ptr,
465
+ object_score_logits,
466
+ ) = final_sam_outputs
467
+
468
+ # Use the final prediction (after all correction steps) for output and eval
469
+ current_out["pred_masks"] = low_res_masks
470
+ current_out["pred_masks_high_res"] = high_res_masks
471
+ current_out["obj_ptr"] = obj_ptr
472
+
473
+ # Finally run the memory encoder on the predicted mask to encode
474
+ # it into a new memory feature (that can be used in future frames)
475
+ self._encode_memory_in_output(
476
+ current_vision_feats,
477
+ feat_sizes,
478
+ point_inputs,
479
+ run_mem_encoder,
480
+ high_res_masks,
481
+ object_score_logits,
482
+ current_out,
483
+ )
484
+ return current_out
485
+
486
+ def _iter_correct_pt_sampling(
487
+ self,
488
+ is_init_cond_frame,
489
+ point_inputs,
490
+ gt_masks,
491
+ high_res_features,
492
+ pix_feat_with_mem,
493
+ low_res_multimasks,
494
+ high_res_multimasks,
495
+ ious,
496
+ low_res_masks,
497
+ high_res_masks,
498
+ object_score_logits,
499
+ current_out,
500
+ ):
501
+
502
+ assert gt_masks is not None
503
+ all_pred_masks = [low_res_masks]
504
+ all_pred_high_res_masks = [high_res_masks]
505
+ all_pred_multimasks = [low_res_multimasks]
506
+ all_pred_high_res_multimasks = [high_res_multimasks]
507
+ all_pred_ious = [ious]
508
+ all_point_inputs = [point_inputs]
509
+ all_object_score_logits = [object_score_logits]
510
+ for _ in range(self.num_correction_pt_per_frame):
511
+ # sample a new point from the error between prediction and ground-truth
512
+ # (with a small probability, directly sample from GT masks instead of errors)
513
+ if self.training and self.prob_to_sample_from_gt_for_train > 0:
514
+ sample_from_gt = (self.rng.random() < self.prob_to_sample_from_gt_for_train)
515
+ else:
516
+ sample_from_gt = False
517
+ # if `pred_for_new_pt` is None, only GT masks will be used for point sampling
518
+ pred_for_new_pt = None if sample_from_gt else (high_res_masks > 0)
519
+ new_points, new_labels = get_next_point(
520
+ gt_masks=gt_masks,
521
+ pred_masks=pred_for_new_pt,
522
+ method="uniform" if self.training else self.pt_sampling_for_eval,
523
+ )
524
+ point_inputs = concat_points(point_inputs, new_points, new_labels)
525
+ # Feed the mask logits of the previous SAM outputs in the next SAM decoder step.
526
+ # For tracking, this means that when the user adds a correction click, we also feed
527
+ # the tracking output mask logits along with the click as input to the SAM decoder.
528
+ mask_inputs = low_res_masks
529
+ multimask_output = self._use_multimask(is_init_cond_frame, point_inputs)
530
+ if self.use_act_ckpt_iterative_pt_sampling and not multimask_output:
531
+ sam_outputs = torch.utils.checkpoint.checkpoint(
532
+ self._forward_sam_heads,
533
+ backbone_features=pix_feat_with_mem,
534
+ point_inputs=point_inputs,
535
+ mask_inputs=mask_inputs,
536
+ high_res_features=high_res_features,
537
+ multimask_output=multimask_output,
538
+ use_reentrant=False,
539
+ )
540
+ else:
541
+ sam_outputs = self._forward_sam_heads(
542
+ backbone_features=pix_feat_with_mem,
543
+ point_inputs=point_inputs,
544
+ mask_inputs=mask_inputs,
545
+ high_res_features=high_res_features,
546
+ multimask_output=multimask_output,
547
+ )
548
+ (
549
+ low_res_multimasks,
550
+ high_res_multimasks,
551
+ ious,
552
+ low_res_masks,
553
+ high_res_masks,
554
+ _,
555
+ object_score_logits,
556
+ ) = sam_outputs
557
+ all_pred_masks.append(low_res_masks)
558
+ all_pred_high_res_masks.append(high_res_masks)
559
+ all_pred_multimasks.append(low_res_multimasks)
560
+ all_pred_high_res_multimasks.append(high_res_multimasks)
561
+ all_pred_ious.append(ious)
562
+ all_point_inputs.append(point_inputs)
563
+ all_object_score_logits.append(object_score_logits)
564
+
565
+ # Concatenate the masks along channel (to compute losses on all of them,
566
+ # using `MultiStepIteractiveMasks`)
567
+ current_out["multistep_pred_masks"] = torch.cat(all_pred_masks, dim=1)
568
+ current_out["multistep_pred_masks_high_res"] = torch.cat(all_pred_high_res_masks, dim=1)
569
+ current_out["multistep_pred_multimasks"] = all_pred_multimasks
570
+ current_out["multistep_pred_multimasks_high_res"] = all_pred_high_res_multimasks
571
+ current_out["multistep_pred_ious"] = all_pred_ious
572
+ current_out["multistep_point_inputs"] = all_point_inputs
573
+ current_out["multistep_object_score_logits"] = all_object_score_logits
574
+
575
+ return point_inputs, sam_outputs
sam2/sam2_video_predictor.py ADDED
@@ -0,0 +1,1272 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+
4
+ # This source code is licensed under the license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ from collections import OrderedDict
8
+
9
+ import torch
10
+ import torch.nn.functional as F
11
+ from tqdm import tqdm
12
+
13
+ from sam2.modeling.sam2_base import NO_OBJ_SCORE, SAM2Base
14
+ from sam2.utils.misc import concat_points, fill_holes_in_mask_scores, load_video_frames
15
+
16
+
17
+ class SAM2VideoPredictor(SAM2Base):
18
+ """The predictor class to handle user interactions and manage inference states."""
19
+
20
+ def __init__(
21
+ self,
22
+ fill_hole_area=0,
23
+ # whether to apply non-overlapping constraints on the output object masks
24
+ non_overlap_masks=False,
25
+ # whether to clear non-conditioning memory of the surrounding frames (which may contain outdated information) after adding correction clicks;
26
+ # note that this only applies to *single-object tracking* unless `clear_non_cond_mem_for_multi_obj` is also set to True
27
+ clear_non_cond_mem_around_input=False,
28
+ # if `add_all_frames_to_correct_as_cond` is True, we also append to the conditioning frame list any frame that receives a later correction click
29
+ # if `add_all_frames_to_correct_as_cond` is False, we restrict the conditioning frame list to only the initial conditioning frames
30
+ add_all_frames_to_correct_as_cond=False,
31
+ inference_mode=True,
32
+ **kwargs,
33
+ ):
34
+ super().__init__(**kwargs)
35
+ self.fill_hole_area = fill_hole_area
36
+ self.non_overlap_masks = non_overlap_masks
37
+ self.clear_non_cond_mem_around_input = clear_non_cond_mem_around_input
38
+ self.add_all_frames_to_correct_as_cond = add_all_frames_to_correct_as_cond
39
+ self.inference_mode = inference_mode
40
+
41
+ @property
42
+ def dtype(self):
43
+ return self.image_encoder.trunk.patch_embed.proj.weight.dtype
44
+
45
+ def init_state(
46
+ self,
47
+ frame,
48
+ frame_size=None,
49
+ offload_video_to_cpu=False,
50
+ offload_state_to_cpu=False,
51
+ async_loading_frames=False,
52
+ ):
53
+ """Initialize an inference state."""
54
+ compute_device = self.device # device of the model
55
+ if isinstance(frame, str):
56
+ images, video_height, video_width = load_video_frames(
57
+ video_path=frame,
58
+ image_size=self.image_size,
59
+ offload_video_to_cpu=offload_video_to_cpu,
60
+ async_loading_frames=async_loading_frames,
61
+ compute_device=compute_device,
62
+ )
63
+ else:
64
+ if frame_size is None:
65
+ frame_size = (self.image_size, self.image_size)
66
+ images, video_height, video_width = (frame, *frame_size)
67
+ inference_state = {}
68
+ inference_state["images"] = images
69
+ inference_state["num_frames"] = len(images)
70
+ # whether to offload the video frames to CPU memory
71
+ # turning on this option saves the GPU memory with only a very small overhead
72
+ inference_state["offload_video_to_cpu"] = offload_video_to_cpu
73
+ # whether to offload the inference state to CPU memory
74
+ # turning on this option saves the GPU memory at the cost of a lower tracking fps
75
+ # (e.g. in a test case of 768x768 model, fps dropped from 27 to 24 when tracking one object
76
+ # and from 24 to 21 when tracking two objects)
77
+ inference_state["offload_state_to_cpu"] = offload_state_to_cpu
78
+ # the original video height and width, used for resizing final output scores
79
+ inference_state["video_height"] = video_height
80
+ inference_state["video_width"] = video_width
81
+ inference_state["device"] = compute_device
82
+ if offload_state_to_cpu:
83
+ inference_state["storage_device"] = torch.device("cpu")
84
+ else:
85
+ inference_state["storage_device"] = compute_device
86
+ # inputs on each frame
87
+ inference_state["point_inputs_per_obj"] = {}
88
+ inference_state["mask_inputs_per_obj"] = {}
89
+ # visual features on a small number of recently visited frames for quick interactions
90
+ inference_state["cached_features"] = {}
91
+ # values that don't change across frames (so we only need to hold one copy of them)
92
+ inference_state["constants"] = {}
93
+ # mapping between client-side object id and model-side object index
94
+ inference_state["obj_id_to_idx"] = OrderedDict()
95
+ inference_state["obj_idx_to_id"] = OrderedDict()
96
+ inference_state["obj_ids"] = []
97
+ # Slice (view) of each object's tracking results, sharing the same memory with "output_dict"
98
+ inference_state["output_dict_per_obj"] = {}
99
+ # A temporary storage to hold new outputs when the user interacts with a frame
100
+ # to add clicks or mask (it's merged into "output_dict" before propagation starts)
101
+ inference_state["temp_output_dict_per_obj"] = {}
102
+ # Frames that already hold consolidated outputs from click or mask inputs
103
+ # (we directly use their consolidated outputs during tracking)
104
+ # metadata for each tracking frame (e.g. which direction it's tracked)
105
+ inference_state["frames_tracked_per_obj"] = {}
106
+ # Warm up the visual backbone and cache the image feature on the first frame
107
+ self._get_image_feature(inference_state, frame_idx=0, batch_size=1)
108
+ return inference_state
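A minimal sketch of how this initializer is typically driven (`predictor` stands for an instance of this class; the path, resolution, and preprocessing are illustrative assumptions):

# From a video path or a directory of frames understood by load_video_frames:
state = predictor.init_state(frame="path/to/frames")  # hypothetical path

# Or from frames already loaded as a tensor at the model's image size,
# passing the original video resolution as (height, width):
state = predictor.init_state(frame=frames, frame_size=(720, 1280))  # illustrative resolution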
109
+
110
+ @classmethod
111
+ def from_pretrained(cls, model_id: str, **kwargs) -> "SAM2VideoPredictor":
112
+ """
113
+ Load a pretrained model from the Hugging Face hub.
114
+
115
+ Arguments:
116
+ model_id (str): The Hugging Face repository ID.
117
+ **kwargs: Additional arguments to pass to the model constructor.
118
+
119
+ Returns:
120
+ (SAM2VideoPredictor): The loaded model.
121
+ """
122
+ from sam2.build_sam import build_sam2_video_predictor_hf
123
+
124
+ sam_model = build_sam2_video_predictor_hf(model_id, **kwargs)
125
+ return sam_model
126
+
127
+ def _obj_id_to_idx(self, inference_state, obj_id):
128
+ """Map client-side object id to model-side object index."""
129
+ obj_idx = inference_state["obj_id_to_idx"].get(obj_id, None)
130
+ if obj_idx is not None:
131
+ return obj_idx
132
+
133
+ # We always allow adding new objects (including after tracking starts)
134
+ # get the next object slot
135
+ obj_idx = len(inference_state["obj_id_to_idx"])
136
+ inference_state["obj_id_to_idx"][obj_id] = obj_idx
137
+ inference_state["obj_idx_to_id"][obj_idx] = obj_id
138
+ inference_state["obj_ids"] = list(inference_state["obj_id_to_idx"])
139
+ # set up input and output structures for this object
140
+ inference_state["point_inputs_per_obj"][obj_idx] = {}
141
+ inference_state["mask_inputs_per_obj"][obj_idx] = {}
142
+ inference_state["output_dict_per_obj"][obj_idx] = {
143
+ "cond_frame_outputs": {}, # dict containing {frame_idx: <out>}
144
+ "non_cond_frame_outputs": {}, # dict containing {frame_idx: <out>}
145
+ }
146
+ inference_state["temp_output_dict_per_obj"][obj_idx] = {
147
+ "cond_frame_outputs": {}, # dict containing {frame_idx: <out>}
148
+ "non_cond_frame_outputs": {}, # dict containing {frame_idx: <out>}
149
+ }
150
+ inference_state["frames_tracked_per_obj"][obj_idx] = {}
151
+ return obj_idx
152
+
153
+ def _obj_idx_to_id(self, inference_state, obj_idx):
154
+ """Map model-side object index to client-side object id."""
155
+ return inference_state["obj_idx_to_id"][obj_idx]
156
+
157
+ def _get_obj_num(self, inference_state):
158
+ """Get the total number of unique object ids received so far in this session."""
159
+ return len(inference_state["obj_idx_to_id"])
160
+
161
+ @torch.inference_mode()
162
+ def add_new_hidden_state(
163
+ self,
164
+ inference_state,
165
+ frame_idx,
166
+ obj_id,
167
+ hidden,
168
+ ):
169
+ obj_idx = self._obj_id_to_idx(inference_state, obj_id)
170
+ # If this frame hasn't been tracked before, we treat it as an initial conditioning
171
+ # frame, meaning that the input points are used to generate segments on this frame without
172
+ # using any memory from other frames, like in SAM. Otherwise (if it has been tracked),
173
+ # the input points will be used to correct the already tracked masks.
174
+ obj_frames_tracked = inference_state["frames_tracked_per_obj"][obj_idx]
175
+ is_init_cond_frame = frame_idx not in obj_frames_tracked
176
+ # whether to track in reverse time order
177
+ if is_init_cond_frame:
178
+ reverse = False
179
+ else:
180
+ reverse = obj_frames_tracked[frame_idx]["reverse"]
181
+ obj_output_dict = inference_state["output_dict_per_obj"][obj_idx]
182
+ obj_temp_output_dict = inference_state["temp_output_dict_per_obj"][obj_idx]
183
+ # Add a frame to conditioning output if it's an initial conditioning frame or
184
+ # if the model sees all frames receiving clicks/mask as conditioning frames.
185
+ is_cond = is_init_cond_frame or self.add_all_frames_to_correct_as_cond
186
+ storage_key = "cond_frame_outputs" if is_cond else "non_cond_frame_outputs"
187
+
188
+ # Get any previously predicted mask logits on this object and feed it along with
189
+ # the new clicks into the SAM mask decoder.
190
+ prev_sam_mask_logits = None
191
+ # lookup temporary output dict first, which contains the most recent output
192
+ # (if not found, then lookup conditioning and non-conditioning frame output)
193
+ prev_out = obj_temp_output_dict[storage_key].get(frame_idx)
194
+ if prev_out is None:
195
+ prev_out = obj_output_dict["cond_frame_outputs"].get(frame_idx)
196
+ if prev_out is None:
197
+ prev_out = obj_output_dict["non_cond_frame_outputs"].get(frame_idx)
198
+
199
+ if prev_out is not None and prev_out["pred_masks"] is not None:
200
+ device = inference_state["device"]
201
+ prev_sam_mask_logits = prev_out["pred_masks"].to(device, non_blocking=True)
202
+ # Clamp the scale of prev_sam_mask_logits to avoid rare numerical issues.
203
+ prev_sam_mask_logits = torch.clamp(prev_sam_mask_logits, -32.0, 32.0)
204
+ current_out, _ = self._run_single_frame_inference(
205
+ inference_state=inference_state,
206
+ output_dict=obj_output_dict, # run on the slice of a single object
207
+ frame_idx=frame_idx,
208
+ batch_size=1, # run on the slice of a single object
209
+ is_init_cond_frame=is_init_cond_frame,
210
+ point_inputs=None,
211
+ mask_inputs=None,
212
+ hidden_inputs=hidden,
213
+ reverse=reverse,
214
+ # Skip the memory encoder when adding clicks or mask. We execute the memory encoder
215
+ # at the beginning of `propagate_in_video` (after the user finalizes their clicks). This
216
+ # allows us to enforce non-overlapping constraints on all objects before encoding
217
+ # them into memory.
218
+ run_mem_encoder=False,
219
+ prev_sam_mask_logits=prev_sam_mask_logits,
220
+ )
221
+ # Add the output to the output dict (to be used as future memory)
222
+ obj_temp_output_dict[storage_key][frame_idx] = current_out
223
+
224
+ # Resize the output mask to the original video resolution
225
+ obj_ids = inference_state["obj_ids"]
226
+ consolidated_out = self._consolidate_temp_output_across_obj(
227
+ inference_state,
228
+ frame_idx,
229
+ is_cond=is_cond,
230
+ consolidate_at_video_res=True,
231
+ )
232
+ _, video_res_masks = self._get_orig_video_res_output(inference_state, consolidated_out["pred_masks_video_res"])
233
+ return frame_idx, obj_ids, video_res_masks
234
+
235
+ @torch.inference_mode()
236
+ def add_new_points_or_box(
237
+ self,
238
+ inference_state,
239
+ frame_idx,
240
+ obj_id,
241
+ points=None,
242
+ labels=None,
243
+ clear_old_points=True,
244
+ normalize_coords=True,
245
+ box=None,
246
+ ):
247
+ """Add new points to a frame."""
248
+ obj_idx = self._obj_id_to_idx(inference_state, obj_id)
249
+ point_inputs_per_frame = inference_state["point_inputs_per_obj"][obj_idx]
250
+ mask_inputs_per_frame = inference_state["mask_inputs_per_obj"][obj_idx]
251
+
252
+ if (points is not None) != (labels is not None):
253
+ raise ValueError("points and labels must be provided together")
254
+ if points is None and box is None:
255
+ raise ValueError("at least one of points or box must be provided as input")
256
+
257
+ if points is None:
258
+ points = torch.zeros(0, 2, dtype=torch.float32)
259
+ elif not isinstance(points, torch.Tensor):
260
+ points = torch.tensor(points, dtype=torch.float32)
261
+ if labels is None:
262
+ labels = torch.zeros(0, dtype=torch.int32)
263
+ elif not isinstance(labels, torch.Tensor):
264
+ labels = torch.tensor(labels, dtype=torch.int32)
265
+ if points.dim() == 2:
266
+ points = points.unsqueeze(0) # add batch dimension
267
+ if labels.dim() == 1:
268
+ labels = labels.unsqueeze(0) # add batch dimension
269
+
270
+ # If `box` is provided, we add it as the first two points with labels 2 and 3
271
+ # along with the user-provided points (consistent with how SAM 2 is trained).
272
+ if box is not None:
273
+ if not clear_old_points:
274
+ raise ValueError("cannot add box without clearing old points, since "
275
+ "box prompt must be provided before any point prompt "
276
+ "(please use clear_old_points=True instead)")
277
+ if not isinstance(box, torch.Tensor):
278
+ box = torch.tensor(box, dtype=torch.float32, device=points.device)
279
+ box_coords = box.reshape(1, 2, 2)
280
+ box_labels = torch.tensor([2, 3], dtype=torch.int32, device=labels.device)
281
+ box_labels = box_labels.reshape(1, 2)
282
+ points = torch.cat([box_coords, points], dim=1)
283
+ labels = torch.cat([box_labels, labels], dim=1)
284
+
285
+ if normalize_coords:
286
+ video_H = inference_state["video_height"]
287
+ video_W = inference_state["video_width"]
288
+ points = points / torch.tensor([video_W, video_H]).to(points.device)
289
+ # scale the (normalized) coordinates by the model's internal image size
290
+ points = points * self.image_size
291
+ points = points.to(inference_state["device"])
292
+ labels = labels.to(inference_state["device"])
293
+
294
+ if not clear_old_points:
295
+ point_inputs = point_inputs_per_frame.get(frame_idx, None)
296
+ else:
297
+ point_inputs = None
298
+ point_inputs = concat_points(point_inputs, points, labels)
299
+
300
+ point_inputs_per_frame[frame_idx] = point_inputs
301
+ mask_inputs_per_frame.pop(frame_idx, None)
302
+ # If this frame hasn't been tracked before, we treat it as an initial conditioning
303
+ # frame, meaning that the input points are used to generate segments on this frame without
304
+ # using any memory from other frames, like in SAM. Otherwise (if it has been tracked),
305
+ # the input points will be used to correct the already tracked masks.
306
+ obj_frames_tracked = inference_state["frames_tracked_per_obj"][obj_idx]
307
+ is_init_cond_frame = frame_idx not in obj_frames_tracked
308
+ # whether to track in reverse time order
309
+ if is_init_cond_frame:
310
+ reverse = False
311
+ else:
312
+ reverse = obj_frames_tracked[frame_idx]["reverse"]
313
+ obj_output_dict = inference_state["output_dict_per_obj"][obj_idx]
314
+ obj_temp_output_dict = inference_state["temp_output_dict_per_obj"][obj_idx]
315
+ # Add a frame to conditioning output if it's an initial conditioning frame or
316
+ # if the model sees all frames receiving clicks/mask as conditioning frames.
317
+ is_cond = is_init_cond_frame or self.add_all_frames_to_correct_as_cond
318
+ storage_key = "cond_frame_outputs" if is_cond else "non_cond_frame_outputs"
319
+
320
+ # Get any previously predicted mask logits on this object and feed it along with
321
+ # the new clicks into the SAM mask decoder.
322
+ prev_sam_mask_logits = None
323
+ # lookup temporary output dict first, which contains the most recent output
324
+ # (if not found, then lookup conditioning and non-conditioning frame output)
325
+ prev_out = obj_temp_output_dict[storage_key].get(frame_idx)
326
+ if prev_out is None:
327
+ prev_out = obj_output_dict["cond_frame_outputs"].get(frame_idx)
328
+ if prev_out is None:
329
+ prev_out = obj_output_dict["non_cond_frame_outputs"].get(frame_idx)
330
+
331
+ if prev_out is not None and prev_out["pred_masks"] is not None:
332
+ device = inference_state["device"]
333
+ prev_sam_mask_logits = prev_out["pred_masks"].to(device, non_blocking=True)
334
+ # Clamp the scale of prev_sam_mask_logits to avoid rare numerical issues.
335
+ prev_sam_mask_logits = torch.clamp(prev_sam_mask_logits, -32.0, 32.0)
336
+ current_out, _ = self._run_single_frame_inference(
337
+ inference_state=inference_state,
338
+ output_dict=obj_output_dict, # run on the slice of a single object
339
+ frame_idx=frame_idx,
340
+ batch_size=1, # run on the slice of a single object
341
+ is_init_cond_frame=is_init_cond_frame,
342
+ point_inputs=point_inputs,
343
+ mask_inputs=None,
344
+ hidden_inputs=None,
345
+ reverse=reverse,
346
+ # Skip the memory encoder when adding clicks or mask. We execute the memory encoder
347
+ # at the beginning of `propagate_in_video` (after the user finalizes their clicks). This
348
+ # allows us to enforce non-overlapping constraints on all objects before encoding
349
+ # them into memory.
350
+ run_mem_encoder=False,
351
+ prev_sam_mask_logits=prev_sam_mask_logits,
352
+ )
353
+ # Add the output to the output dict (to be used as future memory)
354
+ obj_temp_output_dict[storage_key][frame_idx] = current_out
355
+
356
+ # Resize the output mask to the original video resolution
357
+ obj_ids = inference_state["obj_ids"]
358
+ consolidated_out = self._consolidate_temp_output_across_obj(
359
+ inference_state,
360
+ frame_idx,
361
+ is_cond=is_cond,
362
+ consolidate_at_video_res=True,
363
+ )
364
+ _, video_res_masks = self._get_orig_video_res_output(inference_state, consolidated_out["pred_masks_video_res"])
365
+ return frame_idx, obj_ids, video_res_masks
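For reference, a small sketch of the box encoding performed above (values are illustrative): a box prompt becomes two extra points with labels 2 and 3, placed before any clicks.

import torch

box = torch.tensor([100.0, 150.0, 400.0, 500.0])        # XYXY, illustrative
box_coords = box.reshape(1, 2, 2)                       # [[100, 150], [400, 500]]
box_labels = torch.tensor([[2, 3]], dtype=torch.int32)  # corner labels used for box prompts
click = torch.tensor([[[250.0, 300.0]]])                # one foreground click
click_label = torch.tensor([[1]], dtype=torch.int32)
points = torch.cat([box_coords, click], dim=1)          # box corners come first
labels = torch.cat([box_labels, click_label], dim=1)    # labels == tensor([[2, 3, 1]])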
366
+
367
+ def add_new_points(self, *args, **kwargs):
368
+ """Deprecated method. Please use `add_new_points_or_box` instead."""
369
+ return self.add_new_points_or_box(*args, **kwargs)
370
+
371
+ @torch.inference_mode()
372
+ def add_new_mask(
373
+ self,
374
+ inference_state,
375
+ frame_idx,
376
+ obj_id,
377
+ mask,
378
+ ):
379
+ """Add new mask to a frame."""
380
+ obj_idx = self._obj_id_to_idx(inference_state, obj_id)
381
+ point_inputs_per_frame = inference_state["point_inputs_per_obj"][obj_idx]
382
+ mask_inputs_per_frame = inference_state["mask_inputs_per_obj"][obj_idx]
383
+
384
+ if not isinstance(mask, torch.Tensor):
385
+ mask = torch.tensor(mask, dtype=torch.bool)
386
+ assert mask.dim() == 2
387
+ mask_H, mask_W = mask.shape
388
+ mask_inputs_orig = mask[None, None] # add batch and channel dimension
389
+ mask_inputs_orig = mask_inputs_orig.float().to(inference_state["device"])
390
+
391
+ # resize the mask if it doesn't match the model's image size
392
+ if mask_H != self.image_size or mask_W != self.image_size:
393
+ mask_inputs = torch.nn.functional.interpolate(
394
+ mask_inputs_orig,
395
+ size=(self.image_size, self.image_size),
396
+ align_corners=False,
397
+ mode="bilinear",
398
+ antialias=True, # use antialias for downsampling
399
+ )
400
+ mask_inputs = (mask_inputs >= 0.5).float()
401
+ else:
402
+ mask_inputs = mask_inputs_orig
403
+
404
+ mask_inputs_per_frame[frame_idx] = mask_inputs
405
+ point_inputs_per_frame.pop(frame_idx, None)
406
+ # If this frame hasn't been tracked before, we treat it as an initial conditioning
407
+ # frame, meaning that the input points are used to generate segments on this frame without
408
+ # using any memory from other frames, like in SAM. Otherwise (if it has been tracked),
409
+ # the input points will be used to correct the already tracked masks.
410
+ obj_frames_tracked = inference_state["frames_tracked_per_obj"][obj_idx]
411
+ is_init_cond_frame = frame_idx not in obj_frames_tracked
412
+ # whether to track in reverse time order
413
+ if is_init_cond_frame:
414
+ reverse = False
415
+ else:
416
+ reverse = obj_frames_tracked[frame_idx]["reverse"]
417
+ obj_output_dict = inference_state["output_dict_per_obj"][obj_idx]
418
+ obj_temp_output_dict = inference_state["temp_output_dict_per_obj"][obj_idx]
419
+ # Add a frame to conditioning output if it's an initial conditioning frame or
420
+ # if the model sees all frames receiving clicks/mask as conditioning frames.
421
+ is_cond = is_init_cond_frame or self.add_all_frames_to_correct_as_cond
422
+ storage_key = "cond_frame_outputs" if is_cond else "non_cond_frame_outputs"
423
+
424
+ current_out, _ = self._run_single_frame_inference(
425
+ inference_state=inference_state,
426
+ output_dict=obj_output_dict, # run on the slice of a single object
427
+ frame_idx=frame_idx,
428
+ batch_size=1, # run on the slice of a single object
429
+ is_init_cond_frame=is_init_cond_frame,
430
+ point_inputs=None,
431
+ mask_inputs=mask_inputs,
432
+ hidden_inputs=None,
433
+ reverse=reverse,
434
+ # Skip the memory encoder when adding clicks or mask. We execute the memory encoder
435
+ # at the beginning of `propagate_in_video` (after the user finalizes their clicks). This
436
+ # allows us to enforce non-overlapping constraints on all objects before encoding
437
+ # them into memory.
438
+ run_mem_encoder=False,
439
+ )
440
+ # Add the output to the output dict (to be used as future memory)
441
+ obj_temp_output_dict[storage_key][frame_idx] = current_out
442
+
443
+ # Resize the output mask to the original video resolution
444
+ obj_ids = inference_state["obj_ids"]
445
+ consolidated_out = self._consolidate_temp_output_across_obj(
446
+ inference_state,
447
+ frame_idx,
448
+ is_cond=is_cond,
449
+ consolidate_at_video_res=True,
450
+ )
451
+ _, video_res_masks = self._get_orig_video_res_output(inference_state, consolidated_out["pred_masks_video_res"])
452
+ return frame_idx, obj_ids, video_res_masks
453
+
454
+ def _get_orig_video_res_output(self, inference_state, any_res_masks):
455
+ """
456
+ Resize the object scores to the original video resolution (video_res_masks)
457
+ and apply non-overlapping constraints for final output.
458
+ """
459
+ device = inference_state["device"]
460
+ video_H = inference_state["video_height"]
461
+ video_W = inference_state["video_width"]
462
+ any_res_masks = any_res_masks.to(device, non_blocking=True)
463
+ if any_res_masks.shape[-2:] == (video_H, video_W):
464
+ video_res_masks = any_res_masks
465
+ else:
466
+ video_res_masks = torch.nn.functional.interpolate(
467
+ any_res_masks,
468
+ size=(video_H, video_W),
469
+ mode="bilinear",
470
+ align_corners=False,
471
+ )
472
+ if self.non_overlap_masks:
473
+ video_res_masks = self._apply_non_overlapping_constraints(video_res_masks)
474
+ return any_res_masks, video_res_masks
475
+
476
+ def _consolidate_temp_output_across_obj(
477
+ self,
478
+ inference_state,
479
+ frame_idx,
480
+ is_cond,
481
+ consolidate_at_video_res=False,
482
+ ):
483
+ """
484
+ Consolidate the per-object temporary outputs in `temp_output_dict_per_obj` on
485
+ a frame into a single output for all objects, including
486
+ 1) fill any missing objects either from `output_dict_per_obj` (if they exist in
487
+ `output_dict_per_obj` for this frame) or leave them as placeholder values
488
+ (if they don't exist in `output_dict_per_obj` for this frame);
489
+ # 2) if specified, rerun the memory encoder after applying non-overlapping constraints
490
+ on the object scores.
491
+ """
492
+ batch_size = self._get_obj_num(inference_state)
493
+ storage_key = "cond_frame_outputs" if is_cond else "non_cond_frame_outputs"
494
+ # Optionally, we allow consolidating the temporary outputs at the original
495
+ # video resolution (to provide a better editing experience for mask prompts).
496
+ if consolidate_at_video_res:
497
+ consolidated_H = inference_state["video_height"]
498
+ consolidated_W = inference_state["video_width"]
499
+ consolidated_mask_key = "pred_masks_video_res"
500
+ else:
501
+ consolidated_H = consolidated_W = self.image_size // 4
502
+ consolidated_mask_key = "pred_masks"
503
+
504
+ # Initialize `consolidated_out`. Its "maskmem_features" and "maskmem_pos_enc"
505
+ # will be added when rerunning the memory encoder after applying non-overlapping
506
+ # constraints to object scores. Its "pred_masks" are prefilled with a large
507
+ # negative value (NO_OBJ_SCORE) to represent missing objects.
508
+ consolidated_out = {
509
+ consolidated_mask_key:
510
+ torch.full(
511
+ size=(batch_size, 1, consolidated_H, consolidated_W),
512
+ fill_value=NO_OBJ_SCORE,
513
+ dtype=inference_state["cached_features"][frame_idx][0].dtype,
514
+ device=inference_state["storage_device"],
515
+ ),
516
+ }
517
+ for obj_idx in range(batch_size):
518
+ obj_temp_output_dict = inference_state["temp_output_dict_per_obj"][obj_idx]
519
+ obj_output_dict = inference_state["output_dict_per_obj"][obj_idx]
520
+ out = obj_temp_output_dict[storage_key].get(frame_idx, None)
521
+ # If the object doesn't appear in "temp_output_dict_per_obj" on this frame,
522
+ # we fall back and look up its previous output in "output_dict_per_obj".
523
+ # We look up both "cond_frame_outputs" and "non_cond_frame_outputs" in
524
+ # "output_dict_per_obj" to find a previous output for this object.
525
+ if out is None:
526
+ out = obj_output_dict["cond_frame_outputs"].get(frame_idx, None)
527
+ if out is None:
528
+ out = obj_output_dict["non_cond_frame_outputs"].get(frame_idx, None)
529
+ # If the object doesn't appear in "output_dict_per_obj" either, we skip it
530
+ # and leave its mask scores to the default scores (i.e. the NO_OBJ_SCORE
531
+ # placeholder above) and set its object pointer to be a dummy pointer.
532
+ if out is None:
533
+ continue
534
+ # Add the temporary object output mask to consolidated output mask
535
+ obj_mask = out["pred_masks"]
536
+ consolidated_pred_masks = consolidated_out[consolidated_mask_key]
537
+ if obj_mask.shape[-2:] == consolidated_pred_masks.shape[-2:]:
538
+ consolidated_pred_masks[obj_idx:obj_idx + 1] = obj_mask
539
+ else:
540
+ # Resize first if temporary object mask has a different resolution
541
+ resized_obj_mask = torch.nn.functional.interpolate(
542
+ obj_mask,
543
+ size=consolidated_pred_masks.shape[-2:],
544
+ mode="bilinear",
545
+ align_corners=False,
546
+ )
547
+ consolidated_pred_masks[obj_idx:obj_idx + 1] = resized_obj_mask
548
+
549
+ return consolidated_out
550
+
551
+ @torch.inference_mode()
552
+ def propagate_in_video_preflight(self, inference_state):
553
+ """Prepare inference_state and consolidate temporary outputs before tracking."""
554
+ # Check and make sure that every object has received input points or masks.
555
+ batch_size = self._get_obj_num(inference_state)
556
+ if batch_size == 0:
557
+ raise RuntimeError("No input points or masks are provided for any object; please add inputs first.")
558
+
559
+ # Consolidate per-object temporary outputs in "temp_output_dict_per_obj" and
560
+ # add them into "output_dict".
561
+ for obj_idx in range(batch_size):
562
+ obj_output_dict = inference_state["output_dict_per_obj"][obj_idx]
563
+ obj_temp_output_dict = inference_state["temp_output_dict_per_obj"][obj_idx]
564
+ for is_cond in [False, True]:
565
+ # Separately consolidate conditioning and non-conditioning temp outputs
566
+ storage_key = ("cond_frame_outputs" if is_cond else "non_cond_frame_outputs")
567
+ # Find all the frames that contain temporary outputs for any objects
568
+ # (these should be the frames that have just received clicks or mask inputs
569
+ # via `add_new_points_or_box` or `add_new_mask`)
570
+ for frame_idx, out in obj_temp_output_dict[storage_key].items():
571
+ # Run memory encoder on the temporary outputs (if the memory feature is missing)
572
+ if out["maskmem_features"] is None:
573
+ high_res_masks = torch.nn.functional.interpolate(
574
+ out["pred_masks"].to(inference_state["device"]),
575
+ size=(self.image_size, self.image_size),
576
+ mode="bilinear",
577
+ align_corners=False,
578
+ )
579
+ maskmem_features, maskmem_pos_enc = self._run_memory_encoder(
580
+ inference_state=inference_state,
581
+ frame_idx=frame_idx,
582
+ batch_size=1, # run on the slice of a single object
583
+ high_res_masks=high_res_masks,
584
+ object_score_logits=out["object_score_logits"],
585
+ # these frames are what the user interacted with
586
+ is_mask_from_pts=True,
587
+ )
588
+ out["maskmem_features"] = maskmem_features
589
+ out["maskmem_pos_enc"] = maskmem_pos_enc
590
+
591
+ obj_output_dict[storage_key][frame_idx] = out
592
+ if self.clear_non_cond_mem_around_input:
593
+ # clear non-conditioning memory of the surrounding frames
594
+ self._clear_obj_non_cond_mem_around_input(inference_state, frame_idx, obj_idx)
595
+
596
+ # clear temporary outputs in `temp_output_dict_per_obj`
597
+ obj_temp_output_dict[storage_key].clear()
598
+
599
+ # check and make sure that every object has received input points or masks
600
+ obj_output_dict = inference_state["output_dict_per_obj"][obj_idx]
601
+ if len(obj_output_dict["cond_frame_outputs"]) == 0:
602
+ obj_id = self._obj_idx_to_id(inference_state, obj_idx)
603
+ raise RuntimeError(
604
+ f"No input points or masks are provided for object id {obj_id}; please add inputs first.")
605
+ # edge case: if an output is added to "cond_frame_outputs", we remove any prior
606
+ # output on the same frame in "non_cond_frame_outputs"
607
+ for frame_idx in obj_output_dict["cond_frame_outputs"]:
608
+ obj_output_dict["non_cond_frame_outputs"].pop(frame_idx, None)
609
+
610
+ @torch.inference_mode()
611
+ def propagate_in_video(
612
+ self,
613
+ inference_state,
614
+ start_frame_idx=None,
615
+ max_frame_num_to_track=None,
616
+ reverse=False,
617
+ verbose=True,
618
+ ):
619
+ """Propagate the input points across frames to track in the entire video."""
620
+ self.propagate_in_video_preflight(inference_state)
621
+
622
+ obj_ids = inference_state["obj_ids"]
623
+ num_frames = inference_state["num_frames"]
624
+ batch_size = self._get_obj_num(inference_state)
625
+
626
+ # set start index, end index, and processing order
627
+ if start_frame_idx is None:
628
+ # default: start from the earliest frame with input points
629
+ start_frame_idx = min(t for obj_output_dict in inference_state["output_dict_per_obj"].values()
630
+ for t in obj_output_dict["cond_frame_outputs"])
631
+ if max_frame_num_to_track is None:
632
+ # default: track all the frames in the video
633
+ max_frame_num_to_track = num_frames
634
+ if reverse:
635
+ end_frame_idx = max(start_frame_idx - max_frame_num_to_track, 0)
636
+ if start_frame_idx > 0:
637
+ processing_order = range(start_frame_idx, end_frame_idx - 1, -1)
638
+ else:
639
+ processing_order = [] # skip reverse tracking if starting from frame 0
640
+ else:
641
+ end_frame_idx = min(start_frame_idx + max_frame_num_to_track, num_frames - 1)
642
+ processing_order = range(start_frame_idx, end_frame_idx + 1)
643
+
644
+ for frame_idx in tqdm(processing_order, desc="propagate in video", disable=not verbose):
645
+ pred_masks_per_obj = [None] * batch_size
646
+ for obj_idx in range(batch_size):
647
+ obj_output_dict = inference_state["output_dict_per_obj"][obj_idx]
648
+ # We skip those frames already in consolidated outputs (these are frames
649
+ # that received input clicks or mask). Note that we cannot directly run
650
+ # batched forward on them via `_run_single_frame_inference` because the
651
+ # number of clicks on each object might be different.
652
+ if frame_idx in obj_output_dict["cond_frame_outputs"]:
653
+ storage_key = "cond_frame_outputs"
654
+ current_out = obj_output_dict[storage_key][frame_idx]
655
+ device = inference_state["device"]
656
+ pred_masks = current_out["pred_masks"].to(device, non_blocking=True)
657
+ if self.clear_non_cond_mem_around_input:
658
+ # clear non-conditioning memory of the surrounding frames
659
+ self._clear_obj_non_cond_mem_around_input(inference_state, frame_idx, obj_idx)
660
+ else:
661
+ storage_key = "non_cond_frame_outputs"
662
+ current_out, pred_masks = self._run_single_frame_inference(
663
+ inference_state=inference_state,
664
+ output_dict=obj_output_dict,
665
+ frame_idx=frame_idx,
666
+ batch_size=1, # run on the slice of a single object
667
+ is_init_cond_frame=False,
668
+ point_inputs=None,
669
+ mask_inputs=None,
670
+ hidden_inputs=None,
671
+ reverse=reverse,
672
+ run_mem_encoder=True,
673
+ )
674
+ obj_output_dict[storage_key][frame_idx] = current_out
675
+
676
+ inference_state["frames_tracked_per_obj"][obj_idx][frame_idx] = {"reverse": reverse}
677
+ pred_masks_per_obj[obj_idx] = pred_masks
678
+
679
+ # Resize the output mask to the original video resolution (we directly use
680
+ # the mask scores on GPU for output to avoid any CPU conversion in between)
681
+ if len(pred_masks_per_obj) > 1:
682
+ all_pred_masks = torch.cat(pred_masks_per_obj, dim=0)
683
+ else:
684
+ all_pred_masks = pred_masks_per_obj[0]
685
+ _, video_res_masks = self._get_orig_video_res_output(inference_state, all_pred_masks)
686
+ yield frame_idx, obj_ids, video_res_masks
687
+
688
+ @torch.inference_mode()
689
+ def clear_all_prompts_in_frame(self, inference_state, frame_idx, obj_id, need_output=True):
690
+ """Remove all input points or mask in a specific frame for a given object."""
691
+ obj_idx = self._obj_id_to_idx(inference_state, obj_id)
692
+
693
+ # Clear the conditioning information on the given frame
694
+ inference_state["point_inputs_per_obj"][obj_idx].pop(frame_idx, None)
695
+ inference_state["mask_inputs_per_obj"][obj_idx].pop(frame_idx, None)
696
+
697
+ temp_output_dict_per_obj = inference_state["temp_output_dict_per_obj"]
698
+ temp_output_dict_per_obj[obj_idx]["cond_frame_outputs"].pop(frame_idx, None)
699
+ temp_output_dict_per_obj[obj_idx]["non_cond_frame_outputs"].pop(frame_idx, None)
700
+
701
+ # Remove the frame's conditioning output (possibly downgrading it to non-conditioning)
702
+ obj_output_dict = inference_state["output_dict_per_obj"][obj_idx]
703
+ out = obj_output_dict["cond_frame_outputs"].pop(frame_idx, None)
704
+ if out is not None:
705
+ # The frame is not a conditioning frame anymore since it's not receiving inputs,
706
+ # so we "downgrade" its output (if it exists) to a non-conditioning frame output.
707
+ obj_output_dict["non_cond_frame_outputs"][frame_idx] = out
708
+ inference_state["frames_tracked_per_obj"][obj_idx].pop(frame_idx, None)
709
+
710
+ if not need_output:
711
+ return
712
+ # Finally, output updated masks per object (after removing the inputs above)
713
+ obj_ids = inference_state["obj_ids"]
714
+ is_cond = any(frame_idx in obj_temp_output_dict["cond_frame_outputs"]
715
+ for obj_temp_output_dict in temp_output_dict_per_obj.values())
716
+ consolidated_out = self._consolidate_temp_output_across_obj(
717
+ inference_state,
718
+ frame_idx,
719
+ is_cond=is_cond,
720
+ consolidate_at_video_res=True,
721
+ )
722
+ _, video_res_masks = self._get_orig_video_res_output(inference_state, consolidated_out["pred_masks_video_res"])
723
+ return frame_idx, obj_ids, video_res_masks
724
+
725
+ @torch.inference_mode()
726
+ def reset_state(self, inference_state):
727
+ """Remove all input points or mask in all frames throughout the video."""
728
+ self._reset_tracking_results(inference_state)
729
+ # Remove all object ids
730
+ inference_state["obj_id_to_idx"].clear()
731
+ inference_state["obj_idx_to_id"].clear()
732
+ inference_state["obj_ids"].clear()
733
+ inference_state["point_inputs_per_obj"].clear()
734
+ inference_state["mask_inputs_per_obj"].clear()
735
+ inference_state["output_dict_per_obj"].clear()
736
+ inference_state["temp_output_dict_per_obj"].clear()
737
+ inference_state["frames_tracked_per_obj"].clear()
738
+
739
+ def _reset_tracking_results(self, inference_state):
740
+ """Reset all tracking inputs and results across the video."""
741
+ for v in inference_state["point_inputs_per_obj"].values():
742
+ v.clear()
743
+ for v in inference_state["mask_inputs_per_obj"].values():
744
+ v.clear()
745
+ for v in inference_state["output_dict_per_obj"].values():
746
+ v["cond_frame_outputs"].clear()
747
+ v["non_cond_frame_outputs"].clear()
748
+ for v in inference_state["temp_output_dict_per_obj"].values():
749
+ v["cond_frame_outputs"].clear()
750
+ v["non_cond_frame_outputs"].clear()
751
+ for v in inference_state["frames_tracked_per_obj"].values():
752
+ v.clear()
753
+
754
+ def _get_image_feature(self, inference_state, frame_idx, batch_size):
755
+ """Compute the image features on a given frame."""
756
+ # NOTE: check me ======================================================================
757
+ # # Look up in the cache first
758
+ # image, backbone_out = inference_state["cached_features"].get(frame_idx, (None, None))
759
+ # if backbone_out is None:
760
+ # # Cache miss -- we will run inference on a single image
761
+ # device = inference_state["device"]
762
+ # image = inference_state["images"][frame_idx].to(device).unsqueeze(0)
763
+ # backbone_out = self.forward_image(image)
764
+ # # Cache the most recent frame's feature (for repeated interactions with
765
+ # # a frame; we can use an LRU cache for more frames in the future).
766
+ # inference_state["cached_features"] = {frame_idx: (image, backbone_out)}
767
+ # =====================================================================================
768
+
769
+ # build cache for image features
770
+ if not inference_state["cached_features"]:
771
+ image = inference_state["images"].to(inference_state["device"])
772
+ backbone_out = self.forward_image(image)
773
+ inference_state["cached_features"] = {
774
+ i: (image[i, None], {
775
+ k: v[i, None] if torch.is_tensor(v) else [t[i, None] for t in v]
776
+ for k, v in backbone_out.items()
777
+ })
778
+ for i in range(image.size(0))
779
+ }
780
+
781
+ # retrieve from cache
782
+ image, backbone_out = inference_state["cached_features"][frame_idx]
783
+
784
+ # expand the features to have the same dimension as the number of objects
785
+ expanded_image = image.expand(batch_size, -1, -1, -1)
786
+ expanded_backbone_out = {
787
+ "backbone_fpn": backbone_out["backbone_fpn"].copy(),
788
+ "vision_pos_enc": backbone_out["vision_pos_enc"].copy(),
789
+ }
790
+ for i, feat in enumerate(expanded_backbone_out["backbone_fpn"]):
791
+ expanded_backbone_out["backbone_fpn"][i] = feat.expand(batch_size, -1, -1, -1)
792
+ for i, pos in enumerate(expanded_backbone_out["vision_pos_enc"]):
793
+ pos = pos.expand(batch_size, -1, -1, -1)
794
+ expanded_backbone_out["vision_pos_enc"][i] = pos
795
+
796
+ features = self._prepare_backbone_features(expanded_backbone_out)
797
+ features = (expanded_image, ) + features
798
+ return features
799
+
800
+ def _run_single_frame_inference(
801
+ self,
802
+ inference_state,
803
+ output_dict,
804
+ frame_idx,
805
+ batch_size,
806
+ is_init_cond_frame,
807
+ point_inputs,
808
+ mask_inputs,
809
+ hidden_inputs,
810
+ reverse,
811
+ run_mem_encoder,
812
+ prev_sam_mask_logits=None,
813
+ ):
814
+ """Run tracking on a single frame based on current inputs and previous memory."""
815
+ # Retrieve correct image features
816
+ (
817
+ _,
818
+ _,
819
+ current_vision_feats,
820
+ current_vision_pos_embeds,
821
+ feat_sizes,
822
+ ) = self._get_image_feature(inference_state, frame_idx, batch_size)
823
+
824
+ # point and mask should not appear as input simultaneously on the same frame
825
+ assert point_inputs is None or mask_inputs is None
826
+ current_out = self.track_step(
827
+ frame_idx=frame_idx,
828
+ is_init_cond_frame=is_init_cond_frame,
829
+ current_vision_feats=current_vision_feats,
830
+ current_vision_pos_embeds=current_vision_pos_embeds,
831
+ feat_sizes=feat_sizes,
832
+ point_inputs=point_inputs,
833
+ mask_inputs=mask_inputs,
834
+ hidden_inputs=hidden_inputs,
835
+ output_dict=output_dict,
836
+ num_frames=inference_state["num_frames"],
837
+ track_in_reverse=reverse,
838
+ run_mem_encoder=run_mem_encoder,
839
+ prev_sam_mask_logits=prev_sam_mask_logits,
840
+ )
841
+
842
+ # optionally offload the output to CPU memory to save GPU space
843
+ storage_device = inference_state["storage_device"]
844
+ maskmem_features = current_out["maskmem_features"]
845
+ if maskmem_features is not None:
846
+ maskmem_features = maskmem_features.to(inference_state["cached_features"][frame_idx][0].dtype)
847
+ maskmem_features = maskmem_features.to(storage_device, non_blocking=True)
848
+ pred_masks_gpu = current_out["pred_masks"]
849
+ # potentially fill holes in the predicted masks
850
+ if self.fill_hole_area > 0:
851
+ pred_masks_gpu = fill_holes_in_mask_scores(pred_masks_gpu, self.fill_hole_area)
852
+ pred_masks = pred_masks_gpu.to(storage_device, non_blocking=True)
853
+ # "maskmem_pos_enc" is the same across frames, so we only need to store one copy of it
854
+ maskmem_pos_enc = self._get_maskmem_pos_enc(inference_state, current_out)
855
+ # object pointer is a small tensor, so we always keep it on GPU memory for fast access
856
+ obj_ptr = current_out["obj_ptr"]
857
+ object_score_logits = current_out["object_score_logits"]
858
+ # make a compact version of this frame's output to reduce the state size
859
+ compact_current_out = {
860
+ "maskmem_features": maskmem_features,
861
+ "maskmem_pos_enc": maskmem_pos_enc,
862
+ "pred_masks": pred_masks,
863
+ "obj_ptr": obj_ptr,
864
+ "object_score_logits": object_score_logits,
865
+ }
866
+ # NOTE: reduce memory during inference ----------------------------------------
867
+ # https://github.com/facebookresearch/sam2/issues/196
868
+ # step = self.num_maskmem * self.memory_temporal_stride_for_eval * 2
869
+ # drop_frame_inds = [
870
+ # i for i in output_dict["non_cond_frame_outputs"].keys()
871
+ # if (i > frame_idx + step if reverse else i < frame_idx - step)
872
+ # ]
873
+ # for idx in drop_frame_inds:
874
+ # output_dict["non_cond_frame_outputs"].pop(idx)
875
+ # for obj_output_dict in inference_state["output_dict_per_obj"].values():
876
+ # obj_output_dict["non_cond_frame_outputs"].pop(idx, None)
877
+ # -----------------------------------------------------------------------------
878
+ return compact_current_out, pred_masks_gpu
879
+
880
+ def _run_memory_encoder(
881
+ self,
882
+ inference_state,
883
+ frame_idx,
884
+ batch_size,
885
+ high_res_masks,
886
+ object_score_logits,
887
+ is_mask_from_pts,
888
+ ):
889
+ """
890
+ Run the memory encoder on `high_res_masks`. This is usually after applying
891
+ non-overlapping constraints to object scores. Since their scores changed, their
892
+ memory also needs to be computed again with the memory encoder.
893
+ """
894
+ # Retrieve correct image features
895
+ _, _, current_vision_feats, _, feat_sizes = self._get_image_feature(inference_state, frame_idx, batch_size)
896
+ maskmem_features, maskmem_pos_enc = self._encode_new_memory(
897
+ current_vision_feats=current_vision_feats,
898
+ feat_sizes=feat_sizes,
899
+ pred_masks_high_res=high_res_masks,
900
+ object_score_logits=object_score_logits,
901
+ is_mask_from_pts=is_mask_from_pts,
902
+ )
903
+
904
+ # optionally offload the output to CPU memory to save GPU space
905
+ storage_device = inference_state["storage_device"]
906
+ maskmem_features = maskmem_features.to(inference_state["cached_features"][frame_idx][0].dtype)
907
+ maskmem_features = maskmem_features.to(storage_device, non_blocking=True)
908
+ # "maskmem_pos_enc" is the same across frames, so we only need to store one copy of it
909
+ maskmem_pos_enc = self._get_maskmem_pos_enc(inference_state, {"maskmem_pos_enc": maskmem_pos_enc})
910
+ return maskmem_features, maskmem_pos_enc
911
+
912
+ def _get_maskmem_pos_enc(self, inference_state, current_out):
913
+ """
914
+ `maskmem_pos_enc` is the same across frames and objects, so we cache it as
915
+ a constant in the inference session to reduce session storage size.
916
+ """
917
+ model_constants = inference_state["constants"]
918
+ # "out_maskmem_pos_enc" should be either a list of tensors or None
919
+ out_maskmem_pos_enc = current_out["maskmem_pos_enc"]
920
+ if out_maskmem_pos_enc is not None:
921
+ if "maskmem_pos_enc" not in model_constants:
922
+ assert isinstance(out_maskmem_pos_enc, list)
923
+ # only take the slice for one object, since it's same across objects
924
+ maskmem_pos_enc = [x[0:1].clone() for x in out_maskmem_pos_enc]
925
+ model_constants["maskmem_pos_enc"] = maskmem_pos_enc
926
+ else:
927
+ maskmem_pos_enc = model_constants["maskmem_pos_enc"]
928
+ # expand the cached maskmem_pos_enc to the actual batch size
929
+ batch_size = out_maskmem_pos_enc[0].size(0)
930
+ expanded_maskmem_pos_enc = [x.expand(batch_size, -1, -1, -1) for x in maskmem_pos_enc]
931
+ else:
932
+ expanded_maskmem_pos_enc = None
933
+ return expanded_maskmem_pos_enc
934
+
935
+ @torch.inference_mode()
936
+ def remove_object(self, inference_state, obj_id, strict=False, need_output=True):
937
+ """
938
+ Remove an object id from the tracking state. If strict is True, we check whether
939
+ the object id actually exists and raise an error if it doesn't exist.
940
+ """
941
+ old_obj_idx_to_rm = inference_state["obj_id_to_idx"].get(obj_id, None)
942
+ updated_frames = []
943
+ # Check whether this object_id to remove actually exists and possibly raise an error.
944
+ if old_obj_idx_to_rm is None:
945
+ if not strict:
946
+ return inference_state["obj_ids"], updated_frames
947
+ raise RuntimeError(f"Cannot remove object id {obj_id} as it doesn't exist. "
948
+ f"All existing object ids: {inference_state['obj_ids']}.")
949
+
950
+ # If this is the only remaining object id, we simply reset the state.
951
+ if len(inference_state["obj_id_to_idx"]) == 1:
952
+ self.reset_state(inference_state)
953
+ return inference_state["obj_ids"], updated_frames
954
+
955
+ # There are still remaining objects after removing this object id. In this case,
956
+ # we need to delete the object storage from inference state tensors.
957
+ # Step 0: clear the input on those frames where this object id has point or mask input
958
+ # (note that this step is required as it might downgrade conditioning frames to
959
+ # non-conditioning ones)
960
+ obj_input_frames_inds = set()
961
+ obj_input_frames_inds.update(inference_state["point_inputs_per_obj"][old_obj_idx_to_rm])
962
+ obj_input_frames_inds.update(inference_state["mask_inputs_per_obj"][old_obj_idx_to_rm])
963
+ for frame_idx in obj_input_frames_inds:
964
+ self.clear_all_prompts_in_frame(inference_state, frame_idx, obj_id, need_output=False)
965
+
966
+ # Step 1: Update the object id mapping (note that it must be done after Step 0,
967
+ # since Step 0 still requires the old object id mappings in inference_state)
968
+ old_obj_ids = inference_state["obj_ids"]
969
+ old_obj_inds = list(range(len(old_obj_ids)))
970
+ remain_old_obj_inds = old_obj_inds.copy()
971
+ remain_old_obj_inds.remove(old_obj_idx_to_rm)
972
+ new_obj_ids = [old_obj_ids[old_idx] for old_idx in remain_old_obj_inds]
973
+ new_obj_inds = list(range(len(new_obj_ids)))
974
+ # build new mappings
975
+ old_idx_to_new_idx = dict(zip(remain_old_obj_inds, new_obj_inds))
976
+ inference_state["obj_id_to_idx"] = dict(zip(new_obj_ids, new_obj_inds))
977
+ inference_state["obj_idx_to_id"] = dict(zip(new_obj_inds, new_obj_ids))
978
+ inference_state["obj_ids"] = new_obj_ids
979
+
980
+ # Step 2: For per-object tensor storage, we shift their obj_idx in the dict keys.
981
+ def _map_keys(container):
982
+ new_kvs = []
983
+ for k in old_obj_inds:
984
+ v = container.pop(k)
985
+ if k in old_idx_to_new_idx:
986
+ new_kvs.append((old_idx_to_new_idx[k], v))
987
+ container.update(new_kvs)
988
+
989
+ _map_keys(inference_state["point_inputs_per_obj"])
990
+ _map_keys(inference_state["mask_inputs_per_obj"])
991
+ _map_keys(inference_state["output_dict_per_obj"])
992
+ _map_keys(inference_state["temp_output_dict_per_obj"])
993
+ _map_keys(inference_state["frames_tracked_per_obj"])
994
+
995
+ # Step 3: Further collect the outputs on those frames in `obj_input_frames_inds`, which
996
+ # could show an updated mask for objects previously occluded by the object being removed
997
+ if need_output:
998
+ temp_output_dict_per_obj = inference_state["temp_output_dict_per_obj"]
999
+ for frame_idx in obj_input_frames_inds:
1000
+ is_cond = any(frame_idx in obj_temp_output_dict["cond_frame_outputs"]
1001
+ for obj_temp_output_dict in temp_output_dict_per_obj.values())
1002
+ consolidated_out = self._consolidate_temp_output_across_obj(
1003
+ inference_state,
1004
+ frame_idx,
1005
+ is_cond=is_cond,
1006
+ consolidate_at_video_res=True,
1007
+ )
1008
+ _, video_res_masks = self._get_orig_video_res_output(inference_state,
1009
+ consolidated_out["pred_masks_video_res"])
1010
+ updated_frames.append((frame_idx, video_res_masks))
1011
+
1012
+ return inference_state["obj_ids"], updated_frames
1013
+
1014
+ def _clear_non_cond_mem_around_input(self, inference_state, frame_idx):
1015
+ """
1016
+ Remove the non-conditioning memory around the input frame. When users provide
1017
+ correction clicks, the surrounding frames' non-conditioning memories can still
1018
+ contain outdated object appearance information and could confuse the model.
1019
+
1020
+ This method clears those non-conditioning memories surrounding the interacted
1021
+ frame to avoid giving the model both old and new information about the object.
1022
+ """
1023
+ r = self.memory_temporal_stride_for_eval
1024
+ frame_idx_begin = frame_idx - r * self.num_maskmem
1025
+ frame_idx_end = frame_idx + r * self.num_maskmem
1026
+ batch_size = self._get_obj_num(inference_state)
1027
+ for obj_idx in range(batch_size):
1028
+ obj_output_dict = inference_state["output_dict_per_obj"][obj_idx]
1029
+ non_cond_frame_outputs = obj_output_dict["non_cond_frame_outputs"]
1030
+ for t in range(frame_idx_begin, frame_idx_end + 1):
1031
+ non_cond_frame_outputs.pop(t, None)
1032
+
1033
+
1034
+ class SAM2VideoPredictorVOS(SAM2VideoPredictor):
1035
+ """Optimized for the VOS setting"""
1036
+
1037
+ def __init__(self, *args, **kwargs):
1038
+ raise NotImplementedError("SAM2VideoPredictorVOS has not been modified for LLMs")
1039
+ super().__init__(*args, **kwargs)
1040
+ self._compile_all_components()
1041
+
1042
+ def _compile_all_components(self):
1043
+ print("Compiling all components for VOS setting. First time may be very slow.")
1044
+ self.memory_encoder.forward = torch.compile(
1045
+ self.memory_encoder.forward,
1046
+ mode="max-autotune",
1047
+ fullgraph=True,
1048
+ dynamic=False,
1049
+ )
1050
+
1051
+ self.memory_attention.forward = torch.compile(
1052
+ self.memory_attention.forward,
1053
+ mode="max-autotune",
1054
+ fullgraph=True,
1055
+ dynamic=True, # Num. of memories varies
1056
+ )
1057
+
1058
+ self.sam_prompt_encoder.forward = torch.compile(
1059
+ self.sam_prompt_encoder.forward,
1060
+ mode="max-autotune",
1061
+ fullgraph=True,
1062
+ dynamic=False, # Accuracy regression on True
1063
+ )
1064
+
1065
+ self.sam_mask_decoder.forward = torch.compile(
1066
+ self.sam_mask_decoder.forward,
1067
+ mode="max-autotune",
1068
+ fullgraph=True,
1069
+ dynamic=False, # Accuracy regression on True
1070
+ )
1071
+
1072
+ def forward_image(self, img_batch: torch.Tensor):
1073
+ """
1074
+ Identical to the corresponding method in the parent (SAM2VideoPredictor), but
1075
+ cloning the backbone features and pos encoding to enable compilation.
1076
+ """
1077
+ backbone_out = self.image_encoder(img_batch)
1078
+ if self.use_high_res_features_in_sam:
1079
+ # precompute projected level 0 and level 1 features in SAM decoder
1080
+ # to avoid running it again on every SAM click
1081
+ backbone_out["backbone_fpn"][0] = self.sam_mask_decoder.conv_s0(backbone_out["backbone_fpn"][0])
1082
+ backbone_out["backbone_fpn"][1] = self.sam_mask_decoder.conv_s1(backbone_out["backbone_fpn"][1])
1083
+ # Clone to help torch.compile
1084
+ for i in range(len(backbone_out["backbone_fpn"])):
1085
+ backbone_out["backbone_fpn"][i] = backbone_out["backbone_fpn"][i].clone()
1086
+ backbone_out["vision_pos_enc"][i] = backbone_out["vision_pos_enc"][i].clone()
1087
+ return backbone_out
1088
+
1089
+ def _forward_sam_heads(
1090
+ self,
1091
+ backbone_features,
1092
+ point_inputs=None,
1093
+ mask_inputs=None,
1094
+ high_res_features=None,
1095
+ multimask_output=False,
1096
+ ):
1097
+ """
1098
+ Identical to the corresponding method in the parent (SAM2VideoPredictor), but
1099
+ cloning the outputs of prompt_encoder and mask_decoder to enable compilation.
1100
+ """
1101
+ B = backbone_features.size(0)
1102
+ device = backbone_features.device
1103
+ assert backbone_features.size(1) == self.sam_prompt_embed_dim
1104
+ assert backbone_features.size(2) == self.sam_image_embedding_size
1105
+ assert backbone_features.size(3) == self.sam_image_embedding_size
1106
+
1107
+ # a) Handle point prompts
1108
+ if point_inputs is not None:
1109
+ sam_point_coords = point_inputs["point_coords"]
1110
+ sam_point_labels = point_inputs["point_labels"]
1111
+ assert sam_point_coords.size(0) == B and sam_point_labels.size(0) == B
1112
+ else:
1113
+ # If no points are provided, pad with an empty point (with label -1)
1114
+ sam_point_coords = torch.zeros(B, 1, 2, device=device)
1115
+ sam_point_labels = -torch.ones(B, 1, dtype=torch.int32, device=device)
1116
+
1117
+ # b) Handle mask prompts
1118
+ if mask_inputs is not None:
1119
+ # If mask_inputs is provided, downsize it into low-res mask input if needed
1120
+ # and feed it as a dense mask prompt into the SAM mask encoder
1121
+ assert len(mask_inputs.shape) == 4 and mask_inputs.shape[:2] == (B, 1)
1122
+ if mask_inputs.shape[-2:] != self.sam_prompt_encoder.mask_input_size:
1123
+ sam_mask_prompt = F.interpolate(
1124
+ mask_inputs.float(),
1125
+ size=self.sam_prompt_encoder.mask_input_size,
1126
+ align_corners=False,
1127
+ mode="bilinear",
1128
+ antialias=True, # use antialias for downsampling
1129
+ )
1130
+ else:
1131
+ sam_mask_prompt = mask_inputs
1132
+ else:
1133
+ # Otherwise, simply feed None (and SAM's prompt encoder will add
1134
+ # a learned `no_mask_embed` to indicate no mask input in this case).
1135
+ sam_mask_prompt = None
1136
+
1137
+ sparse_embeddings, dense_embeddings = self.sam_prompt_encoder(
1138
+ points=(sam_point_coords, sam_point_labels),
1139
+ boxes=None,
1140
+ masks=sam_mask_prompt,
1141
+ )
1142
+ # Clone image_pe and the outputs of sam_prompt_encoder
1143
+ # to enable compilation
1144
+ sparse_embeddings = sparse_embeddings.clone()
1145
+ dense_embeddings = dense_embeddings.clone()
1146
+ image_pe = self.sam_prompt_encoder.get_dense_pe().clone()
1147
+ (
1148
+ low_res_multimasks,
1149
+ ious,
1150
+ sam_output_tokens,
1151
+ object_score_logits,
1152
+ ) = self.sam_mask_decoder(
1153
+ image_embeddings=backbone_features,
1154
+ image_pe=image_pe,
1155
+ sparse_prompt_embeddings=sparse_embeddings,
1156
+ dense_prompt_embeddings=dense_embeddings,
1157
+ multimask_output=multimask_output,
1158
+ repeat_image=False, # the image is already batched
1159
+ high_res_features=high_res_features,
1160
+ )
1161
+ # Clone the output of sam_mask_decoder
1162
+ # to enable compilation
1163
+ low_res_multimasks = low_res_multimasks.clone()
1164
+ ious = ious.clone()
1165
+ sam_output_tokens = sam_output_tokens.clone()
1166
+ object_score_logits = object_score_logits.clone()
1167
+
1168
+ if self.pred_obj_scores:
1169
+ is_obj_appearing = object_score_logits > 0
1170
+
1171
+ # Mask used for spatial memories is always a *hard* choice between obj and no obj,
1172
+ # consistent with the actual mask prediction
1173
+ low_res_multimasks = torch.where(
1174
+ is_obj_appearing[:, None, None],
1175
+ low_res_multimasks,
1176
+ NO_OBJ_SCORE,
1177
+ )
1178
+
1179
+ # convert masks from possibly bfloat16 (or float16) to float32
1180
+ low_res_multimasks = low_res_multimasks.float()
1181
+ high_res_multimasks = F.interpolate(
1182
+ low_res_multimasks,
1183
+ size=(self.image_size, self.image_size),
1184
+ mode="bilinear",
1185
+ align_corners=False,
1186
+ )
1187
+
1188
+ sam_output_token = sam_output_tokens[:, 0]
1189
+ if multimask_output:
1190
+ # take the best mask prediction (with the highest IoU estimation)
1191
+ best_iou_inds = torch.argmax(ious, dim=-1)
1192
+ batch_inds = torch.arange(B, device=device)
1193
+ low_res_masks = low_res_multimasks[batch_inds, best_iou_inds].unsqueeze(1)
1194
+ high_res_masks = high_res_multimasks[batch_inds, best_iou_inds].unsqueeze(1)
1195
+ if sam_output_tokens.size(1) > 1:
1196
+ sam_output_token = sam_output_tokens[batch_inds, best_iou_inds]
1197
+ else:
1198
+ low_res_masks, high_res_masks = low_res_multimasks, high_res_multimasks
1199
+
1200
+ # Extract object pointer from the SAM output token (with occlusion handling)
1201
+ obj_ptr = self.obj_ptr_proj(sam_output_token)
1202
+ if self.pred_obj_scores:
1203
+ # Allow *soft* no obj ptr, unlike for masks
1204
+ if self.soft_no_obj_ptr:
1205
+ lambda_is_obj_appearing = object_score_logits.sigmoid()
1206
+ else:
1207
+ lambda_is_obj_appearing = is_obj_appearing.float()
1208
+
1209
+ if self.fixed_no_obj_ptr:
1210
+ obj_ptr = lambda_is_obj_appearing * obj_ptr
1211
+ obj_ptr = obj_ptr + (1 - lambda_is_obj_appearing) * self.no_obj_ptr
1212
+
1213
+ return (
1214
+ low_res_multimasks,
1215
+ high_res_multimasks,
1216
+ ious,
1217
+ low_res_masks,
1218
+ high_res_masks,
1219
+ obj_ptr,
1220
+ object_score_logits,
1221
+ )
1222
+
1223
+ def _encode_new_memory(
1224
+ self,
1225
+ current_vision_feats,
1226
+ feat_sizes,
1227
+ pred_masks_high_res,
1228
+ object_score_logits,
1229
+ is_mask_from_pts,
1230
+ ):
1231
+ """
1232
+ Identical to the corresponding method in the parent (SAM2VideoPredictor), but
1233
+ cloning the memories and their pos enc to enable compilation.
1234
+ """
1235
+ B = current_vision_feats[-1].size(1) # batch size on this frame
1236
+ C = self.hidden_dim
1237
+ H, W = feat_sizes[-1] # top-level (lowest-resolution) feature size
1238
+ # top-level feature, (HW)BC => BCHW
1239
+ pix_feat = current_vision_feats[-1].permute(1, 2, 0).view(B, C, H, W)
1240
+ if self.non_overlap_masks_for_mem_enc and not self.training:
1241
+ # optionally, apply non-overlapping constraints to the masks (it's applied
1242
+ # in the batch dimension and should only be used during eval, where all
1243
+ # the objects come from the same video under batch size 1).
1244
+ pred_masks_high_res = self._apply_non_overlapping_constraints(pred_masks_high_res)
1245
+ # scale the raw mask logits with a temperature before applying sigmoid
1246
+ binarize = self.binarize_mask_from_pts_for_mem_enc and is_mask_from_pts
1247
+ if binarize and not self.training:
1248
+ mask_for_mem = (pred_masks_high_res > 0).float()
1249
+ else:
1250
+ # apply sigmoid on the raw mask logits to turn them into range (0, 1)
1251
+ mask_for_mem = torch.sigmoid(pred_masks_high_res)
1252
+ # apply scale and bias terms to the sigmoid probabilities
1253
+ if self.sigmoid_scale_for_mem_enc != 1.0:
1254
+ mask_for_mem = mask_for_mem * self.sigmoid_scale_for_mem_enc
1255
+ if self.sigmoid_bias_for_mem_enc != 0.0:
1256
+ mask_for_mem = mask_for_mem + self.sigmoid_bias_for_mem_enc
1257
+ maskmem_out = self.memory_encoder(
1258
+ pix_feat,
1259
+ mask_for_mem,
1260
+ skip_mask_sigmoid=True # sigmoid already applied
1261
+ )
1262
+ # Clone the feats and pos_enc to enable compilation
1263
+ maskmem_features = maskmem_out["vision_features"].clone()
1264
+ maskmem_pos_enc = [m.clone() for m in maskmem_out["vision_pos_enc"]]
1265
+ # add a no-object embedding to the spatial memory to indicate that the frame
1266
+ # is predicted to be occluded (i.e. no object is appearing in the frame)
1267
+ if self.no_obj_embed_spatial is not None:
1268
+ is_obj_appearing = (object_score_logits > 0).float()
1269
+ maskmem_features += (1 - is_obj_appearing[..., None, None]
1270
+ ) * self.no_obj_embed_spatial[..., None, None].expand(*maskmem_features.shape)
1271
+
1272
+ return maskmem_features, maskmem_pos_enc
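Usage sketch: a minimal example of how the video predictor above can be driven end to end. It assumes a predictor built via `build_sam2_video_predictor` from `sam2/build_sam.py` and the `init_state` method defined earlier in `sam2_video_predictor.py`; the config name, checkpoint path, video path, mask shape, and object id below are placeholders, not values shipped with this repo.

import numpy as np
import torch
from sam2.build_sam import build_sam2_video_predictor  # assumed builder from this repo

# Placeholder config/checkpoint paths; adjust to the files actually available.
predictor = build_sam2_video_predictor("configs/sam2.1/sam2.1_hiera_l.yaml", "sam2.1_hiera_large.pt")

with torch.inference_mode():
    # `init_state` (defined earlier in this file) loads the frames and allocates per-object storage.
    state = predictor.init_state(video_path="video.mp4")

    # Prompt frame 0 with a binary mask for object id 1. `add_new_mask` resizes the mask to the
    # model's input size and returns consolidated masks at the original video resolution.
    mask = np.zeros((720, 1280), dtype=bool)
    mask[100:300, 200:400] = True
    frame_idx, obj_ids, video_res_masks = predictor.add_new_mask(state, frame_idx=0, obj_id=1, mask=mask)

    # Propagate through the video; the memory encoder runs in `propagate_in_video_preflight`.
    results = {}
    for frame_idx, obj_ids, video_res_masks in predictor.propagate_in_video(state):
        results[frame_idx] = (video_res_masks > 0).cpu()

    # Clear prompts or reset the whole session between interactions.
    predictor.reset_state(state)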
sam2/utils/__init__.py ADDED
@@ -0,0 +1,5 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+
4
+ # This source code is licensed under the license found in the
5
+ # LICENSE file in the root directory of this source tree.
sam2/utils/amg.py ADDED
@@ -0,0 +1,328 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+
4
+ # This source code is licensed under the license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ import math
8
+ from copy import deepcopy
9
+ from itertools import product
10
+ from typing import Any, Dict, Generator, ItemsView, List, Tuple
11
+
12
+ import numpy as np
13
+ import torch
14
+
15
+ # Very lightly adapted from https://github.com/facebookresearch/segment-anything/blob/main/segment_anything/utils/amg.py
16
+
17
+
18
+ class MaskData:
19
+ """
20
+ A structure for storing masks and their related data in batched format.
21
+ Implements basic filtering and concatenation.
22
+ """
23
+
24
+ def __init__(self, **kwargs) -> None:
25
+ for v in kwargs.values():
26
+ assert isinstance(
27
+ v, (list, np.ndarray, torch.Tensor)), "MaskData only supports list, numpy arrays, and torch tensors."
28
+ self._stats = dict(**kwargs)
29
+
30
+ def __setitem__(self, key: str, item: Any) -> None:
31
+ assert isinstance(
32
+ item, (list, np.ndarray, torch.Tensor)), "MaskData only supports list, numpy arrays, and torch tensors."
33
+ self._stats[key] = item
34
+
35
+ def __delitem__(self, key: str) -> None:
36
+ del self._stats[key]
37
+
38
+ def __getitem__(self, key: str) -> Any:
39
+ return self._stats[key]
40
+
41
+ def items(self) -> ItemsView[str, Any]:
42
+ return self._stats.items()
43
+
44
+ def filter(self, keep: torch.Tensor) -> None:
45
+ for k, v in self._stats.items():
46
+ if v is None:
47
+ self._stats[k] = None
48
+ elif isinstance(v, torch.Tensor):
49
+ self._stats[k] = v[torch.as_tensor(keep, device=v.device)]
50
+ elif isinstance(v, np.ndarray):
51
+ self._stats[k] = v[keep.detach().cpu().numpy()]
52
+ elif isinstance(v, list) and keep.dtype == torch.bool:
53
+ self._stats[k] = [a for i, a in enumerate(v) if keep[i]]
54
+ elif isinstance(v, list):
55
+ self._stats[k] = [v[i] for i in keep]
56
+ else:
57
+ raise TypeError(f"MaskData key {k} has an unsupported type {type(v)}.")
58
+
59
+ def cat(self, new_stats: "MaskData") -> None:
60
+ for k, v in new_stats.items():
61
+ if k not in self._stats or self._stats[k] is None:
62
+ self._stats[k] = deepcopy(v)
63
+ elif isinstance(v, torch.Tensor):
64
+ self._stats[k] = torch.cat([self._stats[k], v], dim=0)
65
+ elif isinstance(v, np.ndarray):
66
+ self._stats[k] = np.concatenate([self._stats[k], v], axis=0)
67
+ elif isinstance(v, list):
68
+ self._stats[k] = self._stats[k] + deepcopy(v)
69
+ else:
70
+ raise TypeError(f"MaskData key {k} has an unsupported type {type(v)}.")
71
+
72
+ def to_numpy(self) -> None:
73
+ for k, v in self._stats.items():
74
+ if isinstance(v, torch.Tensor):
75
+ self._stats[k] = v.float().detach().cpu().numpy()
76
+
77
+
78
+ def is_box_near_crop_edge(boxes: torch.Tensor,
79
+ crop_box: List[int],
80
+ orig_box: List[int],
81
+ atol: float = 20.0) -> torch.Tensor:
82
+ """Filter masks at the edge of a crop, but not at the edge of the original image."""
83
+ crop_box_torch = torch.as_tensor(crop_box, dtype=torch.float, device=boxes.device)
84
+ orig_box_torch = torch.as_tensor(orig_box, dtype=torch.float, device=boxes.device)
85
+ boxes = uncrop_boxes_xyxy(boxes, crop_box).float()
86
+ near_crop_edge = torch.isclose(boxes, crop_box_torch[None, :], atol=atol, rtol=0)
87
+ near_image_edge = torch.isclose(boxes, orig_box_torch[None, :], atol=atol, rtol=0)
88
+ near_crop_edge = torch.logical_and(near_crop_edge, ~near_image_edge)
89
+ return torch.any(near_crop_edge, dim=1)
90
+
91
+
92
+ def box_xyxy_to_xywh(box_xyxy: torch.Tensor) -> torch.Tensor:
93
+ box_xywh = deepcopy(box_xyxy)
94
+ box_xywh[2] = box_xywh[2] - box_xywh[0]
95
+ box_xywh[3] = box_xywh[3] - box_xywh[1]
96
+ return box_xywh
97
+
98
+
99
+ def batch_iterator(batch_size: int, *args) -> Generator[List[Any], None, None]:
100
+ assert len(args) > 0 and all(len(a) == len(args[0])
101
+ for a in args), "Batched iteration must have inputs of all the same size."
102
+ n_batches = len(args[0]) // batch_size + int(len(args[0]) % batch_size != 0)
103
+ for b in range(n_batches):
104
+ yield [arg[b * batch_size:(b + 1) * batch_size] for arg in args]
105
+
106
+
107
+ def mask_to_rle_pytorch(tensor: torch.Tensor) -> List[Dict[str, Any]]:
108
+ """
109
+ Encodes masks to an uncompressed RLE, in the format expected by
110
+ pycoco tools.
111
+ """
112
+ # Put in fortran order and flatten h,w
113
+ b, h, w = tensor.shape
114
+ tensor = tensor.permute(0, 2, 1).flatten(1)
115
+
116
+ # Compute change indices
117
+ diff = tensor[:, 1:] ^ tensor[:, :-1]
118
+ change_indices = diff.nonzero()
119
+
120
+ # Encode run length
121
+ out = []
122
+ for i in range(b):
123
+ cur_idxs = change_indices[change_indices[:, 0] == i, 1]
124
+ cur_idxs = torch.cat([
125
+ torch.tensor([0], dtype=cur_idxs.dtype, device=cur_idxs.device),
126
+ cur_idxs + 1,
127
+ torch.tensor([h * w], dtype=cur_idxs.dtype, device=cur_idxs.device),
128
+ ])
129
+ btw_idxs = cur_idxs[1:] - cur_idxs[:-1]
130
+ counts = [] if tensor[i, 0] == 0 else [0]
131
+ counts.extend(btw_idxs.detach().cpu().tolist())
132
+ out.append({"size": [h, w], "counts": counts})
133
+ return out
134
+
135
+
136
+ def rle_to_mask(rle: Dict[str, Any]) -> np.ndarray:
137
+ """Compute a binary mask from an uncompressed RLE."""
138
+ h, w = rle["size"]
139
+ mask = np.empty(h * w, dtype=bool)
140
+ idx = 0
141
+ parity = False
142
+ for count in rle["counts"]:
143
+ mask[idx:idx + count] = parity
144
+ idx += count
145
+ parity ^= True
146
+ mask = mask.reshape(w, h)
147
+ return mask.transpose() # Put in C order
148
+
149
+
150
+ def area_from_rle(rle: Dict[str, Any]) -> int:
151
+ return sum(rle["counts"][1::2])
152
+
153
+
154
+ def calculate_stability_score(masks: torch.Tensor, mask_threshold: float, threshold_offset: float) -> torch.Tensor:
155
+ """
156
+ Computes the stability score for a batch of masks. The stability
157
+ score is the IoU between the binary masks obtained by thresholding
158
+ the predicted mask logits at high and low values.
159
+ """
160
+ # One mask is always contained inside the other.
161
+ # Save memory by preventing unnecessary cast to torch.int64
162
+ intersections = ((masks > (mask_threshold + threshold_offset)).sum(-1,
163
+ dtype=torch.int16).sum(-1, dtype=torch.int32))
164
+ unions = ((masks > (mask_threshold - threshold_offset)).sum(-1, dtype=torch.int16).sum(-1, dtype=torch.int32))
165
+ return intersections / unions
166
+
167
+
168
+ def build_point_grid(n_per_side: int) -> np.ndarray:
169
+ """Generates a 2D grid of points evenly spaced in [0,1]x[0,1]."""
170
+ offset = 1 / (2 * n_per_side)
171
+ points_one_side = np.linspace(offset, 1 - offset, n_per_side)
172
+ points_x = np.tile(points_one_side[None, :], (n_per_side, 1))
173
+ points_y = np.tile(points_one_side[:, None], (1, n_per_side))
174
+ points = np.stack([points_x, points_y], axis=-1).reshape(-1, 2)
175
+ return points
176
+
177
+
178
+ def build_all_layer_point_grids(n_per_side: int, n_layers: int, scale_per_layer: int) -> List[np.ndarray]:
179
+ """Generates point grids for all crop layers."""
180
+ points_by_layer = []
181
+ for i in range(n_layers + 1):
182
+ n_points = int(n_per_side / (scale_per_layer**i))
183
+ points_by_layer.append(build_point_grid(n_points))
184
+ return points_by_layer
185
+
186
+
187
+ def generate_crop_boxes(im_size: Tuple[int, ...], n_layers: int,
188
+ overlap_ratio: float) -> Tuple[List[List[int]], List[int]]:
189
+ """
190
+ Generates a list of crop boxes of different sizes. Each layer
191
+ has (2**i)**2 boxes for the ith layer.
192
+ """
193
+ crop_boxes, layer_idxs = [], []
194
+ im_h, im_w = im_size
195
+ short_side = min(im_h, im_w)
196
+
197
+ # Original image
198
+ crop_boxes.append([0, 0, im_w, im_h])
199
+ layer_idxs.append(0)
200
+
201
+ def crop_len(orig_len, n_crops, overlap):
202
+ return int(math.ceil((overlap * (n_crops - 1) + orig_len) / n_crops))
203
+
204
+ for i_layer in range(n_layers):
205
+ n_crops_per_side = 2**(i_layer + 1)
206
+ overlap = int(overlap_ratio * short_side * (2 / n_crops_per_side))
207
+
208
+ crop_w = crop_len(im_w, n_crops_per_side, overlap)
209
+ crop_h = crop_len(im_h, n_crops_per_side, overlap)
210
+
211
+ crop_box_x0 = [int((crop_w - overlap) * i) for i in range(n_crops_per_side)]
212
+ crop_box_y0 = [int((crop_h - overlap) * i) for i in range(n_crops_per_side)]
213
+
214
+ # Crops in XYWH format
215
+ for x0, y0 in product(crop_box_x0, crop_box_y0):
216
+ box = [x0, y0, min(x0 + crop_w, im_w), min(y0 + crop_h, im_h)]
217
+ crop_boxes.append(box)
218
+ layer_idxs.append(i_layer + 1)
219
+
220
+ return crop_boxes, layer_idxs
221
+
222
+
223
+ def uncrop_boxes_xyxy(boxes: torch.Tensor, crop_box: List[int]) -> torch.Tensor:
224
+ x0, y0, _, _ = crop_box
225
+ offset = torch.tensor([[x0, y0, x0, y0]], device=boxes.device)
226
+ # Check if boxes has a channel dimension
227
+ if len(boxes.shape) == 3:
228
+ offset = offset.unsqueeze(1)
229
+ return boxes + offset
230
+
231
+
232
+ def uncrop_points(points: torch.Tensor, crop_box: List[int]) -> torch.Tensor:
233
+ x0, y0, _, _ = crop_box
234
+ offset = torch.tensor([[x0, y0]], device=points.device)
235
+ # Check if points has a channel dimension
236
+ if len(points.shape) == 3:
237
+ offset = offset.unsqueeze(1)
238
+ return points + offset
239
+
240
+
241
+ def uncrop_masks(masks: torch.Tensor, crop_box: List[int], orig_h: int, orig_w: int) -> torch.Tensor:
242
+ x0, y0, x1, y1 = crop_box
243
+ if x0 == 0 and y0 == 0 and x1 == orig_w and y1 == orig_h:
244
+ return masks
245
+ # Coordinate transform masks
246
+ pad_x, pad_y = orig_w - (x1 - x0), orig_h - (y1 - y0)
247
+ pad = (x0, pad_x - x0, y0, pad_y - y0)
248
+ return torch.nn.functional.pad(masks, pad, value=0)
249
+
250
+
251
+ def remove_small_regions(mask: np.ndarray, area_thresh: float, mode: str) -> Tuple[np.ndarray, bool]:
252
+ """
253
+ Removes small disconnected regions and holes in a mask. Returns the
254
+ mask and an indicator of if the mask has been modified.
255
+ """
256
+ import cv2 # type: ignore
257
+
258
+ assert mode in ["holes", "islands"]
259
+ correct_holes = mode == "holes"
260
+ working_mask = (correct_holes ^ mask).astype(np.uint8)
261
+ n_labels, regions, stats, _ = cv2.connectedComponentsWithStats(working_mask, 8)
262
+ sizes = stats[:, -1][1:] # Row 0 is background label
263
+ small_regions = [i + 1 for i, s in enumerate(sizes) if s < area_thresh]
264
+ if len(small_regions) == 0:
265
+ return mask, False
266
+ fill_labels = [0] + small_regions
267
+ if not correct_holes:
268
+ fill_labels = [i for i in range(n_labels) if i not in fill_labels]
269
+ # If every region is below threshold, keep largest
270
+ if len(fill_labels) == 0:
271
+ fill_labels = [int(np.argmax(sizes)) + 1]
272
+ mask = np.isin(regions, fill_labels)
273
+ return mask, True
274
+
275
+
276
+ def coco_encode_rle(uncompressed_rle: Dict[str, Any]) -> Dict[str, Any]:
277
+ from pycocotools import mask as mask_utils # type: ignore
278
+
279
+ h, w = uncompressed_rle["size"]
280
+ rle = mask_utils.frPyObjects(uncompressed_rle, h, w)
281
+ rle["counts"] = rle["counts"].decode("utf-8") # Necessary to serialize with json
282
+ return rle
283
+
284
+
285
+ def batched_mask_to_box(masks: torch.Tensor) -> torch.Tensor:
286
+ """
287
+ Calculates boxes in XYXY format around masks. Return [0,0,0,0] for
288
+ an empty mask. For input shape C1xC2x...xHxW, the output shape is C1xC2x...x4.
289
+ """
290
+ # torch.max below raises an error on empty inputs, just skip in this case
291
+ if torch.numel(masks) == 0:
292
+ return torch.zeros(*masks.shape[:-2], 4, device=masks.device)
293
+
294
+ # Normalize shape to CxHxW
295
+ shape = masks.shape
296
+ h, w = shape[-2:]
297
+ if len(shape) > 2:
298
+ masks = masks.flatten(0, -3)
299
+ else:
300
+ masks = masks.unsqueeze(0)
301
+
302
+ # Get top and bottom edges
303
+ in_height, _ = torch.max(masks, dim=-1)
304
+ in_height_coords = in_height * torch.arange(h, device=in_height.device)[None, :]
305
+ bottom_edges, _ = torch.max(in_height_coords, dim=-1)
306
+ in_height_coords = in_height_coords + h * (~in_height)
307
+ top_edges, _ = torch.min(in_height_coords, dim=-1)
308
+
309
+ # Get left and right edges
310
+ in_width, _ = torch.max(masks, dim=-2)
311
+ in_width_coords = in_width * torch.arange(w, device=in_width.device)[None, :]
312
+ right_edges, _ = torch.max(in_width_coords, dim=-1)
313
+ in_width_coords = in_width_coords + w * (~in_width)
314
+ left_edges, _ = torch.min(in_width_coords, dim=-1)
315
+
316
+ # If the mask is empty the right edge will be to the left of the left edge.
317
+ # Replace these boxes with [0, 0, 0, 0]
318
+ empty_filter = (right_edges < left_edges) | (bottom_edges < top_edges)
319
+ out = torch.stack([left_edges, top_edges, right_edges, bottom_edges], dim=-1)
320
+ out = out * (~empty_filter).unsqueeze(-1)
321
+
322
+ # Return to original shape
323
+ if len(shape) > 2:
324
+ out = out.reshape(*shape[:-2], 4)
325
+ else:
326
+ out = out[0]
327
+
328
+ return out
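Usage sketch: how the RLE helpers above compose, assuming the module path `sam2.utils.amg` from this commit; the mask shapes and contents are made up for illustration.

import numpy as np
import torch
from sam2.utils.amg import area_from_rle, batched_mask_to_box, mask_to_rle_pytorch, rle_to_mask

# Two toy boolean masks in a (B, H, W) batch.
masks = torch.zeros(2, 32, 32, dtype=torch.bool)
masks[0, 4:10, 6:20] = True   # 6 x 14 rectangle -> area 84
masks[1, 0:16, 0:16] = True   # 16 x 16 square   -> area 256

rles = mask_to_rle_pytorch(masks)            # uncompressed RLE dicts, one per mask
areas = [area_from_rle(r) for r in rles]     # [84, 256]
recovered = np.stack([rle_to_mask(r) for r in rles])
assert (recovered == masks.numpy()).all()    # encode/decode round-trips exactly

boxes = batched_mask_to_box(masks)           # XYXY boxes, shape (2, 4)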
sam2/utils/misc.py ADDED
@@ -0,0 +1,340 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+
4
+ # This source code is licensed under the license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ import os
8
+ import warnings
9
+ from threading import Thread
10
+
11
+ import numpy as np
12
+ import torch
13
+ from PIL import Image
14
+ from tqdm import tqdm
15
+
16
+
17
+ def get_sdpa_settings():
18
+ if torch.cuda.is_available():
19
+ old_gpu = torch.cuda.get_device_properties(0).major < 7
20
+ # only use Flash Attention on Ampere (8.0) or newer GPUs
21
+ use_flash_attn = torch.cuda.get_device_properties(0).major >= 8
22
+ if not use_flash_attn:
23
+ warnings.warn(
24
+ "Flash Attention is disabled as it requires a GPU with Ampere (8.0) CUDA capability.",
25
+ category=UserWarning,
26
+ stacklevel=2,
27
+ )
28
+ # keep math kernel for PyTorch versions before 2.2 (Flash Attention v2 is only
29
+ # available on PyTorch 2.2+, while Flash Attention v1 cannot handle all cases)
30
+ pytorch_version = tuple(int(v) for v in torch.__version__.split(".")[:2])
31
+ if pytorch_version < (2, 2):
32
+ warnings.warn(
33
+ f"You are using PyTorch {torch.__version__} without Flash Attention v2 support. "
34
+ "Consider upgrading to PyTorch 2.2+ for Flash Attention v2 (which could be faster).",
35
+ category=UserWarning,
36
+ stacklevel=2,
37
+ )
38
+ math_kernel_on = pytorch_version < (2, 2) or not use_flash_attn
39
+ else:
40
+ old_gpu = True
41
+ use_flash_attn = False
42
+ math_kernel_on = True
43
+
44
+ return old_gpu, use_flash_attn, math_kernel_on
45
+
46
+
47
+ def get_connected_components(mask):
48
+ """
49
+ Get the connected components (8-connectivity) of binary masks of shape (N, 1, H, W).
50
+
51
+ Inputs:
52
+ - mask: A binary mask tensor of shape (N, 1, H, W), where 1 is foreground and 0 is
53
+ background.
54
+
55
+ Outputs:
56
+ - labels: A tensor of shape (N, 1, H, W) containing the connected component labels
57
+ for foreground pixels and 0 for background pixels.
58
+ - counts: A tensor of shape (N, 1, H, W) containing the area of the connected
59
+ components for foreground pixels and 0 for background pixels.
60
+ """
61
+ from sam2 import _C
62
+
63
+ return _C.get_connected_componnets(mask.to(torch.uint8).contiguous())
64
+
65
+
66
+ def mask_to_box(masks: torch.Tensor):
67
+ """
68
+ compute bounding box given an input mask
69
+
70
+ Inputs:
71
+ - masks: [B, 1, H, W] masks, dtype=torch.Tensor
72
+
73
+ Returns:
74
+ - box_coords: [B, 1, 4], contains (x, y) coordinates of top left and bottom right box corners, dtype=torch.Tensor
75
+ """
76
+ B, _, h, w = masks.shape
77
+ device = masks.device
78
+ xs = torch.arange(w, device=device, dtype=torch.int32)
79
+ ys = torch.arange(h, device=device, dtype=torch.int32)
80
+ grid_xs, grid_ys = torch.meshgrid(xs, ys, indexing="xy")
81
+ grid_xs = grid_xs[None, None, ...].expand(B, 1, h, w)
82
+ grid_ys = grid_ys[None, None, ...].expand(B, 1, h, w)
83
+ min_xs, _ = torch.min(torch.where(masks, grid_xs, w).flatten(-2), dim=-1)
84
+ max_xs, _ = torch.max(torch.where(masks, grid_xs, -1).flatten(-2), dim=-1)
85
+ min_ys, _ = torch.min(torch.where(masks, grid_ys, h).flatten(-2), dim=-1)
86
+ max_ys, _ = torch.max(torch.where(masks, grid_ys, -1).flatten(-2), dim=-1)
87
+ bbox_coords = torch.stack((min_xs, min_ys, max_xs, max_ys), dim=-1)
88
+
89
+ return bbox_coords
90
+
91
+
92
+ def _load_img_as_tensor(img_path, image_size):
93
+ img_pil = Image.open(img_path)
94
+ img_np = np.array(img_pil.convert("RGB").resize((image_size, image_size)))
95
+ if img_np.dtype == np.uint8: # np.uint8 is expected for JPEG images
96
+ img_np = img_np / 255.0
97
+ else:
98
+ raise RuntimeError(f"Unknown image dtype: {img_np.dtype} on {img_path}")
99
+ img = torch.from_numpy(img_np).permute(2, 0, 1)
100
+ video_width, video_height = img_pil.size # the original video size
101
+ return img, video_height, video_width
102
+
103
+
104
+ class AsyncVideoFrameLoader:
105
+ """
106
+ A list of video frames to be loaded asynchronously without blocking session start.
107
+ """
108
+
109
+ def __init__(
110
+ self,
111
+ img_paths,
112
+ image_size,
113
+ offload_video_to_cpu,
114
+ img_mean,
115
+ img_std,
116
+ compute_device,
117
+ ):
118
+ self.img_paths = img_paths
119
+ self.image_size = image_size
120
+ self.offload_video_to_cpu = offload_video_to_cpu
121
+ self.img_mean = img_mean
122
+ self.img_std = img_std
123
+ # items in `self.images` will be loaded asynchronously
124
+ self.images = [None] * len(img_paths)
125
+ # catch and raise any exceptions in the async loading thread
126
+ self.exception = None
127
+ # video_height and video_width will be filled when loading the first image
128
+ self.video_height = None
129
+ self.video_width = None
130
+ self.compute_device = compute_device
131
+
132
+ # load the first frame to fill video_height and video_width and also
133
+ # to cache it (since it's most likely where the user will click)
134
+ self.__getitem__(0)
135
+
136
+ # load the rest of the frames asynchronously without blocking the session start
137
+ def _load_frames():
138
+ try:
139
+ for n in tqdm(range(len(self.images)), desc="frame loading (JPEG)"):
140
+ self.__getitem__(n)
141
+ except Exception as e:
142
+ self.exception = e
143
+
144
+ self.thread = Thread(target=_load_frames, daemon=True)
145
+ self.thread.start()
146
+
147
+ def __getitem__(self, index):
148
+ if self.exception is not None:
149
+ raise RuntimeError("Failure in frame loading thread") from self.exception
150
+
151
+ img = self.images[index]
152
+ if img is not None:
153
+ return img
154
+
155
+ img, video_height, video_width = _load_img_as_tensor(self.img_paths[index], self.image_size)
156
+ self.video_height = video_height
157
+ self.video_width = video_width
158
+ # normalize by mean and std
159
+ img -= self.img_mean
160
+ img /= self.img_std
161
+ if not self.offload_video_to_cpu:
162
+ img = img.to(self.compute_device, non_blocking=True)
163
+ self.images[index] = img
164
+ return img
165
+
166
+ def __len__(self):
167
+ return len(self.images)
168
+
169
+
170
+ def load_video_frames(
171
+ video_path,
172
+ image_size,
173
+ offload_video_to_cpu,
174
+ img_mean=(0.485, 0.456, 0.406),
175
+ img_std=(0.229, 0.224, 0.225),
176
+ async_loading_frames=False,
177
+ compute_device=torch.device("cuda"),
178
+ ):
179
+ """
180
+ Load the video frames from video_path. The frames are resized to image_size as in
181
+ the model and are loaded to GPU if offload_video_to_cpu=False. This is used by the demo.
182
+ """
183
+ is_bytes = isinstance(video_path, bytes)
184
+ is_str = isinstance(video_path, str)
185
+ is_mp4_path = is_str and os.path.splitext(video_path)[-1] in [".mp4", ".MP4"]
186
+ if is_bytes or is_mp4_path:
187
+ return load_video_frames_from_video_file(
188
+ video_path=video_path,
189
+ image_size=image_size,
190
+ offload_video_to_cpu=offload_video_to_cpu,
191
+ img_mean=img_mean,
192
+ img_std=img_std,
193
+ compute_device=compute_device,
194
+ )
195
+ elif is_str and os.path.isdir(video_path):
196
+ return load_video_frames_from_jpg_images(
197
+ video_path=video_path,
198
+ image_size=image_size,
199
+ offload_video_to_cpu=offload_video_to_cpu,
200
+ img_mean=img_mean,
201
+ img_std=img_std,
202
+ async_loading_frames=async_loading_frames,
203
+ compute_device=compute_device,
204
+ )
205
+ else:
206
+ raise NotImplementedError("Only MP4 video and JPEG folder are supported at this moment")
207
+
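An illustrative call (the path is hypothetical): load a clip, keep the normalized frames on CPU, and let the predictor move them to the GPU as needed:

```python
import torch

frames, height, width = load_video_frames(
    video_path="/path/to/video.mp4",  # or a directory of "<frame_index>.jpg" files
    image_size=1024,                  # frames are resized to a 1024x1024 square
    offload_video_to_cpu=True,
    compute_device=torch.device("cuda" if torch.cuda.is_available() else "cpu"),
)
print(frames.shape)  # (num_frames, 3, 1024, 1024), normalized by ImageNet mean/std
```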
208
+
209
+ def load_video_frames_from_jpg_images(
210
+ video_path,
211
+ image_size,
212
+ offload_video_to_cpu,
213
+ img_mean=(0.485, 0.456, 0.406),
214
+ img_std=(0.229, 0.224, 0.225),
215
+ async_loading_frames=False,
216
+ compute_device=torch.device("cuda"),
217
+ ):
218
+ """
219
+ Load the video frames from a directory of JPEG files ("<frame_index>.jpg" format).
220
+
221
+ The frames are resized to image_size x image_size and are loaded to GPU if
222
+ `offload_video_to_cpu` is `False` and to CPU if `offload_video_to_cpu` is `True`.
223
+
224
+ You can load frames asynchronously by setting `async_loading_frames` to `True`.
225
+ """
226
+ if isinstance(video_path, str) and os.path.isdir(video_path):
227
+ jpg_folder = video_path
228
+ else:
229
+ raise NotImplementedError(
230
+ "Only JPEG frames are supported at this moment. For video files, you may use "
231
+ "ffmpeg (https://ffmpeg.org/) to extract frames into a folder of JPEG files, such as \n"
232
+ "```\n"
233
+ "ffmpeg -i <your_video>.mp4 -q:v 2 -start_number 0 <output_dir>/'%05d.jpg'\n"
234
+ "```\n"
235
+ "where `-q:v` generates high-quality JPEG frames and `-start_number 0` asks "
236
+ "ffmpeg to start the JPEG file from 00000.jpg.")
237
+
238
+ frame_names = [p for p in os.listdir(jpg_folder) if os.path.splitext(p)[-1] in [".jpg", ".jpeg", ".JPG", ".JPEG"]]
239
+ frame_names.sort(key=lambda p: int(os.path.splitext(p)[0]))
240
+ num_frames = len(frame_names)
241
+ if num_frames == 0:
242
+ raise RuntimeError(f"no images found in {jpg_folder}")
243
+ img_paths = [os.path.join(jpg_folder, frame_name) for frame_name in frame_names]
244
+ img_mean = torch.tensor(img_mean, dtype=torch.float32)[:, None, None]
245
+ img_std = torch.tensor(img_std, dtype=torch.float32)[:, None, None]
246
+
247
+ if async_loading_frames:
248
+ lazy_images = AsyncVideoFrameLoader(
249
+ img_paths,
250
+ image_size,
251
+ offload_video_to_cpu,
252
+ img_mean,
253
+ img_std,
254
+ compute_device,
255
+ )
256
+ return lazy_images, lazy_images.video_height, lazy_images.video_width
257
+
258
+ images = torch.zeros(num_frames, 3, image_size, image_size, dtype=torch.float32)
259
+ for n, img_path in enumerate(tqdm(img_paths, desc="frame loading (JPEG)")):
260
+ images[n], video_height, video_width = _load_img_as_tensor(img_path, image_size)
261
+ if not offload_video_to_cpu:
262
+ images = images.to(compute_device)
263
+ img_mean = img_mean.to(compute_device)
264
+ img_std = img_std.to(compute_device)
265
+ # normalize by mean and std
266
+ images -= img_mean
267
+ images /= img_std
268
+ return images, video_height, video_width
269
+
270
+
271
+ def load_video_frames_from_video_file(
272
+ video_path,
273
+ image_size,
274
+ offload_video_to_cpu,
275
+ img_mean=(0.485, 0.456, 0.406),
276
+ img_std=(0.229, 0.224, 0.225),
277
+ compute_device=torch.device("cuda"),
278
+ ):
279
+ """Load the video frames from a video file."""
280
+ import decord
281
+
282
+ img_mean = torch.tensor(img_mean, dtype=torch.float32)[:, None, None]
283
+ img_std = torch.tensor(img_std, dtype=torch.float32)[:, None, None]
284
+ # Get the original video height and width
285
+ decord.bridge.set_bridge("torch")
286
+ video_height, video_width, _ = decord.VideoReader(video_path).next().shape
287
+ # Iterate over all frames in the video
288
+ images = []
289
+ for frame in decord.VideoReader(video_path, width=image_size, height=image_size):
290
+ images.append(frame.permute(2, 0, 1))
291
+
292
+ images = torch.stack(images, dim=0).float() / 255.0
293
+ if not offload_video_to_cpu:
294
+ images = images.to(compute_device)
295
+ img_mean = img_mean.to(compute_device)
296
+ img_std = img_std.to(compute_device)
297
+ # normalize by mean and std
298
+ images -= img_mean
299
+ images /= img_std
300
+ return images, video_height, video_width
301
+
302
+
303
+ def fill_holes_in_mask_scores(mask, max_area):
304
+ """
305
+ A post processor to fill small holes in mask scores with area under `max_area`.
306
+ """
307
+ # Holes are those connected components in the background with area <= max_area
308
+ # (background regions are those with mask scores <= 0)
309
+ assert max_area > 0, "max_area must be positive"
310
+
311
+ input_mask = mask
312
+ try:
313
+ labels, areas = get_connected_components(mask <= 0)
314
+ is_hole = (labels > 0) & (areas <= max_area)
315
+ # We fill holes with a small positive mask score (0.1) to change them to foreground.
316
+ mask = torch.where(is_hole, 0.1, mask)
317
+ except Exception as e:
318
+ # Skip the post-processing step on removing small holes if the CUDA kernel fails
319
+ warnings.warn(
320
+ f"{e}\n\nSkipping the post-processing step due to the error above. You can "
321
+ "still use SAM 2 and it's OK to ignore the error above, although some post-processing "
322
+ "functionality may be limited (which doesn't affect the results in most cases; see "
323
+ "https://github.com/facebookresearch/sam2/blob/main/INSTALL.md).",
324
+ category=UserWarning,
325
+ stacklevel=2,
326
+ )
327
+ mask = input_mask
328
+
329
+ return mask
330
+
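An illustrative sketch of the intended behavior (requires the compiled CUDA extension and a CUDA tensor; otherwise the function warns and returns its input unchanged):

```python
import torch

# A 4x4 score map that is positive everywhere except one "hole" pixel.
scores = torch.ones(1, 1, 4, 4, device="cuda")
scores[0, 0, 2, 2] = -1.0

# The area-1 background component is below max_area, so it is filled with
# the small positive score 0.1 and becomes foreground.
filled = fill_holes_in_mask_scores(scores, max_area=8)
```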
331
+
332
+ def concat_points(old_point_inputs, new_points, new_labels):
333
+ """Add new points and labels to previous point inputs (add at the end)."""
334
+ if old_point_inputs is None:
335
+ points, labels = new_points, new_labels
336
+ else:
337
+ points = torch.cat([old_point_inputs["point_coords"], new_points], dim=1)
338
+ labels = torch.cat([old_point_inputs["point_labels"], new_labels], dim=1)
339
+
340
+ return {"point_coords": points, "point_labels": labels}
sam2/utils/transforms.py ADDED
@@ -0,0 +1,108 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+
4
+ # This source code is licensed under the license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ import warnings
8
+
9
+ import torch
10
+ import torch.nn as nn
11
+ import torch.nn.functional as F
12
+ from torchvision.transforms import Normalize, Resize, ToTensor
13
+
14
+
15
+ class SAM2Transforms(nn.Module):
16
+
17
+ def __init__(self, resolution, mask_threshold, max_hole_area=0.0, max_sprinkle_area=0.0):
18
+ """
19
+ Transforms for SAM2.
20
+ """
21
+ super().__init__()
22
+ self.resolution = resolution
23
+ self.mask_threshold = mask_threshold
24
+ self.max_hole_area = max_hole_area
25
+ self.max_sprinkle_area = max_sprinkle_area
26
+ self.mean = [0.485, 0.456, 0.406]
27
+ self.std = [0.229, 0.224, 0.225]
28
+ self.to_tensor = ToTensor()
29
+ self.transforms = torch.jit.script(
30
+ nn.Sequential(
31
+ Resize((self.resolution, self.resolution)),
32
+ Normalize(self.mean, self.std),
33
+ ))
34
+
35
+ def __call__(self, x):
36
+ x = self.to_tensor(x)
37
+ return self.transforms(x)
38
+
39
+ def forward_batch(self, img_list):
40
+ img_batch = [self.transforms(self.to_tensor(img)) for img in img_list]
41
+ img_batch = torch.stack(img_batch, dim=0)
42
+ return img_batch
43
+
44
+ def transform_coords(self, coords: torch.Tensor, normalize=False, orig_hw=None) -> torch.Tensor:
45
+ """
46
+ Expects a torch tensor with length 2 in the last dimension. The coordinates can be in absolute image or normalized form.
47
+ If the coords are in absolute image coordinates, `normalize` should be set to True and the original image size is required.
48
+
49
+ Returns
50
+ Coordinates rescaled to the model's input resolution (i.e., in the range [0, self.resolution]), which is what the SAM2 model expects.
51
+ """
52
+ if normalize:
53
+ assert orig_hw is not None
54
+ h, w = orig_hw
55
+ coords = coords.clone()
56
+ coords[..., 0] = coords[..., 0] / w
57
+ coords[..., 1] = coords[..., 1] / h
58
+
59
+ coords = coords * self.resolution # unnormalize coords
60
+ return coords
61
+
62
+ def transform_boxes(self, boxes: torch.Tensor, normalize=False, orig_hw=None) -> torch.Tensor:
63
+ """
64
+ Expects a tensor of shape Bx4. The coordinates can be in absolute image or normalized form.
65
+ If the coords are in absolute image coordinates, `normalize` should be set to True and the original image size is required.
66
+ """
67
+ boxes = self.transform_coords(boxes.reshape(-1, 2, 2), normalize, orig_hw)
68
+ return boxes
69
+
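An illustrative use of `transform_coords`: map a click at pixel (x=200, y=100) on a 480x640 image into the model's input space, assuming resolution=1024:

```python
import torch

transforms = SAM2Transforms(resolution=1024, mask_threshold=0.0)
pt = torch.tensor([[[200., 100.]]])
model_pt = transforms.transform_coords(pt, normalize=True, orig_hw=(480, 640))
print(model_pt)  # tensor([[[320.0000, 213.3333]]]) -> (200/640*1024, 100/480*1024)
```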
70
+ def postprocess_masks(self, masks: torch.Tensor, orig_hw) -> torch.Tensor:
71
+ """
72
+ Perform post-processing on the output masks.
73
+ """
74
+ from sam2.utils.misc import get_connected_components
75
+
76
+ masks = masks.float()
77
+ input_masks = masks
78
+ mask_flat = masks.flatten(0, 1).unsqueeze(1) # flatten as 1-channel image
79
+ try:
80
+ if self.max_hole_area > 0:
81
+ # Holes are those connected components in the background with area <= self.max_hole_area
82
+ # (background regions are those with mask scores <= self.mask_threshold)
83
+ labels, areas = get_connected_components(mask_flat <= self.mask_threshold)
84
+ is_hole = (labels > 0) & (areas <= self.max_hole_area)
85
+ is_hole = is_hole.reshape_as(masks)
86
+ # We fill holes with a small positive mask score (10.0) to change them to foreground.
87
+ masks = torch.where(is_hole, self.mask_threshold + 10.0, masks)
88
+
89
+ if self.max_sprinkle_area > 0:
90
+ labels, areas = get_connected_components(mask_flat > self.mask_threshold)
91
+ is_hole = (labels > 0) & (areas <= self.max_sprinkle_area)
92
+ is_hole = is_hole.reshape_as(masks)
93
+ # We remove small sprinkles by assigning a negative mask score (-10.0) to turn them into background.
94
+ masks = torch.where(is_hole, self.mask_threshold - 10.0, masks)
95
+ except Exception as e:
96
+ # Skip the post-processing step if the CUDA kernel fails
97
+ warnings.warn(
98
+ f"{e}\n\nSkipping the post-processing step due to the error above. You can "
99
+ "still use SAM 2 and it's OK to ignore the error above, although some post-processing "
100
+ "functionality may be limited (which doesn't affect the results in most cases; see "
101
+ "https://github.com/facebookresearch/sam2/blob/main/INSTALL.md).",
102
+ category=UserWarning,
103
+ stacklevel=2,
104
+ )
105
+ masks = input_masks
106
+
107
+ masks = F.interpolate(masks.float(), orig_hw, mode="bilinear", align_corners=False).to(masks.dtype)
108
+ return masks
setup.cfg ADDED
@@ -0,0 +1,16 @@
1
+ [yapf]
2
+ column_limit = 120
3
+ based_on_style = pep8
4
+ blank_line_before_nested_class_or_def = true
5
+ split_before_expression_after_opening_paren = true
6
+
7
+ [isort]
8
+ line_length = 120
9
+ multi_line_output = 0
10
+ known_third_party = cv2,decord,deepspeed,gradio,hydra,imageio,matplotlib,nncore,numpy,omegaconf,pandas,peft,PIL,pycocotools,pysrt,requests,safetensors,spaces,tabulate,termplotlib,tqdm,tensordict,torch,torchvision,transformers
11
+ no_lines_before = STDLIB,LOCALFOLDER
12
+ default_section = FIRSTPARTY
13
+
14
+ [flake8]
15
+ max-line-length = 500
16
+ extend-ignore = E741
unipixel/constants.py ADDED
@@ -0,0 +1,7 @@
1
+ # Copyright (c) 2025 Ye Liu. Licensed under the BSD-3-Clause License.
2
+
3
+ IGNORE_INDEX = -100
4
+
5
+ REF_TOKEN = '<|ref|>'
6
+ SEG_TOKEN = '<|seg|>'
7
+ MEM_TOKEN = '<|mem|>'
unipixel/conversation.py ADDED
@@ -0,0 +1,49 @@
1
+ # Copyright (c) 2025 Ye Liu. Licensed under the BSD-3-Clause License.
2
+
3
+ from dataclasses import dataclass
4
+ from typing import List
5
+
6
+
7
+ @dataclass
8
+ class Conversation:
9
+ style: str
10
+ system: str
11
+ roles: List[str]
12
+ seps: List[str]
13
+ messages: List[str]
14
+
15
+ def append_message(self, role, msg):
16
+ self.messages.append([role, msg])
17
+
18
+ def clear(self):
19
+ self.messages = []
20
+
21
+ def get_prompt(self):
22
+ assert self.style in ('chatml', )
23
+
24
+ prompt = self.system + self.seps[0] if self.system is not None else ''
25
+
26
+ for i, (role, msg) in enumerate(self.messages):
27
+ prompt += role
28
+ sep = self.seps[i % 2]
29
+ if msg is not None:
30
+ prompt += msg
31
+ if not prompt.endswith(sep):
32
+ prompt += sep
33
+
34
+ prompt = prompt.lstrip('\n')
35
+ return prompt
36
+
37
+
38
+ def get_conv(conv_type):
39
+ if conv_type == 'chatml':
40
+ conv = Conversation(
41
+ style='chatml',
42
+ system='<|im_start|>system\nYou are a helpful assistant.',
43
+ roles=('\n<|im_start|>user\n', '\n<|im_start|>assistant\n'),
44
+ seps=('<|im_end|>', '<|im_end|>'),
45
+ messages=[])
46
+ else:
47
+ raise ValueError(f'unknown conversation type: {conv_type}')
48
+
49
+ return conv
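An illustrative single-turn prompt built with this helper:

```python
conv = get_conv('chatml')
conv.append_message(conv.roles[0], 'Segment the red car.')
conv.append_message(conv.roles[1], None)
print(conv.get_prompt())
# <|im_start|>system
# You are a helpful assistant.<|im_end|>
# <|im_start|>user
# Segment the red car.<|im_end|>
# <|im_start|>assistant
```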
unipixel/dataset/utils.py ADDED
@@ -0,0 +1,531 @@
1
+ # Copyright (c) 2025 Ye Liu. Licensed under the BSD-3-Clause License.
2
+
3
+ import base64
4
+ import copy
5
+ import math
6
+ import os
7
+ import warnings
8
+ from io import BytesIO
9
+ from typing import Optional
10
+
11
+ import cv2
12
+ import decord
13
+ import nncore
14
+ import numpy as np
15
+ import requests
16
+ import torch
17
+ import torchvision.transforms.functional as T
18
+ from PIL import Image
19
+ from pycocotools.mask import decode, frPyObjects, merge
20
+ from torchvision import transforms
21
+ from torchvision.transforms import InterpolationMode
22
+
23
+ from unipixel.constants import IGNORE_INDEX
24
+ from unipixel.conversation import get_conv
25
+
26
+ IMAGE_FACTOR = 28
27
+ MIN_PIXELS = 4 * 28 * 28
28
+ MAX_PIXELS = 16384 * 28 * 28
29
+ MAX_RATIO = 200
30
+
31
+ VIDEO_MIN_PIXELS = 128 * 28 * 28
32
+ VIDEO_MAX_PIXELS = 768 * 28 * 28
33
+ FRAME_FACTOR = 2
34
+ FPS = 2.0
35
+ FPS_MIN_FRAMES = 4
36
+ FPS_MAX_FRAMES = 768
37
+
38
+ # Set the maximum number of video token inputs.
39
+ # Here, 128K represents the maximum number of input tokens for the VLLM model.
40
+ # Remember to adjust it according to your own configuration.
41
+ VIDEO_TOTAL_PIXELS = int(float(os.environ.get('VIDEO_MAX_PIXELS', 128000 * 28 * 28 * 0.9)))
42
+
43
+
44
+ def round_by_factor(number: int, factor: int) -> int:
45
+ """Returns the closest integer to 'number' that is divisible by 'factor'."""
46
+ return round(number / factor) * factor
47
+
48
+
49
+ def ceil_by_factor(number: int, factor: int) -> int:
50
+ """Returns the smallest integer greater than or equal to 'number' that is divisible by 'factor'."""
51
+ return math.ceil(number / factor) * factor
52
+
53
+
54
+ def floor_by_factor(number: int, factor: int) -> int:
55
+ """Returns the largest integer less than or equal to 'number' that is divisible by 'factor'."""
56
+ return math.floor(number / factor) * factor
57
+
58
+
59
+ def smart_resize(height: int,
60
+ width: int,
61
+ factor: int = IMAGE_FACTOR,
62
+ min_pixels: int = MIN_PIXELS,
63
+ max_pixels: int = MAX_PIXELS) -> tuple[int, int]:
64
+ """
65
+ Rescales the image so that the following conditions are met:
66
+
67
+ 1. Both dimensions (height and width) are divisible by 'factor'.
68
+
69
+ 2. The total number of pixels is within the range ['min_pixels', 'max_pixels'].
70
+
71
+ 3. The aspect ratio of the image is maintained as closely as possible.
72
+ """
73
+ if max(height, width) / min(height, width) > MAX_RATIO:
74
+ raise ValueError(
75
+ f"absolute aspect ratio must be smaller than {MAX_RATIO}, got {max(height, width) / min(height, width)}")
76
+ h_bar = max(factor, round_by_factor(height, factor))
77
+ w_bar = max(factor, round_by_factor(width, factor))
78
+ # change order here to ensure not exceeding max_pixels
79
+ if h_bar * w_bar < min_pixels:
80
+ beta = math.sqrt(min_pixels / (height * width))
81
+ h_bar = ceil_by_factor(height * beta, factor)
82
+ w_bar = ceil_by_factor(width * beta, factor)
83
+ if h_bar * w_bar > max_pixels:
84
+ beta = math.sqrt((height * width) / max_pixels)
85
+ h_bar = floor_by_factor(height / beta, factor)
86
+ w_bar = floor_by_factor(width / beta, factor)
87
+ return h_bar, w_bar
88
+
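A worked example of the rounding behavior (values computed from the defaults above):

```python
# A 1080x1920 frame rounds to 1092x1932 with the default factor of 28:
# round(1080/28) = 39 -> 1092, round(1920/28) = 69 -> 1932, and the result
# stays within [MIN_PIXELS, MAX_PIXELS], so neither rescaling branch fires.
h, w = smart_resize(1080, 1920)
assert (h, w) == (1092, 1932) and h % 28 == 0 and w % 28 == 0
```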
89
+
90
+ def to_rgb(pil_image: Image.Image) -> Image.Image:
91
+ if pil_image.mode == 'RGBA':
92
+ white_background = Image.new("RGB", pil_image.size, (255, 255, 255))
93
+ white_background.paste(pil_image, mask=pil_image.split()[3]) # Use alpha channel as mask
94
+ return white_background
95
+ else:
96
+ return pil_image.convert("RGB")
97
+
98
+
99
+ def fetch_image(ele: dict[str, str | Image.Image], size_factor: int = IMAGE_FACTOR) -> Image.Image:
100
+ if "image" in ele:
101
+ image = ele["image"]
102
+ else:
103
+ image = ele["image_url"]
104
+ image_obj = None
105
+ if isinstance(image, Image.Image):
106
+ image_obj = image
107
+ elif image.startswith("http://") or image.startswith("https://"):
108
+ # fix memory leak issue while using BytesIO
109
+ with requests.get(image, stream=True) as response:
110
+ response.raise_for_status()
111
+ with BytesIO(response.content) as bio:
112
+ image_obj = copy.deepcopy(Image.open(bio))
113
+ elif image.startswith("file://"):
114
+ image_obj = Image.open(image[7:])
115
+ elif image.startswith("data:image"):
116
+ if "base64," in image:
117
+ _, base64_data = image.split("base64,", 1)
118
+ data = base64.b64decode(base64_data)
119
+ # fix memory leak issue while using BytesIO
120
+ with BytesIO(data) as bio:
121
+ image_obj = copy.deepcopy(Image.open(bio))
122
+ else:
123
+ image_obj = Image.open(image)
124
+ if image_obj is None:
125
+ raise ValueError(f"Unrecognized image input, support local path, http url, base64 and PIL.Image, got {image}")
126
+ image = to_rgb(image_obj)
127
+
128
+ if "resized_height" in ele and "resized_width" in ele:
129
+ resized_height, resized_width = smart_resize(
130
+ ele["resized_height"],
131
+ ele["resized_width"],
132
+ factor=size_factor,
133
+ )
134
+ else:
135
+ width, height = image.size
136
+ min_pixels = ele.get("min_pixels", MIN_PIXELS)
137
+ max_pixels = ele.get("max_pixels", MAX_PIXELS)
138
+ resized_height, resized_width = smart_resize(
139
+ height,
140
+ width,
141
+ factor=size_factor,
142
+ min_pixels=min_pixels,
143
+ max_pixels=max_pixels,
144
+ )
145
+ image = image.resize((resized_width, resized_height))
146
+
147
+ return image
148
+
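An illustrative call with an in-memory image (so the snippet does not depend on any file path):

```python
from PIL import Image

# fetch_image resizes so that both sides are divisible by IMAGE_FACTOR (28)
# and the pixel count respects the given budget.
img = fetch_image({"image": Image.new("RGB", (1000, 700)), "max_pixels": 512 * 28 * 28})
print(img.size)  # (756, 504) -- both divisible by 28, within the pixel budget
```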
149
+
150
+ def smart_nframes(
151
+ ele: dict,
152
+ total_frames: int,
153
+ video_fps: int | float,
154
+ ) -> int:
155
+ """calculate the number of frames for video used for model inputs.
156
+
157
+ Args:
158
+ ele (dict): a dict containing the configuration of the video.
159
+ support either `fps` or `nframes`:
160
+ - nframes: the number of frames to extract for model inputs.
161
+ - fps: the fps to extract frames for model inputs.
162
+ - min_frames: the minimum number of frames of the video, only used when fps is provided.
163
+ - max_frames: the maximum number of frames of the video, only used when fps is provided.
164
+ total_frames (int): the original total number of frames of the video.
165
+ video_fps (int | float): the original fps of the video.
166
+
167
+ Raises:
168
+ ValueError: nframes should be in the interval [FRAME_FACTOR, total_frames].
169
+
170
+ Returns:
171
+ int: the number of frames for video used for model inputs.
172
+ """
173
+ assert not ("fps" in ele and "nframes" in ele), "Only accept either `fps` or `nframes`"
174
+ if "nframes" in ele:
175
+ nframes = round_by_factor(ele["nframes"], FRAME_FACTOR)
176
+ else:
177
+ fps = ele.get("fps", FPS)
178
+ min_frames = ceil_by_factor(ele.get("min_frames", FPS_MIN_FRAMES), FRAME_FACTOR)
179
+ max_frames = floor_by_factor(ele.get("max_frames", min(FPS_MAX_FRAMES, total_frames)), FRAME_FACTOR)
180
+ nframes = total_frames / video_fps * fps
181
+ nframes = min(min(max(nframes, min_frames), max_frames), total_frames)
182
+ nframes = floor_by_factor(nframes, FRAME_FACTOR)
183
+ if not (FRAME_FACTOR <= nframes and nframes <= total_frames):
184
+ raise ValueError(f"nframes should in interval [{FRAME_FACTOR}, {total_frames}], but got {nframes}.")
185
+ return nframes
186
+
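A worked example under the defaults: a 10-second clip at 30 fps sampled at 2.0 fps yields 300 / 30 * 2 = 20 frames, already a multiple of FRAME_FACTOR:

```python
n = smart_nframes({"fps": 2.0}, total_frames=300, video_fps=30)
assert n == 20
```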
187
+
188
+ def calculate_video_frame_range(
189
+ ele: dict,
190
+ total_frames: int,
191
+ video_fps: float,
192
+ ) -> tuple[int, int, int]:
193
+ """
194
+ Calculate the start and end frame indices based on the given time range.
195
+
196
+ Args:
197
+ ele (dict): A dictionary containing optional 'video_start' and 'video_end' keys (in seconds).
198
+ total_frames (int): Total number of frames in the video.
199
+ video_fps (float): Frames per second of the video.
200
+
201
+ Returns:
202
+ tuple: A tuple containing (start_frame, end_frame, frame_count).
203
+
204
+ Raises:
205
+ ValueError: If input parameters are invalid or the time range is inconsistent.
206
+ """
207
+ # Validate essential parameters
208
+ if video_fps <= 0:
209
+ raise ValueError("video_fps must be a positive number")
210
+ if total_frames <= 0:
211
+ raise ValueError("total_frames must be a positive integer")
212
+
213
+ # Get start and end time in seconds
214
+ video_start = ele.get("video_start", None)
215
+ video_end = ele.get("video_end", None)
216
+ if video_start is None and video_end is None:
217
+ return 0, total_frames - 1, total_frames
218
+
219
+ max_duration = total_frames / video_fps
220
+ # Process start frame
221
+ if video_start is not None:
222
+ video_start_clamped = max(0.0, min(video_start, max_duration))
223
+ start_frame = math.ceil(video_start_clamped * video_fps)
224
+ else:
225
+ start_frame = 0
226
+ # Process end frame
227
+ if video_end is not None:
228
+ video_end_clamped = max(0.0, min(video_end, max_duration))
229
+ end_frame = math.floor(video_end_clamped * video_fps)
230
+ end_frame = min(end_frame, total_frames - 1)
231
+ else:
232
+ end_frame = total_frames - 1
233
+
234
+ # Validate frame order
235
+ if start_frame >= end_frame:
236
+ raise ValueError(
237
+ f"Invalid time range: Start frame {start_frame} (at {video_start_clamped if video_start is not None else 0}s) "
238
+ f"exceeds end frame {end_frame} (at {video_end_clamped if video_end is not None else max_duration}s). "
239
+ f"Video duration: {max_duration:.2f}s ({total_frames} frames @ {video_fps}fps)")
240
+
241
+ return start_frame, end_frame, end_frame - start_frame + 1
242
+
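An illustrative call: keep only the 2s-5s window of a 30 fps, 300-frame video:

```python
start, end, count = calculate_video_frame_range(
    {"video_start": 2.0, "video_end": 5.0}, total_frames=300, video_fps=30.0)
print(start, end, count)  # 60 150 91 -> ceil(2*30), floor(5*30), inclusive count
```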
243
+
244
+ def _read_video_decord(ele: dict) -> tuple[torch.Tensor, float]:
245
+ """read video using decord.VideoReader
246
+
247
+ Args:
248
+ ele (dict): a dict containing the configuration of the video.
249
+ support keys:
250
+ - video: the path of video. support "file://", "http://", "https://" and local path.
251
+ - video_start: the start time of video.
252
+ - video_end: the end time of video.
253
+ Returns:
254
+ tuple[torch.Tensor, float]: the video tensor with shape (T, C, H, W) and the effective sampling fps.
255
+ """
256
+ decord.bridge.set_bridge("torch")
257
+ video_path = ele["video"]
258
+ vr = decord.VideoReader(video_path, num_threads=ele.get('num_threads', 0))
259
+ total_frames, video_fps = len(vr), vr.get_avg_fps()
260
+ start_frame, end_frame, total_frames = calculate_video_frame_range(
261
+ ele,
262
+ total_frames,
263
+ video_fps,
264
+ )
265
+ nframes = smart_nframes(ele, total_frames=total_frames, video_fps=video_fps)
266
+ idx = torch.linspace(start_frame, end_frame, nframes).round().long().tolist()
267
+ video = vr.get_batch(idx).permute(0, 3, 1, 2) # Convert to TCHW format
268
+ sample_fps = nframes / max(total_frames, 1e-6) * video_fps
269
+ return video, sample_fps
270
+
271
+
272
+ def fetch_video(ele: dict,
273
+ image_factor: int = IMAGE_FACTOR,
274
+ return_video_sample_fps: bool = False,
275
+ sanity_check=False) -> torch.Tensor | list[Image.Image]:
276
+ if isinstance(ele["video"], str):
277
+ video, sample_fps = _read_video_decord(ele)
278
+ nframes, _, height, width = video.shape
279
+ min_pixels = ele.get("min_pixels", VIDEO_MIN_PIXELS)
280
+ total_pixels = ele.get("total_pixels", VIDEO_TOTAL_PIXELS)
281
+ max_pixels = max(min(VIDEO_MAX_PIXELS, total_pixels / nframes * FRAME_FACTOR), int(min_pixels * 1.05))
282
+ max_pixels_supposed = ele.get("max_pixels", max_pixels)
283
+ max_pixels = min(max_pixels_supposed, max_pixels)
284
+ if "resized_height" in ele and "resized_width" in ele:
285
+ resized_height, resized_width = smart_resize(
286
+ ele["resized_height"],
287
+ ele["resized_width"],
288
+ factor=image_factor,
289
+ )
290
+ else:
291
+ resized_height, resized_width = smart_resize(
292
+ height,
293
+ width,
294
+ factor=image_factor,
295
+ min_pixels=min_pixels,
296
+ max_pixels=max_pixels,
297
+ )
298
+ video = transforms.functional.resize(
299
+ video,
300
+ [resized_height, resized_width],
301
+ interpolation=InterpolationMode.BICUBIC,
302
+ antialias=True,
303
+ ).float()
304
+
305
+ if sanity_check and (video == 0).all():
306
+ raise ValueError("video '{}' contains all zeros".format(ele["video"]))
307
+
308
+ if return_video_sample_fps:
309
+ return video, sample_fps
310
+ return video
311
+ else:
312
+ assert isinstance(ele["video"], (list, tuple))
313
+ process_info = ele.copy()
314
+ process_info.pop("type", None)
315
+ process_info.pop("video", None)
316
+ images = [
317
+ fetch_image({
318
+ "image": video_element,
319
+ **process_info
320
+ }, size_factor=image_factor) for video_element in ele["video"]
321
+ ]
322
+ nframes = ceil_by_factor(len(images), FRAME_FACTOR)
323
+ if len(images) < nframes:
324
+ images.extend([images[-1]] * (nframes - len(images)))
325
+ if return_video_sample_fps:
326
+ return images, process_info.pop("fps", 2.0)
327
+ return images
328
+
329
+
330
+ def extract_vision_info(conversations: list[dict] | list[list[dict]]) -> list[dict]:
331
+ vision_infos = []
332
+ if isinstance(conversations[0], dict):
333
+ conversations = [conversations]
334
+ for conversation in conversations:
335
+ for message in conversation:
336
+ if isinstance(message["content"], list):
337
+ for ele in message["content"]:
338
+ if ("image" in ele or "image_url" in ele or "video" in ele
339
+ or ele.get("type", "") in ("image", "image_url", "video")):
340
+ vision_infos.append(ele)
341
+ return vision_infos
342
+
343
+
344
+ def process_vision_info(
345
+ conversations: list[dict] | list[list[dict]],
346
+ return_video_kwargs: bool = False,
347
+ sanity_check=False
348
+ ) -> tuple[list[Image.Image] | None, list[torch.Tensor | list[Image.Image]] | None, Optional[dict]]:
349
+
350
+ vision_infos = extract_vision_info(conversations)
351
+ # Read images or videos
352
+ image_inputs = []
353
+ video_inputs = []
354
+ video_sample_fps_list = []
355
+ for vision_info in vision_infos:
356
+ if "image" in vision_info or "image_url" in vision_info:
357
+ image_inputs.append(fetch_image(vision_info))
358
+ elif "video" in vision_info:
359
+ video_input, video_sample_fps = fetch_video(
360
+ vision_info, return_video_sample_fps=True, sanity_check=sanity_check)
361
+ video_sample_fps_list.append(video_sample_fps)
362
+ video_inputs.append(video_input)
363
+ else:
364
+ raise ValueError("image, image_url or video should in content.")
365
+ if len(image_inputs) == 0:
366
+ image_inputs = None
367
+ if len(video_inputs) == 0:
368
+ video_inputs = None
369
+ if return_video_kwargs:
370
+ return image_inputs, video_inputs, {'fps': video_sample_fps_list}
371
+ return image_inputs, video_inputs
372
+
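An illustrative call (the video path is hypothetical): collect the visual inputs from a message list before tokenization:

```python
messages = [{
    'role': 'user',
    'content': [
        {'type': 'video', 'video': '/path/to/video.mp4'},
        {'type': 'text', 'text': 'Segment the person on the left.'},
    ],
}]
images, videos, video_kwargs = process_vision_info(messages, return_video_kwargs=True)
# images is None here; videos holds one (T, C, H, W) tensor; video_kwargs carries the sampled fps.
```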
373
+
374
+ def resize(mask, size):
375
+ return T.resize(mask.unsqueeze(0).unsqueeze(0), size)[0, 0]
376
+
377
+
378
+ def process_masks(sample, frame_size, inds):
379
+ if sample['mask_type'] == 'image':
380
+ # case 1: list of masks or paths to masks
381
+ masks = []
382
+ for obj_oids in sample['oids']:
383
+ obj_masks = []
384
+ for i in inds:
385
+ label = sample['masks'][i]
386
+ if isinstance(label, str):
387
+ label = np.array(Image.open(label))
388
+ elif label is None:
389
+ label = np.full(frame_size, -1)
390
+ obj_masks.append(torch.from_numpy(sum([label == oid for oid in obj_oids])).float())
391
+ masks.append(obj_masks)
392
+ elif sample['mask_type'] == 'image_sep':
393
+ # case 2: list of masks or paths to masks (one object per image)
394
+ masks = []
395
+ for raw_obj_masks in sample['masks']:
396
+ obj_masks = []
397
+ for i in inds:
398
+ label = raw_obj_masks[i]
399
+ if isinstance(label, str):
400
+ label = np.array(Image.open(label))
401
+ elif label is None:
402
+ label = np.full(frame_size, -1)
403
+ obj_masks.append(torch.from_numpy(label == 255).float())
404
+ masks.append(obj_masks)
405
+ elif sample['mask_type'] == 'rle':
406
+ # case 3: list of lists of multi-region RLE masks
407
+ raw_masks = nncore.load(sample['masks']) if isinstance(sample['masks'], str) else sample['masks']
408
+ masks = []
409
+ for raw_obj_masks in raw_masks:
410
+ obj_masks = []
411
+ for i in inds:
412
+ mask = torch.zeros(frame_size)
413
+ for rle in raw_obj_masks[i]:
414
+ if isinstance(rle, list):
415
+ rles = frPyObjects(rle, sample.get('height', frame_size[0]), sample.get('width', frame_size[1]))
416
+ mask += resize(torch.from_numpy(decode(merge(rles))).float(), frame_size)
417
+ elif isinstance(rle, dict):
418
+ if isinstance(rle['counts'], list):
419
+ rle = frPyObjects(rle, *rle['size'])
420
+ mask += resize(torch.from_numpy(decode(rle)).float(), frame_size)
421
+ elif rle is None:
422
+ mask += 0
423
+ else:
424
+ raise TypeError(f'unknown rle mask: {rle}')
425
+ obj_masks.append((mask > 0).float())
426
+ masks.append(obj_masks)
427
+ elif sample['mask_type'] == 'polygon':
428
+ # case 4: list of lists of polygons
429
+ masks = []
430
+ for raw_obj_masks in sample['masks']:
431
+ obj_masks = []
432
+ for i in inds:
433
+ # step 1: sort shapes
434
+ areas = []
435
+ for shape in raw_obj_masks[i]:
436
+ tmp = np.zeros(frame_size, dtype=np.uint8)
437
+ cv2.polylines(tmp, np.array([shape['points']], dtype=np.int32), True, 1, 1)
438
+ cv2.fillPoly(tmp, np.array([shape['points']], dtype=np.int32), 1)
439
+ areas.append(tmp.sum())
440
+ shapes = [raw_obj_masks[i][j] for j in list(np.argsort(areas)[::-1].astype(np.int32))]
441
+ # step 2: draw masks
442
+ mask = np.zeros(frame_size, dtype=np.uint8)
443
+ for shape in shapes:
444
+ assert shape['label'] in ('target', 'ignore'), shape
445
+ label = 1 if shape['label'] == 'target' else -1 # replacing 255 with -1 here
446
+ cv2.polylines(mask, np.array([shape['points']], dtype=np.int32), True, label, 1)
447
+ cv2.fillPoly(mask, np.array([shape['points']], dtype=np.int32), label)
448
+ obj_masks.append(torch.from_numpy(mask).float())
449
+ masks.append(obj_masks)
450
+ elif sample['mask_type'] == 'vicas':
451
+ # case 5: special case for vicas dataset
452
+ masks = []
453
+ for obj_rle_path in sample['masks']:
454
+ obj_rles, obj_masks = nncore.load(obj_rle_path), []
455
+ for i in inds:
456
+ mask = torch.zeros(frame_size)
457
+ for rle in obj_rles[i]:
458
+ mask += 0 if rle is None else resize(torch.from_numpy(decode(rle)).float(), frame_size)
459
+ obj_masks.append((mask > 0).float())
460
+ masks.append(obj_masks)
461
+ elif sample['mask_type'] == 'sav':
462
+ # case 6: special case for sav dataset
463
+ annos = nncore.load(sample['masks'])['masklet']
464
+ masks = [[]]
465
+ for i in inds:
466
+ mask = resize(torch.from_numpy(decode(annos[i][int(sample['qid'])])).float(), frame_size)
467
+ masks[0].append(mask)
468
+ else:
469
+ raise TypeError(f"unknown mask type: {sample['mask_type']}")
470
+
471
+ return masks
472
+
473
+
474
+ def build_obj_to_frame_idx(label_mask, batch_mode):
475
+ step_t_obj_to_frame_idx = [[]] if batch_mode else [[] for _ in range(label_mask.size(0))]
476
+
477
+ # t: frame_idx v: video_idx
478
+ for t in range(len(step_t_obj_to_frame_idx)):
479
+ if batch_mode:
480
+ for v in range(label_mask.size(0)):
481
+ for _ in range(label_mask.size(1)):
482
+ step_t_obj_to_frame_idx[t].append(torch.IntTensor([t, v]))
483
+ else:
484
+ for _ in range(label_mask.size(1)):
485
+ step_t_obj_to_frame_idx[t].append(torch.IntTensor([t, 0]))
486
+
487
+ label_obj_to_frame_idx = torch.stack([torch.stack(o) for o in step_t_obj_to_frame_idx])
488
+ return label_obj_to_frame_idx
489
+
490
+
491
+ def preprocess_chatml(input_ids, text, tokenizer):
492
+ conv = get_conv('chatml')
493
+
494
+ rounds = [m + conv.seps[0] for m in text.split(conv.seps[0])]
495
+ assert (len(rounds) % 2 == 0) == (conv.system is not None)
496
+ assert rounds[-1] == conv.seps[0]
497
+ rounds = rounds[:-1]
498
+
499
+ if conv.system is None:
500
+ rounds = [''.join(rounds[i:i + 2]) for i in range(0, len(rounds), 2)]
501
+ else:
502
+ rounds = [''.join(rounds[:3])] + [''.join(rounds[i:i + 2]) for i in range(3, len(rounds), 2)]
503
+
504
+ labels = input_ids.clone()
505
+
506
+ sep = conv.seps[0] + conv.roles[1]
507
+ cur_len = 0
508
+
509
+ for i, rou in enumerate(rounds):
510
+ if len(rou) == 0:
511
+ break
512
+
513
+ ins = sep.join(rou.split(sep)[:-1]) + sep
514
+
515
+ rou_len = tokenizer(rou, return_length=True).length[0]
516
+ ins_len = tokenizer(ins, return_length=True).length[0]
517
+
518
+ labels[cur_len:cur_len + ins_len] = IGNORE_INDEX
519
+ cur_len += rou_len
520
+
521
+ if labels.size(0) != cur_len:
522
+ warnings.warn(f'Tokenization mismatch: {labels.size(0)} and {cur_len}')
523
+
524
+ return labels
525
+
526
+
527
+ def preprocess(input_ids, text, tokenizer, conv_type):
528
+ if conv_type == 'chatml':
529
+ return preprocess_chatml(input_ids, text, tokenizer)
530
+ else:
531
+ raise ValueError(f'unknown conversation type: {conv_type}')
unipixel/model/__init__.py ADDED
@@ -0,0 +1,5 @@
1
+ # Copyright (c) 2025 Ye Liu. Licensed under the BSD-3-Clause License.
2
+
3
+ from .qwen2_5_vl import PatchedQwen2_5_VLProcessor, PixelQwen2_5_VLConfig, PixelQwen2_5_VLForConditionalGeneration
4
+
5
+ MODELS = {'qwen2_5_vl': (PixelQwen2_5_VLConfig, PixelQwen2_5_VLForConditionalGeneration, PatchedQwen2_5_VLProcessor)}
unipixel/model/builder.py ADDED
@@ -0,0 +1,109 @@
1
+ # Copyright (c) 2025 Ye Liu. Licensed under the BSD-3-Clause License.
2
+
3
+ import nncore
4
+ import torch
5
+ import torch.nn as nn
6
+ from peft import PeftModel
7
+ from safetensors.torch import load_model
8
+ from transformers import AutoConfig, AutoModel, AutoProcessor, Qwen2_5_VLForConditionalGeneration
9
+
10
+ from unipixel.utils.env import get_auto_device
11
+
12
+
13
+ def build_model(model_path,
14
+ config=None,
15
+ image_size=None,
16
+ is_trainable=False,
17
+ merge_adapter=False,
18
+ attn_implementation='flash_attention_2',
19
+ device='auto',
20
+ dtype='bfloat16'):
21
+ # set do_resize to false to avoid duplicated resizing
22
+ # https://github.com/huggingface/transformers/blob/main/src/transformers/models/qwen2_vl/image_processing_qwen2_vl.py
23
+ processor = AutoProcessor.from_pretrained(model_path, use_fast=True, do_resize=False)
24
+
25
+ config = config or AutoConfig.from_pretrained(model_path)
26
+ config.sam2_inference_mode = not is_trainable
27
+
28
+ # override sam2 image size
29
+ if image_size is not None:
30
+ config.sam2_image_size = image_size
31
+
32
+ adapter_path = nncore.join(model_path, 'adapter_model.safetensors')
33
+ partial_path = nncore.join(model_path, 'pytorch_model.safetensors')
34
+
35
+ if nncore.is_file(adapter_path) or nncore.is_file(partial_path):
36
+ print(f'Loading base model from {config.base_model_path}...')
37
+ model = AutoModel.from_pretrained(
38
+ config.base_model_path,
39
+ config=config,
40
+ low_cpu_mem_usage=True,
41
+ ignore_mismatched_sizes=True,
42
+ attn_implementation=attn_implementation,
43
+ torch_dtype=dtype,
44
+ device_map='auto' if device == 'all' else None)
45
+
46
+ meta_state_dict = {
47
+ n: torch.empty_like(p, device='cpu')
48
+ for n, p in model.named_parameters() if p.device == torch.device('meta')
49
+ }
50
+ model.load_state_dict(meta_state_dict, strict=False, assign=True)
51
+
52
+ # sam2 weights might be replaced later
53
+ if model.config.sam2_checkpoint:
54
+ model.load_sam2_weights()
55
+
56
+ embed_tokens = model.get_input_embeddings()
57
+ size = (embed_tokens.num_embeddings, embed_tokens.embedding_dim)
58
+ if embed_tokens.weight.size() != size:
59
+ print(f'Resizing embed_tokens from {embed_tokens.weight.size()} to {size}...')
60
+ model.model.language_model.embed_tokens.weight = nn.Parameter(embed_tokens.weight.new_empty(size))
61
+
62
+ size = (model.lm_head.out_features, model.lm_head.in_features)
63
+ if model.lm_head.weight.size() != size:
64
+ print(f'Resizing lm_head from {model.lm_head.weight.size()} to {size}...')
65
+ model.lm_head.weight = nn.Parameter(model.lm_head.weight.new_empty(size))
66
+
67
+ if nncore.is_file(adapter_path):
68
+ print(f'Loading adapter from {model_path}...')
69
+ # transformers integration does not support merge_and_unload, use peft instead
70
+ model = PeftModel.from_pretrained(
71
+ model,
72
+ model_path,
73
+ is_trainable=is_trainable,
74
+ low_cpu_mem_usage=True,
75
+ # load adapters to the same device as embed_tokens
76
+ torch_device=str(embed_tokens.weight.device))
77
+
78
+ if nncore.is_file(partial_path):
79
+ print(f'Loading state dict from {partial_path}...')
80
+ _, unexpected = load_model(model, partial_path, strict=False, device=str(model.device))
81
+ assert len(unexpected) == 0, f'unexpected parameters: {unexpected}'
82
+
83
+ if (not is_trainable or merge_adapter) and nncore.is_file(adapter_path):
84
+ print('Merging adapter and unloading...')
85
+ model = model.merge_and_unload()
86
+ model._hf_peft_config_loaded = False
87
+ else:
88
+ print(f'Loading full model from {model_path}...')
89
+
90
+ if config.model_type == 'qwen2_5_vl':
91
+ model_cls = Qwen2_5_VLForConditionalGeneration
92
+ else:
93
+ model_cls = AutoModel
94
+
95
+ model = model_cls.from_pretrained(
96
+ model_path,
97
+ config=config,
98
+ low_cpu_mem_usage=True,
99
+ attn_implementation=attn_implementation,
100
+ torch_dtype=dtype,
101
+ device_map='auto' if device == 'all' else None)
102
+
103
+ model.requires_grad_(False)
104
+
105
+ if not is_trainable and device != 'all':
106
+ device = get_auto_device() if device == 'auto' else device
107
+ model = model.to(device).eval()
108
+
109
+ return model, processor