diff --git a/.gitattributes b/.gitattributes
deleted file mode 100644
index a6344aac8c09253b3b630fb776ae94478aa0275b..0000000000000000000000000000000000000000
--- a/.gitattributes
+++ /dev/null
@@ -1,35 +0,0 @@
-*.7z filter=lfs diff=lfs merge=lfs -text
-*.arrow filter=lfs diff=lfs merge=lfs -text
-*.bin filter=lfs diff=lfs merge=lfs -text
-*.bz2 filter=lfs diff=lfs merge=lfs -text
-*.ckpt filter=lfs diff=lfs merge=lfs -text
-*.ftz filter=lfs diff=lfs merge=lfs -text
-*.gz filter=lfs diff=lfs merge=lfs -text
-*.h5 filter=lfs diff=lfs merge=lfs -text
-*.joblib filter=lfs diff=lfs merge=lfs -text
-*.lfs.* filter=lfs diff=lfs merge=lfs -text
-*.mlmodel filter=lfs diff=lfs merge=lfs -text
-*.model filter=lfs diff=lfs merge=lfs -text
-*.msgpack filter=lfs diff=lfs merge=lfs -text
-*.npy filter=lfs diff=lfs merge=lfs -text
-*.npz filter=lfs diff=lfs merge=lfs -text
-*.onnx filter=lfs diff=lfs merge=lfs -text
-*.ot filter=lfs diff=lfs merge=lfs -text
-*.parquet filter=lfs diff=lfs merge=lfs -text
-*.pb filter=lfs diff=lfs merge=lfs -text
-*.pickle filter=lfs diff=lfs merge=lfs -text
-*.pkl filter=lfs diff=lfs merge=lfs -text
-*.pt filter=lfs diff=lfs merge=lfs -text
-*.pth filter=lfs diff=lfs merge=lfs -text
-*.rar filter=lfs diff=lfs merge=lfs -text
-*.safetensors filter=lfs diff=lfs merge=lfs -text
-saved_model/**/* filter=lfs diff=lfs merge=lfs -text
-*.tar.* filter=lfs diff=lfs merge=lfs -text
-*.tar filter=lfs diff=lfs merge=lfs -text
-*.tflite filter=lfs diff=lfs merge=lfs -text
-*.tgz filter=lfs diff=lfs merge=lfs -text
-*.wasm filter=lfs diff=lfs merge=lfs -text
-*.xz filter=lfs diff=lfs merge=lfs -text
-*.zip filter=lfs diff=lfs merge=lfs -text
-*.zst filter=lfs diff=lfs merge=lfs -text
-*tfevents* filter=lfs diff=lfs merge=lfs -text
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..fe47536fab4a3c41da6f2b55620f067d0b32fd66
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,9 @@
+# Byte-compiled / optimized / DLL files
+__pycache__
+*.egg-info
+*.py[cod]
+*$py.class
+
+# Temporary data
+.DS_Store
+._*
diff --git a/README.md b/README.md
index 72f8a554c0aeabeaa749a4a81bb9e389add3bd0a..1c301b8fb059b8b5dd6d3bb47d57f20ca02efc72 100644
--- a/README.md
+++ b/README.md
@@ -1,12 +1,12 @@
---
title: UniPixel
-emoji: 📉
-colorFrom: red
-colorTo: purple
+emoji: 🔮
+colorFrom: purple
+colorTo: yellow
sdk: gradio
sdk_version: 5.48.0
app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+pinned: true
+license: bsd-3-clause
+short_description: Unified Object Referring and Segmentation for Pixel-Level Visual Reasoning
+---
\ No newline at end of file
diff --git a/app.py b/app.py
new file mode 100644
index 0000000000000000000000000000000000000000..27152004859e1e6d55ac9b1de2528e826bda7578
--- /dev/null
+++ b/app.py
@@ -0,0 +1,435 @@
+# Copyright (c) 2025 Ye Liu. Licensed under the BSD-3-Clause license.
+
+import os
+import re
+import uuid
+from functools import partial
+
+import gradio as gr
+import imageio.v3 as iio
+import spaces
+import torch
+import torch.nn.functional as F
+import torchvision.transforms.functional as T
+from PIL import Image
+
+from unipixel.constants import MEM_TOKEN, SEG_TOKEN
+from unipixel.dataset.utils import process_vision_info
+from unipixel.model.builder import build_model
+from unipixel.utils.io import load_image, load_video
+from unipixel.utils.transforms import get_sam2_transform
+from unipixel.utils.visualizer import draw_mask, sample_color
+
+PATH = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
+
+MODEL = 'PolyU-ChenLab/UniPixel-3B'
+
+TITLE = 'UniPixel: Unified Object Referring and Segmentation for Pixel-Level Visual Reasoning'
+
+HEADER = """
+
+
+Unified Object Referring and Segmentation for Pixel-Level Visual Reasoning
+
+UniPixel is a unified MLLM for pixel-level vision-language understanding. It flexibly supports a variety of fine-grained tasks, including image/video segmentation, regional understanding, and a novel PixelQA task that jointly requires object-centric referring, segmentation, and question-answering in videos. Please open an issue if you encounter any problems.
+"""
+
+# https://github.com/gradio-app/gradio/pull/10552
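+# widens Gradio's main container on large screens (context in the linked PR)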
+JS = """
+function init() {
+ if (window.innerWidth >= 1536) {
+ document.querySelector('main').style.maxWidth = '1536px'
+ }
+}
+"""
+
+model, processor = build_model(MODEL)
+device = next(model.parameters()).device
+
+sam2_transform = get_sam2_transform(model.config.sam2_image_size)
+
+colors = sample_color()
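+# sample_color() is assumed to return an (N, 3) float array with values in [0, 1]:
+# scaling by 255 gives full-intensity hex colors for the mask overlays, while the
+# light variant maps each channel into [127.5, 255] for softer text highlighting.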
+color_map = {f'Target {i + 1}': f'#{int(c[0]):02x}{int(c[1]):02x}{int(c[2]):02x}' for i, c in enumerate(colors * 255)}
+color_map_light = {
+ f'Target {i + 1}': f'#{int(c[0] * 127.5 + 127.5):02x}{int(c[1] * 127.5 + 127.5):02x}{int(c[2] * 127.5 + 127.5):02x}'
+ for i, c in enumerate(colors)
+}
+
+
+def enable_btns():
+ return (gr.Button(interactive=True), ) * 4
+
+
+def disable_btns():
+ return (gr.Button(interactive=False), ) * 4
+
+
+def reset_seg():
+ return 16, gr.Button(interactive=False)
+
+
+def reset_reg():
+ return 1, gr.Button(interactive=False)
+
+
+def update_region(blob):
+ if blob['background'] is None or not blob['layers'][0].any():
+ return
+
+ region = blob['background'].copy()
+ region[blob['layers'][0][:, :, -1] == 0] = [0, 0, 0, 0]
+
+ return region
+
+
+def update_video(video, prompt_idx):
+ if video is None:
+ return
+
+ _, images = load_video(video, sample_frames=16)
+ path = images[prompt_idx - 1]
+
+ return path
+
+
+@spaces.GPU
+def infer_seg(media, query, sample_frames=16, media_type=None):
+ if not media:
+ gr.Warning('Please upload an image or a video.')
+ return None, None, None
+
+ if not query:
+ gr.Warning('Please provide a text prompt.')
+ return None, None, None
+
+ if any(media.endswith(k) for k in ('jpg', 'png')):
+ frames, images = load_image(media), [media]
+ else:
+ frames, images = load_video(media, sample_frames=sample_frames)
+
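+    # A 28x28 pixel patch is assumed to correspond to one visual token (hence the
+    # 28 * 28 factors below); max_pixels shrinks as more frames are kept so the
+    # total number of visual tokens stays roughly constant across videos.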
+ messages = [{
+ 'role':
+ 'user',
+ 'content': [{
+ 'type': 'video',
+ 'video': images,
+ 'min_pixels': 128 * 28 * 28,
+ 'max_pixels': 256 * 28 * 28 * int(sample_frames / len(images))
+ }, {
+ 'type': 'text',
+ 'text': query
+ }]
+ }]
+
+ text = processor.apply_chat_template(messages, add_generation_prompt=True)
+
+ images, videos, kwargs = process_vision_info(messages, return_video_kwargs=True)
+
+ data = processor(text=[text], images=images, videos=videos, return_tensors='pt', **kwargs)
+
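+    # SAM-2 receives its own resized copy of the frames; the original frame size is
+    # kept so that predicted masks can be upsampled back to full resolution.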
+ data['frames'] = [sam2_transform(frames).to(model.sam2.dtype)]
+ data['frame_size'] = [frames.shape[1:3]]
+
+ output_ids = model.generate(
+ **data.to(device),
+ do_sample=False,
+ temperature=None,
+ top_k=None,
+ top_p=None,
+ repetition_penalty=None,
+ max_new_tokens=512)
+
+ assert data.input_ids.size(0) == output_ids.size(0) == 1
+ output_ids = output_ids[0, data.input_ids.size(1):]
+
+ if output_ids[-1] == processor.tokenizer.eos_token_id:
+ output_ids = output_ids[:-1]
+
+ response = processor.decode(output_ids, clean_up_tokenization_spaces=False)
+ response = response.replace(f' {SEG_TOKEN}', SEG_TOKEN).replace(f'{SEG_TOKEN} ', SEG_TOKEN)
+
+ entities = []
+ for i, m in enumerate(re.finditer(re.escape(SEG_TOKEN), response)):
+ entities.append(dict(entity=f'Target {i + 1}', start=m.start(), end=m.end()))
+
+ answer = dict(text=response, entities=entities)
+
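+    # model.seg is assumed to hold one predicted mask per SEG_TOKEN emitted during
+    # generation; multi-frame results are saved as an animated GIF, single images as PNG.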
+ imgs = draw_mask(frames, model.seg, colors=colors)
+
+ path = f"/tmp/{uuid.uuid4().hex}.{'gif' if len(imgs) > 1 else 'png'}"
+ iio.imwrite(path, imgs, duration=100, loop=0)
+
+ if media_type == 'image':
+ if len(model.seg) >= 1:
+ masks = media, [(m[0, 0].numpy(), f'Target {i + 1}') for i, m in enumerate(model.seg)]
+ else:
+ masks = None
+ else:
+ masks = path
+
+ return answer, masks, path
+
+
+infer_seg_image = partial(infer_seg, media_type='image')
+infer_seg_video = partial(infer_seg, media_type='video')
+
+
+@spaces.GPU
+def infer_reg(blob, query, prompt_idx=1, video=None):
+ if blob['background'] is None:
+ gr.Warning('Please upload an image or a video.')
+ return
+
+ if not blob['layers'][0].any():
+ gr.Warning('Please provide a mask prompt.')
+ return
+
+ if not query:
+ gr.Warning('Please provide a text prompt.')
+ return
+
+ if video is None:
+ frames = torch.from_numpy(blob['background'][:, :, :3]).unsqueeze(0)
+ images = [Image.fromarray(blob['background'], mode='RGBA')]
+ else:
+ frames, images = load_video(video, sample_frames=16)
+
+ frame_size = frames.shape[1:3]
+
+ mask = torch.from_numpy(blob['layers'][0][:, :, -1]).unsqueeze(0) > 0
+
+ refer_mask = torch.zeros(frames.size(0), 1, *frame_size)
+ refer_mask[prompt_idx - 1] = mask
+
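+    # The frame count is padded to an even number and the mask is max-pooled over
+    # consecutive frame pairs, presumably to match a temporal patch size of 2 in the
+    # video token grid.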
+ if refer_mask.size(0) % 2 != 0:
+ refer_mask = torch.cat((refer_mask, refer_mask[-1, None]))
+ refer_mask = refer_mask.flatten(1)
+ refer_mask = F.max_pool1d(refer_mask.transpose(-1, -2), kernel_size=2, stride=2).transpose(-1, -2)
+ refer_mask = refer_mask.view(-1, 1, *frame_size)
+
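+    # The textual prefix tells the model which frame(s) carry the mask prompt via <k>
+    # frame indices; MEM_TOKEN is presumably replaced with pooled region features
+    # downstream in the model.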
+ if video is None:
+ prefix = f'Here is an image with the following highlighted regions:\n[0]: <{prompt_idx}> {MEM_TOKEN}\n'
+ else:
+ prefix = f'Here is a video with {len(images)} frames denoted as <1> to <{len(images)}>. The highlighted regions are as follows:\n[0]: <{prompt_idx}>-<{prompt_idx + 1}> {MEM_TOKEN}\n'
+
+ messages = [{
+ 'role':
+ 'user',
+ 'content': [{
+ 'type': 'video',
+ 'video': images,
+ 'min_pixels': 128 * 28 * 28,
+ 'max_pixels': 256 * 28 * 28 * int(16 / len(images))
+ }, {
+ 'type': 'text',
+ 'text': prefix + query
+ }]
+ }]
+
+ text = processor.apply_chat_template(messages, add_generation_prompt=True)
+
+ images, videos, kwargs = process_vision_info(messages, return_video_kwargs=True)
+
+ data = processor(text=[text], images=images, videos=videos, return_tensors='pt', **kwargs)
+
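+    # Align the mask with the visual token grid: resize to the 14-pixel patch raster
+    # implied by video_grid_thw, then max-pool over 28-pixel windows so each token
+    # cell receives a single on/off value.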
+ refer_mask = T.resize(refer_mask, (data['video_grid_thw'][0][1] * 14, data['video_grid_thw'][0][2] * 14))
+ refer_mask = F.max_pool2d(refer_mask, kernel_size=28, stride=28)
+ refer_mask = refer_mask > 0
+
+ data['frames'] = [sam2_transform(frames).to(model.sam2.dtype)]
+ data['frame_size'] = [frames.shape[1:3]]
+ data['refer_mask'] = [refer_mask]
+
+ output_ids = model.generate(
+ **data.to(device),
+ do_sample=False,
+ temperature=None,
+ top_k=None,
+ top_p=None,
+ repetition_penalty=None,
+ max_new_tokens=512)
+
+ assert data.input_ids.size(0) == output_ids.size(0) == 1
+ output_ids = output_ids[0, data.input_ids.size(1):]
+
+ if output_ids[-1] == processor.tokenizer.eos_token_id:
+ output_ids = output_ids[:-1]
+
+ response = processor.decode(output_ids, clean_up_tokenization_spaces=False)
+    response = response.replace(' [0]', '[0]').replace('[0] ', '[0]').replace('[0]', '<region>')
+
+    entities = []
+    # '<region>' is the display marker substituted for '[0]' above; a non-empty marker
+    # is required so that each match spans visible text to highlight
+    for m in re.finditer(re.escape('<region>'), response):
+ entities.append(dict(entity='region', start=m.start(), end=m.end(), color="#f85050"))
+
+ answer = dict(text=response, entities=entities)
+
+ return answer
+
+
+def build_demo():
+ with gr.Blocks(title=TITLE, js=JS) as demo:
+ gr.HTML(HEADER)
+
+ with gr.Tab('Image Segmentation'):
+ download_btn_1 = gr.DownloadButton(label='📦 Download', interactive=False, render=False)
+ msk_1 = gr.AnnotatedImage(label='Segmentation Results', color_map=color_map, render=False)
+ ans_1 = gr.HighlightedText(
+ label='Model Response', color_map=color_map_light, show_inline_category=False, render=False)
+
+ with gr.Row():
+ with gr.Column():
+ media_1 = gr.Image(type='filepath')
+
+ sample_frames_1 = gr.Slider(1, 32, value=16, step=1, visible=False)
+
+ query_1 = gr.Textbox(label='Text Prompt', placeholder='Please segment the...')
+
+ with gr.Row():
+ random_btn_1 = gr.Button(value='🔮 Random', visible=False)
+
+ reset_btn_1 = gr.ClearButton([media_1, query_1, msk_1, ans_1], value='🗑️ Reset')
+ reset_btn_1.click(reset_seg, None, [sample_frames_1, download_btn_1])
+
+ download_btn_1.render()
+
+ submit_btn_1 = gr.Button(value='🚀 Submit', variant='primary')
+ with gr.Column():
+ msk_1.render()
+ ans_1.render()
+
+ ctx_1 = submit_btn_1.click(disable_btns, None, [random_btn_1, reset_btn_1, download_btn_1, submit_btn_1])
+ ctx_1 = ctx_1.then(infer_seg_image, [media_1, query_1, sample_frames_1], [ans_1, msk_1, download_btn_1])
+ ctx_1.then(enable_btns, None, [random_btn_1, reset_btn_1, download_btn_1, submit_btn_1])
+
+ with gr.Tab('Video Segmentation'):
+ download_btn_2 = gr.DownloadButton(label='📦 Download', interactive=False, render=False)
+ msk_2 = gr.Image(label='Segmentation Results', render=False)
+ ans_2 = gr.HighlightedText(
+ label='Model Response', color_map=color_map_light, show_inline_category=False, render=False)
+
+ with gr.Row():
+ with gr.Column():
+ media_2 = gr.Video()
+
+ with gr.Accordion(label='Hyperparameters', open=False):
+ sample_frames_2 = gr.Slider(
+ 1,
+ 32,
+ value=16,
+ step=1,
+ interactive=True,
+ label='Sample Frames',
+ info='The number of frames to sample from a video (Default: 16)')
+
+ query_2 = gr.Textbox(label='Text Prompt', placeholder='Please segment the...')
+
+ with gr.Row():
+ random_btn_2 = gr.Button(value='🔮 Random', visible=False)
+
+ reset_btn_2 = gr.ClearButton([media_2, query_2, msk_2, ans_2], value='🗑️ Reset')
+ reset_btn_2.click(reset_seg, None, [sample_frames_2, download_btn_2])
+
+ download_btn_2.render()
+
+ submit_btn_2 = gr.Button(value='🚀 Submit', variant='primary')
+ with gr.Column():
+ msk_2.render()
+ ans_2.render()
+
+ ctx_2 = submit_btn_2.click(disable_btns, None, [random_btn_2, reset_btn_2, download_btn_2, submit_btn_2])
+ ctx_2 = ctx_2.then(infer_seg_video, [media_2, query_2, sample_frames_2], [ans_2, msk_2, download_btn_2])
+ ctx_2.then(enable_btns, None, [random_btn_2, reset_btn_2, download_btn_2, submit_btn_2])
+
+ with gr.Tab('Image Regional Understanding'):
+ download_btn_3 = gr.DownloadButton(visible=False)
+ msk_3 = gr.Image(label='Highlighted Region', render=False)
+ ans_3 = gr.HighlightedText(label='Model Response', show_inline_category=False, render=False)
+
+ with gr.Row():
+ with gr.Column():
+ media_3 = gr.ImageEditor(
+ label='Image & Mask Prompt',
+ brush=gr.Brush(colors=["#ff000080"], color_mode='fixed'),
+ transforms=None,
+ layers=False)
+ media_3.change(update_region, media_3, msk_3)
+
+ prompt_frame_index_3 = gr.Slider(1, 16, value=1, step=1, visible=False)
+
+ query_3 = gr.Textbox(label='Text Prompt', placeholder='Please describe the highlighted region...')
+
+ with gr.Row():
+ random_btn_3 = gr.Button(value='🔮 Random', visible=False)
+
+ reset_btn_3 = gr.ClearButton([media_3, query_3, msk_3, ans_3], value='🗑️ Reset')
+ reset_btn_3.click(reset_reg, None, [prompt_frame_index_3, download_btn_3])
+
+ submit_btn_3 = gr.Button(value='🚀 Submit', variant='primary')
+ with gr.Column():
+ msk_3.render()
+ ans_3.render()
+
+ ctx_3 = submit_btn_3.click(disable_btns, None, [random_btn_3, reset_btn_3, download_btn_3, submit_btn_3])
+ ctx_3 = ctx_3.then(infer_reg, [media_3, query_3, prompt_frame_index_3], ans_3)
+ ctx_3.then(enable_btns, None, [random_btn_3, reset_btn_3, download_btn_3, submit_btn_3])
+
+ with gr.Tab('Video Regional Understanding'):
+ download_btn_4 = gr.DownloadButton(visible=False)
+ prompt_frame_index_4 = gr.Slider(
+ 1,
+ 16,
+ value=1,
+ step=1,
+ interactive=True,
+ label='Prompt Frame Index',
+ info='The index of the frame that includes mask prompts (Default: 1)',
+ render=False)
+ msk_4 = gr.ImageEditor(
+ label='Mask Prompt',
+ brush=gr.Brush(colors=['#ff000080'], color_mode='fixed'),
+ transforms=None,
+ layers=False,
+ render=False)
+ ans_4 = gr.HighlightedText(label='Model Response', show_inline_category=False, render=False)
+
+ with gr.Row():
+ with gr.Column():
+ media_4 = gr.Video()
+ media_4.change(update_video, [media_4, prompt_frame_index_4], msk_4)
+
+ with gr.Accordion(label='Hyperparameters', open=False):
+ prompt_frame_index_4.render()
+ prompt_frame_index_4.change(update_video, [media_4, prompt_frame_index_4], msk_4)
+
+ query_4 = gr.Textbox(label='Text Prompt', placeholder='Please describe the highlighted region...')
+
+ with gr.Row():
+ random_btn_4 = gr.Button(value='🔮 Random', visible=False)
+
+ reset_btn_4 = gr.ClearButton([media_4, query_4, msk_4, ans_4], value='🗑️ Reset')
+ reset_btn_4.click(reset_reg, None, [prompt_frame_index_4, download_btn_4])
+
+ submit_btn_4 = gr.Button(value='🚀 Submit', variant='primary')
+ with gr.Column():
+ msk_4.render()
+ ans_4.render()
+
+ ctx_4 = submit_btn_4.click(disable_btns, None, [random_btn_4, reset_btn_4, download_btn_4, submit_btn_4])
+ ctx_4 = ctx_4.then(infer_reg, [msk_4, query_4, prompt_frame_index_4, media_4], ans_4)
+ ctx_4.then(enable_btns, None, [random_btn_4, reset_btn_4, download_btn_4, submit_btn_4])
+
+ return demo
+
+
+if __name__ == '__main__':
+ demo = build_demo()
+
+ demo.queue()
+ demo.launch(server_name='0.0.0.0')
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..a95ad1cbd94d6ca6ffbe3ae6f2ab58ef89b6d2bc
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,33 @@
+accelerate==1.9.0
+decord==0.6.0
+deepspeed==0.17.4
+gradio==5.48.0
+hydra-core==1.3.2
+imageio==2.37.0
+iopath==0.1.10
+matplotlib==3.10.5
+nncore==0.4.7
+numpy==2.1.2
+openai==1.99.1
+pandas==2.3.1
+peft==0.17.0
+pycocotools==2.0.10
+pydantic==2.11.7
+pysrt==1.1.2
+scikit-image==0.25.2
+scikit-learn==1.7.1
+sentencepiece==0.2.0
+spaces==0.42.1
+tensordict==0.9.1
+termplotlib==0.3.9
+transformers==4.53.3
+triton==3.3.1
+wandb==0.21.0
+
+# torch==2.7.1+cu128
+# torchvision==0.22.1+cu128
+
+# https://github.com/Dao-AILab/flash-attention/pull/1751
+# flash_attn==2.8.2
+
+# sam2 modified from https://github.com/facebookresearch/sam2/tree/722d1d15111c689908aeeb82d49a57780aac5153
diff --git a/sam2/__init__.py b/sam2/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..0712dd03cb280ab94ba04f8a32aa8ddc8aa3db4a
--- /dev/null
+++ b/sam2/__init__.py
@@ -0,0 +1,11 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+
+from hydra import initialize_config_module
+from hydra.core.global_hydra import GlobalHydra
+
+if not GlobalHydra.instance().is_initialized():
+ initialize_config_module("sam2", version_base="1.2")
diff --git a/sam2/automatic_mask_generator.py b/sam2/automatic_mask_generator.py
new file mode 100644
index 0000000000000000000000000000000000000000..4094cbf6f7905380f2b90999266494978034ac1c
--- /dev/null
+++ b/sam2/automatic_mask_generator.py
@@ -0,0 +1,416 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+
+# Adapted from https://github.com/facebookresearch/segment-anything/blob/main/segment_anything/automatic_mask_generator.py
+from typing import Any, Dict, List, Optional, Tuple
+
+import numpy as np
+import torch
+from torchvision.ops.boxes import batched_nms, box_area # type: ignore
+
+from sam2.modeling.sam2_base import SAM2Base
+from sam2.sam2_image_predictor import SAM2ImagePredictor
+from sam2.utils.amg import (MaskData, area_from_rle, batch_iterator, batched_mask_to_box, box_xyxy_to_xywh,
+ build_all_layer_point_grids, calculate_stability_score, coco_encode_rle,
+ generate_crop_boxes, is_box_near_crop_edge, mask_to_rle_pytorch, remove_small_regions,
+ rle_to_mask, uncrop_boxes_xyxy, uncrop_masks, uncrop_points)
+
+
+class SAM2AutomaticMaskGenerator:
+
+ def __init__(
+ self,
+ model: SAM2Base,
+ points_per_side: Optional[int] = 32,
+ points_per_batch: int = 64,
+ pred_iou_thresh: float = 0.8,
+ stability_score_thresh: float = 0.95,
+ stability_score_offset: float = 1.0,
+ mask_threshold: float = 0.0,
+ box_nms_thresh: float = 0.7,
+ crop_n_layers: int = 0,
+ crop_nms_thresh: float = 0.7,
+ crop_overlap_ratio: float = 512 / 1500,
+ crop_n_points_downscale_factor: int = 1,
+ point_grids: Optional[List[np.ndarray]] = None,
+ min_mask_region_area: int = 0,
+ output_mode: str = "binary_mask",
+ use_m2m: bool = False,
+ multimask_output: bool = True,
+ **kwargs,
+ ) -> None:
+ """
+ Using a SAM 2 model, generates masks for the entire image.
+ Generates a grid of point prompts over the image, then filters
+ low quality and duplicate masks. The default settings are chosen
+ for SAM 2 with a HieraL backbone.
+
+ Arguments:
+ model (Sam): The SAM 2 model to use for mask prediction.
+ points_per_side (int or None): The number of points to be sampled
+ along one side of the image. The total number of points is
+ points_per_side**2. If None, 'point_grids' must provide explicit
+ point sampling.
+ points_per_batch (int): Sets the number of points run simultaneously
+ by the model. Higher numbers may be faster but use more GPU memory.
+ pred_iou_thresh (float): A filtering threshold in [0,1], using the
+ model's predicted mask quality.
+ stability_score_thresh (float): A filtering threshold in [0,1], using
+ the stability of the mask under changes to the cutoff used to binarize
+ the model's mask predictions.
+ stability_score_offset (float): The amount to shift the cutoff when
+                calculating the stability score.
+ mask_threshold (float): Threshold for binarizing the mask logits
+ box_nms_thresh (float): The box IoU cutoff used by non-maximal
+ suppression to filter duplicate masks.
+ crop_n_layers (int): If >0, mask prediction will be run again on
+ crops of the image. Sets the number of layers to run, where each
+ layer has 2**i_layer number of image crops.
+ crop_nms_thresh (float): The box IoU cutoff used by non-maximal
+ suppression to filter duplicate masks between different crops.
+ crop_overlap_ratio (float): Sets the degree to which crops overlap.
+ In the first crop layer, crops will overlap by this fraction of
+ the image length. Later layers with more crops scale down this overlap.
+ crop_n_points_downscale_factor (int): The number of points-per-side
+ sampled in layer n is scaled down by crop_n_points_downscale_factor**n.
+ point_grids (list(np.ndarray) or None): A list over explicit grids
+ of points used for sampling, normalized to [0,1]. The nth grid in the
+ list is used in the nth crop layer. Exclusive with points_per_side.
+ min_mask_region_area (int): If >0, postprocessing will be applied
+ to remove disconnected regions and holes in masks with area smaller
+ than min_mask_region_area. Requires opencv.
+ output_mode (str): The form masks are returned in. Can be 'binary_mask',
+ 'uncompressed_rle', or 'coco_rle'. 'coco_rle' requires pycocotools.
+ For large resolutions, 'binary_mask' may consume large amounts of
+ memory.
+ use_m2m (bool): Whether to add a one step refinement using previous mask predictions.
+ multimask_output (bool): Whether to output multimask at each point of the grid.
+ """
+
+ assert (points_per_side is None) != (point_grids
+                                             is None), "Exactly one of points_per_side or point_grids must be provided."
+ if points_per_side is not None:
+ self.point_grids = build_all_layer_point_grids(
+ points_per_side,
+ crop_n_layers,
+ crop_n_points_downscale_factor,
+ )
+ elif point_grids is not None:
+ self.point_grids = point_grids
+ else:
+            raise ValueError("Can't have both points_per_side and point_grids be None.")
+
+ assert output_mode in [
+ "binary_mask",
+ "uncompressed_rle",
+ "coco_rle",
+ ], f"Unknown output_mode {output_mode}."
+ if output_mode == "coco_rle":
+ try:
+ from pycocotools import mask as mask_utils # type: ignore # noqa: F401
+ except ImportError as e:
+ print("Please install pycocotools")
+ raise e
+
+ self.predictor = SAM2ImagePredictor(
+ model,
+ max_hole_area=min_mask_region_area,
+ max_sprinkle_area=min_mask_region_area,
+ )
+ self.points_per_batch = points_per_batch
+ self.pred_iou_thresh = pred_iou_thresh
+ self.stability_score_thresh = stability_score_thresh
+ self.stability_score_offset = stability_score_offset
+ self.mask_threshold = mask_threshold
+ self.box_nms_thresh = box_nms_thresh
+ self.crop_n_layers = crop_n_layers
+ self.crop_nms_thresh = crop_nms_thresh
+ self.crop_overlap_ratio = crop_overlap_ratio
+ self.crop_n_points_downscale_factor = crop_n_points_downscale_factor
+ self.min_mask_region_area = min_mask_region_area
+ self.output_mode = output_mode
+ self.use_m2m = use_m2m
+ self.multimask_output = multimask_output
+
+ @classmethod
+ def from_pretrained(cls, model_id: str, **kwargs) -> "SAM2AutomaticMaskGenerator":
+ """
+ Load a pretrained model from the Hugging Face hub.
+
+ Arguments:
+ model_id (str): The Hugging Face repository ID.
+ **kwargs: Additional arguments to pass to the model constructor.
+
+ Returns:
+ (SAM2AutomaticMaskGenerator): The loaded model.
+ """
+ from sam2.build_sam import build_sam2_hf
+
+ sam_model = build_sam2_hf(model_id, **kwargs)
+ return cls(sam_model, **kwargs)
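+
+    # Illustrative usage (any model ID from HF_MODEL_ID_TO_FILENAMES in sam2/build_sam.py):
+    #   generator = SAM2AutomaticMaskGenerator.from_pretrained("facebook/sam2.1-hiera-large")
+    #   masks = generator.generate(image)  # image: HWC uint8 numpy array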
+
+ @torch.no_grad()
+ def generate(self, image: np.ndarray) -> List[Dict[str, Any]]:
+ """
+ Generates masks for the given image.
+
+ Arguments:
+ image (np.ndarray): The image to generate masks for, in HWC uint8 format.
+
+ Returns:
+ list(dict(str, any)): A list over records for masks. Each record is
+ a dict containing the following keys:
+ segmentation (dict(str, any) or np.ndarray): The mask. If
+ output_mode='binary_mask', is an array of shape HW. Otherwise,
+ is a dictionary containing the RLE.
+ bbox (list(float)): The box around the mask, in XYWH format.
+ area (int): The area in pixels of the mask.
+ predicted_iou (float): The model's own prediction of the mask's
+ quality. This is filtered by the pred_iou_thresh parameter.
+ point_coords (list(list(float))): The point coordinates input
+ to the model to generate this mask.
+ stability_score (float): A measure of the mask's quality. This
+ is filtered on using the stability_score_thresh parameter.
+ crop_box (list(float)): The crop of the image used to generate
+ the mask, given in XYWH format.
+ """
+
+ # Generate masks
+ mask_data = self._generate_masks(image)
+
+ # Encode masks
+ if self.output_mode == "coco_rle":
+ mask_data["segmentations"] = [coco_encode_rle(rle) for rle in mask_data["rles"]]
+ elif self.output_mode == "binary_mask":
+ mask_data["segmentations"] = [rle_to_mask(rle) for rle in mask_data["rles"]]
+ else:
+ mask_data["segmentations"] = mask_data["rles"]
+
+ # Write mask records
+ curr_anns = []
+ for idx in range(len(mask_data["segmentations"])):
+ ann = {
+ "segmentation": mask_data["segmentations"][idx],
+ "area": area_from_rle(mask_data["rles"][idx]),
+ "bbox": box_xyxy_to_xywh(mask_data["boxes"][idx]).tolist(),
+ "predicted_iou": mask_data["iou_preds"][idx].item(),
+ "point_coords": [mask_data["points"][idx].tolist()],
+ "stability_score": mask_data["stability_score"][idx].item(),
+ "crop_box": box_xyxy_to_xywh(mask_data["crop_boxes"][idx]).tolist(),
+ }
+ curr_anns.append(ann)
+
+ return curr_anns
+
+ def _generate_masks(self, image: np.ndarray) -> MaskData:
+ orig_size = image.shape[:2]
+ crop_boxes, layer_idxs = generate_crop_boxes(orig_size, self.crop_n_layers, self.crop_overlap_ratio)
+
+ # Iterate over image crops
+ data = MaskData()
+ for crop_box, layer_idx in zip(crop_boxes, layer_idxs):
+ crop_data = self._process_crop(image, crop_box, layer_idx, orig_size)
+ data.cat(crop_data)
+
+ # Remove duplicate masks between crops
+ if len(crop_boxes) > 1:
+ # Prefer masks from smaller crops
+ scores = 1 / box_area(data["crop_boxes"])
+ scores = scores.to(data["boxes"].device)
+ keep_by_nms = batched_nms(
+ data["boxes"].float(),
+ scores,
+ torch.zeros_like(data["boxes"][:, 0]), # categories
+ iou_threshold=self.crop_nms_thresh,
+ )
+ data.filter(keep_by_nms)
+ data.to_numpy()
+ return data
+
+ def _process_crop(
+ self,
+ image: np.ndarray,
+ crop_box: List[int],
+ crop_layer_idx: int,
+ orig_size: Tuple[int, ...],
+ ) -> MaskData:
+ # Crop the image and calculate embeddings
+ x0, y0, x1, y1 = crop_box
+ cropped_im = image[y0:y1, x0:x1, :]
+ cropped_im_size = cropped_im.shape[:2]
+ self.predictor.set_image(cropped_im)
+
+ # Get points for this crop
+ points_scale = np.array(cropped_im_size)[None, ::-1]
+ points_for_image = self.point_grids[crop_layer_idx] * points_scale
+
+ # Generate masks for this crop in batches
+ data = MaskData()
+ for (points, ) in batch_iterator(self.points_per_batch, points_for_image):
+ batch_data = self._process_batch(points, cropped_im_size, crop_box, orig_size, normalize=True)
+ data.cat(batch_data)
+ del batch_data
+ self.predictor.reset_predictor()
+
+ # Remove duplicates within this crop.
+ keep_by_nms = batched_nms(
+ data["boxes"].float(),
+ data["iou_preds"],
+ torch.zeros_like(data["boxes"][:, 0]), # categories
+ iou_threshold=self.box_nms_thresh,
+ )
+ data.filter(keep_by_nms)
+
+ # Return to the original image frame
+ data["boxes"] = uncrop_boxes_xyxy(data["boxes"], crop_box)
+ data["points"] = uncrop_points(data["points"], crop_box)
+ data["crop_boxes"] = torch.tensor([crop_box for _ in range(len(data["rles"]))])
+
+ return data
+
+ def _process_batch(
+ self,
+ points: np.ndarray,
+ im_size: Tuple[int, ...],
+ crop_box: List[int],
+ orig_size: Tuple[int, ...],
+ normalize=False,
+ ) -> MaskData:
+ orig_h, orig_w = orig_size
+
+ # Run model on this batch
+ points = torch.as_tensor(points, dtype=torch.float32, device=self.predictor.device)
+ in_points = self.predictor._transforms.transform_coords(points, normalize=normalize, orig_hw=im_size)
+ in_labels = torch.ones(in_points.shape[0], dtype=torch.int, device=in_points.device)
+ masks, iou_preds, low_res_masks = self.predictor._predict(
+ in_points[:, None, :],
+ in_labels[:, None],
+ multimask_output=self.multimask_output,
+ return_logits=True,
+ )
+
+ # Serialize predictions and store in MaskData
+ data = MaskData(
+ masks=masks.flatten(0, 1),
+ iou_preds=iou_preds.flatten(0, 1),
+ points=points.repeat_interleave(masks.shape[1], dim=0),
+ low_res_masks=low_res_masks.flatten(0, 1),
+ )
+ del masks
+
+ if not self.use_m2m:
+ # Filter by predicted IoU
+ if self.pred_iou_thresh > 0.0:
+ keep_mask = data["iou_preds"] > self.pred_iou_thresh
+ data.filter(keep_mask)
+
+ # Calculate and filter by stability score
+ data["stability_score"] = calculate_stability_score(data["masks"], self.mask_threshold,
+ self.stability_score_offset)
+ if self.stability_score_thresh > 0.0:
+ keep_mask = data["stability_score"] >= self.stability_score_thresh
+ data.filter(keep_mask)
+ else:
+ # One step refinement using previous mask predictions
+ in_points = self.predictor._transforms.transform_coords(
+ data["points"], normalize=normalize, orig_hw=im_size)
+ labels = torch.ones(in_points.shape[0], dtype=torch.int, device=in_points.device)
+ masks, ious = self.refine_with_m2m(in_points, labels, data["low_res_masks"], self.points_per_batch)
+ data["masks"] = masks.squeeze(1)
+ data["iou_preds"] = ious.squeeze(1)
+
+ if self.pred_iou_thresh > 0.0:
+ keep_mask = data["iou_preds"] > self.pred_iou_thresh
+ data.filter(keep_mask)
+
+ data["stability_score"] = calculate_stability_score(data["masks"], self.mask_threshold,
+ self.stability_score_offset)
+ if self.stability_score_thresh > 0.0:
+ keep_mask = data["stability_score"] >= self.stability_score_thresh
+ data.filter(keep_mask)
+
+ # Threshold masks and calculate boxes
+ data["masks"] = data["masks"] > self.mask_threshold
+ data["boxes"] = batched_mask_to_box(data["masks"])
+
+ # Filter boxes that touch crop boundaries
+ keep_mask = ~is_box_near_crop_edge(data["boxes"], crop_box, [0, 0, orig_w, orig_h])
+ if not torch.all(keep_mask):
+ data.filter(keep_mask)
+
+ # Compress to RLE
+ data["masks"] = uncrop_masks(data["masks"], crop_box, orig_h, orig_w)
+ data["rles"] = mask_to_rle_pytorch(data["masks"])
+ del data["masks"]
+
+ return data
+
+ @staticmethod
+ def postprocess_small_regions(mask_data: MaskData, min_area: int, nms_thresh: float) -> MaskData:
+ """
+ Removes small disconnected regions and holes in masks, then reruns
+ box NMS to remove any new duplicates.
+
+ Edits mask_data in place.
+
+ Requires open-cv as a dependency.
+ """
+ if len(mask_data["rles"]) == 0:
+ return mask_data
+
+ # Filter small disconnected regions and holes
+ new_masks = []
+ scores = []
+ for rle in mask_data["rles"]:
+ mask = rle_to_mask(rle)
+
+ mask, changed = remove_small_regions(mask, min_area, mode="holes")
+ unchanged = not changed
+ mask, changed = remove_small_regions(mask, min_area, mode="islands")
+ unchanged = unchanged and not changed
+
+ new_masks.append(torch.as_tensor(mask).unsqueeze(0))
+ # Give score=0 to changed masks and score=1 to unchanged masks
+ # so NMS will prefer ones that didn't need postprocessing
+ scores.append(float(unchanged))
+
+ # Recalculate boxes and remove any new duplicates
+ masks = torch.cat(new_masks, dim=0)
+ boxes = batched_mask_to_box(masks)
+ keep_by_nms = batched_nms(
+ boxes.float(),
+ torch.as_tensor(scores),
+ torch.zeros_like(boxes[:, 0]), # categories
+ iou_threshold=nms_thresh,
+ )
+
+ # Only recalculate RLEs for masks that have changed
+ for i_mask in keep_by_nms:
+ if scores[i_mask] == 0.0:
+ mask_torch = masks[i_mask].unsqueeze(0)
+ mask_data["rles"][i_mask] = mask_to_rle_pytorch(mask_torch)[0]
+ mask_data["boxes"][i_mask] = boxes[i_mask] # update res directly
+ mask_data.filter(keep_by_nms)
+
+ return mask_data
+
+ def refine_with_m2m(self, points, point_labels, low_res_masks, points_per_batch):
+ new_masks = []
+ new_iou_preds = []
+
+ for cur_points, cur_point_labels, low_res_mask in batch_iterator(points_per_batch, points, point_labels,
+ low_res_masks):
+ best_masks, best_iou_preds, _ = self.predictor._predict(
+ cur_points[:, None, :],
+ cur_point_labels[:, None],
+ mask_input=low_res_mask[:, None, :],
+ multimask_output=False,
+ return_logits=True,
+ )
+ new_masks.append(best_masks)
+ new_iou_preds.append(best_iou_preds)
+ masks = torch.cat(new_masks, dim=0)
+ return masks, torch.cat(new_iou_preds, dim=0)
diff --git a/sam2/build_sam.py b/sam2/build_sam.py
new file mode 100644
index 0000000000000000000000000000000000000000..d466c4c9bb6b170d5c05cf50b591892b940a5220
--- /dev/null
+++ b/sam2/build_sam.py
@@ -0,0 +1,172 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+
+import logging
+import os
+
+import torch
+from hydra import compose
+from hydra.utils import instantiate
+from omegaconf import OmegaConf
+
+import sam2
+
+# Check if the user is running Python from the parent directory of the sam2 repo
+# (i.e. the directory where this repo is cloned into) -- this is not supported since
+# it could shadow the sam2 package and cause issues.
+if os.path.isdir(os.path.join(sam2.__path__[0], "sam2")):
+    # If the user has "sam2/sam2" in their path, they are likely importing the repo itself
+ # as "sam2" rather than importing the "sam2" python package (i.e. "sam2/sam2" directory).
+ # This typically happens because the user is running Python from the parent directory
+ # that contains the sam2 repo they cloned.
+ raise RuntimeError("You're likely running Python from the parent directory of the sam2 repository "
+ "(i.e. the directory where https://github.com/facebookresearch/sam2 is cloned into). "
+ "This is not supported since the `sam2` Python package could be shadowed by the "
+ "repository name (the repository is also named `sam2` and contains the Python package "
+ "in `sam2/sam2`). Please run Python from another directory (e.g. from the repo dir "
+ "rather than its parent dir, or from your home directory) after installing SAM 2.")
+
+HF_MODEL_ID_TO_FILENAMES = {
+ "facebook/sam2-hiera-tiny": (
+ "configs/sam2/sam2_hiera_t.yaml",
+ "sam2_hiera_tiny.pt",
+ ),
+ "facebook/sam2-hiera-small": (
+ "configs/sam2/sam2_hiera_s.yaml",
+ "sam2_hiera_small.pt",
+ ),
+ "facebook/sam2-hiera-base-plus": (
+ "configs/sam2/sam2_hiera_b+.yaml",
+ "sam2_hiera_base_plus.pt",
+ ),
+ "facebook/sam2-hiera-large": (
+ "configs/sam2/sam2_hiera_l.yaml",
+ "sam2_hiera_large.pt",
+ ),
+ "facebook/sam2.1-hiera-tiny": (
+ "configs/sam2.1/sam2.1_hiera_t.yaml",
+ "sam2.1_hiera_tiny.pt",
+ ),
+ "facebook/sam2.1-hiera-small": (
+ "configs/sam2.1/sam2.1_hiera_s.yaml",
+ "sam2.1_hiera_small.pt",
+ ),
+ "facebook/sam2.1-hiera-base-plus": (
+ "configs/sam2.1/sam2.1_hiera_b+.yaml",
+ "sam2.1_hiera_base_plus.pt",
+ ),
+ "facebook/sam2.1-hiera-large": (
+ "configs/sam2.1/sam2.1_hiera_l.yaml",
+ "sam2.1_hiera_large.pt",
+ ),
+}
+
+
+def build_sam2(
+ config_file,
+ ckpt_path=None,
+ device="cuda",
+ mode="eval",
+ hydra_overrides_extra=[],
+ apply_postprocessing=True,
+ **kwargs,
+):
+
+ if apply_postprocessing:
+ hydra_overrides_extra = hydra_overrides_extra.copy()
+ hydra_overrides_extra += [
+ # dynamically fall back to multi-mask if the single mask is not stable
+ "++model.sam_mask_decoder_extra_args.dynamic_multimask_via_stability=true",
+ "++model.sam_mask_decoder_extra_args.dynamic_multimask_stability_delta=0.05",
+ "++model.sam_mask_decoder_extra_args.dynamic_multimask_stability_thresh=0.98",
+ ]
+ # Read config and init model
+ cfg = compose(config_name=config_file, overrides=hydra_overrides_extra)
+ OmegaConf.resolve(cfg)
+ model = instantiate(cfg.model, _recursive_=True)
+ _load_checkpoint(model, ckpt_path)
+ model = model.to(device)
+ if mode == "eval":
+ model.eval()
+ return model
+
+
+def build_sam2_video_predictor(
+ config_file,
+ ckpt_path=None,
+ device="cuda",
+ mode="eval",
+ hydra_overrides_extra=[],
+ apply_postprocessing=True,
+ vos_optimized=False,
+ **kwargs,
+):
+ hydra_overrides = [
+ "++model._target_=sam2.sam2_video_predictor.SAM2VideoPredictor",
+ ]
+ if vos_optimized:
+ hydra_overrides = [
+ "++model._target_=sam2.sam2_video_predictor.SAM2VideoPredictorVOS",
+ "++model.compile_image_encoder=True", # Let sam2_base handle this
+ ]
+
+ if apply_postprocessing:
+ hydra_overrides_extra = hydra_overrides_extra.copy()
+ hydra_overrides_extra += [
+ # dynamically fall back to multi-mask if the single mask is not stable
+ "++model.sam_mask_decoder_extra_args.dynamic_multimask_via_stability=true",
+ "++model.sam_mask_decoder_extra_args.dynamic_multimask_stability_delta=0.05",
+ "++model.sam_mask_decoder_extra_args.dynamic_multimask_stability_thresh=0.98",
+            # binarize the sigmoid mask logits on interacted frames with clicks in the memory encoder so that the encoded masks are exactly as what users see from clicking
+ "++model.binarize_mask_from_pts_for_mem_enc=true",
+ # fill small holes in the low-res masks up to `fill_hole_area` (before resizing them to the original video resolution)
+ "++model.fill_hole_area=8",
+ ]
+ hydra_overrides.extend(hydra_overrides_extra)
+
+ # Read config and init model
+ cfg = compose(config_name=config_file, overrides=hydra_overrides)
+ OmegaConf.resolve(cfg)
+ model = instantiate(cfg.model, _recursive_=True)
+ _load_checkpoint(model, ckpt_path)
+ model = model.to(device)
+ if mode == "eval":
+ model.eval()
+ return model
+
+
+def _hf_download(model_id):
+ from huggingface_hub import hf_hub_download
+
+ config_name, checkpoint_name = HF_MODEL_ID_TO_FILENAMES[model_id]
+ ckpt_path = hf_hub_download(repo_id=model_id, filename=checkpoint_name)
+ return config_name, ckpt_path
+
+
+def build_sam2_hf(model_id, **kwargs):
+ config_name, ckpt_path = _hf_download(model_id)
+ return build_sam2(config_file=config_name, ckpt_path=ckpt_path, **kwargs)
+
+
+def build_sam2_video_predictor_hf(model_id, **kwargs):
+ config_name, ckpt_path = _hf_download(model_id)
+ return build_sam2_video_predictor(config_file=config_name, ckpt_path=ckpt_path, **kwargs)
+
+
+def _load_checkpoint(model, ckpt_path):
+ if ckpt_path is not None:
+ sd = torch.load(ckpt_path, map_location="cpu", weights_only=True)["model"]
+ # https://github.com/huggingface/transformers/issues/29554
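+        # the checkpoint stores the CXBlock LayerScale parameters as 'gamma', while the
+        # instantiated module presumably registers them as 'weight' (see the issue above),
+        # so the keys are remapped before loading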
+ sd['memory_encoder.fuser.layers.0.weight'] = sd.pop('memory_encoder.fuser.layers.0.gamma')
+ sd['memory_encoder.fuser.layers.1.weight'] = sd.pop('memory_encoder.fuser.layers.1.gamma')
+ missing_keys, unexpected_keys = model.load_state_dict(sd)
+ if missing_keys:
+ logging.error(missing_keys)
+ raise RuntimeError()
+ if unexpected_keys:
+ logging.error(unexpected_keys)
+ raise RuntimeError()
+        logging.info("Loaded checkpoint successfully")
diff --git a/sam2/configs/sam2.1/sam2.1_hiera_b+.yaml b/sam2/configs/sam2.1/sam2.1_hiera_b+.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..d7172f9b0b663aaaace97fed7e2a08db75150461
--- /dev/null
+++ b/sam2/configs/sam2.1/sam2.1_hiera_b+.yaml
@@ -0,0 +1,116 @@
+# @package _global_
+
+# Model
+model:
+ _target_: sam2.modeling.sam2_base.SAM2Base
+ image_encoder:
+ _target_: sam2.modeling.backbones.image_encoder.ImageEncoder
+ scalp: 1
+ trunk:
+ _target_: sam2.modeling.backbones.hieradet.Hiera
+ embed_dim: 112
+ num_heads: 2
+ neck:
+ _target_: sam2.modeling.backbones.image_encoder.FpnNeck
+ position_encoding:
+ _target_: sam2.modeling.position_encoding.PositionEmbeddingSine
+ num_pos_feats: 256
+ normalize: true
+ scale: null
+ temperature: 10000
+ d_model: 256
+ backbone_channel_list: [896, 448, 224, 112]
+ fpn_top_down_levels: [2, 3] # output level 0 and 1 directly use the backbone features
+ fpn_interp_model: nearest
+
+ memory_attention:
+ _target_: sam2.modeling.memory_attention.MemoryAttention
+ d_model: 256
+ pos_enc_at_input: true
+ layer:
+ _target_: sam2.modeling.memory_attention.MemoryAttentionLayer
+ activation: relu
+ dim_feedforward: 2048
+ dropout: 0.1
+ pos_enc_at_attn: false
+ self_attention:
+ _target_: sam2.modeling.sam.transformer.RoPEAttention
+ rope_theta: 10000.0
+ feat_sizes: [64, 64]
+ embedding_dim: 256
+ num_heads: 1
+ downsample_rate: 1
+ dropout: 0.1
+ d_model: 256
+ pos_enc_at_cross_attn_keys: true
+ pos_enc_at_cross_attn_queries: false
+ cross_attention:
+ _target_: sam2.modeling.sam.transformer.RoPEAttention
+ rope_theta: 10000.0
+ feat_sizes: [64, 64]
+ rope_k_repeat: True
+ embedding_dim: 256
+ num_heads: 1
+ downsample_rate: 1
+ dropout: 0.1
+ kv_in_dim: 64
+ num_layers: 4
+
+ memory_encoder:
+ _target_: sam2.modeling.memory_encoder.MemoryEncoder
+ out_dim: 64
+ position_encoding:
+ _target_: sam2.modeling.position_encoding.PositionEmbeddingSine
+ num_pos_feats: 64
+ normalize: true
+ scale: null
+ temperature: 10000
+ mask_downsampler:
+ _target_: sam2.modeling.memory_encoder.MaskDownSampler
+ kernel_size: 3
+ stride: 2
+ padding: 1
+ fuser:
+ _target_: sam2.modeling.memory_encoder.Fuser
+ layer:
+ _target_: sam2.modeling.memory_encoder.CXBlock
+ dim: 256
+ kernel_size: 7
+ padding: 3
+ layer_scale_init_value: 1e-6
+ use_dwconv: True # depth-wise convs
+ num_layers: 2
+
+ num_maskmem: 7
+ image_size: 1024
+ # apply scaled sigmoid on mask logits for memory encoder, and directly feed input mask as output mask
+ sigmoid_scale_for_mem_enc: 20.0
+ sigmoid_bias_for_mem_enc: -10.0
+ use_mask_input_as_output_without_sam: true
+ # Memory
+ directly_add_no_mem_embed: true
+ no_obj_embed_spatial: true
+ # use high-resolution feature map in the SAM mask decoder
+ use_high_res_features_in_sam: true
+ # output 3 masks on the first click on initial conditioning frames
+ multimask_output_in_sam: true
+ # SAM heads
+ iou_prediction_use_sigmoid: True
+ # cross-attend to object pointers from other frames (based on SAM output tokens) in the encoder
+ use_obj_ptrs_in_encoder: true
+ add_tpos_enc_to_obj_ptrs: true
+ proj_tpos_enc_in_obj_ptrs: true
+ use_signed_tpos_enc_to_obj_ptrs: true
+ only_obj_ptrs_in_the_past_for_eval: true
+ # object occlusion prediction
+ pred_obj_scores: true
+ pred_obj_scores_mlp: true
+ fixed_no_obj_ptr: true
+ # multimask tracking settings
+ multimask_output_for_tracking: true
+ use_multimask_token_for_obj_ptr: true
+ multimask_min_pt_num: 0
+ multimask_max_pt_num: 1
+ use_mlp_for_obj_ptr_proj: true
+ # Compilation flag
+ compile_image_encoder: False
diff --git a/sam2/configs/sam2.1/sam2.1_hiera_l.yaml b/sam2/configs/sam2.1/sam2.1_hiera_l.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..23073ea7a95901be656b3c6d1a66ce8736ab7ad3
--- /dev/null
+++ b/sam2/configs/sam2.1/sam2.1_hiera_l.yaml
@@ -0,0 +1,120 @@
+# @package _global_
+
+# Model
+model:
+ _target_: sam2.modeling.sam2_base.SAM2Base
+ image_encoder:
+ _target_: sam2.modeling.backbones.image_encoder.ImageEncoder
+ scalp: 1
+ trunk:
+ _target_: sam2.modeling.backbones.hieradet.Hiera
+ embed_dim: 144
+ num_heads: 2
+ stages: [2, 6, 36, 4]
+ global_att_blocks: [23, 33, 43]
+ window_pos_embed_bkg_spatial_size: [7, 7]
+ window_spec: [8, 4, 16, 8]
+ neck:
+ _target_: sam2.modeling.backbones.image_encoder.FpnNeck
+ position_encoding:
+ _target_: sam2.modeling.position_encoding.PositionEmbeddingSine
+ num_pos_feats: 256
+ normalize: true
+ scale: null
+ temperature: 10000
+ d_model: 256
+ backbone_channel_list: [1152, 576, 288, 144]
+ fpn_top_down_levels: [2, 3] # output level 0 and 1 directly use the backbone features
+ fpn_interp_model: nearest
+
+ memory_attention:
+ _target_: sam2.modeling.memory_attention.MemoryAttention
+ d_model: 256
+ pos_enc_at_input: true
+ layer:
+ _target_: sam2.modeling.memory_attention.MemoryAttentionLayer
+ activation: relu
+ dim_feedforward: 2048
+ dropout: 0.1
+ pos_enc_at_attn: false
+ self_attention:
+ _target_: sam2.modeling.sam.transformer.RoPEAttention
+ rope_theta: 10000.0
+ feat_sizes: [64, 64]
+ embedding_dim: 256
+ num_heads: 1
+ downsample_rate: 1
+ dropout: 0.1
+ d_model: 256
+ pos_enc_at_cross_attn_keys: true
+ pos_enc_at_cross_attn_queries: false
+ cross_attention:
+ _target_: sam2.modeling.sam.transformer.RoPEAttention
+ rope_theta: 10000.0
+ feat_sizes: [64, 64]
+ rope_k_repeat: True
+ embedding_dim: 256
+ num_heads: 1
+ downsample_rate: 1
+ dropout: 0.1
+ kv_in_dim: 64
+ num_layers: 4
+
+ memory_encoder:
+ _target_: sam2.modeling.memory_encoder.MemoryEncoder
+ out_dim: 64
+ position_encoding:
+ _target_: sam2.modeling.position_encoding.PositionEmbeddingSine
+ num_pos_feats: 64
+ normalize: true
+ scale: null
+ temperature: 10000
+ mask_downsampler:
+ _target_: sam2.modeling.memory_encoder.MaskDownSampler
+ kernel_size: 3
+ stride: 2
+ padding: 1
+ fuser:
+ _target_: sam2.modeling.memory_encoder.Fuser
+ layer:
+ _target_: sam2.modeling.memory_encoder.CXBlock
+ dim: 256
+ kernel_size: 7
+ padding: 3
+ layer_scale_init_value: 1e-6
+ use_dwconv: True # depth-wise convs
+ num_layers: 2
+
+ num_maskmem: 7
+ image_size: 1024
+ # apply scaled sigmoid on mask logits for memory encoder, and directly feed input mask as output mask
+ sigmoid_scale_for_mem_enc: 20.0
+ sigmoid_bias_for_mem_enc: -10.0
+ use_mask_input_as_output_without_sam: true
+ # Memory
+ directly_add_no_mem_embed: true
+ no_obj_embed_spatial: true
+ # use high-resolution feature map in the SAM mask decoder
+ use_high_res_features_in_sam: true
+ # output 3 masks on the first click on initial conditioning frames
+ multimask_output_in_sam: true
+ # SAM heads
+ iou_prediction_use_sigmoid: True
+ # cross-attend to object pointers from other frames (based on SAM output tokens) in the encoder
+ use_obj_ptrs_in_encoder: true
+ add_tpos_enc_to_obj_ptrs: true
+ proj_tpos_enc_in_obj_ptrs: true
+ use_signed_tpos_enc_to_obj_ptrs: true
+ only_obj_ptrs_in_the_past_for_eval: true
+ # object occlusion prediction
+ pred_obj_scores: true
+ pred_obj_scores_mlp: true
+ fixed_no_obj_ptr: true
+ # multimask tracking settings
+ multimask_output_for_tracking: true
+ use_multimask_token_for_obj_ptr: true
+ multimask_min_pt_num: 0
+ multimask_max_pt_num: 1
+ use_mlp_for_obj_ptr_proj: true
+ # Compilation flag
+ compile_image_encoder: False
diff --git a/sam2/configs/sam2.1/sam2.1_hiera_s.yaml b/sam2/configs/sam2.1/sam2.1_hiera_s.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..fd8d40465b18b3de39b0a565aca712306306c4ed
--- /dev/null
+++ b/sam2/configs/sam2.1/sam2.1_hiera_s.yaml
@@ -0,0 +1,119 @@
+# @package _global_
+
+# Model
+model:
+ _target_: sam2.modeling.sam2_base.SAM2Base
+ image_encoder:
+ _target_: sam2.modeling.backbones.image_encoder.ImageEncoder
+ scalp: 1
+ trunk:
+ _target_: sam2.modeling.backbones.hieradet.Hiera
+ embed_dim: 96
+ num_heads: 1
+ stages: [1, 2, 11, 2]
+ global_att_blocks: [7, 10, 13]
+ window_pos_embed_bkg_spatial_size: [7, 7]
+ neck:
+ _target_: sam2.modeling.backbones.image_encoder.FpnNeck
+ position_encoding:
+ _target_: sam2.modeling.position_encoding.PositionEmbeddingSine
+ num_pos_feats: 256
+ normalize: true
+ scale: null
+ temperature: 10000
+ d_model: 256
+ backbone_channel_list: [768, 384, 192, 96]
+ fpn_top_down_levels: [2, 3] # output level 0 and 1 directly use the backbone features
+ fpn_interp_model: nearest
+
+ memory_attention:
+ _target_: sam2.modeling.memory_attention.MemoryAttention
+ d_model: 256
+ pos_enc_at_input: true
+ layer:
+ _target_: sam2.modeling.memory_attention.MemoryAttentionLayer
+ activation: relu
+ dim_feedforward: 2048
+ dropout: 0.1
+ pos_enc_at_attn: false
+ self_attention:
+ _target_: sam2.modeling.sam.transformer.RoPEAttention
+ rope_theta: 10000.0
+ feat_sizes: [64, 64]
+ embedding_dim: 256
+ num_heads: 1
+ downsample_rate: 1
+ dropout: 0.1
+ d_model: 256
+ pos_enc_at_cross_attn_keys: true
+ pos_enc_at_cross_attn_queries: false
+ cross_attention:
+ _target_: sam2.modeling.sam.transformer.RoPEAttention
+ rope_theta: 10000.0
+ feat_sizes: [64, 64]
+ rope_k_repeat: True
+ embedding_dim: 256
+ num_heads: 1
+ downsample_rate: 1
+ dropout: 0.1
+ kv_in_dim: 64
+ num_layers: 4
+
+ memory_encoder:
+ _target_: sam2.modeling.memory_encoder.MemoryEncoder
+ out_dim: 64
+ position_encoding:
+ _target_: sam2.modeling.position_encoding.PositionEmbeddingSine
+ num_pos_feats: 64
+ normalize: true
+ scale: null
+ temperature: 10000
+ mask_downsampler:
+ _target_: sam2.modeling.memory_encoder.MaskDownSampler
+ kernel_size: 3
+ stride: 2
+ padding: 1
+ fuser:
+ _target_: sam2.modeling.memory_encoder.Fuser
+ layer:
+ _target_: sam2.modeling.memory_encoder.CXBlock
+ dim: 256
+ kernel_size: 7
+ padding: 3
+ layer_scale_init_value: 1e-6
+ use_dwconv: True # depth-wise convs
+ num_layers: 2
+
+ num_maskmem: 7
+ image_size: 1024
+ # apply scaled sigmoid on mask logits for memory encoder, and directly feed input mask as output mask
+ sigmoid_scale_for_mem_enc: 20.0
+ sigmoid_bias_for_mem_enc: -10.0
+ use_mask_input_as_output_without_sam: true
+ # Memory
+ directly_add_no_mem_embed: true
+ no_obj_embed_spatial: true
+ # use high-resolution feature map in the SAM mask decoder
+ use_high_res_features_in_sam: true
+ # output 3 masks on the first click on initial conditioning frames
+ multimask_output_in_sam: true
+ # SAM heads
+ iou_prediction_use_sigmoid: True
+ # cross-attend to object pointers from other frames (based on SAM output tokens) in the encoder
+ use_obj_ptrs_in_encoder: true
+ add_tpos_enc_to_obj_ptrs: true
+ proj_tpos_enc_in_obj_ptrs: true
+ use_signed_tpos_enc_to_obj_ptrs: true
+ only_obj_ptrs_in_the_past_for_eval: true
+ # object occlusion prediction
+ pred_obj_scores: true
+ pred_obj_scores_mlp: true
+ fixed_no_obj_ptr: true
+ # multimask tracking settings
+ multimask_output_for_tracking: true
+ use_multimask_token_for_obj_ptr: true
+ multimask_min_pt_num: 0
+ multimask_max_pt_num: 1
+ use_mlp_for_obj_ptr_proj: true
+ # Compilation flag
+ compile_image_encoder: False
diff --git a/sam2/configs/sam2.1/sam2.1_hiera_t.yaml b/sam2/configs/sam2.1/sam2.1_hiera_t.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e762aec932f26436d13798f3feb3ec82c360a943
--- /dev/null
+++ b/sam2/configs/sam2.1/sam2.1_hiera_t.yaml
@@ -0,0 +1,121 @@
+# @package _global_
+
+# Model
+model:
+ _target_: sam2.modeling.sam2_base.SAM2Base
+ image_encoder:
+ _target_: sam2.modeling.backbones.image_encoder.ImageEncoder
+ scalp: 1
+ trunk:
+ _target_: sam2.modeling.backbones.hieradet.Hiera
+ embed_dim: 96
+ num_heads: 1
+ stages: [1, 2, 7, 2]
+ global_att_blocks: [5, 7, 9]
+ window_pos_embed_bkg_spatial_size: [7, 7]
+ neck:
+ _target_: sam2.modeling.backbones.image_encoder.FpnNeck
+ position_encoding:
+ _target_: sam2.modeling.position_encoding.PositionEmbeddingSine
+ num_pos_feats: 256
+ normalize: true
+ scale: null
+ temperature: 10000
+ d_model: 256
+ backbone_channel_list: [768, 384, 192, 96]
+ fpn_top_down_levels: [2, 3] # output level 0 and 1 directly use the backbone features
+ fpn_interp_model: nearest
+
+ memory_attention:
+ _target_: sam2.modeling.memory_attention.MemoryAttention
+ d_model: 256
+ pos_enc_at_input: true
+ layer:
+ _target_: sam2.modeling.memory_attention.MemoryAttentionLayer
+ activation: relu
+ dim_feedforward: 2048
+ dropout: 0.1
+ pos_enc_at_attn: false
+ self_attention:
+ _target_: sam2.modeling.sam.transformer.RoPEAttention
+ rope_theta: 10000.0
+ feat_sizes: [64, 64]
+ embedding_dim: 256
+ num_heads: 1
+ downsample_rate: 1
+ dropout: 0.1
+ d_model: 256
+ pos_enc_at_cross_attn_keys: true
+ pos_enc_at_cross_attn_queries: false
+ cross_attention:
+ _target_: sam2.modeling.sam.transformer.RoPEAttention
+ rope_theta: 10000.0
+ feat_sizes: [64, 64]
+ rope_k_repeat: True
+ embedding_dim: 256
+ num_heads: 1
+ downsample_rate: 1
+ dropout: 0.1
+ kv_in_dim: 64
+ num_layers: 4
+
+ memory_encoder:
+ _target_: sam2.modeling.memory_encoder.MemoryEncoder
+ out_dim: 64
+ position_encoding:
+ _target_: sam2.modeling.position_encoding.PositionEmbeddingSine
+ num_pos_feats: 64
+ normalize: true
+ scale: null
+ temperature: 10000
+ mask_downsampler:
+ _target_: sam2.modeling.memory_encoder.MaskDownSampler
+ kernel_size: 3
+ stride: 2
+ padding: 1
+ fuser:
+ _target_: sam2.modeling.memory_encoder.Fuser
+ layer:
+ _target_: sam2.modeling.memory_encoder.CXBlock
+ dim: 256
+ kernel_size: 7
+ padding: 3
+ layer_scale_init_value: 1e-6
+ use_dwconv: True # depth-wise convs
+ num_layers: 2
+
+ num_maskmem: 7
+ image_size: 1024
+ # apply scaled sigmoid on mask logits for memory encoder, and directly feed input mask as output mask
+ # SAM decoder
+ sigmoid_scale_for_mem_enc: 20.0
+ sigmoid_bias_for_mem_enc: -10.0
+ use_mask_input_as_output_without_sam: true
+ # Memory
+ directly_add_no_mem_embed: true
+ no_obj_embed_spatial: true
+ # use high-resolution feature map in the SAM mask decoder
+ use_high_res_features_in_sam: true
+ # output 3 masks on the first click on initial conditioning frames
+ multimask_output_in_sam: true
+ # SAM heads
+ iou_prediction_use_sigmoid: True
+ # cross-attend to object pointers from other frames (based on SAM output tokens) in the encoder
+ use_obj_ptrs_in_encoder: true
+ add_tpos_enc_to_obj_ptrs: true
+ proj_tpos_enc_in_obj_ptrs: true
+ use_signed_tpos_enc_to_obj_ptrs: true
+ only_obj_ptrs_in_the_past_for_eval: true
+ # object occlusion prediction
+ pred_obj_scores: true
+ pred_obj_scores_mlp: true
+ fixed_no_obj_ptr: true
+ # multimask tracking settings
+ multimask_output_for_tracking: true
+ use_multimask_token_for_obj_ptr: true
+ multimask_min_pt_num: 0
+ multimask_max_pt_num: 1
+ use_mlp_for_obj_ptr_proj: true
+ # Compilation flag
+  # HieraT does not currently support compilation; this should always be set to False
+ compile_image_encoder: False
diff --git a/sam2/configs/sam2.1_hiera_b+.yaml b/sam2/configs/sam2.1_hiera_b+.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..4697d17f75f04f66c1d0aac20bf8c3f43446b06d
--- /dev/null
+++ b/sam2/configs/sam2.1_hiera_b+.yaml
@@ -0,0 +1,137 @@
+# @package _global_
+
+# Model
+model:
+ _target_: sam2.sam2_train.SAM2Train
+ image_encoder:
+ _target_: sam2.modeling.backbones.image_encoder.ImageEncoder
+ scalp: 1
+ trunk:
+ _target_: sam2.modeling.backbones.hieradet.Hiera
+ embed_dim: 112
+ num_heads: 2
+ drop_path_rate: 0.1
+ neck:
+ _target_: sam2.modeling.backbones.image_encoder.FpnNeck
+ position_encoding:
+ _target_: sam2.modeling.position_encoding.PositionEmbeddingSine
+ num_pos_feats: 256
+ normalize: true
+ scale: null
+ temperature: 10000
+ d_model: 256
+ backbone_channel_list: [896, 448, 224, 112]
+ fpn_top_down_levels: [2, 3] # output level 0 and 1 directly use the backbone features
+ fpn_interp_model: nearest
+
+ memory_attention:
+ _target_: sam2.modeling.memory_attention.MemoryAttention
+ d_model: 256
+ pos_enc_at_input: true
+ layer:
+ _target_: sam2.modeling.memory_attention.MemoryAttentionLayer
+ activation: relu
+ dim_feedforward: 2048
+ dropout: 0.1
+ pos_enc_at_attn: false
+ self_attention:
+ _target_: sam2.modeling.sam.transformer.RoPEAttention
+ rope_theta: 10000.0
+ feat_sizes: [64, 64]
+ embedding_dim: 256
+ num_heads: 1
+ downsample_rate: 1
+ dropout: 0.1
+ d_model: 256
+ pos_enc_at_cross_attn_keys: true
+ pos_enc_at_cross_attn_queries: false
+ cross_attention:
+ _target_: sam2.modeling.sam.transformer.RoPEAttention
+ rope_theta: 10000.0
+ feat_sizes: [64, 64]
+ rope_k_repeat: True
+ embedding_dim: 256
+ num_heads: 1
+ downsample_rate: 1
+ dropout: 0.1
+ kv_in_dim: 64
+ num_layers: 4
+
+ memory_encoder:
+ _target_: sam2.modeling.memory_encoder.MemoryEncoder
+ out_dim: 64
+ position_encoding:
+ _target_: sam2.modeling.position_encoding.PositionEmbeddingSine
+ num_pos_feats: 64
+ normalize: true
+ scale: null
+ temperature: 10000
+ mask_downsampler:
+ _target_: sam2.modeling.memory_encoder.MaskDownSampler
+ kernel_size: 3
+ stride: 2
+ padding: 1
+ fuser:
+ _target_: sam2.modeling.memory_encoder.Fuser
+ layer:
+ _target_: sam2.modeling.memory_encoder.CXBlock
+ dim: 256
+ kernel_size: 7
+ padding: 3
+ layer_scale_init_value: 1e-6
+ use_dwconv: True # depth-wise convs
+ num_layers: 2
+
+ num_maskmem: 7
+ image_size: 1024
+ # apply scaled sigmoid on mask logits for memory encoder, and directly feed input mask as output mask
+ sigmoid_scale_for_mem_enc: 20.0
+ sigmoid_bias_for_mem_enc: -10.0
+ use_mask_input_as_output_without_sam: true
+ # Memory
+ directly_add_no_mem_embed: true
+ no_obj_embed_spatial: true
+ # use high-resolution feature map in the SAM mask decoder
+ use_high_res_features_in_sam: true
+ # output 3 masks on the first click on initial conditioning frames
+ multimask_output_in_sam: true
+ # SAM heads
+ iou_prediction_use_sigmoid: True
+ # cross-attend to object pointers from other frames (based on SAM output tokens) in the encoder
+ use_obj_ptrs_in_encoder: true
+ add_tpos_enc_to_obj_ptrs: true
+ proj_tpos_enc_in_obj_ptrs: true
+ use_signed_tpos_enc_to_obj_ptrs: true
+ only_obj_ptrs_in_the_past_for_eval: true
+ # object occlusion prediction
+ pred_obj_scores: true
+ pred_obj_scores_mlp: true
+ fixed_no_obj_ptr: true
+ # multimask tracking settings
+ multimask_output_for_tracking: true
+ use_multimask_token_for_obj_ptr: true
+ multimask_min_pt_num: 0
+ multimask_max_pt_num: 1
+ use_mlp_for_obj_ptr_proj: true
+ # Compilation flag
+ compile_image_encoder: False
+
+ ####### Training specific params #######
+ # box/point input and corrections
+ prob_to_use_pt_input_for_train: 0.5
+ prob_to_use_pt_input_for_eval: 0.0
+ prob_to_use_box_input_for_train: 0.5 # 0.5*0.5 = 0.25 prob to use box instead of points
+ prob_to_use_box_input_for_eval: 0.0
+ prob_to_sample_from_gt_for_train: 0.1 # with a small prob, sampling correction points from GT mask instead of prediction errors
+ num_frames_to_correct_for_train: 2 # iteratively sample on random 1~2 frames (always include the first frame)
+ num_frames_to_correct_for_eval: 1 # only iteratively sample on first frame
+ rand_frames_to_correct_for_train: true # random #init-cond-frame ~ 2
+ add_all_frames_to_correct_as_cond: true # when a frame receives a correction click, it becomes a conditioning frame (even if it's not initially a conditioning frame)
+ # maximum 2 initial conditioning frames
+ num_init_cond_frames_for_train: 2
+ rand_init_cond_frames_for_train: true # random 1~2
+ num_correction_pt_per_frame: 7
+ use_act_ckpt_iterative_pt_sampling: false
+
+ num_init_cond_frames_for_eval: 1 # only mask on the first frame
+ forward_backbone_per_frame_for_eval: true
diff --git a/sam2/configs/sam2.1_hiera_l.yaml b/sam2/configs/sam2.1_hiera_l.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a0c5613c33f89e34fa258fba43ae05c11cebecea
--- /dev/null
+++ b/sam2/configs/sam2.1_hiera_l.yaml
@@ -0,0 +1,141 @@
+# @package _global_
+
+# Model
+model:
+ _target_: sam2.sam2_train.SAM2Train
+ image_encoder:
+ _target_: sam2.modeling.backbones.image_encoder.ImageEncoder
+ scalp: 1
+ trunk:
+ _target_: sam2.modeling.backbones.hieradet.Hiera
+ embed_dim: 144
+ num_heads: 2
+ stages: [2, 6, 36, 4]
+ global_att_blocks: [23, 33, 43]
+ window_pos_embed_bkg_spatial_size: [7, 7]
+ window_spec: [8, 4, 16, 8]
+ drop_path_rate: 0.1
+ neck:
+ _target_: sam2.modeling.backbones.image_encoder.FpnNeck
+ position_encoding:
+ _target_: sam2.modeling.position_encoding.PositionEmbeddingSine
+ num_pos_feats: 256
+ normalize: true
+ scale: null
+ temperature: 10000
+ d_model: 256
+ backbone_channel_list: [1152, 576, 288, 144]
+ fpn_top_down_levels: [2, 3] # output level 0 and 1 directly use the backbone features
+ fpn_interp_model: nearest
+
+ memory_attention:
+ _target_: sam2.modeling.memory_attention.MemoryAttention
+ d_model: 256
+ pos_enc_at_input: true
+ layer:
+ _target_: sam2.modeling.memory_attention.MemoryAttentionLayer
+ activation: relu
+ dim_feedforward: 2048
+ dropout: 0.1
+ pos_enc_at_attn: false
+ self_attention:
+ _target_: sam2.modeling.sam.transformer.RoPEAttention
+ rope_theta: 10000.0
+ feat_sizes: [64, 64]
+ embedding_dim: 256
+ num_heads: 1
+ downsample_rate: 1
+ dropout: 0.1
+ d_model: 256
+ pos_enc_at_cross_attn_keys: true
+ pos_enc_at_cross_attn_queries: false
+ cross_attention:
+ _target_: sam2.modeling.sam.transformer.RoPEAttention
+ rope_theta: 10000.0
+ feat_sizes: [64, 64]
+ rope_k_repeat: True
+ embedding_dim: 256
+ num_heads: 1
+ downsample_rate: 1
+ dropout: 0.1
+ kv_in_dim: 64
+ num_layers: 4
+
+ memory_encoder:
+ _target_: sam2.modeling.memory_encoder.MemoryEncoder
+ out_dim: 64
+ position_encoding:
+ _target_: sam2.modeling.position_encoding.PositionEmbeddingSine
+ num_pos_feats: 64
+ normalize: true
+ scale: null
+ temperature: 10000
+ mask_downsampler:
+ _target_: sam2.modeling.memory_encoder.MaskDownSampler
+ kernel_size: 3
+ stride: 2
+ padding: 1
+ fuser:
+ _target_: sam2.modeling.memory_encoder.Fuser
+ layer:
+ _target_: sam2.modeling.memory_encoder.CXBlock
+ dim: 256
+ kernel_size: 7
+ padding: 3
+ layer_scale_init_value: 1e-6
+ use_dwconv: True # depth-wise convs
+ num_layers: 2
+
+ num_maskmem: 7
+ image_size: 1024
+ # apply scaled sigmoid on mask logits for memory encoder, and directly feed input mask as output mask
+ sigmoid_scale_for_mem_enc: 20.0
+ sigmoid_bias_for_mem_enc: -10.0
+ use_mask_input_as_output_without_sam: true
+ # Memory
+ directly_add_no_mem_embed: true
+ no_obj_embed_spatial: true
+ # use high-resolution feature map in the SAM mask decoder
+ use_high_res_features_in_sam: true
+ # output 3 masks on the first click on initial conditioning frames
+ multimask_output_in_sam: true
+ # SAM heads
+ iou_prediction_use_sigmoid: True
+ # cross-attend to object pointers from other frames (based on SAM output tokens) in the encoder
+ use_obj_ptrs_in_encoder: true
+ add_tpos_enc_to_obj_ptrs: true
+ proj_tpos_enc_in_obj_ptrs: true
+ use_signed_tpos_enc_to_obj_ptrs: true
+ only_obj_ptrs_in_the_past_for_eval: true
+ # object occlusion prediction
+ pred_obj_scores: true
+ pred_obj_scores_mlp: true
+ fixed_no_obj_ptr: true
+ # multimask tracking settings
+ multimask_output_for_tracking: true
+ use_multimask_token_for_obj_ptr: true
+ multimask_min_pt_num: 0
+ multimask_max_pt_num: 1
+ use_mlp_for_obj_ptr_proj: true
+ # Compilation flag
+ compile_image_encoder: False
+
+ ####### Training specific params #######
+ # box/point input and corrections
+ prob_to_use_pt_input_for_train: 0.5
+ prob_to_use_pt_input_for_eval: 0.0
+ prob_to_use_box_input_for_train: 0.5 # 0.5*0.5 = 0.25 prob to use box instead of points
+ prob_to_use_box_input_for_eval: 0.0
+ prob_to_sample_from_gt_for_train: 0.1 # with a small prob, sampling correction points from GT mask instead of prediction errors
+ num_frames_to_correct_for_train: 2 # iteratively sample on random 1~2 frames (always include the first frame)
+ num_frames_to_correct_for_eval: 1 # only iteratively sample on first frame
+ rand_frames_to_correct_for_train: true # random #init-cond-frame ~ 2
+ add_all_frames_to_correct_as_cond: true # when a frame receives a correction click, it becomes a conditioning frame (even if it's not initially a conditioning frame)
+ # maximum 2 initial conditioning frames
+ num_init_cond_frames_for_train: 2
+ rand_init_cond_frames_for_train: true # random 1~2
+ num_correction_pt_per_frame: 7
+ use_act_ckpt_iterative_pt_sampling: false
+
+ num_init_cond_frames_for_eval: 1 # only mask on the first frame
+ forward_backbone_per_frame_for_eval: true
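
Across the b+/l/s/t variants, `backbone_channel_list` is determined by the trunk's `embed_dim`: Hiera doubles its channel width at each of the four stages, and the FPN neck lists the widths top-down. A quick check of the pattern:

for name, embed_dim in [("b+", 112), ("l", 144), ("s", 96), ("t", 96)]:
    channels = [embed_dim * 2**i for i in (3, 2, 1, 0)]
    print(name, channels)
# b+ [896, 448, 224, 112]; l [1152, 576, 288, 144]; s/t [768, 384, 192, 96]
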
diff --git a/sam2/configs/sam2.1_hiera_s.yaml b/sam2/configs/sam2.1_hiera_s.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..bbae74580bc2c3e8fdba870ff8cea1320bf885bc
--- /dev/null
+++ b/sam2/configs/sam2.1_hiera_s.yaml
@@ -0,0 +1,140 @@
+# @package _global_
+
+# Model
+model:
+ _target_: sam2.sam2_train.SAM2Train
+ image_encoder:
+ _target_: sam2.modeling.backbones.image_encoder.ImageEncoder
+ scalp: 1
+ trunk:
+ _target_: sam2.modeling.backbones.hieradet.Hiera
+ embed_dim: 96
+ num_heads: 1
+ stages: [1, 2, 11, 2]
+ global_att_blocks: [7, 10, 13]
+ window_pos_embed_bkg_spatial_size: [7, 7]
+ drop_path_rate: 0.1
+ neck:
+ _target_: sam2.modeling.backbones.image_encoder.FpnNeck
+ position_encoding:
+ _target_: sam2.modeling.position_encoding.PositionEmbeddingSine
+ num_pos_feats: 256
+ normalize: true
+ scale: null
+ temperature: 10000
+ d_model: 256
+ backbone_channel_list: [768, 384, 192, 96]
+ fpn_top_down_levels: [2, 3] # output level 0 and 1 directly use the backbone features
+ fpn_interp_model: nearest
+
+ memory_attention:
+ _target_: sam2.modeling.memory_attention.MemoryAttention
+ d_model: 256
+ pos_enc_at_input: true
+ layer:
+ _target_: sam2.modeling.memory_attention.MemoryAttentionLayer
+ activation: relu
+ dim_feedforward: 2048
+ dropout: 0.1
+ pos_enc_at_attn: false
+ self_attention:
+ _target_: sam2.modeling.sam.transformer.RoPEAttention
+ rope_theta: 10000.0
+ feat_sizes: [64, 64]
+ embedding_dim: 256
+ num_heads: 1
+ downsample_rate: 1
+ dropout: 0.1
+ d_model: 256
+ pos_enc_at_cross_attn_keys: true
+ pos_enc_at_cross_attn_queries: false
+ cross_attention:
+ _target_: sam2.modeling.sam.transformer.RoPEAttention
+ rope_theta: 10000.0
+ feat_sizes: [64, 64]
+ rope_k_repeat: True
+ embedding_dim: 256
+ num_heads: 1
+ downsample_rate: 1
+ dropout: 0.1
+ kv_in_dim: 64
+ num_layers: 4
+
+ memory_encoder:
+ _target_: sam2.modeling.memory_encoder.MemoryEncoder
+ out_dim: 64
+ position_encoding:
+ _target_: sam2.modeling.position_encoding.PositionEmbeddingSine
+ num_pos_feats: 64
+ normalize: true
+ scale: null
+ temperature: 10000
+ mask_downsampler:
+ _target_: sam2.modeling.memory_encoder.MaskDownSampler
+ kernel_size: 3
+ stride: 2
+ padding: 1
+ fuser:
+ _target_: sam2.modeling.memory_encoder.Fuser
+ layer:
+ _target_: sam2.modeling.memory_encoder.CXBlock
+ dim: 256
+ kernel_size: 7
+ padding: 3
+ layer_scale_init_value: 1e-6
+ use_dwconv: True # depth-wise convs
+ num_layers: 2
+
+ num_maskmem: 7
+ image_size: 1024
+ # apply scaled sigmoid on mask logits for memory encoder, and directly feed input mask as output mask
+ sigmoid_scale_for_mem_enc: 20.0
+ sigmoid_bias_for_mem_enc: -10.0
+ use_mask_input_as_output_without_sam: true
+ # Memory
+ directly_add_no_mem_embed: true
+ no_obj_embed_spatial: true
+ # use high-resolution feature map in the SAM mask decoder
+ use_high_res_features_in_sam: true
+ # output 3 masks on the first click on initial conditioning frames
+ multimask_output_in_sam: true
+ # SAM heads
+ iou_prediction_use_sigmoid: True
+ # cross-attend to object pointers from other frames (based on SAM output tokens) in the encoder
+ use_obj_ptrs_in_encoder: true
+ add_tpos_enc_to_obj_ptrs: true
+ proj_tpos_enc_in_obj_ptrs: true
+ use_signed_tpos_enc_to_obj_ptrs: true
+ only_obj_ptrs_in_the_past_for_eval: true
+ # object occlusion prediction
+ pred_obj_scores: true
+ pred_obj_scores_mlp: true
+ fixed_no_obj_ptr: true
+ # multimask tracking settings
+ multimask_output_for_tracking: true
+ use_multimask_token_for_obj_ptr: true
+ multimask_min_pt_num: 0
+ multimask_max_pt_num: 1
+ use_mlp_for_obj_ptr_proj: true
+ # Compilation flag
+ compile_image_encoder: False
+
+ ####### Training specific params #######
+ # box/point input and corrections
+ prob_to_use_pt_input_for_train: 0.5
+ prob_to_use_pt_input_for_eval: 0.0
+ prob_to_use_box_input_for_train: 0.5 # 0.5*0.5 = 0.25 prob to use box instead of points
+ prob_to_use_box_input_for_eval: 0.0
+ prob_to_sample_from_gt_for_train: 0.1 # with a small prob, sampling correction points from GT mask instead of prediction errors
+ num_frames_to_correct_for_train: 2 # iteratively sample on random 1~2 frames (always include the first frame)
+ num_frames_to_correct_for_eval: 1 # only iteratively sample on first frame
+ rand_frames_to_correct_for_train: true # random #init-cond-frame ~ 2
+ add_all_frames_to_correct_as_cond: true # when a frame receives a correction click, it becomes a conditioning frame (even if it's not initially a conditioning frame)
+ # maximum 2 initial conditioning frames
+ num_init_cond_frames_for_train: 2
+ rand_init_cond_frames_for_train: true # random 1~2
+ num_correction_pt_per_frame: 7
+ use_act_ckpt_iterative_pt_sampling: false
+
+ num_init_cond_frames_for_eval: 1 # only mask on the first frame
+ forward_backbone_per_frame_for_eval: true
diff --git a/sam2/configs/sam2.1_hiera_t.yaml b/sam2/configs/sam2.1_hiera_t.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..3c3f6e5cf610c287821eab85e5cd70e7bfa12ee8
--- /dev/null
+++ b/sam2/configs/sam2.1_hiera_t.yaml
@@ -0,0 +1,142 @@
+# @package _global_
+
+# Model
+model:
+ _target_: sam2.sam2_train.SAM2Train
+ image_encoder:
+ _target_: sam2.modeling.backbones.image_encoder.ImageEncoder
+ scalp: 1
+ trunk:
+ _target_: sam2.modeling.backbones.hieradet.Hiera
+ embed_dim: 96
+ num_heads: 1
+ stages: [1, 2, 7, 2]
+ global_att_blocks: [5, 7, 9]
+ window_pos_embed_bkg_spatial_size: [7, 7]
+ drop_path_rate: 0.1
+ neck:
+ _target_: sam2.modeling.backbones.image_encoder.FpnNeck
+ position_encoding:
+ _target_: sam2.modeling.position_encoding.PositionEmbeddingSine
+ num_pos_feats: 256
+ normalize: true
+ scale: null
+ temperature: 10000
+ d_model: 256
+ backbone_channel_list: [768, 384, 192, 96]
+ fpn_top_down_levels: [2, 3] # output level 0 and 1 directly use the backbone features
+ fpn_interp_model: nearest
+
+ memory_attention:
+ _target_: sam2.modeling.memory_attention.MemoryAttention
+ d_model: 256
+ pos_enc_at_input: true
+ layer:
+ _target_: sam2.modeling.memory_attention.MemoryAttentionLayer
+ activation: relu
+ dim_feedforward: 2048
+ dropout: 0.1
+ pos_enc_at_attn: false
+ self_attention:
+ _target_: sam2.modeling.sam.transformer.RoPEAttention
+ rope_theta: 10000.0
+ feat_sizes: [64, 64]
+ embedding_dim: 256
+ num_heads: 1
+ downsample_rate: 1
+ dropout: 0.1
+ d_model: 256
+ pos_enc_at_cross_attn_keys: true
+ pos_enc_at_cross_attn_queries: false
+ cross_attention:
+ _target_: sam2.modeling.sam.transformer.RoPEAttention
+ rope_theta: 10000.0
+ feat_sizes: [64, 64]
+ rope_k_repeat: true
+ embedding_dim: 256
+ num_heads: 1
+ downsample_rate: 1
+ dropout: 0.1
+ kv_in_dim: 64
+ num_layers: 4
+
+ memory_encoder:
+ _target_: sam2.modeling.memory_encoder.MemoryEncoder
+ out_dim: 64
+ position_encoding:
+ _target_: sam2.modeling.position_encoding.PositionEmbeddingSine
+ num_pos_feats: 64
+ normalize: true
+ scale: null
+ temperature: 10000
+ mask_downsampler:
+ _target_: sam2.modeling.memory_encoder.MaskDownSampler
+ kernel_size: 3
+ stride: 2
+ padding: 1
+ fuser:
+ _target_: sam2.modeling.memory_encoder.Fuser
+ layer:
+ _target_: sam2.modeling.memory_encoder.CXBlock
+ dim: 256
+ kernel_size: 7
+ padding: 3
+ layer_scale_init_value: 1e-6
+ use_dwconv: true # depth-wise convs
+ num_layers: 2
+
+ num_maskmem: 7
+ image_size: 1024
+  # apply scaled sigmoid on mask logits for memory encoder,
+  # and directly feed input mask as output mask
+ sigmoid_scale_for_mem_enc: 20.0
+ sigmoid_bias_for_mem_enc: -10.0
+ use_mask_input_as_output_without_sam: true
+ # Memory
+ directly_add_no_mem_embed: true
+ no_obj_embed_spatial: true
+ # use high-resolution feature map in the SAM mask decoder
+ use_high_res_features_in_sam: true
+ # output 3 masks on the first click on initial conditioning frames
+ multimask_output_in_sam: true
+ # SAM heads
+ iou_prediction_use_sigmoid: true
+ # cross-attend to object pointers from other frames (based on SAM output tokens) in the encoder
+ use_obj_ptrs_in_encoder: true
+ add_tpos_enc_to_obj_ptrs: true
+ proj_tpos_enc_in_obj_ptrs: true
+ use_signed_tpos_enc_to_obj_ptrs: true
+ only_obj_ptrs_in_the_past_for_eval: true
+ # object occlusion prediction
+ pred_obj_scores: true
+ pred_obj_scores_mlp: true
+ fixed_no_obj_ptr: true
+ # multimask tracking settings
+ multimask_output_for_tracking: true
+ use_multimask_token_for_obj_ptr: true
+ multimask_min_pt_num: 0
+ multimask_max_pt_num: 1
+ use_mlp_for_obj_ptr_proj: true
+ # Compilation flag
+ # HieraT does not currently support compilation, should always be set to false
+ compile_image_encoder: false
+
+ ####### Training specific params #######
+ # box/point input and corrections
+ prob_to_use_pt_input_for_train: 0.5
+ prob_to_use_pt_input_for_eval: 0.0
+ prob_to_use_box_input_for_train: 0.5 # 0.5*0.5 = 0.25 prob to use box instead of points
+ prob_to_use_box_input_for_eval: 0.0
+ prob_to_sample_from_gt_for_train: 0.1 # with a small prob, sampling correction points from GT mask instead of prediction errors
+ num_frames_to_correct_for_train: 2 # iteratively sample on random 1~2 frames (always include the first frame)
+ num_frames_to_correct_for_eval: 1 # only iteratively sample on first frame
+ rand_frames_to_correct_for_train: true # random #init-cond-frame ~ 2
+ add_all_frames_to_correct_as_cond: true # when a frame receives a correction click, it becomes a conditioning frame (even if it's not initially a conditioning frame)
+ # maximum 2 initial conditioning frames
+ num_init_cond_frames_for_train: 2
+ rand_init_cond_frames_for_train: true # random 1~2
+ num_correction_pt_per_frame: 7
+ use_act_ckpt_iterative_pt_sampling: false
+
+ num_init_cond_frames_for_eval: 1 # only mask on the first frame
+ forward_backbone_per_frame_for_eval: true
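
One coupling worth noting across the four configs above: the RoPE `feat_sizes: [64, 64]` appears to follow from `image_size: 1024` at the stride-16 feature level that memory attention operates on, so changing one without the other would break the rotary embeddings. A minimal sanity check under that assumption:

# Assumes memory attention runs on the stride-16 feature map (64x64 tokens
# for a 1024x1024 input); if image_size changes, feat_sizes must track it.
image_size = 1024
stride = 16
feat = image_size // stride
assert [feat, feat] == [64, 64]
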
diff --git a/sam2/configs/sam2.1_training/sam2.1_hiera_b+_MOSE_finetune.yaml b/sam2/configs/sam2.1_training/sam2.1_hiera_b+_MOSE_finetune.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..9b6faa79f47ee576faf007bffd23fb6649bd881d
--- /dev/null
+++ b/sam2/configs/sam2.1_training/sam2.1_hiera_b+_MOSE_finetune.yaml
@@ -0,0 +1,339 @@
+# @package _global_
+
+scratch:
+ resolution: 1024
+ train_batch_size: 1
+ num_train_workers: 10
+ num_frames: 8
+ max_num_objects: 3
+ base_lr: 5.0e-6
+ vision_lr: 3.0e-06
+ phases_per_epoch: 1
+ num_epochs: 40
+
+dataset:
+ # PATHS to Dataset
+ img_folder: null # PATH to MOSE JPEGImages folder
+ gt_folder: null # PATH to MOSE Annotations folder
+ file_list_txt: training/assets/MOSE_sample_train_list.txt # Optional PATH to filelist containing a subset of videos to be used for training
+ multiplier: 2
+
+# Video transforms
+vos:
+ train_transforms:
+ - _target_: training.dataset.transforms.ComposeAPI
+ transforms:
+ - _target_: training.dataset.transforms.RandomHorizontalFlip
+ consistent_transform: True
+ - _target_: training.dataset.transforms.RandomAffine
+ degrees: 25
+ shear: 20
+ image_interpolation: bilinear
+ consistent_transform: True
+ - _target_: training.dataset.transforms.RandomResizeAPI
+ sizes: ${scratch.resolution}
+ square: true
+ consistent_transform: True
+ - _target_: training.dataset.transforms.ColorJitter
+ consistent_transform: True
+ brightness: 0.1
+ contrast: 0.03
+ saturation: 0.03
+ hue: null
+ - _target_: training.dataset.transforms.RandomGrayscale
+ p: 0.05
+ consistent_transform: True
+ - _target_: training.dataset.transforms.ColorJitter
+ consistent_transform: False
+ brightness: 0.1
+ contrast: 0.05
+ saturation: 0.05
+ hue: null
+ - _target_: training.dataset.transforms.ToTensorAPI
+ - _target_: training.dataset.transforms.NormalizeAPI
+ mean: [0.485, 0.456, 0.406]
+ std: [0.229, 0.224, 0.225]
+
+trainer:
+ _target_: training.trainer.Trainer
+ mode: train_only
+ max_epochs: ${times:${scratch.num_epochs},${scratch.phases_per_epoch}}
+ accelerator: cuda
+ seed_value: 123
+
+ model:
+ _target_: training.model.sam2.SAM2Train
+ image_encoder:
+ _target_: sam2.modeling.backbones.image_encoder.ImageEncoder
+ scalp: 1
+ trunk:
+ _target_: sam2.modeling.backbones.hieradet.Hiera
+ embed_dim: 112
+ num_heads: 2
+ drop_path_rate: 0.1
+ neck:
+ _target_: sam2.modeling.backbones.image_encoder.FpnNeck
+ position_encoding:
+ _target_: sam2.modeling.position_encoding.PositionEmbeddingSine
+ num_pos_feats: 256
+ normalize: true
+ scale: null
+ temperature: 10000
+ d_model: 256
+ backbone_channel_list: [896, 448, 224, 112]
+ fpn_top_down_levels: [2, 3] # output level 0 and 1 directly use the backbone features
+ fpn_interp_model: nearest
+
+ memory_attention:
+ _target_: sam2.modeling.memory_attention.MemoryAttention
+ d_model: 256
+ pos_enc_at_input: true
+ layer:
+ _target_: sam2.modeling.memory_attention.MemoryAttentionLayer
+ activation: relu
+ dim_feedforward: 2048
+ dropout: 0.1
+ pos_enc_at_attn: false
+ self_attention:
+ _target_: sam2.modeling.sam.transformer.RoPEAttention
+ rope_theta: 10000.0
+ feat_sizes: [64, 64]
+ embedding_dim: 256
+ num_heads: 1
+ downsample_rate: 1
+ dropout: 0.1
+ d_model: 256
+ pos_enc_at_cross_attn_keys: true
+ pos_enc_at_cross_attn_queries: false
+ cross_attention:
+ _target_: sam2.modeling.sam.transformer.RoPEAttention
+ rope_theta: 10000.0
+ feat_sizes: [64, 64]
+ rope_k_repeat: True
+ embedding_dim: 256
+ num_heads: 1
+ downsample_rate: 1
+ dropout: 0.1
+ kv_in_dim: 64
+ num_layers: 4
+
+ memory_encoder:
+ _target_: sam2.modeling.memory_encoder.MemoryEncoder
+ out_dim: 64
+ position_encoding:
+ _target_: sam2.modeling.position_encoding.PositionEmbeddingSine
+ num_pos_feats: 64
+ normalize: true
+ scale: null
+ temperature: 10000
+ mask_downsampler:
+ _target_: sam2.modeling.memory_encoder.MaskDownSampler
+ kernel_size: 3
+ stride: 2
+ padding: 1
+ fuser:
+ _target_: sam2.modeling.memory_encoder.Fuser
+ layer:
+ _target_: sam2.modeling.memory_encoder.CXBlock
+ dim: 256
+ kernel_size: 7
+ padding: 3
+ layer_scale_init_value: 1e-6
+ use_dwconv: True # depth-wise convs
+ num_layers: 2
+
+ num_maskmem: 7
+ image_size: ${scratch.resolution}
+ # apply scaled sigmoid on mask logits for memory encoder, and directly feed input mask as output mask
+ sigmoid_scale_for_mem_enc: 20.0
+ sigmoid_bias_for_mem_enc: -10.0
+ use_mask_input_as_output_without_sam: true
+ # Memory
+ directly_add_no_mem_embed: true
+ no_obj_embed_spatial: true
+ # use high-resolution feature map in the SAM mask decoder
+ use_high_res_features_in_sam: true
+ # output 3 masks on the first click on initial conditioning frames
+ multimask_output_in_sam: true
+ # SAM heads
+ iou_prediction_use_sigmoid: True
+ # cross-attend to object pointers from other frames (based on SAM output tokens) in the encoder
+ use_obj_ptrs_in_encoder: true
+ add_tpos_enc_to_obj_ptrs: true
+ proj_tpos_enc_in_obj_ptrs: true
+ use_signed_tpos_enc_to_obj_ptrs: true
+ only_obj_ptrs_in_the_past_for_eval: true
+ # object occlusion prediction
+ pred_obj_scores: true
+ pred_obj_scores_mlp: true
+ fixed_no_obj_ptr: true
+ # multimask tracking settings
+ multimask_output_for_tracking: true
+ use_multimask_token_for_obj_ptr: true
+ multimask_min_pt_num: 0
+ multimask_max_pt_num: 1
+ use_mlp_for_obj_ptr_proj: true
+ # Compilation flag
+ # compile_image_encoder: False
+
+ ####### Training specific params #######
+ # box/point input and corrections
+ prob_to_use_pt_input_for_train: 0.5
+ prob_to_use_pt_input_for_eval: 0.0
+ prob_to_use_box_input_for_train: 0.5 # 0.5*0.5 = 0.25 prob to use box instead of points
+ prob_to_use_box_input_for_eval: 0.0
+ prob_to_sample_from_gt_for_train: 0.1 # with a small prob, sampling correction points from GT mask instead of prediction errors
+ num_frames_to_correct_for_train: 2 # iteratively sample on random 1~2 frames (always include the first frame)
+ num_frames_to_correct_for_eval: 1 # only iteratively sample on first frame
+ rand_frames_to_correct_for_train: True # random #init-cond-frame ~ 2
+ add_all_frames_to_correct_as_cond: True # when a frame receives a correction click, it becomes a conditioning frame (even if it's not initially a conditioning frame)
+ # maximum 2 initial conditioning frames
+ num_init_cond_frames_for_train: 2
+ rand_init_cond_frames_for_train: True # random 1~2
+ num_correction_pt_per_frame: 7
+ use_act_ckpt_iterative_pt_sampling: false
+
+
+
+ num_init_cond_frames_for_eval: 1 # only mask on the first frame
+ forward_backbone_per_frame_for_eval: True
+
+
+ data:
+ train:
+ _target_: training.dataset.sam2_datasets.TorchTrainMixedDataset
+ phases_per_epoch: ${scratch.phases_per_epoch}
+ batch_sizes:
+ - ${scratch.train_batch_size}
+
+ datasets:
+ - _target_: training.dataset.utils.RepeatFactorWrapper
+ dataset:
+ _target_: training.dataset.utils.ConcatDataset
+ datasets:
+ - _target_: training.dataset.vos_dataset.VOSDataset
+ transforms: ${vos.train_transforms}
+ training: true
+ video_dataset:
+ _target_: training.dataset.vos_raw_dataset.PNGRawDataset
+ img_folder: ${dataset.img_folder}
+ gt_folder: ${dataset.gt_folder}
+ file_list_txt: ${dataset.file_list_txt}
+ sampler:
+ _target_: training.dataset.vos_sampler.RandomUniformSampler
+ num_frames: ${scratch.num_frames}
+ max_num_objects: ${scratch.max_num_objects}
+ multiplier: ${dataset.multiplier}
+ shuffle: True
+ num_workers: ${scratch.num_train_workers}
+ pin_memory: True
+ drop_last: True
+ collate_fn:
+ _target_: training.utils.data_utils.collate_fn
+ _partial_: true
+ dict_key: all
+
+ optim:
+ amp:
+ enabled: True
+ amp_dtype: bfloat16
+
+ optimizer:
+ _target_: torch.optim.AdamW
+
+ gradient_clip:
+ _target_: training.optimizer.GradientClipper
+ max_norm: 0.1
+ norm_type: 2
+
+ param_group_modifiers:
+ - _target_: training.optimizer.layer_decay_param_modifier
+ _partial_: True
+ layer_decay_value: 0.9
+ apply_to: 'image_encoder.trunk'
+ overrides:
+ - pattern: '*pos_embed*'
+ value: 1.0
+
+ options:
+ lr:
+ - scheduler:
+ _target_: fvcore.common.param_scheduler.CosineParamScheduler
+ start_value: ${scratch.base_lr}
+ end_value: ${divide:${scratch.base_lr},10}
+ - scheduler:
+ _target_: fvcore.common.param_scheduler.CosineParamScheduler
+ start_value: ${scratch.vision_lr}
+ end_value: ${divide:${scratch.vision_lr},10}
+ param_names:
+ - 'image_encoder.*'
+ weight_decay:
+ - scheduler:
+ _target_: fvcore.common.param_scheduler.ConstantParamScheduler
+ value: 0.1
+ - scheduler:
+ _target_: fvcore.common.param_scheduler.ConstantParamScheduler
+ value: 0.0
+ param_names:
+ - '*bias*'
+ module_cls_names: ['torch.nn.LayerNorm']
+
+ loss:
+ all:
+ _target_: training.loss_fns.MultiStepMultiMasksAndIous
+ weight_dict:
+ loss_mask: 20
+ loss_dice: 1
+ loss_iou: 1
+ loss_class: 1
+ supervise_all_iou: true
+ iou_use_l1_loss: true
+ pred_obj_scores: true
+ focal_gamma_obj_score: 0.0
+ focal_alpha_obj_score: -1.0
+
+ distributed:
+ backend: nccl
+ find_unused_parameters: True
+
+ logging:
+ tensorboard_writer:
+ _target_: training.utils.logger.make_tensorboard_logger
+ log_dir: ${launcher.experiment_log_dir}/tensorboard
+ flush_secs: 120
+ should_log: True
+ log_dir: ${launcher.experiment_log_dir}/logs
+ log_freq: 10
+
+ # initialize from a SAM 2 checkpoint
+ checkpoint:
+ save_dir: ${launcher.experiment_log_dir}/checkpoints
+    save_freq: 0 # 0 means only the last checkpoint is saved.
+ model_weight_initializer:
+ _partial_: True
+ _target_: training.utils.checkpoint_utils.load_state_dict_into_model
+ strict: True
+ ignore_unexpected_keys: null
+ ignore_missing_keys: null
+
+ state_dict:
+ _target_: training.utils.checkpoint_utils.load_checkpoint_and_apply_kernels
+ checkpoint_path: ./checkpoints/sam2.1_hiera_base_plus.pt # PATH to SAM 2.1 checkpoint
+ ckpt_state_dict_keys: ['model']
+
+launcher:
+ num_nodes: 1
+ gpus_per_node: 8
+ experiment_log_dir: null # Path to log directory, defaults to ./sam2_logs/${config_name}
+
+# SLURM args if running on a cluster
+submitit:
+ partition: null
+ account: null
+ qos: null
+ cpus_per_task: 10
+ use_cluster: false
+ timeout_hour: 24
+ name: null
+ port_range: [10000, 65000]
+
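
The `${times:...}` and `${divide:...}` interpolations in this file are not built-in OmegaConf resolvers; the training launcher is expected to register them before loading the config. A minimal sketch that makes the file resolvable on its own (the resolver names match the config; the implementations here are assumptions):

from omegaconf import OmegaConf

OmegaConf.register_new_resolver("times", lambda a, b: a * b)
OmegaConf.register_new_resolver("divide", lambda a, b: a / b)

cfg = OmegaConf.load("sam2/configs/sam2.1_training/sam2.1_hiera_b+_MOSE_finetune.yaml")
print(cfg.trainer.max_epochs)                        # 40 * 1 = 40
print(cfg.optim.options.lr[0].scheduler.end_value)   # 5.0e-6 / 10 = 5.0e-7
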
diff --git a/sam2/configs/sam2/sam2_hiera_b+.yaml b/sam2/configs/sam2/sam2_hiera_b+.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..0f435af02fc88e2d3b7bff06f8cf8013cc079c24
--- /dev/null
+++ b/sam2/configs/sam2/sam2_hiera_b+.yaml
@@ -0,0 +1,113 @@
+# @package _global_
+
+# Model
+model:
+ _target_: sam2.modeling.sam2_base.SAM2Base
+ image_encoder:
+ _target_: sam2.modeling.backbones.image_encoder.ImageEncoder
+ scalp: 1
+ trunk:
+ _target_: sam2.modeling.backbones.hieradet.Hiera
+ embed_dim: 112
+ num_heads: 2
+ neck:
+ _target_: sam2.modeling.backbones.image_encoder.FpnNeck
+ position_encoding:
+ _target_: sam2.modeling.position_encoding.PositionEmbeddingSine
+ num_pos_feats: 256
+ normalize: true
+ scale: null
+ temperature: 10000
+ d_model: 256
+ backbone_channel_list: [896, 448, 224, 112]
+ fpn_top_down_levels: [2, 3] # output level 0 and 1 directly use the backbone features
+ fpn_interp_model: nearest
+
+ memory_attention:
+ _target_: sam2.modeling.memory_attention.MemoryAttention
+ d_model: 256
+ pos_enc_at_input: true
+ layer:
+ _target_: sam2.modeling.memory_attention.MemoryAttentionLayer
+ activation: relu
+ dim_feedforward: 2048
+ dropout: 0.1
+ pos_enc_at_attn: false
+ self_attention:
+ _target_: sam2.modeling.sam.transformer.RoPEAttention
+ rope_theta: 10000.0
+ feat_sizes: [64, 64]
+ embedding_dim: 256
+ num_heads: 1
+ downsample_rate: 1
+ dropout: 0.1
+ d_model: 256
+ pos_enc_at_cross_attn_keys: true
+ pos_enc_at_cross_attn_queries: false
+ cross_attention:
+ _target_: sam2.modeling.sam.transformer.RoPEAttention
+ rope_theta: 10000.0
+ feat_sizes: [64, 64]
+ rope_k_repeat: True
+ embedding_dim: 256
+ num_heads: 1
+ downsample_rate: 1
+ dropout: 0.1
+ kv_in_dim: 64
+ num_layers: 4
+
+ memory_encoder:
+ _target_: sam2.modeling.memory_encoder.MemoryEncoder
+ out_dim: 64
+ position_encoding:
+ _target_: sam2.modeling.position_encoding.PositionEmbeddingSine
+ num_pos_feats: 64
+ normalize: true
+ scale: null
+ temperature: 10000
+ mask_downsampler:
+ _target_: sam2.modeling.memory_encoder.MaskDownSampler
+ kernel_size: 3
+ stride: 2
+ padding: 1
+ fuser:
+ _target_: sam2.modeling.memory_encoder.Fuser
+ layer:
+ _target_: sam2.modeling.memory_encoder.CXBlock
+ dim: 256
+ kernel_size: 7
+ padding: 3
+ layer_scale_init_value: 1e-6
+ use_dwconv: True # depth-wise convs
+ num_layers: 2
+
+ num_maskmem: 7
+ image_size: 1024
+ # apply scaled sigmoid on mask logits for memory encoder, and directly feed input mask as output mask
+ sigmoid_scale_for_mem_enc: 20.0
+ sigmoid_bias_for_mem_enc: -10.0
+ use_mask_input_as_output_without_sam: true
+ # Memory
+ directly_add_no_mem_embed: true
+ # use high-resolution feature map in the SAM mask decoder
+ use_high_res_features_in_sam: true
+ # output 3 masks on the first click on initial conditioning frames
+ multimask_output_in_sam: true
+ # SAM heads
+ iou_prediction_use_sigmoid: True
+ # cross-attend to object pointers from other frames (based on SAM output tokens) in the encoder
+ use_obj_ptrs_in_encoder: true
+ add_tpos_enc_to_obj_ptrs: false
+ only_obj_ptrs_in_the_past_for_eval: true
+ # object occlusion prediction
+ pred_obj_scores: true
+ pred_obj_scores_mlp: true
+ fixed_no_obj_ptr: true
+ # multimask tracking settings
+ multimask_output_for_tracking: true
+ use_multimask_token_for_obj_ptr: true
+ multimask_min_pt_num: 0
+ multimask_max_pt_num: 1
+ use_mlp_for_obj_ptr_proj: true
+ # Compilation flag
+ compile_image_encoder: False
diff --git a/sam2/configs/sam2/sam2_hiera_l.yaml b/sam2/configs/sam2/sam2_hiera_l.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..1092802b1d24be6fedf78939f45b0d021d4ec560
--- /dev/null
+++ b/sam2/configs/sam2/sam2_hiera_l.yaml
@@ -0,0 +1,117 @@
+# @package _global_
+
+# Model
+model:
+ _target_: sam2.modeling.sam2_base.SAM2Base
+ image_encoder:
+ _target_: sam2.modeling.backbones.image_encoder.ImageEncoder
+ scalp: 1
+ trunk:
+ _target_: sam2.modeling.backbones.hieradet.Hiera
+ embed_dim: 144
+ num_heads: 2
+ stages: [2, 6, 36, 4]
+ global_att_blocks: [23, 33, 43]
+ window_pos_embed_bkg_spatial_size: [7, 7]
+ window_spec: [8, 4, 16, 8]
+ neck:
+ _target_: sam2.modeling.backbones.image_encoder.FpnNeck
+ position_encoding:
+ _target_: sam2.modeling.position_encoding.PositionEmbeddingSine
+ num_pos_feats: 256
+ normalize: true
+ scale: null
+ temperature: 10000
+ d_model: 256
+ backbone_channel_list: [1152, 576, 288, 144]
+ fpn_top_down_levels: [2, 3] # output level 0 and 1 directly use the backbone features
+ fpn_interp_model: nearest
+
+ memory_attention:
+ _target_: sam2.modeling.memory_attention.MemoryAttention
+ d_model: 256
+ pos_enc_at_input: true
+ layer:
+ _target_: sam2.modeling.memory_attention.MemoryAttentionLayer
+ activation: relu
+ dim_feedforward: 2048
+ dropout: 0.1
+ pos_enc_at_attn: false
+ self_attention:
+ _target_: sam2.modeling.sam.transformer.RoPEAttention
+ rope_theta: 10000.0
+ feat_sizes: [64, 64]
+ embedding_dim: 256
+ num_heads: 1
+ downsample_rate: 1
+ dropout: 0.1
+ d_model: 256
+ pos_enc_at_cross_attn_keys: true
+ pos_enc_at_cross_attn_queries: false
+ cross_attention:
+ _target_: sam2.modeling.sam.transformer.RoPEAttention
+ rope_theta: 10000.0
+ feat_sizes: [64, 64]
+ rope_k_repeat: True
+ embedding_dim: 256
+ num_heads: 1
+ downsample_rate: 1
+ dropout: 0.1
+ kv_in_dim: 64
+ num_layers: 4
+
+ memory_encoder:
+ _target_: sam2.modeling.memory_encoder.MemoryEncoder
+ out_dim: 64
+ position_encoding:
+ _target_: sam2.modeling.position_encoding.PositionEmbeddingSine
+ num_pos_feats: 64
+ normalize: true
+ scale: null
+ temperature: 10000
+ mask_downsampler:
+ _target_: sam2.modeling.memory_encoder.MaskDownSampler
+ kernel_size: 3
+ stride: 2
+ padding: 1
+ fuser:
+ _target_: sam2.modeling.memory_encoder.Fuser
+ layer:
+ _target_: sam2.modeling.memory_encoder.CXBlock
+ dim: 256
+ kernel_size: 7
+ padding: 3
+ layer_scale_init_value: 1e-6
+ use_dwconv: True # depth-wise convs
+ num_layers: 2
+
+ num_maskmem: 7
+ image_size: 1024
+ # apply scaled sigmoid on mask logits for memory encoder, and directly feed input mask as output mask
+ sigmoid_scale_for_mem_enc: 20.0
+ sigmoid_bias_for_mem_enc: -10.0
+ use_mask_input_as_output_without_sam: true
+ # Memory
+ directly_add_no_mem_embed: true
+ # use high-resolution feature map in the SAM mask decoder
+ use_high_res_features_in_sam: true
+ # output 3 masks on the first click on initial conditioning frames
+ multimask_output_in_sam: true
+ # SAM heads
+ iou_prediction_use_sigmoid: True
+ # cross-attend to object pointers from other frames (based on SAM output tokens) in the encoder
+ use_obj_ptrs_in_encoder: true
+ add_tpos_enc_to_obj_ptrs: false
+ only_obj_ptrs_in_the_past_for_eval: true
+ # object occlusion prediction
+ pred_obj_scores: true
+ pred_obj_scores_mlp: true
+ fixed_no_obj_ptr: true
+ # multimask tracking settings
+ multimask_output_for_tracking: true
+ use_multimask_token_for_obj_ptr: true
+ multimask_min_pt_num: 0
+ multimask_max_pt_num: 1
+ use_mlp_for_obj_ptr_proj: true
+ # Compilation flag
+ compile_image_encoder: False
diff --git a/sam2/configs/sam2/sam2_hiera_s.yaml b/sam2/configs/sam2/sam2_hiera_s.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..174e414f1467d80e94a34e9525dc373058f8caaa
--- /dev/null
+++ b/sam2/configs/sam2/sam2_hiera_s.yaml
@@ -0,0 +1,116 @@
+# @package _global_
+
+# Model
+model:
+ _target_: sam2.modeling.sam2_base.SAM2Base
+ image_encoder:
+ _target_: sam2.modeling.backbones.image_encoder.ImageEncoder
+ scalp: 1
+ trunk:
+ _target_: sam2.modeling.backbones.hieradet.Hiera
+ embed_dim: 96
+ num_heads: 1
+ stages: [1, 2, 11, 2]
+ global_att_blocks: [7, 10, 13]
+ window_pos_embed_bkg_spatial_size: [7, 7]
+ neck:
+ _target_: sam2.modeling.backbones.image_encoder.FpnNeck
+ position_encoding:
+ _target_: sam2.modeling.position_encoding.PositionEmbeddingSine
+ num_pos_feats: 256
+ normalize: true
+ scale: null
+ temperature: 10000
+ d_model: 256
+ backbone_channel_list: [768, 384, 192, 96]
+ fpn_top_down_levels: [2, 3] # output level 0 and 1 directly use the backbone features
+ fpn_interp_model: nearest
+
+ memory_attention:
+ _target_: sam2.modeling.memory_attention.MemoryAttention
+ d_model: 256
+ pos_enc_at_input: true
+ layer:
+ _target_: sam2.modeling.memory_attention.MemoryAttentionLayer
+ activation: relu
+ dim_feedforward: 2048
+ dropout: 0.1
+ pos_enc_at_attn: false
+ self_attention:
+ _target_: sam2.modeling.sam.transformer.RoPEAttention
+ rope_theta: 10000.0
+ feat_sizes: [64, 64]
+ embedding_dim: 256
+ num_heads: 1
+ downsample_rate: 1
+ dropout: 0.1
+ d_model: 256
+ pos_enc_at_cross_attn_keys: true
+ pos_enc_at_cross_attn_queries: false
+ cross_attention:
+ _target_: sam2.modeling.sam.transformer.RoPEAttention
+ rope_theta: 10000.0
+ feat_sizes: [64, 64]
+ rope_k_repeat: True
+ embedding_dim: 256
+ num_heads: 1
+ downsample_rate: 1
+ dropout: 0.1
+ kv_in_dim: 64
+ num_layers: 4
+
+ memory_encoder:
+ _target_: sam2.modeling.memory_encoder.MemoryEncoder
+ out_dim: 64
+ position_encoding:
+ _target_: sam2.modeling.position_encoding.PositionEmbeddingSine
+ num_pos_feats: 64
+ normalize: true
+ scale: null
+ temperature: 10000
+ mask_downsampler:
+ _target_: sam2.modeling.memory_encoder.MaskDownSampler
+ kernel_size: 3
+ stride: 2
+ padding: 1
+ fuser:
+ _target_: sam2.modeling.memory_encoder.Fuser
+ layer:
+ _target_: sam2.modeling.memory_encoder.CXBlock
+ dim: 256
+ kernel_size: 7
+ padding: 3
+ layer_scale_init_value: 1e-6
+ use_dwconv: True # depth-wise convs
+ num_layers: 2
+
+ num_maskmem: 7
+ image_size: 1024
+ # apply scaled sigmoid on mask logits for memory encoder, and directly feed input mask as output mask
+ sigmoid_scale_for_mem_enc: 20.0
+ sigmoid_bias_for_mem_enc: -10.0
+ use_mask_input_as_output_without_sam: true
+ # Memory
+ directly_add_no_mem_embed: true
+ # use high-resolution feature map in the SAM mask decoder
+ use_high_res_features_in_sam: true
+ # output 3 masks on the first click on initial conditioning frames
+ multimask_output_in_sam: true
+ # SAM heads
+ iou_prediction_use_sigmoid: True
+ # cross-attend to object pointers from other frames (based on SAM output tokens) in the encoder
+ use_obj_ptrs_in_encoder: true
+ add_tpos_enc_to_obj_ptrs: false
+ only_obj_ptrs_in_the_past_for_eval: true
+ # object occlusion prediction
+ pred_obj_scores: true
+ pred_obj_scores_mlp: true
+ fixed_no_obj_ptr: true
+ # multimask tracking settings
+ multimask_output_for_tracking: true
+ use_multimask_token_for_obj_ptr: true
+ multimask_min_pt_num: 0
+ multimask_max_pt_num: 1
+ use_mlp_for_obj_ptr_proj: true
+ # Compilation flag
+ compile_image_encoder: False
diff --git a/sam2/configs/sam2/sam2_hiera_t.yaml b/sam2/configs/sam2/sam2_hiera_t.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..121447aabd5318fac20efc2bc00d7c406ca26f01
--- /dev/null
+++ b/sam2/configs/sam2/sam2_hiera_t.yaml
@@ -0,0 +1,118 @@
+# @package _global_
+
+# Model
+model:
+ _target_: sam2.modeling.sam2_base.SAM2Base
+ image_encoder:
+ _target_: sam2.modeling.backbones.image_encoder.ImageEncoder
+ scalp: 1
+ trunk:
+ _target_: sam2.modeling.backbones.hieradet.Hiera
+ embed_dim: 96
+ num_heads: 1
+ stages: [1, 2, 7, 2]
+ global_att_blocks: [5, 7, 9]
+ window_pos_embed_bkg_spatial_size: [7, 7]
+ neck:
+ _target_: sam2.modeling.backbones.image_encoder.FpnNeck
+ position_encoding:
+ _target_: sam2.modeling.position_encoding.PositionEmbeddingSine
+ num_pos_feats: 256
+ normalize: true
+ scale: null
+ temperature: 10000
+ d_model: 256
+ backbone_channel_list: [768, 384, 192, 96]
+ fpn_top_down_levels: [2, 3] # output level 0 and 1 directly use the backbone features
+ fpn_interp_model: nearest
+
+ memory_attention:
+ _target_: sam2.modeling.memory_attention.MemoryAttention
+ d_model: 256
+ pos_enc_at_input: true
+ layer:
+ _target_: sam2.modeling.memory_attention.MemoryAttentionLayer
+ activation: relu
+ dim_feedforward: 2048
+ dropout: 0.1
+ pos_enc_at_attn: false
+ self_attention:
+ _target_: sam2.modeling.sam.transformer.RoPEAttention
+ rope_theta: 10000.0
+ feat_sizes: [64, 64]
+ embedding_dim: 256
+ num_heads: 1
+ downsample_rate: 1
+ dropout: 0.1
+ d_model: 256
+ pos_enc_at_cross_attn_keys: true
+ pos_enc_at_cross_attn_queries: false
+ cross_attention:
+ _target_: sam2.modeling.sam.transformer.RoPEAttention
+ rope_theta: 10000.0
+ feat_sizes: [64, 64]
+ rope_k_repeat: True
+ embedding_dim: 256
+ num_heads: 1
+ downsample_rate: 1
+ dropout: 0.1
+ kv_in_dim: 64
+ num_layers: 4
+
+ memory_encoder:
+ _target_: sam2.modeling.memory_encoder.MemoryEncoder
+ out_dim: 64
+ position_encoding:
+ _target_: sam2.modeling.position_encoding.PositionEmbeddingSine
+ num_pos_feats: 64
+ normalize: true
+ scale: null
+ temperature: 10000
+ mask_downsampler:
+ _target_: sam2.modeling.memory_encoder.MaskDownSampler
+ kernel_size: 3
+ stride: 2
+ padding: 1
+ fuser:
+ _target_: sam2.modeling.memory_encoder.Fuser
+ layer:
+ _target_: sam2.modeling.memory_encoder.CXBlock
+ dim: 256
+ kernel_size: 7
+ padding: 3
+ layer_scale_init_value: 1e-6
+ use_dwconv: True # depth-wise convs
+ num_layers: 2
+
+ num_maskmem: 7
+ image_size: 1024
+  # apply scaled sigmoid on mask logits for memory encoder,
+  # and directly feed input mask as output mask
+ sigmoid_scale_for_mem_enc: 20.0
+ sigmoid_bias_for_mem_enc: -10.0
+ use_mask_input_as_output_without_sam: true
+ # Memory
+ directly_add_no_mem_embed: true
+ # use high-resolution feature map in the SAM mask decoder
+ use_high_res_features_in_sam: true
+ # output 3 masks on the first click on initial conditioning frames
+ multimask_output_in_sam: true
+ # SAM heads
+ iou_prediction_use_sigmoid: True
+ # cross-attend to object pointers from other frames (based on SAM output tokens) in the encoder
+ use_obj_ptrs_in_encoder: true
+ add_tpos_enc_to_obj_ptrs: false
+ only_obj_ptrs_in_the_past_for_eval: true
+ # object occlusion prediction
+ pred_obj_scores: true
+ pred_obj_scores_mlp: true
+ fixed_no_obj_ptr: true
+ # multimask tracking settings
+ multimask_output_for_tracking: true
+ use_multimask_token_for_obj_ptr: true
+ multimask_min_pt_num: 0
+ multimask_max_pt_num: 1
+ use_mlp_for_obj_ptr_proj: true
+ # Compilation flag
+ # HieraT does not currently support compilation, should always be set to False
+ compile_image_encoder: False
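
The four `sam2/sam2_hiera_*.yaml` files above are the inference-time counterparts of the training configs (`_target_` is `SAM2Base` rather than `SAM2Train`, and the training block is absent). A minimal usage sketch with the standard SAM2 builder; the config and checkpoint paths are assumptions that depend on how this vendored copy is laid out:

from sam2.build_sam import build_sam2
from sam2.sam2_image_predictor import SAM2ImagePredictor

# build_sam2 composes the Hydra config and loads the checkpoint weights.
model = build_sam2("configs/sam2/sam2_hiera_t.yaml", "checkpoints/sam2_hiera_tiny.pt")
predictor = SAM2ImagePredictor(model)
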
diff --git a/sam2/csrc/connected_components.cu b/sam2/csrc/connected_components.cu
new file mode 100644
index 0000000000000000000000000000000000000000..ced21eb32eaaadb818d441c1322b99d1bf068f45
--- /dev/null
+++ b/sam2/csrc/connected_components.cu
@@ -0,0 +1,289 @@
+// Copyright (c) Meta Platforms, Inc. and affiliates.
+// All rights reserved.
+
+// This source code is licensed under the license found in the
+// LICENSE file in the root directory of this source tree.
+
+// adapted from https://github.com/zsef123/Connected_components_PyTorch
+// with license found in the LICENSE_cctorch file in the root directory.
+#include <ATen/cuda/CUDAContext.h>
+#include <cuda.h>
+#include <cuda_runtime.h>
+#include <torch/extension.h>
+#include <torch/script.h>
+#include <vector>
+
+// 2d
+#define BLOCK_ROWS 16
+#define BLOCK_COLS 16
+
+namespace cc2d {
+
+template <typename T>
+__device__ __forceinline__ unsigned char hasBit(T bitmap, unsigned char pos) {
+ return (bitmap >> pos) & 1;
+}
+
+__device__ int32_t find(const int32_t* s_buf, int32_t n) {
+ while (s_buf[n] != n)
+ n = s_buf[n];
+ return n;
+}
+
+__device__ int32_t find_n_compress(int32_t* s_buf, int32_t n) {
+ const int32_t id = n;
+ while (s_buf[n] != n) {
+ n = s_buf[n];
+ s_buf[id] = n;
+ }
+ return n;
+}
+
+__device__ void union_(int32_t* s_buf, int32_t a, int32_t b) {
+ bool done;
+ do {
+ a = find(s_buf, a);
+ b = find(s_buf, b);
+
+ if (a < b) {
+ int32_t old = atomicMin(s_buf + b, a);
+ done = (old == b);
+ b = old;
+ } else if (b < a) {
+ int32_t old = atomicMin(s_buf + a, b);
+ done = (old == a);
+ a = old;
+ } else
+ done = true;
+
+ } while (!done);
+}
+
+__global__ void
+init_labeling(int32_t* label, const uint32_t W, const uint32_t H) {
+ const uint32_t row = (blockIdx.y * blockDim.y + threadIdx.y) * 2;
+ const uint32_t col = (blockIdx.x * blockDim.x + threadIdx.x) * 2;
+ const uint32_t idx = row * W + col;
+
+ if (row < H && col < W)
+ label[idx] = idx;
+}
+
+__global__ void
+merge(uint8_t* img, int32_t* label, const uint32_t W, const uint32_t H) {
+ const uint32_t row = (blockIdx.y * blockDim.y + threadIdx.y) * 2;
+ const uint32_t col = (blockIdx.x * blockDim.x + threadIdx.x) * 2;
+ const uint32_t idx = row * W + col;
+
+ if (row >= H || col >= W)
+ return;
+
+ uint32_t P = 0;
+
+ if (img[idx])
+ P |= 0x777;
+ if (row + 1 < H && img[idx + W])
+ P |= 0x777 << 4;
+ if (col + 1 < W && img[idx + 1])
+ P |= 0x777 << 1;
+
+ if (col == 0)
+ P &= 0xEEEE;
+ if (col + 1 >= W)
+ P &= 0x3333;
+ else if (col + 2 >= W)
+ P &= 0x7777;
+
+ if (row == 0)
+ P &= 0xFFF0;
+ if (row + 1 >= H)
+ P &= 0xFF;
+
+ if (P > 0) {
+    // If the top-left neighbor needs checking (first bit of P set) and
+    // that pixel is foreground
+ if (hasBit(P, 0) && img[idx - W - 1]) {
+ union_(label, idx, idx - 2 * W - 2); // top left block
+ }
+
+ if ((hasBit(P, 1) && img[idx - W]) || (hasBit(P, 2) && img[idx - W + 1]))
+      union_(label, idx, idx - 2 * W); // top block (directly above)
+
+ if (hasBit(P, 3) && img[idx + 2 - W])
+ union_(label, idx, idx - 2 * W + 2); // top right block
+
+ if ((hasBit(P, 4) && img[idx - 1]) || (hasBit(P, 8) && img[idx + W - 1]))
+ union_(label, idx, idx - 2); // just left block
+ }
+}
+
+__global__ void compression(int32_t* label, const int32_t W, const int32_t H) {
+ const uint32_t row = (blockIdx.y * blockDim.y + threadIdx.y) * 2;
+ const uint32_t col = (blockIdx.x * blockDim.x + threadIdx.x) * 2;
+ const uint32_t idx = row * W + col;
+
+ if (row < H && col < W)
+ find_n_compress(label, idx);
+}
+
+__global__ void final_labeling(
+ const uint8_t* img,
+ int32_t* label,
+ const int32_t W,
+ const int32_t H) {
+ const uint32_t row = (blockIdx.y * blockDim.y + threadIdx.y) * 2;
+ const uint32_t col = (blockIdx.x * blockDim.x + threadIdx.x) * 2;
+ const uint32_t idx = row * W + col;
+
+ if (row >= H || col >= W)
+ return;
+
+ int32_t y = label[idx] + 1;
+
+ if (img[idx])
+ label[idx] = y;
+ else
+ label[idx] = 0;
+
+ if (col + 1 < W) {
+ if (img[idx + 1])
+ label[idx + 1] = y;
+ else
+ label[idx + 1] = 0;
+
+ if (row + 1 < H) {
+ if (img[idx + W + 1])
+ label[idx + W + 1] = y;
+ else
+ label[idx + W + 1] = 0;
+ }
+ }
+
+ if (row + 1 < H) {
+ if (img[idx + W])
+ label[idx + W] = y;
+ else
+ label[idx + W] = 0;
+ }
+}
+
+__global__ void init_counting(
+ const int32_t* label,
+ int32_t* count_init,
+ const int32_t W,
+ const int32_t H) {
+ const uint32_t row = (blockIdx.y * blockDim.y + threadIdx.y);
+ const uint32_t col = (blockIdx.x * blockDim.x + threadIdx.x);
+ const uint32_t idx = row * W + col;
+
+ if (row >= H || col >= W)
+ return;
+
+ int32_t y = label[idx];
+ if (y > 0) {
+ int32_t count_idx = y - 1;
+ atomicAdd(count_init + count_idx, 1);
+ }
+}
+
+__global__ void final_counting(
+ const int32_t* label,
+ const int32_t* count_init,
+ int32_t* count_final,
+ const int32_t W,
+ const int32_t H) {
+ const uint32_t row = (blockIdx.y * blockDim.y + threadIdx.y);
+ const uint32_t col = (blockIdx.x * blockDim.x + threadIdx.x);
+ const uint32_t idx = row * W + col;
+
+ if (row >= H || col >= W)
+ return;
+
+ int32_t y = label[idx];
+ if (y > 0) {
+ int32_t count_idx = y - 1;
+ count_final[idx] = count_init[count_idx];
+ } else {
+ count_final[idx] = 0;
+ }
+}
+
+} // namespace cc2d
+
+std::vector<torch::Tensor> get_connected_componnets(
+ const torch::Tensor& inputs) {
+ AT_ASSERTM(inputs.is_cuda(), "inputs must be a CUDA tensor");
+ AT_ASSERTM(inputs.ndimension() == 4, "inputs must be [N, 1, H, W] shape");
+ AT_ASSERTM(
+ inputs.scalar_type() == torch::kUInt8, "inputs must be a uint8 type");
+
+ const uint32_t N = inputs.size(0);
+ const uint32_t C = inputs.size(1);
+ const uint32_t H = inputs.size(2);
+ const uint32_t W = inputs.size(3);
+
+ AT_ASSERTM(C == 1, "inputs must be [N, 1, H, W] shape");
+ AT_ASSERTM((H % 2) == 0, "height must be an even number");
+ AT_ASSERTM((W % 2) == 0, "width must be an even number");
+
+  // labels and counts are stored as int32
+ auto label_options =
+ torch::TensorOptions().dtype(torch::kInt32).device(inputs.device());
+ torch::Tensor labels = torch::zeros({N, C, H, W}, label_options);
+ torch::Tensor counts_init = torch::zeros({N, C, H, W}, label_options);
+ torch::Tensor counts_final = torch::zeros({N, C, H, W}, label_options);
+
+ dim3 grid = dim3(
+ ((W + 1) / 2 + BLOCK_COLS - 1) / BLOCK_COLS,
+ ((H + 1) / 2 + BLOCK_ROWS - 1) / BLOCK_ROWS);
+ dim3 block = dim3(BLOCK_COLS, BLOCK_ROWS);
+ dim3 grid_count =
+ dim3((W + BLOCK_COLS) / BLOCK_COLS, (H + BLOCK_ROWS) / BLOCK_ROWS);
+ dim3 block_count = dim3(BLOCK_COLS, BLOCK_ROWS);
+ cudaStream_t stream = at::cuda::getCurrentCUDAStream();
+
+ for (int n = 0; n < N; n++) {
+ uint32_t offset = n * H * W;
+
+    cc2d::init_labeling<<<grid, block, 0, stream>>>(
+        labels.data_ptr<int32_t>() + offset, W, H);
+    cc2d::merge<<<grid, block, 0, stream>>>(
+        inputs.data_ptr<uint8_t>() + offset,
+        labels.data_ptr<int32_t>() + offset,
+        W,
+        H);
+    cc2d::compression<<<grid, block, 0, stream>>>(
+        labels.data_ptr<int32_t>() + offset, W, H);
+    cc2d::final_labeling<<<grid, block, 0, stream>>>(
+        inputs.data_ptr<uint8_t>() + offset,
+        labels.data_ptr<int32_t>() + offset,
+        W,
+        H);
+
+    // get the counting of each pixel
+    cc2d::init_counting<<<grid_count, block_count, 0, stream>>>(
+        labels.data_ptr<int32_t>() + offset,
+        counts_init.data_ptr<int32_t>() + offset,
+        W,
+        H);
+    cc2d::final_counting<<<grid_count, block_count, 0, stream>>>(
+        labels.data_ptr<int32_t>() + offset,
+        counts_init.data_ptr<int32_t>() + offset,
+        counts_final.data_ptr<int32_t>() + offset,
+        W,
+        H);
+ }
+
+ // returned values are [labels, counts]
+  std::vector<torch::Tensor> outputs;
+ outputs.push_back(labels);
+ outputs.push_back(counts_final);
+ return outputs;
+}
+
+PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
+ m.def(
+ "get_connected_componnets",
+ &get_connected_componnets,
+ "get_connected_componnets");
+}
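
This kernel can be exercised standalone through PyTorch's JIT extension loader (in a packaged build it would normally be compiled into an extension module such as `sam2._C`; both that module name and the path below are assumptions). Note the binding keeps the upstream spelling `get_connected_componnets`, and the input must be a uint8 `[N, 1, H, W]` CUDA tensor with even height and width:

import torch
from torch.utils.cpp_extension import load

# Compile the .cu file on the fly; requires a CUDA toolchain and a GPU.
cc = load(name="cc2d", sources=["sam2/csrc/connected_components.cu"])

mask = (torch.rand(1, 1, 64, 64, device="cuda") > 0.5).to(torch.uint8)
labels, counts = cc.get_connected_componnets(mask)
print(labels.unique().numel() - 1, "components")  # label 0 is background
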
diff --git a/sam2/loss_fns.py b/sam2/loss_fns.py
new file mode 100644
index 0000000000000000000000000000000000000000..86add58ef5102d5e46d11a59c5e7d9278bd00df6
--- /dev/null
+++ b/sam2/loss_fns.py
@@ -0,0 +1,288 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+
+from collections import defaultdict
+from typing import Dict, List
+
+import torch
+import torch.distributed
+import torch.nn as nn
+import torch.nn.functional as F
+from nncore.engine import comm
+
+
+def dice_loss(inputs, targets, num_objects, loss_on_multimask=False):
+ """
+ Compute the DICE loss, similar to generalized IOU for masks
+ Args:
+ inputs: A float tensor of arbitrary shape.
+ The predictions for each example.
+ targets: A float tensor with the same shape as inputs. Stores the binary
+ classification label for each element in inputs
+ (0 for the negative class and 1 for the positive class).
+ num_objects: Number of objects in the batch
+ loss_on_multimask: True if multimask prediction is enabled
+ Returns:
+ Dice loss tensor
+ """
+ inputs = inputs.sigmoid()
+ if loss_on_multimask:
+ # inputs and targets are [N, M, H, W] where M corresponds to multiple predicted masks
+ assert inputs.dim() == 4 and targets.dim() == 4
+ # flatten spatial dimension while keeping multimask channel dimension
+ inputs = inputs.flatten(2)
+ targets = targets.flatten(2)
+ numerator = 2 * (inputs * targets).sum(-1)
+ else:
+ inputs = inputs.flatten(1)
+ numerator = 2 * (inputs * targets).sum(1)
+ denominator = inputs.sum(-1) + targets.sum(-1)
+ loss = 1 - (numerator + 1) / (denominator + 1)
+ if loss_on_multimask:
+ return loss / num_objects
+ return loss.sum() / num_objects
+
+
+def sigmoid_focal_loss(
+ inputs,
+ targets,
+ num_objects,
+ alpha: float = 0.25,
+ gamma: float = 2,
+ loss_on_multimask=False,
+):
+ """
+ Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002.
+ Args:
+ inputs: A float tensor of arbitrary shape.
+ The predictions for each example.
+ targets: A float tensor with the same shape as inputs. Stores the binary
+ classification label for each element in inputs
+ (0 for the negative class and 1 for the positive class).
+ num_objects: Number of objects in the batch
+        alpha: (optional) Weighting factor in range (0,1) to balance
+            positive vs negative examples. Default = 0.25.
+ gamma: Exponent of the modulating factor (1 - p_t) to
+ balance easy vs hard examples.
+ loss_on_multimask: True if multimask prediction is enabled
+ Returns:
+ focal loss tensor
+ """
+ prob = inputs.sigmoid()
+ ce_loss = F.binary_cross_entropy_with_logits(inputs, targets, reduction="none")
+ p_t = prob * targets + (1 - prob) * (1 - targets)
+ loss = ce_loss * ((1 - p_t)**gamma)
+
+ if alpha >= 0:
+ alpha_t = alpha * targets + (1 - alpha) * (1 - targets)
+ loss = alpha_t * loss
+
+ if loss_on_multimask:
+ # loss is [N, M, H, W] where M corresponds to multiple predicted masks
+ assert loss.dim() == 4
+ return loss.flatten(2).mean(-1) / num_objects # average over spatial dims
+ return loss.mean(1).sum() / num_objects
+
+
+def iou_loss(inputs, targets, pred_ious, num_objects, loss_on_multimask=False, use_l1_loss=False):
+ """
+ Args:
+ inputs: A float tensor of arbitrary shape.
+ The predictions for each example.
+ targets: A float tensor with the same shape as inputs. Stores the binary
+ classification label for each element in inputs
+ (0 for the negative class and 1 for the positive class).
+ pred_ious: A float tensor containing the predicted IoUs scores per mask
+ num_objects: Number of objects in the batch
+ loss_on_multimask: True if multimask prediction is enabled
+        use_l1_loss: Whether to use L1 loss instead of MSE loss
+ Returns:
+ IoU loss tensor
+ """
+ assert inputs.dim() == 4 and targets.dim() == 4
+ pred_mask = inputs.flatten(2) > 0
+ gt_mask = targets.flatten(2) > 0
+ area_i = torch.sum(pred_mask & gt_mask, dim=-1).float()
+ area_u = torch.sum(pred_mask | gt_mask, dim=-1).float()
+ actual_ious = area_i / torch.clamp(area_u, min=1.0)
+
+ if use_l1_loss:
+ loss = F.l1_loss(pred_ious, actual_ious, reduction="none")
+ else:
+ loss = F.mse_loss(pred_ious, actual_ious, reduction="none")
+ if loss_on_multimask:
+ return loss / num_objects
+ return loss.sum() / num_objects
+
+
+class MultiStepMultiMasksAndIous(nn.Module):
+
+ def __init__(
+ self,
+ weight_dict,
+ focal_alpha=0.25,
+ focal_gamma=2,
+ supervise_all_iou=False,
+ iou_use_l1_loss=False,
+ pred_obj_scores=False,
+ focal_gamma_obj_score=0.0,
+ focal_alpha_obj_score=-1,
+ ):
+ """
+ This class computes the multi-step multi-mask and IoU losses.
+ Args:
+ weight_dict: dict containing weights for focal, dice, iou losses
+ focal_alpha: alpha for sigmoid focal loss
+ focal_gamma: gamma for sigmoid focal loss
+ supervise_all_iou: if True, back-prop iou losses for all predicted masks
+ iou_use_l1_loss: use L1 loss instead of MSE loss for iou
+ pred_obj_scores: if True, compute loss for object scores
+ focal_gamma_obj_score: gamma for sigmoid focal loss on object scores
+ focal_alpha_obj_score: alpha for sigmoid focal loss on object scores
+ """
+
+ super().__init__()
+ self.weight_dict = weight_dict
+ self.focal_alpha = focal_alpha
+ self.focal_gamma = focal_gamma
+ assert "loss_mask" in self.weight_dict
+ assert "loss_dice" in self.weight_dict
+ assert "loss_iou" in self.weight_dict
+ if "loss_class" not in self.weight_dict:
+ self.weight_dict["loss_class"] = 0.0
+
+ self.focal_alpha_obj_score = focal_alpha_obj_score
+ self.focal_gamma_obj_score = focal_gamma_obj_score
+ self.supervise_all_iou = supervise_all_iou
+ self.iou_use_l1_loss = iou_use_l1_loss
+ self.pred_obj_scores = pred_obj_scores
+
+ def forward(self, outs_batch: List[Dict], targets_batch: torch.Tensor):
+ assert len(outs_batch) == len(targets_batch)
+ num_objects = torch.tensor((targets_batch.shape[1]), device=targets_batch.device,
+ dtype=torch.float) # Number of objects is fixed within a batch
+ if comm.is_distributed():
+ torch.distributed.all_reduce(num_objects)
+ num_objects = torch.clamp(num_objects / comm.get_world_size(), min=1).item()
+
+ losses = defaultdict(int)
+ for outs, targets in zip(outs_batch, targets_batch):
+ cur_losses = self._forward(outs, targets, num_objects)
+ for k, v in cur_losses.items():
+ losses[k] += v
+
+ return losses
+
+ def _forward(self, outputs: Dict, targets: torch.Tensor, num_objects):
+ """
+        Compute the losses related to the masks: the focal loss and the dice loss,
+        as well as the MAE or MSE loss between predicted IoUs and actual IoUs.
+
+ Here "multistep_pred_multimasks_high_res" is a list of multimasks (tensors
+ of shape [N, M, H, W], where M could be 1 or larger, corresponding to
+        one or multiple predicted masks from a click).
+
+ We back-propagate focal, dice losses only on the prediction channel
+ with the lowest focal+dice loss between predicted mask and ground-truth.
+        If `supervise_all_iou` is True, we back-propagate IoU losses for all predicted masks.
+ """
+
+ target_masks = targets.unsqueeze(1).float()
+ assert target_masks.dim() == 4 # [N, 1, H, W]
+ src_masks_list = outputs["multistep_pred_multimasks_high_res"]
+ ious_list = outputs["multistep_pred_ious"]
+ object_score_logits_list = outputs["multistep_object_score_logits"]
+
+ assert len(src_masks_list) == len(ious_list)
+ assert len(object_score_logits_list) == len(ious_list)
+
+ # accumulate the loss over prediction steps
+ losses = {"loss_mask": 0, "loss_dice": 0, "loss_iou": 0, "loss_class": 0}
+ for src_masks, ious, object_score_logits in zip(src_masks_list, ious_list, object_score_logits_list):
+ self._update_losses(losses, src_masks, target_masks, ious, num_objects, object_score_logits)
+ losses["core_loss"] = self.reduce_loss(losses)
+ return losses
+
+ def _update_losses(self, losses, src_masks, target_masks, ious, num_objects, object_score_logits):
+ target_masks = target_masks.expand_as(src_masks)
+ # get focal, dice and iou loss on all output masks in a prediction step
+ loss_multimask = sigmoid_focal_loss(
+ src_masks,
+ target_masks,
+ num_objects,
+ alpha=self.focal_alpha,
+ gamma=self.focal_gamma,
+ loss_on_multimask=True,
+ )
+ loss_multidice = dice_loss(src_masks, target_masks, num_objects, loss_on_multimask=True)
+ if not self.pred_obj_scores:
+ loss_class = torch.tensor(0.0, dtype=loss_multimask.dtype, device=loss_multimask.device)
+ target_obj = torch.ones(
+ loss_multimask.shape[0],
+ 1,
+ dtype=loss_multimask.dtype,
+ device=loss_multimask.device,
+ )
+ else:
+ target_obj = torch.any((target_masks[:, 0] > 0).flatten(1), dim=-1)[..., None].float()
+ loss_class = sigmoid_focal_loss(
+ object_score_logits,
+ target_obj,
+ num_objects,
+ alpha=self.focal_alpha_obj_score,
+ gamma=self.focal_gamma_obj_score,
+ )
+
+ loss_multiiou = iou_loss(
+ src_masks,
+ target_masks,
+ ious,
+ num_objects,
+ loss_on_multimask=True,
+ use_l1_loss=self.iou_use_l1_loss,
+ )
+ assert loss_multimask.dim() == 2
+ assert loss_multidice.dim() == 2
+ assert loss_multiiou.dim() == 2
+ if loss_multimask.size(1) > 1:
+ # take the mask indices with the smallest focal + dice loss for back propagation
+ loss_combo = (
+ loss_multimask * self.weight_dict["loss_mask"] + loss_multidice * self.weight_dict["loss_dice"])
+ best_loss_inds = torch.argmin(loss_combo, dim=-1)
+ batch_inds = torch.arange(loss_combo.size(0), device=loss_combo.device)
+ loss_mask = loss_multimask[batch_inds, best_loss_inds].unsqueeze(1)
+ loss_dice = loss_multidice[batch_inds, best_loss_inds].unsqueeze(1)
+ # calculate the iou prediction and slot losses only in the index
+ # with the minimum loss for each mask (to be consistent w/ SAM)
+ if self.supervise_all_iou:
+ loss_iou = loss_multiiou.mean(dim=-1).unsqueeze(1)
+ else:
+ loss_iou = loss_multiiou[batch_inds, best_loss_inds].unsqueeze(1)
+ else:
+ loss_mask = loss_multimask
+ loss_dice = loss_multidice
+ loss_iou = loss_multiiou
+
+ # backprop focal, dice and iou loss only if obj present
+ loss_mask = loss_mask * target_obj
+ loss_dice = loss_dice * target_obj
+ loss_iou = loss_iou * target_obj
+
+ # sum over batch dimension (note that the losses are already divided by num_objects)
+ losses["loss_mask"] += loss_mask.sum()
+ losses["loss_dice"] += loss_dice.sum()
+ losses["loss_iou"] += loss_iou.sum()
+ losses["loss_class"] += loss_class
+
+ def reduce_loss(self, losses):
+ reduced_loss = 0.0
+ for loss_key, weight in self.weight_dict.items():
+ if loss_key not in losses:
+ raise ValueError(f"{type(self)} doesn't compute {loss_key}")
+ if weight != 0:
+ reduced_loss += losses[loss_key] * weight
+
+ return reduced_loss
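+
+
+# Usage sketch (illustrative; the shapes are assumptions, not taken from the
+# training code): `outs_batch` is a list of per-frame output dicts and
+# `targets_batch` has shape [num_frames, num_objects, H, W]:
+#
+#   losses = criterion(outs_batch, targets_batch)
+#   losses["core_loss"].backward()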
diff --git a/sam2/modeling/__init__.py b/sam2/modeling/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..5277f46157403e47fd830fc519144b97ef69d4ae
--- /dev/null
+++ b/sam2/modeling/__init__.py
@@ -0,0 +1,5 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
diff --git a/sam2/modeling/backbones/__init__.py b/sam2/modeling/backbones/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..5277f46157403e47fd830fc519144b97ef69d4ae
--- /dev/null
+++ b/sam2/modeling/backbones/__init__.py
@@ -0,0 +1,5 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
diff --git a/sam2/modeling/backbones/hieradet.py b/sam2/modeling/backbones/hieradet.py
new file mode 100644
index 0000000000000000000000000000000000000000..590a9b0a34aa9ca6e71e817c80b7f3795959a3ff
--- /dev/null
+++ b/sam2/modeling/backbones/hieradet.py
@@ -0,0 +1,312 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+
+import logging
+from functools import partial
+from typing import List, Tuple, Union
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from iopath.common.file_io import g_pathmgr
+
+from sam2.modeling.backbones.utils import (
+ PatchEmbed,
+ window_partition,
+ window_unpartition,
+)
+
+from sam2.modeling.sam2_utils import DropPath, MLP
+
+
+def do_pool(x: torch.Tensor, pool: nn.Module, norm: nn.Module = None) -> torch.Tensor:
+ if pool is None:
+ return x
+ # (B, H, W, C) -> (B, C, H, W)
+ x = x.permute(0, 3, 1, 2)
+ x = pool(x.float()).to(x.dtype)
+ # (B, C, H', W') -> (B, H', W', C)
+ x = x.permute(0, 2, 3, 1)
+ if norm:
+ x = norm(x)
+
+ return x
+
+
+class MultiScaleAttention(nn.Module):
+
+ def __init__(
+ self,
+ dim: int,
+ dim_out: int,
+ num_heads: int,
+ q_pool: nn.Module = None,
+ ):
+ super().__init__()
+
+ self.dim = dim
+ self.dim_out = dim_out
+ self.num_heads = num_heads
+ self.q_pool = q_pool
+ self.qkv = nn.Linear(dim, dim_out * 3)
+ self.proj = nn.Linear(dim_out, dim_out)
+
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ B, H, W, _ = x.shape
+ # qkv with shape (B, H * W, 3, nHead, C)
+ qkv = self.qkv(x).reshape(B, H * W, 3, self.num_heads, -1)
+ # q, k, v with shape (B, H * W, nheads, C)
+ q, k, v = torch.unbind(qkv, 2)
+
+ # Q pooling (for downsample at stage changes)
+ if self.q_pool:
+ q = do_pool(q.reshape(B, H, W, -1), self.q_pool)
+ H, W = q.shape[1:3] # downsampled shape
+ q = q.reshape(B, H * W, self.num_heads, -1)
+
+ # Torch's SDPA expects [B, nheads, H*W, C] so we transpose
+ x = F.scaled_dot_product_attention(
+ q.transpose(1, 2),
+ k.transpose(1, 2),
+ v.transpose(1, 2),
+ )
+ # Transpose back
+ x = x.transpose(1, 2)
+ x = x.reshape(B, H, W, -1)
+
+ x = self.proj(x)
+
+ return x
+
+
+class MultiScaleBlock(nn.Module):
+
+ def __init__(
+ self,
+ dim: int,
+ dim_out: int,
+ num_heads: int,
+ mlp_ratio: float = 4.0,
+ drop_path: float = 0.0,
+ norm_layer: Union[nn.Module, str] = "LayerNorm",
+ q_stride: Tuple[int, int] = None,
+ act_layer: nn.Module = nn.GELU,
+ window_size: int = 0,
+ ):
+ super().__init__()
+
+ if isinstance(norm_layer, str):
+ norm_layer = partial(getattr(nn, norm_layer), eps=1e-6)
+
+ self.dim = dim
+ self.dim_out = dim_out
+ self.norm1 = norm_layer(dim)
+
+ self.window_size = window_size
+
+ self.pool, self.q_stride = None, q_stride
+ if self.q_stride:
+ self.pool = nn.MaxPool2d(kernel_size=q_stride, stride=q_stride, ceil_mode=False)
+
+ self.attn = MultiScaleAttention(
+ dim,
+ dim_out,
+ num_heads=num_heads,
+ q_pool=self.pool,
+ )
+ self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
+
+ self.norm2 = norm_layer(dim_out)
+ self.mlp = MLP(
+ dim_out,
+ int(dim_out * mlp_ratio),
+ dim_out,
+ num_layers=2,
+ activation=act_layer,
+ )
+
+ if dim != dim_out:
+ self.proj = nn.Linear(dim, dim_out)
+
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ shortcut = x # B, H, W, C
+ x = self.norm1(x)
+
+ # Skip connection
+ if self.dim != self.dim_out:
+ shortcut = do_pool(self.proj(x), self.pool)
+
+ # Window partition
+ window_size = self.window_size
+ if window_size > 0:
+ H, W = x.shape[1], x.shape[2]
+ x, pad_hw = window_partition(x, window_size)
+
+ # Window Attention + Q Pooling (if stage change)
+ # Apply chunks to reduce memory
+ CHUNK_SIZE, batch_size = 64, x.size(0)
+ if batch_size > CHUNK_SIZE:
+ chunks = []
+ for i in range(0, batch_size, CHUNK_SIZE):
+ chunks.append(self.attn(x[i:i + CHUNK_SIZE]))
+ x = torch.cat(chunks)
+ assert x.size(0) == batch_size
+ else:
+ x = self.attn(x)
+
+ if self.q_stride:
+ # Shapes have changed due to Q pooling
+ window_size = self.window_size // self.q_stride[0]
+ H, W = shortcut.shape[1:3]
+
+ pad_h = (window_size - H % window_size) % window_size
+ pad_w = (window_size - W % window_size) % window_size
+ pad_hw = (H + pad_h, W + pad_w)
+
+ # Reverse window partition
+ if self.window_size > 0:
+ x = window_unpartition(x, window_size, pad_hw, (H, W))
+
+ x = shortcut + self.drop_path(x)
+ # MLP
+ x = x + self.drop_path(self.mlp(self.norm2(x)))
+ return x
+
+
+class Hiera(nn.Module):
+ """
+ Reference: https://arxiv.org/abs/2306.00989
+ """
+
+ def __init__(
+ self,
+ embed_dim: int = 96, # initial embed dim
+ num_heads: int = 1, # initial number of heads
+ drop_path_rate: float = 0.0, # stochastic depth
+ q_pool: int = 3, # number of q_pool stages
+ q_stride: Tuple[int, int] = (2, 2), # downsample stride bet. stages
+ stages: Tuple[int, ...] = (2, 3, 16, 3), # blocks per stage
+ dim_mul: float = 2.0, # dim_mul factor at stage shift
+ head_mul: float = 2.0, # head_mul factor at stage shift
+ window_pos_embed_bkg_spatial_size: Tuple[int, int] = (14, 14),
+ # window size per stage, when not using global att.
+ window_spec: Tuple[int, ...] = (
+ 8,
+ 4,
+ 14,
+ 7,
+ ),
+ # global attn in these blocks
+ global_att_blocks: Tuple[int, ...] = (
+ 12,
+ 16,
+ 20,
+ ),
+ weights_path=None,
+ return_interm_layers=True, # return feats from every stage
+ ):
+ super().__init__()
+
+ assert len(stages) == len(window_spec)
+ self.window_spec = window_spec
+
+ depth = sum(stages)
+ self.q_stride = q_stride
+ self.stage_ends = [sum(stages[:i]) - 1 for i in range(1, len(stages) + 1)]
+ assert 0 <= q_pool <= len(self.stage_ends[:-1])
+ self.q_pool_blocks = [x + 1 for x in self.stage_ends[:-1]][:q_pool]
+ self.return_interm_layers = return_interm_layers
+
+        self.patch_embed = PatchEmbed(embed_dim=embed_dim)
+ # Which blocks have global att?
+ self.global_att_blocks = global_att_blocks
+
+ # Windowed positional embedding (https://arxiv.org/abs/2311.05613)
+ self.window_pos_embed_bkg_spatial_size = window_pos_embed_bkg_spatial_size
+ self.pos_embed = nn.Parameter(torch.zeros(1, embed_dim, *self.window_pos_embed_bkg_spatial_size))
+ self.pos_embed_window = nn.Parameter(torch.zeros(1, embed_dim, self.window_spec[0], self.window_spec[0]))
+
+ dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule
+
+ cur_stage = 1
+ self.blocks = nn.ModuleList()
+
+ for i in range(depth):
+ dim_out = embed_dim
+            # window_size lags by one block: the first block of each new stage
+            # still uses the previous stage's window size, since window_size is
+            # read before cur_stage is incremented below
+ window_size = self.window_spec[cur_stage - 1]
+
+ if self.global_att_blocks is not None:
+ window_size = 0 if i in self.global_att_blocks else window_size
+
+ if i - 1 in self.stage_ends:
+ dim_out = int(embed_dim * dim_mul)
+ num_heads = int(num_heads * head_mul)
+ cur_stage += 1
+
+ block = MultiScaleBlock(
+ dim=embed_dim,
+ dim_out=dim_out,
+ num_heads=num_heads,
+ drop_path=dpr[i],
+ q_stride=self.q_stride if i in self.q_pool_blocks else None,
+ window_size=window_size,
+ )
+
+ embed_dim = dim_out
+ self.blocks.append(block)
+
+        self.channel_list = ([self.blocks[i].dim_out for i in self.stage_ends[::-1]]
+                             if return_interm_layers else [self.blocks[-1].dim_out])
+
+ if weights_path is not None:
+ with g_pathmgr.open(weights_path, "rb") as f:
+ chkpt = torch.load(f, map_location="cpu")
+ logging.info("loading Hiera", self.load_state_dict(chkpt, strict=False))
+
+ def _get_pos_embed(self, hw: Tuple[int, int]) -> torch.Tensor:
+ h, w = hw
+ window_embed = self.pos_embed_window
+ pos_embed = F.interpolate(self.pos_embed.float(), size=(h, w), mode="bicubic").to(self.pos_embed.dtype)
+ pos_embed = pos_embed + window_embed.tile([x // y for x, y in zip(pos_embed.shape, window_embed.shape)])
+ pos_embed = pos_embed.permute(0, 2, 3, 1)
+ return pos_embed
+
+ def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
+ x = self.patch_embed(x)
+ # x: (B, H, W, C)
+
+ # Add pos embed
+ x = x + self._get_pos_embed(x.shape[1:3])
+
+ outputs = []
+ for i, blk in enumerate(self.blocks):
+ x = blk(x)
+ if (i == self.stage_ends[-1]) or (i in self.stage_ends and self.return_interm_layers):
+ feats = x.permute(0, 3, 1, 2)
+ outputs.append(feats)
+
+ return outputs
+
+ def get_layer_id(self, layer_name):
+ # https://github.com/microsoft/unilm/blob/master/beit/optim_factory.py#L33
+ num_layers = self.get_num_layers()
+
+ if layer_name.find("rel_pos") != -1:
+ return num_layers + 1
+ elif layer_name.find("pos_embed") != -1:
+ return 0
+ elif layer_name.find("patch_embed") != -1:
+ return 0
+ elif layer_name.find("blocks") != -1:
+ return int(layer_name.split("blocks")[1].split(".")[1]) + 1
+ else:
+ return num_layers + 1
+
+ def get_num_layers(self) -> int:
+ return len(self.blocks)
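+
+
+# Instantiation sketch (the arguments mirror the constructor defaults; the
+# 1024x1024 input size is an assumption for illustration):
+#   trunk = Hiera(embed_dim=96, num_heads=1, stages=(2, 3, 16, 3))
+#   feats = trunk(torch.randn(1, 3, 1024, 1024))
+#   # -> 4 feature maps with strides 4/8/16/32 and 96/192/384/768 channels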
diff --git a/sam2/modeling/backbones/image_encoder.py b/sam2/modeling/backbones/image_encoder.py
new file mode 100644
index 0000000000000000000000000000000000000000..ea9bde46af0fed3d9574a8b0a5d3a00d93aa9f92
--- /dev/null
+++ b/sam2/modeling/backbones/image_encoder.py
@@ -0,0 +1,145 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+
+from typing import List, Optional
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+
+class ImageEncoder(nn.Module):
+
+ def __init__(
+ self,
+ trunk: nn.Module,
+ neck: nn.Module,
+ scalp: int = 0,
+ ):
+ super().__init__()
+ self.trunk = trunk
+ self.neck = neck
+ self.scalp = scalp
+ assert (
+ self.trunk.channel_list == self.neck.backbone_channel_list
+ ), f"Channel dims of trunk and neck do not match. Trunk: {self.trunk.channel_list}, neck: {self.neck.backbone_channel_list}"
+
+ def forward(self, sample: torch.Tensor):
+ # Forward through backbone
+ # features, pos = self.neck(self.trunk(sample))
+
+ # NOTE: use chunk to reduce memory ------------------------------
+ features, pos, chunk_size = [], [], 16
+ for base_idx in range(0, sample.size(0), chunk_size):
+ chunk_features, chunk_pos = self.neck(self.trunk(sample[base_idx:base_idx + chunk_size]))
+ features.append(chunk_features)
+ pos.append(chunk_pos)
+ features = [torch.cat([e[i] for e in features]) for i in range(len(features[0]))]
+ pos = [torch.cat([e[i] for e in pos]) for i in range(len(pos[0]))]
+ assert features[0].size(0) == pos[0].size(0) == sample.size(0)
+ # ---------------------------------------------------------------
+
+ if self.scalp > 0:
+ # Discard the lowest resolution features
+ features, pos = features[:-self.scalp], pos[:-self.scalp]
+
+ src = features[-1]
+ output = {
+ "vision_features": src,
+ "vision_pos_enc": pos,
+ "backbone_fpn": features,
+ }
+ return output
+
+
+class FpnNeck(nn.Module):
+ """
+ A modified variant of Feature Pyramid Network (FPN) neck
+ (we remove output conv and also do bicubic interpolation similar to ViT
+ pos embed interpolation)
+ """
+
+ def __init__(
+ self,
+ position_encoding: nn.Module,
+ d_model: int,
+ backbone_channel_list: List[int],
+ kernel_size: int = 1,
+ stride: int = 1,
+ padding: int = 0,
+ fpn_interp_model: str = "bilinear",
+ fuse_type: str = "sum",
+ fpn_top_down_levels: Optional[List[int]] = None,
+ ):
+ """Initialize the neck
+ :param trunk: the backbone
+ :param position_encoding: the positional encoding to use
+ :param d_model: the dimension of the model
+ :param neck_norm: the normalization to use
+ """
+ super().__init__()
+ self.position_encoding = position_encoding
+ self.convs = nn.ModuleList()
+ self.backbone_channel_list = backbone_channel_list
+ self.d_model = d_model
+ for dim in backbone_channel_list:
+ current = nn.Sequential()
+ current.add_module(
+ "conv",
+ nn.Conv2d(
+ in_channels=dim,
+ out_channels=d_model,
+ kernel_size=kernel_size,
+ stride=stride,
+ padding=padding,
+ ),
+ )
+
+ self.convs.append(current)
+ self.fpn_interp_model = fpn_interp_model
+ assert fuse_type in ["sum", "avg"]
+ self.fuse_type = fuse_type
+
+ # levels to have top-down features in its outputs
+ # e.g. if fpn_top_down_levels is [2, 3], then only outputs of level 2 and 3
+ # have top-down propagation, while outputs of level 0 and level 1 have only
+ # lateral features from the same backbone level.
+ if fpn_top_down_levels is None:
+ # default is to have top-down features on all levels
+ fpn_top_down_levels = range(len(self.convs))
+ self.fpn_top_down_levels = list(fpn_top_down_levels)
+
+ def forward(self, xs: List[torch.Tensor]):
+
+ out = [None] * len(self.convs)
+ pos = [None] * len(self.convs)
+ assert len(xs) == len(self.convs)
+ # fpn forward pass
+ # see https://github.com/facebookresearch/detectron2/blob/main/detectron2/modeling/backbone/fpn.py
+ prev_features = None
+ # forward in top-down order (from low to high resolution)
+ n = len(self.convs) - 1
+ for i in range(n, -1, -1):
+ x = xs[i]
+ lateral_features = self.convs[n - i](x)
+ if i in self.fpn_top_down_levels and prev_features is not None:
+ top_down_features = F.interpolate(
+ prev_features.float(),
+ scale_factor=2.0,
+ mode=self.fpn_interp_model,
+ align_corners=(None if self.fpn_interp_model == "nearest" else False),
+ antialias=False,
+ ).to(prev_features.dtype)
+ prev_features = lateral_features + top_down_features
+ if self.fuse_type == "avg":
+ prev_features /= 2
+ else:
+ prev_features = lateral_features
+ x_out = prev_features
+ out[i] = x_out
+ pos[i] = self.position_encoding(x_out).to(x_out.dtype)
+
+ return out, pos
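+
+
+# Shape sketch (illustrative; the channel list matches the default Hiera trunk
+# above, while d_model=256 is an assumption): with
+# backbone_channel_list=[768, 384, 192, 96], `forward` maps the four trunk
+# feature maps to four d_model-channel maps plus matching positional encodings,
+# fusing top-down features from low to high resolution.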
diff --git a/sam2/modeling/backbones/utils.py b/sam2/modeling/backbones/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..562a3b5ea73f322745befb3aac691c67ee9ce47d
--- /dev/null
+++ b/sam2/modeling/backbones/utils.py
@@ -0,0 +1,88 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+"""Some utilities for backbones, in particular for windowing"""
+
+from typing import Tuple
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+
+def window_partition(x, window_size):
+ """
+ Partition into non-overlapping windows with padding if needed.
+ Args:
+ x (tensor): input tokens with [B, H, W, C].
+ window_size (int): window size.
+ Returns:
+ windows: windows after partition with [B * num_windows, window_size, window_size, C].
+ (Hp, Wp): padded height and width before partition
+ """
+ B, H, W, C = x.shape
+
+ pad_h = (window_size - H % window_size) % window_size
+ pad_w = (window_size - W % window_size) % window_size
+ if pad_h > 0 or pad_w > 0:
+ x = F.pad(x, (0, 0, 0, pad_w, 0, pad_h))
+ Hp, Wp = H + pad_h, W + pad_w
+
+ x = x.view(B, Hp // window_size, window_size, Wp // window_size, window_size, C)
+ windows = x.permute(0, 1, 3, 2, 4, 5).reshape(-1, window_size, window_size, C)
+ return windows, (Hp, Wp)
+
+
+def window_unpartition(windows, window_size, pad_hw, hw):
+ """
+    Reverse window partition back into original sequences, removing padding.
+    Args:
+        windows (tensor): input tokens with [B * num_windows, window_size, window_size, C].
+ window_size (int): window size.
+ pad_hw (Tuple): padded height and width (Hp, Wp).
+ hw (Tuple): original height and width (H, W) before padding.
+ Returns:
+ x: unpartitioned sequences with [B, H, W, C].
+ """
+ Hp, Wp = pad_hw
+ H, W = hw
+ B = windows.shape[0] // (Hp * Wp // window_size // window_size)
+ x = windows.reshape(B, Hp // window_size, Wp // window_size, window_size, window_size, -1)
+ x = x.permute(0, 1, 3, 2, 4, 5).reshape(B, Hp, Wp, -1)
+
+ if Hp > H or Wp > W:
+ x = x[:, :H, :W, :]
+ return x
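+
+
+# Round-trip sketch (sizes are illustrative): window_partition pads (H, W) up
+# to multiples of window_size, so window_unpartition needs both pad_hw and hw:
+#   windows, pad_hw = window_partition(x, 8)               # x: (B, 50, 50, C)
+#   x2 = window_unpartition(windows, 8, pad_hw, (50, 50))  # x2: (B, 50, 50, C)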
+
+
+class PatchEmbed(nn.Module):
+ """
+ Image to Patch Embedding.
+ """
+
+ def __init__(
+ self,
+ kernel_size: Tuple[int, ...] = (7, 7),
+ stride: Tuple[int, ...] = (4, 4),
+ padding: Tuple[int, ...] = (3, 3),
+ in_chans: int = 3,
+ embed_dim: int = 768,
+ ):
+ """
+ Args:
+ kernel_size (Tuple): kernel size of the projection layer.
+ stride (Tuple): stride of the projection layer.
+ padding (Tuple): padding size of the projection layer.
+ in_chans (int): Number of input image channels.
+            embed_dim (int): Patch embedding dimension.
+ """
+ super().__init__()
+ self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=kernel_size, stride=stride, padding=padding)
+
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ x = self.proj(x)
+ # B C H W -> B H W C
+ x = x.permute(0, 2, 3, 1)
+ return x
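+
+
+# Shape sketch (illustrative): with the default 7x7 kernel, stride 4 and
+# padding 3, a (B, 3, 1024, 1024) image becomes a (B, 256, 256, 768) token grid.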
diff --git a/sam2/modeling/memory_attention.py b/sam2/modeling/memory_attention.py
new file mode 100644
index 0000000000000000000000000000000000000000..a14f6bebb146942f657ea575209a2a4366b2d193
--- /dev/null
+++ b/sam2/modeling/memory_attention.py
@@ -0,0 +1,168 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+
+from typing import Optional
+
+import torch
+from torch import nn, Tensor
+
+from sam2.modeling.sam.transformer import RoPEAttention
+
+from sam2.modeling.sam2_utils import get_activation_fn, get_clones
+
+
+class MemoryAttentionLayer(nn.Module):
+
+ def __init__(
+ self,
+ activation: str,
+ cross_attention: nn.Module,
+ d_model: int,
+ dim_feedforward: int,
+ dropout: float,
+ pos_enc_at_attn: bool,
+ pos_enc_at_cross_attn_keys: bool,
+ pos_enc_at_cross_attn_queries: bool,
+ self_attention: nn.Module,
+ ):
+ super().__init__()
+ self.d_model = d_model
+ self.dim_feedforward = dim_feedforward
+ self.dropout_value = dropout
+ self.self_attn = self_attention
+ self.cross_attn_image = cross_attention
+
+ # Implementation of Feedforward model
+ self.linear1 = nn.Linear(d_model, dim_feedforward)
+ self.dropout = nn.Dropout(dropout)
+ self.linear2 = nn.Linear(dim_feedforward, d_model)
+
+ self.norm1 = nn.LayerNorm(d_model)
+ self.norm2 = nn.LayerNorm(d_model)
+ self.norm3 = nn.LayerNorm(d_model)
+ self.dropout1 = nn.Dropout(dropout)
+ self.dropout2 = nn.Dropout(dropout)
+ self.dropout3 = nn.Dropout(dropout)
+
+ self.activation_str = activation
+ self.activation = get_activation_fn(activation)
+
+ # Where to add pos enc
+ self.pos_enc_at_attn = pos_enc_at_attn
+ self.pos_enc_at_cross_attn_queries = pos_enc_at_cross_attn_queries
+ self.pos_enc_at_cross_attn_keys = pos_enc_at_cross_attn_keys
+
+ def _forward_sa(self, tgt, query_pos):
+ # Self-Attention
+ tgt2 = self.norm1(tgt)
+ q = k = tgt2 + query_pos if self.pos_enc_at_attn else tgt2
+ tgt2 = self.self_attn(q, k, v=tgt2)
+ tgt = tgt + self.dropout1(tgt2)
+ return tgt
+
+ def _forward_ca(self, tgt, memory, query_pos, pos, num_k_exclude_rope=0):
+ kwds = {}
+ if num_k_exclude_rope > 0:
+ assert isinstance(self.cross_attn_image, RoPEAttention)
+ kwds = {"num_k_exclude_rope": num_k_exclude_rope}
+
+ # Cross-Attention
+ tgt2 = self.norm2(tgt)
+ tgt2 = self.cross_attn_image(
+ q=tgt2 + query_pos if self.pos_enc_at_cross_attn_queries else tgt2,
+ k=memory + pos if self.pos_enc_at_cross_attn_keys else memory,
+ v=memory,
+ **kwds,
+ )
+ tgt = tgt + self.dropout2(tgt2)
+ return tgt
+
+ def forward(
+ self,
+ tgt,
+ memory,
+ pos: Optional[Tensor] = None,
+ query_pos: Optional[Tensor] = None,
+ num_k_exclude_rope: int = 0,
+ ) -> torch.Tensor:
+
+ # Self-Attn, Cross-Attn
+ tgt = self._forward_sa(tgt, query_pos)
+ tgt = self._forward_ca(tgt, memory, query_pos, pos, num_k_exclude_rope)
+ # MLP
+ tgt2 = self.norm3(tgt)
+ tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2))))
+ tgt = tgt + self.dropout3(tgt2)
+ return tgt
+
+
+class MemoryAttention(nn.Module):
+
+ def __init__(
+ self,
+ d_model: int,
+ pos_enc_at_input: bool,
+ layer: nn.Module,
+ num_layers: int,
+ batch_first: bool = True, # Do layers expect batch first input?
+ ):
+ super().__init__()
+ self.d_model = d_model
+ self.layers = get_clones(layer, num_layers)
+ self.num_layers = num_layers
+ self.norm = nn.LayerNorm(d_model)
+ self.pos_enc_at_input = pos_enc_at_input
+ self.batch_first = batch_first
+
+ def forward(
+ self,
+ curr: torch.Tensor, # self-attention inputs
+ memory: torch.Tensor, # cross-attention inputs
+ curr_pos: Optional[Tensor] = None, # pos_enc for self-attention inputs
+ memory_pos: Optional[Tensor] = None, # pos_enc for cross-attention inputs
+ num_obj_ptr_tokens: int = 0, # number of object pointer *tokens*
+ ):
+ if isinstance(curr, list):
+ assert isinstance(curr_pos, list)
+ assert len(curr) == len(curr_pos) == 1
+ curr, curr_pos = (
+ curr[0],
+ curr_pos[0],
+ )
+
+ assert (curr.shape[1] == memory.shape[1]), "Batch size must be the same for curr and memory"
+
+ output = curr
+ if self.pos_enc_at_input and curr_pos is not None:
+ output = output + 0.1 * curr_pos
+
+ if self.batch_first:
+ # Convert to batch first
+ output = output.transpose(0, 1)
+ curr_pos = curr_pos.transpose(0, 1)
+ memory = memory.transpose(0, 1)
+ memory_pos = memory_pos.transpose(0, 1)
+
+ for layer in self.layers:
+ kwds = {}
+ if isinstance(layer.cross_attn_image, RoPEAttention):
+ kwds = {"num_k_exclude_rope": num_obj_ptr_tokens}
+
+ output = layer(
+ tgt=output,
+ memory=memory,
+ pos=memory_pos,
+ query_pos=curr_pos,
+ **kwds,
+ )
+ normed_output = self.norm(output)
+
+ if self.batch_first:
+ # Convert back to seq first
+ normed_output = normed_output.transpose(0, 1)
+ curr_pos = curr_pos.transpose(0, 1)
+
+ return normed_output
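+
+
+# Call sketch (illustrative; all sizes are assumptions): inputs are
+# sequence-first, e.g. `curr` of shape (H*W, B, d_model) for the current frame
+# and `memory` of shape (N_mem, B, C_mem) for the memory bank; the output has
+# the same shape as `curr`.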
diff --git a/sam2/modeling/memory_encoder.py b/sam2/modeling/memory_encoder.py
new file mode 100644
index 0000000000000000000000000000000000000000..066f828d731028c4ddf6fb93076418f3f33ba0ca
--- /dev/null
+++ b/sam2/modeling/memory_encoder.py
@@ -0,0 +1,180 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+
+import math
+from typing import Tuple
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+from sam2.modeling.sam2_utils import DropPath, get_clones, LayerNorm2d
+
+
+class MaskDownSampler(nn.Module):
+ """
+ Progressively downsample a mask by total_stride, each time by stride.
+ Note that LayerNorm is applied per *token*, like in ViT.
+
+ With each downsample (by a factor stride**2), channel capacity increases by the same factor.
+ In the end, we linearly project to embed_dim channels.
+ """
+
+ def __init__(
+ self,
+ embed_dim=256,
+ kernel_size=4,
+ stride=4,
+ padding=0,
+ total_stride=16,
+ activation=nn.GELU,
+ ):
+ super().__init__()
+ num_layers = int(math.log2(total_stride) // math.log2(stride))
+ assert stride**num_layers == total_stride
+ self.encoder = nn.Sequential()
+ mask_in_chans, mask_out_chans = 1, 1
+ for _ in range(num_layers):
+ mask_out_chans = mask_in_chans * (stride**2)
+ self.encoder.append(
+ nn.Conv2d(
+ mask_in_chans,
+ mask_out_chans,
+ kernel_size=kernel_size,
+ stride=stride,
+ padding=padding,
+ ))
+ self.encoder.append(LayerNorm2d(mask_out_chans))
+ self.encoder.append(activation())
+ mask_in_chans = mask_out_chans
+
+ self.encoder.append(nn.Conv2d(mask_out_chans, embed_dim, kernel_size=1))
+
+ def forward(self, x):
+ return self.encoder(x)
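+
+
+# Downsampling sketch (illustrative): with the defaults stride=4 and
+# total_stride=16, the encoder stacks two stride-4 convs, so a
+# (B, 1, 1024, 1024) mask becomes a (B, embed_dim, 64, 64) feature map.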
+
+
+# Lightly adapted from ConvNext (https://github.com/facebookresearch/ConvNeXt)
+class CXBlock(nn.Module):
+ r"""ConvNeXt Block. There are two equivalent implementations:
+ (1) DwConv -> LayerNorm (channels_first) -> 1x1 Conv -> GELU -> 1x1 Conv; all in (N, C, H, W)
+ (2) DwConv -> Permute to (N, H, W, C); LayerNorm (channels_last) -> Linear -> GELU -> Linear; Permute back
+ We use (2) as we find it slightly faster in PyTorch
+
+ Args:
+ dim (int): Number of input channels.
+ drop_path (float): Stochastic depth rate. Default: 0.0
+ layer_scale_init_value (float): Init value for Layer Scale. Default: 1e-6.
+ """
+
+ def __init__(
+ self,
+ dim,
+ kernel_size=7,
+ padding=3,
+ drop_path=0.0,
+ layer_scale_init_value=1e-6,
+ use_dwconv=True,
+ ):
+ super().__init__()
+ self.dwconv = nn.Conv2d(
+ dim,
+ dim,
+ kernel_size=kernel_size,
+ padding=padding,
+ groups=dim if use_dwconv else 1,
+ ) # depthwise conv
+ self.norm = LayerNorm2d(dim, eps=1e-6)
+ self.pwconv1 = nn.Linear(dim, 4 * dim) # pointwise/1x1 convs, implemented with linear layers
+ self.act = nn.GELU()
+ self.pwconv2 = nn.Linear(4 * dim, dim)
+ # NOTE: changed from gamma to weight
+ # https://github.com/huggingface/transformers/issues/29554
+        self.weight = (nn.Parameter(layer_scale_init_value * torch.ones(dim), requires_grad=True)
+                       if layer_scale_init_value > 0 else None)
+ self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
+
+ def forward(self, x):
+        shortcut = x  # keep the residual branch (avoids shadowing the built-in `input`)
+ x = self.dwconv(x)
+ x = self.norm(x)
+ x = x.permute(0, 2, 3, 1) # (N, C, H, W) -> (N, H, W, C)
+ x = self.pwconv1(x)
+ x = self.act(x)
+ x = self.pwconv2(x)
+ if self.weight is not None:
+ x = self.weight * x
+ x = x.permute(0, 3, 1, 2) # (N, H, W, C) -> (N, C, H, W)
+
+        x = shortcut + self.drop_path(x)
+ return x
+
+
+class Fuser(nn.Module):
+
+ def __init__(self, layer, num_layers, dim=None, input_projection=False):
+ super().__init__()
+ self.proj = nn.Identity()
+ self.layers = get_clones(layer, num_layers)
+
+ if input_projection:
+ assert dim is not None
+ self.proj = nn.Conv2d(dim, dim, kernel_size=1)
+
+ def forward(self, x):
+ # normally x: (N, C, H, W)
+ x = self.proj(x)
+ for layer in self.layers:
+ x = layer(x)
+ return x
+
+
+class MemoryEncoder(nn.Module):
+
+ def __init__(
+ self,
+ out_dim,
+ mask_downsampler,
+ fuser,
+ position_encoding,
+ in_dim=256, # in_dim of pix_feats
+ ):
+ super().__init__()
+
+ self.mask_downsampler = mask_downsampler
+
+ self.pix_feat_proj = nn.Conv2d(in_dim, in_dim, kernel_size=1)
+ self.fuser = fuser
+ self.position_encoding = position_encoding
+ self.out_proj = nn.Identity()
+ if out_dim != in_dim:
+ self.out_proj = nn.Conv2d(in_dim, out_dim, kernel_size=1)
+
+ def forward(
+ self,
+ pix_feat: torch.Tensor,
+ masks: torch.Tensor,
+ skip_mask_sigmoid: bool = False,
+    ) -> dict:
+ # Process masks
+        # apply sigmoid so there is less domain shift from ground-truth masks, which are binary
+ if not skip_mask_sigmoid:
+ masks = F.sigmoid(masks)
+ masks = self.mask_downsampler(masks)
+
+ # Fuse pix_feats and downsampled masks
+ # in case the visual features are on CPU, cast them to CUDA
+ pix_feat = pix_feat.to(masks.device)
+
+ x = self.pix_feat_proj(pix_feat)
+ x = x + masks
+ x = self.fuser(x)
+ x = self.out_proj(x)
+
+ pos = self.position_encoding(x).to(x.dtype)
+
+ return {"vision_features": x, "vision_pos_enc": [pos]}
diff --git a/sam2/modeling/position_encoding.py b/sam2/modeling/position_encoding.py
new file mode 100644
index 0000000000000000000000000000000000000000..ae1e5de4f6f5c0aee5cc9d1e8e133a09ae5b7af1
--- /dev/null
+++ b/sam2/modeling/position_encoding.py
@@ -0,0 +1,312 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+
+import math
+from typing import Optional, Tuple
+
+import numpy as np
+import torch
+from torch import nn
+
+
+class PositionEmbeddingSine(nn.Module):
+ """
+ This is a more standard version of the position embedding, very similar to the one
+ used by the Attention Is All You Need paper, generalized to work on images.
+ """
+
+ def __init__(
+ self,
+ num_pos_feats,
+ temperature: int = 10000,
+ normalize: bool = True,
+ scale: Optional[float] = None,
+        # The following settings are only relevant
+        # for warming up the cache for compilation
+ warmup_cache: bool = True,
+ image_size: int = 1024,
+ strides: Tuple[int] = (4, 8, 16, 32),
+ ):
+ super().__init__()
+ assert num_pos_feats % 2 == 0, "Expecting even model width"
+ self.num_pos_feats = num_pos_feats // 2
+ self.temperature = temperature
+ self.normalize = normalize
+ if scale is not None and normalize is False:
+ raise ValueError("normalize should be True if scale is passed")
+ if scale is None:
+ scale = 2 * math.pi
+ self.scale = scale
+
+ self.cache = {}
+ if warmup_cache:
+ # Warmup cache for cuda and npu, to help with compilation
+ try:
+ import torch_npu
+ has_npu = torch_npu.npu.is_available()
+ except ImportError:
+ has_npu = False
+ if torch.cuda.is_available() or has_npu:
+ device = torch.device("cuda" if torch.cuda.is_available() else "npu")
+ for stride in strides:
+ cache_key = (image_size // stride, image_size // stride)
+ self._pe(1, device, None, *cache_key)
+
+ def _encode_xy(self, x, y):
+        # NOTE: disabled in this build; the reference implementation below is unreachable
+ raise NotImplementedError
+ # The positions are expected to be normalized
+ assert len(x) == len(y) and x.ndim == y.ndim == 1
+ x_embed = x * self.scale
+ y_embed = y * self.scale
+
+ dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device)
+ dim_t = self.temperature**(2 * (dim_t // 2) / self.num_pos_feats)
+
+ pos_x = x_embed[:, None] / dim_t
+ pos_y = y_embed[:, None] / dim_t
+ pos_x = torch.stack((pos_x[:, 0::2].sin(), pos_x[:, 1::2].cos()), dim=2).flatten(1)
+ pos_y = torch.stack((pos_y[:, 0::2].sin(), pos_y[:, 1::2].cos()), dim=2).flatten(1)
+ return pos_x, pos_y
+
+ @torch.no_grad()
+ def encode_boxes(self, x, y, w, h):
+        # NOTE: disabled in this build; the reference implementation below is unreachable
+ raise NotImplementedError
+ pos_x, pos_y = self._encode_xy(x, y)
+ pos = torch.cat((pos_y, pos_x, h[:, None], w[:, None]), dim=1)
+ return pos
+
+ encode = encode_boxes # Backwards compatibility
+
+ @torch.no_grad()
+ def encode_points(self, x, y, labels):
+        # NOTE: disabled in this build; the reference implementation below is unreachable
+ raise NotImplementedError
+ (bx, nx), (by, ny), (bl, nl) = x.shape, y.shape, labels.shape
+ assert bx == by and nx == ny and bx == bl and nx == nl
+ pos_x, pos_y = self._encode_xy(x.flatten(), y.flatten())
+ pos_x, pos_y = pos_x.reshape(bx, nx, -1), pos_y.reshape(by, ny, -1)
+ pos = torch.cat((pos_y, pos_x, labels[:, :, None]), dim=2)
+ return pos
+
+ @torch.no_grad()
+ def _pe(self, B, device, dtype, *cache_key):
+ H, W = cache_key
+ if cache_key in self.cache:
+ return self.cache[cache_key].to(device)[None].repeat(B, 1, 1, 1)
+
+ # Force fp32 (https://github.com/huggingface/transformers/pull/29285)
+ with torch.autocast(device_type=device.type, enabled=False):
+ y_embed = torch.arange(1, H + 1, dtype=torch.float32, device=device).view(1, -1, 1).repeat(B, 1, W)
+ x_embed = torch.arange(1, W + 1, dtype=torch.float32, device=device).view(1, 1, -1).repeat(B, H, 1)
+
+ if self.normalize:
+ eps = 1e-6
+ y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale
+ x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale
+
+ dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=device)
+ dim_t = self.temperature**(2 * (dim_t // 2) / self.num_pos_feats)
+
+ pos_x = x_embed[:, :, :, None] / dim_t
+ pos_y = y_embed[:, :, :, None] / dim_t
+ pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3)
+ pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3)
+ pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
+
+ if dtype is not None:
+ pos = pos.to(dtype)
+
+ self.cache[cache_key] = pos[0]
+ return pos
+
+ @torch.no_grad()
+ def forward(self, x: torch.Tensor):
+ B = x.shape[0]
+ cache_key = (x.shape[-2], x.shape[-1])
+ return self._pe(B, x.device, x.dtype, *cache_key)
+
+
+class PositionEmbeddingRandom(nn.Module):
+ """
+ Positional encoding using random spatial frequencies.
+ """
+
+ def __init__(self, num_pos_feats: int = 64, scale: Optional[float] = None) -> None:
+ super().__init__()
+ if scale is None or scale <= 0.0:
+ scale = 1.0
+ self.register_buffer(
+ "positional_encoding_gaussian_matrix",
+ scale * torch.randn((2, num_pos_feats)),
+ )
+
+ @torch.no_grad()
+ def _pe_encoding(self, coords: torch.Tensor) -> torch.Tensor:
+ """Positionally encode points that are normalized to [0,1]."""
+ # assuming coords are in [0, 1]^2 square and have d_1 x ... x d_n x 2 shape
+ coords = 2 * coords - 1
+ coords = coords @ self.positional_encoding_gaussian_matrix.to(coords.dtype)
+ coords = 2 * np.pi * coords
+ # outputs d_1 x ... x d_n x C shape
+ return torch.cat([torch.sin(coords), torch.cos(coords)], dim=-1)
+
+ @torch.no_grad()
+ def forward(self, size: Tuple[int, int]) -> torch.Tensor:
+ """Generate positional encoding for a grid of the specified size."""
+ h, w = size
+ device = self.positional_encoding_gaussian_matrix.device
+
+ # Force fp32 (https://github.com/huggingface/transformers/pull/29285)
+ with torch.autocast(device_type=device.type, enabled=False):
+ grid = torch.ones((h, w), device=device, dtype=torch.float32)
+ y_embed = grid.cumsum(dim=0) - 0.5
+ x_embed = grid.cumsum(dim=1) - 0.5
+ y_embed = y_embed / h
+ x_embed = x_embed / w
+ pe = self._pe_encoding(torch.stack([x_embed, y_embed], dim=-1))
+
+ pe = pe.to(self.positional_encoding_gaussian_matrix.dtype)
+ return pe.permute(2, 0, 1) # C x H x W
+
+ @torch.no_grad()
+ def forward_with_coords(self, coords_input: torch.Tensor, image_size: Tuple[int, int]) -> torch.Tensor:
+ """Positionally encode points that are not normalized to [0,1]."""
+ assert coords_input.dtype == torch.float, 'coords_input must be in float32'
+
+ # Force fp32 (https://github.com/huggingface/transformers/pull/29285)
+ with torch.autocast(device_type=coords_input.device.type, enabled=False):
+ coords = coords_input.clone()
+ coords[:, :, 0] = coords[:, :, 0] / image_size[1]
+ coords[:, :, 1] = coords[:, :, 1] / image_size[0]
+ pe = self._pe_encoding(coords.to(torch.float)) # B x N x C
+
+ pe = pe.to(self.positional_encoding_gaussian_matrix.dtype)
+ return pe
+
+
+class PositionEmbedding1DRandom(nn.Module):
+ """
+ Positional encoding using random frequencies for 1D inputs.
+ """
+
+ def __init__(self, num_pos_feats: int = 64, scale: Optional[float] = None) -> None:
+ super().__init__()
+ if scale is None or scale <= 0.0:
+ scale = 1.0
+ self.register_buffer(
+ "positional_encoding_gaussian_matrix",
+ scale * torch.randn((1, num_pos_feats)),
+ )
+
+ @torch.no_grad()
+ def _pe_encoding(self, coords: torch.Tensor) -> torch.Tensor:
+ """Positionally encode points that are normalized to [0,1]."""
+ coords = 2 * coords - 1
+ coords = coords @ self.positional_encoding_gaussian_matrix.to(coords.dtype)
+ coords = 2 * np.pi * coords
+ return torch.cat([torch.sin(coords), torch.cos(coords)], dim=-1)
+
+ @torch.no_grad()
+ def forward(self, size: int) -> torch.Tensor:
+ """Generate positional encoding for a sequence of the specified length."""
+ device = self.positional_encoding_gaussian_matrix.device
+
+ # Force fp32 (https://github.com/huggingface/transformers/pull/29285)
+ with torch.autocast(device_type=device.type, enabled=False):
+ positions = torch.arange(size, device=device, dtype=torch.float32)
+ positions = positions / (size - 1)
+ positions = positions.unsqueeze(-1)
+ pe = self._pe_encoding(positions)
+
+ pe = pe.to(self.positional_encoding_gaussian_matrix.dtype)
+ return pe.permute(1, 0) # C x L
+
+ @torch.no_grad()
+ def forward_with_coords(self, coords_input: torch.Tensor, seq_length: int) -> torch.Tensor:
+ """Positionally encode raw coordinates by normalizing to [0,1]."""
+ assert coords_input.dtype == torch.float, 'coords_input must be in float32'
+
+ # Force fp32 (https://github.com/huggingface/transformers/pull/29285)
+ with torch.autocast(device_type=coords_input.device.type, enabled=False):
+ coords = coords_input.clone()
+ coords = coords / (seq_length - 1)
+ if coords.dim() == 2:
+ coords = coords.unsqueeze(-1)
+ pe = self._pe_encoding(coords.to(torch.float)) # B x N x C
+
+ pe = pe.to(self.positional_encoding_gaussian_matrix.dtype)
+ return pe
+
+
+# Rotary Positional Encoding, adapted from:
+# 1. https://github.com/meta-llama/codellama/blob/main/llama/model.py
+# 2. https://github.com/naver-ai/rope-vit
+# 3. https://github.com/lucidrains/rotary-embedding-torch
+
+
+@torch.no_grad()
+def init_t_xy(end_x: int, end_y: int):
+ t = torch.arange(end_x * end_y, dtype=torch.float32)
+ t_x = (t % end_x).float()
+ t_y = torch.div(t, end_x, rounding_mode="floor").float()
+ return t_x, t_y
+
+
+@torch.no_grad()
+def compute_axial_cis(dim: int, end_x: int, end_y: int, theta: float = 10000.0):
+ # Force fp32 on CPU (see https://github.com/huggingface/transformers/pull/29285)
+ with torch.autocast(device_type='cpu', enabled=False):
+ freqs_x = 1.0 / (theta**(torch.arange(0, dim, 4)[:(dim // 4)].float() / dim))
+ freqs_y = 1.0 / (theta**(torch.arange(0, dim, 4)[:(dim // 4)].float() / dim))
+
+ t_x, t_y = init_t_xy(end_x, end_y)
+ freqs_x = torch.outer(t_x, freqs_x)
+ freqs_y = torch.outer(t_y, freqs_y)
+ freqs_cis_x = torch.polar(torch.ones_like(freqs_x), freqs_x)
+ freqs_cis_y = torch.polar(torch.ones_like(freqs_y), freqs_y)
+
+ return torch.cat([freqs_cis_x, freqs_cis_y], dim=-1)
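+
+
+# Shape sketch (illustrative): compute_axial_cis(dim=64, end_x=8, end_y=8)
+# returns a complex tensor of shape (64, 32), i.e. one rotation of length
+# dim // 2 per token on the 8x8 grid, consumed later by apply_rotary_enc.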
+
+
+@torch.no_grad()
+def reshape_for_broadcast(freqs_cis: torch.Tensor, x: torch.Tensor):
+ ndim = x.ndim
+    assert ndim >= 2
+ assert freqs_cis.shape == (x.shape[-2], x.shape[-1])
+ shape = [d if i >= ndim - 2 else 1 for i, d in enumerate(x.shape)]
+ return freqs_cis.view(*shape)
+
+
+@torch.no_grad()
+def apply_rotary_enc(
+ xq: torch.Tensor,
+ xk: torch.Tensor,
+ freqs_cis: torch.Tensor,
+ repeat_freqs_k: bool = False,
+):
+ # Force fp32 (https://github.com/huggingface/transformers/pull/29285)
+ with torch.autocast(device_type=freqs_cis.device.type, enabled=False):
+ xq_ = torch.view_as_complex(xq.float().reshape(*xq.shape[:-1], -1, 2))
+ xk_ = (torch.view_as_complex(xk.float().reshape(*xk.shape[:-1], -1, 2)) if xk.shape[-2] != 0 else None)
+ freqs_cis = reshape_for_broadcast(freqs_cis, xq_)
+ xq_out = torch.view_as_real(xq_ * freqs_cis).flatten(3)
+ if xk_ is None:
+ # no keys to rotate, due to dropout
+ return xq_out.type_as(xq).to(xq.device), xk
+ # repeat freqs along seq_len dim to match k seq_len
+ if repeat_freqs_k:
+ r = xk_.shape[-2] // xq_.shape[-2]
+ if freqs_cis.is_cuda:
+ freqs_cis = freqs_cis.repeat(*([1] * (freqs_cis.ndim - 2)), r, 1)
+ else:
+ # torch.repeat on complex numbers may not be supported on non-CUDA devices
+ # (freqs_cis has 4 dims and we repeat on dim 2) so we use expand + flatten
+ freqs_cis = freqs_cis.unsqueeze(2).expand(-1, -1, r, -1, -1).flatten(2, 3)
+ xk_out = torch.view_as_real(xk_ * freqs_cis).flatten(3)
+
+ return xq_out.type_as(xq).to(xq.device), xk_out.type_as(xk).to(xk.device)
diff --git a/sam2/modeling/sam/__init__.py b/sam2/modeling/sam/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..5277f46157403e47fd830fc519144b97ef69d4ae
--- /dev/null
+++ b/sam2/modeling/sam/__init__.py
@@ -0,0 +1,5 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
diff --git a/sam2/modeling/sam/mask_decoder.py b/sam2/modeling/sam/mask_decoder.py
new file mode 100644
index 0000000000000000000000000000000000000000..6a75ac8aa9ebebdec3cbb8dd8877d2832438b3c5
--- /dev/null
+++ b/sam2/modeling/sam/mask_decoder.py
@@ -0,0 +1,274 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+
+from typing import List, Optional, Tuple, Type
+
+import torch
+from torch import nn
+
+from sam2.modeling.sam2_utils import LayerNorm2d, MLP
+
+
+class MaskDecoder(nn.Module):
+
+ def __init__(
+ self,
+ *,
+ transformer_dim: int,
+ transformer: nn.Module,
+ num_multimask_outputs: int = 3,
+ activation: Type[nn.Module] = nn.GELU,
+ iou_head_depth: int = 3,
+ iou_head_hidden_dim: int = 256,
+ use_high_res_features: bool = False,
+ iou_prediction_use_sigmoid=False,
+ dynamic_multimask_via_stability=False,
+ dynamic_multimask_stability_delta=0.05,
+ dynamic_multimask_stability_thresh=0.98,
+ pred_obj_scores: bool = False,
+ pred_obj_scores_mlp: bool = False,
+ use_multimask_token_for_obj_ptr: bool = False,
+ ) -> None:
+ """
+ Predicts masks given an image and prompt embeddings, using a
+ transformer architecture.
+
+ Arguments:
+ transformer_dim (int): the channel dimension of the transformer
+ transformer (nn.Module): the transformer used to predict masks
+ num_multimask_outputs (int): the number of masks to predict
+ when disambiguating masks
+ activation (nn.Module): the type of activation to use when
+ upscaling masks
+ iou_head_depth (int): the depth of the MLP used to predict
+ mask quality
+ iou_head_hidden_dim (int): the hidden dimension of the MLP
+ used to predict mask quality
+ """
+ super().__init__()
+ self.transformer_dim = transformer_dim
+ self.transformer = transformer
+
+ self.num_multimask_outputs = num_multimask_outputs
+
+ self.iou_token = nn.Embedding(1, transformer_dim)
+ self.num_mask_tokens = num_multimask_outputs + 1
+ self.mask_tokens = nn.Embedding(self.num_mask_tokens, transformer_dim)
+
+ self.pred_obj_scores = pred_obj_scores
+ if self.pred_obj_scores:
+ self.obj_score_token = nn.Embedding(1, transformer_dim)
+ self.use_multimask_token_for_obj_ptr = use_multimask_token_for_obj_ptr
+
+ self.output_upscaling = nn.Sequential(
+ nn.ConvTranspose2d(transformer_dim, transformer_dim // 4, kernel_size=2, stride=2),
+ LayerNorm2d(transformer_dim // 4),
+ activation(),
+ nn.ConvTranspose2d(transformer_dim // 4, transformer_dim // 8, kernel_size=2, stride=2),
+ activation(),
+ )
+ self.use_high_res_features = use_high_res_features
+ if use_high_res_features:
+ self.conv_s0 = nn.Conv2d(transformer_dim, transformer_dim // 8, kernel_size=1, stride=1)
+ self.conv_s1 = nn.Conv2d(transformer_dim, transformer_dim // 4, kernel_size=1, stride=1)
+
+ self.output_hypernetworks_mlps = nn.ModuleList(
+ [MLP(transformer_dim, transformer_dim, transformer_dim // 8, 3) for i in range(self.num_mask_tokens)])
+
+ self.iou_prediction_head = MLP(
+ transformer_dim,
+ iou_head_hidden_dim,
+ self.num_mask_tokens,
+ iou_head_depth,
+ sigmoid_output=iou_prediction_use_sigmoid,
+ )
+ if self.pred_obj_scores:
+ self.pred_obj_score_head = nn.Linear(transformer_dim, 1)
+ if pred_obj_scores_mlp:
+ self.pred_obj_score_head = MLP(transformer_dim, transformer_dim, 1, 3)
+
+ # When outputting a single mask, optionally we can dynamically fall back to the best
+ # multimask output token if the single mask output token gives low stability scores.
+ self.dynamic_multimask_via_stability = dynamic_multimask_via_stability
+ self.dynamic_multimask_stability_delta = dynamic_multimask_stability_delta
+ self.dynamic_multimask_stability_thresh = dynamic_multimask_stability_thresh
+
+ def forward(
+ self,
+ image_embeddings: torch.Tensor,
+ image_pe: torch.Tensor,
+ sparse_prompt_embeddings: torch.Tensor,
+ dense_prompt_embeddings: torch.Tensor,
+ multimask_output: bool,
+ repeat_image: bool,
+ high_res_features: Optional[List[torch.Tensor]] = None,
+    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
+ """
+ Predict masks given image and prompt embeddings.
+
+ Arguments:
+ image_embeddings (torch.Tensor): the embeddings from the image encoder
+ image_pe (torch.Tensor): positional encoding with the shape of image_embeddings
+ sparse_prompt_embeddings (torch.Tensor): the embeddings of the points and boxes
+ dense_prompt_embeddings (torch.Tensor): the embeddings of the mask inputs
+ multimask_output (bool): Whether to return multiple masks or a single
+ mask.
+
+        Returns:
+            torch.Tensor: batched predicted masks
+            torch.Tensor: batched predictions of mask quality
+            torch.Tensor: batched SAM token for mask output
+            torch.Tensor: batched object score logits
+ """
+ masks, iou_pred, mask_tokens_out, object_score_logits = self.predict_masks(
+ image_embeddings=image_embeddings,
+ image_pe=image_pe,
+ sparse_prompt_embeddings=sparse_prompt_embeddings,
+ dense_prompt_embeddings=dense_prompt_embeddings,
+ repeat_image=repeat_image,
+ high_res_features=high_res_features,
+ )
+
+ # Select the correct mask or masks for output
+ if multimask_output:
+ masks = masks[:, 1:, :, :]
+ iou_pred = iou_pred[:, 1:]
+ elif self.dynamic_multimask_via_stability and not self.training:
+ masks, iou_pred = self._dynamic_multimask_via_stability(masks, iou_pred)
+ else:
+ masks = masks[:, 0:1, :, :]
+ iou_pred = iou_pred[:, 0:1]
+
+ if multimask_output and self.use_multimask_token_for_obj_ptr:
+ sam_tokens_out = mask_tokens_out[:, 1:] # [b, 3, c] shape
+ else:
+ # Take the mask output token. Here we *always* use the token for single mask output.
+ # At test time, even if we track after 1-click (and using multimask_output=True),
+ # we still take the single mask token here. The rationale is that we always track
+ # after multiple clicks during training, so the past tokens seen during training
+ # are always the single mask token (and we'll let it be the object-memory token).
+ sam_tokens_out = mask_tokens_out[:, 0:1] # [b, 1, c] shape
+
+ # Prepare output
+ return masks, iou_pred, sam_tokens_out, object_score_logits
+
+ def predict_masks(
+ self,
+ image_embeddings: torch.Tensor,
+ image_pe: torch.Tensor,
+ sparse_prompt_embeddings: torch.Tensor,
+ dense_prompt_embeddings: torch.Tensor,
+ repeat_image: bool,
+ high_res_features: Optional[List[torch.Tensor]] = None,
+    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
+ """Predicts masks. See 'forward' for more details."""
+ # Concatenate output tokens
+ s = 0
+ if self.pred_obj_scores:
+ output_tokens = torch.cat(
+ [
+ self.obj_score_token.weight,
+ self.iou_token.weight,
+ self.mask_tokens.weight,
+ ],
+ dim=0,
+ )
+ s = 1
+ else:
+ output_tokens = torch.cat([self.iou_token.weight, self.mask_tokens.weight], dim=0)
+ output_tokens = output_tokens.unsqueeze(0).expand(sparse_prompt_embeddings.size(0), -1, -1)
+ tokens = torch.cat((output_tokens, sparse_prompt_embeddings), dim=1)
+
+ # Expand per-image data in batch direction to be per-mask
+ if repeat_image:
+ src = torch.repeat_interleave(image_embeddings, tokens.shape[0], dim=0)
+ else:
+ assert image_embeddings.shape[0] == tokens.shape[0]
+ src = image_embeddings
+ src = src + dense_prompt_embeddings
+ assert (image_pe.size(0) == 1), "image_pe should have size 1 in batch dim (from `get_dense_pe()`)"
+ pos_src = torch.repeat_interleave(image_pe, tokens.shape[0], dim=0)
+ b, c, h, w = src.shape
+
+ # Run the transformer
+ hs, src = self.transformer(src, pos_src, tokens)
+ iou_token_out = hs[:, s, :]
+ mask_tokens_out = hs[:, s + 1:(s + 1 + self.num_mask_tokens), :]
+
+ # Upscale mask embeddings and predict masks using the mask tokens
+ src = src.transpose(1, 2).view(b, c, h, w)
+ if not self.use_high_res_features:
+ upscaled_embedding = self.output_upscaling(src)
+ else:
+ dc1, ln1, act1, dc2, act2 = self.output_upscaling
+ feat_s0, feat_s1 = high_res_features
+ upscaled_embedding = act1(ln1(dc1(src) + feat_s1))
+ upscaled_embedding = act2(dc2(upscaled_embedding) + feat_s0)
+
+ hyper_in_list: List[torch.Tensor] = []
+ for i in range(self.num_mask_tokens):
+ hyper_in_list.append(self.output_hypernetworks_mlps[i](mask_tokens_out[:, i, :]))
+ hyper_in = torch.stack(hyper_in_list, dim=1)
+ b, c, h, w = upscaled_embedding.shape
+ masks = (hyper_in @ upscaled_embedding.view(b, c, h * w)).view(b, -1, h, w)
+
+ # Generate mask quality predictions
+ iou_pred = self.iou_prediction_head(iou_token_out)
+ if self.pred_obj_scores:
+ assert s == 1
+ object_score_logits = self.pred_obj_score_head(hs[:, 0, :])
+ else:
+            # Object score logits - default to 10.0, i.e. assume the object is present (sigmoid(10) ≈ 1)
+ object_score_logits = 10.0 * iou_pred.new_ones(iou_pred.shape[0], 1)
+
+ return masks, iou_pred, mask_tokens_out, object_score_logits
+
+ def _get_stability_scores(self, mask_logits):
+ """
+ Compute stability scores of the mask logits based on the IoU between upper and
+ lower thresholds.
+ """
+ mask_logits = mask_logits.flatten(-2)
+ stability_delta = self.dynamic_multimask_stability_delta
+ area_i = torch.sum(mask_logits > stability_delta, dim=-1).float()
+ area_u = torch.sum(mask_logits > -stability_delta, dim=-1).float()
+ stability_scores = torch.where(area_u > 0, area_i / area_u, 1.0)
+ return stability_scores
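+
+    # Numeric sketch (illustrative): if 900 pixels have logits above +delta and
+    # 1000 pixels have logits above -delta, the stability score is 0.9; with
+    # the default threshold of 0.98, such a mask triggers the multimask
+    # fallback in `_dynamic_multimask_via_stability` below.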
+
+ def _dynamic_multimask_via_stability(self, all_mask_logits, all_iou_scores):
+ """
+ When outputting a single mask, if the stability score from the current single-mask
+ output (based on output token 0) falls below a threshold, we instead select from
+ multi-mask outputs (based on output token 1~3) the mask with the highest predicted
+ IoU score. This is intended to ensure a valid mask for both clicking and tracking.
+ """
+ # The best mask from multimask output tokens (1~3)
+ multimask_logits = all_mask_logits[:, 1:, :, :]
+ multimask_iou_scores = all_iou_scores[:, 1:]
+ best_scores_inds = torch.argmax(multimask_iou_scores, dim=-1)
+ batch_inds = torch.arange(multimask_iou_scores.size(0), device=all_iou_scores.device)
+ best_multimask_logits = multimask_logits[batch_inds, best_scores_inds]
+ best_multimask_logits = best_multimask_logits.unsqueeze(1)
+ best_multimask_iou_scores = multimask_iou_scores[batch_inds, best_scores_inds]
+ best_multimask_iou_scores = best_multimask_iou_scores.unsqueeze(1)
+
+ # The mask from singlemask output token 0 and its stability score
+ singlemask_logits = all_mask_logits[:, 0:1, :, :]
+ singlemask_iou_scores = all_iou_scores[:, 0:1]
+ stability_scores = self._get_stability_scores(singlemask_logits)
+ is_stable = stability_scores >= self.dynamic_multimask_stability_thresh
+
+ # Dynamically fall back to best multimask output upon low stability scores.
+ mask_logits_out = torch.where(
+ is_stable[..., None, None].expand_as(singlemask_logits),
+ singlemask_logits,
+ best_multimask_logits,
+ )
+ iou_scores_out = torch.where(
+ is_stable.expand_as(singlemask_iou_scores),
+ singlemask_iou_scores,
+ best_multimask_iou_scores,
+ )
+ return mask_logits_out, iou_scores_out
diff --git a/sam2/modeling/sam/prompt_encoder.py b/sam2/modeling/sam/prompt_encoder.py
new file mode 100644
index 0000000000000000000000000000000000000000..a4c14f09e2cdfdf81e4343f6b0cabcb3ddd7810f
--- /dev/null
+++ b/sam2/modeling/sam/prompt_encoder.py
@@ -0,0 +1,188 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+
+from typing import Optional, Tuple, Type
+
+import torch
+from torch import nn
+
+from sam2.modeling.position_encoding import PositionEmbeddingRandom
+from sam2.modeling.sam2_utils import LayerNorm2d
+
+
+class PromptEncoder(nn.Module):
+
+ def __init__(
+ self,
+ embed_dim: int,
+ image_embedding_size: Tuple[int, int],
+ input_image_size: Tuple[int, int],
+ mask_in_chans: int,
+ activation: Type[nn.Module] = nn.GELU,
+ ) -> None:
+ """
+ Encodes prompts for input to SAM's mask decoder.
+
+ Arguments:
+ embed_dim (int): The prompts' embedding dimension
+ image_embedding_size (tuple(int, int)): The spatial size of the
+ image embedding, as (H, W).
+            input_image_size (tuple(int, int)): The padded size of the image as input
+ to the image encoder, as (H, W).
+ mask_in_chans (int): The number of hidden channels used for
+ encoding input masks.
+ activation (nn.Module): The activation to use when encoding
+ input masks.
+ """
+ super().__init__()
+ self.embed_dim = embed_dim
+ self.input_image_size = input_image_size
+ self.image_embedding_size = image_embedding_size
+ self.pe_layer = PositionEmbeddingRandom(embed_dim // 2)
+
+ self.num_point_embeddings: int = 4 # pos/neg point + 2 box corners
+ point_embeddings = [nn.Embedding(1, embed_dim) for i in range(self.num_point_embeddings)]
+ self.point_embeddings = nn.ModuleList(point_embeddings)
+ self.not_a_point_embed = nn.Embedding(1, embed_dim)
+
+ self.mask_input_size = (
+ 4 * image_embedding_size[0],
+ 4 * image_embedding_size[1],
+ )
+ self.mask_downscaling = nn.Sequential(
+ nn.Conv2d(1, mask_in_chans // 4, kernel_size=2, stride=2),
+ LayerNorm2d(mask_in_chans // 4),
+ activation(),
+ nn.Conv2d(mask_in_chans // 4, mask_in_chans, kernel_size=2, stride=2),
+ LayerNorm2d(mask_in_chans),
+ activation(),
+ nn.Conv2d(mask_in_chans, embed_dim, kernel_size=1),
+ )
+ self.no_mask_embed = nn.Embedding(1, embed_dim)
+
+ def get_dense_pe(self) -> torch.Tensor:
+ """
+ Returns the positional encoding used to encode point prompts,
+ applied to a dense set of points the shape of the image encoding.
+
+ Returns:
+ torch.Tensor: Positional encoding with shape
+ 1x(embed_dim)x(embedding_h)x(embedding_w)
+ """
+ return self.pe_layer(self.image_embedding_size).unsqueeze(0)
+
+ def _embed_points(
+ self,
+ points: torch.Tensor,
+ labels: torch.Tensor,
+ pad: bool,
+ ) -> torch.Tensor:
+ """Embeds point prompts."""
+ points = points + 0.5 # Shift to center of pixel
+ if pad:
+ padding_point = torch.zeros((points.shape[0], 1, 2), device=points.device)
+ padding_label = -torch.ones((labels.shape[0], 1), device=labels.device)
+ points = torch.cat([points, padding_point], dim=1)
+ labels = torch.cat([labels, padding_label], dim=1)
+ point_embedding = self.pe_layer.forward_with_coords(points, self.input_image_size)
+ point_embedding = torch.where((labels == -1).unsqueeze(-1),
+ torch.zeros_like(point_embedding) + self.not_a_point_embed.weight,
+ point_embedding)
+ point_embedding = torch.where((labels == 0).unsqueeze(-1), point_embedding + self.point_embeddings[0].weight,
+ point_embedding)
+ point_embedding = torch.where((labels == 1).unsqueeze(-1), point_embedding + self.point_embeddings[1].weight,
+ point_embedding)
+ point_embedding = torch.where((labels == 2).unsqueeze(-1), point_embedding + self.point_embeddings[2].weight,
+ point_embedding)
+ point_embedding = torch.where((labels == 3).unsqueeze(-1), point_embedding + self.point_embeddings[3].weight,
+ point_embedding)
+ return point_embedding
+
+ def _embed_boxes(self, boxes: torch.Tensor) -> torch.Tensor:
+ """Embeds box prompts."""
+ boxes = boxes + 0.5 # Shift to center of pixel
+ coords = boxes.reshape(-1, 2, 2)
+ corner_embedding = self.pe_layer.forward_with_coords(coords, self.input_image_size)
+ corner_embedding[:, 0, :] += self.point_embeddings[2].weight
+ corner_embedding[:, 1, :] += self.point_embeddings[3].weight
+ return corner_embedding
+
+ def _embed_masks(self, masks: torch.Tensor) -> torch.Tensor:
+ """Embeds mask inputs."""
+ mask_embedding = self.mask_downscaling(masks)
+ return mask_embedding
+
+ def _get_batch_size(
+ self,
+ points: Optional[Tuple[torch.Tensor, torch.Tensor]],
+ boxes: Optional[torch.Tensor],
+ masks: Optional[torch.Tensor],
+ hidden: Optional[torch.Tensor],
+ ) -> int:
+ """
+ Gets the batch size of the output given the batch size of the input prompts.
+ """
+ if points is not None:
+ return points[0].shape[0]
+ elif boxes is not None:
+ return boxes.shape[0]
+ elif masks is not None:
+ return masks.shape[0]
+ elif hidden is not None:
+ return hidden.shape[0]
+ else:
+ return 1
+
+ def _get_device(self) -> torch.device:
+ return self.point_embeddings[0].weight.device
+
+ def forward(
+ self,
+ points: Optional[Tuple[torch.Tensor, torch.Tensor]],
+ boxes: Optional[torch.Tensor],
+ masks: Optional[torch.Tensor],
+ hidden: Optional[torch.Tensor] = None,
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
+ """
+ Embeds different types of prompts, returning both sparse and dense
+ embeddings.
+
+ Arguments:
+ points (tuple(torch.Tensor, torch.Tensor) or none): point coordinates
+ and labels to embed.
+ boxes (torch.Tensor or none): boxes to embed
+ masks (torch.Tensor or none): masks to embed
+
+ Returns:
+ torch.Tensor: sparse embeddings for the points and boxes, with shape
+ BxNx(embed_dim), where N is determined by the number of input points
+ and boxes.
+ torch.Tensor: dense embeddings for the masks, in the shape
+ Bx(embed_dim)x(embed_H)x(embed_W)
+ """
+ bs = self._get_batch_size(points, boxes, masks, hidden)
+ sparse_embeddings = torch.empty((bs, 0, self.embed_dim),
+ dtype=self.no_mask_embed.weight.dtype,
+ device=self._get_device())
+ if points is not None:
+ coords, labels = points
+ point_embeddings = self._embed_points(coords, labels, pad=(boxes is None))
+ sparse_embeddings = torch.cat([sparse_embeddings, point_embeddings], dim=1)
+ if boxes is not None:
+ box_embeddings = self._embed_boxes(boxes)
+ sparse_embeddings = torch.cat([sparse_embeddings, box_embeddings], dim=1)
+
+ if hidden is not None:
+ sparse_embeddings = torch.cat([sparse_embeddings, hidden], dim=1)
+
+ if masks is not None:
+ dense_embeddings = self._embed_masks(masks)
+ else:
+ dense_embeddings = self.no_mask_embed.weight.reshape(1, -1, 1,
+ 1).expand(bs, -1, self.image_embedding_size[0],
+ self.image_embedding_size[1])
+
+ return sparse_embeddings, dense_embeddings
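+
+
+# A minimal usage sketch (assumption: illustrative sizes, not the repo's
+# configs). A single positive click becomes one sparse token plus a padding
+# token, and `no_mask_embed` fills the dense embedding when no mask is given:
+#
+#   enc = PromptEncoder(embed_dim=256, image_embedding_size=(64, 64),
+#                       input_image_size=(1024, 1024), mask_in_chans=16)
+#   coords = torch.tensor([[[512.0, 512.0]]])       # [B=1, P=1, 2], pixel units
+#   labels = torch.ones(1, 1, dtype=torch.int32)    # 1 = positive click
+#   sparse, dense = enc(points=(coords, labels), boxes=None, masks=None)
+#   # sparse: [1, 2, 256] (click + padding), dense: [1, 256, 64, 64]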
diff --git a/sam2/modeling/sam/transformer.py b/sam2/modeling/sam/transformer.py
new file mode 100644
index 0000000000000000000000000000000000000000..ede91cfd1f499ddf2ccd6f4a61dc9fcc3a7d6786
--- /dev/null
+++ b/sam2/modeling/sam/transformer.py
@@ -0,0 +1,303 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+
+import math
+from functools import partial
+from typing import Tuple, Type
+
+import torch
+import torch.nn.functional as F
+from torch import Tensor, nn
+
+from sam2.modeling.position_encoding import apply_rotary_enc, compute_axial_cis
+from sam2.modeling.sam2_utils import MLP
+
+
+class TwoWayTransformer(nn.Module):
+
+ def __init__(
+ self,
+ depth: int,
+ embedding_dim: int,
+ num_heads: int,
+ mlp_dim: int,
+ activation: Type[nn.Module] = nn.ReLU,
+ attention_downsample_rate: int = 2,
+ ) -> None:
+ """
+ A transformer decoder that attends to an input image using
+ queries whose positional embedding is supplied.
+
+ Args:
+ depth (int): number of layers in the transformer
+ embedding_dim (int): the channel dimension for the input embeddings
+ num_heads (int): the number of heads for multihead attention. Must
+ divide embedding_dim
+ mlp_dim (int): the channel dimension internal to the MLP block
+ activation (nn.Module): the activation to use in the MLP block
+ """
+ super().__init__()
+ self.depth = depth
+ self.embedding_dim = embedding_dim
+ self.num_heads = num_heads
+ self.mlp_dim = mlp_dim
+ self.layers = nn.ModuleList()
+
+ for i in range(depth):
+ self.layers.append(
+ TwoWayAttentionBlock(
+ embedding_dim=embedding_dim,
+ num_heads=num_heads,
+ mlp_dim=mlp_dim,
+ activation=activation,
+ attention_downsample_rate=attention_downsample_rate,
+ skip_first_layer_pe=(i == 0),
+ ))
+
+ self.final_attn_token_to_image = Attention(embedding_dim, num_heads, downsample_rate=attention_downsample_rate)
+ self.norm_final_attn = nn.LayerNorm(embedding_dim)
+
+ def forward(
+ self,
+ image_embedding: Tensor,
+ image_pe: Tensor,
+ point_embedding: Tensor,
+ ) -> Tuple[Tensor, Tensor]:
+ """
+ Args:
+ image_embedding (torch.Tensor): image to attend to. Should be shape
+ B x embedding_dim x h x w for any h and w.
+ image_pe (torch.Tensor): the positional encoding to add to the image. Must
+ have the same shape as image_embedding.
+ point_embedding (torch.Tensor): the embedding to add to the query points.
+ Must have shape B x N_points x embedding_dim for any N_points.
+
+ Returns:
+ torch.Tensor: the processed point_embedding
+ torch.Tensor: the processed image_embedding
+ """
+ # BxCxHxW -> BxHWxC == B x N_image_tokens x C
+ bs, c, h, w = image_embedding.shape
+ image_embedding = image_embedding.flatten(2).permute(0, 2, 1)
+ image_pe = image_pe.flatten(2).permute(0, 2, 1)
+
+ # Prepare queries
+ queries = point_embedding
+ keys = image_embedding
+
+ # Apply transformer blocks and final layernorm
+ for layer in self.layers:
+ queries, keys = layer(
+ queries=queries,
+ keys=keys,
+ query_pe=point_embedding,
+ key_pe=image_pe,
+ )
+
+ # Apply the final attention layer from the points to the image
+ q = queries + point_embedding
+ k = keys + image_pe
+ attn_out = self.final_attn_token_to_image(q=q, k=k, v=keys)
+ queries = queries + attn_out
+ queries = self.norm_final_attn(queries)
+
+ return queries, keys
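+
+# A minimal shape walkthrough (assumption: toy sizes for illustration):
+#
+#   tr = TwoWayTransformer(depth=2, embedding_dim=256, num_heads=8, mlp_dim=2048)
+#   img = torch.randn(1, 256, 64, 64)       # B x C x H x W image embedding
+#   pe = torch.randn(1, 256, 64, 64)        # matching positional encoding
+#   tokens = torch.randn(1, 7, 256)         # B x N_points x C query tokens
+#   queries, keys = tr(img, pe, tokens)
+#   # queries: [1, 7, 256]; keys: [1, 4096, 256] (the 64*64 image tokens)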
+
+
+class TwoWayAttentionBlock(nn.Module):
+
+ def __init__(
+ self,
+ embedding_dim: int,
+ num_heads: int,
+ mlp_dim: int = 2048,
+ activation: Type[nn.Module] = nn.ReLU,
+ attention_downsample_rate: int = 2,
+ skip_first_layer_pe: bool = False,
+ ) -> None:
+ """
+ A transformer block with four layers: (1) self-attention of sparse
+ inputs, (2) cross attention of sparse inputs to dense inputs, (3) mlp
+ block on sparse inputs, and (4) cross attention of dense inputs to sparse
+ inputs.
+
+ Arguments:
+ embedding_dim (int): the channel dimension of the embeddings
+ num_heads (int): the number of heads in the attention layers
+ mlp_dim (int): the hidden dimension of the mlp block
+ activation (nn.Module): the activation of the mlp block
+ skip_first_layer_pe (bool): skip the PE on the first layer
+ """
+ super().__init__()
+ self.self_attn = Attention(embedding_dim, num_heads)
+ self.norm1 = nn.LayerNorm(embedding_dim)
+
+ self.cross_attn_token_to_image = Attention(embedding_dim, num_heads, downsample_rate=attention_downsample_rate)
+ self.norm2 = nn.LayerNorm(embedding_dim)
+
+ self.mlp = MLP(embedding_dim, mlp_dim, embedding_dim, num_layers=2, activation=activation)
+ self.norm3 = nn.LayerNorm(embedding_dim)
+
+ self.norm4 = nn.LayerNorm(embedding_dim)
+ self.cross_attn_image_to_token = Attention(embedding_dim, num_heads, downsample_rate=attention_downsample_rate)
+
+ self.skip_first_layer_pe = skip_first_layer_pe
+
+ def forward(self, queries: Tensor, keys: Tensor, query_pe: Tensor, key_pe: Tensor) -> Tuple[Tensor, Tensor]:
+ # Self attention block
+ if self.skip_first_layer_pe:
+ queries = self.self_attn(q=queries, k=queries, v=queries)
+ else:
+ q = queries + query_pe
+ attn_out = self.self_attn(q=q, k=q, v=queries)
+ queries = queries + attn_out
+ queries = self.norm1(queries)
+
+ # Cross attention block, tokens attending to image embedding
+ q = queries + query_pe
+ k = keys + key_pe
+ attn_out = self.cross_attn_token_to_image(q=q, k=k, v=keys)
+ queries = queries + attn_out
+ queries = self.norm2(queries)
+
+ # MLP block
+ mlp_out = self.mlp(queries)
+ queries = queries + mlp_out
+ queries = self.norm3(queries)
+
+ # Cross attention block, image embedding attending to tokens
+ q = queries + query_pe
+ k = keys + key_pe
+ attn_out = self.cross_attn_image_to_token(q=k, k=q, v=queries)
+ keys = keys + attn_out
+ keys = self.norm4(keys)
+
+ return queries, keys
+
+
+class Attention(nn.Module):
+ """
+ An attention layer that allows for downscaling the size of the embedding
+ after projection to queries, keys, and values.
+ """
+
+ def __init__(
+ self,
+ embedding_dim: int,
+ num_heads: int,
+ downsample_rate: int = 1,
+ dropout: float = 0.0,
+ kv_in_dim: int = None,
+ ) -> None:
+ super().__init__()
+ self.embedding_dim = embedding_dim
+ self.kv_in_dim = kv_in_dim if kv_in_dim is not None else embedding_dim
+ self.internal_dim = embedding_dim // downsample_rate
+ self.num_heads = num_heads
+        assert (self.internal_dim % num_heads == 0), "num_heads must divide internal_dim."
+
+ self.q_proj = nn.Linear(embedding_dim, self.internal_dim)
+ self.k_proj = nn.Linear(self.kv_in_dim, self.internal_dim)
+ self.v_proj = nn.Linear(self.kv_in_dim, self.internal_dim)
+ self.out_proj = nn.Linear(self.internal_dim, embedding_dim)
+
+ self.dropout_p = dropout
+
+ def _separate_heads(self, x: Tensor, num_heads: int) -> Tensor:
+ b, n, c = x.shape
+ x = x.reshape(b, n, num_heads, c // num_heads)
+ return x.transpose(1, 2) # B x N_heads x N_tokens x C_per_head
+
+ def _recombine_heads(self, x: Tensor) -> Tensor:
+ b, n_heads, n_tokens, c_per_head = x.shape
+ x = x.transpose(1, 2)
+ return x.reshape(b, n_tokens, n_heads * c_per_head) # B x N_tokens x C
+
+ def forward(self, q: Tensor, k: Tensor, v: Tensor) -> Tensor:
+ # Input projections
+ q = self.q_proj(q)
+ k = self.k_proj(k)
+ v = self.v_proj(v)
+
+ # Separate into heads
+ q = self._separate_heads(q, self.num_heads)
+ k = self._separate_heads(k, self.num_heads)
+ v = self._separate_heads(v, self.num_heads)
+
+ dropout_p = self.dropout_p if self.training else 0.0
+ # Attention
+ out = F.scaled_dot_product_attention(q, k, v, dropout_p=dropout_p)
+
+ out = self._recombine_heads(out)
+ out = self.out_proj(out)
+
+ return out
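+
+# With downsample_rate=2, a 256-dim embedding attends at 128 dims internally
+# and is projected back to 256 on output (a sketch with toy sizes):
+#
+#   attn = Attention(embedding_dim=256, num_heads=8, downsample_rate=2)
+#   q = k = v = torch.randn(1, 10, 256)
+#   out = attn(q, k, v)    # [1, 10, 256]; each of the 8 heads runs at 16 dims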
+
+
+class RoPEAttention(Attention):
+ """Attention with rotary position encoding."""
+
+ def __init__(
+ self,
+ *args,
+ rope_theta=10000.0,
+ # whether to repeat q rope to match k length
+ # this is needed for cross-attention to memories
+ rope_k_repeat=False,
+ feat_sizes=(64, 64), # [w, h] for stride 16 feats at 1024 resolution
+ **kwargs,
+ ):
+ super().__init__(*args, **kwargs)
+
+ self.compute_cis = partial(compute_axial_cis, dim=self.internal_dim // self.num_heads, theta=rope_theta)
+ freqs_cis = self.compute_cis(end_x=feat_sizes[0], end_y=feat_sizes[1])
+ try:
+ import torch_npu
+ has_npu = torch_npu.npu.is_available()
+ except ImportError:
+ has_npu = False
+ if torch.cuda.is_available():
+ freqs_cis = freqs_cis.to("cuda")
+ elif has_npu:
+ freqs_cis = freqs_cis.to("npu")
+ self.freqs_cis = freqs_cis
+ self.rope_k_repeat = rope_k_repeat
+
+ def forward(self, q: Tensor, k: Tensor, v: Tensor, num_k_exclude_rope: int = 0) -> Tensor:
+ # Input projections
+ q = self.q_proj(q)
+ k = self.k_proj(k)
+ v = self.v_proj(v)
+
+ # Separate into heads
+ q = self._separate_heads(q, self.num_heads)
+ k = self._separate_heads(k, self.num_heads)
+ v = self._separate_heads(v, self.num_heads)
+
+ # Apply rotary position encoding
+ w = h = math.sqrt(q.shape[-2])
+ self.freqs_cis = self.freqs_cis.to(q.device)
+ if self.freqs_cis.shape[0] != q.shape[-2]:
+ self.freqs_cis = self.compute_cis(end_x=w, end_y=h).to(q.device)
+ if q.shape[-2] != k.shape[-2]:
+ assert self.rope_k_repeat
+
+ num_k_rope = k.size(-2) - num_k_exclude_rope
+ q, k[:, :, :num_k_rope] = apply_rotary_enc(
+ q,
+ k[:, :, :num_k_rope],
+ freqs_cis=self.freqs_cis,
+ repeat_freqs_k=self.rope_k_repeat,
+ )
+
+ dropout_p = self.dropout_p if self.training else 0.0
+ # Attention
+ out = F.scaled_dot_product_attention(q, k, v, dropout_p=dropout_p)
+
+ out = self._recombine_heads(out)
+ out = self.out_proj(out)
+
+ return out
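+
+# Note on `num_k_exclude_rope` above: when cross-attending to memories, the
+# last `num_k_exclude_rope` key tokens (in SAM 2, the object-pointer tokens,
+# which carry no spatial position) are left un-rotated; rotary encoding is
+# applied only to the spatially arranged memory tokens.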
diff --git a/sam2/modeling/sam2_base.py b/sam2/modeling/sam2_base.py
new file mode 100644
index 0000000000000000000000000000000000000000..02d0202564d0a6552976f667f5fe7a3f095d381f
--- /dev/null
+++ b/sam2/modeling/sam2_base.py
@@ -0,0 +1,882 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+
+import torch
+import torch.distributed
+import torch.nn.functional as F
+from torch.nn.init import trunc_normal_
+
+from sam2.modeling.sam2_utils import MLP, get_1d_sine_pe, select_closest_cond_frames
+from sam2.modeling.sam.mask_decoder import MaskDecoder
+from sam2.modeling.sam.prompt_encoder import PromptEncoder
+from sam2.modeling.sam.transformer import TwoWayTransformer
+
+# a large negative value as a placeholder score for missing objects
+NO_OBJ_SCORE = -1024.0
+
+
+class SAM2Base(torch.nn.Module):
+
+ def __init__(
+ self,
+ image_encoder,
+ memory_attention,
+ memory_encoder,
+ num_maskmem=7, # default 1 input frame + 6 previous frames
+ image_size=512,
+ backbone_stride=16, # stride of the image backbone output
+ sigmoid_scale_for_mem_enc=1.0, # scale factor for mask sigmoid prob
+ sigmoid_bias_for_mem_enc=0.0, # bias factor for mask sigmoid prob
+ # During evaluation, whether to binarize the sigmoid mask logits on interacted frames with clicks
+ binarize_mask_from_pts_for_mem_enc=False,
+ use_mask_input_as_output_without_sam=False, # on frames with mask input, whether to directly output the input mask without using a SAM prompt encoder + mask decoder
+ # The maximum number of conditioning frames to participate in the memory attention (-1 means no limit; if there are more conditioning frames than this limit,
+ # we only cross-attend to the temporally closest `max_cond_frames_in_attn` conditioning frames in the encoder when tracking each frame). This gives the model
+ # a temporal locality when handling a large number of annotated frames (since closer frames should be more important) and also avoids GPU OOM.
+ max_cond_frames_in_attn=-1,
+ # on the first frame, whether to directly add the no-memory embedding to the image feature
+ # (instead of using the transformer encoder)
+ directly_add_no_mem_embed=False,
+ # whether to use high-resolution feature maps in the SAM mask decoder
+ use_high_res_features_in_sam=False,
+ # whether to output multiple (3) masks for the first click on initial conditioning frames
+ multimask_output_in_sam=False,
+ # the minimum and maximum number of clicks to use multimask_output_in_sam (only relevant when `multimask_output_in_sam=True`;
+ # default is 1 for both, meaning that only the first click gives multimask output; also note that a box counts as two points)
+ multimask_min_pt_num=1,
+ multimask_max_pt_num=1,
+ # whether to also use multimask output for tracking (not just for the first click on initial conditioning frames; only relevant when `multimask_output_in_sam=True`)
+ multimask_output_for_tracking=False,
+ # Whether to use multimask tokens for obj ptr; Only relevant when both
+ # use_obj_ptrs_in_encoder=True and multimask_output_for_tracking=True
+ use_multimask_token_for_obj_ptr: bool = False,
+ # whether to use sigmoid to restrict ious prediction to [0-1]
+ iou_prediction_use_sigmoid=False,
+ # The memory bank's temporal stride during evaluation (i.e. the `r` parameter in XMem and Cutie; XMem and Cutie use r=5).
+ # For r>1, the (self.num_maskmem - 1) non-conditioning memory frames consist of
+ # (self.num_maskmem - 2) nearest frames from every r-th frames, plus the last frame.
+ memory_temporal_stride_for_eval=1,
+ # whether to apply non-overlapping constraints on the object masks in the memory encoder during evaluation (to avoid/alleviate superposing masks)
+ non_overlap_masks_for_mem_enc=False,
+ # whether to cross-attend to object pointers from other frames (based on SAM output tokens) in the encoder
+ use_obj_ptrs_in_encoder=False,
+ # the maximum number of object pointers from other frames in encoder cross attention (only relevant when `use_obj_ptrs_in_encoder=True`)
+ max_obj_ptrs_in_encoder=16,
+ # whether to add temporal positional encoding to the object pointers in the encoder (only relevant when `use_obj_ptrs_in_encoder=True`)
+ add_tpos_enc_to_obj_ptrs=True,
+ # whether to add an extra linear projection layer for the temporal positional encoding in the object pointers to avoid potential interference
+ # with spatial positional encoding (only relevant when both `use_obj_ptrs_in_encoder=True` and `add_tpos_enc_to_obj_ptrs=True`)
+ proj_tpos_enc_in_obj_ptrs=False,
+ # whether to use signed distance (instead of unsigned absolute distance) in the temporal positional encoding in the object pointers
+ # (only relevant when both `use_obj_ptrs_in_encoder=True` and `add_tpos_enc_to_obj_ptrs=True`)
+ use_signed_tpos_enc_to_obj_ptrs=False,
+ # whether to only attend to object pointers in the past (before the current frame) in the encoder during evaluation
+ # (only relevant when `use_obj_ptrs_in_encoder=True`; this might avoid pointer information too far in the future to distract the initial tracking)
+ only_obj_ptrs_in_the_past_for_eval=False,
+ # Whether to predict if there is an object in the frame
+ pred_obj_scores: bool = False,
+ # Whether to use an MLP to predict object scores
+ pred_obj_scores_mlp: bool = False,
+ # Only relevant if pred_obj_scores=True and use_obj_ptrs_in_encoder=True;
+ # Whether to have a fixed no obj pointer when there is no object present
+ # or to use it as an additive embedding with obj_ptr produced by decoder
+ fixed_no_obj_ptr: bool = False,
+ # Soft no object, i.e. mix in no_obj_ptr softly,
+ # hope to make recovery easier if there is a mistake and mitigate accumulation of errors
+ soft_no_obj_ptr: bool = False,
+ use_mlp_for_obj_ptr_proj: bool = False,
+ # add no obj embedding to spatial frames
+ no_obj_embed_spatial: bool = False,
+ # extra arguments used to construct the SAM mask decoder; if not None, it should be a dict of kwargs to be passed into `MaskDecoder` class.
+ sam_mask_decoder_extra_args=None,
+ compile_image_encoder: bool = False,
+ **kwargs,
+ ):
+ super().__init__()
+
+ # Part 1: the image backbone
+ self.image_encoder = image_encoder
+ # Use level 0, 1, 2 for high-res setting, or just level 2 for the default setting
+ self.use_high_res_features_in_sam = use_high_res_features_in_sam
+ self.num_feature_levels = 3 if use_high_res_features_in_sam else 1
+ self.use_obj_ptrs_in_encoder = use_obj_ptrs_in_encoder
+ self.max_obj_ptrs_in_encoder = max_obj_ptrs_in_encoder
+ if use_obj_ptrs_in_encoder:
+ # A conv layer to downsample the mask prompt to stride 4 (the same stride as
+ # low-res SAM mask logits) and to change its scales from 0~1 to SAM logit scale,
+ # so that it can be fed into the SAM mask decoder to generate a pointer.
+ self.mask_downsample = torch.nn.Conv2d(1, 1, kernel_size=4, stride=4)
+ self.add_tpos_enc_to_obj_ptrs = add_tpos_enc_to_obj_ptrs
+ if proj_tpos_enc_in_obj_ptrs:
+ assert add_tpos_enc_to_obj_ptrs # these options need to be used together
+ self.proj_tpos_enc_in_obj_ptrs = proj_tpos_enc_in_obj_ptrs
+ self.use_signed_tpos_enc_to_obj_ptrs = use_signed_tpos_enc_to_obj_ptrs
+ self.only_obj_ptrs_in_the_past_for_eval = only_obj_ptrs_in_the_past_for_eval
+
+ # Part 2: memory attention to condition current frame's visual features
+ # with memories (and obj ptrs) from past frames
+ self.memory_attention = memory_attention
+ self.hidden_dim = image_encoder.neck.d_model
+
+ # Part 3: memory encoder for the previous frame's outputs
+ self.memory_encoder = memory_encoder
+ self.mem_dim = self.hidden_dim
+ if hasattr(self.memory_encoder, "out_proj") and hasattr(self.memory_encoder.out_proj, "weight"):
+ # if there is compression of memories along channel dim
+ self.mem_dim = self.memory_encoder.out_proj.weight.shape[0]
+ self.num_maskmem = num_maskmem # Number of memories accessible
+ # Temporal encoding of the memories
+ self.maskmem_tpos_enc = torch.nn.Parameter(torch.zeros(num_maskmem, 1, 1, self.mem_dim))
+ trunc_normal_(self.maskmem_tpos_enc, std=0.02)
+ # a single token to indicate no memory embedding from previous frames
+ self.no_mem_embed = torch.nn.Parameter(torch.zeros(1, 1, self.hidden_dim))
+ self.no_mem_pos_enc = torch.nn.Parameter(torch.zeros(1, 1, self.hidden_dim))
+ trunc_normal_(self.no_mem_embed, std=0.02)
+ trunc_normal_(self.no_mem_pos_enc, std=0.02)
+ self.directly_add_no_mem_embed = directly_add_no_mem_embed
+ # Apply sigmoid to the output raw mask logits (to turn them from
+ # range (-inf, +inf) to range (0, 1)) before feeding them into the memory encoder
+ self.sigmoid_scale_for_mem_enc = sigmoid_scale_for_mem_enc
+ self.sigmoid_bias_for_mem_enc = sigmoid_bias_for_mem_enc
+ self.binarize_mask_from_pts_for_mem_enc = binarize_mask_from_pts_for_mem_enc
+ self.non_overlap_masks_for_mem_enc = non_overlap_masks_for_mem_enc
+ self.memory_temporal_stride_for_eval = memory_temporal_stride_for_eval
+ # On frames with mask input, whether to directly output the input mask without
+ # using a SAM prompt encoder + mask decoder
+ self.use_mask_input_as_output_without_sam = use_mask_input_as_output_without_sam
+ self.multimask_output_in_sam = multimask_output_in_sam
+ self.multimask_min_pt_num = multimask_min_pt_num
+ self.multimask_max_pt_num = multimask_max_pt_num
+ self.multimask_output_for_tracking = multimask_output_for_tracking
+ self.use_multimask_token_for_obj_ptr = use_multimask_token_for_obj_ptr
+ self.iou_prediction_use_sigmoid = iou_prediction_use_sigmoid
+
+ # Part 4: SAM-style prompt encoder (for both mask and point inputs)
+ # and SAM-style mask decoder for the final mask output
+ self.image_size = image_size
+ self.backbone_stride = backbone_stride
+ self.sam_mask_decoder_extra_args = sam_mask_decoder_extra_args
+ self.pred_obj_scores = pred_obj_scores
+ self.pred_obj_scores_mlp = pred_obj_scores_mlp
+ self.fixed_no_obj_ptr = fixed_no_obj_ptr
+ self.soft_no_obj_ptr = soft_no_obj_ptr
+ if self.fixed_no_obj_ptr:
+ assert self.pred_obj_scores
+ assert self.use_obj_ptrs_in_encoder
+ if self.pred_obj_scores and self.use_obj_ptrs_in_encoder:
+ self.no_obj_ptr = torch.nn.Parameter(torch.zeros(1, self.hidden_dim))
+ trunc_normal_(self.no_obj_ptr, std=0.02)
+ self.use_mlp_for_obj_ptr_proj = use_mlp_for_obj_ptr_proj
+ self.no_obj_embed_spatial = None
+ if no_obj_embed_spatial:
+ self.no_obj_embed_spatial = torch.nn.Parameter(torch.zeros(1, self.mem_dim))
+ trunc_normal_(self.no_obj_embed_spatial, std=0.02)
+
+ self._build_sam_heads()
+ self.max_cond_frames_in_attn = max_cond_frames_in_attn
+
+ # Model compilation
+ if compile_image_encoder:
+ # Compile the forward function (not the full module) to allow loading checkpoints.
+ print("Image encoder compilation is enabled. First forward pass will be slow.")
+ self.image_encoder.forward = torch.compile(
+ self.image_encoder.forward,
+ mode="max-autotune",
+ fullgraph=True,
+ dynamic=False,
+ )
+
+ @property
+ def device(self):
+ return next(self.parameters()).device
+
+ def forward(self, *args, **kwargs):
+ raise NotImplementedError(
+ "Please use the corresponding methods in SAM2VideoPredictor for inference or SAM2Train for training/fine-tuning"
+ "See notebooks/video_predictor_example.ipynb for an inference example.")
+
+ def _build_sam_heads(self):
+ """Build SAM-style prompt encoder and mask decoder."""
+ self.sam_prompt_embed_dim = self.hidden_dim
+ self.sam_image_embedding_size = self.image_size // self.backbone_stride
+
+ # build PromptEncoder and MaskDecoder from SAM
+ # (their hyperparameters like `mask_in_chans=16` are from SAM code)
+ self.sam_prompt_encoder = PromptEncoder(
+ embed_dim=self.sam_prompt_embed_dim,
+ image_embedding_size=(
+ self.sam_image_embedding_size,
+ self.sam_image_embedding_size,
+ ),
+ input_image_size=(self.image_size, self.image_size),
+ mask_in_chans=16,
+ )
+ self.sam_mask_decoder = MaskDecoder(
+ num_multimask_outputs=3,
+ transformer=TwoWayTransformer(
+ depth=2,
+ embedding_dim=self.sam_prompt_embed_dim,
+ mlp_dim=2048,
+ num_heads=8,
+ ),
+ transformer_dim=self.sam_prompt_embed_dim,
+ iou_head_depth=3,
+ iou_head_hidden_dim=256,
+ use_high_res_features=self.use_high_res_features_in_sam,
+ iou_prediction_use_sigmoid=self.iou_prediction_use_sigmoid,
+ pred_obj_scores=self.pred_obj_scores,
+ pred_obj_scores_mlp=self.pred_obj_scores_mlp,
+ use_multimask_token_for_obj_ptr=self.use_multimask_token_for_obj_ptr,
+ **(self.sam_mask_decoder_extra_args or {}),
+ )
+ if self.use_obj_ptrs_in_encoder:
+ # a linear projection on SAM output tokens to turn them into object pointers
+ self.obj_ptr_proj = torch.nn.Linear(self.hidden_dim, self.hidden_dim)
+ if self.use_mlp_for_obj_ptr_proj:
+ self.obj_ptr_proj = MLP(self.hidden_dim, self.hidden_dim, self.hidden_dim, 3)
+ else:
+ self.obj_ptr_proj = torch.nn.Identity()
+ if self.proj_tpos_enc_in_obj_ptrs:
+ # a linear projection on temporal positional encoding in object pointers to
+ # avoid potential interference with spatial positional encoding
+ self.obj_ptr_tpos_proj = torch.nn.Linear(self.hidden_dim, self.mem_dim)
+ else:
+ self.obj_ptr_tpos_proj = torch.nn.Identity()
+
+ def _forward_sam_heads(
+ self,
+ backbone_features,
+ point_inputs=None,
+ mask_inputs=None,
+ hidden_inputs=None,
+ high_res_features=None,
+ multimask_output=False,
+ ):
+ """
+ Forward SAM prompt encoders and mask heads.
+
+ Inputs:
+ - backbone_features: image features of [B, C, H, W] shape
+ - point_inputs: a dictionary with "point_coords" and "point_labels", where
+ 1) "point_coords" has [B, P, 2] shape and float32 dtype and contains the
+ absolute pixel-unit coordinate in (x, y) format of the P input points
+ 2) "point_labels" has shape [B, P] and int32 dtype, where 1 means
+ positive clicks, 0 means negative clicks, and -1 means padding
+ - mask_inputs: a mask of [B, 1, H*16, W*16] shape, float or bool, with the
+ same spatial size as the image.
+        - high_res_features: either 1) None or 2) a list of length 2 containing
+ two feature maps of [B, C, 4*H, 4*W] and [B, C, 2*H, 2*W] shapes respectively,
+ which will be used as high-resolution feature maps for SAM decoder.
+ - multimask_output: if it's True, we output 3 candidate masks and their 3
+ corresponding IoU estimates, and if it's False, we output only 1 mask and
+ its corresponding IoU estimate.
+
+ Outputs:
+ - low_res_multimasks: [B, M, H*4, W*4] shape (where M = 3 if
+ `multimask_output=True` and M = 1 if `multimask_output=False`), the SAM
+ output mask logits (before sigmoid) for the low-resolution masks, with 4x
+ the resolution (1/4 stride) of the input backbone_features.
+ - high_res_multimasks: [B, M, H*16, W*16] shape (where M = 3
+ if `multimask_output=True` and M = 1 if `multimask_output=False`),
+          upsampled from the low-resolution masks to the same size as the image
+          (stride of 1 pixel).
+        - ious: [B, M] shape (where M = 3 if `multimask_output=True` and M = 1
+          if `multimask_output=False`), the estimated IoU of each output mask.
+ - low_res_masks: [B, 1, H*4, W*4] shape, the best mask in `low_res_multimasks`.
+ If `multimask_output=True`, it's the mask with the highest IoU estimate.
+ If `multimask_output=False`, it's the same as `low_res_multimasks`.
+ - high_res_masks: [B, 1, H*16, W*16] shape, the best mask in `high_res_multimasks`.
+ If `multimask_output=True`, it's the mask with the highest IoU estimate.
+ If `multimask_output=False`, it's the same as `high_res_multimasks`.
+ - obj_ptr: [B, C] shape, the object pointer vector for the output mask, extracted
+ based on the output token from the SAM mask decoder.
+ """
+ B = backbone_features.size(0)
+ device = backbone_features.device
+ assert backbone_features.size(1) == self.sam_prompt_embed_dim
+ assert backbone_features.size(2) == self.sam_image_embedding_size
+ assert backbone_features.size(3) == self.sam_image_embedding_size
+
+ # a) Handle point prompts
+ if point_inputs is not None:
+ sam_point_coords = point_inputs["point_coords"]
+ sam_point_labels = point_inputs["point_labels"]
+ assert sam_point_coords.size(0) == B and sam_point_labels.size(0) == B
+ else:
+            # If no points are provided, pad with an empty point (with label -1)
+ sam_point_coords = torch.zeros(B, 1, 2, device=device)
+ sam_point_labels = -torch.ones(B, 1, dtype=torch.int32, device=device)
+
+ # b) Handle mask prompts
+ if mask_inputs is not None:
+ # If mask_inputs is provided, downsize it into low-res mask input if needed
+ # and feed it as a dense mask prompt into the SAM mask encoder
+ assert len(mask_inputs.shape) == 4 and mask_inputs.shape[:2] == (B, 1)
+ if mask_inputs.shape[-2:] != self.sam_prompt_encoder.mask_input_size:
+ sam_mask_prompt = F.interpolate(
+ mask_inputs.float(),
+ size=self.sam_prompt_encoder.mask_input_size,
+ align_corners=False,
+ mode="bilinear",
+ antialias=True, # use antialias for downsampling
+ )
+ else:
+ sam_mask_prompt = mask_inputs
+ else:
+ # Otherwise, simply feed None (and SAM's prompt encoder will add
+ # a learned `no_mask_embed` to indicate no mask input in this case).
+ sam_mask_prompt = None
+
+ sparse_embeddings, dense_embeddings = self.sam_prompt_encoder(
+ points=(sam_point_coords, sam_point_labels),
+ boxes=None,
+ masks=sam_mask_prompt,
+ hidden=hidden_inputs,
+ )
+ (
+ low_res_multimasks,
+ ious,
+ sam_output_tokens,
+ object_score_logits,
+ ) = self.sam_mask_decoder(
+ image_embeddings=backbone_features,
+ image_pe=self.sam_prompt_encoder.get_dense_pe(),
+ sparse_prompt_embeddings=sparse_embeddings,
+ dense_prompt_embeddings=dense_embeddings,
+ multimask_output=multimask_output,
+ repeat_image=False, # the image is already batched
+ high_res_features=high_res_features,
+ )
+ if self.pred_obj_scores:
+ is_obj_appearing = object_score_logits > 0
+
+ # Mask used for spatial memories is always a *hard* choice between obj and no obj,
+ # consistent with the actual mask prediction
+            # NOTE: this masking is only applied during inference,
+            # gated by the `inference_mode` attribute below
+ if getattr(self, 'inference_mode', False):
+ low_res_multimasks = torch.where(
+ is_obj_appearing[:, None, None],
+ low_res_multimasks,
+ NO_OBJ_SCORE,
+ )
+
+        # upsample in float32 (bilinear interpolation may be unsupported or
+        # inaccurate in bfloat16/float16), then cast back to the input dtype
+ high_res_multimasks = F.interpolate(
+ low_res_multimasks.float(),
+ size=(self.image_size, self.image_size),
+ mode="bilinear",
+ align_corners=False,
+ ).to(low_res_multimasks.dtype)
+
+ sam_output_token = sam_output_tokens[:, 0]
+ if multimask_output:
+ # take the best mask prediction (with the highest IoU estimation)
+ best_iou_inds = torch.argmax(ious, dim=-1)
+ batch_inds = torch.arange(B, device=device)
+ low_res_masks = low_res_multimasks[batch_inds, best_iou_inds].unsqueeze(1)
+ high_res_masks = high_res_multimasks[batch_inds, best_iou_inds].unsqueeze(1)
+ if sam_output_tokens.size(1) > 1:
+ sam_output_token = sam_output_tokens[batch_inds, best_iou_inds]
+ else:
+ low_res_masks, high_res_masks = low_res_multimasks, high_res_multimasks
+
+ # Extract object pointer from the SAM output token (with occlusion handling)
+ obj_ptr = self.obj_ptr_proj(sam_output_token)
+ if self.pred_obj_scores:
+ # Allow *soft* no obj ptr, unlike for masks
+ if self.soft_no_obj_ptr:
+ lambda_is_obj_appearing = object_score_logits.sigmoid()
+ else:
+ lambda_is_obj_appearing = is_obj_appearing.to(object_score_logits.dtype)
+
+ if self.fixed_no_obj_ptr:
+ obj_ptr = lambda_is_obj_appearing * obj_ptr
+ obj_ptr = obj_ptr + (1 - lambda_is_obj_appearing) * self.no_obj_ptr
+
+ return (
+ low_res_multimasks,
+ high_res_multimasks,
+ ious,
+ low_res_masks,
+ high_res_masks,
+ obj_ptr,
+ object_score_logits,
+ )
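+
+    # A sketch of the multimask selection above (assumption: toy values).
+    # With M = 3 candidate masks and per-mask IoU estimates, the head keeps
+    # the highest-scoring candidate per batch element:
+    #
+    #   ious = torch.tensor([[0.2, 0.9, 0.5]])          # [B=1, M=3]
+    #   best_iou_inds = torch.argmax(ious, dim=-1)      # tensor([1])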
+
+ def _use_mask_as_output(self, backbone_features, high_res_features, mask_inputs):
+ """
+        Directly turn binary `mask_inputs` into output mask logits without using SAM.
+ (same input and output shapes as in _forward_sam_heads above).
+ """
+ # Use -10/+10 as logits for neg/pos pixels (very close to 0/1 in prob after sigmoid).
+ out_scale, out_bias = 20.0, -10.0 # sigmoid(-10.0)=4.5398e-05
+ mask_inputs_float = mask_inputs.float()
+ high_res_masks = mask_inputs_float * out_scale + out_bias
+ low_res_masks = F.interpolate(
+ high_res_masks,
+ size=(high_res_masks.size(-2) // 4, high_res_masks.size(-1) // 4),
+ align_corners=False,
+ mode="bilinear",
+ antialias=True, # use antialias for downsampling
+ )
+ # a dummy IoU prediction of all 1's under mask input
+ ious = mask_inputs.new_ones(mask_inputs.size(0), 1).float()
+ if not self.use_obj_ptrs_in_encoder:
+ # all zeros as a dummy object pointer (of shape [B, C])
+ obj_ptr = torch.zeros(mask_inputs.size(0), self.hidden_dim, device=mask_inputs.device)
+ else:
+ # produce an object pointer using the SAM decoder from the mask input
+ _, _, _, _, _, obj_ptr, _ = self._forward_sam_heads(
+ backbone_features=backbone_features,
+ mask_inputs=self.mask_downsample(mask_inputs_float),
+ high_res_features=high_res_features,
+ )
+        # In this method, we treat mask_input as the output, e.g. using it directly to create the spatial memory;
+        # below, we follow the same principle and use mask_input to decide whether the object appears or not,
+        # instead of relying on the object_scores from the SAM decoder.
+ is_obj_appearing = torch.any(mask_inputs.flatten(1).float() > 0.0, dim=1)
+ is_obj_appearing = is_obj_appearing[..., None]
+ lambda_is_obj_appearing = is_obj_appearing.float()
+ object_score_logits = out_scale * lambda_is_obj_appearing + out_bias
+ if self.pred_obj_scores:
+ if self.fixed_no_obj_ptr:
+ obj_ptr = lambda_is_obj_appearing * obj_ptr
+ obj_ptr = obj_ptr + (1 - lambda_is_obj_appearing) * self.no_obj_ptr
+
+ return (
+ low_res_masks,
+ high_res_masks,
+ ious,
+ low_res_masks,
+ high_res_masks,
+ obj_ptr,
+ object_score_logits,
+ )
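+
+    # Worked example of the logit mapping above: with out_scale=20.0 and
+    # out_bias=-10.0, an input pixel of 0 maps to logit -10 (sigmoid ~ 4.5e-05)
+    # and a pixel of 1 maps to logit +10 (sigmoid ~ 0.99995), i.e. near-hard
+    # 0/1 probabilities after sigmoid.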
+
+ def forward_image(self, img_batch: torch.Tensor):
+ """Get the image feature on the input batch."""
+ backbone_out = self.image_encoder(img_batch)
+ if self.use_high_res_features_in_sam:
+ # precompute projected level 0 and level 1 features in SAM decoder
+ # to avoid running it again on every SAM click
+ backbone_out["backbone_fpn"][0] = self.sam_mask_decoder.conv_s0(backbone_out["backbone_fpn"][0])
+ backbone_out["backbone_fpn"][1] = self.sam_mask_decoder.conv_s1(backbone_out["backbone_fpn"][1])
+ return backbone_out
+
+ def _prepare_backbone_features(self, backbone_out):
+ """Prepare and flatten visual features."""
+ backbone_out = backbone_out.copy()
+ assert len(backbone_out["backbone_fpn"]) == len(backbone_out["vision_pos_enc"])
+ assert len(backbone_out["backbone_fpn"]) >= self.num_feature_levels
+
+ feature_maps = backbone_out["backbone_fpn"][-self.num_feature_levels:]
+ vision_pos_embeds = backbone_out["vision_pos_enc"][-self.num_feature_levels:]
+
+ feat_sizes = [(x.shape[-2], x.shape[-1]) for x in vision_pos_embeds]
+ # flatten NxCxHxW to HWxNxC
+ vision_feats = [x.flatten(2).permute(2, 0, 1) for x in feature_maps]
+ vision_pos_embeds = [x.flatten(2).permute(2, 0, 1) for x in vision_pos_embeds]
+
+ return backbone_out, vision_feats, vision_pos_embeds, feat_sizes
+
+ def _prepare_memory_conditioned_features(
+ self,
+ frame_idx,
+ is_init_cond_frame,
+ current_vision_feats,
+ current_vision_pos_embeds,
+ feat_sizes,
+ output_dict,
+ num_frames,
+ track_in_reverse=False, # tracking in reverse time order (for demo usage)
+ ):
+ """Fuse the current frame's visual feature map with previous memory."""
+ B = current_vision_feats[-1].size(1) # batch size on this frame
+ C = self.hidden_dim
+ H, W = feat_sizes[-1] # top-level (lowest-resolution) feature size
+ device = current_vision_feats[-1].device
+ # The case of `self.num_maskmem == 0` below is primarily used for reproducing SAM on images.
+ # In this case, we skip the fusion with any memory.
+ if self.num_maskmem == 0: # Disable memory and skip fusion
+ pix_feat = current_vision_feats[-1].permute(1, 2, 0).view(B, C, H, W)
+ return pix_feat
+
+ num_obj_ptr_tokens = 0
+ tpos_sign_mul = -1 if track_in_reverse else 1
+ # Step 1: condition the visual features of the current frame on previous memories
+ if not is_init_cond_frame:
+ # Retrieve the memories encoded with the maskmem backbone
+ to_cat_memory, to_cat_memory_pos_embed = [], []
+            # Add the conditioning frames' outputs first (all cond frames have t_pos=0
+            # when getting the temporal positional embedding below)
+ assert len(output_dict["cond_frame_outputs"]) > 0
+ # Select a maximum number of temporally closest cond frames for cross attention
+ cond_outputs = output_dict["cond_frame_outputs"]
+ selected_cond_outputs, unselected_cond_outputs = select_closest_cond_frames(
+ frame_idx, cond_outputs, self.max_cond_frames_in_attn)
+ t_pos_and_prevs = [(0, out) for out in selected_cond_outputs.values()]
+ # Add last (self.num_maskmem - 1) frames before current frame for non-conditioning memory
+ # the earliest one has t_pos=1 and the latest one has t_pos=self.num_maskmem-1
+ # We also allow taking the memory frame non-consecutively (with stride>1), in which case
+ # we take (self.num_maskmem - 2) frames among every stride-th frames plus the last frame.
+ stride = 1 if self.training else self.memory_temporal_stride_for_eval
+ for t_pos in range(1, self.num_maskmem):
+ t_rel = self.num_maskmem - t_pos # how many frames before current frame
+ if t_rel == 1:
+ # for t_rel == 1, we take the last frame (regardless of r)
+ if not track_in_reverse:
+ # the frame immediately before this frame (i.e. frame_idx - 1)
+ prev_frame_idx = frame_idx - t_rel
+ else:
+ # the frame immediately after this frame (i.e. frame_idx + 1)
+ prev_frame_idx = frame_idx + t_rel
+ else:
+ # for t_rel >= 2, we take the memory frame from every r-th frames
+ if not track_in_reverse:
+ # first find the nearest frame among every r-th frames before this frame
+ # for r=1, this would be (frame_idx - 2)
+ prev_frame_idx = ((frame_idx - 2) // stride) * stride
+ # then seek further among every r-th frames
+ prev_frame_idx = prev_frame_idx - (t_rel - 2) * stride
+ else:
+ # first find the nearest frame among every r-th frames after this frame
+ # for r=1, this would be (frame_idx + 2)
+ prev_frame_idx = -(-(frame_idx + 2) // stride) * stride
+ # then seek further among every r-th frames
+ prev_frame_idx = prev_frame_idx + (t_rel - 2) * stride
+ out = output_dict["non_cond_frame_outputs"].get(prev_frame_idx, None)
+ if out is None:
+ # If an unselected conditioning frame is among the last (self.num_maskmem - 1)
+ # frames, we still attend to it as if it's a non-conditioning frame.
+ out = unselected_cond_outputs.get(prev_frame_idx, None)
+ t_pos_and_prevs.append((t_pos, out))
+
+ for t_pos, prev in t_pos_and_prevs:
+ if prev is None:
+ continue # skip padding frames
+ # "maskmem_features" might have been offloaded to CPU in demo use cases,
+ # so we load it back to GPU (it's a no-op if it's already on GPU).
+ feats = prev["maskmem_features"].to(device, non_blocking=True)
+ to_cat_memory.append(feats.flatten(2).permute(2, 0, 1))
+ # Spatial positional encoding (it might have been offloaded to CPU in eval)
+ maskmem_enc = prev["maskmem_pos_enc"][-1].to(device)
+ maskmem_enc = maskmem_enc.flatten(2).permute(2, 0, 1)
+ # Temporal positional encoding
+ maskmem_enc = (maskmem_enc + self.maskmem_tpos_enc[self.num_maskmem - t_pos - 1])
+ to_cat_memory_pos_embed.append(maskmem_enc)
+
+ # Construct the list of past object pointers
+ if self.use_obj_ptrs_in_encoder:
+ max_obj_ptrs_in_encoder = min(num_frames, self.max_obj_ptrs_in_encoder)
+ # First add those object pointers from selected conditioning frames
+ # (optionally, only include object pointers in the past during evaluation)
+ if not self.training and self.only_obj_ptrs_in_the_past_for_eval:
+ ptr_cond_outputs = {
+ t: out
+ for t, out in selected_cond_outputs.items()
+ if (t >= frame_idx if track_in_reverse else t <= frame_idx)
+ }
+ else:
+ ptr_cond_outputs = selected_cond_outputs
+ pos_and_ptrs = [
+ # Temporal pos encoding contains how far away each pointer is from current frame
+ (
+                    ((frame_idx - t) * tpos_sign_mul
+                     if self.use_signed_tpos_enc_to_obj_ptrs else abs(frame_idx - t)),
+ out["obj_ptr"],
+ ) for t, out in ptr_cond_outputs.items()
+ ]
+ # Add up to (max_obj_ptrs_in_encoder - 1) non-conditioning frames before current frame
+ for t_diff in range(1, max_obj_ptrs_in_encoder):
+ t = frame_idx + t_diff if track_in_reverse else frame_idx - t_diff
+ if t < 0 or (num_frames is not None and t >= num_frames):
+ break
+ out = output_dict["non_cond_frame_outputs"].get(t, unselected_cond_outputs.get(t, None))
+ if out is not None:
+ pos_and_ptrs.append((t_diff, out["obj_ptr"]))
+                # If we have at least one object pointer, add them to the cross attention
+ if len(pos_and_ptrs) > 0:
+ pos_list, ptrs_list = zip(*pos_and_ptrs)
+ # stack object pointers along dim=0 into [ptr_seq_len, B, C] shape
+ obj_ptrs = torch.stack(ptrs_list, dim=0)
+ # a temporal positional embedding based on how far each object pointer is from
+ # the current frame (sine embedding normalized by the max pointer num).
+ if self.add_tpos_enc_to_obj_ptrs:
+ t_diff_max = max_obj_ptrs_in_encoder - 1
+ tpos_dim = C if self.proj_tpos_enc_in_obj_ptrs else self.mem_dim
+ obj_pos = torch.tensor(pos_list).to(device=device, non_blocking=True)
+ obj_pos = get_1d_sine_pe(obj_pos / t_diff_max, dim=tpos_dim)
+ obj_pos = self.obj_ptr_tpos_proj(obj_pos.to(self.obj_ptr_tpos_proj.weight.dtype))
+ obj_pos = obj_pos.unsqueeze(1).expand(-1, B, self.mem_dim)
+ else:
+ obj_pos = obj_ptrs.new_zeros(len(pos_list), B, self.mem_dim)
+ if self.mem_dim < C:
+ # split a pointer into (C // self.mem_dim) tokens for self.mem_dim < C
+ obj_ptrs = obj_ptrs.reshape(-1, B, C // self.mem_dim, self.mem_dim)
+ obj_ptrs = obj_ptrs.permute(0, 2, 1, 3).flatten(0, 1)
+ obj_pos = obj_pos.repeat_interleave(C // self.mem_dim, dim=0)
+ to_cat_memory.append(obj_ptrs)
+ to_cat_memory_pos_embed.append(obj_pos)
+ num_obj_ptr_tokens = obj_ptrs.shape[0]
+ else:
+ num_obj_ptr_tokens = 0
+ else:
+ # for initial conditioning frames, encode them without using any previous memory
+ if self.directly_add_no_mem_embed:
+ # directly add no-mem embedding (instead of using the transformer encoder)
+ pix_feat_with_mem = current_vision_feats[-1] + self.no_mem_embed
+ pix_feat_with_mem = pix_feat_with_mem.permute(1, 2, 0).view(B, C, H, W)
+ return pix_feat_with_mem
+
+        # Use a dummy token on the first frame (to avoid empty memory input to transformer encoder)
+ to_cat_memory = [self.no_mem_embed.expand(1, B, self.mem_dim)]
+ to_cat_memory_pos_embed = [self.no_mem_pos_enc.expand(1, B, self.mem_dim)]
+
+ # Step 2: Concatenate the memories and forward through the transformer encoder
+ memory = torch.cat(to_cat_memory, dim=0)
+ memory_pos_embed = torch.cat(to_cat_memory_pos_embed, dim=0)
+
+ pix_feat_with_mem = self.memory_attention(
+ curr=current_vision_feats,
+ curr_pos=current_vision_pos_embeds,
+ memory=memory,
+ memory_pos=memory_pos_embed,
+ num_obj_ptr_tokens=num_obj_ptr_tokens,
+ )
+ # reshape the output (HW)BC => BCHW
+ pix_feat_with_mem = pix_feat_with_mem.permute(1, 2, 0).view(B, C, H, W)
+ return pix_feat_with_mem
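+
+    # A worked example of the memory-frame selection above (assumption:
+    # illustrative numbers). With num_maskmem=7, stride r=2, frame_idx=20 and
+    # forward tracking, the non-conditioning memories come from:
+    #   t_rel=1 -> frame 19 (always the immediately previous frame)
+    #   t_rel=2 -> ((20 - 2) // 2) * 2 = 18
+    #   t_rel=3..6 -> 16, 14, 12, 10 (stepping back by the stride)
+    # i.e. the last frame plus (num_maskmem - 2) frames sampled every r-th frame.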
+
+ def _encode_new_memory(
+ self,
+ current_vision_feats,
+ feat_sizes,
+ pred_masks_high_res,
+ object_score_logits,
+ is_mask_from_pts,
+ ):
+ """Encode the current image and its prediction into a memory feature."""
+ B = current_vision_feats[-1].size(1) # batch size on this frame
+ C = self.hidden_dim
+ H, W = feat_sizes[-1] # top-level (lowest-resolution) feature size
+ # top-level feature, (HW)BC => BCHW
+ pix_feat = current_vision_feats[-1].permute(1, 2, 0).view(B, C, H, W)
+ if self.non_overlap_masks_for_mem_enc and not self.training:
+ # optionally, apply non-overlapping constraints to the masks (it's applied
+ # in the batch dimension and should only be used during eval, where all
+ # the objects come from the same video under batch size 1).
+ pred_masks_high_res = self._apply_non_overlapping_constraints(pred_masks_high_res)
+ # scale the raw mask logits with a temperature before applying sigmoid
+ binarize = self.binarize_mask_from_pts_for_mem_enc and is_mask_from_pts
+ if binarize and not self.training:
+ mask_for_mem = (pred_masks_high_res > 0).to(pred_masks_high_res.dtype)
+ else:
+ # apply sigmoid on the raw mask logits to turn them into range (0, 1)
+ mask_for_mem = torch.sigmoid(pred_masks_high_res)
+ # apply scale and bias terms to the sigmoid probabilities
+ if self.sigmoid_scale_for_mem_enc != 1.0:
+ mask_for_mem = mask_for_mem * self.sigmoid_scale_for_mem_enc
+ if self.sigmoid_bias_for_mem_enc != 0.0:
+ mask_for_mem = mask_for_mem + self.sigmoid_bias_for_mem_enc
+ maskmem_out = self.memory_encoder(
+ pix_feat,
+ mask_for_mem,
+ skip_mask_sigmoid=True # sigmoid already applied
+ )
+ maskmem_features = maskmem_out["vision_features"]
+ maskmem_pos_enc = maskmem_out["vision_pos_enc"]
+ # add a no-object embedding to the spatial memory to indicate that the frame
+ # is predicted to be occluded (i.e. no object is appearing in the frame)
+ if self.no_obj_embed_spatial is not None:
+ is_obj_appearing = (object_score_logits > 0).to(object_score_logits.dtype)
+            maskmem_features += (
+                1 - is_obj_appearing[..., None, None]
+            ) * self.no_obj_embed_spatial[..., None, None].expand(*maskmem_features.shape)
+
+ return maskmem_features, maskmem_pos_enc
+
+ def _track_step(
+ self,
+ frame_idx,
+ is_init_cond_frame,
+ current_vision_feats,
+ current_vision_pos_embeds,
+ feat_sizes,
+ point_inputs,
+ mask_inputs,
+ hidden_inputs,
+ output_dict,
+ num_frames,
+ track_in_reverse,
+ prev_sam_mask_logits,
+ ):
+ current_out = {"point_inputs": point_inputs, "mask_inputs": mask_inputs, "hidden_inputs": hidden_inputs}
+ # High-resolution feature maps for the SAM head, reshape (HW)BC => BCHW
+ if len(current_vision_feats) > 1:
+ high_res_features = [
+ x.permute(1, 2, 0).view(x.size(1), x.size(2), *s)
+ for x, s in zip(current_vision_feats[:-1], feat_sizes[:-1])
+ ]
+ else:
+ high_res_features = None
+ if mask_inputs is not None and self.use_mask_input_as_output_without_sam:
+ # When use_mask_input_as_output_without_sam=True, we directly output the mask input
+ # (see it as a GT mask) without using a SAM prompt encoder + mask decoder.
+ pix_feat = current_vision_feats[-1].permute(1, 2, 0)
+ pix_feat = pix_feat.view(-1, self.hidden_dim, *feat_sizes[-1])
+ sam_outputs = self._use_mask_as_output(pix_feat, high_res_features, mask_inputs)
+ else:
+            # fuse the visual feature with previous memory features in the memory bank
+ pix_feat = self._prepare_memory_conditioned_features(
+ frame_idx=frame_idx,
+ is_init_cond_frame=is_init_cond_frame,
+ current_vision_feats=current_vision_feats[-1:],
+ current_vision_pos_embeds=current_vision_pos_embeds[-1:],
+ feat_sizes=feat_sizes[-1:],
+ output_dict=output_dict,
+ num_frames=num_frames,
+ track_in_reverse=track_in_reverse,
+ )
+ # apply SAM-style segmentation head
+ # here we might feed previously predicted low-res SAM mask logits into the SAM mask decoder,
+ # e.g. in demo where such logits come from earlier interaction instead of correction sampling
+ # (in this case, any `mask_inputs` shouldn't reach here as they are sent to _use_mask_as_output instead)
+ if prev_sam_mask_logits is not None:
+ assert point_inputs is not None and mask_inputs is None
+ mask_inputs = prev_sam_mask_logits
+ multimask_output = self._use_multimask(is_init_cond_frame, point_inputs)
+ sam_outputs = self._forward_sam_heads(
+ backbone_features=pix_feat,
+ point_inputs=point_inputs,
+ mask_inputs=mask_inputs,
+ hidden_inputs=hidden_inputs,
+ high_res_features=high_res_features,
+ multimask_output=multimask_output,
+ )
+
+ return current_out, sam_outputs, high_res_features, pix_feat
+
+ def _encode_memory_in_output(
+ self,
+ current_vision_feats,
+ feat_sizes,
+ point_inputs,
+ run_mem_encoder,
+ high_res_masks,
+ object_score_logits,
+ current_out,
+ ):
+ if run_mem_encoder and self.num_maskmem > 0:
+ high_res_masks_for_mem_enc = high_res_masks
+ maskmem_features, maskmem_pos_enc = self._encode_new_memory(
+ current_vision_feats=current_vision_feats,
+ feat_sizes=feat_sizes,
+ pred_masks_high_res=high_res_masks_for_mem_enc,
+ object_score_logits=object_score_logits,
+ is_mask_from_pts=(point_inputs is not None),
+ )
+ current_out["maskmem_features"] = maskmem_features
+ current_out["maskmem_pos_enc"] = maskmem_pos_enc
+ else:
+ current_out["maskmem_features"] = None
+ current_out["maskmem_pos_enc"] = None
+
+ def track_step(
+ self,
+ frame_idx,
+ is_init_cond_frame,
+ current_vision_feats,
+ current_vision_pos_embeds,
+ feat_sizes,
+ point_inputs,
+ mask_inputs,
+ hidden_inputs,
+ output_dict,
+ num_frames,
+ track_in_reverse=False, # tracking in reverse time order (for demo usage)
+ # Whether to run the memory encoder on the predicted masks. Sometimes we might want
+ # to skip the memory encoder with `run_mem_encoder=False`. For example,
+ # in demo we might call `track_step` multiple times for each user click,
+ # and only encode the memory when the user finalizes their clicks. And in ablation
+ # settings like SAM training on static images, we don't need the memory encoder.
+ run_mem_encoder=True,
+ # The previously predicted SAM mask logits (which can be fed together with new clicks in demo).
+ prev_sam_mask_logits=None,
+ ):
+ current_out, sam_outputs, _, _ = self._track_step(
+ frame_idx,
+ is_init_cond_frame,
+ current_vision_feats,
+ current_vision_pos_embeds,
+ feat_sizes,
+ point_inputs,
+ mask_inputs,
+ hidden_inputs,
+ output_dict,
+ num_frames,
+ track_in_reverse,
+ prev_sam_mask_logits,
+ )
+
+ (
+ _,
+ _,
+ _,
+ low_res_masks,
+ high_res_masks,
+ obj_ptr,
+ object_score_logits,
+ ) = sam_outputs
+
+ current_out["pred_masks"] = low_res_masks
+ current_out["pred_masks_high_res"] = high_res_masks
+ current_out["obj_ptr"] = obj_ptr
+ if not self.training:
+ # Only add this in inference (to avoid unused param in activation checkpointing;
+ # it's mainly used in the demo to encode spatial memories w/ consolidated masks)
+ current_out["object_score_logits"] = object_score_logits
+
+ # Finally run the memory encoder on the predicted mask to encode
+ # it into a new memory feature (that can be used in future frames)
+ self._encode_memory_in_output(
+ current_vision_feats,
+ feat_sizes,
+ point_inputs,
+ run_mem_encoder,
+ high_res_masks,
+ object_score_logits,
+ current_out,
+ )
+
+ return current_out
+
+ def _use_multimask(self, is_init_cond_frame, point_inputs):
+ """Whether to use multimask output in the SAM head."""
+ num_pts = 0 if point_inputs is None else point_inputs["point_labels"].size(1)
+ multimask_output = (
+ self.multimask_output_in_sam and (is_init_cond_frame or self.multimask_output_for_tracking)
+ and (self.multimask_min_pt_num <= num_pts <= self.multimask_max_pt_num))
+ return multimask_output
+
+ def _apply_non_overlapping_constraints(self, pred_masks):
+ """
+ Apply non-overlapping constraints to the object scores in pred_masks. Here we
+ keep only the highest scoring object at each spatial location in pred_masks.
+ """
+ batch_size = pred_masks.size(0)
+ if batch_size == 1:
+ return pred_masks
+
+ device = pred_masks.device
+ # "max_obj_inds": object index of the object with the highest score at each location
+ max_obj_inds = torch.argmax(pred_masks, dim=0, keepdim=True)
+ # "batch_obj_inds": object index of each object slice (along dim 0) in `pred_masks`
+ batch_obj_inds = torch.arange(batch_size, device=device)[:, None, None, None]
+ keep = max_obj_inds == batch_obj_inds
+ # suppress overlapping regions' scores below -10.0 so that the foreground regions
+ # don't overlap (here sigmoid(-10.0)=4.5398e-05)
+ pred_masks = torch.where(keep, pred_masks, torch.clamp(pred_masks, max=-10.0))
+ return pred_masks
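+
+    # A sketch of the constraint above (assumption: toy values). For two
+    # objects scoring 2.0 and 5.0 at the same pixel, only the argmax object
+    # keeps its score; the other is clamped to at most -10.0, so the
+    # post-sigmoid foreground regions cannot overlap:
+    #
+    #   pred = torch.tensor([[[[2.0]]], [[[5.0]]]])     # [B=2, 1, 1, 1]
+    #   # after the constraint: tensor([[[[-10.]]], [[[5.]]]])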
diff --git a/sam2/modeling/sam2_utils.py b/sam2/modeling/sam2_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..9baf1472f9319362c46b9e929d20354f27b103ce
--- /dev/null
+++ b/sam2/modeling/sam2_utils.py
@@ -0,0 +1,320 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+
+import copy
+from typing import Tuple
+
+import numpy as np
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+from sam2.utils.misc import mask_to_box
+
+
+def select_closest_cond_frames(frame_idx, cond_frame_outputs, max_cond_frame_num):
+ """
+ Select up to `max_cond_frame_num` conditioning frames from `cond_frame_outputs`
+ that are temporally closest to the current frame at `frame_idx`. Here, we take
+ - a) the closest conditioning frame before `frame_idx` (if any);
+    - b) the closest conditioning frame at or after `frame_idx` (if any);
+ - c) any other temporally closest conditioning frames until reaching a total
+ of `max_cond_frame_num` conditioning frames.
+
+ Outputs:
+ - selected_outputs: selected items (keys & values) from `cond_frame_outputs`.
+ - unselected_outputs: items (keys & values) not selected in `cond_frame_outputs`.
+ """
+ if max_cond_frame_num == -1 or len(cond_frame_outputs) <= max_cond_frame_num:
+ selected_outputs = cond_frame_outputs
+ unselected_outputs = {}
+ else:
+ assert max_cond_frame_num >= 2, "we should allow using 2+ conditioning frames"
+ selected_outputs = {}
+
+ # the closest conditioning frame before `frame_idx` (if any)
+ idx_before = max((t for t in cond_frame_outputs if t < frame_idx), default=None)
+ if idx_before is not None:
+ selected_outputs[idx_before] = cond_frame_outputs[idx_before]
+
+        # the closest conditioning frame at or after `frame_idx` (if any)
+ idx_after = min((t for t in cond_frame_outputs if t >= frame_idx), default=None)
+ if idx_after is not None:
+ selected_outputs[idx_after] = cond_frame_outputs[idx_after]
+
+ # add other temporally closest conditioning frames until reaching a total
+ # of `max_cond_frame_num` conditioning frames.
+ num_remain = max_cond_frame_num - len(selected_outputs)
+ inds_remain = sorted(
+ (t for t in cond_frame_outputs if t not in selected_outputs),
+ key=lambda x: abs(x - frame_idx),
+ )[:num_remain]
+ selected_outputs.update((t, cond_frame_outputs[t]) for t in inds_remain)
+ unselected_outputs = {t: v for t, v in cond_frame_outputs.items() if t not in selected_outputs}
+
+ return selected_outputs, unselected_outputs
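+
+# A worked example (assumption: illustrative inputs). With conditioning frames
+# at t = {0, 5, 12, 30}, frame_idx=10 and max_cond_frame_num=3, the selection
+# keeps t=5 (closest before), t=12 (closest at or after), then t=0 since
+# |0 - 10| < |30 - 10|; t=30 is returned in `unselected_outputs`.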
+
+
+def get_1d_sine_pe(pos_inds, dim, temperature=10000):
+ """
+ Get 1D sine positional embedding as in the original Transformer paper.
+ """
+ pe_dim = dim // 2
+ dim_t = torch.arange(pe_dim, dtype=torch.float32, device=pos_inds.device)
+ dim_t = temperature**(2 * (dim_t // 2) / pe_dim)
+
+ pos_embed = pos_inds.unsqueeze(-1) / dim_t
+ pos_embed = torch.cat([pos_embed.sin(), pos_embed.cos()], dim=-1)
+ return pos_embed
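+
+# A minimal sketch (toy size): the embedding concatenates all sin terms, then
+# all cos terms, along the last dim:
+#
+#   pe = get_1d_sine_pe(torch.tensor([0.0, 1.0]), dim=4)   # shape [2, 4]
+#   # row 0 is [0, 0, 1, 1] because sin(0) = 0 and cos(0) = 1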
+
+
+def get_activation_fn(activation):
+ """Return an activation function given a string"""
+ if activation == "relu":
+ return F.relu
+ if activation == "gelu":
+ return F.gelu
+ if activation == "glu":
+ return F.glu
+    raise RuntimeError(f"activation should be relu/gelu/glu, not {activation}.")
+
+
+def get_clones(module, N):
+ return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
+
+
+class DropPath(nn.Module):
+ # adapted from https://github.com/huggingface/pytorch-image-models/blob/main/timm/layers/drop.py
+ def __init__(self, drop_prob=0.0, scale_by_keep=True):
+ super(DropPath, self).__init__()
+ self.drop_prob = drop_prob
+ self.scale_by_keep = scale_by_keep
+
+ def forward(self, x):
+ if self.drop_prob == 0.0 or not self.training:
+ return x
+ keep_prob = 1 - self.drop_prob
+ shape = (x.shape[0], ) + (1, ) * (x.ndim - 1)
+ random_tensor = x.new_empty(shape).bernoulli_(keep_prob)
+ if keep_prob > 0.0 and self.scale_by_keep:
+ random_tensor.div_(keep_prob)
+ return x * random_tensor
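+
+    # Note: this is stochastic depth applied per sample: each example in the
+    # batch is zeroed with probability `drop_prob`, and the survivors are
+    # rescaled by 1/keep_prob so the expected output matches the input.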
+
+
+# Lightly adapted from
+# https://github.com/facebookresearch/MaskFormer/blob/main/mask_former/modeling/transformer/transformer_predictor.py # noqa
+class MLP(nn.Module):
+
+ def __init__(
+ self,
+ input_dim: int,
+ hidden_dim: int,
+ output_dim: int,
+ num_layers: int,
+ activation: nn.Module = nn.ReLU,
+ sigmoid_output: bool = False,
+ ) -> None:
+ super().__init__()
+ self.num_layers = num_layers
+ h = [hidden_dim] * (num_layers - 1)
+ self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim]))
+ self.sigmoid_output = sigmoid_output
+ self.act = activation()
+
+ def forward(self, x):
+ for i, layer in enumerate(self.layers):
+ x = self.act(layer(x)) if i < self.num_layers - 1 else layer(x)
+ if self.sigmoid_output:
+ x = F.sigmoid(x)
+ return x
+
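+# Example (illustrative): MLP(input_dim=256, hidden_dim=256, output_dim=1,
+# num_layers=3) stacks Linear(256, 256) -> ReLU -> Linear(256, 256) -> ReLU
+# -> Linear(256, 1), e.g. a 3-layer prediction head.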
+
+# From https://github.com/facebookresearch/detectron2/blob/main/detectron2/layers/batch_norm.py # noqa
+# Itself from https://github.com/facebookresearch/ConvNeXt/blob/d1fa8f6fef0a165b27399986cc2bdacc92777e40/models/convnext.py#L119 # noqa
+class LayerNorm2d(nn.Module):
+
+ def __init__(self, num_channels: int, eps: float = 1e-6) -> None:
+ super().__init__()
+ self.weight = nn.Parameter(torch.ones(num_channels))
+ self.bias = nn.Parameter(torch.zeros(num_channels))
+ self.eps = eps
+
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ u = x.mean(1, keepdim=True)
+ s = (x - u).pow(2).mean(1, keepdim=True)
+ x = (x - u) / torch.sqrt(s + self.eps)
+ x = self.weight[:, None, None] * x + self.bias[:, None, None]
+ return x
+
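+# Example (illustrative): LayerNorm2d(256) normalizes an [N, 256, H, W] tensor
+# across the channel dimension independently at every spatial location, unlike
+# BatchNorm2d which normalizes across the batch.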
+
+def sample_box_points(
+ masks: torch.Tensor,
+ noise: float = 0.1, # SAM default
+ noise_bound: int = 20, # SAM default
+ top_left_label: int = 2,
+ bottom_right_label: int = 3,
+) -> Tuple[torch.Tensor, torch.Tensor]:
+    """
+    Sample a noised version of the top left and bottom right corners of the
+    bounding box derived from each mask.
+
+    Inputs:
+    - masks: [B, 1, H, W] binary masks, dtype=torch.Tensor
+    - noise: noise as a fraction of box width and height, dtype=float
+    - noise_bound: maximum amount of noise (in pixels), dtype=int
+
+    Returns:
+    - box_coords: [B, num_pt, 2], contains (x, y) coordinates of top left and bottom right box corners, dtype=torch.float
+    - box_labels: [B, num_pt], label 2 is reserved for top left and 3 for bottom right corners, dtype=torch.int32
+    """
+ device = masks.device
+ box_coords = mask_to_box(masks)
+ B, _, H, W = masks.shape
+ box_labels = torch.tensor([top_left_label, bottom_right_label], dtype=torch.int, device=device).repeat(B)
+ if noise > 0.0:
+ if not isinstance(noise_bound, torch.Tensor):
+ noise_bound = torch.tensor(noise_bound, device=device)
+ bbox_w = box_coords[..., 2] - box_coords[..., 0]
+ bbox_h = box_coords[..., 3] - box_coords[..., 1]
+ max_dx = torch.min(bbox_w * noise, noise_bound)
+ max_dy = torch.min(bbox_h * noise, noise_bound)
+ box_noise = 2 * torch.rand(B, 1, 4, device=device) - 1
+ box_noise = box_noise * torch.stack((max_dx, max_dy, max_dx, max_dy), dim=-1)
+
+ box_coords = box_coords + box_noise
+ img_bounds = (torch.tensor([W, H, W, H], device=device) - 1) # uncentered pixel coords
+ box_coords.clamp_(torch.zeros_like(img_bounds), img_bounds) # In place clamping
+
+ box_coords = box_coords.reshape(-1, 2, 2) # always 2 points
+ box_labels = box_labels.reshape(-1, 2)
+ return box_coords, box_labels
+
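+# Example (illustrative): for masks of shape [B, 1, H, W], sample_box_points
+# returns box_coords [B, 2, 2] holding the (possibly noised) top-left and
+# bottom-right corners in (x, y), and box_labels [B, 2] filled with (2, 3).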
+
+def sample_random_points_from_errors(gt_masks, pred_masks, num_pt=1, positive_only=False):
+ """
+ Sample `num_pt` random points (along with their labels) independently from the error regions.
+
+ Inputs:
+ - gt_masks: [B, 1, H_im, W_im] masks, dtype=torch.bool
+ - pred_masks: [B, 1, H_im, W_im] masks, dtype=torch.bool or None
+ - num_pt: int, number of points to sample independently for each of the B error maps
+
+ Outputs:
+ - points: [B, num_pt, 2], dtype=torch.float, contains (x, y) coordinates of each sampled point
+ - labels: [B, num_pt], dtype=torch.int32, where 1 means positive clicks and 0 means
+ negative clicks
+ """
+ if pred_masks is None: # if pred_masks is not provided, treat it as empty
+ pred_masks = torch.zeros_like(gt_masks)
+ assert gt_masks.dtype == torch.bool and gt_masks.size(1) == 1
+ assert pred_masks.dtype == torch.bool and pred_masks.shape == gt_masks.shape
+ assert num_pt >= 0
+
+ B, _, H_im, W_im = gt_masks.shape
+ device = gt_masks.device
+
+ # false positive region, a new point sampled in this region should have
+ # negative label to correct the FP error
+ fp_masks = ~gt_masks & pred_masks
+ # false negative region, a new point sampled in this region should have
+ # positive label to correct the FN error
+ fn_masks = gt_masks & ~pred_masks
+    # whether the prediction completely matches the ground-truth on each mask
+ all_correct = torch.all((gt_masks == pred_masks).flatten(2), dim=2)
+ all_correct = all_correct[..., None, None]
+
+ # channel 0 is FP map, while channel 1 is FN map
+ pts_noise = torch.rand(B, num_pt, H_im, W_im, 2, device=device)
+    # sample a negative new click from the FP region or a positive new click
+    # from the FN region, depending on where the maximum falls;
+    # in case the predictions are all correct (no FP or FN), we just
+    # sample a negative click from the background region
+ pts_noise[..., 0] *= fp_masks | (all_correct & ~gt_masks)
+ if positive_only:
+ pts_noise[..., 0] = -1
+ pts_noise[..., 1] *= fn_masks
+ pts_idx = pts_noise.flatten(2).argmax(dim=2)
+ labels = (pts_idx % 2).to(torch.int32)
+ pts_idx = pts_idx // 2
+ pts_x = pts_idx % W_im
+ pts_y = pts_idx // W_im
+ points = torch.stack([pts_x, pts_y], dim=2).to(torch.float)
+ return points, labels
+
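+# Example (illustrative): with gt_masks and pred_masks of shape [2, 1, 256, 256]
+# and num_pt=2, the call returns points [2, 2, 2] in (x, y) and labels [2, 2],
+# where label 1 marks a click inside a false-negative region.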
+
+def sample_one_point_from_error_center(gt_masks, pred_masks, padding=True, positive_only=False):
+ """
+ Sample 1 random point (along with its label) from the center of each error region,
+ that is, the point with the largest distance to the boundary of each error region.
+ This is the RITM sampling method from https://github.com/saic-vul/ritm_interactive_segmentation/blob/master/isegm/inference/clicker.py
+
+ Inputs:
+ - gt_masks: [B, 1, H_im, W_im] masks, dtype=torch.bool
+ - pred_masks: [B, 1, H_im, W_im] masks, dtype=torch.bool or None
+ - padding: if True, pad with boundary of 1 px for distance transform
+
+ Outputs:
+ - points: [B, 1, 2], dtype=torch.float, contains (x, y) coordinates of each sampled point
+ - labels: [B, 1], dtype=torch.int32, where 1 means positive clicks and 0 means negative clicks
+ """
+ import cv2
+
+ if pred_masks is None:
+ pred_masks = torch.zeros_like(gt_masks)
+ assert gt_masks.dtype == torch.bool and gt_masks.size(1) == 1
+ assert pred_masks.dtype == torch.bool and pred_masks.shape == gt_masks.shape
+
+ B, _, _, W_im = gt_masks.shape
+ device = gt_masks.device
+
+ # false positive region, a new point sampled in this region should have
+ # negative label to correct the FP error
+ fp_masks = ~gt_masks & pred_masks
+ # false negative region, a new point sampled in this region should have
+ # positive label to correct the FN error
+ fn_masks = gt_masks & ~pred_masks
+
+ fp_masks = fp_masks.cpu().numpy()
+ fn_masks = fn_masks.cpu().numpy()
+ points = torch.zeros(B, 1, 2, dtype=torch.float)
+ labels = torch.ones(B, 1, dtype=torch.int32)
+ for b in range(B):
+ fn_mask = fn_masks[b, 0]
+ fp_mask = fp_masks[b, 0]
+ if padding:
+ fn_mask = np.pad(fn_mask, ((1, 1), (1, 1)), "constant")
+ fp_mask = np.pad(fp_mask, ((1, 1), (1, 1)), "constant")
+ # compute the distance of each point in FN/FP region to its boundary
+ fn_mask_dt = cv2.distanceTransform(fn_mask.astype(np.uint8), cv2.DIST_L2, 0)
+ fp_mask_dt = cv2.distanceTransform(fp_mask.astype(np.uint8), cv2.DIST_L2, 0)
+ if padding:
+ fn_mask_dt = fn_mask_dt[1:-1, 1:-1]
+ fp_mask_dt = fp_mask_dt[1:-1, 1:-1]
+
+ # take the point in FN/FP region with the largest distance to its boundary
+ fn_mask_dt_flat = fn_mask_dt.reshape(-1)
+ fp_mask_dt_flat = fp_mask_dt.reshape(-1)
+ fn_argmax = np.argmax(fn_mask_dt_flat)
+ fp_argmax = np.argmax(fp_mask_dt_flat)
+ is_positive = fn_mask_dt_flat[fn_argmax] > fp_mask_dt_flat[fp_argmax]
+ if positive_only:
+ is_positive = True
+ pt_idx = fn_argmax if is_positive else fp_argmax
+ points[b, 0, 0] = pt_idx % W_im # x
+ points[b, 0, 1] = pt_idx // W_im # y
+ labels[b, 0] = int(is_positive)
+
+ points = points.to(device)
+ labels = labels.to(device)
+ return points, labels
+
+
+def get_next_point(gt_masks, pred_masks, method, positive_only=True):
+ if method == "uniform":
+ return sample_random_points_from_errors(gt_masks, pred_masks, positive_only=positive_only)
+ elif method == "center":
+ return sample_one_point_from_error_center(gt_masks, pred_masks, positive_only=positive_only)
+ else:
+ raise ValueError(f"unknown sampling method {method}")
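+
+
+# Example (illustrative): sampling one correction click per error map, e.g.
+#   points, labels = get_next_point(gt_masks, pred_masks > 0, method="uniform")
+# returns points of shape [B, 1, 2] in (x, y) and labels of shape [B, 1].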
diff --git a/sam2/sam2_image_predictor.py b/sam2/sam2_image_predictor.py
new file mode 100644
index 0000000000000000000000000000000000000000..718de61136aa01ab5f2d3d12ce5abf024cce2449
--- /dev/null
+++ b/sam2/sam2_image_predictor.py
@@ -0,0 +1,428 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+
+import logging
+
+from typing import List, Optional, Tuple, Union
+
+import numpy as np
+import torch
+from PIL.Image import Image
+
+from sam2.modeling.sam2_base import SAM2Base
+
+from sam2.utils.transforms import SAM2Transforms
+
+
+class SAM2ImagePredictor:
+
+ def __init__(
+ self,
+ sam_model: SAM2Base,
+ mask_threshold=0.0,
+ max_hole_area=0.0,
+ max_sprinkle_area=0.0,
+ **kwargs,
+ ) -> None:
+ """
+ Uses SAM-2 to calculate the image embedding for an image, and then
+        allows repeated, efficient mask prediction given prompts.
+
+ Arguments:
+          sam_model (SAM2Base): The model to use for mask prediction.
+ mask_threshold (float): The threshold to use when converting mask logits
+ to binary masks. Masks are thresholded at 0 by default.
+          max_hole_area (float): If max_hole_area > 0, we fill small holes in up to
+            the maximum area of max_hole_area in low_res_masks.
+          max_sprinkle_area (float): If max_sprinkle_area > 0, we remove small sprinkles up to
+            the maximum area of max_sprinkle_area in low_res_masks.
+ """
+ super().__init__()
+ self.model = sam_model
+ self._transforms = SAM2Transforms(
+ resolution=self.model.image_size,
+ mask_threshold=mask_threshold,
+ max_hole_area=max_hole_area,
+ max_sprinkle_area=max_sprinkle_area,
+ )
+
+ # Predictor state
+ self._is_image_set = False
+ self._features = None
+ self._orig_hw = None
+ # Whether the predictor is set for single image or a batch of images
+ self._is_batch = False
+
+ # Predictor config
+ self.mask_threshold = mask_threshold
+
+ # Spatial dim for backbone feature maps
+ self._bb_feat_sizes = [
+ (256, 256),
+ (128, 128),
+ (64, 64),
+ ]
+
+ @classmethod
+ def from_pretrained(cls, model_id: str, **kwargs) -> "SAM2ImagePredictor":
+ """
+ Load a pretrained model from the Hugging Face hub.
+
+ Arguments:
+ model_id (str): The Hugging Face repository ID.
+ **kwargs: Additional arguments to pass to the model constructor.
+
+ Returns:
+ (SAM2ImagePredictor): The loaded model.
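+
+        Example (sketch; the repository ID below is an assumption):
+            >>> predictor = SAM2ImagePredictor.from_pretrained("facebook/sam2-hiera-large")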
+ """
+ from sam2.build_sam import build_sam2_hf
+
+ sam_model = build_sam2_hf(model_id, **kwargs)
+ return cls(sam_model, **kwargs)
+
+ @torch.no_grad()
+ def set_image(
+ self,
+ image: Union[np.ndarray, Image],
+ ) -> None:
+ """
+ Calculates the image embeddings for the provided image, allowing
+ masks to be predicted with the 'predict' method.
+
+ Arguments:
+          image (np.ndarray or PIL Image): The input image to embed in RGB format.
+            The image should be in HWC format if np.ndarray, or WHC format if PIL Image,
+            with pixel values in [0, 255].
+ """
+ self.reset_predictor()
+ # Transform the image to the form expected by the model
+ if isinstance(image, np.ndarray):
+ logging.info("For numpy array image, we assume (HxWxC) format")
+ self._orig_hw = [image.shape[:2]]
+ elif isinstance(image, Image):
+ w, h = image.size
+ self._orig_hw = [(h, w)]
+ else:
+ raise NotImplementedError("Image format not supported")
+
+ input_image = self._transforms(image)
+ input_image = input_image[None, ...].to(self.device)
+
+ assert (len(input_image.shape) == 4
+ and input_image.shape[1] == 3), f"input_image must be of size 1x3xHxW, got {input_image.shape}"
+ logging.info("Computing image embeddings for the provided image...")
+ backbone_out = self.model.forward_image(input_image)
+ _, vision_feats, _, _ = self.model._prepare_backbone_features(backbone_out)
+        # Add no_mem_embed, which is added to the lowest res. feat. map during training on videos
+ if self.model.directly_add_no_mem_embed:
+ vision_feats[-1] = vision_feats[-1] + self.model.no_mem_embed
+
+ feats = [
+ feat.permute(1, 2, 0).view(1, -1, *feat_size)
+ for feat, feat_size in zip(vision_feats[::-1], self._bb_feat_sizes[::-1])
+ ][::-1]
+ self._features = {"image_embed": feats[-1], "high_res_feats": feats[:-1]}
+ self._is_image_set = True
+ logging.info("Image embeddings computed.")
+
+ @torch.no_grad()
+ def set_image_batch(
+ self,
+        image_list: List[np.ndarray],
+ ) -> None:
+ """
+ Calculates the image embeddings for the provided image batch, allowing
+ masks to be predicted with the 'predict_batch' method.
+
+ Arguments:
+          image_list (List[np.ndarray]): The input images to embed in RGB format.
+            Each image should be an np.ndarray in HWC format with pixel values in [0, 255].
+ """
+ self.reset_predictor()
+ assert isinstance(image_list, list)
+ self._orig_hw = []
+ for image in image_list:
+ assert isinstance(image,
+ np.ndarray), "Images are expected to be an np.ndarray in RGB format, and of shape HWC"
+ self._orig_hw.append(image.shape[:2])
+ # Transform the image to the form expected by the model
+ img_batch = self._transforms.forward_batch(image_list)
+ img_batch = img_batch.to(self.device)
+ batch_size = img_batch.shape[0]
+ assert (len(img_batch.shape) == 4
+ and img_batch.shape[1] == 3), f"img_batch must be of size Bx3xHxW, got {img_batch.shape}"
+ logging.info("Computing image embeddings for the provided images...")
+ backbone_out = self.model.forward_image(img_batch)
+ _, vision_feats, _, _ = self.model._prepare_backbone_features(backbone_out)
+        # Add no_mem_embed, which is added to the lowest res. feat. map during training on videos
+ if self.model.directly_add_no_mem_embed:
+ vision_feats[-1] = vision_feats[-1] + self.model.no_mem_embed
+
+ feats = [
+ feat.permute(1, 2, 0).view(batch_size, -1, *feat_size)
+ for feat, feat_size in zip(vision_feats[::-1], self._bb_feat_sizes[::-1])
+ ][::-1]
+ self._features = {"image_embed": feats[-1], "high_res_feats": feats[:-1]}
+ self._is_image_set = True
+ self._is_batch = True
+ logging.info("Image embeddings computed.")
+
+ def predict_batch(
+ self,
+ point_coords_batch: List[np.ndarray] = None,
+ point_labels_batch: List[np.ndarray] = None,
+ box_batch: List[np.ndarray] = None,
+ mask_input_batch: List[np.ndarray] = None,
+ multimask_output: bool = True,
+ return_logits: bool = False,
+ normalize_coords=True,
+ ) -> Tuple[List[np.ndarray], List[np.ndarray], List[np.ndarray]]:
+ """This function is very similar to predict(...), however it is used for batched mode, when the model is expected to generate predictions on multiple images.
+ It returns a tuple of lists of masks, ious, and low_res_masks_logits.
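+
+        Example (sketch, assuming `images` is a list of HWC RGB np.ndarrays and
+        `coords_list`/`labels_list` are per-image point prompts):
+            >>> predictor.set_image_batch(images)
+            >>> masks, ious, low_res = predictor.predict_batch(
+            ...     point_coords_batch=coords_list, point_labels_batch=labels_list)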
+ """
+ assert self._is_batch, "This function should only be used when in batched mode"
+ if not self._is_image_set:
+ raise RuntimeError("An image must be set with .set_image_batch(...) before mask prediction.")
+ num_images = len(self._features["image_embed"])
+ all_masks = []
+ all_ious = []
+ all_low_res_masks = []
+ for img_idx in range(num_images):
+ # Transform input prompts
+ point_coords = (point_coords_batch[img_idx] if point_coords_batch is not None else None)
+ point_labels = (point_labels_batch[img_idx] if point_labels_batch is not None else None)
+ box = box_batch[img_idx] if box_batch is not None else None
+ mask_input = (mask_input_batch[img_idx] if mask_input_batch is not None else None)
+ mask_input, unnorm_coords, labels, unnorm_box = self._prep_prompts(
+ point_coords,
+ point_labels,
+ box,
+ mask_input,
+ normalize_coords,
+ img_idx=img_idx,
+ )
+ masks, iou_predictions, low_res_masks = self._predict(
+ unnorm_coords,
+ labels,
+ unnorm_box,
+ mask_input,
+ multimask_output,
+ return_logits=return_logits,
+ img_idx=img_idx,
+ )
+ masks_np = masks.squeeze(0).float().detach().cpu().numpy()
+ iou_predictions_np = (iou_predictions.squeeze(0).float().detach().cpu().numpy())
+ low_res_masks_np = low_res_masks.squeeze(0).float().detach().cpu().numpy()
+ all_masks.append(masks_np)
+ all_ious.append(iou_predictions_np)
+ all_low_res_masks.append(low_res_masks_np)
+
+ return all_masks, all_ious, all_low_res_masks
+
+ def predict(
+ self,
+ point_coords: Optional[np.ndarray] = None,
+ point_labels: Optional[np.ndarray] = None,
+ box: Optional[np.ndarray] = None,
+ mask_input: Optional[np.ndarray] = None,
+ multimask_output: bool = True,
+ return_logits: bool = False,
+ normalize_coords=True,
+ ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
+ """
+ Predict masks for the given input prompts, using the currently set image.
+
+ Arguments:
+ point_coords (np.ndarray or None): A Nx2 array of point prompts to the
+ model. Each point is in (X,Y) in pixels.
+ point_labels (np.ndarray or None): A length N array of labels for the
+ point prompts. 1 indicates a foreground point and 0 indicates a
+ background point.
+          box (np.ndarray or None): A length 4 array giving a box prompt to the
+            model, in XYXY format.
+ mask_input (np.ndarray): A low resolution mask input to the model, typically
+ coming from a previous prediction iteration. Has form 1xHxW, where
+ for SAM, H=W=256.
+ multimask_output (bool): If true, the model will return three masks.
+ For ambiguous input prompts (such as a single click), this will often
+ produce better masks than a single prediction. If only a single
+ mask is needed, the model's predicted quality score can be used
+ to select the best mask. For non-ambiguous prompts, such as multiple
+ input prompts, multimask_output=False can give better results.
+ return_logits (bool): If true, returns un-thresholded masks logits
+ instead of a binary mask.
+ normalize_coords (bool): If true, the point coordinates will be normalized to the range [0,1] and point_coords is expected to be wrt. image dimensions.
+
+ Returns:
+ (np.ndarray): The output masks in CxHxW format, where C is the
+ number of masks, and (H, W) is the original image size.
+ (np.ndarray): An array of length C containing the model's
+ predictions for the quality of each mask.
+ (np.ndarray): An array of shape CxHxW, where C is the number
+ of masks and H=W=256. These low resolution logits can be passed to
+ a subsequent iteration as mask input.
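+
+        Example (sketch, assuming an image has already been set with
+        `set_image`; the click coordinates are placeholders):
+            >>> masks, scores, low_res = predictor.predict(
+            ...     point_coords=np.array([[500, 375]]),
+            ...     point_labels=np.array([1]),
+            ...     multimask_output=True,
+            ... )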
+ """
+ if not self._is_image_set:
+ raise RuntimeError("An image must be set with .set_image(...) before mask prediction.")
+
+ # Transform input prompts
+
+ mask_input, unnorm_coords, labels, unnorm_box = self._prep_prompts(point_coords, point_labels, box, mask_input,
+ normalize_coords)
+
+ masks, iou_predictions, low_res_masks = self._predict(
+ unnorm_coords,
+ labels,
+ unnorm_box,
+ mask_input,
+ multimask_output,
+ return_logits=return_logits,
+ )
+
+ masks_np = masks.squeeze(0).float().detach().cpu().numpy()
+ iou_predictions_np = iou_predictions.squeeze(0).float().detach().cpu().numpy()
+ low_res_masks_np = low_res_masks.squeeze(0).float().detach().cpu().numpy()
+ return masks_np, iou_predictions_np, low_res_masks_np
+
+ def _prep_prompts(self, point_coords, point_labels, box, mask_logits, normalize_coords, img_idx=-1):
+
+ unnorm_coords, labels, unnorm_box, mask_input = None, None, None, None
+ if point_coords is not None:
+ assert (point_labels is not None), "point_labels must be supplied if point_coords is supplied."
+ point_coords = torch.as_tensor(point_coords, dtype=torch.float, device=self.device)
+ unnorm_coords = self._transforms.transform_coords(
+ point_coords, normalize=normalize_coords, orig_hw=self._orig_hw[img_idx])
+ labels = torch.as_tensor(point_labels, dtype=torch.int, device=self.device)
+ if len(unnorm_coords.shape) == 2:
+ unnorm_coords, labels = unnorm_coords[None, ...], labels[None, ...]
+ if box is not None:
+ box = torch.as_tensor(box, dtype=torch.float, device=self.device)
+ unnorm_box = self._transforms.transform_boxes(
+ box, normalize=normalize_coords, orig_hw=self._orig_hw[img_idx]) # Bx2x2
+ if mask_logits is not None:
+ mask_input = torch.as_tensor(mask_logits, dtype=torch.float, device=self.device)
+ if len(mask_input.shape) == 3:
+ mask_input = mask_input[None, :, :, :]
+ return mask_input, unnorm_coords, labels, unnorm_box
+
+ @torch.no_grad()
+ def _predict(
+ self,
+ point_coords: Optional[torch.Tensor],
+ point_labels: Optional[torch.Tensor],
+ boxes: Optional[torch.Tensor] = None,
+ mask_input: Optional[torch.Tensor] = None,
+ multimask_output: bool = True,
+ return_logits: bool = False,
+ img_idx: int = -1,
+ ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
+ """
+ Predict masks for the given input prompts, using the currently set image.
+ Input prompts are batched torch tensors and are expected to already be
+ transformed to the input frame using SAM2Transforms.
+
+ Arguments:
+ point_coords (torch.Tensor or None): A BxNx2 array of point prompts to the
+ model. Each point is in (X,Y) in pixels.
+ point_labels (torch.Tensor or None): A BxN array of labels for the
+ point prompts. 1 indicates a foreground point and 0 indicates a
+ background point.
+          boxes (torch.Tensor or None): A Bx4 array giving a box prompt to the
+            model, in XYXY format.
+          mask_input (torch.Tensor): A low resolution mask input to the model, typically
+            coming from a previous prediction iteration. Has form Bx1xHxW, where
+            for SAM, H=W=256. Masks returned by a previous iteration of the
+            predict method do not need further transformation.
+ multimask_output (bool): If true, the model will return three masks.
+ For ambiguous input prompts (such as a single click), this will often
+ produce better masks than a single prediction. If only a single
+ mask is needed, the model's predicted quality score can be used
+ to select the best mask. For non-ambiguous prompts, such as multiple
+ input prompts, multimask_output=False can give better results.
+ return_logits (bool): If true, returns un-thresholded masks logits
+ instead of a binary mask.
+
+ Returns:
+ (torch.Tensor): The output masks in BxCxHxW format, where C is the
+ number of masks, and (H, W) is the original image size.
+ (torch.Tensor): An array of shape BxC containing the model's
+ predictions for the quality of each mask.
+ (torch.Tensor): An array of shape BxCxHxW, where C is the number
+ of masks and H=W=256. These low res logits can be passed to
+ a subsequent iteration as mask input.
+ """
+ if not self._is_image_set:
+ raise RuntimeError("An image must be set with .set_image(...) before mask prediction.")
+
+ if point_coords is not None:
+ concat_points = (point_coords, point_labels)
+ else:
+ concat_points = None
+
+ # Embed prompts
+ if boxes is not None:
+ box_coords = boxes.reshape(-1, 2, 2)
+ box_labels = torch.tensor([[2, 3]], dtype=torch.int, device=boxes.device)
+ box_labels = box_labels.repeat(boxes.size(0), 1)
+ # we merge "boxes" and "points" into a single "concat_points" input (where
+ # boxes are added at the beginning) to sam_prompt_encoder
+ if concat_points is not None:
+ concat_coords = torch.cat([box_coords, concat_points[0]], dim=1)
+ concat_labels = torch.cat([box_labels, concat_points[1]], dim=1)
+ concat_points = (concat_coords, concat_labels)
+ else:
+ concat_points = (box_coords, box_labels)
+
+ sparse_embeddings, dense_embeddings = self.model.sam_prompt_encoder(
+ points=concat_points,
+ boxes=None,
+ masks=mask_input,
+ )
+
+ # Predict masks
+ batched_mode = (concat_points is not None and concat_points[0].shape[0] > 1) # multi object prediction
+ high_res_features = [feat_level[img_idx].unsqueeze(0) for feat_level in self._features["high_res_feats"]]
+ low_res_masks, iou_predictions, _, _ = self.model.sam_mask_decoder(
+ image_embeddings=self._features["image_embed"][img_idx].unsqueeze(0),
+ image_pe=self.model.sam_prompt_encoder.get_dense_pe(),
+ sparse_prompt_embeddings=sparse_embeddings,
+ dense_prompt_embeddings=dense_embeddings,
+ multimask_output=multimask_output,
+ repeat_image=batched_mode,
+ high_res_features=high_res_features,
+ )
+
+ # Upscale the masks to the original image resolution
+ masks = self._transforms.postprocess_masks(low_res_masks, self._orig_hw[img_idx])
+ low_res_masks = torch.clamp(low_res_masks, -32.0, 32.0)
+ if not return_logits:
+ masks = masks > self.mask_threshold
+
+ return masks, iou_predictions, low_res_masks
+
+ def get_image_embedding(self) -> torch.Tensor:
+ """
+ Returns the image embeddings for the currently set image, with
+ shape 1xCxHxW, where C is the embedding dimension and (H,W) are
+ the embedding spatial dimension of SAM (typically C=256, H=W=64).
+ """
+ if not self._is_image_set:
+ raise RuntimeError("An image must be set with .set_image(...) to generate an embedding.")
+ assert (self._features is not None), "Features must exist if an image has been set."
+ return self._features["image_embed"]
+
+ @property
+ def device(self) -> torch.device:
+ return self.model.device
+
+ def reset_predictor(self) -> None:
+ """
+ Resets the image embeddings and other state variables.
+ """
+ self._is_image_set = False
+ self._features = None
+ self._orig_hw = None
+ self._is_batch = False
diff --git a/sam2/sam2_train.py b/sam2/sam2_train.py
new file mode 100644
index 0000000000000000000000000000000000000000..c4a328bae116c2e024da5c467b84f5a7176524cf
--- /dev/null
+++ b/sam2/sam2_train.py
@@ -0,0 +1,575 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+
+import logging
+
+import numpy as np
+import torch
+import torch.distributed
+from tensordict import tensorclass
+
+from sam2.modeling.sam2_base import SAM2Base
+from sam2.modeling.sam2_utils import get_next_point, sample_box_points
+from sam2.utils.misc import concat_points
+
+
+@tensorclass
+class BatchedVideoDatapoint:
+ """
+ This class represents a batch of videos with associated annotations.
+ Attributes:
+ img_batch: A [TxBxCxHxW] tensor containing the image data for each frame in the batch, where T is the number of frames per video, and B is the number of videos in the batch.
+        obj_to_frame_idx: A [TxOx2] tensor containing the (frame_idx, video_idx) pair that each object belongs to, where O is the number of objects in the batch.
+ masks: A [TxOxHxW] tensor containing binary masks for each object in the batch.
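+
+    Example (illustrative): a batch of B=2 videos, each with T=8 frames and
+    O=5 objects in total, has img_batch [8, 2, 3, H, W], obj_to_frame_idx
+    [8, 5, 2] and masks [8, 5, H, W].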
+ """
+
+ img_batch: torch.FloatTensor
+ obj_to_frame_idx: torch.IntTensor
+ masks: torch.BoolTensor
+
+ @property
+ def num_frames(self) -> int:
+ """
+ Returns the number of frames per video.
+ """
+ return self.img_batch.shape[0]
+
+ @property
+ def num_videos(self) -> int:
+ """
+ Returns the number of videos in the batch.
+ """
+ return self.img_batch.shape[1]
+
+ @property
+ def flat_obj_to_img_idx(self) -> torch.IntTensor:
+ """
+ Returns a flattened tensor containing the object to img index.
+        The flat index can be used to access `flat_img_batch` of shape [(B*T)xCxHxW].
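+
+        Example (illustrative): with num_frames=8, an object on frame 2 of
+        video 1 maps to flat index 1 * 8 + 2 = 10.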
+ """
+ frame_idx, video_idx = self.obj_to_frame_idx.unbind(dim=-1)
+ flat_idx = video_idx * self.num_frames + frame_idx
+ return flat_idx
+
+ @property
+ def flat_img_batch(self) -> torch.FloatTensor:
+ """
+ Returns a flattened img_batch_tensor of shape [(B*T)xCxHxW]
+ """
+ return self.img_batch.transpose(0, 1).flatten(0, 1)
+
+
+class SAM2Train(SAM2Base):
+
+ def __init__(
+ self,
+ image_encoder,
+ memory_attention=None,
+ memory_encoder=None,
+ prob_to_use_pt_input_for_train=0.0,
+ prob_to_use_pt_input_for_eval=0.0,
+ prob_to_use_box_input_for_train=0.0,
+ prob_to_use_box_input_for_eval=0.0,
+        # if it is greater than 1, we apply interactive point sampling on the 1st frame and other randomly selected frames
+ num_frames_to_correct_for_train=1, # default: only iteratively sample on first frame
+ num_frames_to_correct_for_eval=1, # default: only iteratively sample on first frame
+ rand_frames_to_correct_for_train=False,
+ rand_frames_to_correct_for_eval=False,
+ # how many frames to use as initial conditioning frames (for both point input and mask input; the first frame is always used as an initial conditioning frame)
+ # - if `rand_init_cond_frames` below is True, we randomly sample 1~num_init_cond_frames initial conditioning frames
+ # - otherwise we sample a fixed number of num_init_cond_frames initial conditioning frames
+ # note: for point input, we sample correction points on all such initial conditioning frames, and we require that `num_frames_to_correct` >= `num_init_cond_frames`;
+ # these are initial conditioning frames because as we track the video, more conditioning frames might be added
+ # when a frame receives correction clicks under point input if `add_all_frames_to_correct_as_cond=True`
+ num_init_cond_frames_for_train=1, # default: only use the first frame as initial conditioning frame
+ num_init_cond_frames_for_eval=1, # default: only use the first frame as initial conditioning frame
+        rand_init_cond_frames_for_train=True, # default: random 1~num_init_cond_frames_for_train cond frames (to be consistent w/ previous TA data loader)
+ rand_init_cond_frames_for_eval=False,
+ # if `add_all_frames_to_correct_as_cond` is True, we also append to the conditioning frame list any frame that receives a later correction click
+        # if `add_all_frames_to_correct_as_cond` is False, we restrict the conditioning frame list to only the initial conditioning frames
+ add_all_frames_to_correct_as_cond=False,
+ # how many additional correction points to sample (on each frame selected to be corrected)
+ # note that the first frame receives an initial input click (in addition to any correction clicks)
+ num_correction_pt_per_frame=7,
+ # method for point sampling during evaluation
+ # "uniform" (sample uniformly from error region) or "center" (use the point with the largest distance to error region boundary)
+ # default to "center" to be consistent with evaluation in the SAM paper
+ pt_sampling_for_eval="center",
+ # During training, we optionally allow sampling the correction points from GT regions
+ # instead of the prediction error regions with a small probability. This might allow the
+ # model to overfit less to the error regions in training datasets
+ prob_to_sample_from_gt_for_train=0.0,
+ use_act_ckpt_iterative_pt_sampling=False,
+ # whether to forward image features per frame (as it's being tracked) during evaluation, instead of forwarding image features
+ # of all frames at once. This avoids backbone OOM errors on very long videos in evaluation, but could be slightly slower.
+ forward_backbone_per_frame_for_eval=False,
+ freeze_image_encoder=False,
+ **kwargs,
+ ):
+ super().__init__(image_encoder, memory_attention, memory_encoder, **kwargs)
+ self.use_act_ckpt_iterative_pt_sampling = use_act_ckpt_iterative_pt_sampling
+ self.forward_backbone_per_frame_for_eval = forward_backbone_per_frame_for_eval
+
+ # Point sampler and conditioning frames
+ self.prob_to_use_pt_input_for_train = prob_to_use_pt_input_for_train
+ self.prob_to_use_box_input_for_train = prob_to_use_box_input_for_train
+ self.prob_to_use_pt_input_for_eval = prob_to_use_pt_input_for_eval
+ self.prob_to_use_box_input_for_eval = prob_to_use_box_input_for_eval
+ if prob_to_use_pt_input_for_train > 0 or prob_to_use_pt_input_for_eval > 0:
+ logging.info(f"Training with points (sampled from masks) as inputs with p={prob_to_use_pt_input_for_train}")
+ assert num_frames_to_correct_for_train >= num_init_cond_frames_for_train
+ assert num_frames_to_correct_for_eval >= num_init_cond_frames_for_eval
+
+ self.num_frames_to_correct_for_train = num_frames_to_correct_for_train
+ self.num_frames_to_correct_for_eval = num_frames_to_correct_for_eval
+ self.rand_frames_to_correct_for_train = rand_frames_to_correct_for_train
+ self.rand_frames_to_correct_for_eval = rand_frames_to_correct_for_eval
+ # Initial multi-conditioning frames
+ self.num_init_cond_frames_for_train = num_init_cond_frames_for_train
+ self.num_init_cond_frames_for_eval = num_init_cond_frames_for_eval
+ self.rand_init_cond_frames_for_train = rand_init_cond_frames_for_train
+ self.rand_init_cond_frames_for_eval = rand_init_cond_frames_for_eval
+ self.add_all_frames_to_correct_as_cond = add_all_frames_to_correct_as_cond
+ self.num_correction_pt_per_frame = num_correction_pt_per_frame
+ self.pt_sampling_for_eval = pt_sampling_for_eval
+ self.prob_to_sample_from_gt_for_train = prob_to_sample_from_gt_for_train
+ # A random number generator with a fixed initial seed across GPUs
+ self.rng = np.random.default_rng(seed=42)
+
+ if freeze_image_encoder:
+ for p in self.image_encoder.parameters():
+ p.requires_grad = False
+
+ def forward(self, input: BatchedVideoDatapoint, hidden):
+ if self.training or not self.forward_backbone_per_frame_for_eval:
+ # precompute image features on all frames before tracking
+ backbone_out = self.forward_image(input.flat_img_batch)
+ else:
+ # defer image feature computation on a frame until it's being tracked
+ backbone_out = {"backbone_fpn": None, "vision_pos_enc": None}
+ # NOTE: backbone_out = self.prepare_prompt_inputs(backbone_out, input)
+ previous_stages_out = self.forward_tracking(backbone_out, input, hidden)
+
+ return previous_stages_out
+
+ def _prepare_backbone_features_per_frame(self, img_batch, img_ids):
+ """Compute the image backbone features on the fly for the given img_ids."""
+ # Only forward backbone on unique image ids to avoid repetitive computation
+ # (if `img_ids` has only one element, it's already unique so we skip this step).
+ if img_ids.numel() > 1:
+ unique_img_ids, inv_ids = torch.unique(img_ids, return_inverse=True)
+ else:
+ unique_img_ids, inv_ids = img_ids, None
+
+ # Compute the image features on those unique image ids
+ image = img_batch[unique_img_ids]
+ backbone_out = self.forward_image(image)
+ (
+ _,
+ vision_feats,
+ vision_pos_embeds,
+ feat_sizes,
+ ) = self._prepare_backbone_features(backbone_out)
+ # Inverse-map image features for `unique_img_ids` to the final image features
+ # for the original input `img_ids`.
+ if inv_ids is not None:
+ image = image[inv_ids]
+ vision_feats = [x[:, inv_ids] for x in vision_feats]
+ vision_pos_embeds = [x[:, inv_ids] for x in vision_pos_embeds]
+
+ return image, vision_feats, vision_pos_embeds, feat_sizes
+
+ def prepare_prompt_inputs(self, backbone_out, input, start_frame_idx=0):
+ """
+ Prepare input mask, point or box prompts. Optionally, we allow tracking from
+ a custom `start_frame_idx` to the end of the video (for evaluation purposes).
+ """
+ # Load the ground-truth masks on all frames (so that we can later
+ # sample correction points from them)
+ # gt_masks_per_frame = {
+ # stage_id: targets.segments.unsqueeze(1) # [B, 1, H_im, W_im]
+ # for stage_id, targets in enumerate(input.find_targets)
+ # }
+ gt_masks_per_frame = {
+ stage_id: masks.unsqueeze(1) # [B, 1, H_im, W_im]
+ for stage_id, masks in enumerate(input.masks)
+ }
+ # gt_masks_per_frame = input.masks.unsqueeze(2) # [T,B,1,H_im,W_im] keep everything in tensor form
+ backbone_out["gt_masks_per_frame"] = gt_masks_per_frame
+ num_frames = input.num_frames
+ backbone_out["num_frames"] = num_frames
+
+ # Randomly decide whether to use point inputs or mask inputs
+ if self.training:
+ prob_to_use_pt_input = self.prob_to_use_pt_input_for_train
+ prob_to_use_box_input = self.prob_to_use_box_input_for_train
+ num_frames_to_correct = self.num_frames_to_correct_for_train
+ rand_frames_to_correct = self.rand_frames_to_correct_for_train
+ num_init_cond_frames = self.num_init_cond_frames_for_train
+ rand_init_cond_frames = self.rand_init_cond_frames_for_train
+ else:
+ prob_to_use_pt_input = self.prob_to_use_pt_input_for_eval
+ prob_to_use_box_input = self.prob_to_use_box_input_for_eval
+ num_frames_to_correct = self.num_frames_to_correct_for_eval
+ rand_frames_to_correct = self.rand_frames_to_correct_for_eval
+ num_init_cond_frames = self.num_init_cond_frames_for_eval
+ rand_init_cond_frames = self.rand_init_cond_frames_for_eval
+ if num_frames == 1:
+ # here we handle a special case for mixing video + SAM on image training,
+ # where we force using point input for the SAM task on static images
+ prob_to_use_pt_input = 1.0
+ num_frames_to_correct = 1
+ num_init_cond_frames = 1
+ assert num_init_cond_frames >= 1
+        # (here `self.rng.random()` returns a value in the range 0.0 <= X < 1.0)
+ use_pt_input = self.rng.random() < prob_to_use_pt_input
+ if rand_init_cond_frames and num_init_cond_frames > 1:
+ # randomly select 1 to `num_init_cond_frames` frames as initial conditioning frames
+ num_init_cond_frames = self.rng.integers(1, num_init_cond_frames, endpoint=True)
+ if (use_pt_input and rand_frames_to_correct and num_frames_to_correct > num_init_cond_frames):
+ # randomly select `num_init_cond_frames` to `num_frames_to_correct` frames to sample
+ # correction clicks (only for the case of point input)
+ num_frames_to_correct = self.rng.integers(num_init_cond_frames, num_frames_to_correct, endpoint=True)
+ backbone_out["use_pt_input"] = use_pt_input
+
+ # Sample initial conditioning frames
+ if num_init_cond_frames == 1:
+ init_cond_frames = [start_frame_idx] # starting frame
+ else:
+ # starting frame + randomly selected remaining frames (without replacement)
+ init_cond_frames = [start_frame_idx] + self.rng.choice(
+ range(start_frame_idx + 1, num_frames),
+ num_init_cond_frames - 1,
+ replace=False,
+ ).tolist()
+ backbone_out["init_cond_frames"] = init_cond_frames
+ backbone_out["frames_not_in_init_cond"] = [
+ t for t in range(start_frame_idx, num_frames) if t not in init_cond_frames
+ ]
+ # Prepare mask or point inputs on initial conditioning frames
+ backbone_out["mask_inputs_per_frame"] = {} # {frame_idx: }
+ backbone_out["point_inputs_per_frame"] = {} # {frame_idx: }
+ for t in init_cond_frames:
+ if not use_pt_input:
+ backbone_out["mask_inputs_per_frame"][t] = gt_masks_per_frame[t]
+ else:
+                # During training, P(box) = prob_to_use_pt_input * prob_to_use_box_input
+ use_box_input = self.rng.random() < prob_to_use_box_input
+ if use_box_input:
+                    points, labels = sample_box_points(gt_masks_per_frame[t])
+ else:
+ # (here we only sample **one initial point** on initial conditioning frames from the
+ # ground-truth mask; we may sample more correction points on the fly)
+ points, labels = get_next_point(
+ gt_masks=gt_masks_per_frame[t],
+ pred_masks=None,
+ method=("uniform" if self.training else self.pt_sampling_for_eval),
+ )
+
+ point_inputs = {"point_coords": points, "point_labels": labels}
+ backbone_out["point_inputs_per_frame"][t] = point_inputs
+
+ # Sample frames where we will add correction clicks on the fly
+ # based on the error between prediction and ground-truth masks
+ if not use_pt_input:
+ # no correction points will be sampled when using mask inputs
+ frames_to_add_correction_pt = []
+ elif num_frames_to_correct == num_init_cond_frames:
+ frames_to_add_correction_pt = init_cond_frames
+ else:
+ assert num_frames_to_correct > num_init_cond_frames
+ # initial cond frame + randomly selected remaining frames (without replacement)
+ extra_num = num_frames_to_correct - num_init_cond_frames
+ frames_to_add_correction_pt = (
+ init_cond_frames +
+ self.rng.choice(backbone_out["frames_not_in_init_cond"], extra_num, replace=False).tolist())
+ backbone_out["frames_to_add_correction_pt"] = frames_to_add_correction_pt
+
+ return backbone_out
+
+ def forward_tracking(self, backbone_out, input: BatchedVideoDatapoint, hidden, return_dict=False):
+ """Forward video tracking on each frame (and sample correction clicks)."""
+ img_feats_already_computed = backbone_out["backbone_fpn"] is not None
+ if img_feats_already_computed:
+ # Prepare the backbone features
+ # - vision_feats and vision_pos_embeds are in (HW)BC format
+ (
+ _,
+ vision_feats,
+ vision_pos_embeds,
+ feat_sizes,
+ ) = self._prepare_backbone_features(backbone_out)
+
+ # Starting the stage loop
+ # NOTE: num_frames = backbone_out["num_frames"] =========================================
+ num_frames = input.num_frames
+ # =======================================================================================
+ # NOTE: init_cond_frames = backbone_out["init_cond_frames"] =============================
+ # init_cond_frames = list(range(num_frames))
+ init_cond_frames = [0]
+ # =======================================================================================
+ # NOTE: frames_to_add_correction_pt = backbone_out["frames_to_add_correction_pt"] =======
+ frames_to_add_correction_pt = []
+ # =======================================================================================
+ # first process all the initial conditioning frames to encode them as memory,
+ # and then conditioning on them to track the remaining frames
+ # NOTE: processing_order = init_cond_frames + backbone_out["frames_not_in_init_cond"] ===
+ frames_not_in_init_cond = [t for t in range(num_frames) if t not in init_cond_frames]
+ processing_order = init_cond_frames + frames_not_in_init_cond
+ # =======================================================================================
+ backbone_out["point_inputs_per_frame"] = {}
+ backbone_out["mask_inputs_per_frame"] = {}
+ # backbone_out["hidden_inputs_per_frame"] = {stage_id: hidden for stage_id in processing_order}
+ backbone_out["hidden_inputs_per_frame"] = {0: hidden}
+ backbone_out["gt_masks_per_frame"] = {
+ stage_id: masks.unsqueeze(1) # [B, 1, H_im, W_im]
+ for stage_id, masks in enumerate(input.masks)
+ }
+ # =======================================================================================
+ output_dict = {
+ "cond_frame_outputs": {}, # dict containing {frame_idx: }
+ "non_cond_frame_outputs": {}, # dict containing {frame_idx: }
+ }
+ for stage_id in processing_order:
+ # Get the image features for the current frames
+ # img_ids = input.find_inputs[stage_id].img_ids
+ img_ids = input.flat_obj_to_img_idx[stage_id]
+ if img_feats_already_computed:
+ # Retrieve image features according to img_ids (if they are already computed).
+ current_vision_feats = [x[:, img_ids] for x in vision_feats]
+ current_vision_pos_embeds = [x[:, img_ids] for x in vision_pos_embeds]
+ else:
+ # Otherwise, compute the image features on the fly for the given img_ids
+ # (this might be used for evaluation on long videos to avoid backbone OOM).
+ (
+ _,
+ current_vision_feats,
+ current_vision_pos_embeds,
+ feat_sizes,
+ ) = self._prepare_backbone_features_per_frame(input.flat_img_batch, img_ids)
+
+ # Get output masks based on this frame's prompts and previous memory
+ current_out = self.track_step(
+ frame_idx=stage_id,
+ is_init_cond_frame=stage_id in init_cond_frames,
+ current_vision_feats=current_vision_feats,
+ current_vision_pos_embeds=current_vision_pos_embeds,
+ feat_sizes=feat_sizes,
+ point_inputs=backbone_out["point_inputs_per_frame"].get(stage_id, None),
+ mask_inputs=backbone_out["mask_inputs_per_frame"].get(stage_id, None),
+ hidden_inputs=backbone_out["hidden_inputs_per_frame"].get(stage_id, None),
+ gt_masks=backbone_out["gt_masks_per_frame"].get(stage_id, None),
+ frames_to_add_correction_pt=frames_to_add_correction_pt,
+ output_dict=output_dict,
+ num_frames=num_frames,
+ )
+ # Append the output, depending on whether it's a conditioning frame
+ add_output_as_cond_frame = stage_id in init_cond_frames or (self.add_all_frames_to_correct_as_cond
+ and stage_id in frames_to_add_correction_pt)
+ if add_output_as_cond_frame:
+ output_dict["cond_frame_outputs"][stage_id] = current_out
+ else:
+ output_dict["non_cond_frame_outputs"][stage_id] = current_out
+
+ if return_dict:
+ return output_dict
+ # turn `output_dict` into a list for loss function
+ all_frame_outputs = {}
+ all_frame_outputs.update(output_dict["cond_frame_outputs"])
+ all_frame_outputs.update(output_dict["non_cond_frame_outputs"])
+ all_frame_outputs = [all_frame_outputs[t] for t in range(num_frames)]
+ # Make DDP happy with activation checkpointing by removing unused keys
+ all_frame_outputs = [{k: v for k, v in d.items() if k != "obj_ptr"} for d in all_frame_outputs]
+
+ return all_frame_outputs
+
+ def track_step(
+ self,
+ frame_idx,
+ is_init_cond_frame,
+ current_vision_feats,
+ current_vision_pos_embeds,
+ feat_sizes,
+ point_inputs,
+ mask_inputs,
+ hidden_inputs,
+ output_dict,
+ num_frames,
+ track_in_reverse=False, # tracking in reverse time order (for demo usage)
+ run_mem_encoder=True, # Whether to run the memory encoder on the predicted masks.
+ prev_sam_mask_logits=None, # The previously predicted SAM mask logits.
+ frames_to_add_correction_pt=None,
+ gt_masks=None,
+ ):
+ if frames_to_add_correction_pt is None:
+ frames_to_add_correction_pt = []
+ current_out, sam_outputs, high_res_features, pix_feat = self._track_step(
+ frame_idx,
+ is_init_cond_frame,
+ current_vision_feats,
+ current_vision_pos_embeds,
+ feat_sizes,
+ point_inputs,
+ mask_inputs,
+ hidden_inputs,
+ output_dict,
+ num_frames,
+ track_in_reverse,
+ prev_sam_mask_logits,
+ )
+
+ (
+ low_res_multimasks,
+ high_res_multimasks,
+ ious,
+ low_res_masks,
+ high_res_masks,
+ obj_ptr,
+ object_score_logits,
+ ) = sam_outputs
+
+ current_out["multistep_pred_masks"] = low_res_masks
+ current_out["multistep_pred_masks_high_res"] = high_res_masks
+ current_out["multistep_pred_multimasks"] = [low_res_multimasks]
+ current_out["multistep_pred_multimasks_high_res"] = [high_res_multimasks]
+ current_out["multistep_pred_ious"] = [ious]
+ current_out["multistep_point_inputs"] = [point_inputs]
+ current_out["multistep_object_score_logits"] = [object_score_logits]
+
+ # Optionally, sample correction points iteratively to correct the mask
+ if frame_idx in frames_to_add_correction_pt:
+ point_inputs, final_sam_outputs = self._iter_correct_pt_sampling(
+ is_init_cond_frame,
+ point_inputs,
+ gt_masks,
+ high_res_features,
+ pix_feat,
+ low_res_multimasks,
+ high_res_multimasks,
+ ious,
+ low_res_masks,
+ high_res_masks,
+ object_score_logits,
+ current_out,
+ )
+ (
+ _,
+ _,
+ _,
+ low_res_masks,
+ high_res_masks,
+ obj_ptr,
+ object_score_logits,
+ ) = final_sam_outputs
+
+        # Use the final prediction (after all correction steps) for output and eval
+ current_out["pred_masks"] = low_res_masks
+ current_out["pred_masks_high_res"] = high_res_masks
+ current_out["obj_ptr"] = obj_ptr
+
+ # Finally run the memory encoder on the predicted mask to encode
+ # it into a new memory feature (that can be used in future frames)
+ self._encode_memory_in_output(
+ current_vision_feats,
+ feat_sizes,
+ point_inputs,
+ run_mem_encoder,
+ high_res_masks,
+ object_score_logits,
+ current_out,
+ )
+ return current_out
+
+ def _iter_correct_pt_sampling(
+ self,
+ is_init_cond_frame,
+ point_inputs,
+ gt_masks,
+ high_res_features,
+ pix_feat_with_mem,
+ low_res_multimasks,
+ high_res_multimasks,
+ ious,
+ low_res_masks,
+ high_res_masks,
+ object_score_logits,
+ current_out,
+ ):
+
+ assert gt_masks is not None
+ all_pred_masks = [low_res_masks]
+ all_pred_high_res_masks = [high_res_masks]
+ all_pred_multimasks = [low_res_multimasks]
+ all_pred_high_res_multimasks = [high_res_multimasks]
+ all_pred_ious = [ious]
+ all_point_inputs = [point_inputs]
+ all_object_score_logits = [object_score_logits]
+ for _ in range(self.num_correction_pt_per_frame):
+ # sample a new point from the error between prediction and ground-truth
+ # (with a small probability, directly sample from GT masks instead of errors)
+ if self.training and self.prob_to_sample_from_gt_for_train > 0:
+ sample_from_gt = (self.rng.random() < self.prob_to_sample_from_gt_for_train)
+ else:
+ sample_from_gt = False
+ # if `pred_for_new_pt` is None, only GT masks will be used for point sampling
+ pred_for_new_pt = None if sample_from_gt else (high_res_masks > 0)
+ new_points, new_labels = get_next_point(
+ gt_masks=gt_masks,
+ pred_masks=pred_for_new_pt,
+ method="uniform" if self.training else self.pt_sampling_for_eval,
+ )
+ point_inputs = concat_points(point_inputs, new_points, new_labels)
+ # Feed the mask logits of the previous SAM outputs in the next SAM decoder step.
+ # For tracking, this means that when the user adds a correction click, we also feed
+ # the tracking output mask logits along with the click as input to the SAM decoder.
+ mask_inputs = low_res_masks
+ multimask_output = self._use_multimask(is_init_cond_frame, point_inputs)
+ if self.use_act_ckpt_iterative_pt_sampling and not multimask_output:
+ sam_outputs = torch.utils.checkpoint.checkpoint(
+ self._forward_sam_heads,
+ backbone_features=pix_feat_with_mem,
+ point_inputs=point_inputs,
+ mask_inputs=mask_inputs,
+ high_res_features=high_res_features,
+ multimask_output=multimask_output,
+ use_reentrant=False,
+ )
+ else:
+ sam_outputs = self._forward_sam_heads(
+ backbone_features=pix_feat_with_mem,
+ point_inputs=point_inputs,
+ mask_inputs=mask_inputs,
+ high_res_features=high_res_features,
+ multimask_output=multimask_output,
+ )
+ (
+ low_res_multimasks,
+ high_res_multimasks,
+ ious,
+ low_res_masks,
+ high_res_masks,
+ _,
+ object_score_logits,
+ ) = sam_outputs
+ all_pred_masks.append(low_res_masks)
+ all_pred_high_res_masks.append(high_res_masks)
+ all_pred_multimasks.append(low_res_multimasks)
+ all_pred_high_res_multimasks.append(high_res_multimasks)
+ all_pred_ious.append(ious)
+ all_point_inputs.append(point_inputs)
+ all_object_score_logits.append(object_score_logits)
+
+ # Concatenate the masks along channel (to compute losses on all of them,
+        # using `MultiStepInteractiveMasks`)
+ current_out["multistep_pred_masks"] = torch.cat(all_pred_masks, dim=1)
+ current_out["multistep_pred_masks_high_res"] = torch.cat(all_pred_high_res_masks, dim=1)
+ current_out["multistep_pred_multimasks"] = all_pred_multimasks
+ current_out["multistep_pred_multimasks_high_res"] = all_pred_high_res_multimasks
+ current_out["multistep_pred_ious"] = all_pred_ious
+ current_out["multistep_point_inputs"] = all_point_inputs
+ current_out["multistep_object_score_logits"] = all_object_score_logits
+
+ return point_inputs, sam_outputs
diff --git a/sam2/sam2_video_predictor.py b/sam2/sam2_video_predictor.py
new file mode 100644
index 0000000000000000000000000000000000000000..bd15a72f53c676eaab261b134d8b949b040b8f34
--- /dev/null
+++ b/sam2/sam2_video_predictor.py
@@ -0,0 +1,1272 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+
+from collections import OrderedDict
+
+import torch
+import torch.nn.functional as F
+from tqdm import tqdm
+
+from sam2.modeling.sam2_base import NO_OBJ_SCORE, SAM2Base
+from sam2.utils.misc import concat_points, fill_holes_in_mask_scores, load_video_frames
+
+
+class SAM2VideoPredictor(SAM2Base):
+ """The predictor class to handle user interactions and manage inference states."""
+
+ def __init__(
+ self,
+ fill_hole_area=0,
+ # whether to apply non-overlapping constraints on the output object masks
+ non_overlap_masks=False,
+ # whether to clear non-conditioning memory of the surrounding frames (which may contain outdated information) after adding correction clicks;
+        # note that this would only apply to *single-object tracking* unless `clear_non_cond_mem_for_multi_obj` is also set to True
+ clear_non_cond_mem_around_input=False,
+ # if `add_all_frames_to_correct_as_cond` is True, we also append to the conditioning frame list any frame that receives a later correction click
+        # if `add_all_frames_to_correct_as_cond` is False, we restrict the conditioning frame list to only the initial conditioning frames
+ add_all_frames_to_correct_as_cond=False,
+ inference_mode=True,
+ **kwargs,
+ ):
+ super().__init__(**kwargs)
+ self.fill_hole_area = fill_hole_area
+ self.non_overlap_masks = non_overlap_masks
+ self.clear_non_cond_mem_around_input = clear_non_cond_mem_around_input
+ self.add_all_frames_to_correct_as_cond = add_all_frames_to_correct_as_cond
+ self.inference_mode = inference_mode
+
+ @property
+ def dtype(self):
+ return self.image_encoder.trunk.patch_embed.proj.weight.dtype
+
+ def init_state(
+ self,
+ frame,
+ frame_size=None,
+ offload_video_to_cpu=False,
+ offload_state_to_cpu=False,
+ async_loading_frames=False,
+ ):
+ """Initialize an inference state."""
+ compute_device = self.device # device of the model
+ if isinstance(frame, str):
+ images, video_height, video_width = load_video_frames(
+ video_path=frame,
+ image_size=self.image_size,
+ offload_video_to_cpu=offload_video_to_cpu,
+ async_loading_frames=async_loading_frames,
+ compute_device=compute_device,
+ )
+ else:
+ if frame_size is None:
+ frame_size = (self.image_size, self.image_size)
+ images, video_height, video_width = (frame, *frame_size)
+ inference_state = {}
+ inference_state["images"] = images
+ inference_state["num_frames"] = len(images)
+ # whether to offload the video frames to CPU memory
+ # turning on this option saves the GPU memory with only a very small overhead
+ inference_state["offload_video_to_cpu"] = offload_video_to_cpu
+ # whether to offload the inference state to CPU memory
+ # turning on this option saves the GPU memory at the cost of a lower tracking fps
+ # (e.g. in a test case of 768x768 model, fps dropped from 27 to 24 when tracking one object
+ # and from 24 to 21 when tracking two objects)
+ inference_state["offload_state_to_cpu"] = offload_state_to_cpu
+ # the original video height and width, used for resizing final output scores
+ inference_state["video_height"] = video_height
+ inference_state["video_width"] = video_width
+ inference_state["device"] = compute_device
+ if offload_state_to_cpu:
+ inference_state["storage_device"] = torch.device("cpu")
+ else:
+ inference_state["storage_device"] = compute_device
+ # inputs on each frame
+ inference_state["point_inputs_per_obj"] = {}
+ inference_state["mask_inputs_per_obj"] = {}
+ # visual features on a small number of recently visited frames for quick interactions
+ inference_state["cached_features"] = {}
+ # values that don't change across frames (so we only need to hold one copy of them)
+ inference_state["constants"] = {}
+ # mapping between client-side object id and model-side object index
+ inference_state["obj_id_to_idx"] = OrderedDict()
+ inference_state["obj_idx_to_id"] = OrderedDict()
+ inference_state["obj_ids"] = []
+ # Slice (view) of each object tracking results, sharing the same memory with "output_dict"
+ inference_state["output_dict_per_obj"] = {}
+        # A temporary storage to hold new outputs when the user interacts with a frame
+ # to add clicks or mask (it's merged into "output_dict" before propagation starts)
+ inference_state["temp_output_dict_per_obj"] = {}
+ # metadata for each tracking frame (e.g. which direction it's tracked)
+ inference_state["frames_tracked_per_obj"] = {}
+        # Warm up the visual backbone and cache the image feature on frame 0
+ self._get_image_feature(inference_state, frame_idx=0, batch_size=1)
+ return inference_state
+
+ @classmethod
+ def from_pretrained(cls, model_id: str, **kwargs) -> "SAM2VideoPredictor":
+ """
+ Load a pretrained model from the Hugging Face hub.
+
+ Arguments:
+ model_id (str): The Hugging Face repository ID.
+ **kwargs: Additional arguments to pass to the model constructor.
+
+ Returns:
+ (SAM2VideoPredictor): The loaded model.
+ """
+ from sam2.build_sam import build_sam2_video_predictor_hf
+
+ sam_model = build_sam2_video_predictor_hf(model_id, **kwargs)
+ return sam_model
+
+ def _obj_id_to_idx(self, inference_state, obj_id):
+ """Map client-side object id to model-side object index."""
+ obj_idx = inference_state["obj_id_to_idx"].get(obj_id, None)
+ if obj_idx is not None:
+ return obj_idx
+
+ # We always allow adding new objects (including after tracking starts)
+ # get the next object slot
+ obj_idx = len(inference_state["obj_id_to_idx"])
+ inference_state["obj_id_to_idx"][obj_id] = obj_idx
+ inference_state["obj_idx_to_id"][obj_idx] = obj_id
+ inference_state["obj_ids"] = list(inference_state["obj_id_to_idx"])
+ # set up input and output structures for this object
+ inference_state["point_inputs_per_obj"][obj_idx] = {}
+ inference_state["mask_inputs_per_obj"][obj_idx] = {}
+ inference_state["output_dict_per_obj"][obj_idx] = {
+ "cond_frame_outputs": {}, # dict containing {frame_idx: }
+ "non_cond_frame_outputs": {}, # dict containing {frame_idx: }
+ }
+ inference_state["temp_output_dict_per_obj"][obj_idx] = {
+ "cond_frame_outputs": {}, # dict containing {frame_idx: }
+ "non_cond_frame_outputs": {}, # dict containing {frame_idx: }
+ }
+ inference_state["frames_tracked_per_obj"][obj_idx] = {}
+ return obj_idx
+
+ def _obj_idx_to_id(self, inference_state, obj_idx):
+ """Map model-side object index to client-side object id."""
+ return inference_state["obj_idx_to_id"][obj_idx]
+
+ def _get_obj_num(self, inference_state):
+ """Get the total number of unique object ids received so far in this session."""
+ return len(inference_state["obj_idx_to_id"])
+
+ @torch.inference_mode()
+ def add_new_hidden_state(
+ self,
+ inference_state,
+ frame_idx,
+ obj_id,
+ hidden,
+ ):
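+        """Add a new hidden-state prompt (e.g. an LLM segmentation-token embedding) to a frame."""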
+ obj_idx = self._obj_id_to_idx(inference_state, obj_id)
+        # If this frame hasn't been tracked before, we treat it as an initial conditioning
+        # frame, meaning that the input points are used to generate segments on this frame
+        # without using any memory from other frames, as in SAM. Otherwise (if it has been
+        # tracked), the input points will be used to correct the already tracked masks.
+ obj_frames_tracked = inference_state["frames_tracked_per_obj"][obj_idx]
+ is_init_cond_frame = frame_idx not in obj_frames_tracked
+ # whether to track in reverse time order
+ if is_init_cond_frame:
+ reverse = False
+ else:
+ reverse = obj_frames_tracked[frame_idx]["reverse"]
+ obj_output_dict = inference_state["output_dict_per_obj"][obj_idx]
+ obj_temp_output_dict = inference_state["temp_output_dict_per_obj"][obj_idx]
+        # Add the frame to the conditioning outputs if it's an initial conditioning frame
+        # or if the model treats all frames receiving clicks/masks as conditioning frames.
+ is_cond = is_init_cond_frame or self.add_all_frames_to_correct_as_cond
+ storage_key = "cond_frame_outputs" if is_cond else "non_cond_frame_outputs"
+
+ # Get any previously predicted mask logits on this object and feed it along with
+ # the new clicks into the SAM mask decoder.
+ prev_sam_mask_logits = None
+ # lookup temporary output dict first, which contains the most recent output
+ # (if not found, then lookup conditioning and non-conditioning frame output)
+ prev_out = obj_temp_output_dict[storage_key].get(frame_idx)
+ if prev_out is None:
+ prev_out = obj_output_dict["cond_frame_outputs"].get(frame_idx)
+ if prev_out is None:
+ prev_out = obj_output_dict["non_cond_frame_outputs"].get(frame_idx)
+
+ if prev_out is not None and prev_out["pred_masks"] is not None:
+ device = inference_state["device"]
+ prev_sam_mask_logits = prev_out["pred_masks"].to(device, non_blocking=True)
+ # Clamp the scale of prev_sam_mask_logits to avoid rare numerical issues.
+ prev_sam_mask_logits = torch.clamp(prev_sam_mask_logits, -32.0, 32.0)
+ current_out, _ = self._run_single_frame_inference(
+ inference_state=inference_state,
+ output_dict=obj_output_dict, # run on the slice of a single object
+ frame_idx=frame_idx,
+ batch_size=1, # run on the slice of a single object
+ is_init_cond_frame=is_init_cond_frame,
+ point_inputs=None,
+ mask_inputs=None,
+ hidden_inputs=hidden,
+ reverse=reverse,
+            # Skip the memory encoder when adding clicks or a mask. We execute the memory
+            # encoder at the beginning of `propagate_in_video` (after the user finalizes
+            # their clicks). This allows us to enforce non-overlapping constraints on all
+            # objects before encoding them into memory.
+ run_mem_encoder=False,
+ prev_sam_mask_logits=prev_sam_mask_logits,
+ )
+ # Add the output to the output dict (to be used as future memory)
+ obj_temp_output_dict[storage_key][frame_idx] = current_out
+
+ # Resize the output mask to the original video resolution
+ obj_ids = inference_state["obj_ids"]
+ consolidated_out = self._consolidate_temp_output_across_obj(
+ inference_state,
+ frame_idx,
+ is_cond=is_cond,
+ consolidate_at_video_res=True,
+ )
+ _, video_res_masks = self._get_orig_video_res_output(inference_state, consolidated_out["pred_masks_video_res"])
+ return frame_idx, obj_ids, video_res_masks
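+
+    # Illustrative call (a sketch; `state` is an inference state and
+    # `seg_hidden` is assumed to be the hidden-state tensor projected from the
+    # language model's segmentation token, which this UniPixel-specific hook
+    # feeds into the mask decoder as a prompt):
+    #
+    #     frame_idx, obj_ids, masks = predictor.add_new_hidden_state(
+    #         state, frame_idx=0, obj_id=1, hidden=seg_hidden)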
+
+ @torch.inference_mode()
+ def add_new_points_or_box(
+ self,
+ inference_state,
+ frame_idx,
+ obj_id,
+ points=None,
+ labels=None,
+ clear_old_points=True,
+ normalize_coords=True,
+ box=None,
+ ):
+ """Add new points to a frame."""
+ obj_idx = self._obj_id_to_idx(inference_state, obj_id)
+ point_inputs_per_frame = inference_state["point_inputs_per_obj"][obj_idx]
+ mask_inputs_per_frame = inference_state["mask_inputs_per_obj"][obj_idx]
+
+ if (points is not None) != (labels is not None):
+ raise ValueError("points and labels must be provided together")
+ if points is None and box is None:
+ raise ValueError("at least one of points or box must be provided as input")
+
+ if points is None:
+ points = torch.zeros(0, 2, dtype=torch.float32)
+ elif not isinstance(points, torch.Tensor):
+ points = torch.tensor(points, dtype=torch.float32)
+ if labels is None:
+ labels = torch.zeros(0, dtype=torch.int32)
+ elif not isinstance(labels, torch.Tensor):
+ labels = torch.tensor(labels, dtype=torch.int32)
+ if points.dim() == 2:
+ points = points.unsqueeze(0) # add batch dimension
+ if labels.dim() == 1:
+ labels = labels.unsqueeze(0) # add batch dimension
+
+ # If `box` is provided, we add it as the first two points with labels 2 and 3
+ # along with the user-provided points (consistent with how SAM 2 is trained).
+ if box is not None:
+ if not clear_old_points:
+ raise ValueError("cannot add box without clearing old points, since "
+ "box prompt must be provided before any point prompt "
+ "(please use clear_old_points=True instead)")
+ if not isinstance(box, torch.Tensor):
+ box = torch.tensor(box, dtype=torch.float32, device=points.device)
+ box_coords = box.reshape(1, 2, 2)
+ box_labels = torch.tensor([2, 3], dtype=torch.int32, device=labels.device)
+ box_labels = box_labels.reshape(1, 2)
+ points = torch.cat([box_coords, points], dim=1)
+ labels = torch.cat([box_labels, labels], dim=1)
+
+ if normalize_coords:
+ video_H = inference_state["video_height"]
+ video_W = inference_state["video_width"]
+ points = points / torch.tensor([video_W, video_H]).to(points.device)
+ # scale the (normalized) coordinates by the model's internal image size
+ points = points * self.image_size
+ points = points.to(inference_state["device"])
+ labels = labels.to(inference_state["device"])
+
+ if not clear_old_points:
+ point_inputs = point_inputs_per_frame.get(frame_idx, None)
+ else:
+ point_inputs = None
+ point_inputs = concat_points(point_inputs, points, labels)
+
+ point_inputs_per_frame[frame_idx] = point_inputs
+ mask_inputs_per_frame.pop(frame_idx, None)
+        # If this frame hasn't been tracked before, we treat it as an initial conditioning
+        # frame, meaning that the input points are used to generate segments on this frame
+        # without using any memory from other frames, as in SAM. Otherwise (if it has been
+        # tracked), the input points will be used to correct the already tracked masks.
+ obj_frames_tracked = inference_state["frames_tracked_per_obj"][obj_idx]
+ is_init_cond_frame = frame_idx not in obj_frames_tracked
+ # whether to track in reverse time order
+ if is_init_cond_frame:
+ reverse = False
+ else:
+ reverse = obj_frames_tracked[frame_idx]["reverse"]
+ obj_output_dict = inference_state["output_dict_per_obj"][obj_idx]
+ obj_temp_output_dict = inference_state["temp_output_dict_per_obj"][obj_idx]
+        # Add the frame to the conditioning outputs if it's an initial conditioning frame
+        # or if the model treats all frames receiving clicks/masks as conditioning frames.
+ is_cond = is_init_cond_frame or self.add_all_frames_to_correct_as_cond
+ storage_key = "cond_frame_outputs" if is_cond else "non_cond_frame_outputs"
+
+ # Get any previously predicted mask logits on this object and feed it along with
+ # the new clicks into the SAM mask decoder.
+ prev_sam_mask_logits = None
+ # lookup temporary output dict first, which contains the most recent output
+ # (if not found, then lookup conditioning and non-conditioning frame output)
+ prev_out = obj_temp_output_dict[storage_key].get(frame_idx)
+ if prev_out is None:
+ prev_out = obj_output_dict["cond_frame_outputs"].get(frame_idx)
+ if prev_out is None:
+ prev_out = obj_output_dict["non_cond_frame_outputs"].get(frame_idx)
+
+ if prev_out is not None and prev_out["pred_masks"] is not None:
+ device = inference_state["device"]
+ prev_sam_mask_logits = prev_out["pred_masks"].to(device, non_blocking=True)
+ # Clamp the scale of prev_sam_mask_logits to avoid rare numerical issues.
+ prev_sam_mask_logits = torch.clamp(prev_sam_mask_logits, -32.0, 32.0)
+ current_out, _ = self._run_single_frame_inference(
+ inference_state=inference_state,
+ output_dict=obj_output_dict, # run on the slice of a single object
+ frame_idx=frame_idx,
+ batch_size=1, # run on the slice of a single object
+ is_init_cond_frame=is_init_cond_frame,
+ point_inputs=point_inputs,
+ mask_inputs=None,
+ hidden_inputs=None,
+ reverse=reverse,
+            # Skip the memory encoder when adding clicks or a mask. We execute the memory
+            # encoder at the beginning of `propagate_in_video` (after the user finalizes
+            # their clicks). This allows us to enforce non-overlapping constraints on all
+            # objects before encoding them into memory.
+ run_mem_encoder=False,
+ prev_sam_mask_logits=prev_sam_mask_logits,
+ )
+ # Add the output to the output dict (to be used as future memory)
+ obj_temp_output_dict[storage_key][frame_idx] = current_out
+
+ # Resize the output mask to the original video resolution
+ obj_ids = inference_state["obj_ids"]
+ consolidated_out = self._consolidate_temp_output_across_obj(
+ inference_state,
+ frame_idx,
+ is_cond=is_cond,
+ consolidate_at_video_res=True,
+ )
+ _, video_res_masks = self._get_orig_video_res_output(inference_state, consolidated_out["pred_masks_video_res"])
+ return frame_idx, obj_ids, video_res_masks
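+
+    # Illustrative usage (a sketch; with `normalize_coords=True`, points are
+    # given in original-video pixel coordinates, and label 1 / 0 marks a
+    # positive / negative click):
+    #
+    #     frame_idx, obj_ids, masks = predictor.add_new_points_or_box(
+    #         state, frame_idx=0, obj_id=1,
+    #         points=[[210.0, 350.0]], labels=[1])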
+
+ def add_new_points(self, *args, **kwargs):
+ """Deprecated method. Please use `add_new_points_or_box` instead."""
+ return self.add_new_points_or_box(*args, **kwargs)
+
+ @torch.inference_mode()
+ def add_new_mask(
+ self,
+ inference_state,
+ frame_idx,
+ obj_id,
+ mask,
+ ):
+ """Add new mask to a frame."""
+ obj_idx = self._obj_id_to_idx(inference_state, obj_id)
+ point_inputs_per_frame = inference_state["point_inputs_per_obj"][obj_idx]
+ mask_inputs_per_frame = inference_state["mask_inputs_per_obj"][obj_idx]
+
+ if not isinstance(mask, torch.Tensor):
+ mask = torch.tensor(mask, dtype=torch.bool)
+ assert mask.dim() == 2
+ mask_H, mask_W = mask.shape
+ mask_inputs_orig = mask[None, None] # add batch and channel dimension
+ mask_inputs_orig = mask_inputs_orig.float().to(inference_state["device"])
+
+ # resize the mask if it doesn't match the model's image size
+ if mask_H != self.image_size or mask_W != self.image_size:
+ mask_inputs = torch.nn.functional.interpolate(
+ mask_inputs_orig,
+ size=(self.image_size, self.image_size),
+ align_corners=False,
+ mode="bilinear",
+ antialias=True, # use antialias for downsampling
+ )
+ mask_inputs = (mask_inputs >= 0.5).float()
+ else:
+ mask_inputs = mask_inputs_orig
+
+ mask_inputs_per_frame[frame_idx] = mask_inputs
+ point_inputs_per_frame.pop(frame_idx, None)
+        # If this frame hasn't been tracked before, we treat it as an initial conditioning
+        # frame, meaning that the input mask is used to generate segments on this frame
+        # without using any memory from other frames, as in SAM. Otherwise (if it has been
+        # tracked), the input mask will be used to correct the already tracked masks.
+ obj_frames_tracked = inference_state["frames_tracked_per_obj"][obj_idx]
+ is_init_cond_frame = frame_idx not in obj_frames_tracked
+ # whether to track in reverse time order
+ if is_init_cond_frame:
+ reverse = False
+ else:
+ reverse = obj_frames_tracked[frame_idx]["reverse"]
+ obj_output_dict = inference_state["output_dict_per_obj"][obj_idx]
+ obj_temp_output_dict = inference_state["temp_output_dict_per_obj"][obj_idx]
+        # Add the frame to the conditioning outputs if it's an initial conditioning frame
+        # or if the model treats all frames receiving clicks/masks as conditioning frames.
+ is_cond = is_init_cond_frame or self.add_all_frames_to_correct_as_cond
+ storage_key = "cond_frame_outputs" if is_cond else "non_cond_frame_outputs"
+
+ current_out, _ = self._run_single_frame_inference(
+ inference_state=inference_state,
+ output_dict=obj_output_dict, # run on the slice of a single object
+ frame_idx=frame_idx,
+ batch_size=1, # run on the slice of a single object
+ is_init_cond_frame=is_init_cond_frame,
+ point_inputs=None,
+ mask_inputs=mask_inputs,
+ hidden_inputs=None,
+ reverse=reverse,
+            # Skip the memory encoder when adding clicks or a mask. We execute the memory
+            # encoder at the beginning of `propagate_in_video` (after the user finalizes
+            # their clicks). This allows us to enforce non-overlapping constraints on all
+            # objects before encoding them into memory.
+ run_mem_encoder=False,
+ )
+ # Add the output to the output dict (to be used as future memory)
+ obj_temp_output_dict[storage_key][frame_idx] = current_out
+
+ # Resize the output mask to the original video resolution
+ obj_ids = inference_state["obj_ids"]
+ consolidated_out = self._consolidate_temp_output_across_obj(
+ inference_state,
+ frame_idx,
+ is_cond=is_cond,
+ consolidate_at_video_res=True,
+ )
+ _, video_res_masks = self._get_orig_video_res_output(inference_state, consolidated_out["pred_masks_video_res"])
+ return frame_idx, obj_ids, video_res_masks
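+
+    # Illustrative usage (a sketch; the mask may be any 2-D boolean array,
+    # e.g. at the original video resolution -- it is resized to the model's
+    # image size internally when needed):
+    #
+    #     import numpy as np
+    #     init_mask = np.zeros((480, 854), dtype=bool)
+    #     init_mask[100:200, 300:500] = True
+    #     frame_idx, obj_ids, masks = predictor.add_new_mask(
+    #         state, frame_idx=0, obj_id=1, mask=init_mask)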
+
+ def _get_orig_video_res_output(self, inference_state, any_res_masks):
+ """
+ Resize the object scores to the original video resolution (video_res_masks)
+ and apply non-overlapping constraints for final output.
+ """
+ device = inference_state["device"]
+ video_H = inference_state["video_height"]
+ video_W = inference_state["video_width"]
+ any_res_masks = any_res_masks.to(device, non_blocking=True)
+ if any_res_masks.shape[-2:] == (video_H, video_W):
+ video_res_masks = any_res_masks
+ else:
+ video_res_masks = torch.nn.functional.interpolate(
+ any_res_masks,
+ size=(video_H, video_W),
+ mode="bilinear",
+ align_corners=False,
+ )
+ if self.non_overlap_masks:
+ video_res_masks = self._apply_non_overlapping_constraints(video_res_masks)
+ return any_res_masks, video_res_masks
+
+ def _consolidate_temp_output_across_obj(
+ self,
+ inference_state,
+ frame_idx,
+ is_cond,
+ consolidate_at_video_res=False,
+ ):
+ """
+ Consolidate the per-object temporary outputs in `temp_output_dict_per_obj` on
+ a frame into a single output for all objects, including
+ 1) fill any missing objects either from `output_dict_per_obj` (if they exist in
+ `output_dict_per_obj` for this frame) or leave them as placeholder values
+ (if they don't exist in `output_dict_per_obj` for this frame);
+        2) if specified, rerun the memory encoder after applying non-overlapping constraints
+ on the object scores.
+ """
+ batch_size = self._get_obj_num(inference_state)
+ storage_key = "cond_frame_outputs" if is_cond else "non_cond_frame_outputs"
+ # Optionally, we allow consolidating the temporary outputs at the original
+ # video resolution (to provide a better editing experience for mask prompts).
+ if consolidate_at_video_res:
+ consolidated_H = inference_state["video_height"]
+ consolidated_W = inference_state["video_width"]
+ consolidated_mask_key = "pred_masks_video_res"
+ else:
+ consolidated_H = consolidated_W = self.image_size // 4
+ consolidated_mask_key = "pred_masks"
+
+ # Initialize `consolidated_out`. Its "maskmem_features" and "maskmem_pos_enc"
+ # will be added when rerunning the memory encoder after applying non-overlapping
+ # constraints to object scores. Its "pred_masks" are prefilled with a large
+ # negative value (NO_OBJ_SCORE) to represent missing objects.
+ consolidated_out = {
+ consolidated_mask_key:
+ torch.full(
+ size=(batch_size, 1, consolidated_H, consolidated_W),
+ fill_value=NO_OBJ_SCORE,
+ dtype=inference_state["cached_features"][frame_idx][0].dtype,
+ device=inference_state["storage_device"],
+ ),
+ }
+ for obj_idx in range(batch_size):
+ obj_temp_output_dict = inference_state["temp_output_dict_per_obj"][obj_idx]
+ obj_output_dict = inference_state["output_dict_per_obj"][obj_idx]
+ out = obj_temp_output_dict[storage_key].get(frame_idx, None)
+ # If the object doesn't appear in "temp_output_dict_per_obj" on this frame,
+ # we fall back and look up its previous output in "output_dict_per_obj".
+ # We look up both "cond_frame_outputs" and "non_cond_frame_outputs" in
+ # "output_dict_per_obj" to find a previous output for this object.
+ if out is None:
+ out = obj_output_dict["cond_frame_outputs"].get(frame_idx, None)
+ if out is None:
+ out = obj_output_dict["non_cond_frame_outputs"].get(frame_idx, None)
+ # If the object doesn't appear in "output_dict_per_obj" either, we skip it
+ # and leave its mask scores to the default scores (i.e. the NO_OBJ_SCORE
+ # placeholder above) and set its object pointer to be a dummy pointer.
+ if out is None:
+ continue
+ # Add the temporary object output mask to consolidated output mask
+ obj_mask = out["pred_masks"]
+ consolidated_pred_masks = consolidated_out[consolidated_mask_key]
+ if obj_mask.shape[-2:] == consolidated_pred_masks.shape[-2:]:
+ consolidated_pred_masks[obj_idx:obj_idx + 1] = obj_mask
+ else:
+ # Resize first if temporary object mask has a different resolution
+ resized_obj_mask = torch.nn.functional.interpolate(
+ obj_mask,
+ size=consolidated_pred_masks.shape[-2:],
+ mode="bilinear",
+ align_corners=False,
+ )
+ consolidated_pred_masks[obj_idx:obj_idx + 1] = resized_obj_mask
+
+ return consolidated_out
+
+ @torch.inference_mode()
+ def propagate_in_video_preflight(self, inference_state):
+ """Prepare inference_state and consolidate temporary outputs before tracking."""
+ # Check and make sure that every object has received input points or masks.
+ batch_size = self._get_obj_num(inference_state)
+ if batch_size == 0:
+ raise RuntimeError("No input points or masks are provided for any object; please add inputs first.")
+
+ # Consolidate per-object temporary outputs in "temp_output_dict_per_obj" and
+ # add them into "output_dict".
+ for obj_idx in range(batch_size):
+ obj_output_dict = inference_state["output_dict_per_obj"][obj_idx]
+ obj_temp_output_dict = inference_state["temp_output_dict_per_obj"][obj_idx]
+ for is_cond in [False, True]:
+ # Separately consolidate conditioning and non-conditioning temp outputs
+ storage_key = ("cond_frame_outputs" if is_cond else "non_cond_frame_outputs")
+ # Find all the frames that contain temporary outputs for any objects
+                # (these should be the frames that have just received clicks or mask inputs
+ # via `add_new_points_or_box` or `add_new_mask`)
+ for frame_idx, out in obj_temp_output_dict[storage_key].items():
+ # Run memory encoder on the temporary outputs (if the memory feature is missing)
+ if out["maskmem_features"] is None:
+ high_res_masks = torch.nn.functional.interpolate(
+ out["pred_masks"].to(inference_state["device"]),
+ size=(self.image_size, self.image_size),
+ mode="bilinear",
+ align_corners=False,
+ )
+ maskmem_features, maskmem_pos_enc = self._run_memory_encoder(
+ inference_state=inference_state,
+ frame_idx=frame_idx,
+ batch_size=1, # run on the slice of a single object
+ high_res_masks=high_res_masks,
+ object_score_logits=out["object_score_logits"],
+ # these frames are what the user interacted with
+ is_mask_from_pts=True,
+ )
+ out["maskmem_features"] = maskmem_features
+ out["maskmem_pos_enc"] = maskmem_pos_enc
+
+ obj_output_dict[storage_key][frame_idx] = out
+ if self.clear_non_cond_mem_around_input:
+ # clear non-conditioning memory of the surrounding frames
+ self._clear_obj_non_cond_mem_around_input(inference_state, frame_idx, obj_idx)
+
+ # clear temporary outputs in `temp_output_dict_per_obj`
+ obj_temp_output_dict[storage_key].clear()
+
+ # check and make sure that every object has received input points or masks
+ obj_output_dict = inference_state["output_dict_per_obj"][obj_idx]
+ if len(obj_output_dict["cond_frame_outputs"]) == 0:
+ obj_id = self._obj_idx_to_id(inference_state, obj_idx)
+ raise RuntimeError(
+ f"No input points or masks are provided for object id {obj_id}; please add inputs first.")
+ # edge case: if an output is added to "cond_frame_outputs", we remove any prior
+ # output on the same frame in "non_cond_frame_outputs"
+ for frame_idx in obj_output_dict["cond_frame_outputs"]:
+ obj_output_dict["non_cond_frame_outputs"].pop(frame_idx, None)
+
+ @torch.inference_mode()
+ def propagate_in_video(
+ self,
+ inference_state,
+ start_frame_idx=None,
+ max_frame_num_to_track=None,
+ reverse=False,
+ verbose=True,
+ ):
+ """Propagate the input points across frames to track in the entire video."""
+ self.propagate_in_video_preflight(inference_state)
+
+ obj_ids = inference_state["obj_ids"]
+ num_frames = inference_state["num_frames"]
+ batch_size = self._get_obj_num(inference_state)
+
+ # set start index, end index, and processing order
+ if start_frame_idx is None:
+ # default: start from the earliest frame with input points
+ start_frame_idx = min(t for obj_output_dict in inference_state["output_dict_per_obj"].values()
+ for t in obj_output_dict["cond_frame_outputs"])
+ if max_frame_num_to_track is None:
+ # default: track all the frames in the video
+ max_frame_num_to_track = num_frames
+ if reverse:
+ end_frame_idx = max(start_frame_idx - max_frame_num_to_track, 0)
+ if start_frame_idx > 0:
+ processing_order = range(start_frame_idx, end_frame_idx - 1, -1)
+ else:
+ processing_order = [] # skip reverse tracking if starting from frame 0
+ else:
+ end_frame_idx = min(start_frame_idx + max_frame_num_to_track, num_frames - 1)
+ processing_order = range(start_frame_idx, end_frame_idx + 1)
+
+ for frame_idx in tqdm(processing_order, desc="propagate in video", disable=not verbose):
+ pred_masks_per_obj = [None] * batch_size
+ for obj_idx in range(batch_size):
+ obj_output_dict = inference_state["output_dict_per_obj"][obj_idx]
+ # We skip those frames already in consolidated outputs (these are frames
+ # that received input clicks or mask). Note that we cannot directly run
+ # batched forward on them via `_run_single_frame_inference` because the
+ # number of clicks on each object might be different.
+ if frame_idx in obj_output_dict["cond_frame_outputs"]:
+ storage_key = "cond_frame_outputs"
+ current_out = obj_output_dict[storage_key][frame_idx]
+ device = inference_state["device"]
+ pred_masks = current_out["pred_masks"].to(device, non_blocking=True)
+ if self.clear_non_cond_mem_around_input:
+ # clear non-conditioning memory of the surrounding frames
+ self._clear_obj_non_cond_mem_around_input(inference_state, frame_idx, obj_idx)
+ else:
+ storage_key = "non_cond_frame_outputs"
+ current_out, pred_masks = self._run_single_frame_inference(
+ inference_state=inference_state,
+ output_dict=obj_output_dict,
+ frame_idx=frame_idx,
+ batch_size=1, # run on the slice of a single object
+ is_init_cond_frame=False,
+ point_inputs=None,
+ mask_inputs=None,
+ hidden_inputs=None,
+ reverse=reverse,
+ run_mem_encoder=True,
+ )
+ obj_output_dict[storage_key][frame_idx] = current_out
+
+ inference_state["frames_tracked_per_obj"][obj_idx][frame_idx] = {"reverse": reverse}
+ pred_masks_per_obj[obj_idx] = pred_masks
+
+ # Resize the output mask to the original video resolution (we directly use
+ # the mask scores on GPU for output to avoid any CPU conversion in between)
+ if len(pred_masks_per_obj) > 1:
+ all_pred_masks = torch.cat(pred_masks_per_obj, dim=0)
+ else:
+ all_pred_masks = pred_masks_per_obj[0]
+ _, video_res_masks = self._get_orig_video_res_output(inference_state, all_pred_masks)
+ yield frame_idx, obj_ids, video_res_masks
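+
+    # Illustrative usage (a sketch; this method is a generator, so masks are
+    # consumed frame by frame, and thresholding the logits at 0 yields a
+    # boolean mask per object):
+    #
+    #     video_segments = {}
+    #     for frame_idx, obj_ids, masks in predictor.propagate_in_video(state):
+    #         video_segments[frame_idx] = {
+    #             obj_id: (masks[i] > 0.0) for i, obj_id in enumerate(obj_ids)}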
+
+ @torch.inference_mode()
+ def clear_all_prompts_in_frame(self, inference_state, frame_idx, obj_id, need_output=True):
+ """Remove all input points or mask in a specific frame for a given object."""
+ obj_idx = self._obj_id_to_idx(inference_state, obj_id)
+
+ # Clear the conditioning information on the given frame
+ inference_state["point_inputs_per_obj"][obj_idx].pop(frame_idx, None)
+ inference_state["mask_inputs_per_obj"][obj_idx].pop(frame_idx, None)
+
+ temp_output_dict_per_obj = inference_state["temp_output_dict_per_obj"]
+ temp_output_dict_per_obj[obj_idx]["cond_frame_outputs"].pop(frame_idx, None)
+ temp_output_dict_per_obj[obj_idx]["non_cond_frame_outputs"].pop(frame_idx, None)
+
+ # Remove the frame's conditioning output (possibly downgrading it to non-conditioning)
+ obj_output_dict = inference_state["output_dict_per_obj"][obj_idx]
+ out = obj_output_dict["cond_frame_outputs"].pop(frame_idx, None)
+ if out is not None:
+ # The frame is not a conditioning frame anymore since it's not receiving inputs,
+ # so we "downgrade" its output (if exists) to a non-conditioning frame output.
+ obj_output_dict["non_cond_frame_outputs"][frame_idx] = out
+ inference_state["frames_tracked_per_obj"][obj_idx].pop(frame_idx, None)
+
+ if not need_output:
+ return
+ # Finally, output updated masks per object (after removing the inputs above)
+ obj_ids = inference_state["obj_ids"]
+ is_cond = any(frame_idx in obj_temp_output_dict["cond_frame_outputs"]
+ for obj_temp_output_dict in temp_output_dict_per_obj.values())
+ consolidated_out = self._consolidate_temp_output_across_obj(
+ inference_state,
+ frame_idx,
+ is_cond=is_cond,
+ consolidate_at_video_res=True,
+ )
+ _, video_res_masks = self._get_orig_video_res_output(inference_state, consolidated_out["pred_masks_video_res"])
+ return frame_idx, obj_ids, video_res_masks
+
+ @torch.inference_mode()
+ def reset_state(self, inference_state):
+ """Remove all input points or mask in all frames throughout the video."""
+ self._reset_tracking_results(inference_state)
+ # Remove all object ids
+ inference_state["obj_id_to_idx"].clear()
+ inference_state["obj_idx_to_id"].clear()
+ inference_state["obj_ids"].clear()
+ inference_state["point_inputs_per_obj"].clear()
+ inference_state["mask_inputs_per_obj"].clear()
+ inference_state["output_dict_per_obj"].clear()
+ inference_state["temp_output_dict_per_obj"].clear()
+ inference_state["frames_tracked_per_obj"].clear()
+
+ def _reset_tracking_results(self, inference_state):
+ """Reset all tracking inputs and results across the videos."""
+ for v in inference_state["point_inputs_per_obj"].values():
+ v.clear()
+ for v in inference_state["mask_inputs_per_obj"].values():
+ v.clear()
+ for v in inference_state["output_dict_per_obj"].values():
+ v["cond_frame_outputs"].clear()
+ v["non_cond_frame_outputs"].clear()
+ for v in inference_state["temp_output_dict_per_obj"].values():
+ v["cond_frame_outputs"].clear()
+ v["non_cond_frame_outputs"].clear()
+ for v in inference_state["frames_tracked_per_obj"].values():
+ v.clear()
+
+ def _get_image_feature(self, inference_state, frame_idx, batch_size):
+ """Compute the image features on a given frame."""
+        # NOTE: Unlike the upstream SAM 2 predictor, which lazily computes and caches
+        # features for only the most recently visited frame, this version precomputes
+        # the backbone features for all frames in one forward pass and keeps them in
+        # "cached_features" for the rest of the session (see below).
+
+ # build cache for image features
+ if not inference_state["cached_features"]:
+ image = inference_state["images"].to(inference_state["device"])
+ backbone_out = self.forward_image(image)
+ inference_state["cached_features"] = {
+ i: (image[i, None], {
+ k: v[i, None] if torch.is_tensor(v) else [t[i, None] for t in v]
+ for k, v in backbone_out.items()
+ })
+ for i in range(image.size(0))
+ }
+
+ # retrieve from cache
+ image, backbone_out = inference_state["cached_features"][frame_idx]
+
+ # expand the features to have the same dimension as the number of objects
+ expanded_image = image.expand(batch_size, -1, -1, -1)
+ expanded_backbone_out = {
+ "backbone_fpn": backbone_out["backbone_fpn"].copy(),
+ "vision_pos_enc": backbone_out["vision_pos_enc"].copy(),
+ }
+ for i, feat in enumerate(expanded_backbone_out["backbone_fpn"]):
+ expanded_backbone_out["backbone_fpn"][i] = feat.expand(batch_size, -1, -1, -1)
+ for i, pos in enumerate(expanded_backbone_out["vision_pos_enc"]):
+ pos = pos.expand(batch_size, -1, -1, -1)
+ expanded_backbone_out["vision_pos_enc"][i] = pos
+
+ features = self._prepare_backbone_features(expanded_backbone_out)
+ features = (expanded_image, ) + features
+ return features
+
+ def _run_single_frame_inference(
+ self,
+ inference_state,
+ output_dict,
+ frame_idx,
+ batch_size,
+ is_init_cond_frame,
+ point_inputs,
+ mask_inputs,
+ hidden_inputs,
+ reverse,
+ run_mem_encoder,
+ prev_sam_mask_logits=None,
+ ):
+ """Run tracking on a single frame based on current inputs and previous memory."""
+ # Retrieve correct image features
+ (
+ _,
+ _,
+ current_vision_feats,
+ current_vision_pos_embeds,
+ feat_sizes,
+ ) = self._get_image_feature(inference_state, frame_idx, batch_size)
+
+ # point and mask should not appear as input simultaneously on the same frame
+ assert point_inputs is None or mask_inputs is None
+ current_out = self.track_step(
+ frame_idx=frame_idx,
+ is_init_cond_frame=is_init_cond_frame,
+ current_vision_feats=current_vision_feats,
+ current_vision_pos_embeds=current_vision_pos_embeds,
+ feat_sizes=feat_sizes,
+ point_inputs=point_inputs,
+ mask_inputs=mask_inputs,
+ hidden_inputs=hidden_inputs,
+ output_dict=output_dict,
+ num_frames=inference_state["num_frames"],
+ track_in_reverse=reverse,
+ run_mem_encoder=run_mem_encoder,
+ prev_sam_mask_logits=prev_sam_mask_logits,
+ )
+
+ # optionally offload the output to CPU memory to save GPU space
+ storage_device = inference_state["storage_device"]
+ maskmem_features = current_out["maskmem_features"]
+ if maskmem_features is not None:
+ maskmem_features = maskmem_features.to(inference_state["cached_features"][frame_idx][0].dtype)
+ maskmem_features = maskmem_features.to(storage_device, non_blocking=True)
+ pred_masks_gpu = current_out["pred_masks"]
+ # potentially fill holes in the predicted masks
+ if self.fill_hole_area > 0:
+ pred_masks_gpu = fill_holes_in_mask_scores(pred_masks_gpu, self.fill_hole_area)
+ pred_masks = pred_masks_gpu.to(storage_device, non_blocking=True)
+ # "maskmem_pos_enc" is the same across frames, so we only need to store one copy of it
+ maskmem_pos_enc = self._get_maskmem_pos_enc(inference_state, current_out)
+ # object pointer is a small tensor, so we always keep it on GPU memory for fast access
+ obj_ptr = current_out["obj_ptr"]
+ object_score_logits = current_out["object_score_logits"]
+ # make a compact version of this frame's output to reduce the state size
+ compact_current_out = {
+ "maskmem_features": maskmem_features,
+ "maskmem_pos_enc": maskmem_pos_enc,
+ "pred_masks": pred_masks,
+ "obj_ptr": obj_ptr,
+ "object_score_logits": object_score_logits,
+ }
+ # NOTE: reduce memory during inference ----------------------------------------
+ # https://github.com/facebookresearch/sam2/issues/196
+ # step = self.num_maskmem * self.memory_temporal_stride_for_eval * 2
+ # drop_frame_inds = [
+ # i for i in output_dict["non_cond_frame_outputs"].keys()
+ # if (i > frame_idx + step if reverse else i < frame_idx - step)
+ # ]
+ # for idx in drop_frame_inds:
+ # output_dict["non_cond_frame_outputs"].pop(idx)
+ # for obj_output_dict in inference_state["output_dict_per_obj"].values():
+ # obj_output_dict["non_cond_frame_outputs"].pop(idx, None)
+ # -----------------------------------------------------------------------------
+ return compact_current_out, pred_masks_gpu
+
+ def _run_memory_encoder(
+ self,
+ inference_state,
+ frame_idx,
+ batch_size,
+ high_res_masks,
+ object_score_logits,
+ is_mask_from_pts,
+ ):
+ """
+ Run the memory encoder on `high_res_masks`. This is usually after applying
+        non-overlapping constraints to object scores. Since their scores have changed,
+        their memory also needs to be recomputed with the memory encoder.
+ """
+ # Retrieve correct image features
+ _, _, current_vision_feats, _, feat_sizes = self._get_image_feature(inference_state, frame_idx, batch_size)
+ maskmem_features, maskmem_pos_enc = self._encode_new_memory(
+ current_vision_feats=current_vision_feats,
+ feat_sizes=feat_sizes,
+ pred_masks_high_res=high_res_masks,
+ object_score_logits=object_score_logits,
+ is_mask_from_pts=is_mask_from_pts,
+ )
+
+ # optionally offload the output to CPU memory to save GPU space
+ storage_device = inference_state["storage_device"]
+ maskmem_features = maskmem_features.to(inference_state["cached_features"][frame_idx][0].dtype)
+ maskmem_features = maskmem_features.to(storage_device, non_blocking=True)
+ # "maskmem_pos_enc" is the same across frames, so we only need to store one copy of it
+ maskmem_pos_enc = self._get_maskmem_pos_enc(inference_state, {"maskmem_pos_enc": maskmem_pos_enc})
+ return maskmem_features, maskmem_pos_enc
+
+ def _get_maskmem_pos_enc(self, inference_state, current_out):
+ """
+ `maskmem_pos_enc` is the same across frames and objects, so we cache it as
+ a constant in the inference session to reduce session storage size.
+ """
+ model_constants = inference_state["constants"]
+ # "out_maskmem_pos_enc" should be either a list of tensors or None
+ out_maskmem_pos_enc = current_out["maskmem_pos_enc"]
+ if out_maskmem_pos_enc is not None:
+ if "maskmem_pos_enc" not in model_constants:
+ assert isinstance(out_maskmem_pos_enc, list)
+                # only take the slice for one object, since it's the same across objects
+ maskmem_pos_enc = [x[0:1].clone() for x in out_maskmem_pos_enc]
+ model_constants["maskmem_pos_enc"] = maskmem_pos_enc
+ else:
+ maskmem_pos_enc = model_constants["maskmem_pos_enc"]
+ # expand the cached maskmem_pos_enc to the actual batch size
+ batch_size = out_maskmem_pos_enc[0].size(0)
+ expanded_maskmem_pos_enc = [x.expand(batch_size, -1, -1, -1) for x in maskmem_pos_enc]
+ else:
+ expanded_maskmem_pos_enc = None
+ return expanded_maskmem_pos_enc
+
+ @torch.inference_mode()
+ def remove_object(self, inference_state, obj_id, strict=False, need_output=True):
+ """
+ Remove an object id from the tracking state. If strict is True, we check whether
+ the object id actually exists and raise an error if it doesn't exist.
+ """
+ old_obj_idx_to_rm = inference_state["obj_id_to_idx"].get(obj_id, None)
+ updated_frames = []
+ # Check whether this object_id to remove actually exists and possibly raise an error.
+ if old_obj_idx_to_rm is None:
+ if not strict:
+ return inference_state["obj_ids"], updated_frames
+ raise RuntimeError(f"Cannot remove object id {obj_id} as it doesn't exist. "
+ f"All existing object ids: {inference_state['obj_ids']}.")
+
+ # If this is the only remaining object id, we simply reset the state.
+ if len(inference_state["obj_id_to_idx"]) == 1:
+ self.reset_state(inference_state)
+ return inference_state["obj_ids"], updated_frames
+
+ # There are still remaining objects after removing this object id. In this case,
+ # we need to delete the object storage from inference state tensors.
+ # Step 0: clear the input on those frames where this object id has point or mask input
+ # (note that this step is required as it might downgrade conditioning frames to
+ # non-conditioning ones)
+ obj_input_frames_inds = set()
+ obj_input_frames_inds.update(inference_state["point_inputs_per_obj"][old_obj_idx_to_rm])
+ obj_input_frames_inds.update(inference_state["mask_inputs_per_obj"][old_obj_idx_to_rm])
+ for frame_idx in obj_input_frames_inds:
+ self.clear_all_prompts_in_frame(inference_state, frame_idx, obj_id, need_output=False)
+
+ # Step 1: Update the object id mapping (note that it must be done after Step 0,
+ # since Step 0 still requires the old object id mappings in inference_state)
+ old_obj_ids = inference_state["obj_ids"]
+ old_obj_inds = list(range(len(old_obj_ids)))
+ remain_old_obj_inds = old_obj_inds.copy()
+ remain_old_obj_inds.remove(old_obj_idx_to_rm)
+ new_obj_ids = [old_obj_ids[old_idx] for old_idx in remain_old_obj_inds]
+ new_obj_inds = list(range(len(new_obj_ids)))
+ # build new mappings
+ old_idx_to_new_idx = dict(zip(remain_old_obj_inds, new_obj_inds))
+ inference_state["obj_id_to_idx"] = dict(zip(new_obj_ids, new_obj_inds))
+ inference_state["obj_idx_to_id"] = dict(zip(new_obj_inds, new_obj_ids))
+ inference_state["obj_ids"] = new_obj_ids
+
+ # Step 2: For per-object tensor storage, we shift their obj_idx in the dict keys.
+ def _map_keys(container):
+ new_kvs = []
+ for k in old_obj_inds:
+ v = container.pop(k)
+ if k in old_idx_to_new_idx:
+ new_kvs.append((old_idx_to_new_idx[k], v))
+ container.update(new_kvs)
+
+ _map_keys(inference_state["point_inputs_per_obj"])
+ _map_keys(inference_state["mask_inputs_per_obj"])
+ _map_keys(inference_state["output_dict_per_obj"])
+ _map_keys(inference_state["temp_output_dict_per_obj"])
+ _map_keys(inference_state["frames_tracked_per_obj"])
+
+ # Step 3: Further collect the outputs on those frames in `obj_input_frames_inds`, which
+ # could show an updated mask for objects previously occluded by the object being removed
+ if need_output:
+ temp_output_dict_per_obj = inference_state["temp_output_dict_per_obj"]
+ for frame_idx in obj_input_frames_inds:
+ is_cond = any(frame_idx in obj_temp_output_dict["cond_frame_outputs"]
+ for obj_temp_output_dict in temp_output_dict_per_obj.values())
+ consolidated_out = self._consolidate_temp_output_across_obj(
+ inference_state,
+ frame_idx,
+ is_cond=is_cond,
+ consolidate_at_video_res=True,
+ )
+ _, video_res_masks = self._get_orig_video_res_output(inference_state,
+ consolidated_out["pred_masks_video_res"])
+ updated_frames.append((frame_idx, video_res_masks))
+
+ return inference_state["obj_ids"], updated_frames
+
+    def _clear_obj_non_cond_mem_around_input(self, inference_state, frame_idx, obj_idx):
+        """
+        Remove the non-conditioning memory around the input frame. When users provide
+        correction clicks, the surrounding frames' non-conditioning memories can still
+        contain outdated object appearance information and could confuse the model.
+
+        This method clears those non-conditioning memories surrounding the interacted
+        frame to avoid giving the model both old and new information about the object.
+        """
+        r = self.memory_temporal_stride_for_eval
+        frame_idx_begin = frame_idx - r * self.num_maskmem
+        frame_idx_end = frame_idx + r * self.num_maskmem
+        obj_output_dict = inference_state["output_dict_per_obj"][obj_idx]
+        non_cond_frame_outputs = obj_output_dict["non_cond_frame_outputs"]
+        for t in range(frame_idx_begin, frame_idx_end + 1):
+            non_cond_frame_outputs.pop(t, None)
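+
+    # Example (a sketch; assumes the SAM 2 defaults num_maskmem=7 and
+    # memory_temporal_stride_for_eval=1): an interaction on frame t clears
+    # non-conditioning memories on frames t-7 ... t+7 for that object.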
+
+
+class SAM2VideoPredictorVOS(SAM2VideoPredictor):
+ """Optimized for the VOS setting"""
+
+    def __init__(self, *args, **kwargs):
+        # This subclass has not been adapted for LLM-driven inference, so
+        # constructing it is disabled; the compilation path below is
+        # intentionally unreachable until it is updated.
+        raise NotImplementedError("SAM2VideoPredictorVOS has not been modified for LLMs")
+        super().__init__(*args, **kwargs)
+        self._compile_all_components()
+
+ def _compile_all_components(self):
+ print("Compiling all components for VOS setting. First time may be very slow.")
+ self.memory_encoder.forward = torch.compile(
+ self.memory_encoder.forward,
+ mode="max-autotune",
+ fullgraph=True,
+ dynamic=False,
+ )
+
+ self.memory_attention.forward = torch.compile(
+ self.memory_attention.forward,
+ mode="max-autotune",
+ fullgraph=True,
+ dynamic=True, # Num. of memories varies
+ )
+
+ self.sam_prompt_encoder.forward = torch.compile(
+ self.sam_prompt_encoder.forward,
+ mode="max-autotune",
+ fullgraph=True,
+ dynamic=False, # Accuracy regression on True
+ )
+
+ self.sam_mask_decoder.forward = torch.compile(
+ self.sam_mask_decoder.forward,
+ mode="max-autotune",
+ fullgraph=True,
+ dynamic=False, # Accuracy regression on True
+ )
+
+ def forward_image(self, img_batch: torch.Tensor):
+ """
+ Identical to the corresponding method in the parent (SAM2VideoPredictor), but
+ cloning the backbone features and pos encoding to enable compilation.
+ """
+ backbone_out = self.image_encoder(img_batch)
+ if self.use_high_res_features_in_sam:
+ # precompute projected level 0 and level 1 features in SAM decoder
+ # to avoid running it again on every SAM click
+ backbone_out["backbone_fpn"][0] = self.sam_mask_decoder.conv_s0(backbone_out["backbone_fpn"][0])
+ backbone_out["backbone_fpn"][1] = self.sam_mask_decoder.conv_s1(backbone_out["backbone_fpn"][1])
+ # Clone to help torch.compile
+ for i in range(len(backbone_out["backbone_fpn"])):
+ backbone_out["backbone_fpn"][i] = backbone_out["backbone_fpn"][i].clone()
+ backbone_out["vision_pos_enc"][i] = backbone_out["vision_pos_enc"][i].clone()
+ return backbone_out
+
+ def _forward_sam_heads(
+ self,
+ backbone_features,
+ point_inputs=None,
+ mask_inputs=None,
+ high_res_features=None,
+ multimask_output=False,
+ ):
+ """
+ Identical to the corresponding method in the parent (SAM2VideoPredictor), but
+ cloning the outputs of prompt_encoder and mask_decoder to enable compilation.
+ """
+ B = backbone_features.size(0)
+ device = backbone_features.device
+ assert backbone_features.size(1) == self.sam_prompt_embed_dim
+ assert backbone_features.size(2) == self.sam_image_embedding_size
+ assert backbone_features.size(3) == self.sam_image_embedding_size
+
+ # a) Handle point prompts
+ if point_inputs is not None:
+ sam_point_coords = point_inputs["point_coords"]
+ sam_point_labels = point_inputs["point_labels"]
+ assert sam_point_coords.size(0) == B and sam_point_labels.size(0) == B
+ else:
+            # If no points are provided, pad with an empty point (with label -1)
+ sam_point_coords = torch.zeros(B, 1, 2, device=device)
+ sam_point_labels = -torch.ones(B, 1, dtype=torch.int32, device=device)
+
+ # b) Handle mask prompts
+ if mask_inputs is not None:
+ # If mask_inputs is provided, downsize it into low-res mask input if needed
+ # and feed it as a dense mask prompt into the SAM mask encoder
+ assert len(mask_inputs.shape) == 4 and mask_inputs.shape[:2] == (B, 1)
+ if mask_inputs.shape[-2:] != self.sam_prompt_encoder.mask_input_size:
+ sam_mask_prompt = F.interpolate(
+ mask_inputs.float(),
+ size=self.sam_prompt_encoder.mask_input_size,
+ align_corners=False,
+ mode="bilinear",
+ antialias=True, # use antialias for downsampling
+ )
+ else:
+ sam_mask_prompt = mask_inputs
+ else:
+ # Otherwise, simply feed None (and SAM's prompt encoder will add
+ # a learned `no_mask_embed` to indicate no mask input in this case).
+ sam_mask_prompt = None
+
+ sparse_embeddings, dense_embeddings = self.sam_prompt_encoder(
+ points=(sam_point_coords, sam_point_labels),
+ boxes=None,
+ masks=sam_mask_prompt,
+ )
+ # Clone image_pe and the outputs of sam_prompt_encoder
+ # to enable compilation
+ sparse_embeddings = sparse_embeddings.clone()
+ dense_embeddings = dense_embeddings.clone()
+ image_pe = self.sam_prompt_encoder.get_dense_pe().clone()
+ (
+ low_res_multimasks,
+ ious,
+ sam_output_tokens,
+ object_score_logits,
+ ) = self.sam_mask_decoder(
+ image_embeddings=backbone_features,
+ image_pe=image_pe,
+ sparse_prompt_embeddings=sparse_embeddings,
+ dense_prompt_embeddings=dense_embeddings,
+ multimask_output=multimask_output,
+ repeat_image=False, # the image is already batched
+ high_res_features=high_res_features,
+ )
+ # Clone the output of sam_mask_decoder
+ # to enable compilation
+ low_res_multimasks = low_res_multimasks.clone()
+ ious = ious.clone()
+ sam_output_tokens = sam_output_tokens.clone()
+ object_score_logits = object_score_logits.clone()
+
+ if self.pred_obj_scores:
+ is_obj_appearing = object_score_logits > 0
+
+ # Mask used for spatial memories is always a *hard* choice between obj and no obj,
+ # consistent with the actual mask prediction
+ low_res_multimasks = torch.where(
+ is_obj_appearing[:, None, None],
+ low_res_multimasks,
+ NO_OBJ_SCORE,
+ )
+
+ # convert masks from possibly bfloat16 (or float16) to float32
+ low_res_multimasks = low_res_multimasks.float()
+ high_res_multimasks = F.interpolate(
+ low_res_multimasks,
+ size=(self.image_size, self.image_size),
+ mode="bilinear",
+ align_corners=False,
+ )
+
+ sam_output_token = sam_output_tokens[:, 0]
+ if multimask_output:
+ # take the best mask prediction (with the highest IoU estimation)
+ best_iou_inds = torch.argmax(ious, dim=-1)
+ batch_inds = torch.arange(B, device=device)
+ low_res_masks = low_res_multimasks[batch_inds, best_iou_inds].unsqueeze(1)
+ high_res_masks = high_res_multimasks[batch_inds, best_iou_inds].unsqueeze(1)
+ if sam_output_tokens.size(1) > 1:
+ sam_output_token = sam_output_tokens[batch_inds, best_iou_inds]
+ else:
+ low_res_masks, high_res_masks = low_res_multimasks, high_res_multimasks
+
+ # Extract object pointer from the SAM output token (with occlusion handling)
+ obj_ptr = self.obj_ptr_proj(sam_output_token)
+ if self.pred_obj_scores:
+ # Allow *soft* no obj ptr, unlike for masks
+ if self.soft_no_obj_ptr:
+ lambda_is_obj_appearing = object_score_logits.sigmoid()
+ else:
+ lambda_is_obj_appearing = is_obj_appearing.float()
+
+ if self.fixed_no_obj_ptr:
+ obj_ptr = lambda_is_obj_appearing * obj_ptr
+ obj_ptr = obj_ptr + (1 - lambda_is_obj_appearing) * self.no_obj_ptr
+
+ return (
+ low_res_multimasks,
+ high_res_multimasks,
+ ious,
+ low_res_masks,
+ high_res_masks,
+ obj_ptr,
+ object_score_logits,
+ )
+
+ def _encode_new_memory(
+ self,
+ current_vision_feats,
+ feat_sizes,
+ pred_masks_high_res,
+ object_score_logits,
+ is_mask_from_pts,
+ ):
+ """
+ Identical to the corresponding method in the parent (SAM2VideoPredictor), but
+ cloning the memories and their pos enc to enable compilation.
+ """
+ B = current_vision_feats[-1].size(1) # batch size on this frame
+ C = self.hidden_dim
+ H, W = feat_sizes[-1] # top-level (lowest-resolution) feature size
+ # top-level feature, (HW)BC => BCHW
+ pix_feat = current_vision_feats[-1].permute(1, 2, 0).view(B, C, H, W)
+ if self.non_overlap_masks_for_mem_enc and not self.training:
+ # optionally, apply non-overlapping constraints to the masks (it's applied
+ # in the batch dimension and should only be used during eval, where all
+ # the objects come from the same video under batch size 1).
+ pred_masks_high_res = self._apply_non_overlapping_constraints(pred_masks_high_res)
+ # scale the raw mask logits with a temperature before applying sigmoid
+ binarize = self.binarize_mask_from_pts_for_mem_enc and is_mask_from_pts
+ if binarize and not self.training:
+ mask_for_mem = (pred_masks_high_res > 0).float()
+ else:
+ # apply sigmoid on the raw mask logits to turn them into range (0, 1)
+ mask_for_mem = torch.sigmoid(pred_masks_high_res)
+ # apply scale and bias terms to the sigmoid probabilities
+ if self.sigmoid_scale_for_mem_enc != 1.0:
+ mask_for_mem = mask_for_mem * self.sigmoid_scale_for_mem_enc
+ if self.sigmoid_bias_for_mem_enc != 0.0:
+ mask_for_mem = mask_for_mem + self.sigmoid_bias_for_mem_enc
+ maskmem_out = self.memory_encoder(
+ pix_feat,
+ mask_for_mem,
+ skip_mask_sigmoid=True # sigmoid already applied
+ )
+ # Clone the feats and pos_enc to enable compilation
+ maskmem_features = maskmem_out["vision_features"].clone()
+ maskmem_pos_enc = [m.clone() for m in maskmem_out["vision_pos_enc"]]
+ # add a no-object embedding to the spatial memory to indicate that the frame
+ # is predicted to be occluded (i.e. no object is appearing in the frame)
+ if self.no_obj_embed_spatial is not None:
+ is_obj_appearing = (object_score_logits > 0).float()
+ maskmem_features += (1 - is_obj_appearing[..., None, None]
+ ) * self.no_obj_embed_spatial[..., None, None].expand(*maskmem_features.shape)
+
+ return maskmem_features, maskmem_pos_enc
diff --git a/sam2/utils/__init__.py b/sam2/utils/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..5277f46157403e47fd830fc519144b97ef69d4ae
--- /dev/null
+++ b/sam2/utils/__init__.py
@@ -0,0 +1,5 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
diff --git a/sam2/utils/amg.py b/sam2/utils/amg.py
new file mode 100644
index 0000000000000000000000000000000000000000..1b04d26ec4ac5599999919e8896876a6506733a1
--- /dev/null
+++ b/sam2/utils/amg.py
@@ -0,0 +1,328 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+
+import math
+from copy import deepcopy
+from itertools import product
+from typing import Any, Dict, Generator, ItemsView, List, Tuple
+
+import numpy as np
+import torch
+
+# Very lightly adapted from https://github.com/facebookresearch/segment-anything/blob/main/segment_anything/utils/amg.py
+
+
+class MaskData:
+ """
+ A structure for storing masks and their related data in batched format.
+ Implements basic filtering and concatenation.
+ """
+
+ def __init__(self, **kwargs) -> None:
+ for v in kwargs.values():
+ assert isinstance(
+                v, (list, np.ndarray, torch.Tensor)), "MaskData only supports lists, numpy arrays, and torch tensors."
+ self._stats = dict(**kwargs)
+
+ def __setitem__(self, key: str, item: Any) -> None:
+ assert isinstance(
+            item, (list, np.ndarray, torch.Tensor)), "MaskData only supports lists, numpy arrays, and torch tensors."
+ self._stats[key] = item
+
+ def __delitem__(self, key: str) -> None:
+ del self._stats[key]
+
+ def __getitem__(self, key: str) -> Any:
+ return self._stats[key]
+
+ def items(self) -> ItemsView[str, Any]:
+ return self._stats.items()
+
+ def filter(self, keep: torch.Tensor) -> None:
+ for k, v in self._stats.items():
+ if v is None:
+ self._stats[k] = None
+ elif isinstance(v, torch.Tensor):
+ self._stats[k] = v[torch.as_tensor(keep, device=v.device)]
+ elif isinstance(v, np.ndarray):
+ self._stats[k] = v[keep.detach().cpu().numpy()]
+ elif isinstance(v, list) and keep.dtype == torch.bool:
+ self._stats[k] = [a for i, a in enumerate(v) if keep[i]]
+ elif isinstance(v, list):
+ self._stats[k] = [v[i] for i in keep]
+ else:
+ raise TypeError(f"MaskData key {k} has an unsupported type {type(v)}.")
+
+ def cat(self, new_stats: "MaskData") -> None:
+ for k, v in new_stats.items():
+ if k not in self._stats or self._stats[k] is None:
+ self._stats[k] = deepcopy(v)
+ elif isinstance(v, torch.Tensor):
+ self._stats[k] = torch.cat([self._stats[k], v], dim=0)
+ elif isinstance(v, np.ndarray):
+ self._stats[k] = np.concatenate([self._stats[k], v], axis=0)
+ elif isinstance(v, list):
+ self._stats[k] = self._stats[k] + deepcopy(v)
+ else:
+ raise TypeError(f"MaskData key {k} has an unsupported type {type(v)}.")
+
+ def to_numpy(self) -> None:
+ for k, v in self._stats.items():
+ if isinstance(v, torch.Tensor):
+ self._stats[k] = v.float().detach().cpu().numpy()
+
+
+def is_box_near_crop_edge(boxes: torch.Tensor,
+ crop_box: List[int],
+ orig_box: List[int],
+ atol: float = 20.0) -> torch.Tensor:
+ """Filter masks at the edge of a crop, but not at the edge of the original image."""
+ crop_box_torch = torch.as_tensor(crop_box, dtype=torch.float, device=boxes.device)
+ orig_box_torch = torch.as_tensor(orig_box, dtype=torch.float, device=boxes.device)
+ boxes = uncrop_boxes_xyxy(boxes, crop_box).float()
+ near_crop_edge = torch.isclose(boxes, crop_box_torch[None, :], atol=atol, rtol=0)
+ near_image_edge = torch.isclose(boxes, orig_box_torch[None, :], atol=atol, rtol=0)
+ near_crop_edge = torch.logical_and(near_crop_edge, ~near_image_edge)
+ return torch.any(near_crop_edge, dim=1)
+
+
+def box_xyxy_to_xywh(box_xyxy: torch.Tensor) -> torch.Tensor:
+ box_xywh = deepcopy(box_xyxy)
+ box_xywh[2] = box_xywh[2] - box_xywh[0]
+ box_xywh[3] = box_xywh[3] - box_xywh[1]
+ return box_xywh
+
+
+def batch_iterator(batch_size: int, *args) -> Generator[List[Any], None, None]:
+ assert len(args) > 0 and all(len(a) == len(args[0])
+ for a in args), "Batched iteration must have inputs of all the same size."
+ n_batches = len(args[0]) // batch_size + int(len(args[0]) % batch_size != 0)
+ for b in range(n_batches):
+ yield [arg[b * batch_size:(b + 1) * batch_size] for arg in args]
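+
+
+# Example (a sketch): batch_iterator(2, [1, 2, 3, 4, 5]) yields [[1, 2]],
+# then [[3, 4]], then [[5]] -- one sliced list per input argument per batch.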
+
+
+def mask_to_rle_pytorch(tensor: torch.Tensor) -> List[Dict[str, Any]]:
+ """
+ Encodes masks to an uncompressed RLE, in the format expected by
+    pycocotools.
+ """
+ # Put in fortran order and flatten h,w
+ b, h, w = tensor.shape
+ tensor = tensor.permute(0, 2, 1).flatten(1)
+
+ # Compute change indices
+ diff = tensor[:, 1:] ^ tensor[:, :-1]
+ change_indices = diff.nonzero()
+
+ # Encode run length
+ out = []
+ for i in range(b):
+ cur_idxs = change_indices[change_indices[:, 0] == i, 1]
+ cur_idxs = torch.cat([
+ torch.tensor([0], dtype=cur_idxs.dtype, device=cur_idxs.device),
+ cur_idxs + 1,
+ torch.tensor([h * w], dtype=cur_idxs.dtype, device=cur_idxs.device),
+ ])
+ btw_idxs = cur_idxs[1:] - cur_idxs[:-1]
+ counts = [] if tensor[i, 0] == 0 else [0]
+ counts.extend(btw_idxs.detach().cpu().tolist())
+ out.append({"size": [h, w], "counts": counts})
+ return out
+
+
+def rle_to_mask(rle: Dict[str, Any]) -> np.ndarray:
+ """Compute a binary mask from an uncompressed RLE."""
+ h, w = rle["size"]
+ mask = np.empty(h * w, dtype=bool)
+ idx = 0
+ parity = False
+ for count in rle["counts"]:
+ mask[idx:idx + count] = parity
+ idx += count
+ parity ^= True
+ mask = mask.reshape(w, h)
+ return mask.transpose() # Put in C order
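+
+
+# Round-trip sketch (a sanity check, not part of the API): encoding a boolean
+# mask batch with mask_to_rle_pytorch and decoding one entry with rle_to_mask
+# recovers the original mask:
+#
+#     rles = mask_to_rle_pytorch(masks)          # masks: (B, H, W) torch.bool
+#     recovered = rle_to_mask(rles[0])           # np.ndarray of shape (H, W)
+#     assert (recovered == masks[0].cpu().numpy()).all()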
+
+
+def area_from_rle(rle: Dict[str, Any]) -> int:
+ return sum(rle["counts"][1::2])
+
+
+def calculate_stability_score(masks: torch.Tensor, mask_threshold: float, threshold_offset: float) -> torch.Tensor:
+ """
+ Computes the stability score for a batch of masks. The stability
+ score is the IoU between the binary masks obtained by thresholding
+ the predicted mask logits at high and low values.
+ """
+ # One mask is always contained inside the other.
+ # Save memory by preventing unnecessary cast to torch.int64
+ intersections = ((masks > (mask_threshold + threshold_offset)).sum(-1,
+ dtype=torch.int16).sum(-1, dtype=torch.int32))
+ unions = ((masks > (mask_threshold - threshold_offset)).sum(-1, dtype=torch.int16).sum(-1, dtype=torch.int32))
+ return intersections / unions
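+
+
+# Example: with mask_threshold=0.0 and threshold_offset=1.0, the score is
+# IoU(masks > 1.0, masks > -1.0); a confident prediction changes little
+# between the two cutoffs and scores close to 1.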
+
+
+def build_point_grid(n_per_side: int) -> np.ndarray:
+ """Generates a 2D grid of points evenly spaced in [0,1]x[0,1]."""
+ offset = 1 / (2 * n_per_side)
+ points_one_side = np.linspace(offset, 1 - offset, n_per_side)
+ points_x = np.tile(points_one_side[None, :], (n_per_side, 1))
+ points_y = np.tile(points_one_side[:, None], (1, n_per_side))
+ points = np.stack([points_x, points_y], axis=-1).reshape(-1, 2)
+ return points
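+
+
+# Example: build_point_grid(2) returns 4 points with x, y in {0.25, 0.75},
+# i.e. the centers of a 2x2 partition of the unit square.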
+
+
+def build_all_layer_point_grids(n_per_side: int, n_layers: int, scale_per_layer: int) -> List[np.ndarray]:
+ """Generates point grids for all crop layers."""
+ points_by_layer = []
+ for i in range(n_layers + 1):
+ n_points = int(n_per_side / (scale_per_layer**i))
+ points_by_layer.append(build_point_grid(n_points))
+ return points_by_layer
+
+
+def generate_crop_boxes(im_size: Tuple[int, ...], n_layers: int,
+ overlap_ratio: float) -> Tuple[List[List[int]], List[int]]:
+ """
+ Generates a list of crop boxes of different sizes. Each layer
+ has (2**i)**2 boxes for the ith layer.
+ """
+ crop_boxes, layer_idxs = [], []
+ im_h, im_w = im_size
+ short_side = min(im_h, im_w)
+
+ # Original image
+ crop_boxes.append([0, 0, im_w, im_h])
+ layer_idxs.append(0)
+
+ def crop_len(orig_len, n_crops, overlap):
+ return int(math.ceil((overlap * (n_crops - 1) + orig_len) / n_crops))
+
+ for i_layer in range(n_layers):
+ n_crops_per_side = 2**(i_layer + 1)
+ overlap = int(overlap_ratio * short_side * (2 / n_crops_per_side))
+
+ crop_w = crop_len(im_w, n_crops_per_side, overlap)
+ crop_h = crop_len(im_h, n_crops_per_side, overlap)
+
+ crop_box_x0 = [int((crop_w - overlap) * i) for i in range(n_crops_per_side)]
+ crop_box_y0 = [int((crop_h - overlap) * i) for i in range(n_crops_per_side)]
+
+        # Crops in XYXY format (consistent with how crop_box is unpacked in the
+        # uncrop_* helpers below)
+ for x0, y0 in product(crop_box_x0, crop_box_y0):
+ box = [x0, y0, min(x0 + crop_w, im_w), min(y0 + crop_h, im_h)]
+ crop_boxes.append(box)
+ layer_idxs.append(i_layer + 1)
+
+ return crop_boxes, layer_idxs
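+
+
+# Example: generate_crop_boxes((512, 512), n_layers=1, overlap_ratio=0.25)
+# returns 5 boxes: the full image plus a 2x2 grid of 320x320 crops whose
+# neighbors overlap by 128 pixels.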
+
+
+def uncrop_boxes_xyxy(boxes: torch.Tensor, crop_box: List[int]) -> torch.Tensor:
+ x0, y0, _, _ = crop_box
+ offset = torch.tensor([[x0, y0, x0, y0]], device=boxes.device)
+ # Check if boxes has a channel dimension
+ if len(boxes.shape) == 3:
+ offset = offset.unsqueeze(1)
+ return boxes + offset
+
+
+def uncrop_points(points: torch.Tensor, crop_box: List[int]) -> torch.Tensor:
+ x0, y0, _, _ = crop_box
+ offset = torch.tensor([[x0, y0]], device=points.device)
+ # Check if points has a channel dimension
+ if len(points.shape) == 3:
+ offset = offset.unsqueeze(1)
+ return points + offset
+
+
+def uncrop_masks(masks: torch.Tensor, crop_box: List[int], orig_h: int, orig_w: int) -> torch.Tensor:
+ x0, y0, x1, y1 = crop_box
+ if x0 == 0 and y0 == 0 and x1 == orig_w and y1 == orig_h:
+ return masks
+ # Coordinate transform masks
+ pad_x, pad_y = orig_w - (x1 - x0), orig_h - (y1 - y0)
+ pad = (x0, pad_x - x0, y0, pad_y - y0)
+ return torch.nn.functional.pad(masks, pad, value=0)
+
+
+def remove_small_regions(mask: np.ndarray, area_thresh: float, mode: str) -> Tuple[np.ndarray, bool]:
+ """
+ Removes small disconnected regions and holes in a mask. Returns the
+    mask and an indicator of whether the mask has been modified.
+ """
+ import cv2 # type: ignore
+
+ assert mode in ["holes", "islands"]
+ correct_holes = mode == "holes"
+ working_mask = (correct_holes ^ mask).astype(np.uint8)
+ n_labels, regions, stats, _ = cv2.connectedComponentsWithStats(working_mask, 8)
+ sizes = stats[:, -1][1:] # Row 0 is background label
+ small_regions = [i + 1 for i, s in enumerate(sizes) if s < area_thresh]
+ if len(small_regions) == 0:
+ return mask, False
+ fill_labels = [0] + small_regions
+ if not correct_holes:
+ fill_labels = [i for i in range(n_labels) if i not in fill_labels]
+ # If every region is below threshold, keep largest
+ if len(fill_labels) == 0:
+ fill_labels = [int(np.argmax(sizes)) + 1]
+ mask = np.isin(regions, fill_labels)
+ return mask, True
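+
+# Example (sketch): clean up a binary mask by first filling holes and then dropping islands,
+# both below 100 px:
+#   mask, _ = remove_small_regions(mask, area_thresh=100, mode="holes")
+#   mask, _ = remove_small_regions(mask, area_thresh=100, mode="islands")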
+
+
+def coco_encode_rle(uncompressed_rle: Dict[str, Any]) -> Dict[str, Any]:
+ from pycocotools import mask as mask_utils # type: ignore
+
+ h, w = uncompressed_rle["size"]
+ rle = mask_utils.frPyObjects(uncompressed_rle, h, w)
+ rle["counts"] = rle["counts"].decode("utf-8") # Necessary to serialize with json
+ return rle
+
+
+def batched_mask_to_box(masks: torch.Tensor) -> torch.Tensor:
+ """
+    Calculates boxes in XYXY format around masks. Returns [0, 0, 0, 0] for
+    an empty mask. For an input of shape C1xC2x...xHxW, the output shape is C1xC2x...x4.
+ """
+ # torch.max below raises an error on empty inputs, just skip in this case
+ if torch.numel(masks) == 0:
+ return torch.zeros(*masks.shape[:-2], 4, device=masks.device)
+
+ # Normalize shape to CxHxW
+ shape = masks.shape
+ h, w = shape[-2:]
+ if len(shape) > 2:
+ masks = masks.flatten(0, -3)
+ else:
+ masks = masks.unsqueeze(0)
+
+ # Get top and bottom edges
+ in_height, _ = torch.max(masks, dim=-1)
+ in_height_coords = in_height * torch.arange(h, device=in_height.device)[None, :]
+ bottom_edges, _ = torch.max(in_height_coords, dim=-1)
+ in_height_coords = in_height_coords + h * (~in_height)
+ top_edges, _ = torch.min(in_height_coords, dim=-1)
+
+ # Get left and right edges
+ in_width, _ = torch.max(masks, dim=-2)
+ in_width_coords = in_width * torch.arange(w, device=in_width.device)[None, :]
+ right_edges, _ = torch.max(in_width_coords, dim=-1)
+ in_width_coords = in_width_coords + w * (~in_width)
+ left_edges, _ = torch.min(in_width_coords, dim=-1)
+
+ # If the mask is empty the right edge will be to the left of the left edge.
+ # Replace these boxes with [0, 0, 0, 0]
+ empty_filter = (right_edges < left_edges) | (bottom_edges < top_edges)
+ out = torch.stack([left_edges, top_edges, right_edges, bottom_edges], dim=-1)
+ out = out * (~empty_filter).unsqueeze(-1)
+
+ # Return to original shape
+ if len(shape) > 2:
+ out = out.reshape(*shape[:-2], 4)
+ else:
+ out = out[0]
+
+ return out
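+
+# Example (sketch): a single 8x8 boolean mask with foreground in rows 2..4 and columns 3..6
+# maps to the inclusive XYXY box [3, 2, 6, 4]:
+#   m = torch.zeros(8, 8, dtype=torch.bool); m[2:5, 3:7] = True
+#   batched_mask_to_box(m)  # tensor([3, 2, 6, 4])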
diff --git a/sam2/utils/misc.py b/sam2/utils/misc.py
new file mode 100644
index 0000000000000000000000000000000000000000..a62e527eabf4c342c279f19348126bbe70ddf205
--- /dev/null
+++ b/sam2/utils/misc.py
@@ -0,0 +1,340 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+
+import os
+import warnings
+from threading import Thread
+
+import numpy as np
+import torch
+from PIL import Image
+from tqdm import tqdm
+
+
+def get_sdpa_settings():
+ if torch.cuda.is_available():
+ old_gpu = torch.cuda.get_device_properties(0).major < 7
+ # only use Flash Attention on Ampere (8.0) or newer GPUs
+ use_flash_attn = torch.cuda.get_device_properties(0).major >= 8
+ if not use_flash_attn:
+ warnings.warn(
+ "Flash Attention is disabled as it requires a GPU with Ampere (8.0) CUDA capability.",
+ category=UserWarning,
+ stacklevel=2,
+ )
+ # keep math kernel for PyTorch versions before 2.2 (Flash Attention v2 is only
+ # available on PyTorch 2.2+, while Flash Attention v1 cannot handle all cases)
+ pytorch_version = tuple(int(v) for v in torch.__version__.split(".")[:2])
+ if pytorch_version < (2, 2):
+ warnings.warn(
+ f"You are using PyTorch {torch.__version__} without Flash Attention v2 support. "
+ "Consider upgrading to PyTorch 2.2+ for Flash Attention v2 (which could be faster).",
+ category=UserWarning,
+ stacklevel=2,
+ )
+ math_kernel_on = pytorch_version < (2, 2) or not use_flash_attn
+ else:
+ old_gpu = True
+ use_flash_attn = False
+ math_kernel_on = True
+
+ return old_gpu, use_flash_attn, math_kernel_on
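+
+# Example (a sketch of how these flags are typically consumed; note that
+# torch.backends.cuda.sdp_kernel is deprecated in recent PyTorch releases):
+#   OLD_GPU, USE_FLASH_ATTN, MATH_KERNEL_ON = get_sdpa_settings()
+#   with torch.backends.cuda.sdp_kernel(enable_flash=USE_FLASH_ATTN, enable_math=MATH_KERNEL_ON,
+#                                       enable_mem_efficient=OLD_GPU):
+#       out = torch.nn.functional.scaled_dot_product_attention(q, k, v)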
+
+
+def get_connected_components(mask):
+ """
+ Get the connected components (8-connectivity) of binary masks of shape (N, 1, H, W).
+
+ Inputs:
+ - mask: A binary mask tensor of shape (N, 1, H, W), where 1 is foreground and 0 is
+ background.
+
+ Outputs:
+ - labels: A tensor of shape (N, 1, H, W) containing the connected component labels
+ for foreground pixels and 0 for background pixels.
+ - counts: A tensor of shape (N, 1, H, W) containing the area of the connected
+ components for foreground pixels and 0 for background pixels.
+ """
+ from sam2 import _C
+
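+    # note: "componnets" (sic) matches the symbol name exported by the sam2 CUDA extension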
+ return _C.get_connected_componnets(mask.to(torch.uint8).contiguous())
+
+
+def mask_to_box(masks: torch.Tensor):
+ """
+    Compute the bounding box of each input mask.
+
+    Inputs:
+    - masks: [B, 1, H, W] boolean masks (torch.Tensor)
+
+    Returns:
+    - box_coords: [B, 1, 4] torch.Tensor with the (x, y) coordinates of the top-left and bottom-right (inclusive) box corners
+ """
+ B, _, h, w = masks.shape
+ device = masks.device
+ xs = torch.arange(w, device=device, dtype=torch.int32)
+ ys = torch.arange(h, device=device, dtype=torch.int32)
+ grid_xs, grid_ys = torch.meshgrid(xs, ys, indexing="xy")
+ grid_xs = grid_xs[None, None, ...].expand(B, 1, h, w)
+ grid_ys = grid_ys[None, None, ...].expand(B, 1, h, w)
+ min_xs, _ = torch.min(torch.where(masks, grid_xs, w).flatten(-2), dim=-1)
+ max_xs, _ = torch.max(torch.where(masks, grid_xs, -1).flatten(-2), dim=-1)
+ min_ys, _ = torch.min(torch.where(masks, grid_ys, h).flatten(-2), dim=-1)
+ max_ys, _ = torch.max(torch.where(masks, grid_ys, -1).flatten(-2), dim=-1)
+ bbox_coords = torch.stack((min_xs, min_ys, max_xs, max_ys), dim=-1)
+
+ return bbox_coords
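+
+# Example (sketch): a single 4x6 mask with foreground in rows 1..2 and columns 2..4 yields
+# inclusive corner coordinates:
+#   m = torch.zeros(1, 1, 4, 6, dtype=torch.bool); m[0, 0, 1:3, 2:5] = True
+#   mask_to_box(m)  # tensor([[[2, 1, 4, 2]]])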
+
+
+def _load_img_as_tensor(img_path, image_size):
+ img_pil = Image.open(img_path)
+ img_np = np.array(img_pil.convert("RGB").resize((image_size, image_size)))
+ if img_np.dtype == np.uint8: # np.uint8 is expected for JPEG images
+ img_np = img_np / 255.0
+ else:
+ raise RuntimeError(f"Unknown image dtype: {img_np.dtype} on {img_path}")
+ img = torch.from_numpy(img_np).permute(2, 0, 1)
+ video_width, video_height = img_pil.size # the original video size
+ return img, video_height, video_width
+
+
+class AsyncVideoFrameLoader:
+ """
+    A list of video frames to be loaded asynchronously without blocking session start.
+ """
+
+ def __init__(
+ self,
+ img_paths,
+ image_size,
+ offload_video_to_cpu,
+ img_mean,
+ img_std,
+ compute_device,
+ ):
+ self.img_paths = img_paths
+ self.image_size = image_size
+ self.offload_video_to_cpu = offload_video_to_cpu
+ self.img_mean = img_mean
+ self.img_std = img_std
+ # items in `self.images` will be loaded asynchronously
+ self.images = [None] * len(img_paths)
+ # catch and raise any exceptions in the async loading thread
+ self.exception = None
+        # video_height and video_width will be filled when loading the first image
+ self.video_height = None
+ self.video_width = None
+ self.compute_device = compute_device
+
+ # load the first frame to fill video_height and video_width and also
+ # to cache it (since it's most likely where the user will click)
+ self.__getitem__(0)
+
+ # load the rest of frames asynchronously without blocking the session start
+ def _load_frames():
+ try:
+ for n in tqdm(range(len(self.images)), desc="frame loading (JPEG)"):
+ self.__getitem__(n)
+ except Exception as e:
+ self.exception = e
+
+ self.thread = Thread(target=_load_frames, daemon=True)
+ self.thread.start()
+
+ def __getitem__(self, index):
+ if self.exception is not None:
+ raise RuntimeError("Failure in frame loading thread") from self.exception
+
+ img = self.images[index]
+ if img is not None:
+ return img
+
+ img, video_height, video_width = _load_img_as_tensor(self.img_paths[index], self.image_size)
+ self.video_height = video_height
+ self.video_width = video_width
+ # normalize by mean and std
+ img -= self.img_mean
+ img /= self.img_std
+ if not self.offload_video_to_cpu:
+ img = img.to(self.compute_device, non_blocking=True)
+ self.images[index] = img
+ return img
+
+ def __len__(self):
+ return len(self.images)
+
+
+def load_video_frames(
+ video_path,
+ image_size,
+ offload_video_to_cpu,
+ img_mean=(0.485, 0.456, 0.406),
+ img_std=(0.229, 0.224, 0.225),
+ async_loading_frames=False,
+ compute_device=torch.device("cuda"),
+):
+ """
+    Load the video frames from video_path. The frames are resized to image_size x image_size
+    and are loaded to GPU if offload_video_to_cpu=False. This is used by the demo.
+ """
+ is_bytes = isinstance(video_path, bytes)
+ is_str = isinstance(video_path, str)
+ is_mp4_path = is_str and os.path.splitext(video_path)[-1] in [".mp4", ".MP4"]
+ if is_bytes or is_mp4_path:
+ return load_video_frames_from_video_file(
+ video_path=video_path,
+ image_size=image_size,
+ offload_video_to_cpu=offload_video_to_cpu,
+ img_mean=img_mean,
+ img_std=img_std,
+ compute_device=compute_device,
+ )
+ elif is_str and os.path.isdir(video_path):
+ return load_video_frames_from_jpg_images(
+ video_path=video_path,
+ image_size=image_size,
+ offload_video_to_cpu=offload_video_to_cpu,
+ img_mean=img_mean,
+ img_std=img_std,
+ async_loading_frames=async_loading_frames,
+ compute_device=compute_device,
+ )
+ else:
+ raise NotImplementedError("Only MP4 video and JPEG folder are supported at this moment")
+
+
+def load_video_frames_from_jpg_images(
+ video_path,
+ image_size,
+ offload_video_to_cpu,
+ img_mean=(0.485, 0.456, 0.406),
+ img_std=(0.229, 0.224, 0.225),
+ async_loading_frames=False,
+ compute_device=torch.device("cuda"),
+):
+ """
+ Load the video frames from a directory of JPEG files (".jpg" format).
+
+ The frames are resized to image_size x image_size and are loaded to GPU if
+ `offload_video_to_cpu` is `False` and to CPU if `offload_video_to_cpu` is `True`.
+
+    You can load the frames asynchronously by setting `async_loading_frames` to `True`.
+ """
+ if isinstance(video_path, str) and os.path.isdir(video_path):
+ jpg_folder = video_path
+ else:
+ raise NotImplementedError(
+ "Only JPEG frames are supported at this moment. For video files, you may use "
+ "ffmpeg (https://ffmpeg.org/) to extract frames into a folder of JPEG files, such as \n"
+ "```\n"
+ "ffmpeg -i .mp4 -q:v 2 -start_number 0 /'%05d.jpg'\n"
+ "```\n"
+ "where `-q:v` generates high-quality JPEG frames and `-start_number 0` asks "
+ "ffmpeg to start the JPEG file from 00000.jpg.")
+
+ frame_names = [p for p in os.listdir(jpg_folder) if os.path.splitext(p)[-1] in [".jpg", ".jpeg", ".JPG", ".JPEG"]]
+ frame_names.sort(key=lambda p: int(os.path.splitext(p)[0]))
+ num_frames = len(frame_names)
+ if num_frames == 0:
+ raise RuntimeError(f"no images found in {jpg_folder}")
+ img_paths = [os.path.join(jpg_folder, frame_name) for frame_name in frame_names]
+ img_mean = torch.tensor(img_mean, dtype=torch.float32)[:, None, None]
+ img_std = torch.tensor(img_std, dtype=torch.float32)[:, None, None]
+
+ if async_loading_frames:
+ lazy_images = AsyncVideoFrameLoader(
+ img_paths,
+ image_size,
+ offload_video_to_cpu,
+ img_mean,
+ img_std,
+ compute_device,
+ )
+ return lazy_images, lazy_images.video_height, lazy_images.video_width
+
+ images = torch.zeros(num_frames, 3, image_size, image_size, dtype=torch.float32)
+ for n, img_path in enumerate(tqdm(img_paths, desc="frame loading (JPEG)")):
+ images[n], video_height, video_width = _load_img_as_tensor(img_path, image_size)
+ if not offload_video_to_cpu:
+ images = images.to(compute_device)
+ img_mean = img_mean.to(compute_device)
+ img_std = img_std.to(compute_device)
+ # normalize by mean and std
+ images -= img_mean
+ images /= img_std
+ return images, video_height, video_width
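+
+# Example (sketch; the folder path is a placeholder): load frames named 00000.jpg, 00001.jpg, ...
+#   images, h, w = load_video_frames_from_jpg_images("/path/to/frames", image_size=1024,
+#                                                    offload_video_to_cpu=True)
+#   # images: (num_frames, 3, 1024, 1024) float tensor normalized by ImageNet mean/std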
+
+
+def load_video_frames_from_video_file(
+ video_path,
+ image_size,
+ offload_video_to_cpu,
+ img_mean=(0.485, 0.456, 0.406),
+ img_std=(0.229, 0.224, 0.225),
+ compute_device=torch.device("cuda"),
+):
+ """Load the video frames from a video file."""
+ import decord
+
+ img_mean = torch.tensor(img_mean, dtype=torch.float32)[:, None, None]
+ img_std = torch.tensor(img_std, dtype=torch.float32)[:, None, None]
+ # Get the original video height and width
+ decord.bridge.set_bridge("torch")
+ video_height, video_width, _ = decord.VideoReader(video_path).next().shape
+ # Iterate over all frames in the video
+ images = []
+ for frame in decord.VideoReader(video_path, width=image_size, height=image_size):
+ images.append(frame.permute(2, 0, 1))
+
+ images = torch.stack(images, dim=0).float() / 255.0
+ if not offload_video_to_cpu:
+ images = images.to(compute_device)
+ img_mean = img_mean.to(compute_device)
+ img_std = img_std.to(compute_device)
+ # normalize by mean and std
+ images -= img_mean
+ images /= img_std
+ return images, video_height, video_width
+
+
+def fill_holes_in_mask_scores(mask, max_area):
+ """
+ A post processor to fill small holes in mask scores with area under `max_area`.
+ """
+    # Holes are those connected components in background with area <= max_area
+ # (background regions are those with mask scores <= 0)
+ assert max_area > 0, "max_area must be positive"
+
+ input_mask = mask
+ try:
+ labels, areas = get_connected_components(mask <= 0)
+ is_hole = (labels > 0) & (areas <= max_area)
+ # We fill holes with a small positive mask score (0.1) to change them to foreground.
+ mask = torch.where(is_hole, 0.1, mask)
+ except Exception as e:
+ # Skip the post-processing step on removing small holes if the CUDA kernel fails
+ warnings.warn(
+ f"{e}\n\nSkipping the post-processing step due to the error above. You can "
+ "still use SAM 2 and it's OK to ignore the error above, although some post-processing "
+ "functionality may be limited (which doesn't affect the results in most cases; see "
+ "https://github.com/facebookresearch/sam2/blob/main/INSTALL.md).",
+ category=UserWarning,
+ stacklevel=2,
+ )
+ mask = input_mask
+
+ return mask
+
+
+def concat_points(old_point_inputs, new_points, new_labels):
+ """Add new points and labels to previous point inputs (add at the end)."""
+ if old_point_inputs is None:
+ points, labels = new_points, new_labels
+ else:
+ points = torch.cat([old_point_inputs["point_coords"], new_points], dim=1)
+ labels = torch.cat([old_point_inputs["point_labels"], new_labels], dim=1)
+
+ return {"point_coords": points, "point_labels": labels}
diff --git a/sam2/utils/transforms.py b/sam2/utils/transforms.py
new file mode 100644
index 0000000000000000000000000000000000000000..8d9c279b955b09dc011e8f2f8ac8c6943e5ea315
--- /dev/null
+++ b/sam2/utils/transforms.py
@@ -0,0 +1,108 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+
+import warnings
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from torchvision.transforms import Normalize, Resize, ToTensor
+
+
+class SAM2Transforms(nn.Module):
+
+ def __init__(self, resolution, mask_threshold, max_hole_area=0.0, max_sprinkle_area=0.0):
+ """
+ Transforms for SAM2.
+ """
+ super().__init__()
+ self.resolution = resolution
+ self.mask_threshold = mask_threshold
+ self.max_hole_area = max_hole_area
+ self.max_sprinkle_area = max_sprinkle_area
+ self.mean = [0.485, 0.456, 0.406]
+ self.std = [0.229, 0.224, 0.225]
+ self.to_tensor = ToTensor()
+ self.transforms = torch.jit.script(
+ nn.Sequential(
+ Resize((self.resolution, self.resolution)),
+ Normalize(self.mean, self.std),
+ ))
+
+ def __call__(self, x):
+ x = self.to_tensor(x)
+ return self.transforms(x)
+
+ def forward_batch(self, img_list):
+ img_batch = [self.transforms(self.to_tensor(img)) for img in img_list]
+ img_batch = torch.stack(img_batch, dim=0)
+ return img_batch
+
+ def transform_coords(self, coords: torch.Tensor, normalize=False, orig_hw=None) -> torch.Tensor:
+ """
+        Expects a torch tensor with length 2 in the last dimension. The coordinates can be in absolute
+        image or normalized coordinates. If the coords are in absolute image coordinates, normalize
+        should be set to True and the original image size is required.
+
+        Returns
+            Coordinates scaled to [0, self.resolution], as expected by the SAM2 model.
+ """
+ if normalize:
+ assert orig_hw is not None
+ h, w = orig_hw
+ coords = coords.clone()
+ coords[..., 0] = coords[..., 0] / w
+ coords[..., 1] = coords[..., 1] / h
+
+ coords = coords * self.resolution # unnormalize coords
+ return coords
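+
+    # Example (sketch): with resolution=1024, a click at pixel (x=200, y=100) on a 480x640 image:
+    #   t = SAM2Transforms(resolution=1024, mask_threshold=0.0)
+    #   t.transform_coords(torch.tensor([[200., 100.]]), normalize=True, orig_hw=(480, 640))
+    #   # -> tensor([[320.0000, 213.3333]]), i.e. (200 / 640, 100 / 480) * 1024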
+
+ def transform_boxes(self, boxes: torch.Tensor, normalize=False, orig_hw=None) -> torch.Tensor:
+ """
+        Expects a tensor of shape Bx4. The coordinates can be in absolute image or normalized
+        coordinates. If the coords are in absolute image coordinates, normalize should be set to
+        True and the original image size is required.
+ """
+ boxes = self.transform_coords(boxes.reshape(-1, 2, 2), normalize, orig_hw)
+ return boxes
+
+ def postprocess_masks(self, masks: torch.Tensor, orig_hw) -> torch.Tensor:
+ """
+ Perform PostProcessing on output masks.
+ """
+ from sam2.utils.misc import get_connected_components
+
+ masks = masks.float()
+ input_masks = masks
+ mask_flat = masks.flatten(0, 1).unsqueeze(1) # flatten as 1-channel image
+ try:
+ if self.max_hole_area > 0:
+                # Holes are those connected components in background with area <= self.max_hole_area
+ # (background regions are those with mask scores <= self.mask_threshold)
+ labels, areas = get_connected_components(mask_flat <= self.mask_threshold)
+ is_hole = (labels > 0) & (areas <= self.max_hole_area)
+ is_hole = is_hole.reshape_as(masks)
+ # We fill holes with a small positive mask score (10.0) to change them to foreground.
+ masks = torch.where(is_hole, self.mask_threshold + 10.0, masks)
+
+ if self.max_sprinkle_area > 0:
+ labels, areas = get_connected_components(mask_flat > self.mask_threshold)
+ is_hole = (labels > 0) & (areas <= self.max_sprinkle_area)
+ is_hole = is_hole.reshape_as(masks)
+ # We fill holes with negative mask score (-10.0) to change them to background.
+ masks = torch.where(is_hole, self.mask_threshold - 10.0, masks)
+ except Exception as e:
+ # Skip the post-processing step if the CUDA kernel fails
+ warnings.warn(
+ f"{e}\n\nSkipping the post-processing step due to the error above. You can "
+ "still use SAM 2 and it's OK to ignore the error above, although some post-processing "
+ "functionality may be limited (which doesn't affect the results in most cases; see "
+ "https://github.com/facebookresearch/sam2/blob/main/INSTALL.md).",
+ category=UserWarning,
+ stacklevel=2,
+ )
+ masks = input_masks
+
+ masks = F.interpolate(masks.float(), orig_hw, mode="bilinear", align_corners=False).to(masks.dtype)
+ return masks
diff --git a/setup.cfg b/setup.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..80940321f2fe2ef05ed8c94fd1a1d3af8bd4a419
--- /dev/null
+++ b/setup.cfg
@@ -0,0 +1,16 @@
+[yapf]
+column_limit = 120
+based_on_style = pep8
+blank_line_before_nested_class_or_def = true
+split_before_expression_after_opening_paren = true
+
+[isort]
+line_length = 120
+multi_line_output = 0
+known_third_party = cv2,decord,deepspeed,gradio,hydra,imageio,matplotlib,nncore,numpy,omegaconf,pandas,peft,PIL,pycocotools,pysrt,requests,safetensors,spaces,tabulate,termplotlib,tqdm,tensordict,torch,torchvision,transformers
+no_lines_before = STDLIB,LOCALFOLDER
+default_section = FIRSTPARTY
+
+[flake8]
+max-line-length = 500
+extend-ignore = E741
diff --git a/unipixel/constants.py b/unipixel/constants.py
new file mode 100755
index 0000000000000000000000000000000000000000..8cd68cfb9413cbbce6875373110a08b475594852
--- /dev/null
+++ b/unipixel/constants.py
@@ -0,0 +1,7 @@
+# Copyright (c) 2025 Ye Liu. Licensed under the BSD-3-Clause License.
+
+IGNORE_INDEX = -100
+
+REF_TOKEN = '<|ref|>'
+SEG_TOKEN = '<|seg|>'
+MEM_TOKEN = '<|mem|>'
diff --git a/unipixel/conversation.py b/unipixel/conversation.py
new file mode 100755
index 0000000000000000000000000000000000000000..6fc932f3a19550da852b6b5dddff68226cd229d7
--- /dev/null
+++ b/unipixel/conversation.py
@@ -0,0 +1,49 @@
+# Copyright (c) 2025 Ye Liu. Licensed under the BSD-3-Clause License.
+
+from dataclasses import dataclass
+from typing import List
+
+
+@dataclass
+class Conversation:
+ style: str
+ system: str
+ roles: List[str]
+ seps: List[str]
+ messages: List[str]
+
+ def append_message(self, role, msg):
+ self.messages.append([role, msg])
+
+ def clear(self):
+ self.messages = []
+
+ def get_prompt(self):
+ assert self.style in ('chatml', )
+
+ prompt = self.system + self.seps[0] if self.system is not None else ''
+
+ for i, (role, msg) in enumerate(self.messages):
+ prompt += role
+ sep = self.seps[i % 2]
+ if msg is not None:
+ prompt += msg
+ if not prompt.endswith(sep):
+ prompt += sep
+
+ prompt = prompt.lstrip('\n')
+ return prompt
+
+
+def get_conv(conv_type):
+ if conv_type == 'chatml':
+ conv = Conversation(
+ style='chatml',
+ system='<|im_start|>system\nYou are a helpful assistant.',
+ roles=('\n<|im_start|>user\n', '\n<|im_start|>assistant\n'),
+ seps=('<|im_end|>', '<|im_end|>'),
+ messages=[])
+ else:
+ raise ValueError(f'unknown conversation type: {conv_type}')
+
+ return conv
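+
+
+# Example (sketch): building a single-turn ChatML prompt.
+#   conv = get_conv('chatml')
+#   conv.append_message(conv.roles[0], 'Describe the video.')
+#   conv.append_message(conv.roles[1], None)
+#   conv.get_prompt()
+#   # '<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n'
+#   # '<|im_start|>user\nDescribe the video.<|im_end|>\n<|im_start|>assistant\n'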
diff --git a/unipixel/dataset/utils.py b/unipixel/dataset/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..f7724b6b6e3bb739253752d4b777ab435d5915cc
--- /dev/null
+++ b/unipixel/dataset/utils.py
@@ -0,0 +1,531 @@
+# Copyright (c) 2025 Ye Liu. Licensed under the BSD-3-Clause License.
+
+import base64
+import copy
+import math
+import os
+import warnings
+from io import BytesIO
+from typing import Optional
+
+import cv2
+import decord
+import nncore
+import numpy as np
+import requests
+import torch
+import torchvision.transforms.functional as T
+from PIL import Image
+from pycocotools.mask import decode, frPyObjects, merge
+from torchvision import transforms
+from torchvision.transforms import InterpolationMode
+
+from unipixel.constants import IGNORE_INDEX
+from unipixel.conversation import get_conv
+
+IMAGE_FACTOR = 28
+MIN_PIXELS = 4 * 28 * 28
+MAX_PIXELS = 16384 * 28 * 28
+MAX_RATIO = 200
+
+VIDEO_MIN_PIXELS = 128 * 28 * 28
+VIDEO_MAX_PIXELS = 768 * 28 * 28
+FRAME_FACTOR = 2
+FPS = 2.0
+FPS_MIN_FRAMES = 4
+FPS_MAX_FRAMES = 768
+
+# Set the maximum number of video token inputs.
+# Here, 128K represents the maximum number of input tokens for the vision-language model.
+# Remember to adjust it according to your own configuration.
+VIDEO_TOTAL_PIXELS = int(float(os.environ.get('VIDEO_MAX_PIXELS', 128000 * 28 * 28 * 0.9)))
+
+
+def round_by_factor(number: int, factor: int) -> int:
+ """Returns the closest integer to 'number' that is divisible by 'factor'."""
+ return round(number / factor) * factor
+
+
+def ceil_by_factor(number: int, factor: int) -> int:
+ """Returns the smallest integer greater than or equal to 'number' that is divisible by 'factor'."""
+ return math.ceil(number / factor) * factor
+
+
+def floor_by_factor(number: int, factor: int) -> int:
+ """Returns the largest integer less than or equal to 'number' that is divisible by 'factor'."""
+ return math.floor(number / factor) * factor
+
+
+def smart_resize(height: int,
+ width: int,
+ factor: int = IMAGE_FACTOR,
+ min_pixels: int = MIN_PIXELS,
+ max_pixels: int = MAX_PIXELS) -> tuple[int, int]:
+ """
+ Rescales the image so that the following conditions are met:
+
+ 1. Both dimensions (height and width) are divisible by 'factor'.
+
+ 2. The total number of pixels is within the range ['min_pixels', 'max_pixels'].
+
+ 3. The aspect ratio of the image is maintained as closely as possible.
+ """
+ if max(height, width) / min(height, width) > MAX_RATIO:
+ raise ValueError(
+ f"absolute aspect ratio must be smaller than {MAX_RATIO}, got {max(height, width) / min(height, width)}")
+ h_bar = max(factor, round_by_factor(height, factor))
+ w_bar = max(factor, round_by_factor(width, factor))
+ # change order here to ensure not exceeding max_pixels
+ if h_bar * w_bar < min_pixels:
+ beta = math.sqrt(min_pixels / (height * width))
+ h_bar = ceil_by_factor(height * beta, factor)
+ w_bar = ceil_by_factor(width * beta, factor)
+ if h_bar * w_bar > max_pixels:
+ beta = math.sqrt((height * width) / max_pixels)
+ h_bar = floor_by_factor(height / beta, factor)
+ w_bar = floor_by_factor(width / beta, factor)
+ return h_bar, w_bar
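+
+# Example (sketch): a 1080x1920 frame keeps its aspect ratio while snapping to factor 28:
+#   smart_resize(1080, 1920)  # -> (1092, 1932), i.e. round(1080 / 28) * 28 = 1092, etc.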
+
+
+def to_rgb(pil_image: Image.Image) -> Image.Image:
+ if pil_image.mode == 'RGBA':
+ white_background = Image.new("RGB", pil_image.size, (255, 255, 255))
+ white_background.paste(pil_image, mask=pil_image.split()[3]) # Use alpha channel as mask
+ return white_background
+ else:
+ return pil_image.convert("RGB")
+
+
+def fetch_image(ele: dict[str, str | Image.Image], size_factor: int = IMAGE_FACTOR) -> Image.Image:
+ if "image" in ele:
+ image = ele["image"]
+ else:
+ image = ele["image_url"]
+ image_obj = None
+ if isinstance(image, Image.Image):
+ image_obj = image
+ elif image.startswith("http://") or image.startswith("https://"):
+ # fix memory leak issue while using BytesIO
+ with requests.get(image, stream=True) as response:
+ response.raise_for_status()
+ with BytesIO(response.content) as bio:
+ image_obj = copy.deepcopy(Image.open(bio))
+ elif image.startswith("file://"):
+ image_obj = Image.open(image[7:])
+ elif image.startswith("data:image"):
+ if "base64," in image:
+ _, base64_data = image.split("base64,", 1)
+ data = base64.b64decode(base64_data)
+ # fix memory leak issue while using BytesIO
+ with BytesIO(data) as bio:
+ image_obj = copy.deepcopy(Image.open(bio))
+ else:
+ image_obj = Image.open(image)
+ if image_obj is None:
+ raise ValueError(f"Unrecognized image input, support local path, http url, base64 and PIL.Image, got {image}")
+ image = to_rgb(image_obj)
+
+ if "resized_height" in ele and "resized_width" in ele:
+ resized_height, resized_width = smart_resize(
+ ele["resized_height"],
+ ele["resized_width"],
+ factor=size_factor,
+ )
+ else:
+ width, height = image.size
+ min_pixels = ele.get("min_pixels", MIN_PIXELS)
+ max_pixels = ele.get("max_pixels", MAX_PIXELS)
+ resized_height, resized_width = smart_resize(
+ height,
+ width,
+ factor=size_factor,
+ min_pixels=min_pixels,
+ max_pixels=max_pixels,
+ )
+ image = image.resize((resized_width, resized_height))
+
+ return image
+
+
+def smart_nframes(
+ ele: dict,
+ total_frames: int,
+ video_fps: int | float,
+) -> int:
+ """calculate the number of frames for video used for model inputs.
+
+ Args:
+        ele (dict): a dict containing the configuration of the video.
+            Supports either `fps` or `nframes`:
+ - nframes: the number of frames to extract for model inputs.
+ - fps: the fps to extract frames for model inputs.
+ - min_frames: the minimum number of frames of the video, only used when fps is provided.
+ - max_frames: the maximum number of frames of the video, only used when fps is provided.
+ total_frames (int): the original total number of frames of the video.
+ video_fps (int | float): the original fps of the video.
+
+    Raises:
+        ValueError: if nframes is not in the interval [FRAME_FACTOR, total_frames].
+
+    Returns:
+        int: the number of video frames used for model inputs.
+ """
+ assert not ("fps" in ele and "nframes" in ele), "Only accept either `fps` or `nframes`"
+ if "nframes" in ele:
+ nframes = round_by_factor(ele["nframes"], FRAME_FACTOR)
+ else:
+ fps = ele.get("fps", FPS)
+ min_frames = ceil_by_factor(ele.get("min_frames", FPS_MIN_FRAMES), FRAME_FACTOR)
+ max_frames = floor_by_factor(ele.get("max_frames", min(FPS_MAX_FRAMES, total_frames)), FRAME_FACTOR)
+ nframes = total_frames / video_fps * fps
+ nframes = min(min(max(nframes, min_frames), max_frames), total_frames)
+ nframes = floor_by_factor(nframes, FRAME_FACTOR)
+    if not (FRAME_FACTOR <= nframes <= total_frames):
+        raise ValueError(f"nframes should be in the interval [{FRAME_FACTOR}, {total_frames}], but got {nframes}.")
+ return nframes
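+
+# Example (sketch): a 10-second clip at 30 fps sampled at the default FPS = 2.0:
+#   smart_nframes({}, total_frames=300, video_fps=30)  # -> 20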
+
+
+def calculate_video_frame_range(
+ ele: dict,
+ total_frames: int,
+ video_fps: float,
+) -> tuple[int, int, int]:
+ """
+ Calculate the start and end frame indices based on the given time range.
+
+ Args:
+ ele (dict): A dictionary containing optional 'video_start' and 'video_end' keys (in seconds).
+ total_frames (int): Total number of frames in the video.
+ video_fps (float): Frames per second of the video.
+
+ Returns:
+ tuple: A tuple containing (start_frame, end_frame, frame_count).
+
+ Raises:
+ ValueError: If input parameters are invalid or the time range is inconsistent.
+ """
+ # Validate essential parameters
+ if video_fps <= 0:
+ raise ValueError("video_fps must be a positive number")
+ if total_frames <= 0:
+ raise ValueError("total_frames must be a positive integer")
+
+ # Get start and end time in seconds
+ video_start = ele.get("video_start", None)
+ video_end = ele.get("video_end", None)
+ if video_start is None and video_end is None:
+ return 0, total_frames - 1, total_frames
+
+ max_duration = total_frames / video_fps
+ # Process start frame
+ if video_start is not None:
+ video_start_clamped = max(0.0, min(video_start, max_duration))
+ start_frame = math.ceil(video_start_clamped * video_fps)
+ else:
+ start_frame = 0
+ # Process end frame
+ if video_end is not None:
+ video_end_clamped = max(0.0, min(video_end, max_duration))
+ end_frame = math.floor(video_end_clamped * video_fps)
+ end_frame = min(end_frame, total_frames - 1)
+ else:
+ end_frame = total_frames - 1
+
+ # Validate frame order
+ if start_frame >= end_frame:
+ raise ValueError(
+ f"Invalid time range: Start frame {start_frame} (at {video_start_clamped if video_start is not None else 0}s) "
+ f"exceeds end frame {end_frame} (at {video_end_clamped if video_end is not None else max_duration}s). "
+ f"Video duration: {max_duration:.2f}s ({total_frames} frames @ {video_fps}fps)")
+
+ return start_frame, end_frame, end_frame - start_frame + 1
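+
+# Example (sketch): keep only 1.0 s .. 5.0 s of a 10-second video at 30 fps:
+#   calculate_video_frame_range({'video_start': 1.0, 'video_end': 5.0}, 300, 30.0)  # -> (30, 150, 121)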
+
+
+def _read_video_decord(ele: dict) -> tuple[torch.Tensor, float]:
+ """read video using decord.VideoReader
+
+ Args:
+        ele (dict): a dict containing the configuration of the video.
+            Supported keys:
+            - video: the path of the video; supports "file://", "http://", "https://" and local paths.
+ - video_start: the start time of video.
+ - video_end: the end time of video.
+    Returns:
+        tuple[torch.Tensor, float]: the video tensor with shape (T, C, H, W) and the effective sampling fps.
+ """
+ decord.bridge.set_bridge("torch")
+ video_path = ele["video"]
+ vr = decord.VideoReader(video_path, num_threads=ele.get('num_threads', 0))
+ total_frames, video_fps = len(vr), vr.get_avg_fps()
+ start_frame, end_frame, total_frames = calculate_video_frame_range(
+ ele,
+ total_frames,
+ video_fps,
+ )
+ nframes = smart_nframes(ele, total_frames=total_frames, video_fps=video_fps)
+ idx = torch.linspace(start_frame, end_frame, nframes).round().long().tolist()
+ video = vr.get_batch(idx).permute(0, 3, 1, 2) # Convert to TCHW format
+ sample_fps = nframes / max(total_frames, 1e-6) * video_fps
+ return video, sample_fps
+
+
+def fetch_video(ele: dict,
+ image_factor: int = IMAGE_FACTOR,
+ return_video_sample_fps: bool = False,
+ sanity_check=False) -> torch.Tensor | list[Image.Image]:
+ if isinstance(ele["video"], str):
+ video, sample_fps = _read_video_decord(ele)
+ nframes, _, height, width = video.shape
+ min_pixels = ele.get("min_pixels", VIDEO_MIN_PIXELS)
+ total_pixels = ele.get("total_pixels", VIDEO_TOTAL_PIXELS)
+ max_pixels = max(min(VIDEO_MAX_PIXELS, total_pixels / nframes * FRAME_FACTOR), int(min_pixels * 1.05))
+ max_pixels_supposed = ele.get("max_pixels", max_pixels)
+ max_pixels = min(max_pixels_supposed, max_pixels)
+ if "resized_height" in ele and "resized_width" in ele:
+ resized_height, resized_width = smart_resize(
+ ele["resized_height"],
+ ele["resized_width"],
+ factor=image_factor,
+ )
+ else:
+ resized_height, resized_width = smart_resize(
+ height,
+ width,
+ factor=image_factor,
+ min_pixels=min_pixels,
+ max_pixels=max_pixels,
+ )
+ video = transforms.functional.resize(
+ video,
+ [resized_height, resized_width],
+ interpolation=InterpolationMode.BICUBIC,
+ antialias=True,
+ ).float()
+
+ if sanity_check and (video == 0).all():
+ raise ValueError("video '{}' contains all zeros".format(ele["video"]))
+
+ if return_video_sample_fps:
+ return video, sample_fps
+ return video
+ else:
+ assert isinstance(ele["video"], (list, tuple))
+ process_info = ele.copy()
+ process_info.pop("type", None)
+ process_info.pop("video", None)
+ images = [
+ fetch_image({
+ "image": video_element,
+ **process_info
+ }, size_factor=image_factor) for video_element in ele["video"]
+ ]
+ nframes = ceil_by_factor(len(images), FRAME_FACTOR)
+ if len(images) < nframes:
+ images.extend([images[-1]] * (nframes - len(images)))
+ if return_video_sample_fps:
+ return images, process_info.pop("fps", 2.0)
+ return images
+
+
+def extract_vision_info(conversations: list[dict] | list[list[dict]]) -> list[dict]:
+ vision_infos = []
+ if isinstance(conversations[0], dict):
+ conversations = [conversations]
+ for conversation in conversations:
+ for message in conversation:
+ if isinstance(message["content"], list):
+ for ele in message["content"]:
+ if ("image" in ele or "image_url" in ele or "video" in ele
+ or ele.get("type", "") in ("image", "image_url", "video")):
+ vision_infos.append(ele)
+ return vision_infos
+
+
+def process_vision_info(
+ conversations: list[dict] | list[list[dict]],
+ return_video_kwargs: bool = False,
+ sanity_check=False
+) -> tuple[list[Image.Image] | None, list[torch.Tensor | list[Image.Image]] | None, Optional[dict]]:
+
+ vision_infos = extract_vision_info(conversations)
+ # Read images or videos
+ image_inputs = []
+ video_inputs = []
+ video_sample_fps_list = []
+ for vision_info in vision_infos:
+ if "image" in vision_info or "image_url" in vision_info:
+ image_inputs.append(fetch_image(vision_info))
+ elif "video" in vision_info:
+ video_input, video_sample_fps = fetch_video(
+ vision_info, return_video_sample_fps=True, sanity_check=sanity_check)
+ video_sample_fps_list.append(video_sample_fps)
+ video_inputs.append(video_input)
+ else:
+ raise ValueError("image, image_url or video should in content.")
+ if len(image_inputs) == 0:
+ image_inputs = None
+ if len(video_inputs) == 0:
+ video_inputs = None
+ if return_video_kwargs:
+ return image_inputs, video_inputs, {'fps': video_sample_fps_list}
+ return image_inputs, video_inputs
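+
+# Example (sketch; the video path is a placeholder):
+#   messages = [{
+#       'role': 'user',
+#       'content': [{'type': 'video', 'video': '/path/to/video.mp4', 'fps': 2.0},
+#                   {'type': 'text', 'text': 'Segment the dog.'}],
+#   }]
+#   images, videos, kwargs = process_vision_info(messages, return_video_kwargs=True)
+#   # images is None, videos holds one (T, 3, H, W) float tensor, kwargs == {'fps': [...]}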
+
+
+def resize(mask, size):
+ return T.resize(mask.unsqueeze(0).unsqueeze(0), size)[0, 0]
+
+
+def process_masks(sample, frame_size, inds):
+ if sample['mask_type'] == 'image':
+ # case 1: list of masks or paths to masks
+ masks = []
+ for obj_oids in sample['oids']:
+ obj_masks = []
+ for i in inds:
+ label = sample['masks'][i]
+ if isinstance(label, str):
+ label = np.array(Image.open(label))
+ elif label is None:
+ label = np.full(frame_size, -1)
+ obj_masks.append(torch.from_numpy(sum([label == oid for oid in obj_oids])).float())
+ masks.append(obj_masks)
+ elif sample['mask_type'] == 'image_sep':
+ # case 2: list of masks or paths to masks (one object per image)
+ masks = []
+ for raw_obj_masks in sample['masks']:
+ obj_masks = []
+ for i in inds:
+ label = raw_obj_masks[i]
+ if isinstance(label, str):
+ label = np.array(Image.open(label))
+ elif label is None:
+ label = np.full(frame_size, -1)
+ obj_masks.append(torch.from_numpy(label == 255).float())
+ masks.append(obj_masks)
+ elif sample['mask_type'] == 'rle':
+ # case 3: list of lists of multi-region RLE masks
+ raw_masks = nncore.load(sample['masks']) if isinstance(sample['masks'], str) else sample['masks']
+ masks = []
+ for raw_obj_masks in raw_masks:
+ obj_masks = []
+ for i in inds:
+ mask = torch.zeros(frame_size)
+ for rle in raw_obj_masks[i]:
+ if isinstance(rle, list):
+ rles = frPyObjects(rle, sample.get('height', frame_size[0]), sample.get('width', frame_size[1]))
+ mask += resize(torch.from_numpy(decode(merge(rles))).float(), frame_size)
+ elif isinstance(rle, dict):
+ if isinstance(rle['counts'], list):
+ rle = frPyObjects(rle, *rle['size'])
+ mask += resize(torch.from_numpy(decode(rle)).float(), frame_size)
+ elif rle is None:
+ mask += 0
+ else:
+ raise TypeError(f'unknown rle mask: {rle}')
+ obj_masks.append((mask > 0).float())
+ masks.append(obj_masks)
+ elif sample['mask_type'] == 'polygon':
+ # case 4: list of lists of polygons
+ masks = []
+ for raw_obj_masks in sample['masks']:
+ obj_masks = []
+ for i in inds:
+ # step 1: sort shapes
+ areas = []
+ for shape in raw_obj_masks[i]:
+ tmp = np.zeros(frame_size, dtype=np.uint8)
+ cv2.polylines(tmp, np.array([shape['points']], dtype=np.int32), True, 1, 1)
+ cv2.fillPoly(tmp, np.array([shape['points']], dtype=np.int32), 1)
+ areas.append(tmp.sum())
+ shapes = [raw_obj_masks[i][j] for j in list(np.argsort(areas)[::-1].astype(np.int32))]
+ # step 2: draw masks
+ mask = np.zeros(frame_size, dtype=np.uint8)
+ for shape in shapes:
+ assert shape['label'] in ('target', 'ignore'), shape
+ label = 1 if shape['label'] == 'target' else -1 # replacing 255 with -1 here
+ cv2.polylines(mask, np.array([shape['points']], dtype=np.int32), True, label, 1)
+ cv2.fillPoly(mask, np.array([shape['points']], dtype=np.int32), label)
+ obj_masks.append(torch.from_numpy(mask).float())
+ masks.append(obj_masks)
+ elif sample['mask_type'] == 'vicas':
+ # case 5: special case for vicas dataset
+ masks = []
+ for obj_rle_path in sample['masks']:
+ obj_rles, obj_masks = nncore.load(obj_rle_path), []
+ for i in inds:
+ mask = torch.zeros(frame_size)
+ for rle in obj_rles[i]:
+ mask += 0 if rle is None else resize(torch.from_numpy(decode(rle)).float(), frame_size)
+ obj_masks.append((mask > 0).float())
+ masks.append(obj_masks)
+ elif sample['mask_type'] == 'sav':
+ # case 6: special case for sav dataset
+ annos = nncore.load(sample['masks'])['masklet']
+ masks = [[]]
+ for i in inds:
+ mask = resize(torch.from_numpy(decode(annos[i][int(sample['qid'])])).float(), frame_size)
+ masks[0].append(mask)
+ else:
+ raise TypeError(f"unknown mask type: {sample['mask_type']}")
+
+ return masks
+
+
+def build_obj_to_frame_idx(label_mask, batch_mode):
+ step_t_obj_to_frame_idx = [[]] if batch_mode else [[] for _ in range(label_mask.size(0))]
+
+ # t: frame_idx v: video_idx
+ for t in range(len(step_t_obj_to_frame_idx)):
+ if batch_mode:
+ for v in range(label_mask.size(0)):
+ for _ in range(label_mask.size(1)):
+ step_t_obj_to_frame_idx[t].append(torch.IntTensor([t, v]))
+ else:
+ for _ in range(label_mask.size(1)):
+ step_t_obj_to_frame_idx[t].append(torch.IntTensor([t, 0]))
+
+ label_obj_to_frame_idx = torch.stack([torch.stack(o) for o in step_t_obj_to_frame_idx])
+ return label_obj_to_frame_idx
+
+
+def preprocess_chatml(input_ids, text, tokenizer):
+ conv = get_conv('chatml')
+
+ rounds = [m + conv.seps[0] for m in text.split(conv.seps[0])]
+ assert (len(rounds) % 2 == 0) == (conv.system is not None)
+ assert rounds[-1] == conv.seps[0]
+ rounds = rounds[:-1]
+
+ if conv.system is None:
+ rounds = [''.join(rounds[i:i + 2]) for i in range(0, len(rounds), 2)]
+ else:
+ rounds = [''.join(rounds[:3])] + [''.join(rounds[i:i + 2]) for i in range(3, len(rounds), 2)]
+
+ labels = input_ids.clone()
+
+ sep = conv.seps[0] + conv.roles[1]
+ cur_len = 0
+
+ for i, rou in enumerate(rounds):
+ if len(rou) == 0:
+ break
+
+ ins = sep.join(rou.split(sep)[:-1]) + sep
+
+ rou_len = tokenizer(rou, return_length=True).length[0]
+ ins_len = tokenizer(ins, return_length=True).length[0]
+
+ labels[cur_len:cur_len + ins_len] = IGNORE_INDEX
+ cur_len += rou_len
+
+ if labels.size(0) != cur_len:
+ warnings.warn(f'Tokenization mismatch: {labels.size(0)} and {cur_len}')
+
+ return labels
+
+
+def preprocess(input_ids, text, tokenizer, conv_type):
+ if conv_type == 'chatml':
+ return preprocess_chatml(input_ids, text, tokenizer)
+ else:
+ raise ValueError(f'unknown conversation type: {conv_type}')
diff --git a/unipixel/model/__init__.py b/unipixel/model/__init__.py
new file mode 100755
index 0000000000000000000000000000000000000000..a71e77ace9e19e2a9dd2aaa5e149384c2ac4da3c
--- /dev/null
+++ b/unipixel/model/__init__.py
@@ -0,0 +1,5 @@
+# Copyright (c) 2025 Ye Liu. Licensed under the BSD-3-Clause License.
+
+from .qwen2_5_vl import PatchedQwen2_5_VLProcessor, PixelQwen2_5_VLConfig, PixelQwen2_5_VLForConditionalGeneration
+
+MODELS = {'qwen2_5_vl': (PixelQwen2_5_VLConfig, PixelQwen2_5_VLForConditionalGeneration, PatchedQwen2_5_VLProcessor)}
diff --git a/unipixel/model/builder.py b/unipixel/model/builder.py
new file mode 100755
index 0000000000000000000000000000000000000000..13140af181429838cbc5e6c4325d838c3fb9d73c
--- /dev/null
+++ b/unipixel/model/builder.py
@@ -0,0 +1,109 @@
+# Copyright (c) 2025 Ye Liu. Licensed under the BSD-3-Clause License.
+
+import nncore
+import torch
+import torch.nn as nn
+from peft import PeftModel
+from safetensors.torch import load_model
+from transformers import AutoConfig, AutoModel, AutoProcessor, Qwen2_5_VLForConditionalGeneration
+
+from unipixel.utils.env import get_auto_device
+
+
+def build_model(model_path,
+ config=None,
+ image_size=None,
+ is_trainable=False,
+ merge_adapter=False,
+ attn_implementation='flash_attention_2',
+ device='auto',
+ dtype='bfloat16'):
+ # set do_resize to false to avoid duplicated resizing
+ # https://github.com/huggingface/transformers/blob/main/src/transformers/models/qwen2_vl/image_processing_qwen2_vl.py
+ processor = AutoProcessor.from_pretrained(model_path, use_fast=True, do_resize=False)
+
+ config = config or AutoConfig.from_pretrained(model_path)
+ config.sam2_inference_mode = not is_trainable
+
+ # override sam2 image size
+ if image_size is not None:
+ config.sam2_image_size = image_size
+
+ adapter_path = nncore.join(model_path, 'adapter_model.safetensors')
+ partial_path = nncore.join(model_path, 'pytorch_model.safetensors')
+
+ if nncore.is_file(adapter_path) or nncore.is_file(partial_path):
+ print(f'Loading base model from {config.base_model_path}...')
+ model = AutoModel.from_pretrained(
+ config.base_model_path,
+ config=config,
+ low_cpu_mem_usage=True,
+ ignore_mismatched_sizes=True,
+ attn_implementation=attn_implementation,
+ torch_dtype=dtype,
+ device_map='auto' if device == 'all' else None)
+
+ meta_state_dict = {
+ n: torch.empty_like(p, device='cpu')
+ for n, p in model.named_parameters() if p.device == torch.device('meta')
+ }
+ model.load_state_dict(meta_state_dict, strict=False, assign=True)
+
+ # sam2 weights might be replaced later
+ if model.config.sam2_checkpoint:
+ model.load_sam2_weights()
+
+ embed_tokens = model.get_input_embeddings()
+ size = (embed_tokens.num_embeddings, embed_tokens.embedding_dim)
+ if embed_tokens.weight.size() != size:
+ print(f'Resizing embed_tokens from {embed_tokens.weight.size()} to {size}...')
+ model.model.language_model.embed_tokens.weight = nn.Parameter(embed_tokens.weight.new_empty(size))
+
+ size = (model.lm_head.out_features, model.lm_head.in_features)
+ if model.lm_head.weight.size() != size:
+ print(f'Resizing lm_head from {model.lm_head.weight.size()} to {size}...')
+ model.lm_head.weight = nn.Parameter(model.lm_head.weight.new_empty(size))
+
+ if nncore.is_file(adapter_path):
+ print(f'Loading adapter from {model_path}...')
+ # transformers integration does not support merge_and_unload, use peft instead
+ model = PeftModel.from_pretrained(
+ model,
+ model_path,
+ is_trainable=is_trainable,
+ low_cpu_mem_usage=True,
+ # load adapters to the same device as embed_tokens
+ torch_device=str(embed_tokens.weight.device))
+
+ if nncore.is_file(partial_path):
+ print(f'Loading state dict from {partial_path}...')
+ _, unexpected = load_model(model, partial_path, strict=False, device=str(model.device))
+ assert len(unexpected) == 0, f'unexpected parameters: {unexpected}'
+
+ if (not is_trainable or merge_adapter) and nncore.is_file(adapter_path):
+ print('Merging adapter and unloading...')
+ model = model.merge_and_unload()
+ model._hf_peft_config_loaded = False
+ else:
+ print(f'Loading full model from {model_path}...')
+
+ if config.model_type == 'qwen2_5_vl':
+ model_cls = Qwen2_5_VLForConditionalGeneration
+ else:
+ model_cls = AutoModel
+
+ model = model_cls.from_pretrained(
+ model_path,
+ config=config,
+ low_cpu_mem_usage=True,
+ attn_implementation=attn_implementation,
+ torch_dtype=dtype,
+ device_map='auto' if device == 'all' else None)
+
+ model.requires_grad_(False)
+
+ if not is_trainable and device != 'all':
+ device = get_auto_device() if device == 'auto' else device
+ model = model.to(device).eval()
+
+ return model, processor
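+
+
+# Example (sketch; the checkpoint path is a placeholder): load a trained model for inference.
+#   model, processor = build_model('/path/to/unipixel-checkpoint', device='auto')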
diff --git a/unipixel/model/qwen2_5_vl.py b/unipixel/model/qwen2_5_vl.py
new file mode 100644
index 0000000000000000000000000000000000000000..49fc2ddde9e92ac8a3ed883ede84afd49511d8ab
--- /dev/null
+++ b/unipixel/model/qwen2_5_vl.py
@@ -0,0 +1,399 @@
+# Copyright (c) 2025 Ye Liu. Licensed under the BSD-3-Clause License.
+
+import random
+
+import torch
+import torch.nn as nn
+from hydra import compose
+from hydra.utils import instantiate
+from nncore.nn import constant_init_, xavier_init_
+from transformers import (AutoConfig, AutoModel, AutoProcessor, Qwen2_5_VLConfig, Qwen2_5_VLForConditionalGeneration,
+ Qwen2_5_VLModel, Qwen2_5_VLProcessor, Qwen2_5_VLTextModel)
+from transformers.models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES
+from transformers.models.qwen2_5_vl.modeling_qwen2_5_vl import Qwen2_5_VisionTransformerPretrainedModel, Qwen2RMSNorm
+
+from sam2.loss_fns import MultiStepMultiMasksAndIous
+from sam2.modeling.position_encoding import PositionEmbedding1DRandom
+from sam2.modeling.sam.prompt_encoder import PromptEncoder
+from sam2.sam2_train import BatchedVideoDatapoint
+
+
+def cache_state_hook(module, inputs, outputs=None):
+ module.state = inputs[0] if isinstance(inputs, tuple) else inputs
+
+
+class PatchedQwen2_5_VLProcessor(Qwen2_5_VLProcessor):
+
+ def _check_special_mm_tokens(self, text, *args, **kwargs):
+ self.cache_text = text
+ return super()._check_special_mm_tokens(text, *args, **kwargs)
+
+
+class PixelQwen2_5_VLConfig(Qwen2_5_VLConfig):
+ model_type = 'pixel_qwen2_5_vl'
+
+
+class PixelQwen2_5_VisionTransformerPretrainedModel(Qwen2_5_VisionTransformerPretrainedModel):
+
+ def __init__(self, config, *args, **kwargs):
+ super().__init__(config, *args, **kwargs)
+ self.merger.mlp.register_forward_pre_hook(cache_state_hook)
+
+
+class PixelQwen2_5_VLModel(Qwen2_5_VLModel):
+ config_class = PixelQwen2_5_VLConfig
+
+ def __init__(self, config):
+ super(Qwen2_5_VLModel, self).__init__(config)
+ self.visual = PixelQwen2_5_VisionTransformerPretrainedModel._from_config(config.vision_config)
+ self.language_model = Qwen2_5_VLTextModel._from_config(config.text_config)
+ self.rope_deltas = None
+ self.post_init()
+ self.language_model.norm.register_forward_pre_hook(cache_state_hook)
+
+
+class PixelQwen2_5_VLForConditionalGeneration(Qwen2_5_VLForConditionalGeneration):
+ config_class = PixelQwen2_5_VLConfig
+
+ def __init__(self, config):
+ super().__init__(config)
+
+ self.model = PixelQwen2_5_VLModel(config)
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
+
+ if self.config.sam2_config is not None:
+ overrides = [f'++model.image_size={self.config.sam2_image_size}']
+ if self.config.sam2_inference_mode:
+ overrides.append('++model._target_=sam2.sam2_video_predictor.SAM2VideoPredictor')
+
+ cfg = compose(config_name=self.config.sam2_config, overrides=overrides)
+ self.sam2 = instantiate(cfg.model)
+
+ sam_dim, llm_dim = self.sam2.hidden_dim, self.config.hidden_size
+
+ self.seg_head = nn.Sequential(
+ Qwen2RMSNorm(llm_dim), nn.Linear(llm_dim, llm_dim), nn.GELU(),
+ nn.Linear(llm_dim, sam_dim * self.config.sam2_hidden_tokens))
+
+ self.ref_encoder = PromptEncoder(
+ embed_dim=sam_dim,
+ image_embedding_size=(self.sam2.sam_image_embedding_size, self.sam2.sam_image_embedding_size),
+ input_image_size=(self.config.sam2_image_size, self.config.sam2_image_size),
+ mask_in_chans=16)
+
+ self.ref_proj_single = nn.Linear(sam_dim * 2, sam_dim * 3)
+ self.ref_proj_double = nn.Linear(sam_dim * 3, sam_dim * 3)
+ self.ref_proj = nn.Sequential(nn.GELU(), nn.Linear(sam_dim * 6, llm_dim))
+
+ self.tem_pe = PositionEmbedding1DRandom(sam_dim // 2)
+ self.tem_emb = nn.Embedding(1, sam_dim)
+ self.tem_proj = nn.Linear(sam_dim, sam_dim * 3)
+
+ self.msk_proj = nn.Sequential(
+ nn.Linear(self.visual.merger.hidden_size, self.visual.merger.hidden_size), nn.GELU(),
+ nn.Linear(self.visual.merger.hidden_size, llm_dim))
+
+ self.loss_seg = MultiStepMultiMasksAndIous(
+ dict(loss_mask=100, loss_dice=5, loss_iou=5, loss_class=5),
+ supervise_all_iou=True,
+ iou_use_l1_loss=True,
+ pred_obj_scores=True,
+ focal_alpha=0.25,
+ focal_gamma=2.0,
+ focal_alpha_obj_score=-1.0,
+ focal_gamma_obj_score=0.0)
+
+ self.post_init()
+
+ @torch.no_grad()
+ def init_parameters(self):
+ # initialize ref_encoder with weights from sam2.sam_prompt_encoder
+ for p0, p1 in zip(self.ref_encoder.parameters(), self.sam2.sam_prompt_encoder.parameters()):
+ p0.copy_(p1)
+
+ # initialize msk_proj with weights from visual.merger.mlp
+ for p0, p1 in zip(self.msk_proj.parameters(), self.visual.merger.mlp.parameters()):
+ p0.copy_(p1)
+
+ # reset extra parameters
+ for s in ('seg_head', 'ref_proj_single', 'ref_proj_double', 'ref_proj', 'tem_proj'):
+ b = getattr(self, s, None)
+ if b is None:
+ continue
+ for n, m in b.named_modules():
+ if isinstance(m, nn.Linear):
+ print(f'Reset parameters of {b.__class__.__name__} {n} ({m.__class__.__name__})')
+ xavier_init_(m, distribution='uniform')
+ elif isinstance(m, nn.LayerNorm):
+ print(f'Reset parameters of {b.__class__.__name__} {n} ({m.__class__.__name__})')
+ constant_init_(m)
+
+ def load_sam2_weights(self):
+ state_dict = torch.load(self.config.sam2_checkpoint, map_location=self.sam2.device, weights_only=True)['model']
+ state_dict['memory_encoder.fuser.layers.0.weight'] = state_dict.pop('memory_encoder.fuser.layers.0.gamma')
+ state_dict['memory_encoder.fuser.layers.1.weight'] = state_dict.pop('memory_encoder.fuser.layers.1.gamma')
+ self.sam2.load_state_dict(state_dict)
+
+ def forward(self,
+ input_ids=None,
+ attention_mask=None,
+ position_ids=None,
+ past_key_values=None,
+ inputs_embeds=None,
+ labels=None,
+ use_cache=None,
+ output_attentions=None,
+ output_hidden_states=None,
+ return_dict=None,
+ pixel_values=None,
+ pixel_values_videos=None,
+ image_grid_thw=None,
+ video_grid_thw=None,
+ rope_deltas=None,
+ cache_position=None,
+ second_per_grid_ts=None,
+ frames=None,
+ frame_size=None,
+ point_coords=None,
+ point_labels=None,
+ point_frames=None,
+ refer_mask=None,
+ label_obj_to_frame_idx=None,
+ label_mask=None):
+ if caching := not self.training and (past_key_values is None or len(past_key_values) == 0):
+ self.seg = []
+
+ # move input_ids to the correct device (in case of auto device map)
+ input_ids = input_ids.to(self.model.language_model.embed_tokens.weight.device)
+
+ if inputs_embeds is None:
+ inputs_embeds = self.get_input_embeddings()(input_ids)
+ device, dtype = inputs_embeds.device, inputs_embeds.dtype
+
+ if pixel_values is not None:
+ image_embeds = self.get_image_features(pixel_values, image_grid_thw)
+ image_embeds = torch.cat(image_embeds)
+ n_image_tokens = (input_ids == self.config.image_token_id).sum()
+ n_image_features = image_embeds.shape[0]
+ assert n_image_tokens == n_image_features
+
+ mask = input_ids == self.config.image_token_id
+ mask_unsqueezed = mask.unsqueeze(-1)
+ mask_expanded = mask_unsqueezed.expand_as(inputs_embeds)
+ image_mask = mask_expanded.to(device)
+
+ image_embeds = image_embeds.to(device, dtype)
+ inputs_embeds = inputs_embeds.masked_scatter(image_mask, image_embeds)
+
+ if pixel_values_videos is not None:
+ video_embeds = self.get_video_features(pixel_values_videos, video_grid_thw)
+ video_embeds = torch.cat(video_embeds)
+ n_video_tokens = (input_ids == self.config.video_token_id).sum()
+ n_video_features = video_embeds.shape[0]
+ assert n_video_tokens == n_video_features
+
+ mask = input_ids == self.config.video_token_id
+ mask_unsqueezed = mask.unsqueeze(-1)
+ mask_expanded = mask_unsqueezed.expand_as(inputs_embeds)
+ video_mask = mask_expanded.to(device)
+
+ video_embeds = video_embeds.to(device, dtype)
+ inputs_embeds = inputs_embeds.masked_scatter(video_mask, video_embeds)
+
+ if any(k is not None for k in (point_coords, point_labels, point_frames)):
+ assert all(k is not None for k in (point_coords, point_labels, point_frames))
+
+ ref = []
+ for batch_idx in range(video_grid_thw.size(0)):
+ for obj_point_coords, obj_point_labels in zip(point_coords[batch_idx], point_labels[batch_idx]):
+ obj_ref, _ = self.ref_encoder((obj_point_coords, obj_point_labels), None, None, None)
+ assert obj_ref.size(1) in (2, 3), obj_ref.size()
+ if obj_ref.size(1) == 2:
+ obj_ref = self.ref_proj_single(obj_ref.flatten(1))
+ else:
+ obj_ref = self.ref_proj_double(obj_ref.flatten(1))
+ ref.append(obj_ref)
+ ref = torch.cat(ref)
+
+ tem = []
+ for batch_idx in range(video_grid_thw.size(0)):
+ # temporal merge size set to 2
+ size = video_grid_thw[batch_idx][0].item() * 2
+ for obj_point_frames in point_frames[batch_idx]:
+ obj_tem = obj_point_frames.unsqueeze(0).float()
+ obj_tem = self.tem_pe.forward_with_coords(obj_tem, size)
+ assert obj_tem.size(0) == 1, obj_tem.size()
+ tem.append(obj_tem[0])
+ tem = torch.cat(tem)
+ tem = tem + self.tem_emb(torch.LongTensor([0]).to(device))
+ tem = self.tem_proj(tem)
+
+ ref_emb = self.ref_proj(torch.cat((ref, tem), dim=1)).to(device, dtype)
+ ref_mask = input_ids == self.config.ref_token_id
+            # replace only the <|ref|> tokens in the instruction
+ # ref_mask = ref_mask * (labels == IGNORE_INDEX) if self.training else ref_mask
+ ref_mask = ref_mask.unsqueeze(-1).expand_as(inputs_embeds).to(device)
+ inputs_embeds = inputs_embeds.masked_scatter(ref_mask, ref_emb)
+
+ if refer_mask is not None:
+ mem, base_idx = [], 0
+ for batch_idx in range(video_grid_thw.size(0)):
+ size = video_grid_thw[batch_idx].prod().item() // 4
+ step = video_grid_thw[batch_idx][1] * video_grid_thw[batch_idx][2] // 4
+
+ # emb = self.model.visual.merger.ln_q.state[base_idx:base_idx + size]
+ # map grouped order back to raster scan order
+ # dim = emb.size(1)
+ # emb = emb.permute(1, 0).reshape(dim, -1, 2, 2).permute(0, 2, 1, 3).reshape(dim, -1).permute(1, 0)
+ emb = self.model.visual.merger.mlp.state[base_idx:base_idx + size]
+ batch_refer_mask = refer_mask[batch_idx]
+
+ for obj_idx in range(batch_refer_mask.size(1)):
+ mask = batch_refer_mask[:, obj_idx].flatten()
+ assert mask.size(0) == emb.size(0) == size
+ obj_emb = []
+ for i in range(0, size, step):
+ frame_mask = mask[i:i + step]
+ if frame_mask.any():
+ obj_emb.append(emb[i:i + step][frame_mask].mean(dim=0))
+ if len(obj_emb) > 0:
+ obj_emb = torch.stack(obj_emb)
+ mem.append(obj_emb)
+
+ base_idx = base_idx + size
+
+ mem_mask = input_ids == self.config.mem_token_id
+
+ if len(mem) > 0:
+ mem_emb = self.msk_proj(torch.cat(mem))
+ mem_mask = mem_mask.unsqueeze(-1).expand_as(inputs_embeds).to(device)
+ assert mem_emb.size(0) == mem_mask.all(dim=-1).sum(), (mem_emb.size(), mem_mask.all(dim=-1).sum())
+ inputs_embeds = inputs_embeds.masked_scatter(mem_mask, mem_emb)
+ else:
+ assert not mem_mask.any()
+
+ # ensure gradient tracking (in case that embed_tokens has been frozen)
+ if self.training and not inputs_embeds.requires_grad:
+ inputs_embeds.requires_grad = True
+
+ outputs = super().forward(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ past_key_values=past_key_values,
+ inputs_embeds=inputs_embeds,
+ labels=labels,
+ use_cache=not self.training,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=True,
+ pixel_values=pixel_values,
+ pixel_values_videos=pixel_values_videos,
+ image_grid_thw=image_grid_thw,
+ video_grid_thw=video_grid_thw,
+ rope_deltas=rope_deltas,
+ cache_position=cache_position,
+ second_per_grid_ts=second_per_grid_ts)
+
+ if self.config.sam2_config is not None and self.config.sam2_enable_decoder and frames is not None:
+ # decoder block -> -2 -> decoder block -> state -> norm -> -1
+ seg_tokens_all = self.seg_head(self.model.language_model.norm.state)
+ seg_tokens_all = seg_tokens_all.reshape(*seg_tokens_all.shape[:2], self.config.sam2_hidden_tokens, -1)
+
+ if self.training and label_obj_to_frame_idx is not None and label_mask is not None:
+ loss_seg_all, avg_factor = 0, 0
+ shift_inputs = input_ids[..., 1:].contiguous()
+
+ for batch_idx, (obj_to_frame_idx, mask) in enumerate(zip(label_obj_to_frame_idx, label_mask)):
+ # supervise all tokens (including those in inputs)
+ inds = torch.where(shift_inputs[batch_idx] == self.config.seg_token_id)[0]
+ assert inds.size(0) == mask.size(1)
+
+ if self.config.sample_objects > 0 and inds.size(0) > self.config.sample_objects:
+ sample_inds = random.sample(list(range(inds.size(0))), self.config.sample_objects)
+ obj_to_frame_idx = obj_to_frame_idx[:, sample_inds]
+ inds = inds[sample_inds]
+ mask = mask[:, sample_inds]
+
+ if self.config.sam2_batch_mode:
+ seg_tokens = seg_tokens_all[batch_idx][inds].repeat(mask.size(0), 1, 1) # (t * o) * 2 * c
+ img_batch = frames[batch_idx].unsqueeze(0) # 1 * t * c * h * w
+ masks = mask.view(1, -1, mask.size(2), mask.size(3)) # 1 * (t * o) * h * w
+ else:
+ seg_tokens = seg_tokens_all[batch_idx][inds] # o * 2 * c
+ img_batch = frames[batch_idx].unsqueeze(1) # t * 1 * c * h * w
+ masks = mask # t * o * h * w
+
+ data = BatchedVideoDatapoint(img_batch=img_batch, obj_to_frame_idx=obj_to_frame_idx, masks=masks)
+ pred = self.sam2(data, seg_tokens)
+
+ loss_seg = self.loss_seg(pred, masks)
+ loss_seg = loss_seg['core_loss'] / masks.size(0)
+
+ loss_seg_all += loss_seg
+ avg_factor += 1
+
+ assert avg_factor > 0
+ outputs.loss = outputs.loss + loss_seg_all / avg_factor
+ else:
+ assert len(frames) == len(frame_size) == 1
+ seg_tokens = []
+
+ if caching:
+                    # case 1: the input prompt already contains [SEG] tokens
+ shift_inputs = input_ids[..., 1:].contiguous()
+ inds = torch.where(shift_inputs[0] == self.config.seg_token_id)[0].to(seg_tokens_all.device)
+ seg_tokens += [t for t in seg_tokens_all[0][inds].unsqueeze(1)]
+
+ if outputs.logits[0, -1].argmax() == self.config.seg_token_id:
+                    # case 2: the model just generated a [SEG] token
+ seg_tokens.append(seg_tokens_all[0, -1, None])
+
+ for seg_token in seg_tokens:
+ if self.config.sam2_batch_mode:
+ pred_mask = []
+ for idx in range(frames[0].size(0)):
+ state = self.sam2.init_state(frames[0][idx, None], frame_size[0])
+ self.sam2.add_new_hidden_state(state, 0, 0, seg_token)
+ pred_mask += [o[2] for o in self.sam2.propagate_in_video(state, verbose=False)]
+ pred_mask = torch.cat(pred_mask, dim=1)
+ else:
+ state = self.sam2.init_state(frames[0], frame_size[0])
+ self.sam2.add_new_hidden_state(state, 0, 0, seg_token)
+ pred_mask = torch.cat([o[2] for o in self.sam2.propagate_in_video(state, verbose=False)], dim=1)
+
+ assert pred_mask.size(1) == frames[0].size(0)
+ self.seg.append((pred_mask > 0).cpu())
+
+ return outputs
+
+ def prepare_inputs_for_generation(self,
+ *args,
+ cache_position=None,
+ frames=None,
+ frame_size=None,
+ point_coords=None,
+ point_labels=None,
+ point_frames=None,
+ refer_mask=None,
+ **kwargs):
+ model_inputs = super().prepare_inputs_for_generation(*args, cache_position=cache_position, **kwargs)
+
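+        # point/refer inputs are only consumed at prefill (cache_position 0);
+        # subsequent decoding steps reuse cached states, so they are set to None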
+ model_inputs.update({
+ 'frames': frames,
+ 'frame_size': frame_size,
+ 'point_coords': point_coords if cache_position[0] == 0 else None,
+ 'point_labels': point_labels if cache_position[0] == 0 else None,
+ 'point_frames': point_frames if cache_position[0] == 0 else None,
+ 'refer_mask': refer_mask if cache_position[0] == 0 else None
+ })
+
+ return model_inputs
+
+
+# register the patched model as a vision-to-sequence model
+MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES[PixelQwen2_5_VLConfig.model_type] = 'PixelQwen2_5_VLForConditionalGeneration'
+
+AutoConfig.register(PixelQwen2_5_VLConfig.model_type, PixelQwen2_5_VLConfig)
+AutoModel.register(PixelQwen2_5_VLConfig, PixelQwen2_5_VLForConditionalGeneration)
+AutoProcessor.register(PixelQwen2_5_VLConfig, PatchedQwen2_5_VLProcessor)
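+
+# With the registrations above, checkpoints can be loaded through the standard
+# Auto* factories (a sketch; '<checkpoint_dir>' is a placeholder path):
+#   config = AutoConfig.from_pretrained('<checkpoint_dir>')
+#   model = AutoModel.from_pretrained('<checkpoint_dir>')
+#   processor = AutoProcessor.from_pretrained('<checkpoint_dir>')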
diff --git a/unipixel/utils/env.py b/unipixel/utils/env.py
new file mode 100644
index 0000000000000000000000000000000000000000..9a56b9260565ea7cdf91b78a8834c1a771f13f08
--- /dev/null
+++ b/unipixel/utils/env.py
@@ -0,0 +1,13 @@
+# Copyright (c) 2025 Ye Liu. Licensed under the BSD-3-Clause License.
+
+import torch
+
+
+def get_auto_device():
+ try:
+ import torch_npu
+ has_npu = torch_npu.npu.is_available()
+ except ImportError:
+ has_npu = False
+
+ return 'cuda' if torch.cuda.is_available() else 'npu' if has_npu else 'cpu'
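+
+# Example (hypothetical usage): resolve the device once at startup and reuse it:
+#   device = get_auto_device()
+#   model = model.to(device)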
diff --git a/unipixel/utils/io.py b/unipixel/utils/io.py
new file mode 100644
index 0000000000000000000000000000000000000000..21c74ad0113e2ef71bcb2e432a7f7417d962d22d
--- /dev/null
+++ b/unipixel/utils/io.py
@@ -0,0 +1,257 @@
+# Copyright (c) 2025 Ye Liu. Licensed under the BSD-3-Clause License.
+
+import random
+import re
+
+import decord
+import nncore
+import numpy as np
+import pysrt
+import torch
+from decord import VideoReader
+from PIL import Image
+
+
+def load_image(path):
+ image = Image.open(path).convert('RGB')
+ image = torch.from_numpy(np.array(image)).unsqueeze(0)
+ return image
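+
+# NOTE: load_image returns a uint8 tensor of shape (1, H, W, 3), matching the
+# per-frame layout produced by load_video below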
+
+
+def load_video(path, sample_frames=-1):
+ frame_mode = nncore.is_dir(path)
+
+ if frame_mode:
+ paths = nncore.ls(path, ext=('jpg', 'png'), join_path=True)
+ paths.sort(key=lambda p: int(re.sub(r'^\D*', '', nncore.pure_name(p))))
+ vlen = len(paths)
+ else:
+ decord.bridge.set_bridge('torch')
+ vr = VideoReader(path, num_threads=1)
+ vlen = len(vr)
+
+ if sample_frames > 0 and vlen > sample_frames:
+ inds = np.arange(0, vlen, (vlen - 1) / (sample_frames - 1))[:sample_frames].round().astype(int).tolist()
+ assert len(inds) == sample_frames
+ else:
+ inds = list(range(vlen))
+
+ if frame_mode:
+ images = [paths[i] for i in inds]
+ frames = torch.cat([load_image(i) for i in images])
+ else:
+ frames = vr.get_batch(inds)
+ images = [Image.fromarray(t.numpy()) for t in frames]
+
+ return frames, images
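+
+# Example (hypothetical path): uniformly sample at most 16 frames:
+#   frames, images = load_video('demo.mp4', sample_frames=16)
+# frames is a uint8 tensor of shape (T, H, W, 3); images is a list of PIL
+# images (or file paths when a frame directory is given)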
+
+
+def load_frames(paths, sample_frames=-1, sample_type='uniform', sample_for_llm_only=False):
+ assert sample_type in ('uniform', 'random')
+
+ vlen = len(paths)
+
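+    # sample_frames may also be a string 'lo,hi' (e.g. '8,16'), in which case
+    # the number of frames is drawn uniformly at random from [lo, hi]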
+ if isinstance(sample_frames, str):
+ sep = [int(n) for n in sample_frames.split(',')]
+ assert len(sep) in (1, 2)
+ sample_frames = int(random.randint(*sep)) if len(sep) > 1 else int(sep[0])
+
+ # NOTE: some videos and images are shorter than sample_frames
+ if sample_frames > 0 and vlen > sample_frames:
+ if sample_type == 'uniform':
+ inds = np.arange(0, vlen, (vlen - 1) / (sample_frames - 1))[:sample_frames].round().astype(int).tolist()
+ else:
+ seps = np.arange(0, vlen, (vlen - 1) / sample_frames)[:sample_frames + 1].round().astype(int).tolist()
+ inds = [random.choice(range(sep, max(sep + 1, seps[i + 1]))) for i, sep in enumerate(seps[:-1])]
+ assert len(inds) == sample_frames
+ else:
+ inds = list(range(len(paths)))
+
+ if sample_for_llm_only:
+ frames = torch.cat([load_image(p) for p in paths])
+ else:
+ frames = torch.cat([load_image(paths[i]) for i in inds])
+
+ paths = [paths[i] for i in inds]
+ return frames, paths, inds
+
+
+def load_frames_with_inds(path,
+ keep,
+ single_frame_mode=False,
+ sample_frames=-1,
+ sample_type='uniform',
+ sample_for_llm_only=False,
+ num_threads=0):
+ assert sample_type in ('uniform', 'random')
+
+ frame_mode = nncore.is_dir(path)
+
+ if frame_mode:
+ paths = nncore.ls(path, ext='jpg', join_path=True)
+ paths.sort(key=lambda p: int(re.sub(r'^\D*', '', nncore.pure_name(p))))
+ else:
+ decord.bridge.set_bridge('torch')
+ vr = VideoReader(path, num_threads=num_threads)
+
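+    # imap maps positions in the sampled range back to absolute frame indices
+    # in the source video (all frames when single_frame_mode is set)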
+ if single_frame_mode:
+ vlen = len(paths) if frame_mode else len(vr)
+ assert vlen > 1 and len(keep) == 1
+ imap = list(range(vlen))
+ else:
+ vlen = len(keep)
+ imap = keep
+
+ if isinstance(sample_frames, str):
+ sep = [int(n) for n in sample_frames.split(',')]
+ assert len(sep) in (1, 2)
+ sample_frames = int(random.randint(*sep)) if len(sep) > 1 else int(sep[0])
+
+ # some videos and images are shorter than sample_frames
+ if sample_frames > 0 and vlen > sample_frames:
+ if sample_type == 'uniform':
+ inds = np.arange(0, vlen, (vlen - 1) / (sample_frames - 1))[:sample_frames].round().astype(int).tolist()
+ else:
+ seps = np.arange(0, vlen, (vlen - 1) / sample_frames)[:sample_frames + 1].round().astype(int).tolist()
+ inds = [random.choice(range(sep, max(sep + 1, seps[i + 1]))) for i, sep in enumerate(seps[:-1])]
+
+ if single_frame_mode:
+ # ensure that keep is in the sampled indices
+ dist = [abs(keep[0] - i) for i in inds]
+ inds[dist.index(min(dist))] = keep[0]
+
+ assert len(inds) == sample_frames
+ else:
+ inds = list(range(vlen))
+
+ if frame_mode:
+ images = [paths[imap[i]] for i in inds]
+ else:
+ img_tensor = vr.get_batch([imap[i] for i in inds])
+ images = [Image.fromarray(t.numpy()) for t in img_tensor]
+
+ if single_frame_mode:
+ frames = load_image(paths[keep[0]]) if frame_mode else vr.get_batch(keep)
+ elif sample_for_llm_only:
+ frames = torch.cat([load_image(p) for p in paths]) if frame_mode else vr.get_batch(imap)
+ else:
+ frames = torch.cat([load_image(paths[imap[i]]) for i in inds]) if frame_mode else img_tensor.clone()
+
+ return frames, images, inds
+
+
+def load_frames_with_inds_keep(path,
+ all_frame_inds,
+ frame_idx,
+ sample_frames=-1,
+ sample_type='uniform',
+ sample_for_llm_only=False,
+ num_threads=0):
+ assert sample_type in ('uniform', 'random')
+
+ frame_mode = nncore.is_dir(path)
+
+ if frame_mode:
+ paths = nncore.ls(path, ext='jpg', join_path=True)
+ paths.sort(key=lambda p: int(re.sub(r'^\D*', '', nncore.pure_name(p))))
+ else:
+ decord.bridge.set_bridge('torch')
+ vr = VideoReader(path, num_threads=num_threads)
+
+ vlen = len(all_frame_inds)
+ imap = all_frame_inds
+
+ if isinstance(sample_frames, str):
+ sep = [int(n) for n in sample_frames.split(',')]
+ assert len(sep) in (1, 2)
+ sample_frames = int(random.randint(*sep)) if len(sep) > 1 else int(sep[0])
+
+ # some videos and images are shorter than sample_frames
+ if sample_frames > 0 and vlen > sample_frames:
+ if sample_type == 'uniform':
+ inds = np.arange(0, vlen, (vlen - 1) / (sample_frames - 1))[:sample_frames].round().astype(int).tolist()
+ else:
+ seps = np.arange(0, vlen, (vlen - 1) / sample_frames)[:sample_frames + 1].round().astype(int).tolist()
+ inds = [random.choice(range(sep, max(sep + 1, seps[i + 1]))) for i, sep in enumerate(seps[:-1])]
+
+ # ensure that keep is in the sampled indices
+ keep = all_frame_inds.index(frame_idx)
+ dist = [abs(keep - i) for i in inds]
+ inds[dist.index(min(dist))] = keep
+
+ assert len(inds) == sample_frames
+ else:
+ inds = list(range(vlen))
+
+ if frame_mode:
+ images = [paths[imap[i]] for i in inds]
+ else:
+ img_tensor = vr.get_batch([imap[i] for i in inds])
+ images = [Image.fromarray(t.numpy()) for t in img_tensor]
+
+ if sample_for_llm_only:
+ frames = torch.cat([load_image(p) for p in paths]) if frame_mode else vr.get_batch(imap)
+ else:
+ frames = torch.cat([load_image(paths[imap[i]]) for i in inds]) if frame_mode else img_tensor.clone()
+
+ return frames, images, inds
+
+
+def load_frames_with_stride(path,
+ every_n_frames=4,
+ sample_frames=-1,
+ sample_type='uniform',
+ sample_for_llm_only=False,
+ num_threads=0):
+ assert sample_type in ('uniform', 'random')
+
+ decord.bridge.set_bridge('torch')
+ vr = VideoReader(path, num_threads=num_threads)
+
+ keep = list(range(0, len(vr), every_n_frames))
+ vlen = len(keep)
+
+ if isinstance(sample_frames, str):
+ sep = [int(n) for n in sample_frames.split(',')]
+ assert len(sep) in (1, 2)
+ sample_frames = int(random.randint(*sep)) if len(sep) > 1 else int(sep[0])
+
+ # some videos and images are shorter than sample_frames
+ if sample_frames > 0 and vlen > sample_frames:
+ if sample_type == 'uniform':
+ inds = np.arange(0, vlen, (vlen - 1) / (sample_frames - 1))[:sample_frames].round().astype(int).tolist()
+ else:
+ seps = np.arange(0, vlen, (vlen - 1) / sample_frames)[:sample_frames + 1].round().astype(int).tolist()
+ inds = [random.choice(range(sep, max(sep + 1, seps[i + 1]))) for i, sep in enumerate(seps[:-1])]
+ assert len(inds) == sample_frames
+ else:
+ inds = list(range(vlen))
+
+ img_tensor = vr.get_batch([keep[i] for i in inds])
+ images = [Image.fromarray(t.numpy()) for t in img_tensor]
+ frames = vr.get_batch(keep) if sample_for_llm_only else img_tensor.clone()
+
+ return frames, images, inds
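+
+# Example (hypothetical path): keep every 4th frame, then sample 16 of them:
+#   frames, images, inds = load_frames_with_stride('demo.mp4', every_n_frames=4, sample_frames=16)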
+
+
+def load_subtitle(path):
+ subs = pysrt.open(path)
+
+ parsed = []
+ for sub in subs:
+ s, e = sub.start.to_time(), sub.end.to_time()
+ s = (s.hour * 60 + s.minute) * 60 + s.second + s.microsecond / 1000000
+ e = (e.hour * 60 + e.minute) * 60 + e.second + e.microsecond / 1000000
+ parsed.append((s, e, sub.text))
+
+ return parsed
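+
+# each parsed entry is (start_sec, end_sec, text), with times in float seconds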
+
+
+def get_duration(path, num_threads=1):
+    # if the video is already loaded as a list of frames, return the frame count
+ if isinstance(path, list):
+ return len(path)
+
+ vr = VideoReader(path, num_threads=num_threads)
+ duration = len(vr) / vr.get_avg_fps()
+ return duration
diff --git a/unipixel/utils/transforms.py b/unipixel/utils/transforms.py
new file mode 100644
index 0000000000000000000000000000000000000000..b4805de01ea01e2588a9c77243e08084f143d113
--- /dev/null
+++ b/unipixel/utils/transforms.py
@@ -0,0 +1,34 @@
+# Copyright (c) 2025 Ye Liu. Licensed under the BSD-3-Clause License.
+
+import torchvision.transforms as T
+
+HIERA_MEAN = [0.485, 0.456, 0.406]
+HIERA_STD = [0.229, 0.224, 0.225]
+
+
+class Normalize:
+
+ def __init__(self, mean, std):
+ self.mean = mean
+ self.std = std
+
+ def __call__(self, video):
+ mean, std = video.new_tensor(self.mean), video.new_tensor(self.std)
+ mean, std = mean[None, :, None, None], std[None, :, None, None]
+ return (video - mean) / std
+
+
+class Resize(T.Resize):
+
+ def __init__(self, size):
+ super().__init__(size, antialias=True)
+
+
+class ToTensor:
+
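+    # converts uint8 frames of shape (T, H, W, C) to float (T, C, H, W) in [0, 1]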
+ def __call__(self, video):
+ return video.float().permute(0, 3, 1, 2) / 255
+
+
+def get_sam2_transform(size):
+ return T.Compose([ToTensor(), Resize((size, size)), Normalize(HIERA_MEAN, HIERA_STD)])
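+
+# Example (hypothetical size): preprocess raw uint8 frames for the SAM2 encoder:
+#   transform = get_sam2_transform(1024)
+#   batch = transform(frames)  # float tensor of shape (T, 3, 1024, 1024)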
diff --git a/unipixel/utils/visualizer.py b/unipixel/utils/visualizer.py
new file mode 100644
index 0000000000000000000000000000000000000000..3c5f7d646e16ff9850600ade449846e7a258a4d9
--- /dev/null
+++ b/unipixel/utils/visualizer.py
@@ -0,0 +1,793 @@
+# Modified from https://github.com/facebookresearch/detectron2/blob/main/detectron2/utils/visualizer.py
+
+import colorsys
+import io
+import math
+import random
+from enum import Enum, unique
+
+import cv2
+import imageio.v3 as iio
+import matplotlib as mpl
+import matplotlib.colors as mplc
+import matplotlib.figure as mplfigure
+import numpy as np
+import pycocotools.mask as mask_util
+import torch
+from matplotlib.backends.backend_agg import FigureCanvasAgg
+
+_SMALL_OBJECT_AREA_THRESH = 1000
+_LARGE_MASK_AREA_THRESH = 120000
+
+_COLORS = np.array([
+ 0.000, 0.447, 0.741, 0.850, 0.325, 0.098, 0.929, 0.694, 0.125, 0.494, 0.184, 0.556, 0.466, 0.674, 0.188, 0.301,
+ 0.745, 0.933, 0.635, 0.078, 0.184, 0.300, 0.300, 0.300, 0.600, 0.600, 0.600, 1.000, 0.000, 0.000, 1.000, 0.500,
+ 0.000, 0.749, 0.749, 0.000, 0.000, 1.000, 0.000, 0.000, 0.000, 1.000, 0.667, 0.000, 1.000, 0.333, 0.333, 0.000,
+ 0.333, 0.667, 0.000, 0.333, 1.000, 0.000, 0.667, 0.333, 0.000, 0.667, 0.667, 0.000, 0.667, 1.000, 0.000, 1.000,
+ 0.333, 0.000, 1.000, 0.667, 0.000, 1.000, 1.000, 0.000, 0.000, 0.333, 0.500, 0.000, 0.667, 0.500, 0.000, 1.000,
+ 0.500, 0.333, 0.000, 0.500, 0.333, 0.333, 0.500, 0.333, 0.667, 0.500, 0.333, 1.000, 0.500, 0.667, 0.000, 0.500,
+ 0.667, 0.333, 0.500, 0.667, 0.667, 0.500, 0.667, 1.000, 0.500, 1.000, 0.000, 0.500, 1.000, 0.333, 0.500, 1.000,
+ 0.667, 0.500, 1.000, 1.000, 0.500, 0.000, 0.333, 1.000, 0.000, 0.667, 1.000, 0.000, 1.000, 1.000, 0.333, 0.000,
+ 1.000, 0.333, 0.333, 1.000, 0.333, 0.667, 1.000, 0.333, 1.000, 1.000, 0.667, 0.000, 1.000, 0.667, 0.333, 1.000,
+ 0.667, 0.667, 1.000, 0.667, 1.000, 1.000, 1.000, 0.000, 1.000, 1.000, 0.333, 1.000, 1.000, 0.667, 1.000, 0.333,
+ 0.000, 0.000, 0.500, 0.000, 0.000, 0.667, 0.000, 0.000, 0.833, 0.000, 0.000, 1.000, 0.000, 0.000, 0.000, 0.167,
+ 0.000, 0.000, 0.333, 0.000, 0.000, 0.500, 0.000, 0.000, 0.667, 0.000, 0.000, 0.833, 0.000, 0.000, 1.000, 0.000,
+ 0.000, 0.000, 0.167, 0.000, 0.000, 0.333, 0.000, 0.000, 0.500, 0.000, 0.000, 0.667, 0.000, 0.000, 0.833, 0.000,
+ 0.000, 1.000, 0.000, 0.000, 0.000, 0.143, 0.143, 0.143, 0.857, 0.857, 0.857, 1.000, 1.000, 1.000
+]).astype(np.float32).reshape(-1, 3)
+
+
+def random_color(rgb=False, maximum=1):
+ idx = np.random.randint(0, len(_COLORS))
+ ret = _COLORS[idx] * maximum
+ if not rgb:
+ ret = ret[::-1]
+ return ret
+
+
+def sample_color(rgb=False, maximum=1):
+ inds = list(range(len(_COLORS)))
+ random.shuffle(inds)
+ ret = _COLORS[inds] * maximum
+ if not rgb:
+ ret = ret[::-1]
+ return ret
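+
+# sample_color shuffles the whole palette, so consecutive objects receive
+# distinct colors (random_color above may repeat across calls)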
+
+
+@unique
+class ColorMode(Enum):
+ """
+ Enum of different color modes to use for instance visualizations.
+ """
+
+ IMAGE = 0
+ """
+    Picks a random color for every instance and overlays segmentations with low opacity.
+ """
+ SEGMENTATION = 1
+ """
+    Let instances of the same category have similar colors
+    (from metadata.thing_colors), and overlay them with
+    high opacity. This draws more attention to the quality of segmentation.
+ """
+ IMAGE_BW = 2
+ """
+ Same as IMAGE, but convert all areas without masks to gray-scale.
+ Only available for drawing per-instance mask predictions.
+ """
+
+
+class GenericMask:
+ """
+    Attributes:
+        polygons (list[ndarray]): polygons for this mask.
+            Each ndarray has format [x, y, x, y, ...]
+ mask (ndarray): a binary mask
+ """
+
+ def __init__(self, mask_or_polygons, height, width):
+ self._mask = self._polygons = self._has_holes = None
+ self.height = height
+ self.width = width
+
+ m = mask_or_polygons
+ if isinstance(m, dict):
+ # RLEs
+ assert "counts" in m and "size" in m
+ if isinstance(m["counts"], list): # uncompressed RLEs
+ h, w = m["size"]
+ assert h == height and w == width
+ m = mask_util.frPyObjects(m, h, w)
+ self._mask = mask_util.decode(m)[:, :]
+ return
+
+ if isinstance(m, list): # list[ndarray]
+ self._polygons = [np.asarray(x).reshape(-1) for x in m]
+ return
+
+ if isinstance(m, np.ndarray): # assumed to be a binary mask
+ assert m.shape[1] != 2, m.shape
+ assert m.shape == (
+ height,
+ width,
+ ), f"mask shape: {m.shape}, target dims: {height}, {width}"
+ self._mask = m.astype("uint8")
+ return
+
+ raise ValueError("GenericMask cannot handle object {} of type '{}'".format(m, type(m)))
+
+ @property
+ def mask(self):
+ if self._mask is None:
+ self._mask = self.polygons_to_mask(self._polygons)
+ return self._mask
+
+ @property
+ def polygons(self):
+ if self._polygons is None:
+ self._polygons, self._has_holes = self.mask_to_polygons(self._mask)
+ return self._polygons
+
+ @property
+ def has_holes(self):
+ if self._has_holes is None:
+ if self._mask is not None:
+ self._polygons, self._has_holes = self.mask_to_polygons(self._mask)
+ else:
+ self._has_holes = False # if original format is polygon, does not have holes
+ return self._has_holes
+
+ def mask_to_polygons(self, mask):
+ # cv2.RETR_CCOMP flag retrieves all the contours and arranges them to a 2-level
+ # hierarchy. External contours (boundary) of the object are placed in hierarchy-1.
+ # Internal contours (holes) are placed in hierarchy-2.
+ # cv2.CHAIN_APPROX_NONE flag gets vertices of polygons from contours.
+        mask = np.ascontiguousarray(mask)  # some versions of cv2 do not support non-contiguous arrays
+ res = cv2.findContours(mask.astype("uint8"), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)
+ hierarchy = res[-1]
+ if hierarchy is None: # empty mask
+ return [], False
+ has_holes = (hierarchy.reshape(-1, 4)[:, 3] >= 0).sum() > 0
+ res = res[-2]
+ res = [x.flatten() for x in res]
+ # These coordinates from OpenCV are integers in range [0, W-1 or H-1].
+ # We add 0.5 to turn them into real-value coordinate space. A better solution
+ # would be to first +0.5 and then dilate the returned polygon by 0.5.
+ res = [x + 0.5 for x in res if len(x) >= 6]
+ return res, has_holes
+
+ def polygons_to_mask(self, polygons):
+ rle = mask_util.frPyObjects(polygons, self.height, self.width)
+ rle = mask_util.merge(rle)
+ return mask_util.decode(rle)[:, :]
+
+ def area(self):
+ return self.mask.sum()
+
+    def bbox(self):
+        p = mask_util.frPyObjects(self.polygons, self.height, self.width)
+ p = mask_util.merge(p)
+ bbox = mask_util.toBbox(p)
+ bbox[2] += bbox[0]
+ bbox[3] += bbox[1]
+ return bbox
+
+
+class VisImage:
+
+ def __init__(self, img, scale=1.0):
+ """
+ Args:
+ img (ndarray): an RGB image of shape (H, W, 3) in range [0, 255].
+ scale (float): scale the input image
+ """
+ self.img = img
+ self.scale = scale
+ self.width, self.height = img.shape[1], img.shape[0]
+ self._setup_figure(img)
+
+ def _setup_figure(self, img):
+ """
+ Args:
+ Same as in :meth:`__init__()`.
+
+ Returns:
+ fig (matplotlib.pyplot.figure): top level container for all the image plot elements.
+ ax (matplotlib.pyplot.Axes): contains figure elements and sets the coordinate system.
+ """
+ fig = mplfigure.Figure(frameon=False)
+ self.dpi = fig.get_dpi()
+        # add a small 1e-2 to avoid precision loss due to matplotlib's truncation
+ # (https://github.com/matplotlib/matplotlib/issues/15363)
+ fig.set_size_inches(
+ (self.width * self.scale + 1e-2) / self.dpi,
+ (self.height * self.scale + 1e-2) / self.dpi,
+ )
+ self.canvas = FigureCanvasAgg(fig)
+ # self.canvas = mpl.backends.backend_cairo.FigureCanvasCairo(fig)
+ ax = fig.add_axes([0.0, 0.0, 1.0, 1.0])
+ ax.axis("off")
+ self.fig = fig
+ self.ax = ax
+ self.reset_image(img)
+
+ def reset_image(self, img):
+ """
+ Args:
+ img: same as in __init__
+ """
+ img = img.astype("uint8")
+ self.ax.imshow(img, extent=(0, self.width, self.height, 0), interpolation="nearest")
+
+ def save(self, filepath, fig_format=None):
+ """
+ Args:
+ filepath (str): a string that contains the absolute path, including the file name, where
+ the visualized image will be saved.
+ """
+ if fig_format is not None:
+ self.fig.savefig(filepath, format=fig_format)
+ else:
+ self.fig.savefig(filepath)
+
+ def get_image(self):
+ """
+ Returns:
+ ndarray:
+ the visualized image of shape (H, W, 3) (RGB) in uint8 type.
+ The shape is scaled w.r.t the input image using the given `scale` argument.
+ """
+ canvas = self.canvas
+ s, (width, height) = canvas.print_to_buffer()
+ # buf = io.BytesIO() # works for cairo backend
+ # canvas.print_rgba(buf)
+ # width, height = self.width, self.height
+ # s = buf.getvalue()
+
+ buffer = np.frombuffer(s, dtype="uint8")
+
+ img_rgba = buffer.reshape(height, width, 4)
+ rgb, alpha = np.split(img_rgba, [3], axis=2)
+ return rgb.astype("uint8")
+
+
+class Visualizer:
+ """
+ Visualizer that draws data about detection/segmentation on images.
+
+ It contains methods like `draw_{text,box,circle,line,binary_mask,polygon}`
+ that draw primitive objects to images, as well as high-level wrappers like
+ `draw_{instance_predictions,sem_seg,panoptic_seg_predictions,dataset_dict}`
+ that draw composite data in some pre-defined style.
+
+ Note that the exact visualization style for the high-level wrappers are subject to change.
+ Style such as color, opacity, label contents, visibility of labels, or even the visibility
+ of objects themselves (e.g. when the object is too small) may change according
+ to different heuristics, as long as the results still look visually reasonable.
+
+ To obtain a consistent style, you can implement custom drawing functions with the
+ abovementioned primitive methods instead. If you need more customized visualization
+ styles, you can process the data yourself following their format documented in
+ tutorials (:doc:`/tutorials/models`, :doc:`/tutorials/datasets`). This class does not
+ intend to satisfy everyone's preference on drawing styles.
+
+ This visualizer focuses on high rendering quality rather than performance. It is not
+ designed to be used for real-time applications.
+ """
+
+ def __init__(self, img_rgb, scale=1.0, instance_mode=ColorMode.IMAGE):
+ """
+ Args:
+ img_rgb: a numpy array of shape (H, W, C), where H and W correspond to
+ the height and width of the image respectively. C is the number of
+ color channels. The image is required to be in RGB format since that
+ is a requirement of the Matplotlib library. The image is also expected
+ to be in the range [0, 255].
+ instance_mode (ColorMode): defines one of the pre-defined style for drawing
+ instances on an image.
+ """
+ self.img = np.asarray(img_rgb).clip(0, 255).astype(np.uint8)
+ self.output = VisImage(self.img, scale=scale)
+ self.cpu_device = torch.device("cpu")
+
+        # too small texts are useless, therefore clamp to a minimum size
+        self._default_font_size = max(np.sqrt(self.output.height * self.output.width) // 90, 10 // scale)
+        self._default_font_size = 18  # fixed font size, overriding the heuristic above
+ self._instance_mode = instance_mode
+
+        css4_colors = mplc.CSS4_COLORS
+        self.color_proposals = [list(mplc.hex2color(color)) for color in css4_colors.values()]
+
+ def draw_text(
+ self,
+ text,
+ position,
+ *,
+ font_size=None,
+ color="g",
+ horizontal_alignment="center",
+ rotation=0,
+ ):
+ """
+ Args:
+ text (str): class label
+ position (tuple): a tuple of the x and y coordinates to place text on image.
+ font_size (int, optional): font of the text. If not provided, a font size
+ proportional to the image width is calculated and used.
+ color: color of the text. Refer to `matplotlib.colors` for full list
+ of formats that are accepted.
+ horizontal_alignment (str): see `matplotlib.text.Text`
+ rotation: rotation angle in degrees CCW
+
+ Returns:
+ output (VisImage): image object with text drawn.
+ """
+ if not font_size:
+ font_size = self._default_font_size
+
+ # since the text background is dark, we don't want the text to be dark
+ color = np.maximum(list(mplc.to_rgb(color)), 0.15)
+ color[np.argmax(color)] = max(0.8, np.max(color))
+
+ def contrasting_color(rgb):
+ """Returns 'white' or 'black' depending on which color contrasts more with the given RGB value."""
+
+ # Decompose the RGB tuple
+ R, G, B = rgb
+
+ # Calculate the Y value
+ Y = 0.299 * R + 0.587 * G + 0.114 * B
+
+ # If Y value is greater than 128, it's closer to white so return black. Otherwise, return white.
+ return 'black' if Y > 128 else 'white'
+
+ bbox_background = contrasting_color(color * 255)
+
+ x, y = position
+ self.output.ax.text(
+ x,
+ y,
+ text,
+ size=font_size * self.output.scale,
+ family="sans-serif",
+ bbox={
+ "facecolor": bbox_background,
+ "alpha": 0.8,
+ "pad": 0.7,
+ "edgecolor": "none"
+ },
+ verticalalignment="top",
+ horizontalalignment=horizontal_alignment,
+ color=color,
+ zorder=10,
+ rotation=rotation,
+ )
+ return self.output
+
+ def draw_box(self, box_coord, alpha=0.5, edge_color="g", line_style="-"):
+ """
+ Args:
+ box_coord (tuple): a tuple containing x0, y0, x1, y1 coordinates, where x0 and y0
+ are the coordinates of the image's top left corner. x1 and y1 are the
+ coordinates of the image's bottom right corner.
+            alpha (float): blending coefficient. Smaller values lead to more transparent masks.
+ edge_color: color of the outline of the box. Refer to `matplotlib.colors`
+ for full list of formats that are accepted.
+ line_style (string): the string to use to create the outline of the boxes.
+
+ Returns:
+ output (VisImage): image object with box drawn.
+ """
+ x0, y0, x1, y1 = box_coord
+ width = x1 - x0
+ height = y1 - y0
+
+ linewidth = max(self._default_font_size / 12, 1)
+
+ self.output.ax.add_patch(
+ mpl.patches.Rectangle(
+ (x0, y0),
+ width,
+ height,
+ fill=False,
+ edgecolor=edge_color,
+ linewidth=linewidth * self.output.scale,
+ alpha=alpha,
+ linestyle=line_style,
+ ))
+ return self.output
+
+ def draw_rotated_box_with_label(self, rotated_box, alpha=0.5, edge_color="g", line_style="-", label=None):
+ """
+ Draw a rotated box with label on its top-left corner.
+
+ Args:
+ rotated_box (tuple): a tuple containing (cnt_x, cnt_y, w, h, angle),
+ where cnt_x and cnt_y are the center coordinates of the box.
+ w and h are the width and height of the box. angle represents how
+ many degrees the box is rotated CCW with regard to the 0-degree box.
+            alpha (float): blending coefficient. Smaller values lead to more transparent masks.
+ edge_color: color of the outline of the box. Refer to `matplotlib.colors`
+ for full list of formats that are accepted.
+ line_style (string): the string to use to create the outline of the boxes.
+ label (string): label for rotated box. It will not be rendered when set to None.
+
+ Returns:
+ output (VisImage): image object with box drawn.
+ """
+ cnt_x, cnt_y, w, h, angle = rotated_box
+ area = w * h
+ # use thinner lines when the box is small
+ linewidth = self._default_font_size / (6 if area < _SMALL_OBJECT_AREA_THRESH * self.output.scale else 3)
+
+ theta = angle * math.pi / 180.0
+ c = math.cos(theta)
+ s = math.sin(theta)
+ rect = [(-w / 2, h / 2), (-w / 2, -h / 2), (w / 2, -h / 2), (w / 2, h / 2)]
+ # x: left->right ; y: top->down
+ rotated_rect = [(s * yy + c * xx + cnt_x, c * yy - s * xx + cnt_y) for (xx, yy) in rect]
+ for k in range(4):
+ j = (k + 1) % 4
+ self.draw_line(
+ [rotated_rect[k][0], rotated_rect[j][0]],
+ [rotated_rect[k][1], rotated_rect[j][1]],
+ color=edge_color,
+ linestyle="--" if k == 1 else line_style,
+ linewidth=linewidth,
+ )
+
+ if label is not None:
+ text_pos = rotated_rect[1] # topleft corner
+
+ height_ratio = h / np.sqrt(self.output.height * self.output.width)
+ label_color = self._change_color_brightness(edge_color, brightness_factor=0.7)
+ font_size = (np.clip((height_ratio - 0.02) / 0.08 + 1, 1.2, 2) * 0.5 * self._default_font_size)
+ self.draw_text(label, text_pos, color=label_color, font_size=font_size, rotation=angle)
+
+ return self.output
+
+ def draw_circle(self, circle_coord, color, radius=3):
+ """
+ Args:
+ circle_coord (list(int) or tuple(int)): contains the x and y coordinates
+ of the center of the circle.
+ color: color of the polygon. Refer to `matplotlib.colors` for a full list of
+ formats that are accepted.
+ radius (int): radius of the circle.
+
+ Returns:
+ output (VisImage): image object with box drawn.
+ """
+ x, y = circle_coord
+ self.output.ax.add_patch(mpl.patches.Circle(circle_coord, radius=radius, fill=True, color=color))
+ return self.output
+
+ def draw_line(self, x_data, y_data, color, linestyle="-", linewidth=None):
+ """
+ Args:
+ x_data (list[int]): a list containing x values of all the points being drawn.
+ Length of list should match the length of y_data.
+ y_data (list[int]): a list containing y values of all the points being drawn.
+ Length of list should match the length of x_data.
+ color: color of the line. Refer to `matplotlib.colors` for a full list of
+ formats that are accepted.
+ linestyle: style of the line. Refer to `matplotlib.lines.Line2D`
+ for a full list of formats that are accepted.
+ linewidth (float or None): width of the line. When it's None,
+ a default value will be computed and used.
+
+ Returns:
+ output (VisImage): image object with line drawn.
+ """
+ if linewidth is None:
+ linewidth = self._default_font_size / 3
+ linewidth = max(linewidth, 1)
+ self.output.ax.add_line(
+ mpl.lines.Line2D(
+ x_data,
+ y_data,
+ linewidth=linewidth * self.output.scale,
+ color=color,
+ linestyle=linestyle,
+ ))
+ return self.output
+
+ def draw_binary_mask(self, binary_mask, color=None, *, edge_color=None, text=None, alpha=0.7, area_threshold=10):
+ """
+ Args:
+ binary_mask (ndarray): numpy array of shape (H, W), where H is the image height and
+ W is the image width. Each value in the array is either a 0 or 1 value of uint8
+ type.
+ color: color of the mask. Refer to `matplotlib.colors` for a full list of
+ formats that are accepted. If None, will pick a random color.
+ edge_color: color of the polygon edges. Refer to `matplotlib.colors` for a
+ full list of formats that are accepted.
+            text (str): if not None, will be drawn on the object
+            alpha (float): blending coefficient. Smaller values lead to more transparent masks.
+ area_threshold (float): a connected component smaller than this area will not be shown.
+
+ Returns:
+ output (VisImage): image object with mask drawn.
+ """
+ if color is None:
+ color = random_color(rgb=True, maximum=1)
+ color = mplc.to_rgb(color)
+
+ has_valid_segment = False
+ binary_mask = binary_mask.astype("uint8") # opencv needs uint8
+ mask = GenericMask(binary_mask, self.output.height, self.output.width)
+ shape2d = (binary_mask.shape[0], binary_mask.shape[1])
+
+ if not mask.has_holes:
+ # draw polygons for regular masks
+ for segment in mask.polygons:
+ area = mask_util.area(mask_util.frPyObjects([segment], shape2d[0], shape2d[1]))
+ if area < (area_threshold or 0):
+ continue
+ has_valid_segment = True
+ segment = segment.reshape(-1, 2)
+ self.draw_polygon(segment, color=color, edge_color=edge_color, alpha=alpha)
+ else:
+ # Use Path/PathPatch to draw vector graphics:
+ # https://stackoverflow.com/questions/8919719/how-to-plot-a-complex-polygon
+ # rgba = np.zeros(shape2d + (4,), dtype="float32")
+ # rgba[:, :, :3] = color
+ # rgba[:, :, 3] = (mask.mask == 1).astype("float32") * alpha
+ # has_valid_segment = True
+ # self.output.ax.imshow(rgba, extent=(0, self.output.width, self.output.height, 0))
+            # masks with holes are currently drawn with the same polygon logic below
+ for segment in mask.polygons:
+ area = mask_util.area(mask_util.frPyObjects([segment], shape2d[0], shape2d[1]))
+ if area < (area_threshold or 0):
+ continue
+ has_valid_segment = True
+ segment = segment.reshape(-1, 2)
+ self.draw_polygon(segment, color=color, edge_color=edge_color, alpha=alpha)
+
+ if text is not None and has_valid_segment:
+ lighter_color = self._change_color_brightness(color, brightness_factor=0.7)
+ self._draw_text_in_mask(binary_mask, text, lighter_color)
+ return self.output
+
+ def _draw_number_in_mask(self, binary_mask, text, color, label_mode='1'):
+ """
+ Find proper places to draw text given a binary mask.
+ """
+
+ def number_to_string(n):
+ chars = []
+ while n:
+ n, remainder = divmod(n - 1, 26)
+ chars.append(chr(97 + remainder))
+ return ''.join(reversed(chars))
+
+ binary_mask = np.pad(binary_mask, ((1, 1), (1, 1)), 'constant')
+ mask_dt = cv2.distanceTransform(binary_mask, cv2.DIST_L2, 0)
+ mask_dt = mask_dt[1:-1, 1:-1]
+ max_dist = np.max(mask_dt)
+ coords_y, coords_x = np.where(mask_dt == max_dist) # coords is [y, x]
+
+        if label_mode == 'a':
+            text = number_to_string(int(text))
+
+ self.draw_text(text, (coords_x[len(coords_x) // 2] + 2, coords_y[len(coords_y) // 2] - 6), color=color)
+
+ def draw_binary_mask_with_number(self,
+ binary_mask,
+ color=None,
+ *,
+ edge_color=None,
+ text=None,
+ label_mode='1',
+ alpha=0.1,
+ anno_mode=['Mask'],
+ area_threshold=10):
+ """
+ Args:
+ binary_mask (ndarray): numpy array of shape (H, W), where H is the image height and
+ W is the image width. Each value in the array is either a 0 or 1 value of uint8
+ type.
+ color: color of the mask. Refer to `matplotlib.colors` for a full list of
+ formats that are accepted. If None, will pick a random color.
+ edge_color: color of the polygon edges. Refer to `matplotlib.colors` for a
+ full list of formats that are accepted.
+            text (str): if not None, will be drawn on the object
+            alpha (float): blending coefficient. Smaller values lead to more transparent masks.
+ area_threshold (float): a connected component smaller than this area will not be shown.
+
+ Returns:
+ output (VisImage): image object with mask drawn.
+ """
+ if color is None:
+ randint = random.randint(0, len(self.color_proposals) - 1)
+ color = self.color_proposals[randint]
+ color = mplc.to_rgb(color)
+
+ has_valid_segment = True
+ binary_mask = binary_mask.astype("uint8") # opencv needs uint8
+ mask = GenericMask(binary_mask, self.output.height, self.output.width)
+ shape2d = (binary_mask.shape[0], binary_mask.shape[1])
+
+ if 'Mask' in anno_mode:
+ if not mask.has_holes:
+ # draw polygons for regular masks
+ for segment in mask.polygons:
+ area = mask_util.area(mask_util.frPyObjects([segment], shape2d[0], shape2d[1]))
+ if area < (area_threshold or 0):
+ continue
+ has_valid_segment = True
+ segment = segment.reshape(-1, 2)
+ self.draw_polygon(segment, color=color, edge_color=edge_color, alpha=alpha)
+ else:
+ # Use Path/PathPatch to draw vector graphics:
+ # https://stackoverflow.com/questions/8919719/how-to-plot-a-complex-polygon
+ for segment in mask.polygons:
+ area = mask_util.area(mask_util.frPyObjects([segment], shape2d[0], shape2d[1]))
+ if area < (area_threshold or 0):
+ continue
+ has_valid_segment = True
+ segment = segment.reshape(-1, 2)
+ self.draw_polygon(segment, color=color, edge_color=edge_color, alpha=alpha)
+ # rgba = np.zeros(shape2d + (4,), dtype="float32")
+ # rgba[:, :, :3] = color
+ # rgba[:, :, 3] = (mask.mask == 1).astype("float32") * alpha
+ # self.output.ax.imshow(rgba, extent=(0, self.output.width, self.output.height, 0))
+
+ if 'Box' in anno_mode:
+ bbox = mask.bbox()
+ self.draw_box(bbox, edge_color=color, alpha=0.75)
+
+ if 'Mark' in anno_mode:
+ has_valid_segment = True
+ else:
+ has_valid_segment = False
+
+ if text is not None and has_valid_segment:
+ # lighter_color = tuple([x*0.2 for x in color])
+ lighter_color = [1, 1, 1] # self._change_color_brightness(color, brightness_factor=0.7)
+ self._draw_number_in_mask(binary_mask, text, lighter_color, label_mode)
+ return self.output
+
+ def draw_polygon(self, segment, color, edge_color=None, alpha=0.5):
+ """
+ Args:
+ segment: numpy array of shape Nx2, containing all the points in the polygon.
+ color: color of the polygon. Refer to `matplotlib.colors` for a full list of
+ formats that are accepted.
+ edge_color: color of the polygon edges. Refer to `matplotlib.colors` for a
+ full list of formats that are accepted. If not provided, a darker shade
+ of the polygon color will be used instead.
+            alpha (float): blending coefficient. Smaller values lead to more transparent masks.
+
+ Returns:
+ output (VisImage): image object with polygon drawn.
+ """
+ if edge_color is None:
+ # make edge color darker than the polygon color
+ if alpha > 0.8:
+ edge_color = self._change_color_brightness(color, brightness_factor=-0.7)
+ else:
+ edge_color = color
+ edge_color = mplc.to_rgb(edge_color) + (1, )
+
+ polygon = mpl.patches.Polygon(
+ segment,
+ fill=True,
+ facecolor=mplc.to_rgb(color) + (alpha, ),
+ edgecolor=edge_color,
+ linewidth=1, # max(self._default_font_size // 5 * self.output.scale, 1),
+ )
+ self.output.ax.add_patch(polygon)
+ return self.output
+
+ """
+ Internal methods:
+ """
+
+ def _jitter(self, color):
+ """
+ Randomly modifies given color to produce a slightly different color than the color given.
+
+ Args:
+ color (tuple[double]): a tuple of 3 elements, containing the RGB values of the color
+ picked. The values in the list are in the [0.0, 1.0] range.
+
+ Returns:
+ jittered_color (tuple[double]): a tuple of 3 elements, containing the RGB values of the
+ color after being jittered. The values in the list are in the [0.0, 1.0] range.
+ """
+ color = mplc.to_rgb(color)
+ # np.random.seed(0)
+ vec = np.random.rand(3)
+ # better to do it in another color space
+ vec = vec / np.linalg.norm(vec) * 0.5
+ res = np.clip(vec + color, 0, 1)
+ return tuple(res)
+
+ def _create_grayscale_image(self, mask=None):
+ """
+ Create a grayscale version of the original image.
+ The colors in masked area, if given, will be kept.
+ """
+ img_bw = self.img.astype("f4").mean(axis=2)
+ img_bw = np.stack([img_bw] * 3, axis=2)
+ if mask is not None:
+ img_bw[mask] = self.img[mask]
+ return img_bw
+
+ def _change_color_brightness(self, color, brightness_factor):
+ """
+ Depending on the brightness_factor, gives a lighter or darker color i.e. a color with
+ less or more saturation than the original color.
+
+ Args:
+ color: color of the polygon. Refer to `matplotlib.colors` for a full list of
+ formats that are accepted.
+ brightness_factor (float): a value in [-1.0, 1.0] range. A lightness factor of
+ 0 will correspond to no change, a factor in [-1.0, 0) range will result in
+ a darker color and a factor in (0, 1.0] range will result in a lighter color.
+
+ Returns:
+ modified_color (tuple[double]): a tuple containing the RGB values of the
+ modified color. Each value in the tuple is in the [0.0, 1.0] range.
+ """
+ assert brightness_factor >= -1.0 and brightness_factor <= 1.0
+ color = mplc.to_rgb(color)
+ polygon_color = colorsys.rgb_to_hls(*mplc.to_rgb(color))
+ modified_lightness = polygon_color[1] + (brightness_factor * polygon_color[1])
+ modified_lightness = 0.0 if modified_lightness < 0.0 else modified_lightness
+ modified_lightness = 1.0 if modified_lightness > 1.0 else modified_lightness
+ modified_color = colorsys.hls_to_rgb(polygon_color[0], modified_lightness, polygon_color[2])
+ return modified_color
+
+ def _draw_text_in_mask(self, binary_mask, text, color):
+ """
+ Find proper places to draw text given a binary mask.
+ """
+        # sometimes drawn on the wrong objects; the heuristics here could be improved
+ _num_cc, cc_labels, stats, centroids = cv2.connectedComponentsWithStats(binary_mask, 8)
+ if stats[1:, -1].size == 0:
+ return
+ largest_component_id = np.argmax(stats[1:, -1]) + 1
+
+ # draw text on the largest component, as well as other very large components.
+ for cid in range(1, _num_cc):
+ if cid == largest_component_id or stats[cid, -1] > _LARGE_MASK_AREA_THRESH:
+ # median is more stable than centroid
+ # center = centroids[largest_component_id]
+ center = np.median((cc_labels == cid).nonzero(), axis=1)[::-1]
+ bottom = np.max((cc_labels == cid).nonzero(), axis=1)[::-1]
+ center[1] = bottom[1] + 2
+ self.draw_text(text, center, color=color)
+
+ def get_output(self):
+ """
+ Returns:
+ output (VisImage): the image output containing the visualizations added
+ to the image.
+ """
+ return self.output
+
+
+def draw_mask(frames, masks, colors=None):
+ if colors is None:
+ colors = [random_color(rgb=True, maximum=1) for _ in range(len(masks))]
+
+ imgs = []
+ for i in range(frames.size(0)):
+ vis = Visualizer(frames[i].numpy())
+
+ for j in range(len(masks)):
+ fig = vis.draw_binary_mask_with_number(masks[j][0, i].bool().numpy(), color=colors[j], alpha=0.3)
+
+ buffer = io.BytesIO()
+ fig.save(buffer)
+ buffer.seek(0)
+ img = iio.imread(buffer)
+ imgs.append(img)
+
+ return imgs
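+
+# Example (hypothetical tensors): overlay predicted masks on sampled frames:
+#   colors = sample_color(rgb=True)
+#   imgs = draw_mask(frames, pred_masks, colors=colors)
+#   iio.imwrite('vis.mp4', imgs, fps=8)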