| sample_id (string, 21–196 chars) | text (string, 105–936k chars) | metadata (dict) | category (one of 6 classes) |
|---|---|---|---|
huggingface/transformers:src/transformers/models/pixio/convert_pixio_to_pytorch.py | # Copyright 2025 Meta AI and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert Pixio checkpoints from the original repository.
URL: https://github.com/facebookresearch/pixio/tree/main
"""
import argparse
from io import BytesIO
from pathlib import Path
import httpx
import torch
from PIL import Image
from transformers import BitImageProcessor, PixioConfig, PixioModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_pixio_config(model_name):
    """Build a `PixioConfig` sized for the ViT variant encoded in `model_name`.

    Args:
        model_name: Checkpoint name containing one of the known variant tags
            (e.g. "vitb16", "vith16").

    Returns:
        A `PixioConfig` with the matching width/depth/head-count.

    Raises:
        ValueError: If no known variant tag appears in `model_name`.
    """
    size_presets = {
        "vitb16": {"hidden_size": 768, "num_hidden_layers": 12, "num_attention_heads": 12},
        "vitl16": {"hidden_size": 1024, "num_hidden_layers": 24, "num_attention_heads": 16},
        "vith16": {"hidden_size": 1280, "num_hidden_layers": 32, "num_attention_heads": 16},
        "vit1b16": {"hidden_size": 1536, "num_hidden_layers": 48, "num_attention_heads": 24},
        "vit5b16": {"hidden_size": 3072, "num_hidden_layers": 48, "num_attention_heads": 32},
    }
    for variant, preset in size_presets.items():
        if variant in model_name:
            return PixioConfig(**preset)
    raise ValueError(f"Model '{model_name}' not supported")
def create_rename_keys(config):
    """Return (old_key, new_key) pairs mapping the original checkpoint layout
    to the Hugging Face `PixioModel` state-dict layout.

    Only `config.num_hidden_layers` is read from `config`.
    """
    # Embedding-level parameters.
    rename_keys = [
        ("cls_token", "embeddings.cls_token"),
        ("pos_embed", "embeddings.position_embeddings"),
        ("patch_embed.proj.weight", "embeddings.patch_embeddings.projection.weight"),
        ("patch_embed.proj.bias", "embeddings.patch_embeddings.projection.bias"),
    ]
    # Per-layer suffix mapping: (original suffix, HF suffix).
    per_layer_suffixes = [
        ("norm1.weight", "norm1.weight"),
        ("norm1.bias", "norm1.bias"),
        ("norm2.weight", "norm2.weight"),
        ("norm2.bias", "norm2.bias"),
        ("mlp.fc1.weight", "mlp.fc1.weight"),
        ("mlp.fc1.bias", "mlp.fc1.bias"),
        ("mlp.fc2.weight", "mlp.fc2.weight"),
        ("mlp.fc2.bias", "mlp.fc2.bias"),
        ("attn.proj.weight", "attention.output.dense.weight"),
        ("attn.proj.bias", "attention.output.dense.bias"),
    ]
    for layer_idx in range(config.num_hidden_layers):
        rename_keys.extend(
            (f"blocks.{layer_idx}.{src}", f"encoder.layer.{layer_idx}.{dst}")
            for src, dst in per_layer_suffixes
        )
    # Final layernorm.
    rename_keys.extend(
        [
            ("norm.weight", "layernorm.weight"),
            ("norm.bias", "layernorm.bias"),
        ]
    )
    return rename_keys
def rename_key(dct, old, new):
    """Move the value stored under key `old` to key `new`, removing `old`.

    Raises KeyError if `old` is absent.
    """
    dct[new] = dct.pop(old)
def read_in_q_k_v(state_dict, config):
    """Split each layer's fused qkv projection into separate query/key/value entries.

    The original checkpoint stores attention input projections as one matrix of
    shape (3 * hidden_size, hidden_size) plus one bias of shape (3 * hidden_size,),
    packed in query/key/value order. This rewrites `state_dict` in place.
    """
    hidden = config.hidden_size
    for layer_idx in range(config.num_hidden_layers):
        qkv_weight = state_dict.pop(f"blocks.{layer_idx}.attn.qkv.weight")
        qkv_bias = state_dict.pop(f"blocks.{layer_idx}.attn.qkv.bias")
        prefix = f"encoder.layer.{layer_idx}.attention.attention"
        # First `hidden` rows are the query projection.
        state_dict[f"{prefix}.query.weight"] = qkv_weight[:hidden, :]
        state_dict[f"{prefix}.query.bias"] = qkv_bias[:hidden]
        # Middle `hidden` rows are the key projection.
        state_dict[f"{prefix}.key.weight"] = qkv_weight[hidden : hidden * 2, :]
        state_dict[f"{prefix}.key.bias"] = qkv_bias[hidden : hidden * 2]
        # Last `hidden` rows are the value projection.
        state_dict[f"{prefix}.value.weight"] = qkv_weight[-hidden:, :]
        state_dict[f"{prefix}.value.bias"] = qkv_bias[-hidden:]
# We will verify our results on an image of cute cats
def prepare_img():
    """Download the standard COCO sanity-check image (two cats) and return it as an RGB PIL image."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # Stream the response and read the full body into memory before decoding.
    with httpx.stream("GET", url) as response:
        image = Image.open(BytesIO(response.read())).convert("RGB")
    return image
@torch.no_grad()
def convert_pixio_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    """
    Copy/paste/tweak model's weights to our Pixio structure.

    Args:
        model_name: Variant name (e.g. "pixio_vith16"), used to pick the config.
        checkpoint_path: Path to the original torch checkpoint file.
        pytorch_dump_folder_path: Where to save the converted model; skipped if None.
        push_to_hub: If True, also push model and processor to the Hub.
    """
    # define default Pixio configuration
    config = get_pixio_config(model_name)
    # load the original state dict on CPU so no GPU is required for conversion
    state_dict = torch.load(checkpoint_path, map_location="cpu")
    # rename keys from the original layout to the HF layout
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    # split the fused qkv projections into separate query/key/value tensors
    read_in_q_k_v(state_dict, config)
    # load HuggingFace model (strict load: any leftover/missing key will raise)
    model = PixioModel(config).eval()
    model.load_state_dict(state_dict)
    # load image
    image = prepare_img()
    processor = BitImageProcessor(
        size={"height": 256, "width": 256},
        do_center_crop=False,
        crop_size={"height": 256, "width": 256},
        resample=PILImageResampling.BICUBIC,
        image_mean=IMAGENET_DEFAULT_MEAN,
        image_std=IMAGENET_DEFAULT_STD,
    )
    pixel_values = processor(image, return_tensors="pt").pixel_values
    with torch.no_grad():
        outputs = model(pixel_values, output_hidden_states=True)
    # Print class-token and patch-token slices with and without the final LayerNorm,
    # so they can be compared by eye against the original implementation's output.
    print("last layer class embeddings w/ LayerNorm:")
    print(outputs.last_hidden_state[:, : config.n_cls_tokens])
    print("last layer patch embeddings w/ LayerNorm:")
    print(outputs.last_hidden_state[:, config.n_cls_tokens :])
    print("last layer class embeddings w/o LayerNorm:")
    print(outputs.hidden_states[-1][:, : config.n_cls_tokens])
    print("last layer patch embeddings w/o LayerNorm:")
    print(outputs.hidden_states[-1][:, config.n_cls_tokens :])
    if pytorch_dump_folder_path is not None:
        # NOTE(review): mkdir without parents=True fails if intermediate dirs are missing —
        # acceptable for a manually-run script, but worth confirming.
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving image processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        # Hub repo names use dashes instead of underscores.
        name = model_name.replace("_", "-")
        model.push_to_hub(f"facebook/{name}")
        processor.push_to_hub(f"facebook/{name}")
if __name__ == "__main__":
    # CLI entry point: parse arguments and run the conversion.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="pixio_vith16",
        type=str,
        choices=[
            "pixio_vitb16",
            "pixio_vitl16",
            "pixio_vith16",
            "pixio_vit1b16",
            "pixio_vit5b16",
        ],
        help="Name of the model you'd like to convert.",
    )
    parser.add_argument(
        "--checkpoint_path",
        required=True,
        type=str,
        help="Path of the checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether or not to push the converted model to the Hugging Face hub.",
    )
    args = parser.parse_args()
    convert_pixio_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/pixio/convert_pixio_to_pytorch.py",
"license": "Apache License 2.0",
"lines": 196,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/pixio/modular_pixio.py | # Copyright 2025 Meta AI and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch Pixio model."""
import torch
from torch import nn
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import BackboneOutput, BaseModelOutput, BaseModelOutputWithPooling
from ...utils import auto_docstring, is_tracing, logging
from ...utils.generic import merge_with_config_defaults
from ...utils.output_capturing import capture_outputs
from ..dinov2.configuration_dinov2 import Dinov2Config
from ..dinov2.modeling_dinov2 import (
Dinov2Backbone,
Dinov2DropPath,
Dinov2MLP,
)
from ..vit.modeling_vit import ViTAttention, ViTPatchEmbeddings, ViTPreTrainedModel
logger = logging.get_logger(__name__)
class PixioConfig(Dinov2Config):
    r"""
    This is the configuration class to store the configuration of a [`PixioModel`]. It is used to instantiate a
    Pixio model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of the ViT
    [facebook/pixio-huge](https://huggingface.co/facebook/pixio-huge) architecture.

    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.

    Args:
        hidden_size (`int`, *optional*, defaults to 1280):
            Dimensionality of the encoder layers and the pooler layer.
        num_hidden_layers (`int`, *optional*, defaults to 32):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 16):
            Number of attention heads for each attention layer in the Transformer encoder.
        mlp_ratio (`int`, *optional*, defaults to 4):
            Ratio of the hidden size of the MLPs relative to the `hidden_size`.
        n_cls_tokens (`int`, *optional*, defaults to 8):
            Number of class tokens in the Transformer encoder.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"` and `"gelu_new"` are supported.
        hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-06):
            The epsilon used by the layer normalization layers.
        image_size (`int`, *optional*, defaults to 256):
            The size (resolution) of each image.
        patch_size (`int`, *optional*, defaults to 16):
            The size (resolution) of each patch.
        num_channels (`int`, *optional*, defaults to 3):
            The number of input channels.
        qkv_bias (`bool`, *optional*, defaults to `True`):
            Whether to add a bias to the queries, keys and values.
        drop_path_rate (`float`, *optional*, defaults to 0.0):
            Stochastic depth rate per sample (when applied in the main path of residual layers).
        out_features (`list[str]`, *optional*):
            If used as backbone, list of features to output. Can be any of `"stem"`, `"stage1"`, `"stage2"`, etc.
            (depending on how many stages the model has). If unset and `out_indices` is set, will default to the
            corresponding stages. If unset and `out_indices` is unset, will default to the last stage. Must be in the
            same order as defined in the `stage_names` attribute.
        out_indices (`list[int]`, *optional*):
            If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how
            many stages the model has). If unset and `out_features` is set, will default to the corresponding stages.
            If unset and `out_features` is unset, will default to the last stage. Must be in the
            same order as defined in the `stage_names` attribute.
        apply_layernorm (`bool`, *optional*, defaults to `True`):
            Whether to apply layer normalization to the feature maps in case the model is used as backbone.
        reshape_hidden_states (`bool`, *optional*, defaults to `True`):
            Whether to reshape the feature maps to 4D tensors of shape `(batch_size, hidden_size, height, width)` in
            case the model is used as backbone. If `False`, the feature maps will be 3D tensors of shape `(batch_size,
            seq_len, hidden_size)`.

    Example:

    ```python
    >>> from transformers import PixioConfig, PixioModel

    >>> # Initializing a Pixio pixio-huge style configuration
    >>> configuration = PixioConfig()

    >>> # Initializing a model (with random weights) from the pixio-huge style configuration
    >>> model = PixioModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "pixio"

    def __init__(
        self,
        hidden_size=1280,
        num_hidden_layers=32,
        num_attention_heads=16,
        mlp_ratio=4,
        n_cls_tokens=8,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        image_size=256,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        drop_path_rate=0.0,
        out_features=None,
        out_indices=None,
        apply_layernorm=True,
        reshape_hidden_states=True,
        **kwargs,
    ):
        super().__init__(
            hidden_size=hidden_size,
            num_hidden_layers=num_hidden_layers,
            num_attention_heads=num_attention_heads,
            mlp_ratio=mlp_ratio,
            hidden_act=hidden_act,
            hidden_dropout_prob=hidden_dropout_prob,
            attention_probs_dropout_prob=attention_probs_dropout_prob,
            initializer_range=initializer_range,
            layer_norm_eps=layer_norm_eps,
            image_size=image_size,
            patch_size=patch_size,
            num_channels=num_channels,
            qkv_bias=qkv_bias,
            drop_path_rate=drop_path_rate,
            apply_layernorm=apply_layernorm,
            reshape_hidden_states=reshape_hidden_states,
        )
        # Pixio-specific: multiple learned class tokens (see `PixioEmbeddings`).
        self.n_cls_tokens = n_cls_tokens
        # Remove Dinov2-only attributes that do not apply to Pixio.
        del self.layerscale_value
        del self.use_swiglu_ffn
        del self.use_mask_token
class PixioPatchEmbeddings(ViTPatchEmbeddings):
    """Identical to ViT patch embeddings; subclassed so the modular converter emits a Pixio-named copy."""

    pass
class PixioEmbeddings(nn.Module):
    """
    Construct the CLS tokens, position and patch embeddings.
    """

    def __init__(self, config: PixioConfig) -> None:
        super().__init__()
        # Pixio uses `n_cls_tokens` learned class tokens rather than a single one.
        self.cls_token = nn.Parameter(torch.randn(1, config.n_cls_tokens, config.hidden_size))
        # Pixio has no mask token (the config removes `use_mask_token`); kept as None for interface parity.
        self.mask_token = None
        self.patch_embeddings = PixioPatchEmbeddings(config)
        num_patches = self.patch_embeddings.num_patches
        # One position embedding per patch plus one per class token.
        self.position_embeddings = nn.Parameter(torch.randn(1, num_patches + config.n_cls_tokens, config.hidden_size))
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.n_cls_tokens = config.n_cls_tokens
        self.patch_size = config.patch_size
        self.config = config

    def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
        """
        This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher resolution
        images. This method is also adapted to support tracing and interpolation at torch.float32 precision.

        Adapted from:
        - https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and
        - https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211
        """
        num_patches = embeddings.shape[1] - self.n_cls_tokens
        num_positions = self.position_embeddings.shape[1] - self.n_cls_tokens
        # Fast path: no interpolation needed when the grid matches training resolution
        # (skipped under tracing so the exported graph stays shape-generic).
        if not is_tracing() and num_patches == num_positions and height == width:
            return self.position_embeddings
        # Class-token position embeddings are never interpolated; only patch positions are.
        class_pos_embed = self.position_embeddings[:, : self.n_cls_tokens]
        patch_pos_embed = self.position_embeddings[:, self.n_cls_tokens :]
        dim = embeddings.shape[-1]
        new_height = height // self.patch_size
        new_width = width // self.patch_size
        # Assumes the pretrained position grid is square (sqrt of num_positions).
        sqrt_num_positions = int(num_positions**0.5)
        patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim)
        patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2)
        target_dtype = patch_pos_embed.dtype
        # Interpolate in float32 for numerical stability, then cast back.
        patch_pos_embed = nn.functional.interpolate(
            patch_pos_embed.to(torch.float32),
            size=(new_height, new_width),
            mode="bicubic",
            align_corners=False,
        ).to(dtype=target_dtype)
        patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
        return torch.cat((class_pos_embed, patch_pos_embed), dim=1)

    def forward(self, pixel_values: torch.Tensor) -> torch.Tensor:
        batch_size, _, height, width = pixel_values.shape
        # Match the projection weight dtype so mixed-precision inputs are handled.
        target_dtype = self.patch_embeddings.projection.weight.dtype
        embeddings = self.patch_embeddings(pixel_values.to(dtype=target_dtype))
        # Prepend the class tokens, then add (possibly interpolated) position embeddings.
        cls_tokens = self.cls_token.expand(batch_size, -1, -1)
        embeddings = torch.cat((cls_tokens, embeddings), dim=1)
        embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width)
        embeddings = self.dropout(embeddings)
        return embeddings
class PixioAttention(ViTAttention):
    """Identical to ViT attention; subclassed so the modular converter emits a Pixio-named copy."""

    pass
class PixioDropPath(Dinov2DropPath):
    """Identical to Dinov2 stochastic depth; subclassed so the modular converter emits a Pixio-named copy."""

    pass
class PixioMLP(Dinov2MLP):
    """Identical to the Dinov2 MLP; subclassed so the modular converter emits a Pixio-named copy."""

    pass
class PixioLayer(GradientCheckpointingLayer):
    """One pre-norm Pixio transformer block: self-attention then MLP, each with
    a residual connection and optional stochastic depth."""

    def __init__(self, config: PixioConfig) -> None:
        super().__init__()
        self.norm1 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.attention = PixioAttention(config)
        # Stochastic depth only when a positive rate is configured.
        self.drop_path = PixioDropPath(config.drop_path_rate) if config.drop_path_rate > 0.0 else nn.Identity()
        self.norm2 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.mlp = PixioMLP(config)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        # Attention sub-block (pre-norm) + residual.
        attention_output = self.attention(self.norm1(hidden_states))
        hidden_states = hidden_states + self.drop_path(attention_output)
        # MLP sub-block (pre-norm) + residual.
        mlp_output = self.mlp(self.norm2(hidden_states))
        return hidden_states + self.drop_path(mlp_output)
class PixioEncoder(nn.Module):
    """Stack of `PixioLayer` blocks; optionally records every intermediate hidden state."""

    def __init__(self, config: PixioConfig):
        super().__init__()
        self.config = config
        self.layer = nn.ModuleList(PixioLayer(config) for _ in range(config.num_hidden_layers))
        self.gradient_checkpointing = False

    def forward(self, hidden_states: torch.Tensor, output_hidden_states: bool = False) -> BaseModelOutput:
        # When requested, the collected states include the embedding output
        # (index 0) followed by each layer's output.
        collected = [hidden_states] if output_hidden_states else None
        for block in self.layer:
            hidden_states = block(hidden_states)
            if collected is not None:
                collected.append(hidden_states)
        return BaseModelOutput(
            last_hidden_state=hidden_states,
            hidden_states=None if collected is None else tuple(collected),
        )
class PixioPreTrainedModel(ViTPreTrainedModel):
    """Identical to the ViT pretrained-model base; subclassed so the modular converter emits a Pixio-named copy."""

    pass
@auto_docstring
class PixioModel(PixioPreTrainedModel):
    """Bare Pixio encoder: embeddings + transformer layers + final LayerNorm,
    pooling by averaging the class-token positions."""

    def __init__(self, config: PixioConfig):
        super().__init__(config)
        self.config = config
        self.embeddings = PixioEmbeddings(config)
        self.encoder = PixioEncoder(config)
        self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        # Initialize weights and apply final processing.
        self.post_init()

    def get_input_embeddings(self) -> PixioPatchEmbeddings:
        return self.embeddings.patch_embeddings

    @merge_with_config_defaults
    @capture_outputs(tie_last_hidden_states=False)
    @auto_docstring
    def forward(
        self,
        pixel_values: torch.Tensor | None = None,
        output_hidden_states: bool | None = None,
        **kwargs,
    ) -> BaseModelOutputWithPooling:
        if output_hidden_states is None:
            output_hidden_states = self.config.output_hidden_states
        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")
        embedding_output = self.embeddings(pixel_values)
        encoder_outputs: BaseModelOutput = self.encoder(embedding_output, output_hidden_states=output_hidden_states)
        sequence_output = encoder_outputs.last_hidden_state
        sequence_output = self.layernorm(sequence_output)
        # Pool by averaging the first `n_cls_tokens` positions (the class tokens).
        pooled_output = sequence_output[:, : self.embeddings.n_cls_tokens, :].mean(dim=1)
        return BaseModelOutputWithPooling(
            last_hidden_state=sequence_output,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )
@auto_docstring(
    custom_intro="""
    Pixio backbone, to be used with frameworks like DETR and MaskFormer.
    """
)
class PixioBackbone(Dinov2Backbone):
    @merge_with_config_defaults
    @capture_outputs
    @auto_docstring
    def forward(
        self, pixel_values: torch.Tensor, output_hidden_states: bool | None = None, **kwargs
    ) -> BackboneOutput:
        r"""
        Examples:

        ```python
        >>> from transformers import AutoImageProcessor, AutoBackbone
        >>> import torch
        >>> from PIL import Image
        >>> import httpx
        >>> from io import BytesIO

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> with httpx.stream("GET", url) as response:
        ...     image = Image.open(BytesIO(response.read()))

        >>> processor = AutoImageProcessor.from_pretrained("facebook/pixio-huge")
        >>> model = AutoBackbone.from_pretrained(
        ...     "facebook/pixio-huge", out_features=["stage7", "stage15", "stage23", "stage31"]
        ... )

        >>> inputs = processor(image, return_tensors="pt")
        >>> outputs = model(**inputs)
        >>> feature_maps = outputs.feature_maps
        >>> list(feature_maps[-1].shape)
        [1, 1280, 16, 16]
        ```"""
        if output_hidden_states is None:
            output_hidden_states = self.config.output_hidden_states
        embedding_output = self.embeddings(pixel_values)
        # Always request hidden states: feature maps are selected from them below.
        output: BaseModelOutput = self.encoder(embedding_output, output_hidden_states=True)
        hidden_states = output.hidden_states
        feature_maps = []
        for stage, hidden_state in zip(self.stage_names, hidden_states):
            if stage in self.out_features:
                if self.config.apply_layernorm:
                    hidden_state = self.layernorm(hidden_state)
                if self.config.reshape_hidden_states:
                    # Drop the class tokens, then reshape the patch sequence back
                    # to a (batch, channels, height, width) feature map.
                    hidden_state = hidden_state[:, self.embeddings.n_cls_tokens :]
                    batch_size, _, height, width = pixel_values.shape
                    patch_size = self.config.patch_size
                    hidden_state = hidden_state.reshape(batch_size, height // patch_size, width // patch_size, -1)
                    hidden_state = hidden_state.permute(0, 3, 1, 2).contiguous()
                feature_maps.append(hidden_state)
        return BackboneOutput(
            feature_maps=tuple(feature_maps),
            hidden_states=hidden_states if output_hidden_states else None,
        )
__all__ = ["PixioConfig", "PixioModel", "PixioPreTrainedModel", "PixioBackbone"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/pixio/modular_pixio.py",
"license": "Apache License 2.0",
"lines": 325,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:tests/models/pixio/test_modeling_pixio.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch Pixio model."""
import unittest
from functools import cached_property
from transformers import PixioConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import PixioBackbone, PixioModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class PixioModelTester:
    """Builds small Pixio configs/inputs and runs shape checks for the model tests."""

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        n_cls_tokens=1,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
        attn_implementation="eager",
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.n_cls_tokens = n_cls_tokens
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.attn_implementation = attn_implementation
        # in Pixio, the seq length equals the number of patches + class tokens
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + n_cls_tokens

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels) with random test tensors."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return PixioConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            n_cls_tokens=self.n_cls_tokens,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
            attn_implementation=self.attn_implementation,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        """Check the base model emits a (batch, seq, hidden) last hidden state."""
        model = PixioModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_backbone(self, config, pixel_values, labels):
        """Check backbone feature maps/channels under several config combinations."""
        model = PixioBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        expected_size = self.image_size // config.patch_size
        self.parent.assertListEqual(
            list(result.feature_maps[0].shape), [self.batch_size, model.channels[0], expected_size, expected_size]
        )
        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        # verify backbone works with out_features=None
        config.out_features = None
        model = PixioBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(
            list(result.feature_maps[0].shape), [self.batch_size, model.channels[0], expected_size, expected_size]
        )
        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        # verify backbone works with apply_layernorm=False and reshape_hidden_states=False
        config.apply_layernorm = False
        config.reshape_hidden_states = False
        model = PixioBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps (unreshaped: 3D (batch, seq, hidden))
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(
            list(result.feature_maps[0].shape), [self.batch_size, self.seq_length, self.hidden_size]
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        # Pixio ships no classification head, so this check is intentionally skipped.
        self.parent.skipTest(reason="Pixio currently exposes only the base model and backbone.")

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            pixel_values,
            labels,
        ) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class PixioModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as Pixio does not use input_ids, inputs_embeds,
    attention_mask and seq_length.
    """

    test_torch_exportable = True

    all_model_classes = (
        (
            PixioModel,
            PixioBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = {"image-feature-extraction": PixioModel} if is_torch_available() else {}

    # Pixio has no text embeddings to resize.
    test_resize_embeddings = False

    def setUp(self):
        self.model_tester = PixioModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PixioConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model_get_set_embeddings(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_batching_equivalence(self, atol=1e-4, rtol=1e-4):
        # Loosened tolerances versus the common-test defaults.
        super().test_batching_equivalence(atol=atol, rtol=rtol)
# We will verify our results on an image of cute cats
def prepare_img():
    """Load the local COCO cats fixture image used by the integration tests."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class PixioModelIntegrationTest(unittest.TestCase):
    """Slow integration test against a real pretrained Pixio checkpoint."""

    @cached_property
    def default_image_processor(self):
        # NOTE(review): repo id "LiheYoung/pixio-vith16" looks unusual for a Meta
        # model (the conversion script pushes under a different org) — confirm the hub id.
        return AutoImageProcessor.from_pretrained("LiheYoung/pixio-vith16") if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        model = PixioModel.from_pretrained("LiheYoung/pixio-vith16").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the last hidden states
        # seq length 264 = (256/16)^2 = 256 patches + 8 class tokens
        expected_shape = torch.Size((1, 264, 1280))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        expected_slice = torch.tensor(
            [[0.7420, -1.4220, 0.1580], [0.3938, -1.4386, 0.2878], [0.2898, -1.4012, 0.3667]],
            device=torch_device,
        )
        # valid the first three patch tokens (positions 8:11 skip the 8 class tokens)
        torch.testing.assert_close(outputs.last_hidden_state[0, 8:11, :3], expected_slice, rtol=1e-4, atol=1e-4)
@require_torch
class PixioBackboneTest(unittest.TestCase, BackboneTesterMixin):
    """Runs the common backbone test-suite against `PixioBackbone`."""

    all_model_classes = (PixioBackbone,) if is_torch_available() else ()
    config_class = PixioConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = PixioModelTester(self)
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/pixio/test_modeling_pixio.py",
"license": "Apache License 2.0",
"lines": 233,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:src/transformers/models/jais2/modular_jais2.py | # Copyright 2025 the HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch.nn as nn
from ...modeling_rope_utils import RopeParameters
from ...utils import auto_docstring, can_return_tuple
from ..llama.configuration_llama import LlamaConfig
from ..llama.modeling_llama import (
LlamaDecoderLayer,
LlamaForCausalLM,
LlamaModel,
LlamaPreTrainedModel,
)
from ..nemotron.modeling_nemotron import NemotronMLP
class Jais2Config(LlamaConfig):
    r"""
    This is the configuration class to store the configuration of a [`Jais2Model`]. It is used to instantiate a Jais2
    model according to the specified arguments, defining the model architecture.

    [inceptionai/Jais-2-8B-Chat](https://huggingface.co/inceptionai/Jais-2-8B-Chat).

    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 150272):
            Vocabulary size of the Jais2 model.
        hidden_size (`int`, *optional*, defaults to 3328):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 26624):
            Dimension of the MLP representations.
        num_hidden_layers (`int`, *optional*, defaults to 32):
            Number of hidden layers in the Transformer decoder.
        num_attention_heads (`int`, *optional*, defaults to 26):
            Number of attention heads for each attention layer.
        num_key_value_heads (`int`, *optional*):
            Number of key_value heads for Grouped Query Attention.
        hidden_act (`str`, *optional*, defaults to `"relu2"`):
            The non-linear activation function in the decoder.
        max_position_embeddings (`int`, *optional*, defaults to 8192):
            The maximum sequence length.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer.
        layer_norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon used by the normalization layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether to return last key/values attentions.
        pad_token_id (`int`, *optional*):
            Padding token id.
        bos_token_id (`int`, *optional*, defaults to 0):
            Beginning of stream token id.
        eos_token_id (`int`, *optional*, defaults to 150024):
            End of stream token id.
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether to tie weight embeddings.
        attention_bias (`bool`, *optional*, defaults to `True`):
            Whether to use a bias in the query, key, value and output projection layers.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        mlp_bias (`bool`, *optional*, defaults to `True`):
            Whether to use a bias in up_proj, down_proj and gate_proj layers.
        head_dim (`int`, *optional*):
            The attention head dimension.
        rope_parameters (`dict`, *optional*):
            The RoPE parameters.
    """

    model_type = "jais2"
    # Tensor-parallel plan; note there is no gate_proj entry since the Jais2 MLP
    # (Nemotron-style) only has up/down projections.
    base_model_tp_plan = {
        "layers.*.self_attn.q_proj": "colwise",
        "layers.*.self_attn.k_proj": "colwise",
        "layers.*.self_attn.v_proj": "colwise",
        "layers.*.self_attn.o_proj": "rowwise",
        "layers.*.mlp.up_proj": "colwise",
        "layers.*.mlp.down_proj": "rowwise",
    }

    def __init__(
        self,
        vocab_size: int | None = 150272,
        hidden_size: int | None = 3328,
        intermediate_size: int | None = 26624,
        num_hidden_layers: int | None = 32,
        num_attention_heads: int | None = 26,
        num_key_value_heads: int | None = None,
        hidden_act: str | None = "relu2",
        max_position_embeddings: int | None = 8192,
        initializer_range: float | None = 0.02,
        layer_norm_eps: float | None = 1e-5,
        use_cache: bool | None = True,
        pad_token_id: int | None = None,
        bos_token_id: int | None = 0,
        eos_token_id: int | None = 150024,
        tie_word_embeddings: bool | None = False,
        attention_bias: bool | None = True,
        attention_dropout: float | None = 0.0,
        mlp_bias: bool | None = True,
        head_dim: int | None = None,
        rope_parameters: RopeParameters | dict[str, RopeParameters] | None = None,
        **kwargs,
    ):
        super().__init__(
            vocab_size=vocab_size,
            hidden_size=hidden_size,
            intermediate_size=intermediate_size,
            num_hidden_layers=num_hidden_layers,
            num_attention_heads=num_attention_heads,
            num_key_value_heads=num_key_value_heads,
            hidden_act=hidden_act,
            max_position_embeddings=max_position_embeddings,
            initializer_range=initializer_range,
            use_cache=use_cache,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            attention_bias=attention_bias,
            attention_dropout=attention_dropout,
            mlp_bias=mlp_bias,
            head_dim=head_dim,
            rope_parameters=rope_parameters,
            **kwargs,
        )
        # Jais2 uses standard LayerNorm rather than Llama's RMSNorm, so it carries
        # its own epsilon attribute.
        self.layer_norm_eps = layer_norm_eps
        # Remove Llama-only attributes; the modular converter honors these deletions
        # when generating the final configuration file.
        del self.rms_norm_eps
        del self.pretraining_tp
class Jais2MLP(NemotronMLP):
    # Jais2 reuses the Nemotron MLP unchanged (up/down projections, no gate_proj).
    pass
class Jais2DecoderLayer(LlamaDecoderLayer):
    def __init__(self, config: Jais2Config, layer_idx: int):
        super().__init__(config, layer_idx)
        # Replace Llama's RMSNorm layers with standard LayerNorm.
        self.input_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.post_attention_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
class Jais2PreTrainedModel(LlamaPreTrainedModel):
    # Inherits Llama's weight initialization and feature-support flags unchanged.
    pass
class Jais2Model(LlamaModel):
    def __init__(self, config: Jais2Config):
        super().__init__(config)
        # Final norm is LayerNorm (not RMSNorm), matching the decoder layers.
        self.norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
class Jais2ForCausalLM(LlamaForCausalLM):
    @can_return_tuple
    @auto_docstring
    def forward(self, **super_kwargs):
        r"""
        Example:

        ```python
        >>> from transformers import AutoTokenizer, Jais2ForCausalLM

        >>> model = Jais2ForCausalLM.from_pretrained("inceptionai/Jais-2-8B-Chat")
        >>> tokenizer = AutoTokenizer.from_pretrained("inceptionai/Jais-2-8B-Chat")

        >>> prompt = "Hey, are you conscious? Can you talk to me?"
        >>> inputs = tokenizer(prompt, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
        ```"""
        # Behavior is identical to Llama's causal-LM forward; this override exists
        # only to attach the Jais2-specific docstring example.
        return super().forward(**super_kwargs)
# Names exported to the auto-generated `modeling_jais2.py` / public API.
__all__ = [
    "Jais2Config",
    "Jais2Model",
    "Jais2ForCausalLM",
    "Jais2PreTrainedModel",
]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/jais2/modular_jais2.py",
"license": "Apache License 2.0",
"lines": 168,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:tests/models/jais2/test_modeling_jais2.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch Jais2 model."""
import unittest
from transformers import AutoTokenizer, is_torch_available
from transformers.testing_utils import (
Expectations,
cleanup,
require_deterministic_for_xpu,
require_torch,
require_torch_accelerator,
slow,
torch_device,
)
from ...causal_lm_tester import CausalLMModelTest, CausalLMModelTester
if is_torch_available():
import torch
from transformers import (
Jais2Config,
Jais2ForCausalLM,
Jais2Model,
)
class Jais2ModelTester(CausalLMModelTester):
    """Builds tiny Jais2 configs/models for the shared causal-LM test suite."""

    if is_torch_available():
        config_class = Jais2Config
        base_model_class = Jais2Model
        causal_lm_class = Jais2ForCausalLM

    # Jais2 uses the squared-ReLU activation instead of the tester default.
    config_overrides = {
        "hidden_act": "relu2",
    }
@require_torch
class Jais2ModelTest(CausalLMModelTest, unittest.TestCase):
    """Standard model-suite tests for Jais2 (forward, save/load, generation plumbing)."""

    model_tester_class = Jais2ModelTester
    all_model_classes = (
        (
            Jais2Model,
            Jais2ForCausalLM,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (Jais2ForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": Jais2Model,
            "text-generation": Jais2ForCausalLM,
        }
        if is_torch_available()
        else {}
    )
@slow
@require_torch_accelerator
class Jais2IntegrationTest(unittest.TestCase):
    """Slow integration tests against the released inceptionai/Jais-2-8B-Chat checkpoint."""

    def setUp(self):
        # Free accelerator memory left over from previous tests.
        cleanup(torch_device, gc_collect=True)

    def tearDown(self):
        cleanup(torch_device, gc_collect=True)

    @require_deterministic_for_xpu
    def test_model_logits(self):
        model_id = "inceptionai/Jais-2-8B-Chat"
        # Two sequences; the first is left-padded with token id 0, masked out below.
        dummy_input = torch.LongTensor([[0, 0, 0, 0, 0, 0, 1, 2, 3], [1, 1, 2, 3, 4, 5, 6, 7, 8]]).to(torch_device)
        attention_mask = dummy_input.ne(0).to(torch.long)
        model = Jais2ForCausalLM.from_pretrained(model_id, torch_dtype=torch.float16, device_map="auto")
        with torch.no_grad():
            logits = model(dummy_input, attention_mask=attention_mask).logits
        logits = logits.float()
        # fmt: off
        EXPECTED_LOGITS_BATCH0 = Expectations(
            {
                ("cuda", None): [-0.9751, -1.0918, -0.9600, -0.9526, -0.9600, -0.9551, -0.9624, -0.9644, -0.9644, -0.9600, -0.9561, -0.9658, -0.9585, -0.9688, -0.9663],
                ("xpu", 3): [-0.9692, -1.0859, -0.9541, -0.9468, -0.9546, -0.9492, -0.9570, -0.9585, -0.9585, -0.9541, -0.9507, -0.9604, -0.9526, -0.9634, -0.9609],
            }
        ).get_expectation()
        EXPECTED_LOGITS_BATCH1 = Expectations(
            {
                ("cuda", None): [-1.5361, -1.6328, -1.5283, -1.5225, -1.5293, -1.5244, -1.5322, -1.5332, -1.5332, -1.5293, -1.5254, -1.5352, -1.5273, -1.5381, -1.5361],
                ("xpu", 3): [-1.5342, -1.6318, -1.5264, -1.5205, -1.5273, -1.5225, -1.5303, -1.5312, -1.5312, -1.5273, -1.5234, -1.5332, -1.5254, -1.5361, -1.5342],
            }
        ).get_expectation()
        # fmt: on
        # Compare the first 15 vocab logits at the last position of each batch element.
        torch.testing.assert_close(
            logits[0, -1, :15],
            torch.tensor(EXPECTED_LOGITS_BATCH0, device=torch_device),
            rtol=1e-3,
            atol=1e-3,
        )
        torch.testing.assert_close(
            logits[1, -1, :15],
            torch.tensor(EXPECTED_LOGITS_BATCH1, device=torch_device),
            rtol=1e-3,
            atol=1e-3,
        )

    def test_model_generation(self):
        tokenizer = AutoTokenizer.from_pretrained("inceptionai/Jais-2-8B-Chat")
        model = Jais2ForCausalLM.from_pretrained(
            "inceptionai/Jais-2-8B-Chat", torch_dtype=torch.float16, device_map="auto"
        )
        input_text = "Simply put, the theory of relativity states that"
        model_inputs = tokenizer(input_text, return_tensors="pt").to(model.device)
        # The tokenizer may emit token_type_ids that the model's forward does not accept.
        model_inputs.pop("token_type_ids", None)
        # Greedy decoding so the output is deterministic.
        generated_ids = model.generate(**model_inputs, max_new_tokens=32, do_sample=False)
        generated_text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
        EXPECTED_TEXT = "Simply put, the theory of relativity states that the laws of physics are the same for all non-accelerating observers, and that the speed of light in a vacuum is the same for all observers," # fmt: skip
        self.assertEqual(generated_text, EXPECTED_TEXT)
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/jais2/test_modeling_jais2.py",
"license": "Apache License 2.0",
"lines": 115,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:src/transformers/models/pe_audio/configuration_pe_audio.py | # Copyright 2025 the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ...configuration_utils import PreTrainedConfig, PretrainedConfig
from ...modeling_rope_utils import RopeParameters
from ...utils import logging
from ..auto import CONFIG_MAPPING, AutoConfig
logger = logging.get_logger(__name__)
class PeAudioEncoderConfig(PreTrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`PeAudioEncoder`]. It is used to instantiate a
    PeAudioEncoder model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of pe-av-large.
    e.g. [facebook/pe-av-large](https://huggingface.co/facebook/pe-av-large)

    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.

    Args:
        dac_config (`Union[PreTrainedConfig, dict]`, *optional*):
            Configuration for the DAC audio encoder used to tokenize the raw audio inputs. If a dictionary is passed, it
            will be used to instantiate a [`~transformers.DacConfig`] with default DAC hyperparameters.
        hidden_size (`int`, *optional*, defaults to 1792):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 4800):
            Dimension of the feedforward layers in the Transformer blocks.
        num_hidden_layers (`int`, *optional*, defaults to 6):
            Number of Transformer encoder blocks.
        num_attention_heads (`int`, *optional*, defaults to 14):
            Number of attention heads used in each attention layer.
        num_key_value_heads (`int`, *optional*):
            Number of key and value heads for grouped-query attention. If unset, this defaults to `num_attention_heads`.
        head_dim (`int`, *optional*, defaults to 128):
            Dimension of each attention head for query, key, and value projections.
        hidden_act (`str`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the Transformer blocks.
        max_position_embeddings (`int`, *optional*, defaults to 10000):
            Maximum sequence length supported by the rotary position embeddings.
        initializer_range (`float`, *optional*, defaults to 0.02):
            Standard deviation of the truncated normal initializer for weight matrices.
        rms_norm_eps (`float`, *optional*, defaults to 1e-05):
            Epsilon used by the RMS normalization layers.
        rope_parameters (`Union[RopeParameters, dict]`, *optional*, defaults to `{'rope_theta': 20000}`):
            Parameters for the rotary position embeddings, such as the base `rope_theta`.
        attention_bias (`bool`, *optional*, defaults to `False`):
            Whether to use bias terms in the query, key, value, and output projections.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            Dropout ratio applied to attention probabilities.

    ```python
    >>> from transformers import PeAudioEncoder, PeAudioEncoderConfig

    >>> # Initializing a PeAudioEncoder style configuration
    >>> configuration = PeAudioEncoderConfig()

    >>> # Initializing a model from the pe-av-large style configuration
    >>> model = PeAudioEncoder(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "pe_audio_encoder"
    sub_configs = {"dac_config": AutoConfig}
    base_config_key = "audio_video_config"
    # Defaults applied to `dac_config` when none (or only a partial dict) is provided.
    _default_dac_config_kwargs = {
        "downsampling_ratios": [2, 8, 10, 12],
        "encoder_hidden_size": 64,
        "codebook_dim": 128,
    }

    def __init__(
        self,
        dac_config: dict | PreTrainedConfig | None = None,
        hidden_size: int | None = 1792,
        intermediate_size: int | None = 4800,
        num_hidden_layers: int | None = 6,
        num_attention_heads: int | None = 14,
        num_key_value_heads: int | None = None,
        head_dim: int | None = 128,
        hidden_act: str | None = "silu",
        max_position_embeddings: int | None = 10000,
        initializer_range: float | None = 0.02,
        rms_norm_eps: float | None = 1e-5,
        rope_parameters: RopeParameters | dict | None = {"rope_theta": 20000},
        attention_bias: bool | None = False,
        attention_dropout: float | None = 0.0,
        **kwargs,
    ):
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.head_dim = head_dim
        self.hidden_act = hidden_act
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        # Fix: the default value in the signature is a single shared mutable dict.
        # Storing it by reference would let one config instance mutate the default
        # seen by every other instance, so copy dict-valued inputs before storing.
        self.rope_parameters = dict(rope_parameters) if isinstance(rope_parameters, dict) else rope_parameters
        self.attention_bias = attention_bias
        self.attention_dropout = attention_dropout
        if isinstance(dac_config, dict):
            # User-supplied keys override the PeAudio DAC defaults.
            dac_config["model_type"] = dac_config.get("model_type", "dac")
            dac_config = CONFIG_MAPPING[dac_config["model_type"]](**{**self._default_dac_config_kwargs, **dac_config})
        elif dac_config is None:
            dac_config = CONFIG_MAPPING["dac"](**self._default_dac_config_kwargs)
        self.dac_config = dac_config
        super().__init__(**kwargs)
class PeAudioConfig(PreTrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`PeAudioModel`]. It is used to instantiate a
    PeAudioModel model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of pe-av-large.
    e.g. [facebook/pe-av-large](https://huggingface.co/facebook/pe-av-large)

    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.

    Args:
        text_config (`dict` or `PreTrainedConfig`, *optional*):
            Configuration for the text model component.
        audio_config (`dict` or `PreTrainedConfig`, *optional*):
            Configuration for the audio encoder component.

    ```python
    >>> from transformers import PeAudioModel, PeAudioConfig

    >>> # Initializing a PeAudioModel style configuration
    >>> configuration = PeAudioConfig()

    >>> # Initializing a model from the pe-av-large style configuration
    >>> model = PeAudioModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "pe_audio"
    sub_configs = {"text_config": AutoConfig, "audio_config": PeAudioEncoderConfig}
    base_config_key = "audio_video_config"
    # Defaults for the ModernBERT text encoder when no `text_config` is provided.
    _default_text_config_kwargs = {
        "model_type": "modernbert",
        "hidden_size": 1024,
        "intermediate_size": 2624,
        "num_hidden_layers": 22,
        "num_attention_heads": 16,
    }

    def __init__(
        self,
        text_config=None,
        audio_config=None,
        **kwargs,
    ):
        # Fix: inherit from `PreTrainedConfig` (as `PeAudioEncoderConfig` does and as the
        # docstring states) instead of the legacy `PretrainedConfig` alias — behavior is
        # identical, the naming is now consistent.
        if isinstance(text_config, dict):
            # User-supplied keys override the ModernBERT defaults.
            text_config["model_type"] = text_config.get("model_type", "modernbert")
            text_config = CONFIG_MAPPING[text_config["model_type"]](
                **{**self._default_text_config_kwargs, **text_config}
            )
        elif text_config is None:
            text_config = CONFIG_MAPPING["modernbert"](**self._default_text_config_kwargs)
        if isinstance(audio_config, dict):
            audio_config = PeAudioEncoderConfig(**audio_config)
        elif audio_config is None:
            audio_config = PeAudioEncoderConfig()
        self.text_config = text_config
        self.audio_config = audio_config
        super().__init__(**kwargs)


__all__ = ["PeAudioEncoderConfig", "PeAudioConfig"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/pe_audio/configuration_pe_audio.py",
"license": "Apache License 2.0",
"lines": 167,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/pe_audio/feature_extraction_pe_audio.py | # Copyright 2025 the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import load_audio
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)
class PeAudioFeatureExtractor(SequenceFeatureExtractor):
    r"""
    Constructs a PeAudioFeatureExtractor feature extractor.

    This feature extractor inherits from [`~feature_extraction_sequence_utils.SequenceFeatureExtractor`] which contains
    most of the main methods. Users should refer to this superclass for more information regarding those methods.

    Args:
        feature_size (`int`, *optional*, defaults to 1):
            The feature dimension of the extracted features. Use 1 for mono, 2 for stereo.
        sampling_rate (`int`, *optional*, defaults to 48000):
            The sampling rate at which the audio waveform should be digitalized, expressed in hertz (Hz).
        padding_value (`float`, *optional*, defaults to 0.0):
            The value that is used for padding.
        hop_length (`int`, *optional*, defaults to 1920):
            Overlap length between successive windows.
    """

    model_input_names = ["input_values"]

    def __init__(
        self,
        feature_size: int = 1,
        sampling_rate: int = 48_000,
        padding_value: float = 0.0,
        hop_length: int = 1920,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.hop_length = hop_length

    def _reflect_pad(self, wav):
        # Reflect-pad the 1-D waveform on the right so its length is a multiple of
        # `hop_length` (a no-op when it already is).
        if len(wav) % self.hop_length == 0:
            return wav
        p1d = (0, self.hop_length - (len(wav) % self.hop_length))
        return np.pad(wav, p1d, "reflect")

    def __call__(
        self,
        raw_audio: np.ndarray | list[float] | list[np.ndarray] | list[list[float]] | str | list[str],
        padding: bool | str | PaddingStrategy | None = None,
        truncation: bool | None = False,
        max_length: int | None = None,
        return_tensors: str | TensorType | None = None,
        sampling_rate: int | None = None,
    ) -> BatchFeature:
        """
        Featurize one or several audio sequence(s): optionally load from file paths,
        reflect-pad each example to a multiple of `hop_length`, then batch-pad and
        reshape to one `(channels, length)` array per example.
        """
        from_file = False
        # Accept file path(s); load them at the extractor's sampling rate.
        if isinstance(raw_audio, str):
            raw_audio = [raw_audio]
        if isinstance(raw_audio, (list, tuple)) and isinstance(raw_audio[0], str):
            loaded = []
            for audio_file in raw_audio:
                loaded.append(load_audio(audio_file, self.sampling_rate))
            raw_audio = loaded
            from_file = True
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        elif not from_file:
            # Files loaded above are already resampled, so no warning is needed then.
            logger.warning(
                f"It is strongly recommended to pass the `sampling_rate` argument to `{self.__class__.__name__}()`. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )
        if padding and truncation:
            raise ValueError("Both padding and truncation were set. Make sure you only set one.")
        elif padding is None:
            # by default let's pad the inputs
            padding = True
        is_batched = bool(
            isinstance(raw_audio, (list, tuple)) and (isinstance(raw_audio[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_audio = [np.asarray(audio, dtype=np.float32).T for audio in raw_audio]
        elif not is_batched and not isinstance(raw_audio, np.ndarray):
            raw_audio = np.asarray(raw_audio, dtype=np.float32)
        elif isinstance(raw_audio, np.ndarray) and raw_audio.dtype is np.dtype(np.float64):
            raw_audio = raw_audio.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_audio = [np.asarray(raw_audio).T]
        # Reflect-pad each example so downstream convolutions see whole hops.
        if isinstance(raw_audio, list):
            raw_audio = [self._reflect_pad(x) for x in raw_audio]
        else:
            raw_audio = self._reflect_pad(raw_audio)
        # verify inputs are valid
        for example in raw_audio:
            if example.ndim > 2:
                raise ValueError(f"Expected input shape (channels, length) but got shape {example.shape}")
            if self.feature_size == 1 and example.ndim != 1:
                raise ValueError(f"Expected mono audio but example has {example.shape[-1]} channels")
            if self.feature_size == 2:
                raise ValueError("Stereo audio isn't supported for now")
        input_values = BatchFeature({"input_values": raw_audio})
        # normal padding on batch
        padded_inputs = self.pad(
            input_values,
            max_length=max_length,
            truncation=truncation,
            padding=padding,
            return_attention_mask=padding,
            pad_to_multiple_of=self.hop_length,
        )
        if padding:
            # Expose the mask under the name the PeAudio model expects.
            padded_inputs["padding_mask"] = padded_inputs.pop("attention_mask")
        if padding:
            # Insert the channel axis: (batch, length) -> (batch, 1, length).
            padded_inputs.input_values = padded_inputs.input_values[:, np.newaxis, :]
        input_values = []
        for example in padded_inputs.pop("input_values"):
            if self.feature_size == 1:
                example = example[..., None]
            input_values.append(example.T)
        padded_inputs["input_values"] = input_values
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
        return padded_inputs


__all__ = ["PeAudioFeatureExtractor"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/pe_audio/feature_extraction_pe_audio.py",
"license": "Apache License 2.0",
"lines": 133,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/pe_audio/modular_pe_audio.py | # Copyright 2025 the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import Any
import torch
import torch.nn as nn
import torch.nn.functional as F
from ... import initialization as init
from ...configuration_utils import PreTrainedConfig
from ...masking_utils import create_bidirectional_mask
from ...modeling_outputs import BaseModelOutputWithPooling, MaskedLMOutput
from ...utils import ModelOutput, auto_docstring, can_return_tuple
from ...utils.generic import merge_with_config_defaults
from ...utils.output_capturing import capture_outputs
from ..auto import AutoModel
from ..dac.modeling_dac import DacEncoder, DacEncoderBlock, Snake1d
from ..pe_audio_video.modeling_pe_audio_video import (
PeAudioVideoContrastiveHead,
PeAudioVideoEncoder,
PeAudioVideoPreTrainedModel,
)
from .configuration_pe_audio import PeAudioConfig, PeAudioEncoderConfig
class PeAudioDacEncoderBlock(DacEncoderBlock):
    # Identical to the DAC encoder block; re-declared so the modular converter
    # generates a PeAudio-local class.
    def __init__(self, config: PreTrainedConfig, stride: int = 1, stride_index: int = 1):
        super().__init__(config, stride=stride, stride_index=stride_index)


class PeAudioDacEncoder(DacEncoder):
    # Identical to the DAC encoder; re-declared for the modular converter.
    def __init__(self, config: PreTrainedConfig):
        super().__init__(config)
class PeAudioEncoderEmbedder(nn.Module):
    """Maps raw waveforms to transformer input embeddings via a DAC encoder + bottleneck."""

    def __init__(self, config: PeAudioEncoderConfig):
        super().__init__()
        self.dac_encoder = PeAudioDacEncoder(config.dac_config)
        # 1x1 conv projecting DAC hidden size down to the codebook dimension.
        self.bottleneck = nn.Conv1d(config.dac_config.hidden_size, config.dac_config.codebook_dim, 1)
        self.data_proj = nn.Linear(config.dac_config.codebook_dim, config.hidden_size)
        self.config = config

    def forward(
        self,
        input_values: torch.Tensor,
        padding_mask: torch.Tensor | None = None,
    ) -> tuple[torch.Tensor, torch.Tensor | None]:
        # The DAC path runs without gradients; cudnn is disabled for these convs
        # (NOTE(review): presumably to keep the frozen codec deterministic — confirm).
        with torch.no_grad(), torch.backends.cudnn.flags(enabled=False):
            hidden_states = self.dac_encoder(input_values)
            hidden_states = self.bottleneck(hidden_states)
        # (batch, dim, frames) -> (batch, frames, dim)
        codec_features = hidden_states.transpose(1, 2)
        inputs_embeds = self.data_proj(codec_features)
        if padding_mask is not None:
            # Downsample the sample-level mask to the DAC frame rate.
            padding_mask = padding_mask[:, :: self.config.dac_config.hop_length]
        return inputs_embeds, padding_mask
class PeAudioContrastiveHead(PeAudioVideoContrastiveHead): ...
class PeAudioPreTrainedModel(PeAudioVideoPreTrainedModel):
    base_model_prefix = "audio_model"

    @torch.no_grad()
    def _init_weights(self, module):
        """Extend the base init with rules for the DAC-specific modules."""
        super()._init_weights(module)
        if isinstance(module, nn.Conv1d):
            init.trunc_normal_(module.weight, std=0.02)
            init.constant_(module.bias, 0)
        elif isinstance(module, Snake1d):
            # Snake activation: alpha initialized to one.
            init.ones_(module.alpha)
        elif isinstance(module, nn.ConvTranspose1d):
            # Fall back to PyTorch's default initialization.
            module.reset_parameters()
        elif isinstance(module, nn.Embedding):
            init.normal_(module.weight, mean=0.0, std=0.02)
@dataclass
@auto_docstring(
    custom_intro="""
    Class for outputs of [`PeAudioEncoder`].
    """
)
class PeAudioEncoderOutput(BaseModelOutputWithPooling):
    # Frame-level codec features (not populated by `PeAudioEncoder.forward` yet).
    codec_features: torch.FloatTensor | None = None
    # Frame-rate padding mask aligned with the encoder outputs.
    output_mask: tuple[torch.FloatTensor] | None = None
# TODO: add the capture of codec features?
@auto_docstring(
    custom_intro="""
    The PeAudio Encoder model.
    """
)
class PeAudioEncoder(PeAudioVideoEncoder):
    base_model_prefix = "audio_model.audio_encoder"

    @can_return_tuple
    @merge_with_config_defaults
    @capture_outputs
    def forward(
        self,
        input_values: torch.Tensor,
        padding_mask: torch.Tensor | None = None,
        **kwargs,
    ) -> tuple | BaseModelOutputWithPooling:
        # Waveform -> codec-frame embeddings, then -> patch embeddings + attention mask.
        inputs_embeds, padding_mask = self.embedder(input_values, padding_mask=padding_mask)
        inputs_embeds, attention_mask = self.patch_embedder(inputs_embeds, padding_mask=padding_mask)
        if attention_mask is not None:
            attention_mask = create_bidirectional_mask(
                config=self.config,
                inputs_embeds=inputs_embeds,
                attention_mask=attention_mask,
            )
        # Rotary position embeddings over sequential positions 0..seq_len-1.
        position_ids = torch.arange(inputs_embeds.shape[1], device=inputs_embeds.device).unsqueeze(0)
        position_embeddings = self.rotary_emb(inputs_embeds, position_ids)
        hidden_states = inputs_embeds
        for encoder_layer in self.layers[: self.config.num_hidden_layers]:
            hidden_states = encoder_layer(
                hidden_states,
                attention_mask=attention_mask,
                position_embeddings=position_embeddings,
                **kwargs,
            )
        hidden_states = self.norm(hidden_states)
        hidden_states = self.output(hidden_states)
        # Token 0 is the pooled summary token; the remaining tokens are the sequence.
        return PeAudioEncoderOutput(
            last_hidden_state=hidden_states[:, 1:],
            pooler_output=hidden_states[:, 0],
            output_mask=padding_mask,
        )
# TODO: not sure about the typing for text_model_output
@dataclass
# @auto_docstring
class PeAudioOutput(ModelOutput):
    # Contrastive loss, present only when `return_loss=True`.
    loss: torch.FloatTensor | None = None
    # Audio-text similarity logits (scaled and biased).
    logits_audio_text: torch.FloatTensor | None = None
    # Text embeddings projected into the shared audio-text space.
    text_audio_embeds: torch.FloatTensor | None = None
    # Audio embeddings projected into the shared audio-text space.
    audio_embeds: torch.FloatTensor | None = None
    text_outputs: BaseModelOutputWithPooling = None
    audio_outputs: BaseModelOutputWithPooling = None

    def to_tuple(self) -> tuple[Any]:
        # Recursively expand the nested model outputs into tuples as well.
        return tuple(
            self[k] if k not in ["text_outputs", "audio_outputs"] else getattr(self, k).to_tuple() for k in self.keys()
        )
class PeAudioModel(PeAudioPreTrainedModel):
    """Contrastive audio-text model: a text encoder and the PeAudio audio encoder,
    each projected into a shared embedding space."""

    def __init__(self, config: PeAudioConfig):
        super().__init__(config)
        self.text_model = AutoModel.from_config(config.text_config)
        self.audio_encoder = PeAudioEncoder(config.audio_config)
        # Projection heads mapping each modality into the shared embedding space.
        self.text_audio_head = PeAudioContrastiveHead(config.text_config.hidden_size, config.text_config.hidden_size)
        self.audio_head = PeAudioContrastiveHead(config.audio_config.hidden_size, config.text_config.hidden_size)
        # Learnable scale and bias applied to the similarity logits.
        self.text_audio_logit_scale = nn.Parameter(torch.zeros(1))
        self.text_audio_logit_bias = nn.Parameter(torch.zeros(1))
        self.post_init()

    def get_text_audio_embeds(self, input_ids, attention_mask=None):
        """Return projected text embeddings (first-token summary of the last hidden state)."""
        # TODO: naming can be improved here...
        text_outputs: MaskedLMOutput = self.text_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            return_dict=True,
        )
        text_audio_embeds = text_outputs.hidden_states[-1][:, 0]
        return self.text_audio_head(text_audio_embeds)

    def get_audio_embeds(self, input_values, padding_mask=None):
        """Return projected pooled audio embeddings."""
        audio_outputs: BaseModelOutputWithPooling = self.audio_encoder(
            input_values=input_values,
            padding_mask=padding_mask,
            return_dict=True,
        )
        audio_embeds = audio_outputs.pooler_output
        return self.audio_head(audio_embeds)

    @can_return_tuple
    def forward(
        self,
        input_ids: torch.Tensor,
        input_values: torch.Tensor,
        attention_mask: torch.Tensor | None = None,
        padding_mask: torch.Tensor | None = None,
        return_loss: bool | None = None,
        **kwargs,
    ) -> PeAudioOutput:
        audio_outputs: BaseModelOutputWithPooling = self.audio_encoder(
            input_values=input_values, padding_mask=padding_mask, **kwargs
        )
        # The text branch reads `hidden_states[-1]` below, so they must be returned.
        kwargs["output_hidden_states"] = True
        text_outputs: MaskedLMOutput = self.text_model(input_ids=input_ids, attention_mask=attention_mask, **kwargs)
        audio_embeds = audio_outputs.pooler_output
        audio_embeds = self.audio_head(audio_embeds)
        text_audio_embeds = text_outputs.hidden_states[-1][:, 0]
        text_audio_embeds = self.text_audio_head(text_audio_embeds)
        # (n_audio, n_text) similarity matrix, scaled and biased.
        logits_audio_text = audio_embeds @ text_audio_embeds.T
        logits_audio_text = logits_audio_text * self.text_audio_logit_scale.to(
            logits_audio_text.device
        ) + self.text_audio_logit_bias.to(logits_audio_text.device)
        loss = None
        if return_loss:
            # Sigmoid loss over matched (diagonal) audio-text pairs.
            labels = torch.eye(logits_audio_text.shape[0], device=logits_audio_text.device)
            loss = -F.logsigmoid(labels * logits_audio_text).sum() / logits_audio_text.shape[0]
        return PeAudioOutput(
            logits_audio_text=logits_audio_text,
            text_audio_embeds=text_audio_embeds,
            audio_embeds=audio_embeds,
            text_outputs=text_outputs,
            audio_outputs=audio_outputs,
            loss=loss,
        )
# TODO: underline in documentation that logits output shape is
# 1. Model: (n_audio, n_text)
# 2. Frame-level: (n_audio, n_text, n_frames)
class PeAudioFrameLevelModel(PeAudioModel):
    """Frame-level variant of `PeAudioModel`.

    Instead of pooling the audio encoder output, this keeps per-frame hidden
    states so the logits carry a trailing frame dimension:
    (n_audio, n_text, n_frames) rather than (n_audio, n_text).
    """

    def get_audio_embeds(self, input_values, padding_mask=None):
        """Return per-frame audio embeddings of shape (batch, n_frames, dim)."""
        audio_outputs: BaseModelOutputWithPooling = self.audio_encoder(
            input_values=input_values,
            padding_mask=padding_mask,
            return_dict=True,
        )
        audio_embeds = audio_outputs.last_hidden_state
        audio_embeds = self.audio_head(audio_embeds)
        return audio_embeds

    @can_return_tuple
    def forward(
        self,
        input_ids: torch.Tensor,
        input_values: torch.Tensor,
        attention_mask: torch.Tensor | None = None,
        padding_mask: torch.Tensor | None = None,
        return_loss: bool | None = None,
        **kwargs,
    ) -> PeAudioOutput:
        """Frame-level contrastive forward pass; logits are
        (n_audio, n_text, n_frames)."""
        audio_outputs: BaseModelOutputWithPooling = self.audio_encoder(
            input_values=input_values, padding_mask=padding_mask, **kwargs
        )
        # The text tower must return hidden states: the last layer's position-0
        # token is used as the sentence embedding below.
        kwargs["output_hidden_states"] = True
        text_outputs: MaskedLMOutput = self.text_model(input_ids=input_ids, attention_mask=attention_mask, **kwargs)
        audio_embeds = audio_outputs.last_hidden_state
        audio_embeds = self.audio_head(audio_embeds)
        text_audio_embeds = text_outputs.hidden_states[-1][:, 0]
        text_audio_embeds = self.text_audio_head(text_audio_embeds)
        # (n_audio, n_frames, dim) @ (dim, n_text) -> (n_audio, n_frames, n_text),
        # then transpose to (n_audio, n_text, n_frames).
        logits_audio_text = (audio_embeds @ text_audio_embeds.T).transpose(1, 2)
        # Move scale/bias to the logits device, consistent with PeAudioModel.forward.
        logits_audio_text = logits_audio_text * self.text_audio_logit_scale.to(
            logits_audio_text.device
        ) + self.text_audio_logit_bias.to(logits_audio_text.device)
        loss = None
        if return_loss:
            # Pairwise identity labels, broadcast over the trailing frame axis.
            # A bare (n, n) eye cannot broadcast against (n_audio, n_text, n_frames)
            # unless n_frames == n, so the trailing unsqueeze is required.
            labels = torch.eye(logits_audio_text.shape[0], device=logits_audio_text.device).unsqueeze(-1)
            loss = -F.logsigmoid(labels * logits_audio_text).sum() / logits_audio_text.shape[0]
        return PeAudioOutput(
            logits_audio_text=logits_audio_text,
            text_audio_embeds=text_audio_embeds,
            audio_embeds=audio_embeds,
            text_outputs=text_outputs,
            audio_outputs=audio_outputs,
            loss=loss,
        )
# Public API of this module.
__all__ = [
    "PeAudioFrameLevelModel",
    "PeAudioModel",
    "PeAudioEncoder",
]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/pe_audio/modular_pe_audio.py",
"license": "Apache License 2.0",
"lines": 252,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/pe_audio/processing_pe_audio.py | # Copyright 2025 the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ...processing_utils import ProcessorMixin
class PeAudioProcessor(ProcessorMixin):
    """Bundles a PeAudio feature extractor and a tokenizer into one processor.

    All call/save/load behavior is inherited from `ProcessorMixin`; this class
    only declares which sub-components make up the processor.
    """

    # Sub-components managed by ProcessorMixin (order defines __init__ args).
    attributes = ["feature_extractor", "tokenizer"]
    feature_extractor_class = "PeAudioFeatureExtractor"
    tokenizer_class = "AutoTokenizer"
__all__ = ["PeAudioProcessor"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/pe_audio/processing_pe_audio.py",
"license": "Apache License 2.0",
"lines": 19,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/pe_audio_video/configuration_pe_audio_video.py | # Copyright 2025 the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ...configuration_utils import PreTrainedConfig, PretrainedConfig
from ...modeling_rope_utils import RopeParameters
from ...utils import logging
from ..auto import CONFIG_MAPPING, AutoConfig
logger = logging.get_logger(__name__)
class PeAudioVideoEncoderConfig(PreTrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`PeAudioVideoEncoderModel`]. It is used to instantiate a
    PeAudioVideoEncoder model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of pe-av-large.
    e.g. [facebook/pe-av-large](https://huggingface.co/facebook/pe-av-large)

    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.

    Args:
        audio_config (`Union[PreTrainedConfig, dict]`, *optional*):
            Configuration for the audio encoder. If a dictionary is provided, it is used to instantiate
            [`~transformers.PeAudioEncoderConfig`].
        video_config (`Union[PreTrainedConfig, dict]`, *optional*):
            Configuration for the video encoder. If a dictionary is provided, it is used to instantiate
            [`~transformers.PeVideoEncoderConfig`].
        hidden_size (`int`, *optional*, defaults to 1792):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 4800):
            Dimension of the feedforward layers in the Transformer blocks.
        num_hidden_layers (`int`, *optional*, defaults to 6):
            Number of Transformer encoder blocks.
        num_attention_heads (`int`, *optional*, defaults to 14):
            Number of attention heads used in each attention layer.
        num_key_value_heads (`int`, *optional*):
            Number of key and value heads for grouped-query attention. If unset, this defaults to `num_attention_heads`.
        head_dim (`int`, *optional*, defaults to 128):
            Dimension of each attention head for query, key, and value projections.
        hidden_act (`str`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the Transformer blocks.
        max_position_embeddings (`int`, *optional*, defaults to 10000):
            Maximum sequence length supported by the rotary position embeddings.
        initializer_range (`float`, *optional*, defaults to 0.02):
            Standard deviation of the truncated normal initializer for weight matrices.
        rms_norm_eps (`float`, *optional*, defaults to 1e-05):
            Epsilon used by the RMS normalization layers.
        rope_parameters (`Union[RopeParameters, dict]`, *optional*, defaults to `{'rope_theta': 20000}`):
            Parameters for the rotary position embeddings, such as the base `rope_theta`.
        attention_bias (`bool`, *optional*, defaults to `False`):
            Whether to use bias terms in the query, key, value, and output projections.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            Dropout ratio applied to attention probabilities.

    ```python
    >>> from transformers import PeAudioVideoEncoder, PeAudioVideoEncoderConfig

    >>> # Initializing a PeAudioVideoEncoder style configuration
    >>> configuration = PeAudioVideoEncoderConfig()

    >>> # Initializing a model from the pe-av-large style configuration
    >>> model = PeAudioVideoEncoder(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "pe_audio_video_encoder"
    base_config_key = "audio_video_config"
    sub_configs = {"audio_config": AutoConfig, "video_config": AutoConfig}

    def __init__(
        self,
        audio_config: dict | PreTrainedConfig | None = None,
        video_config: dict | PreTrainedConfig | None = None,
        hidden_size: int | None = 1792,
        intermediate_size: int | None = 4800,
        num_hidden_layers: int | None = 6,
        num_attention_heads: int | None = 14,
        num_key_value_heads: int | None = None,
        head_dim: int | None = 128,
        hidden_act: str | None = "silu",
        max_position_embeddings: int | None = 10000,
        initializer_range: float | None = 0.02,
        rms_norm_eps: float | None = 1e-5,
        rope_parameters: RopeParameters | dict | None = None,
        attention_bias: bool | None = False,
        attention_dropout: float | None = 0.0,
        **kwargs,
    ):
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.head_dim = head_dim
        self.hidden_act = hidden_act
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        # Build a fresh dict per instance: using a mutable dict as the
        # parameter default would share one dict across every config.
        self.rope_parameters = {"rope_theta": 20000} if rope_parameters is None else rope_parameters
        self.attention_bias = attention_bias
        self.attention_dropout = attention_dropout
        if isinstance(audio_config, dict):
            # Copy before filling in the default model_type so the caller's
            # dict is not mutated in place.
            audio_config = {"model_type": "pe_audio_encoder", **audio_config}
            audio_config = CONFIG_MAPPING[audio_config["model_type"]](**audio_config)
        elif audio_config is None:
            audio_config = CONFIG_MAPPING["pe_audio_encoder"]()
        if isinstance(video_config, dict):
            video_config = {"model_type": "pe_video_encoder", **video_config}
            video_config = CONFIG_MAPPING[video_config["model_type"]](**video_config)
        elif video_config is None:
            video_config = CONFIG_MAPPING["pe_video_encoder"]()
        self.audio_config = audio_config
        self.video_config = video_config
        super().__init__(**kwargs)
# NOTE: inherits PreTrainedConfig to match the sibling PeAudioVideoEncoderConfig
# and this class's own docstring (PretrainedConfig is the legacy spelling).
class PeAudioVideoConfig(PreTrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`PeAudioVideoModel`]. It is used to instantiate a
    PeAudioVideoModel model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of pe-av-large.
    e.g. [facebook/pe-av-large](https://huggingface.co/facebook/pe-av-large)

    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.

    Args:
        text_config (`dict` or `PreTrainedConfig`, *optional*):
            Configuration for the text model component.
        audio_video_config (`dict` or `PreTrainedConfig`, *optional*):
            Configuration for the audio-video encoder component.

    ```python
    >>> from transformers import PeAudioVideoModel, PeAudioVideoConfig

    >>> # Initializing a PeAudioVideoModel style configuration
    >>> configuration = PeAudioVideoConfig()

    >>> # Initializing a model from the pe-av-large style configuration
    >>> model = PeAudioVideoModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "pe_audio_video"
    sub_configs = {"text_config": AutoConfig, "audio_video_config": PeAudioVideoEncoderConfig}
    # ModernBERT text-tower defaults matching the pe-av-large checkpoint.
    _default_text_config_kwargs = {
        "model_type": "modernbert",
        "hidden_size": 1024,
        "intermediate_size": 2624,
        "num_hidden_layers": 22,
        "num_attention_heads": 16,
    }

    def __init__(
        self,
        text_config=None,
        audio_video_config=None,
        **kwargs,
    ):
        if isinstance(text_config, dict):
            # Copy before filling in the default model_type so the caller's
            # dict is not mutated in place; explicit keys override the defaults.
            text_config = {"model_type": "modernbert", **text_config}
            text_config = CONFIG_MAPPING[text_config["model_type"]](
                **{**self._default_text_config_kwargs, **text_config}
            )
        elif text_config is None:
            text_config = CONFIG_MAPPING["modernbert"](**self._default_text_config_kwargs)
        if isinstance(audio_video_config, dict):
            audio_video_config = PeAudioVideoEncoderConfig(**audio_video_config)
        elif audio_video_config is None:
            audio_video_config = PeAudioVideoEncoderConfig()
        self.text_config = text_config
        self.audio_video_config = audio_video_config
        super().__init__(**kwargs)

    @property
    def audio_config(self):
        """Derive a text+audio (PeAudio) config sharing this text tower."""
        return CONFIG_MAPPING["pe_audio"](
            text_config=self.text_config,
            audio_config=self.audio_video_config.audio_config,
        )

    @property
    def video_config(self):
        """Derive a text+video (PeVideo) config sharing this text tower."""
        return CONFIG_MAPPING["pe_video"](
            text_config=self.text_config,
            video_config=self.audio_video_config.video_config,
        )
__all__ = ["PeAudioVideoEncoderConfig", "PeAudioVideoConfig"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/pe_audio_video/configuration_pe_audio_video.py",
"license": "Apache License 2.0",
"lines": 183,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/pe_audio_video/convert_pe_audio_video_to_hf.py | # Copyright 2025 the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import re
import safetensors.torch
from transformers.models.pe_audio_video.modeling_pe_audio_video import PeAudioVideoConfig, PeAudioVideoModel
from transformers.utils import cached_file
# Regex substitutions mapping original checkpoint parameter names to the
# converted HF module tree. Applied in order by convert_key(); more specific
# patterns are listed before their generic fallbacks.
ORIGINAL_TO_CONVERTED_KEY_MAPPING = {
    r"audio_video_model\.audio_model\.dac_vae_encoder\.encoder": "audio_video_encoder.audio_encoder.dac_encoder",
    r"audio_video_model\.audio_model\.dac_vae_encoder\.bottleneck\.in_proj": "audio_video_encoder.audio_encoder.bottleneck",
    r"audio_video_model\.audio_model\.data_proj": "audio_video_encoder.audio_encoder.data_proj",
    r"audio_video_model\.audio_model\.transformer\.embeddings\.resnet_block": "audio_video_encoder.audio_encoder.embeddings.resnet_block",
    r"audio_video_model\.audio_model\.transformer\.embeddings\.cls_token": "audio_video_encoder.audio_encoder.embeddings.class_embedding",
    r"audio_video_model\.audio_model\.transformer\.layers": "audio_video_encoder.audio_encoder.layers",
    r"audio_video_model\.audio_model\.transformer\.norm": "audio_video_encoder.audio_encoder.norm",
    r"audio_video_model\.audio_model\.transformer\.output": "audio_video_encoder.audio_encoder.output",
    r"audio_video_model\.video_model\.clip_vision_model": "audio_video_encoder.video_encoder.vision_model",
    r"audio_video_model\.video_model\.proj": "audio_video_encoder.video_encoder.proj",
    r"audio_video_model\.video_model\.data_proj": "audio_video_encoder.video_encoder.data_proj",
    r"audio_video_model\.video_model\.transformer\.embeddings\.resnet_block": "audio_video_encoder.video_encoder.embeddings.resnet_block",
    r"audio_video_model\.video_model\.transformer\.embeddings\.cls_token": "audio_video_encoder.video_encoder.embeddings.class_embedding",
    r"audio_video_model\.video_model\.transformer\.layers": "audio_video_encoder.video_encoder.layers",
    r"audio_video_model\.video_model\.transformer\.norm": "audio_video_encoder.video_encoder.norm",
    r"audio_video_model\.video_model\.transformer\.output": "audio_video_encoder.video_encoder.output",
    r"audio_video_model\.transformer\.embeddings\.resnet_block": "audio_video_encoder.embeddings.resnet_block",
    r"audio_video_model\.transformer\.embeddings\.cls_token": "audio_video_encoder.embeddings.class_embedding",
    r"audio_video_model\.transformer\.layers": "audio_video_encoder.layers",
    r"audio_video_model\.transformer\.norm": "audio_video_encoder.norm",
    r"audio_video_model\.transformer\.output": "audio_video_encoder.output",
    r"audio_video_model\.transformer\.modality_aligner.conv": "audio_video_encoder.video_proj",
    r"audio_video_model\.transformer\.modality_aligner.layer_norm": "audio_video_encoder.video_norm",
    r"audio_video_model\.transformer\.concat_modality_proj": "audio_video_encoder.concat_modality_proj",
    r"audio_video_model\.transformer\.data_proj": "audio_video_encoder.data_proj",
    r"audio_video_text_head": "text_head_audio_video",
    r"audio_text_head": "text_head_audio",
    r"video_text_head": "text_head_video",
}
# Download the original checkpoint and build a freshly initialized HF model
# whose state dict the converted weights will be loaded into.
path = cached_file("facebook/pe-av-large", "model.safetensors")
state_dict = safetensors.torch.load_file(path)
config = PeAudioVideoConfig()
model = PeAudioVideoModel(config)
def convert_key(key, mapping):
    """Rewrite a checkpoint key by applying every regex substitution in
    `mapping` (pattern -> replacement), in insertion order."""
    converted = key
    for src_pattern, target in mapping.items():
        converted = re.sub(src_pattern, target, converted)
    return converted
def permute(w, n_heads, dim1, dim2):
    """
    Permute weights for rotary embeddings.
    Based on convert_perception_lm_weights_to_hf.py line 227-228

    NOTE: currently a deliberate no-op — this checkpoint's rotary layout does
    not require the interleaved permutation, so `w` is returned unchanged.
    The reference implementation is kept below for documentation purposes.
    """
    # return w.view(n_heads, dim1 // n_heads // 2, 2, dim2).transpose(1, 2).reshape(dim1, dim2)
    return w
# Convert every original key, permuting q/k projections where required.
converted_state_dict = {}
for original_key, tensor in state_dict.items():
    if "out_proj" in original_key:
        # this is not used and should be ignored
        continue
    if "text_model" in original_key:
        # Text tower weights keep their original names.
        converted_state_dict[original_key] = tensor
        continue
    elif "audio_model" in original_key:
        current_config = config.audio_video_config.audio_config
    elif "video_model" in original_key:
        current_config = config.audio_video_config.video_config
    else:
        # Fusion-transformer keys: no per-tower config applies.
        current_config = None
    if current_config is not None:
        # Get config parameters for permutation
        n_heads = current_config.num_attention_heads
        num_key_value_heads = current_config.num_key_value_heads
        hidden_size = current_config.hidden_size
        head_dim = getattr(current_config, "head_dim", hidden_size // n_heads)
        # Calculate dimensions
        dim = n_heads * head_dim
        key_value_dim = num_key_value_heads * head_dim
    converted_key = convert_key(original_key, ORIGINAL_TO_CONVERTED_KEY_MAPPING)
    # NOTE(review): for fusion-transformer q/k keys, n_heads/dim carry over from
    # the previous loop iteration (NameError on the very first such key) —
    # harmless only while permute() is an identity; verify if it is restored.
    if ".self_attn.q_proj.weight" in converted_key:
        converted_state_dict[converted_key] = permute(tensor, n_heads=n_heads, dim1=dim, dim2=hidden_size)
    elif ".self_attn.k_proj.weight" in converted_key:
        converted_state_dict[converted_key] = permute(
            tensor, n_heads=num_key_value_heads, dim1=key_value_dim, dim2=hidden_size
        )
    else:
        converted_state_dict[converted_key] = tensor
# strict=True guarantees full coverage; assign=True avoids an extra copy.
model.load_state_dict(converted_state_dict, strict=True, assign=True)
del model.config._name_or_path
print("Saving the model.")
# NOTE(review): hardcoded personal output path — parameterize before reuse.
model.save_pretrained("/raid/eustache/sam-audio/converted", safe_serialization=True)
del state_dict, model
# Safety check: reload the converted model
gc.collect()
print("Reloading the model to check if it's saved correctly.")
PeAudioVideoModel.from_pretrained("/raid/eustache/sam-audio/converted", device_map="auto")
print("Model reloaded successfully.")
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/pe_audio_video/convert_pe_audio_video_to_hf.py",
"license": "Apache License 2.0",
"lines": 105,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/pe_audio_video/modular_pe_audio_video.py | # Copyright 2025 the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections.abc import Callable
from dataclasses import dataclass
from typing import Any
import torch
import torch.nn as nn
from ... import initialization as init
from ...masking_utils import create_bidirectional_mask
from ...modeling_outputs import BaseModelOutputWithPooling, MaskedLMOutput
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel, eager_attention_forward
from ...processing_utils import Unpack
from ...utils import ModelOutput, TransformersKwargs, auto_docstring, can_return_tuple
from ...utils.generic import merge_with_config_defaults
from ...utils.output_capturing import capture_outputs
from ..auto import AutoModel
from ..qwen3.modeling_qwen3 import Qwen3Attention, Qwen3DecoderLayer, Qwen3RMSNorm, Qwen3RotaryEmbedding
from .configuration_pe_audio_video import PeAudioVideoConfig, PeAudioVideoEncoderConfig
class PeAudioVideoMaskedGroupNorm(nn.GroupNorm):
    """GroupNorm whose statistics ignore padded positions.

    Expects channel-first input `(batch, channels, seq_len)`. The mask is
    pre-expanded by the caller to the same shape as `x`, with 1 for valid
    positions and 0 for padding.
    """

    def forward(self, x, padding_mask=None):
        if padding_mask is None:
            # No padding: standard GroupNorm.
            return super().forward(x)
        batch_size, hidden_size, seq_len = x.shape
        group_size = hidden_size // self.num_groups
        # (batch, num_groups, group_size, seq_len)
        grouped_shape = (batch_size, -1, group_size, seq_len)
        x_grouped = x.view(grouped_shape)
        padding_mask_grouped = padding_mask.reshape(grouped_shape).bool()
        # Per-(batch, group) statistics over valid positions only.
        # NOTE: torch.masked is a prototype API — confirm availability for the
        # minimum supported torch version.
        mean = torch.masked.mean(x_grouped, mask=padding_mask_grouped, dim=(2, 3), keepdim=True)
        var = torch.masked.var(x_grouped, mask=padding_mask_grouped, dim=(2, 3), keepdim=True, unbiased=False)
        x_norm = (x_grouped - mean) / torch.sqrt(var + self.eps)
        x_norm = x_norm.view(x.shape)
        if self.affine:
            # Per-channel scale/shift, broadcast over batch and time.
            x_norm = x_norm * self.weight.view(1, -1, 1) + self.bias.view(1, -1, 1)
        # Zero out padded positions in the output as well.
        return x_norm * padding_mask
class PeAudioVideoConvBlock1d(nn.Module):
    """Masked GroupNorm -> SiLU -> same-padded Conv1d, channel-first."""

    def __init__(self, config):
        super().__init__()
        # num_groups=1: normalizes over all channels (masked layer-norm style).
        self.groupnorm = PeAudioVideoMaskedGroupNorm(num_groups=1, num_channels=config.hidden_size)
        self.activation = nn.SiLU()
        self.project = nn.Conv1d(
            in_channels=config.hidden_size,
            out_channels=config.hidden_size,
            kernel_size=3,
            padding="same",
        )

    def forward(self, x, padding_mask=None):
        # x: (batch, hidden_size, seq_len); padding_mask pre-expanded to x's shape.
        x = self.groupnorm(x, padding_mask=padding_mask)
        x = self.activation(x)
        return self.project(x)
class PeAudioVideoResnetBlock1d(nn.Module):
    """Two conv blocks with a residual connection, on (batch, seq, hidden) input."""

    def __init__(self, config):
        super().__init__()
        self.block1 = PeAudioVideoConvBlock1d(config)
        self.block2 = PeAudioVideoConvBlock1d(config)

    def forward(self, hidden_states, padding_mask=None):
        """
        Args:
            hidden_states: (batch_size, seq_len, hidden_size)
            padding_mask: (batch_size, seq_len)
        Returns:
            hidden_states: (batch_size, seq_len, hidden_size)
        """
        # transpose for convolutions
        # (batch_size, seq_len, hidden_size) -> (batch_size, hidden_size, seq_len)
        hidden_states = hidden_states.transpose(1, 2)
        if padding_mask is not None:
            # Broadcast the (batch, seq) mask over the channel dimension.
            padding_mask = padding_mask.unsqueeze(1).expand_as(hidden_states)
        residual = hidden_states
        hidden_states = self.block1(hidden_states, padding_mask=padding_mask)
        hidden_states = self.block2(hidden_states, padding_mask=padding_mask)
        hidden_states = residual + hidden_states
        # Back to (batch_size, seq_len, hidden_size).
        return hidden_states.transpose(1, 2)
class PeAudioVideoEncoderPatchEmbedder(nn.Module):
    """Prepends a learned class token and refines the sequence with a ResNet block."""

    def __init__(self, config):
        super().__init__()
        self.resnet_block = PeAudioVideoResnetBlock1d(config)
        # Learned [CLS]-style token; custom-initialized in _init_weights.
        self.class_embedding = nn.Parameter(torch.randn(1, 1, config.hidden_size))

    def forward(self, inputs_embeds, padding_mask=None):
        # Embedding step: prepend class token and run the ResNet block.
        hidden_states = torch.cat(
            [self.class_embedding.expand(inputs_embeds.size(0), -1, -1), inputs_embeds],
            dim=1,
        )
        if padding_mask is not None:
            # Extend the mask for the prepended token by repeating column 0.
            # TODO: any reason why we take padding_mask[0] and not just 1?
            padding_mask = torch.cat([padding_mask[:, [0]], padding_mask], dim=1)
        hidden_states = self.resnet_block(hidden_states, padding_mask=padding_mask)
        # Returns the (possibly extended) mask so callers can keep it in sync.
        return hidden_states, padding_mask
class PeAudioVideoContrastiveHead(nn.Module):
    """Projection head for contrastive training: LayerNorm followed by a
    bias-free linear map from `in_dim` to `out_dim`."""

    def __init__(
        self,
        in_dim: int,
        out_dim: int,
    ) -> None:
        super().__init__()
        self.layer_norm = nn.LayerNorm(normalized_shape=in_dim, eps=1e-6)
        self.proj = nn.Linear(in_dim, out_dim, bias=False)

    def forward(self, x: torch.Tensor) -> torch.FloatTensor:
        normalized = self.layer_norm(x)
        return self.proj(normalized)
class PeAudioVideoEncoderEmbedder(nn.Module):
    """Fuses the audio and video towers into a single embedding sequence.

    Audio and video are encoded separately, video features are projected and
    time-aligned to the audio frames, then both are concatenated per frame and
    projected to the fusion transformer's hidden size.
    """

    def __init__(self, config: PeAudioVideoEncoderConfig):
        super().__init__()
        self.audio_encoder = AutoModel.from_config(config.audio_config)
        self.video_encoder = AutoModel.from_config(config.video_config)
        # 1x1 conv projecting video features to the audio tower's width.
        self.video_proj = nn.Conv1d(config.video_config.hidden_size, config.audio_config.hidden_size, 1)
        self.video_norm = nn.LayerNorm(config.audio_config.hidden_size)
        # NOTE(review): forward concatenates audio features with video features
        # already projected to audio width, so in_features below matches only
        # when audio and video hidden sizes are equal — confirm for the
        # shipped configs.
        self.concat_modality_proj = nn.Linear(
            config.audio_config.hidden_size + config.video_config.hidden_size,
            config.hidden_size,
        )
        self.data_proj = nn.Linear(config.hidden_size, config.hidden_size)

    def _align_video_hidden_state(
        self,
        video_hidden_state: torch.Tensor,
        audio_hidden_state: torch.Tensor,
        padding_mask_videos: torch.Tensor | None = None,
        padding_mask: torch.Tensor | None = None,
    ) -> torch.Tensor:
        """
        Align video_hidden_state to audio_hidden_state by nearest neighbor interpolation.
        """
        # Fast path: already the same temporal length.
        if video_hidden_state.shape[1] == audio_hidden_state.shape[1]:
            return video_hidden_state
        # Per-sample valid lengths (fall back to the full sequence length).
        if padding_mask_videos is not None:
            video_lengths = padding_mask_videos.sum(dim=-1)
        else:
            video_lengths = video_hidden_state.shape[1] * video_hidden_state.new_ones(
                video_hidden_state.shape[0], dtype=torch.long
            )
        if padding_mask is not None:
            audio_lengths = padding_mask.sum(dim=-1)
        else:
            audio_lengths = audio_hidden_state.shape[1] * audio_hidden_state.new_ones(
                audio_hidden_state.shape[0], dtype=torch.long
            )
        if (audio_lengths == video_hidden_state.shape[1]).all() or (
            video_lengths == audio_hidden_state.shape[1]
        ).all():
            # no need to align taking into account the padding masks
            # note: when one of the above is true, we can expect the other to be true as there is no reason
            # to have masked audio without masked video and vice versa
            return nn.functional.interpolate(
                video_hidden_state.transpose(1, 2), size=audio_hidden_state.shape[1], mode="nearest"
            ).transpose(1, 2)
        # Slow path: interpolate each sample's valid span independently,
        # writing into a zero-initialized buffer shaped like the audio stream.
        aligned_shape = (*audio_hidden_state.shape[:2], video_hidden_state.shape[-1])
        aligned_hidden_state = audio_hidden_state.new_zeros(aligned_shape)
        for i, (hidden_state, video_length, audio_length) in enumerate(
            zip(video_hidden_state, video_lengths, audio_lengths)
        ):
            hidden_state = hidden_state[:video_length]
            if hidden_state.numel() > 0 and audio_length > 0:
                interpolated_hidden_state = nn.functional.interpolate(
                    hidden_state[None].transpose(1, 2), size=audio_length, mode="nearest"
                ).transpose(1, 2)[0]
                aligned_hidden_state[i, :audio_length, :] = interpolated_hidden_state
        return aligned_hidden_state

    def forward(
        self,
        input_values: torch.Tensor,
        pixel_values_videos: torch.Tensor,
        padding_mask: torch.Tensor | None = None,
        padding_mask_videos: torch.Tensor | None = None,
    ):
        """Returns (fused_embeds, audio_output_mask, audio_output, video_output)."""
        audio_output = self.audio_encoder(input_values, padding_mask=padding_mask)
        video_output = self.video_encoder(pixel_values_videos, padding_mask_videos=padding_mask_videos)
        audio_hidden_state = audio_output.last_hidden_state
        video_hidden_state = video_output.last_hidden_state
        # The audio encoder may downsample; use its output mask from here on.
        padding_mask = audio_output.output_mask
        # Conv1d is channel-first, hence the transposes around it.
        video_hidden_state = self.video_proj(video_hidden_state.transpose(1, 2)).transpose(1, 2)
        video_hidden_state = self._align_video_hidden_state(
            video_hidden_state=video_hidden_state,
            audio_hidden_state=audio_hidden_state,
            padding_mask_videos=padding_mask_videos,
            padding_mask=padding_mask,
        )
        video_hidden_state = self.video_norm(video_hidden_state)
        inputs_embeds = torch.cat([audio_hidden_state, video_hidden_state], dim=-1)
        inputs_embeds = self.concat_modality_proj(inputs_embeds)
        inputs_embeds = self.data_proj(inputs_embeds)
        return inputs_embeds, padding_mask, audio_output, video_output
class PeAudioVideoEncoderAttention(Qwen3Attention):
    """Qwen3 attention adapted for bidirectional (non-causal) encoding."""

    def __init__(self, config, layer_idx):
        super().__init__(config, layer_idx)
        self.is_causal = False
        # The encoder does not use sliding-window attention.
        del self.sliding_window

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: tuple[torch.Tensor, torch.Tensor] | None = None,
        attention_mask: torch.Tensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple[torch.Tensor, torch.Tensor]:
        # (batch, seq) prefix; heads dimension is inferred via -1 below.
        input_shape = hidden_states.shape[:-1]
        hidden_shape = (*input_shape, -1, self.head_dim)
        # Project, apply per-head q/k norms, move heads to dim 1:
        # (batch, heads, seq, head_dim).
        query_states = self.q_norm(self.q_proj(hidden_states).view(hidden_shape)).transpose(1, 2)
        key_states = self.k_norm(self.k_proj(hidden_states).view(hidden_shape)).transpose(1, 2)
        value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        cos, sin = position_embeddings
        # Module-local rotary variant built from explicit 2x2 rotation matrices.
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
        attention_interface: Callable = ALL_ATTENTION_FUNCTIONS.get_interface(
            self.config._attn_implementation, eager_attention_forward
        )
        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=0.0 if not self.training else self.attention_dropout,
            scaling=self.scaling,
            **kwargs,
        )
        attn_output = attn_output.reshape(*input_shape, -1).contiguous()
        attn_output = self.o_proj(attn_output)
        return attn_output, attn_weights
class PeAudioVideoEncoderLayer(Qwen3DecoderLayer):
    """Qwen3 decoder layer reused as an encoder layer."""

    def __init__(self, config, layer_idx):
        super().__init__(config, layer_idx)
        # Encoder layers do not alternate attention types.
        del self.attention_type
# RMSNorm identical to Qwen3's; re-declared so the modular converter emits it
# under this model's namespace.
class PeAudioVideoEncoderRMSNorm(Qwen3RMSNorm): ...
def stack_freqs(cos: torch.Tensor, sin: torch.Tensor):
    """Stack rotary cos/sin tables into explicit 2x2 rotation matrices.

    Keeps only the first half of the last dimension (the tables duplicate it)
    and returns a tensor of shape `(*cos.shape[:-1], dim // 2, 2, 2)` where
    each trailing 2x2 block is [[cos, -sin], [sin, cos]].
    """
    half = cos.size(-1) // 2
    cos_half = cos[..., :half]
    sin_half = sin[..., :half]
    rotation = torch.stack((cos_half, -sin_half, sin_half, cos_half), dim=-1)
    return rotation.view(*cos_half.size(), 2, 2)
def apply_rotary_pos_emb(q, k, cos, sin, unsqueeze_dim=1):
    """Apply rotary position embeddings to q/k via explicit 2x2 rotations.

    q, k: (batch, heads, seq, head_dim). `unsqueeze_dim=1` broadcasts the
    rotation matrices over the heads dimension.
    """
    freqs_cis = stack_freqs(cos, sin)
    freqs_cis = freqs_cis.unsqueeze(unsqueeze_dim)
    # View the head_dim axis as (head_dim/2, 1, 2) pairs, multiply by the
    # broadcast (2, 2) rotations, reduce the pair axis (dim 5 of the 6-D
    # product), and flatten back to head_dim.
    q_ = q.reshape(*q.shape[:-1], -1, 1, 2)
    k_ = k.reshape(*k.shape[:-1], -1, 1, 2)
    return (q_ * freqs_cis).sum(5).flatten(3), (k_ * freqs_cis).sum(5).flatten(3)
# Rotary embedding identical to Qwen3's; re-declared for the modular converter.
class PeAudioVideoEncoderRotaryEmbedding(Qwen3RotaryEmbedding): ...
@auto_docstring
class PeAudioVideoPreTrainedModel(PreTrainedModel):
    config: PeAudioVideoConfig
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    # Modules that must not be split across devices by `device_map`.
    _no_split_modules = ["PeAudioVideoEncoderLayer"]
    _skip_keys_device_placement = ["past_key_values"]
    _supports_flash_attn = True
    _supports_sdpa = True
    _supports_flex_attn = True
    _can_compile_fullgraph = True
    _supports_attention_backend = True
    # Submodule types whose outputs are captured for
    # output_hidden_states / output_attentions.
    _can_record_outputs = {
        "hidden_states": PeAudioVideoEncoderLayer,
        "attentions": PeAudioVideoEncoderAttention,
    }

    def _init_weights(self, module):
        # Library-default init first, then a scaled-normal init for the
        # patch embedder's class token.
        super()._init_weights(module)
        if hasattr(self.config, "initializer_range"):
            std = self.config.initializer_range
        else:
            # 0.02 is the standard default value across the library
            std = getattr(self.config.get_text_config(), "initializer_range", 0.02)
        if isinstance(module, PeAudioVideoEncoderPatchEmbedder):
            embed_dim = module.class_embedding.shape[-1]
            init.normal_(module.class_embedding, mean=0.0, std=embed_dim**-0.5 * std)
@dataclass
@auto_docstring(
    custom_intro="""
    Class for outputs of [`PeAudioVideoEncoder`].
    """
)
class PeAudioVideoEncoderOutput(BaseModelOutputWithPooling):
    # Raw output of the audio tower, before fusion.
    audio_model_output: BaseModelOutputWithPooling | None = None
    # Raw output of the video tower, before fusion.
    video_model_output: BaseModelOutputWithPooling | None = None
@auto_docstring(
    custom_intro="""
    The PeAudioVideo Encoder model.
    """
)
class PeAudioVideoEncoder(PeAudioVideoPreTrainedModel):
    config: PeAudioVideoEncoderConfig
    main_input_name = "input_values"
    base_model_prefix = "audio_video_encoder"

    def __init__(self, config: PeAudioVideoEncoderConfig):
        super().__init__(config)
        # Fuses the audio and video towers into a single embedding sequence.
        self.embedder = PeAudioVideoEncoderEmbedder(config)
        # Prepends the class token and applies a ResNet stem.
        self.patch_embedder = PeAudioVideoEncoderPatchEmbedder(config)
        self.layers = nn.ModuleList(
            [PeAudioVideoEncoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        self.norm = PeAudioVideoEncoderRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.rotary_emb = PeAudioVideoEncoderRotaryEmbedding(config=config)
        self.output = nn.Linear(config.hidden_size, config.hidden_size, bias=False)
        self.gradient_checkpointing = False
        self.post_init()

    @can_return_tuple
    @merge_with_config_defaults
    @capture_outputs
    def forward(
        self,
        input_values: torch.Tensor | None = None,
        pixel_values_videos: torch.Tensor | None = None,
        padding_mask: torch.Tensor | None = None,
        padding_mask_videos: torch.Tensor | None = None,
        **kwargs,
    ) -> tuple | PeAudioVideoEncoderOutput:
        """Encode fused audio+video; token 0 of the output is the pooled
        class-token embedding, the remainder the per-frame sequence."""
        inputs_embeds, padding_mask, audio_output, video_output = self.embedder(
            input_values,
            pixel_values_videos,
            padding_mask=padding_mask,
            padding_mask_videos=padding_mask_videos,
        )
        # patch_embedder prepends the class token and extends the mask to match.
        inputs_embeds, attention_mask = self.patch_embedder(inputs_embeds, padding_mask=padding_mask)
        if attention_mask is not None:
            # Expand the (batch, seq) mask into a full bidirectional attention mask.
            attention_mask = create_bidirectional_mask(
                config=self.config,
                inputs_embeds=inputs_embeds,
                attention_mask=attention_mask,
            )
        position_ids = torch.arange(inputs_embeds.shape[1], device=inputs_embeds.device).unsqueeze(0)
        position_embeddings = self.rotary_emb(inputs_embeds, position_ids)
        hidden_states = inputs_embeds
        for encoder_layer in self.layers[: self.config.num_hidden_layers]:
            hidden_states = encoder_layer(
                hidden_states,
                attention_mask=attention_mask,
                position_embeddings=position_embeddings,
                **kwargs,
            )
        hidden_states = self.norm(hidden_states)
        hidden_states = self.output(hidden_states)
        return PeAudioVideoEncoderOutput(
            # Split off the class token: sequence vs. pooled output.
            last_hidden_state=hidden_states[:, 1:],
            pooler_output=hidden_states[:, 0],
            audio_model_output=audio_output,
            video_model_output=video_output,
        )
@dataclass
@auto_docstring(
    custom_intro="""
    Class for outputs of [`PeAudioVideoModel`] when using text, audio, and/or video.
    """
)
class PeAudioVideoOutput(ModelOutput):
    # embeddings
    audio_embeds: torch.FloatTensor | None = None
    video_embeds: torch.FloatTensor | None = None
    audio_video_embeds: torch.FloatTensor | None = None
    text_audio_embeds: torch.FloatTensor | None = None
    text_video_embeds: torch.FloatTensor | None = None
    text_audio_video_embeds: torch.FloatTensor | None = None
    audio_plus_text_embeds: torch.FloatTensor | None = None
    video_plus_text_embeds: torch.FloatTensor | None = None
    # model outputs
    # TODO: update types to the correct ones
    text_outputs: MaskedLMOutput | None = None
    audio_outputs: BaseModelOutputWithPooling | None = None
    video_outputs: BaseModelOutputWithPooling | None = None
    audio_video_outputs: BaseModelOutputWithPooling | None = None
    # logits
    logits_audio_text: torch.FloatTensor | None = None
    logits_video_text: torch.FloatTensor | None = None
    logits_audio_video: torch.FloatTensor | None = None
    logits_audio_video_text: torch.FloatTensor | None = None
    logits_audio_plus_text_video: torch.FloatTensor | None = None
    logits_video_plus_text_audio: torch.FloatTensor | None = None
    # losses (populated only when the model is called with return_loss=True)
    audio_text_loss: torch.FloatTensor | None = None
    video_text_loss: torch.FloatTensor | None = None
    audio_video_loss: torch.FloatTensor | None = None
    audio_video_text_loss: torch.FloatTensor | None = None
    audio_plus_text_video_loss: torch.FloatTensor | None = None
    video_plus_text_audio_loss: torch.FloatTensor | None = None
    loss: torch.FloatTensor | None = None

    def to_tuple(self) -> tuple[Any]:
        """Convert to a tuple, flattening nested model outputs into tuples.

        NOTE(review): no field of this class ends with "model_output" (they
        end with "_outputs"), so the nested `to_tuple()` branch below is never
        taken and sub-model outputs are returned as objects — compare with
        `PeVideoOutput.to_tuple`, which checks explicit field names. Confirm
        whether this condition should match "_outputs" instead.
        """
        return tuple(self[k] if not k.endswith("model_output") else getattr(self, k).to_tuple() for k in self.keys())
@dataclass
class AudioVideoEmbeddings(ModelOutput):
    """Container returned by `PeAudioVideoModel.get_audio_video_embeds`:
    projected audio, video, and fused audio-video embeddings (audio/video
    entries are `None` unless explicitly requested)."""

    audio_embeds: torch.FloatTensor | None = None
    video_embeds: torch.FloatTensor | None = None
    audio_video_embeds: torch.FloatTensor | None = None
class PeAudioVideoModel(PeAudioVideoPreTrainedModel):
    """Composite contrastive model over text, audio and video.

    Combines a shared text model, an audio-text sub-model, a video-text
    sub-model and a fused audio-video encoder. Contrastive projection heads
    map each modality (and modality pair) into a common embedding space;
    per-pair logit scale/bias parameters turn the similarity matrices into
    contrastive logits.
    """

    # Weight tying between sub-models: the audio/video branches reuse the
    # top-level text model, and the fused encoder's embedder reuses the audio
    # and video encoders. Rotary embeddings (and the video rope pos_embed)
    # are excluded from tying.
    _tied_weights_keys = {
        r"audio_model\.text_model\.(?!rotary_emb)": r"^text_model\.(?!rotary_emb)",
        r"video_model\.text_model\.(?!rotary_emb)": r"^text_model\.(?!rotary_emb)",
        r"audio_video_encoder\.embedder\.audio_encoder\.(?!rotary_emb)": r"audio_model\.audio_encoder\.(?!rotary_emb)",
        r"audio_video_encoder\.embedder\.video_encoder\.(?!rotary_emb|.*\.rope\.pos_embed)": r"video_model\.video_encoder\.(?!rotary_emb|.*\.rope\.pos_embed)",
    }

    def __init__(self, config: PeAudioVideoConfig):
        super().__init__(config)
        self.text_model = AutoModel.from_config(config.text_config)
        self.audio_model = AutoModel.from_config(config.audio_config)
        self.video_model = AutoModel.from_config(config.video_config)
        self.audio_video_encoder = PeAudioVideoEncoder(config.audio_video_config)

        text_hidden_size = config.text_config.hidden_size
        audio_hidden_size = config.audio_video_config.audio_config.hidden_size
        video_hidden_size = config.audio_video_config.video_config.hidden_size

        # audio-video
        self.audio_video_head = PeAudioVideoContrastiveHead(config.audio_video_config.hidden_size, text_hidden_size)
        self.text_audio_video_head = PeAudioVideoContrastiveHead(text_hidden_size, text_hidden_size)
        # NOTE(review): the scales below are applied multiplicatively without
        # .exp() and are initialized to zero, so all logits start at the bias
        # value — confirm this matches the original training setup.
        self.audio_video_logit_scale = nn.Parameter(torch.zeros(1))
        self.audio_video_logit_bias = nn.Parameter(torch.zeros(1))
        self.text_audio_video_logit_scale = nn.Parameter(torch.zeros(1))
        self.text_audio_video_logit_bias = nn.Parameter(torch.zeros(1))
        # text-audio
        self.audio_plus_text_head = PeAudioVideoContrastiveHead(text_hidden_size + audio_hidden_size, text_hidden_size)
        self.audio_plus_text_logit_scale = nn.Parameter(torch.zeros(1))
        self.audio_plus_text_logit_bias = nn.Parameter(torch.zeros(1))
        # text-video
        self.video_plus_text_head = PeAudioVideoContrastiveHead(text_hidden_size + video_hidden_size, text_hidden_size)
        self.video_plus_text_logit_scale = nn.Parameter(torch.zeros(1))
        self.video_plus_text_logit_bias = nn.Parameter(torch.zeros(1))
        self.post_init()

    def _contrastive_loss(self, logits: torch.Tensor) -> torch.Tensor:
        """Pairwise sigmoid contrastive loss over a square similarity matrix.

        NOTE(review): `labels` is the plain identity matrix, so off-diagonal
        targets are 0 rather than the SigLIP-style -1 (`2 * eye - 1`) —
        confirm against the reference implementation.
        """
        labels = torch.eye(logits.shape[0], device=logits.device)
        loss = -nn.functional.logsigmoid(labels * logits).sum() / logits.shape[0]
        return loss

    def get_text_audio_embeds(self, input_ids, attention_mask=None):
        """Text embeddings in the text-audio contrastive space (delegated)."""
        return self.audio_model.get_text_embeds(input_ids, attention_mask)

    def get_text_video_embeds(self, input_ids, attention_mask=None):
        """Text embeddings in the text-video contrastive space (delegated)."""
        return self.video_model.get_text_embeds(input_ids, attention_mask)

    def get_text_audio_video_embeds(self, input_ids, attention_mask=None):
        """Text embeddings in the text/audio-video contrastive space.

        NOTE(review): reads `hidden_states[-1]` without forcing
        `output_hidden_states=True` (unlike `forward`) — assumes the text
        model is configured to return hidden states; confirm.
        """
        text_outputs: MaskedLMOutput = self.text_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            return_dict=True,
        )
        # Pooled text representation = first token of the last hidden state.
        text_embeds = text_outputs.hidden_states[-1][:, 0]
        return self.text_audio_video_head(text_embeds)

    def get_audio_embeds(self, input_values, padding_mask=None):
        """Audio embeddings in the text-audio contrastive space (delegated)."""
        return self.audio_model.get_audio_embeds(input_values, padding_mask)

    def get_video_embeds(self, pixel_values_videos, padding_mask_videos=None):
        """Video embeddings in the text-video contrastive space (delegated)."""
        return self.video_model.get_video_embeds(pixel_values_videos, padding_mask_videos)

    def get_audio_video_embeds(
        self,
        input_values: torch.Tensor,
        pixel_values_videos: torch.Tensor,
        padding_mask: torch.Tensor | None = None,
        padding_mask_videos: torch.Tensor | None = None,
        return_audio_embeds: bool = False,
        return_video_embeds: bool = False,
        **kwargs,
    ) -> AudioVideoEmbeddings:
        """Fused audio-video embeddings, optionally with per-modality embeddings.

        Args:
            input_values: Raw audio batch.
            pixel_values_videos: Batched video frames.
            padding_mask: Padding mask for the audio stream.
            padding_mask_videos: Padding mask for the video stream.
            return_audio_embeds: Also project the audio pooler output.
            return_video_embeds: Also project the video pooler output.
        """
        audio_video_outputs = self.audio_video_encoder(
            input_values=input_values,
            pixel_values_videos=pixel_values_videos,
            padding_mask=padding_mask,
            padding_mask_videos=padding_mask_videos,
            **kwargs,
        )
        # Per-modality embeddings are only computed on request; the same
        # flags guard the return fields, so no unbound access can occur.
        if return_audio_embeds:
            audio_embeds = self.audio_model.audio_head(audio_video_outputs.audio_model_output.pooler_output)
        if return_video_embeds:
            video_embeds = self.video_model.video_head(audio_video_outputs.video_model_output.pooler_output)
        audio_video_embeds = self.audio_video_head(audio_video_outputs.pooler_output)
        return AudioVideoEmbeddings(
            audio_embeds=audio_embeds if return_audio_embeds else None,
            video_embeds=video_embeds if return_video_embeds else None,
            audio_video_embeds=audio_video_embeds,
        )

    def get_audio_plus_text_embeds(
        self,
        input_ids: torch.Tensor,
        input_values: torch.Tensor,
        attention_mask: torch.Tensor | None = None,
        padding_mask: torch.Tensor | None = None,
    ) -> torch.Tensor:
        """Joint embedding of paired text + audio via `audio_plus_text_head`."""
        audio_outputs = self.audio_model.audio_encoder(
            input_values=input_values,
            padding_mask=padding_mask,
            return_dict=True,
        )
        # Fix: with return_dict=True the encoder returns a ModelOutput, which
        # torch.cat cannot consume; use the pooled tensor, as every other call
        # site in this class does.
        audio_embeds = audio_outputs.pooler_output
        text_outputs: MaskedLMOutput = self.text_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            return_dict=True,
        )
        text_embeds = text_outputs.hidden_states[-1][:, 0]
        # NOTE(review): concat order is [text, audio] here but forward() feeds
        # the same head with [audio, text] — confirm which is canonical.
        audio_plus_text_embeds = torch.cat([text_embeds, audio_embeds], dim=-1)
        return self.audio_plus_text_head(audio_plus_text_embeds)

    def get_video_plus_text_embeds(
        self,
        input_ids: torch.Tensor,
        pixel_values_videos: torch.Tensor,
        attention_mask: torch.Tensor | None = None,
        padding_mask_videos: torch.Tensor | None = None,
    ) -> torch.Tensor:
        """Joint embedding of paired text + video via `video_plus_text_head`."""
        video_outputs = self.video_model.video_encoder(
            pixel_values_videos=pixel_values_videos,
            padding_mask_videos=padding_mask_videos,
            return_dict=True,
        )
        # Fix: extract the pooled tensor from the encoder's ModelOutput before
        # concatenation (the original concatenated the output object itself).
        video_embeds = video_outputs.pooler_output
        text_outputs: MaskedLMOutput = self.text_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            return_dict=True,
        )
        text_embeds = text_outputs.hidden_states[-1][:, 0]
        # NOTE(review): concat order is [text, video] here but forward() feeds
        # the same head with [video, text] — confirm which is canonical.
        video_plus_text_embeds = torch.cat([text_embeds, video_embeds], dim=-1)
        return self.video_plus_text_head(video_plus_text_embeds)

    @can_return_tuple
    def forward(
        self,
        input_ids: torch.Tensor | None = None,
        pixel_values_videos: torch.Tensor | None = None,
        input_values: torch.Tensor | None = None,
        attention_mask: torch.Tensor | None = None,
        padding_mask_videos: torch.Tensor | None = None,
        padding_mask: torch.Tensor | None = None,
        return_loss=False,
        **kwargs,
    ) -> PeAudioVideoOutput:
        """Run whichever combination of modalities was provided.

        At least two of `input_ids`, `input_values`, `pixel_values_videos`
        are required. Text+audio and text+video are delegated to the
        respective sub-models; any combination containing both audio and
        video goes through the fused encoder, optionally joined by text.
        """
        if sum([input_ids is not None, pixel_values_videos is not None, input_values is not None]) < 2:
            raise ValueError("At least two of input_ids, pixel_values_videos, or input_values must be provided")

        # --- text + audio only: delegate to the audio sub-model ---
        if pixel_values_videos is None:
            outputs = self.audio_model(
                input_ids=input_ids,
                input_values=input_values,
                attention_mask=attention_mask,
                padding_mask=padding_mask,
                return_dict=True,
            )
            audio_plus_text_embeds = torch.cat(
                [outputs.audio_outputs.pooler_output, outputs.text_outputs.hidden_states[-1][:, 0]], dim=-1
            )
            audio_plus_text_embeds = self.audio_plus_text_head(audio_plus_text_embeds)
            # Forward the sub-model's fields into the combined output
            # (assumes its field names are a subset of PeAudioVideoOutput's —
            # TODO confirm).
            return PeAudioVideoOutput(audio_plus_text_embeds=audio_plus_text_embeds, **outputs)

        # --- text + video only: delegate to the video sub-model ---
        if input_values is None:
            outputs = self.video_model(
                input_ids=input_ids,
                pixel_values_videos=pixel_values_videos,
                attention_mask=attention_mask,
                padding_mask_videos=padding_mask_videos,
                return_dict=True,
            )
            video_plus_text_embeds = torch.cat(
                [outputs.video_outputs.pooler_output, outputs.text_outputs.hidden_states[-1][:, 0]], dim=-1
            )
            video_plus_text_embeds = self.video_plus_text_head(video_plus_text_embeds)
            return PeAudioVideoOutput(video_plus_text_embeds=video_plus_text_embeds, **outputs)

        # --- audio + video (optionally + text): fused encoder ---
        audio_video_outputs = self.audio_video_encoder(
            input_values=input_values,
            pixel_values_videos=pixel_values_videos,
            padding_mask=padding_mask,
            padding_mask_videos=padding_mask_videos,
            **kwargs,
        )
        audio_embeds = audio_video_outputs.audio_model_output.pooler_output
        video_embeds = audio_video_outputs.video_model_output.pooler_output
        audio_video_embeds = audio_video_outputs.pooler_output

        audio_embeds = self.audio_model.audio_head(audio_embeds)
        video_embeds = self.video_model.video_head(video_embeds)
        audio_video_embeds = self.audio_video_head(audio_video_embeds)

        logits_audio_video = audio_embeds @ video_embeds.T
        logits_audio_video = logits_audio_video * self.audio_video_logit_scale + self.audio_video_logit_bias
        audio_video_loss = self._contrastive_loss(logits_audio_video) if return_loss else None

        # No text: only the audio-video pair contributes.
        if input_ids is None:
            return PeAudioVideoOutput(
                logits_audio_video=logits_audio_video,
                audio_embeds=audio_embeds,
                video_embeds=video_embeds,
                audio_video_embeds=audio_video_embeds,
                loss=audio_video_loss,
                audio_video_loss=audio_video_loss,
            )

        # Hidden states are needed below to pool the first token of the last layer.
        kwargs["output_hidden_states"] = True
        text_outputs = self.text_model(input_ids=input_ids, attention_mask=attention_mask, **kwargs)
        text_embeds = text_outputs.hidden_states[-1][:, 0]
        audio_plus_text_embeds = torch.cat([audio_video_outputs.audio_model_output.pooler_output, text_embeds], dim=-1)
        video_plus_text_embeds = torch.cat([audio_video_outputs.video_model_output.pooler_output, text_embeds], dim=-1)

        # Project text into each pairwise contrastive space.
        text_audio_embeds = self.audio_model.text_audio_head(text_embeds)
        text_video_embeds = self.video_model.text_video_head(text_embeds)
        text_audio_video_embeds = self.text_audio_video_head(text_embeds)
        audio_plus_text_embeds = self.audio_plus_text_head(audio_plus_text_embeds)
        video_plus_text_embeds = self.video_plus_text_head(video_plus_text_embeds)

        # Similarity matrices, then per-pair scale/bias.
        logits_audio_text = audio_embeds @ text_audio_embeds.T
        logits_video_text = video_embeds @ text_video_embeds.T
        logits_audio_video_text = audio_video_embeds @ text_audio_video_embeds.T
        logits_audio_plus_text_video = audio_plus_text_embeds @ video_embeds.T
        logits_video_plus_text_audio = video_plus_text_embeds @ audio_embeds.T

        logits_audio_text = (
            logits_audio_text * self.audio_model.text_audio_logit_scale + self.audio_model.text_audio_logit_bias
        )
        logits_video_text = (
            logits_video_text * self.video_model.text_video_logit_scale + self.video_model.text_video_logit_bias
        )
        logits_audio_video_text = (
            logits_audio_video_text * self.text_audio_video_logit_scale + self.text_audio_video_logit_bias
        )
        logits_audio_plus_text_video = (
            logits_audio_plus_text_video * self.audio_plus_text_logit_scale + self.audio_plus_text_logit_bias
        )
        logits_video_plus_text_audio = (
            logits_video_plus_text_audio * self.video_plus_text_logit_scale + self.video_plus_text_logit_bias
        )

        if return_loss:
            audio_text_loss = self._contrastive_loss(logits_audio_text)
            video_text_loss = self._contrastive_loss(logits_video_text)
            audio_video_text_loss = self._contrastive_loss(logits_audio_video_text)
            audio_plus_text_video_loss = self._contrastive_loss(logits_audio_plus_text_video)
            video_plus_text_audio_loss = self._contrastive_loss(logits_video_plus_text_audio)
            # Unweighted sum of all six pairwise losses.
            loss = (
                audio_video_text_loss
                + audio_text_loss
                + video_text_loss
                + audio_video_loss
                + audio_plus_text_video_loss
                + video_plus_text_audio_loss
            )

        return PeAudioVideoOutput(
            # embeddings
            audio_embeds=audio_embeds,
            video_embeds=video_embeds,
            audio_video_embeds=audio_video_embeds,
            text_audio_embeds=text_audio_embeds,
            text_video_embeds=text_video_embeds,
            text_audio_video_embeds=text_audio_video_embeds,
            audio_plus_text_embeds=audio_plus_text_embeds,
            video_plus_text_embeds=video_plus_text_embeds,
            # model outputs
            text_outputs=text_outputs,
            audio_outputs=audio_video_outputs.audio_model_output,
            video_outputs=audio_video_outputs.video_model_output,
            audio_video_outputs=audio_video_outputs,
            # logits
            logits_audio_text=logits_audio_text,
            logits_video_text=logits_video_text,
            logits_audio_video=logits_audio_video,
            logits_audio_video_text=logits_audio_video_text,
            logits_audio_plus_text_video=logits_audio_plus_text_video,
            logits_video_plus_text_audio=logits_video_plus_text_audio,
            # losses
            audio_text_loss=audio_text_loss if return_loss else None,
            video_text_loss=video_text_loss if return_loss else None,
            audio_video_loss=audio_video_loss if return_loss else None,
            audio_video_text_loss=audio_video_text_loss if return_loss else None,
            audio_plus_text_video_loss=audio_plus_text_video_loss if return_loss else None,
            video_plus_text_audio_loss=video_plus_text_audio_loss if return_loss else None,
            loss=loss if return_loss else None,
        )
# Public symbols exported by this module.
__all__ = [
    "PeAudioVideoModel",
    "PeAudioVideoEncoder",
]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/pe_audio_video/modular_pe_audio_video.py",
"license": "Apache License 2.0",
"lines": 648,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/pe_audio_video/processing_pe_audio_video.py | # Copyright 2025 the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ...processing_utils import ProcessorMixin
class PeAudioVideoProcessor(ProcessorMixin):
    """Joint processor bundling an audio feature extractor, a video processor
    and a tokenizer for the PeAudioVideo model."""

    # Sub-processor attribute names managed by ProcessorMixin, and the classes
    # used to load them from a checkpoint.
    attributes = ["feature_extractor", "video_processor", "tokenizer"]
    feature_extractor_class = "PeAudioFeatureExtractor"
    tokenizer_class = "AutoTokenizer"
    # Reuses the PeVideo model's video processor.
    video_processor_class = "PeVideoVideoProcessor"
# Public symbols exported by this module.
__all__ = ["PeAudioVideoProcessor"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/pe_audio_video/processing_pe_audio_video.py",
"license": "Apache License 2.0",
"lines": 20,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/pe_video/configuration_pe_video.py | # Copyright 2025 the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ...configuration_utils import PreTrainedConfig, PretrainedConfig
from ...modeling_rope_utils import RopeParameters
from ...utils import logging
from ..auto import CONFIG_MAPPING, AutoConfig
from ..timm_wrapper import TimmWrapperConfig
logger = logging.get_logger(__name__)
class PeVideoEncoderConfig(PreTrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`PeVideoEncoder`]. It is used to instantiate a
    PeVideoEncoder model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of pe-av-large.

    e.g. [facebook/pe-av-large](https://huggingface.co/facebook/pe-av-large)

    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.

    Args:
        vision_config (`Union[PreTrainedConfig, dict]`, *optional*):
            Configuration for the vision backbone used to extract frame embeddings. If a dictionary is provided, it is
            used to instantiate a [`~transformers.TimmWrapperConfig`] with the PE default arguments.
        hidden_size (`int`, *optional*, defaults to 1792):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 4800):
            Dimension of the feedforward layers in the Transformer blocks.
        num_hidden_layers (`int`, *optional*, defaults to 6):
            Number of Transformer encoder blocks.
        num_attention_heads (`int`, *optional*, defaults to 14):
            Number of attention heads used in each attention layer.
        num_key_value_heads (`int`, *optional*):
            Number of key and value heads for grouped-query attention. If unset, this defaults to `num_attention_heads`.
        head_dim (`int`, *optional*, defaults to 128):
            Dimension of each attention head for query, key, and value projections.
        hidden_act (`str`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the Transformer blocks.
        max_position_embeddings (`int`, *optional*, defaults to 10000):
            Maximum sequence length supported by the rotary position embeddings.
        initializer_range (`float`, *optional*, defaults to 0.02):
            Standard deviation of the truncated normal initializer for weight matrices.
        rms_norm_eps (`float`, *optional*, defaults to 1e-05):
            Epsilon used by the RMS normalization layers.
        rope_parameters (`Union[RopeParameters, dict]`, *optional*, defaults to `{'rope_theta': 20000}`):
            Parameters for the rotary position embeddings, such as the base `rope_theta`.
        attention_bias (`bool`, *optional*, defaults to `False`):
            Whether to use bias terms in the query, key, value, and output projections.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            Dropout ratio applied to attention probabilities.

    ```python
    >>> from transformers import PeVideoEncoder, PeVideoEncoderConfig

    >>> # Initializing a PeVideoEncoder style configuration
    >>> configuration = PeVideoEncoderConfig()

    >>> # Initializing a model from the pe-av-large style configuration
    >>> model = PeVideoEncoder(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "pe_video_encoder"
    sub_configs = {"vision_config": TimmWrapperConfig}
    # NOTE(review): "audio_video_config" looks copied from the audio-video
    # model — confirm this shouldn't be "video_config".
    base_config_key = "audio_video_config"
    # Defaults applied when `vision_config` is absent or a partial dict.
    _default_vision_config_kwargs = {
        "architecture": "vit_pe_core_large_patch14_336",
        "do_pooling": True,
        "num_classes": 1024,
        "global_pool": "map",
        "initializer_range": 0.02,
    }

    def __init__(
        self,
        vision_config: dict | PreTrainedConfig | None = None,
        hidden_size: int | None = 1792,
        intermediate_size: int | None = 4800,
        num_hidden_layers: int | None = 6,
        num_attention_heads: int | None = 14,
        num_key_value_heads: int | None = None,
        head_dim: int | None = 128,
        hidden_act: str | None = "silu",
        max_position_embeddings: int | None = 10000,
        initializer_range: float | None = 0.02,
        rms_norm_eps: float | None = 1e-5,
        # Shared default dict: only assigned (never mutated) in this constructor.
        rope_parameters: RopeParameters | dict | None = {"rope_theta": 20000},
        attention_bias: bool | None = False,
        attention_dropout: float | None = 0.0,
        **kwargs,
    ):
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.head_dim = head_dim
        self.hidden_act = hidden_act
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.rope_parameters = rope_parameters
        self.attention_bias = attention_bias
        self.attention_dropout = attention_dropout
        # Coerce the vision config: dict entries override the PE defaults.
        if isinstance(vision_config, dict):
            vision_config["model_type"] = vision_config.get("model_type", "timm_wrapper")
            vision_config = CONFIG_MAPPING[vision_config["model_type"]].from_dict(
                {**self._default_vision_config_kwargs, **vision_config}
            )
        elif vision_config is None:
            vision_config = CONFIG_MAPPING["timm_wrapper"].from_dict(self._default_vision_config_kwargs)
        self.vision_config = vision_config
        super().__init__(**kwargs)
# NOTE(review): this class derives from `PretrainedConfig` while
# `PeVideoEncoderConfig` above uses `PreTrainedConfig` — confirm the two names
# alias the same class, or align them for consistency.
class PeVideoConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`PeVideoModel`]. It is used to instantiate a
    PeVideoModel model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of pe-av-large.

    e.g. [facebook/pe-av-large](https://huggingface.co/facebook/pe-av-large)

    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.

    Args:
        text_config (`dict` or `PreTrainedConfig`, *optional*):
            Configuration for the text model component.
        video_config (`dict` or `PreTrainedConfig`, *optional*):
            Configuration for the video encoder component.

    ```python
    >>> from transformers import PeVideoModel, PeVideoConfig

    >>> # Initializing a PeVideoModel style configuration
    >>> configuration = PeVideoConfig()

    >>> # Initializing a model from the pe-av-large style configuration
    >>> model = PeVideoModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "pe_video"
    sub_configs = {"text_config": AutoConfig, "video_config": PeVideoEncoderConfig}
    # NOTE(review): "audio_video_config" appears copied from the audio-video
    # model — confirm it is the intended base config key here.
    base_config_key = "audio_video_config"
    # Defaults applied when `text_config` is absent or a partial dict.
    _default_text_config_kwargs = {
        "model_type": "modernbert",
        "hidden_size": 1024,
        "intermediate_size": 2624,
        "num_hidden_layers": 22,
        "num_attention_heads": 16,
    }

    def __init__(
        self,
        text_config=None,
        video_config=None,
        **kwargs,
    ):
        # Coerce the text config; dict entries override the ModernBERT defaults.
        # NOTE: fills in "model_type" on the caller's dict in place.
        if isinstance(text_config, dict):
            text_config["model_type"] = text_config.get("model_type", "modernbert")
            text_config = CONFIG_MAPPING[text_config["model_type"]](
                **{**self._default_text_config_kwargs, **text_config}
            )
        elif text_config is None:
            text_config = CONFIG_MAPPING["modernbert"](**self._default_text_config_kwargs)
        # Coerce the video config.
        if isinstance(video_config, dict):
            video_config = PeVideoEncoderConfig(**video_config)
        elif video_config is None:
            video_config = PeVideoEncoderConfig()
        self.text_config = text_config
        self.video_config = video_config
        super().__init__(**kwargs)
__all__ = ["PeVideoEncoderConfig", "PeVideoConfig"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/pe_video/configuration_pe_video.py",
"license": "Apache License 2.0",
"lines": 172,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/pe_video/modular_pe_video.py | # Copyright 2025 the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import Any
import torch
import torch.nn as nn
import torch.nn.functional as F
from ...masking_utils import create_bidirectional_mask
from ...modeling_outputs import BaseModelOutputWithPooling, MaskedLMOutput
from ...processing_utils import Unpack
from ...utils import ModelOutput, TransformersKwargs, auto_docstring, can_return_tuple
from ...utils.generic import merge_with_config_defaults
from ...utils.output_capturing import capture_outputs
from ..auto import AutoModel, AutoModelForImageClassification
from ..pe_audio_video.modeling_pe_audio_video import (
PeAudioVideoContrastiveHead,
PeAudioVideoEncoder,
PeAudioVideoEncoderPatchEmbedder,
PeAudioVideoPreTrainedModel,
)
from .configuration_pe_video import PeVideoConfig, PeVideoEncoderConfig
# TODO: not sure about the typing for text_model_output
@dataclass
# @auto_docstring
class PeVideoOutput(ModelOutput):
    """Output of [`PeVideoModel`]: contrastive logits, projected embeddings,
    and the raw text / video sub-model outputs."""

    # Sigmoid contrastive loss; only set when forward() gets return_loss=True.
    loss: torch.FloatTensor | None = None
    logits_video_text: torch.FloatTensor | None = None
    text_video_embeds: torch.FloatTensor | None = None
    video_embeds: torch.FloatTensor | None = None
    text_outputs: BaseModelOutputWithPooling = None
    video_outputs: BaseModelOutputWithPooling = None

    def to_tuple(self) -> tuple[Any]:
        """Convert to a tuple, recursively flattening the nested sub-model outputs."""
        return tuple(
            self[k] if k not in ["text_outputs", "video_outputs"] else getattr(self, k).to_tuple() for k in self.keys()
        )
# Modular-transformers alias: identical implementation, renamed for this model.
class PeVideoContrastiveHead(PeAudioVideoContrastiveHead): ...
# Modular-transformers alias: identical implementation, renamed for this model.
class PeVideoEncoderPatchEmbedder(PeAudioVideoEncoderPatchEmbedder): ...
class PeVideoEncoderEmbedder(nn.Module):
    """Turns raw video frames into encoder input embeddings.

    Every frame is scored by an image-classification backbone; the
    L2-normalized frame logits are then projected into the encoder's hidden
    space through two linear layers.
    """

    def __init__(self, config: PeVideoEncoderConfig):
        super().__init__()
        self.vision_model = AutoModelForImageClassification.from_config(config.vision_config)
        self.proj = nn.Linear(config.vision_config.num_labels, config.hidden_size, bias=False)
        self.data_proj = nn.Linear(config.hidden_size, config.hidden_size)

    def forward(
        self,
        pixel_values_videos: torch.Tensor,
        padding_mask: torch.Tensor | None = None,
    ) -> tuple[torch.Tensor, torch.Tensor | None]:
        # Collapse (batch, frames) so each frame runs through the backbone as
        # an independent image.
        batch_size, num_frames = pixel_values_videos.shape[:2]
        flat_frames = pixel_values_videos.view(batch_size * num_frames, *pixel_values_videos.shape[2:])
        frame_logits = self.vision_model(flat_frames).logits
        # Restore the (batch, frames, features) layout and L2-normalize per frame.
        frame_logits = F.normalize(frame_logits.view(batch_size, num_frames, -1), dim=-1)
        # Two-stage projection into the encoder hidden size; the padding mask
        # passes through untouched.
        return self.data_proj(self.proj(frame_logits)), padding_mask
class PeVideoPreTrainedModel(PeAudioVideoPreTrainedModel):
    """Base class for PeVideo models: inherits init/loading behavior and
    overrides the checkpoint prefix and primary input name."""

    # Prefix under which the base model's weights live in checkpoints.
    base_model_prefix = "video_model"
    main_input_name = "pixel_values_videos"
@auto_docstring(
    custom_intro="""
    The PeVideo Encoder model.
    """
)
class PeVideoEncoder(PeAudioVideoEncoder):
    base_model_prefix = "video_model.video_encoder"
    main_input_name = "pixel_values_videos"

    def __init__(self, config: PeVideoEncoderConfig):
        super().__init__(config)
        # Replace the audio-video embedder with a video-only embedder; the
        # rest of the stack (patch embedder, layers, norm, rotary, output
        # projection) is inherited unchanged.
        self.embedder = PeVideoEncoderEmbedder(config)

    @can_return_tuple
    @merge_with_config_defaults
    @capture_outputs
    def forward(
        self,
        pixel_values_videos: torch.Tensor,
        padding_mask_videos: torch.Tensor | None = None,
        **kwargs,
    ) -> tuple | BaseModelOutputWithPooling:
        """Encode a batch of videos.

        Returns:
            [`BaseModelOutputWithPooling`] with the per-token states (position
            0 stripped) and the pooled state taken from position 0.
        """
        inputs_embeds, padding_mask = self.embedder(pixel_values_videos, padding_mask=padding_mask_videos)
        inputs_embeds, attention_mask = self.patch_embedder(inputs_embeds, padding_mask=padding_mask)
        if attention_mask is not None:
            # Expand the 2D padding mask into a full bidirectional attention mask.
            attention_mask = create_bidirectional_mask(
                config=self.config,
                inputs_embeds=inputs_embeds,
                attention_mask=attention_mask,
            )
        # Sequential positions shared across the batch (shape: 1 x seq_len).
        position_ids = torch.arange(inputs_embeds.shape[1], device=inputs_embeds.device).unsqueeze(0)
        position_embeddings = self.rotary_emb(inputs_embeds, position_ids)
        hidden_states = inputs_embeds
        for encoder_layer in self.layers[: self.config.num_hidden_layers]:
            hidden_states = encoder_layer(
                hidden_states,
                attention_mask=attention_mask,
                position_embeddings=position_embeddings,
                **kwargs,
            )
        hidden_states = self.norm(hidden_states)
        hidden_states = self.output(hidden_states)
        # Position 0 acts as the pooled token; the remainder is the sequence.
        return BaseModelOutputWithPooling(
            last_hidden_state=hidden_states[:, 1:],
            pooler_output=hidden_states[:, 0],
        )
class PeVideoModel(PeVideoPreTrainedModel):
    """Text-video contrastive model: a text model plus the PeVideo encoder,
    with projection heads and a learned logit scale/bias."""

    main_input_name = "input_ids"

    def __init__(self, config: PeVideoConfig):
        super().__init__(config)
        self.text_model = AutoModel.from_config(config.text_config)
        self.video_encoder = PeVideoEncoder(config.video_config)
        # Both heads project into a shared space of text hidden size.
        self.text_video_head = PeVideoContrastiveHead(config.text_config.hidden_size, config.text_config.hidden_size)
        self.video_head = PeVideoContrastiveHead(config.video_config.hidden_size, config.text_config.hidden_size)
        # NOTE(review): the scale is applied multiplicatively without .exp()
        # and starts at zero — confirm this matches the training setup.
        self.text_video_logit_scale = nn.Parameter(torch.zeros(1))
        self.text_video_logit_bias = nn.Parameter(torch.zeros(1))
        self.post_init()

    @can_return_tuple
    @auto_docstring
    def get_text_features(
        self,
        input_ids: torch.Tensor,
        attention_mask: torch.Tensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple | BaseModelOutputWithPooling:
        text_outputs: BaseModelOutputWithPooling = self.text_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            return_dict=True,
            **kwargs,
        )
        # NOTE(review): the head is applied to the full `last_hidden_state`
        # here, whereas forward() applies it to `hidden_states[-1][:, 0]` —
        # confirm this asymmetry is intended.
        text_outputs.pooler_output = self.text_video_head(text_outputs.last_hidden_state)
        return text_outputs

    @can_return_tuple
    @auto_docstring
    def get_video_features(
        self,
        pixel_values_videos: torch.Tensor,
        padding_mask_videos: torch.Tensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple | BaseModelOutputWithPooling:
        video_outputs: BaseModelOutputWithPooling = self.video_encoder(
            pixel_values_videos=pixel_values_videos,
            padding_mask_videos=padding_mask_videos,
            return_dict=True,
            **kwargs,
        )
        # Replace the raw pooled state with its contrastive projection.
        video_outputs.pooler_output = self.video_head(video_outputs.pooler_output)
        return video_outputs

    @can_return_tuple
    def forward(
        self,
        input_ids: torch.Tensor,
        pixel_values_videos: torch.Tensor,
        attention_mask: torch.Tensor | None = None,
        padding_mask_videos: torch.Tensor | None = None,
        return_loss: bool | None = None,
        **kwargs,
    ) -> PeVideoOutput:
        """Encode both modalities and compute video-text contrastive logits."""
        video_outputs: BaseModelOutputWithPooling = self.video_encoder(
            pixel_values_videos=pixel_values_videos, padding_mask_videos=padding_mask_videos, **kwargs
        )
        # Hidden states are required below to pool the last layer's first token.
        kwargs["output_hidden_states"] = True
        text_outputs: MaskedLMOutput = self.text_model(input_ids=input_ids, attention_mask=attention_mask, **kwargs)
        video_embeds = video_outputs.pooler_output
        video_embeds = self.video_head(video_embeds)
        text_video_embeds = text_outputs.hidden_states[-1][:, 0]
        text_video_embeds = self.text_video_head(text_video_embeds)
        # Similarity matrix, then learned scale/bias.
        logits_video_text = video_embeds @ text_video_embeds.T
        logits_video_text = logits_video_text * self.text_video_logit_scale + self.text_video_logit_bias
        loss = None
        if return_loss:
            # NOTE(review): `labels` is the plain identity matrix rather than
            # the SigLIP-style `2*eye - 1` — confirm against the reference.
            labels = torch.eye(logits_video_text.shape[0], device=logits_video_text.device)
            loss = -F.logsigmoid(labels * logits_video_text).sum() / logits_video_text.shape[0]
        return PeVideoOutput(
            logits_video_text=logits_video_text,
            text_video_embeds=text_video_embeds,
            video_embeds=video_embeds,
            text_outputs=text_outputs,
            video_outputs=video_outputs,
            loss=loss,
        )
# Public symbols exported by this module.
__all__ = [
    "PeVideoEncoder",
    "PeVideoModel",
]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/pe_video/modular_pe_video.py",
"license": "Apache License 2.0",
"lines": 194,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/pe_video/processing_pe_video.py | from ...processing_utils import ProcessorMixin
class PeVideoProcessor(ProcessorMixin):
    """Joint processor bundling a video processor and a tokenizer for the
    PeVideo model."""

    # Sub-processor attribute names managed by ProcessorMixin, and the classes
    # used to load them from a checkpoint.
    attributes = ["video_processor", "tokenizer"]
    video_processor_class = "PeVideoVideoProcessor"
    tokenizer_class = "AutoTokenizer"
# Public symbols exported by this module.
__all__ = ["PeVideoProcessor"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/pe_video/processing_pe_video.py",
"license": "Apache License 2.0",
"lines": 6,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
huggingface/transformers:src/transformers/models/pe_video/video_processing_pe_video.py | # Copyright 2025 the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ...image_processing_utils import BatchFeature
from ...image_utils import PILImageResampling
from ...processing_utils import Unpack, VideosKwargs
from ...video_processing_utils import BaseVideoProcessor, VideoMetadata
from ...video_utils import VideoInput
class PeVideoVideoProcessor(BaseVideoProcessor):
resample = PILImageResampling.BILINEAR
def sample_frames(
self,
metadata: VideoMetadata,
num_frames: int | None = None,
fps: int | float | None = None,
**kwargs,
):
if num_frames:
total_frames = metadata.total_num_frames
num_frames = num_frames if num_frames is not None else self.num_frames
assert num_frames is not None, "`num_frames` must be specified if `fixed_len_video == True`"
frame_idxs = [int(i * (total_frames - 1) / (num_frames - 1)) for i in range(num_frames)]
return torch.tensor(frame_idxs)
else:
return super().sample_frames(metadata, num_frames, fps, **kwargs)
def _preprocess(
self,
videos: VideoInput,
**kwargs: Unpack[VideosKwargs],
) -> BatchFeature:
# Always set `return_tensors` to `None` since it won't pad variable length videos
# We'll handle this after we call the parent' method
return_tensors = kwargs.pop("return_tensors", None)
result = super()._preprocess(videos, **kwargs)
pixels = result.pixel_values_videos
data = {"pixel_values_videos": pixels}
if return_tensors:
lengths = torch.tensor([video.size(0) for video in pixels])
pixels = torch.nn.utils.rnn.pad_sequence(pixels, batch_first=True, padding_value=0.0)
data["pixel_values_videos"] = pixels
if lengths.unique().size(0) > 1:
mask = torch.arange(lengths.max())[None] < lengths[:, None]
data["padding_mask_videos"] = mask
return BatchFeature(data=data, tensor_type=return_tensors)
__all__ = ["PeVideoVideoProcessor"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/pe_video/video_processing_pe_video.py",
"license": "Apache License 2.0",
"lines": 56,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:tests/models/pe_audio_video/test_modeling_pe_audio_video.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from huggingface_hub import hf_hub_download
from transformers import PeAudioVideoEncoderConfig, PeAudioVideoProcessor
from transformers.testing_utils import (
cleanup,
require_torch,
slow,
torch_device,
)
from transformers.utils import is_torch_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import (
ModelTesterMixin,
floats_tensor,
ids_tensor,
)
if is_torch_available():
import torch
from transformers import (
PeAudioVideoEncoder,
PeAudioVideoModel,
)
class PeAudioVideoEncoderTester:
def __init__(
self,
parent,
config_kwargs={
"audio_config": {
"dac_config": {
"encoder_hidden_size": 16,
"downsampling_ratios": [2, 4, 4],
"decoder_hidden_size": 16,
"n_codebooks": 6,
"codebook_size": 512,
"codebook_dim": 32,
"quantizer_dropout": 0.0,
"commitment_loss_weight": 0.25,
"codebook_loss_weight": 1.0,
},
"hidden_size": 32,
"intermediate_size": 37,
"num_hidden_layers": 2,
"num_attention_heads": 2,
"num_key_value_heads": 2,
"head_dim": 128,
"hidden_act": "silu",
"max_position_embeddings": 512,
"initializer_range": 0.02,
"rms_norm_eps": 1e-5,
"use_cache": True,
"rope_theta": 20000,
"rope_scaling": None,
"attention_bias": False,
"max_window_layers": 28,
"attention_dropout": 0.0,
},
"video_config": {
"vision_config": {
"architecture": "vit_pe_core_large_patch14_336",
"model_args": {
"embed_dim": 64,
"img_size": (14, 14),
"depth": 2,
},
"num_classes": 4,
},
"hidden_size": 32,
"intermediate_size": 37,
"num_hidden_layers": 2,
"num_attention_heads": 2,
"num_key_value_heads": 2,
"head_dim": 128,
"hidden_act": "silu",
"max_position_embeddings": 512,
"initializer_range": 0.02,
"rms_norm_eps": 1e-5,
"use_cache": True,
"rope_theta": 20000,
"rope_scaling": None,
"attention_bias": False,
"max_window_layers": 28,
"attention_dropout": 0.0,
},
"hidden_size": 32,
"intermediate_size": 37,
"num_hidden_layers": 2,
"num_attention_heads": 2,
"num_key_value_heads": 2,
"head_dim": 128,
"hidden_act": "silu",
"max_position_embeddings": 512,
"initializer_range": 0.02,
"rms_norm_eps": 1e-5,
"use_cache": True,
"rope_theta": 20000,
"rope_scaling": None,
"attention_bias": False,
"max_window_layers": 28,
"attention_dropout": 0.0,
},
batch_size=12,
num_audio_channels=1,
num_video_channels=3,
audio_seq_length=160,
num_frames=24,
is_training=True,
):
self.parent = parent
self.config_kwargs = config_kwargs
for key, value in config_kwargs.items():
setattr(self, key, value)
self.batch_size = batch_size
self.num_audio_channels = num_audio_channels
self.num_video_channels = num_video_channels
self.audio_seq_length = audio_seq_length
self.num_frames = num_frames
self.is_training = is_training
@property
def seq_length(self):
config = self.get_config()
# seq_length is what gets feeded to the transformer
# we first have to divide by hop_length to get the number of frames
# then we add 1 because we add the class token
return self.audio_seq_length // config.audio_config.dac_config.hop_length + 1
def prepare_config_and_inputs(self):
input_values = floats_tensor([self.batch_size, self.num_audio_channels, self.audio_seq_length])
# Generate valid_lengths in range [1, self.audio_seq_length] to ensure at least one valid frame
valid_audio_lengths = ids_tensor([self.batch_size], self.audio_seq_length - 1) + 1
padding_mask = torch.arange(self.audio_seq_length, device=torch_device)[None, :] < valid_audio_lengths[:, None]
padding_mask = padding_mask.int()
pixel_values_videos = floats_tensor(
[
self.batch_size,
self.num_frames,
self.num_video_channels,
self.config_kwargs["video_config"]["vision_config"]["model_args"]["img_size"][0],
self.config_kwargs["video_config"]["vision_config"]["model_args"]["img_size"][1],
]
)
# Generate valid_lengths in range [1, self.num_frames] to ensure at least one valid frame
valid_video_lengths = ids_tensor([self.batch_size], self.num_frames - 1) + 1
padding_mask_videos = (
torch.arange(self.num_frames, device=torch_device)[None, :] < valid_video_lengths[:, None]
)
padding_mask_videos = padding_mask_videos.int()
config = self.get_config()
return config, input_values, padding_mask, pixel_values_videos, padding_mask_videos
def get_config(self):
if not hasattr(self, "_config"):
self._config = PeAudioVideoEncoderConfig(**self.config_kwargs)
return self._config
def create_and_check_model(self, config, input_values, padding_mask, pixel_values_videos, padding_mask_videos):
model = PeAudioVideoEncoder(config=config)
model.to(torch_device)
model.eval()
with torch.no_grad():
result = model(
input_values,
padding_mask=padding_mask,
pixel_values_videos=pixel_values_videos,
padding_mask_videos=padding_mask_videos,
)
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, input_values, padding_mask, pixel_values_videos, padding_mask_videos = config_and_inputs
inputs_dict = {
"input_values": input_values,
"padding_mask": padding_mask,
"pixel_values_videos": pixel_values_videos,
"padding_mask_videos": padding_mask_videos,
}
return config, inputs_dict
@require_torch
class PeAudioVideoEncoderTest(ModelTesterMixin, unittest.TestCase):
all_model_classes = (PeAudioVideoEncoder,)
additional_model_inputs = ["pixel_values_videos", "padding_mask_videos"]
test_resize_embeddings = False
_is_composite = True
test_torch_exportable = False
def setUp(self):
self.model_tester = PeAudioVideoEncoderTester(self)
self.config_tester = ConfigTester(
self, config_class=PeAudioVideoEncoderConfig, has_text_modality=False, hidden_size=37
)
def test_config(self):
self.config_tester.run_common_tests()
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
@unittest.skip(reason="PeAudioVideoEncoder does not have usual input embeddings")
def test_model_get_set_embeddings(self):
pass
@unittest.skip("PeAudioVideoEncoder does not have language_model, vision_tower, multi_modal_projector.")
def test_sdpa_can_dispatch_composite_models(self):
pass
@unittest.skip(
"TimmWrapperForImageClassification does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet."
)
def test_can_set_attention_dynamically_composite_model(self):
pass
@unittest.skip("ViT PE / TimmWrapperModel cannot be tested with meta device")
def test_can_be_initialized_on_meta(self):
pass
@unittest.skip("ViT PE / TimmWrapperModel cannot be tested with meta device")
def test_can_load_with_meta_device_context_manager(self):
pass
@unittest.skip("PeAudioVideoEncoder does not support feed forward chunking")
def test_feed_forward_chunking(self):
pass
@unittest.skip("#TODO @eustlb this should be fixed tho")
def test_save_load(self):
pass
@unittest.skip(reason="TimmWrapperModel does not support model parallelism")
def test_model_parallelism(self):
pass
@unittest.skip(reason="@eustlb this is not really expected")
def test_batching_equivalence(self):
pass
@unittest.skip(reason="@eustlb this is not really expected just the class embedding!")
def test_can_init_all_missing_weights(self):
pass
@require_torch
class PeAudioVideoModelIntegrationTest(unittest.TestCase):
def setUp(self):
self.checkpoint_name = "/raid/eustache/sam-audio/converted"
self.dtype = torch.float32
self.processor = PeAudioVideoProcessor.from_pretrained("facebook/pe-av-large")
def tearDown(self):
cleanup(torch_device, gc_collect=True)
@slow
@unittest.skip(reason="TODO when released")
def test(self):
video_path = hf_hub_download(
repo_id="eustlb/dummy-video-dataset", filename="audiobox.mp4", repo_type="dataset"
)
audio_path = hf_hub_download(
repo_id="eustlb/dummy-video-dataset", filename="audiobox.mp4", repo_type="dataset"
)
inputs = self.processor(
text=["A woman and a man speaking", "A woman speaking"],
audio=[audio_path, "/home/eustache_lebihan/add-sam-audio/audiobox_first5sec.mp4"],
videos=[video_path, "/home/eustache_lebihan/add-sam-audio/audiobox_first5sec.mp4"],
return_tensors="pt",
padding=True,
).to(torch_device)
model = PeAudioVideoModel.from_pretrained(
self.checkpoint_name, dtype=self.dtype, device_map=torch_device, attn_implementation="eager"
)
with torch.no_grad():
outputs = model(**inputs)
print(outputs)
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/pe_audio_video/test_modeling_pe_audio_video.py",
"license": "Apache License 2.0",
"lines": 265,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:tests/models/pe_video/test_modeling_pe_video.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import PeVideoConfig, PeVideoEncoderConfig
from transformers.testing_utils import (
require_torch,
slow,
torch_device,
)
from transformers.utils import is_torch_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import (
ModelTesterMixin,
floats_tensor,
ids_tensor,
random_attention_mask,
require_torch_gpu,
)
if is_torch_available():
import torch
from transformers import (
ModernBertConfig,
PeVideoEncoder,
PeVideoModel,
)
class PeVideoEncoderTester:
def __init__(
self,
parent,
config_kwargs={
"vision_config": {
"architecture": "vit_pe_core_large_patch14_336",
"model_args": {
"embed_dim": 64,
"img_size": (14, 14),
"depth": 2,
},
"num_classes": 4,
},
"hidden_size": 32,
"intermediate_size": 37,
"num_hidden_layers": 2,
"num_attention_heads": 2,
"num_key_value_heads": 2,
"head_dim": 16,
"hidden_act": "silu",
"max_position_embeddings": 512,
"initializer_range": 0.02,
"rms_norm_eps": 1e-5,
"use_cache": True,
"rope_theta": 20000,
"rope_scaling": None,
"attention_bias": False,
"max_window_layers": 28,
"attention_dropout": 0.0,
},
batch_size=4,
num_frames=8,
num_channels=3,
is_training=True,
):
self.parent = parent
self.config_kwargs = config_kwargs
for key, value in config_kwargs.items():
setattr(self, key, value)
self.batch_size = batch_size
self.num_frames = num_frames
self.num_channels = num_channels
self.is_training = is_training
@property
def seq_length(self):
# seq_length is what gets fed to the transformer
# we add 1 because we add the class token
return self.num_frames + 1
def prepare_config_and_inputs(self):
pixel_values_videos = floats_tensor(
[
self.batch_size,
self.num_frames,
self.num_channels,
self.config_kwargs["vision_config"]["model_args"]["img_size"][0],
self.config_kwargs["vision_config"]["model_args"]["img_size"][1],
]
)
# Generate valid_lengths in range [1, num_frames] to ensure at least one valid frame
valid_lengths = ids_tensor([self.batch_size], self.num_frames - 1) + 1
padding_mask_videos = torch.arange(self.num_frames, device=torch_device).unsqueeze(0) < valid_lengths[:, None]
padding_mask_videos = padding_mask_videos.int()
config = self.get_config()
return config, pixel_values_videos, padding_mask_videos
def get_config(self):
return PeVideoEncoderConfig(**self.config_kwargs)
def create_and_check_model(self, config, pixel_values_videos, padding_mask_videos):
model = PeVideoEncoder(config=config)
model.to(torch_device)
model.eval()
with torch.no_grad():
result = model(pixel_values_videos, padding_mask_videos=padding_mask_videos)
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values_videos, padding_mask_videos = config_and_inputs
inputs_dict = {"pixel_values_videos": pixel_values_videos, "padding_mask_videos": padding_mask_videos}
return config, inputs_dict
@require_torch
class PeVideoEncoderTest(ModelTesterMixin, unittest.TestCase):
all_model_classes = (PeVideoEncoder,)
test_resize_embeddings = False
_is_composite = True
def setUp(self):
self.model_tester = PeVideoEncoderTester(self)
self.config_tester = ConfigTester(
self, config_class=PeVideoEncoderConfig, has_text_modality=False, hidden_size=37
)
def test_config(self):
self.config_tester.run_common_tests()
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
@unittest.skip(reason="Timm Eva (PE) weights cannot be fully constructed in _init_weights")
def test_can_init_all_missing_weights(self):
pass
@unittest.skip(reason="PeVideoEncoder does not have usual input embeddings")
def test_model_get_set_embeddings(self):
pass
@unittest.skip("Cannot set `output_attentions` for timm models.")
def test_attention_outputs(self):
pass
@unittest.skip("TimmWrapperModel cannot be tested with meta device")
def test_can_be_initialized_on_meta(self):
pass
@unittest.skip("TimmWrapperModel cannot be tested with meta device")
def test_can_load_with_meta_device_context_manager(self):
pass
@unittest.skip("Cannot set `output_attentions` for timm models.")
def test_retain_grad_hidden_states_attentions(self):
pass
@unittest.skip(reason="PeVideoEncoder does not support feedforward chunking yet")
def test_feed_forward_chunking(self):
pass
@unittest.skip(reason="PeAudioModel uses some timm stuff not compatible")
def test_save_load(self):
pass
@unittest.skip(reason="TimmWrapperModel does not support model parallelism")
def test_model_parallelism(self):
pass
@unittest.skip(reason="@eustlb this is not really expected")
def test_batching_equivalence(self):
pass
class PeVideoTextModelTester:
"""
Only a ModelTester and no PeVideoTextModelTest since text model is ModernBertModel that is already tested.
"""
def __init__(
self,
parent,
config_kwargs={
"vocab_size": 99,
"pad_token_id": 0,
"hidden_size": 32,
"num_hidden_layers": 2,
"num_attention_heads": 4,
"intermediate_size": 37,
"hidden_activation": "gelu",
"mlp_dropout": 0.0,
"attention_dropout": 0.0,
"embedding_dropout": 0.0,
"classifier_dropout": 0.0,
"max_position_embeddings": 512,
"type_vocab_size": 16,
"is_decoder": False,
"initializer_range": 0.02,
},
batch_size=4,
seq_length=7,
is_training=True,
use_input_mask=True,
use_labels=True,
):
self.parent = parent
self.config_kwargs = config_kwargs
for key, value in config_kwargs.items():
setattr(self, key, value)
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_labels = use_labels
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
config = self.get_config()
return config, input_ids, input_mask
def get_config(self):
return ModernBertConfig(**self.config_kwargs)
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, input_ids, input_mask = config_and_inputs
inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
class PeVideoModelTester:
def __init__(self, parent, text_kwargs=None, video_kwargs=None, is_training=True):
if text_kwargs is None:
text_kwargs = {}
if video_kwargs is None:
video_kwargs = {}
self.parent = parent
self.text_model_tester = PeVideoTextModelTester(parent, **text_kwargs)
self.video_model_tester = PeVideoEncoderTester(parent, **video_kwargs)
self.batch_size = self.text_model_tester.batch_size # need bs for batching_equivalence test
self.is_training = is_training
def prepare_config_and_inputs(self):
_, input_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs()
_, pixel_values_videos, padding_mask_videos = self.video_model_tester.prepare_config_and_inputs()
config = self.get_config()
return config, input_ids, attention_mask, pixel_values_videos, padding_mask_videos
def get_config(self):
text_config = self.text_model_tester.get_config()
video_config = self.video_model_tester.get_config()
return PeVideoConfig(
text_config=text_config.to_dict(),
video_config=video_config.to_dict(),
projection_dim=32,
)
def create_and_check_model(self, config, input_ids, attention_mask, pixel_values_videos, padding_mask_videos):
model = PeVideoModel(config).to(torch_device).eval()
with torch.no_grad():
_ = model(input_ids, pixel_values_videos, attention_mask, padding_mask_videos)
# TODO: there is no logits per video for now
# self.parent.assertEqual(result.logits_per_video.shape, (self.video_model_tester.batch_size, self.text_model_tester.batch_size))
# self.parent.assertEqual(result.logits_per_text.shape, (self.text_model_tester.batch_size, self.video_model_tester.batch_size))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, input_ids, attention_mask, pixel_values_videos, padding_mask_videos = config_and_inputs
inputs_dict = {
"input_ids": input_ids,
"attention_mask": attention_mask,
"pixel_values_videos": pixel_values_videos,
"padding_mask_videos": padding_mask_videos,
}
return config, inputs_dict
@require_torch
class PeVideoModelTest(ModelTesterMixin, unittest.TestCase):
# TODO: add PipelineTesterMixin
all_model_classes = (PeVideoModel,)
additional_model_inputs = ["pixel_values_videos", "padding_mask_videos"]
test_resize_embeddings = False
has_attentions = False
_is_composite = True
def setUp(self):
self.model_tester = PeVideoModelTester(self)
self.config_tester = ConfigTester(
self, config_class=PeVideoConfig, has_text_modality=False, common_properties=[], hidden_size=37
)
def test_config(self):
self.config_tester.run_common_tests()
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
@unittest.skip(reason="PeVideoModel does not have usual input embeddings")
def test_model_get_set_embeddings(self):
pass
@unittest.skip(
"TimmWrapperForImageClassification does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet."
)
def test_can_set_attention_dynamically_composite_model(self):
pass
@unittest.skip(reason="Hidden_states is tested in individual model tests")
def test_hidden_states_output(self):
pass
@unittest.skip(reason="Retain_grad is tested in individual model tests")
def test_retain_grad_hidden_states_attentions(self):
pass
@unittest.skip(reason="PeVideoModel does not support feed forward chunking yet")
def test_feed_forward_chunking(self):
pass
@unittest.skip("#TODO @eustlb this should be fixed tho")
def test_save_load(self):
pass
@unittest.skip(reason="@eustlb this is not really expected")
def test_batching_equivalence(self):
pass
@unittest.skip(reason="@eustlb this is not really expected")
def test_can_init_all_missing_weights(self):
pass
@unittest.skip(reason="TimmWrapperModel does not support model parallelism")
def test_model_parallelism(self):
pass
@require_torch_gpu # pe-video contains triton code which cannot run on CPU, so we only test on GPU
def test_all_tensors_are_parameter_or_buffer(self):
super().test_all_tensors_are_parameter_or_buffer()
@require_torch
class PeVideoIntegrationTest(unittest.TestCase):
@slow
def test_inference(self):
# TODO: Add integration test when pretrained model is available
pass
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/pe_video/test_modeling_pe_video.py",
"license": "Apache License 2.0",
"lines": 310,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:examples/modular-transformers/modular_test_suffix.py | import torch.nn as nn
from transformers.models.llama.modeling_llama import LlamaDecoderLayer
class TestSuffixDecoderLayer(nn.module):
pass
# Here, we want to add "Llama" as a suffix to the base `TestModel` name for all required dependencies
class TestSuffixLlamaDecoderLayer(LlamaDecoderLayer):
pass
| {
"repo_id": "huggingface/transformers",
"file_path": "examples/modular-transformers/modular_test_suffix.py",
"license": "Apache License 2.0",
"lines": 7,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
huggingface/transformers:src/transformers/models/paddleocr_vl/modular_paddleocr_vl.py | # Copyright 2025 The PaddlePaddle Team and The HuggingFace Inc. team. All rights reserved.
#
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
# and OPT implementations in this library. It has been modified from its
# original forms to accommodate minor architectural differences compared
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from typing import Optional
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
from ... import initialization as init
from ...activations import GELUActivation
from ...cache_utils import Cache, DynamicCache
from ...image_processing_utils import BatchFeature
from ...image_processing_utils_fast import BaseImageProcessorFast, group_images_by_shape, reorder_images
from ...image_transforms import convert_to_rgb, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
SizeDict,
get_image_size,
infer_channel_dimension_format,
is_scaled_image,
make_list_of_images,
to_numpy_array,
)
from ...masking_utils import create_bidirectional_mask, create_causal_mask
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPast, BaseModelOutputWithPooling
from ...modeling_utils import PreTrainedModel
from ...models.qwen2_vl.image_processing_qwen2_vl import Qwen2VLImageProcessor
from ...processing_utils import (
ProcessingKwargs,
ProcessorMixin,
Unpack,
)
from ...tokenization_utils_base import PreTokenizedInput, TextInput
from ...utils import (
TensorType,
TransformersKwargs,
auto_docstring,
can_return_tuple,
logging,
torch_compilable_check,
torch_int,
)
from ...utils.generic import merge_with_config_defaults
from ...utils.output_capturing import capture_outputs
from ..ernie4_5.configuration_ernie4_5 import Ernie4_5Config
from ..ernie4_5.modeling_ernie4_5 import (
Ernie4_5DecoderLayer,
Ernie4_5MLP,
Ernie4_5Model,
Ernie4_5RMSNorm,
)
from ..qwen2_5_omni.modeling_qwen2_5_omni import (
Qwen2_5OmniAttention,
)
from ..qwen2_vl.configuration_qwen2_vl import Qwen2VLConfig
from ..qwen2_vl.modeling_qwen2_vl import (
Qwen2VLCausalLMOutputWithPast,
Qwen2VLForConditionalGeneration,
Qwen2VLModel,
Qwen2VLModelOutputWithPast,
Qwen2VLRotaryEmbedding,
VisionRotaryEmbedding,
)
from ..siglip.configuration_siglip import SiglipVisionConfig
from ..siglip.modeling_siglip import (
SiglipMLP,
SiglipVisionEmbeddings,
)
from ..video_llama_3.modeling_video_llama_3 import (
VideoLlama3VisionAttention,
VideoLlama3VisionEncoder,
VideoLlama3VisionEncoderLayer,
)
logger = logging.get_logger(__name__)
def smart_resize(
height: int,
width: int,
factor: int = 28,
min_pixels: int = 384 * 384,
max_pixels: int = 1536 * 1536,
):
if height < factor:
width = round((width * factor) / height)
height = factor
if width < factor:
height = round((height * factor) / width)
width = factor
if max(height, width) / min(height, width) > 200:
raise ValueError(
f"absolute aspect ratio must be smaller than 200, got {max(height, width) / min(height, width)}"
)
h_bar = round(height / factor) * factor
w_bar = round(width / factor) * factor
if h_bar * w_bar > max_pixels:
beta = math.sqrt((height * width) / max_pixels)
h_bar = math.floor(height / beta / factor) * factor
w_bar = math.floor(width / beta / factor) * factor
elif h_bar * w_bar < min_pixels:
beta = math.sqrt(min_pixels / (height * width))
h_bar = math.ceil(height * beta / factor) * factor
w_bar = math.ceil(width * beta / factor) * factor
return h_bar, w_bar
class PaddleOCRVLImageProcessor(Qwen2VLImageProcessor):
r"""
Constructs a PaddleOCRVL image processor that dynamically resizes images based on the original images.
Args:
do_resize (`bool`, *optional*, defaults to `True`):
Whether to resize the image's (height, width) dimensions.
size (`dict[str, int]`, *optional*):
Size of the image after resizing. `shortest_edge` and `longest_edge` keys must be present.
resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`):
Resampling filter to use when resizing the image.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the image by the specified scale `rescale_factor`.
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
Scale factor to use if rescaling the image.
do_normalize (`bool`, *optional*, defaults to `True`):
Whether to normalize the image.
image_mean (`float` or `list[float]`, *optional*):
Mean to use if normalizing the image. This is a float or list of floats for each channel in the image.
image_std (`float` or `list[float]`, *optional*):
Standard deviation to use if normalizing the image. This is a float or list of floats for each channel in the image.
do_convert_rgb (`bool`, *optional*, defaults to `True`):
Whether to convert the image to RGB.
min_pixels (`int`, *optional*, defaults to `384 * 384`):
The min pixels of the image to resize the image.
max_pixels (`int`, *optional*, defaults to `1536 * 1536`):
The max pixels of the image to resize the image.
patch_size (`int`, *optional*, defaults to 14):
The spatial patch size of the vision encoder.
temporal_patch_size (`int`, *optional*, defaults to 1):
The temporal patch size of the vision encoder.
merge_size (`int`, *optional*, defaults to 2):
The merge size of the vision encoder to llm encoder.
"""
model_input_names = [
"pixel_values",
"image_grid_thw",
]
def __init__(
self,
do_resize: bool = True,
size: dict[str, int] | None = None,
resample: PILImageResampling = PILImageResampling.BICUBIC,
do_rescale: bool = True,
rescale_factor: int | float = 1 / 255,
do_normalize: bool = True,
image_mean: float | list[float] | None = None,
image_std: float | list[float] | None = None,
do_convert_rgb: bool = True,
min_pixels: int = 384 * 384,
max_pixels: int = 1536 * 1536,
patch_size: int = 14,
temporal_patch_size: int = 1,
merge_size: int = 2,
**kwargs,
) -> None:
super().__init__()
def _preprocess(
    self,
    images: ImageInput,
    do_resize: bool | None = None,
    size: dict[str, int] | None = None,
    resample: PILImageResampling = None,
    do_rescale: bool | None = None,
    rescale_factor: float | None = None,
    do_normalize: bool | None = None,
    image_mean: float | list[float] | None = None,
    image_std: float | list[float] | None = None,
    patch_size: int | None = None,
    temporal_patch_size: int | None = None,
    merge_size: int | None = None,
    do_convert_rgb: bool | None = None,
    data_format: ChannelDimension | None = ChannelDimension.FIRST,
    input_data_format: str | ChannelDimension | None = None,
):
    """
    Preprocess an image or batch of images. Copy of the `preprocess` method from `CLIPImageProcessor`.

    Args:
        images (`ImageInput`):
            Image or batch of images to preprocess. Expects pixel values ranging from 0 to 255. If pixel values range from 0 to 1, set `do_rescale=False`.
        do_resize (`bool`, *optional*, defaults to `self.do_resize`):
            Whether to resize the image.
        size (`Dict[str, int]`, *optional*, defaults to `self.size`):
            Size of the image after resizing. `shortest_edge` and `longest_edge` keys must be present.
        resample (`PILImageResampling`, *optional*, defaults to `self.resample`):
            Resampling filter to use if resizing the image. This can be one of the `PILImageResampling` enums.
        do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
            Whether to rescale the image.
        rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
            Scale factor to use if rescaling the image.
        do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
            Whether to normalize the image.
        image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
            Mean to use if normalizing the image. Can be a float or a list of floats corresponding to the number of channels in the image.
        image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
            Standard deviation to use if normalizing the image. Can be a float or a list of floats corresponding to the number of channels in the image.
        patch_size (`int`, *optional*, defaults to `self.patch_size`):
            The spatial patch size of the vision encoder.
        temporal_patch_size (`int`, *optional*, defaults to `self.temporal_patch_size`):
            The temporal patch size of the vision encoder.
        merge_size (`int`, *optional*, defaults to `self.merge_size`):
            The merge size of the vision encoder to llm encoder.
        do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
            Whether to convert the image to RGB.
        data_format (`ChannelDimension`, *optional*, defaults to `ChannelDimension.FIRST`):
            The channel dimension format for the output image. Can be one of:
            - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
            - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
            - Unset: Use the channel dimension format of the input image.
        input_data_format (`ChannelDimension` or `str`, *optional*):
            The channel dimension format for the input image. Can be one of:
            - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
            - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
            - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.

    Returns:
        `tuple(np.ndarray, tuple(int, int, int))`: the flattened patches of shape
        `(grid_t * grid_h * grid_w, channel, patch_size, patch_size)` and the patch grid
        `(grid_t, grid_h, grid_w)`.
    """
    images = make_list_of_images(images)
    images = self.fetch_images(images)
    if do_convert_rgb:
        images = [convert_to_rgb(image) for image in images]
    # All transformations expect numpy arrays.
    images = [to_numpy_array(image) for image in images]
    if is_scaled_image(images[0]) and do_rescale:
        logger.warning_once(
            "It looks like you are trying to rescale already rescaled images. If the input"
            " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
        )
    if input_data_format is None:
        # We assume that all images have the same channel dimension format.
        input_data_format = infer_channel_dimension_format(images[0])
    height, width = get_image_size(images[0], channel_dim=input_data_format)
    resized_height, resized_width = height, width
    processed_images = []
    for image in images:
        if do_resize:
            # Snap the target resolution to a multiple of `patch_size * merge_size`
            # while keeping the pixel count within [shortest_edge, longest_edge].
            # NOTE(review): `resized_height`/`resized_width` end up holding the
            # values of the *last* iteration; the grid computed below assumes all
            # images share one resized size — confirm with callers.
            resized_height, resized_width = smart_resize(
                height,
                width,
                factor=patch_size * merge_size,
                min_pixels=size["shortest_edge"],
                max_pixels=size["longest_edge"],
            )
            image = resize(
                image,
                size=(resized_height, resized_width),
                resample=resample,
                input_data_format=input_data_format,
            )
        if do_rescale:
            image = self.rescale(image, scale=rescale_factor, input_data_format=input_data_format)
        if do_normalize:
            image = self.normalize(
                image=image,
                mean=image_mean,
                std=image_std,
                input_data_format=input_data_format,
            )
        image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
        processed_images.append(image)
    patches = np.array(processed_images)
    # Work in channels-first layout from here on.
    if data_format == ChannelDimension.LAST:
        patches = patches.transpose(0, 3, 1, 2)
    # A single image is tiled `temporal_patch_size` times along the leading axis
    # (no-op for the default temporal_patch_size of 1).
    if patches.shape[0] == 1:
        patches = np.tile(patches, (temporal_patch_size, 1, 1, 1))
    channel = patches.shape[1]
    grid_t = patches.shape[0] // temporal_patch_size
    grid_h, grid_w = (
        resized_height // patch_size,
        resized_width // patch_size,
    )
    # Carve every frame into a (grid_h, grid_w) grid of patch_size x patch_size
    # patches, ordered (t, h, w) after the transpose below.
    patches = patches.reshape(
        grid_t,
        temporal_patch_size,
        channel,
        grid_h,
        patch_size,
        grid_w,
        patch_size,
    )
    patches = patches.transpose(0, 3, 5, 2, 1, 4, 6)
    # Only still images (temporal_patch_size == 1) are supported here.
    if temporal_patch_size != 1:
        raise ValueError(f"temporal_patch_size must be 1!, but got {temporal_patch_size}!")
    flatten_patches = patches.reshape(grid_t * grid_h * grid_w, channel, patch_size, patch_size)
    return flatten_patches, (grid_t, grid_h, grid_w)
class PaddleOCRVLImageProcessorFast(BaseImageProcessorFast):
    """Fast (torch-backed) image processor for PaddleOCR-VL.

    Images are resized to a resolution aligned to `patch_size * merge_size`
    and bounded by the `min_pixels`/`max_pixels` budget, rescaled/normalized,
    and flattened into `patch_size` x `patch_size` patches. A per-image
    `(temporal, height, width)` patch grid is returned alongside the pixels.
    """

    def __init__(
        self,
        do_resize: bool = True,
        size: dict[str, int] | None = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_rescale: bool = True,
        rescale_factor: int | float = 1 / 255,
        do_normalize: bool = True,
        image_mean: float | list[float] | None = None,
        image_std: float | list[float] | None = None,
        do_convert_rgb: bool = True,
        min_pixels: int = 384 * 384,
        max_pixels: int = 1536 * 1536,
        patch_size: int = 14,
        temporal_patch_size: int = 1,
        merge_size: int = 2,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        # Bug fix: the previous `if size is not None and (...invalid...): raise /
        # else: size = defaults` control flow silently replaced a *valid*
        # user-supplied `size` with the defaults. Defaults now apply only when
        # no `size` was given; an invalid dict still raises.
        if size is None:
            size = {"shortest_edge": 384 * 384, "longest_edge": 1536 * 1536}
        elif "shortest_edge" not in size or "longest_edge" not in size:
            raise ValueError("size must contain 'shortest_edge' and 'longest_edge' keys.")
        # backward compatibility: override size with min_pixels and max_pixels if they are provided
        if min_pixels is not None:
            size["shortest_edge"] = min_pixels
        if max_pixels is not None:
            size["longest_edge"] = max_pixels
        self.min_pixels = size["shortest_edge"]
        self.max_pixels = size["longest_edge"]
        self.size = size
        self.do_resize = do_resize
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.patch_size = patch_size
        self.temporal_patch_size = temporal_patch_size
        self.merge_size = merge_size
        self.do_convert_rgb = do_convert_rgb

    def _preprocess(
        self,
        images: list["torch.Tensor"],
        do_resize: bool,
        size: SizeDict,
        interpolation: Optional["F.InterpolationMode"],
        do_rescale: bool,
        rescale_factor: float,
        do_normalize: bool,
        image_mean: float | list[float] | None,
        image_std: float | list[float] | None,
        disable_grouping: bool | None,
        return_tensors: str | TensorType | None,
        patch_size: int | None = None,
        temporal_patch_size: int | None = None,
        merge_size: int | None = None,
        **kwargs,
    ):
        """Resize/normalize `images` and flatten them into patch sequences.

        Returns:
            `BatchFeature` with `pixel_values` of shape
            `(num_images, grid_t * grid_h * grid_w, channel, patch_size, patch_size)`
            and `image_grid_thw` of shape `(num_images, 3)`.
        """
        patch_size = patch_size if patch_size is not None else self.patch_size
        temporal_patch_size = temporal_patch_size if temporal_patch_size is not None else self.temporal_patch_size
        merge_size = merge_size if merge_size is not None else self.merge_size
        # Stack same-shaped images so the resize runs on batched tensors.
        grouped_images, grouped_images_index = group_images_by_shape(images, disable_grouping=disable_grouping)
        resized_images_grouped = {}
        for shape, stacked_images in grouped_images.items():
            height, width = stacked_images.shape[-2:]
            if do_resize:
                # Target resolution is a multiple of `patch_size * merge_size`,
                # bounded by the configured pixel budget.
                resized_height, resized_width = smart_resize(
                    height,
                    width,
                    factor=patch_size * merge_size,
                    min_pixels=size["shortest_edge"],
                    max_pixels=size["longest_edge"],
                )
                stacked_images = self.resize(
                    image=stacked_images,
                    size=SizeDict(height=resized_height, width=resized_width),
                    interpolation=interpolation,
                )
            resized_images_grouped[shape] = stacked_images
        resized_images = reorder_images(resized_images_grouped, grouped_images_index)
        # Group images by size for further processing
        # Needed in case do_resize is False, or resize returns images with different sizes
        grouped_images, grouped_images_index = group_images_by_shape(resized_images, disable_grouping=disable_grouping)
        processed_images_grouped = {}
        processed_grids = {}
        for shape, stacked_images in grouped_images.items():
            resized_height, resized_width = stacked_images.shape[-2:]
            # Fused rescale and normalize
            patches = self.rescale_and_normalize(
                stacked_images, do_rescale, rescale_factor, do_normalize, image_mean, image_std
            )
            if patches.ndim == 4:
                # add a temporal dimension if we have images
                patches = patches.unsqueeze(1)
            if patches.shape[1] % temporal_patch_size != 0:
                # Pad the temporal axis by repeating the last frame so it divides
                # evenly into temporal patches.
                repeats = patches[:, -1:].repeat(1, temporal_patch_size - 1, 1, 1, 1)
                patches = torch.cat([patches, repeats], dim=1)
            batch_size, grid_t, channel = patches.shape[:3]
            grid_t = grid_t // temporal_patch_size
            grid_h, grid_w = (
                resized_height // patch_size,
                resized_width // patch_size,
            )
            # Carve each frame into a (grid_h, grid_w) grid of patches, then
            # flatten the patches in (t, h, w) order.
            patches = patches.view(
                batch_size,
                grid_t,
                temporal_patch_size,
                channel,
                grid_h,
                patch_size,
                grid_w,
                patch_size,
            )
            patches = patches.permute(0, 1, 4, 6, 3, 2, 5, 7)
            flatten_patches = patches.reshape(batch_size, grid_t * grid_h * grid_w, channel, patch_size, patch_size)
            processed_images_grouped[shape] = flatten_patches
            processed_grids[shape] = [[grid_t, grid_h, grid_w]] * batch_size
        processed_images = reorder_images(processed_images_grouped, grouped_images_index)
        processed_grids = reorder_images(processed_grids, grouped_images_index)
        pixel_values = torch.cat(processed_images, dim=0)
        image_grid_thw = torch.tensor(processed_grids)
        return BatchFeature(
            data={"pixel_values": pixel_values, "image_grid_thw": image_grid_thw}, tensor_type=return_tensors
        )
class PaddleOCRVLProcessorKwargs(ProcessingKwargs, total=False):
    # Default keyword arguments merged into the tokenizer call made by
    # `PaddleOCRVLProcessor.__call__`.
    _defaults = {
        "text_kwargs": {
            "padding": False,
            "return_mm_token_type_ids": True,
        },
    }
class PaddleOCRVLProcessor(ProcessorMixin):
    r"""
    [`PaddleOCRVLProcessor`] offers all the functionalities of [`PaddleOCRVLImageProcessor`] and [`LLamaTokenizerFast`]. See the
    [`~PaddleOCRVLProcessor.__call__`] and [`~PaddleOCRVLProcessor.decode`] for more information.
    Args:
        image_processor ([`PaddleOCRVLImageProcessor`], *optional*):
            The image processor is a required input.
        tokenizer ([`LLamaTokenizerFast`], *optional*):
            The tokenizer is a required input.
        chat_template (`str`, *optional*): A Jinja template which will be used to convert lists of messages
            in a chat into a tokenizable string.
    """

    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor=None, tokenizer=None, chat_template=None, **kwargs):
        # NOTE(review): the tokenizer is dereferenced before `super().__init__`
        # runs, so passing `tokenizer=None` (documented as *optional*) raises
        # AttributeError here — confirm a tokenizer is always provided upstream.
        self.image_token = tokenizer.image_token
        self.image_token_id = tokenizer.image_token_id
        super().__init__(image_processor, tokenizer, chat_template=chat_template)

    def __call__(
        self,
        images: ImageInput = None,
        text: TextInput | PreTokenizedInput | list[TextInput] | list[PreTokenizedInput] = None,
        **kwargs: Unpack[PaddleOCRVLProcessorKwargs],
    ) -> BatchFeature:
        """
        Args:
            images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`):
                The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
                tensor. Both channels-first and channels-last formats are supported.
            text (`str`, `List[str]`, `List[List[str]]`):
                The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
                (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
                `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
            return_tensors (`str` or [`~utils.TensorType`], *optional*):
                If set, will return tensors of a particular framework. Acceptable values are:
                - `'tf'`: Return TensorFlow `tf.constant` objects.
                - `'pt'`: Return PyTorch `torch.Tensor` objects.
                - `'np'`: Return NumPy `np.ndarray` objects.
                - `'jax'`: Return JAX `jnp.ndarray` objects.
        Returns:
            [`BatchFeature`]: A [`BatchFeature`] with the following fields:
            - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
            - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
              `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
              `None`).
            - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
            - **image_grid_thw** -- List of image 3D grid in LLM. Returned when `images` is not `None`.
        """
        output_kwargs = self._merge_kwargs(
            PaddleOCRVLProcessorKwargs,
            tokenizer_init_kwargs=self.tokenizer.init_kwargs,
            **kwargs,
        )
        if images is not None:
            image_inputs = self.image_processor(images=images, **output_kwargs["images_kwargs"])
            image_grid_thw = image_inputs["image_grid_thw"]
        else:
            image_inputs = {}
            image_grid_thw = None
        if not isinstance(text, list):
            text = [text]
        # Copy so the caller's list is not mutated by the in-place expansion below.
        text = text.copy()
        if image_grid_thw is not None:
            # Expand each image token into one placeholder per post-merge patch:
            # t*h*w raw patches divided by merge_size**2.
            index = 0
            for i in range(len(text)):
                while self.image_token in text[i]:
                    text[i] = text[i].replace(
                        self.image_token,
                        "<|placeholder|>"
                        * (
                            image_grid_thw[index].prod()
                            // self.image_processor.merge_size
                            // self.image_processor.merge_size
                        ),
                        1,
                    )
                    index += 1
                text[i] = text[i].replace("<|placeholder|>", self.image_token)
        return_tensors = output_kwargs["text_kwargs"].pop("return_tensors", None)
        return_mm_token_type_ids = output_kwargs["text_kwargs"].pop("return_mm_token_type_ids", False)
        # Tokenize into python lists first; tensor conversion happens in BatchFeature.
        text_inputs = self.tokenizer(text, **output_kwargs["text_kwargs"], return_tensors=None)
        if return_mm_token_type_ids:
            # Token type 1 marks image-token positions, 0 marks text.
            array_ids = np.array(text_inputs["input_ids"])
            mm_token_type_ids = np.zeros_like(text_inputs["input_ids"])
            mm_token_type_ids[array_ids == self.image_token_id] = 1
            text_inputs["mm_token_type_ids"] = mm_token_type_ids.tolist()
        return BatchFeature(data={**text_inputs, **image_inputs}, tensor_type=return_tensors)
class PaddleOCRVisionConfig(SiglipVisionConfig):
    r"""
    This is the configuration class to store the configuration of a [`PaddleOCRVisionModel`]. It is used to instantiate a
    PaddleOCRVL vision encoder according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the vision encoder of the PaddleOCRVL
    [PaddlePaddle/PaddleOCRVL](https://huggingface.co/PaddlePaddle/PaddleOCR-VL) architecture.
    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.
    Args:
        hidden_size (`int`, *optional*, defaults to 1152):
            Dimensionality of the encoder layers and the pooler layer.
        intermediate_size (`int`, *optional*, defaults to 4304):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        num_hidden_layers (`int`, *optional*, defaults to 27):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 16):
            Number of attention heads for each attention layer in the Transformer encoder.
        num_channels (`int`, *optional*, defaults to 3):
            Number of channels in the input images.
        image_size (`int`, *optional*, defaults to 384):
            The size (resolution) of each image.
        patch_size (`int`, *optional*, defaults to 14):
            The size (resolution) of each patch.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu_pytorch_tanh"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"` and `"gelu_new"` `"quick_gelu"` are supported.
        layer_norm_eps (`float`, *optional*, defaults to 1e-06):
            The epsilon used by the layer normalization layers.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        spatial_merge_size (`int`, *optional*, defaults to 2):
            The size used for merging spatial dimensions.
    Example:
    ```python
    >>> from transformers import PaddleOCRVisionConfig, PaddleOCRVisionModel
    >>> # Initializing a PaddleOCRVisionConfig with PaddlePaddle/PaddleOCR-VL style configuration
    >>> configuration = PaddleOCRVisionConfig()
    >>> # Initializing a PaddleOCRVisionModel (with random weights) from the PaddlePaddle/PaddleOCR-VL style configuration
    >>> model = PaddleOCRVisionModel(configuration)
    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```
    """

    model_type = "paddleocr_vl_vision"
    base_config_key = "vision_config"

    def __init__(
        self,
        hidden_size=1152,
        intermediate_size=4304,
        num_hidden_layers=27,
        num_attention_heads=16,
        num_channels=3,
        image_size=384,
        patch_size=14,
        hidden_act="gelu_pytorch_tanh",
        layer_norm_eps=1e-6,
        attention_dropout=0.0,
        spatial_merge_size=2,
        **kwargs,
    ):
        # NOTE(review): bare `super().__init__()` drops the explicit arguments;
        # this matches the "modular" transformers pattern where codegen expands
        # the parent constructor with this signature — confirm against the
        # generated configuration file.
        super().__init__()
        # Only knob added on top of the Siglip vision config: the window size
        # used when merging neighboring patches.
        self.spatial_merge_size = spatial_merge_size
class PaddleOCRTextConfig(Ernie4_5Config):
    # Text backbone config: identical to Ernie 4.5 apart from the model_type tag.
    model_type = "paddleocr_vl_text"
class PaddleOCRVLConfig(Qwen2VLConfig):
    r"""
    This is the configuration class to store the configuration of a [`PaddleOCRVLForConditionalGeneration`]. It is used to instantiate a
    PaddleOCRVL model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of
    PaddleOCRVL [PaddlePaddle/PaddleOCR-VL](https://huggingface.co/PaddlePaddle/PaddleOCR-VL).
    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.
    Args:
        text_config (`Union[PreTrainedConfig, dict]`, *optional*, defaults to `PaddleOCRTextConfig`):
            The config object or dictionary of the text backbone.
        vision_config (`Union[PreTrainedConfig, dict]`, *optional*, defaults to `PaddleOCRVisionConfig`):
            The config object or dictionary of the vision backbone.
        image_token_id (`int`, *optional*, defaults to 100295):
            The image token index to encode the image prompt.
        video_token_id (`int`, *optional*, defaults to 100296):
            The video token index to encode the image prompt.
        vision_start_token_id (`int`, *optional*, defaults to 101305):
            The token index to denote start of vision input.
        vision_end_token_id (`int`, *optional*, defaults to 101306):
            The token index to denote end of vision input.
        tie_word_embeddings (`bool`, *optional*, defaults to `True`):
            Whether the model's input and output word embeddings should be tied.
    ```python
    >>> from transformers import PaddleOCRVLForConditionalGeneration, PaddleOCRVLConfig
    >>> # Initializing a PaddleOCRVL style configuration
    >>> configuration = PaddleOCRVLConfig()
    >>> # Initializing a model from the PaddleOCRVL style configuration
    >>> model = PaddleOCRVLForConditionalGeneration(configuration)
    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    # Sub-config classes used when `text_config`/`vision_config` are given as dicts.
    sub_configs = {"vision_config": PaddleOCRVisionConfig, "text_config": PaddleOCRTextConfig}

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        image_token_id=100295,
        video_token_id=100296,
        vision_start_token_id=101305,
        vision_end_token_id=101306,
        tie_word_embeddings=True,
        **kwargs,
    ):
        # NOTE(review): bare `super().__init__()` — modular-conversion pattern;
        # attribute assignment is expected to be expanded at codegen time.
        super().__init__()
class PaddleOCRProjector(nn.Module):
def __init__(self, config: PaddleOCRVLConfig):
super().__init__()
self.merge_kernel_size = (config.vision_config.spatial_merge_size, config.vision_config.spatial_merge_size)
hidden_size = config.vision_config.hidden_size * self.merge_kernel_size[0] * self.merge_kernel_size[1]
self.pre_norm = torch.nn.LayerNorm(config.vision_config.hidden_size, eps=1e-05)
self.linear_1 = nn.Linear(hidden_size, hidden_size, bias=True)
self.act = GELUActivation()
self.linear_2 = nn.Linear(hidden_size, config.text_config.hidden_size, bias=True)
def forward(self, image_features: torch.Tensor, image_grid_thw: torch.Tensor) -> torch.Tensor:
image_features_chunks = image_features.split(image_grid_thw.prod(dim=1).tolist(), dim=0)
m1, m2 = self.merge_kernel_size
processed_features = []
for image_feature, image_grid in zip(image_features_chunks, image_grid_thw):
image_feature = self.pre_norm(image_feature)
t, h, w = image_grid
d = image_feature.shape[-1]
h_block = h // m1
w_block = w // m2
image_feature = image_feature.reshape(t, h_block, m1, w_block, m2, d)
image_feature = image_feature.transpose(2, 3)
image_feature = image_feature.reshape(t * h_block * w_block, m1 * m2 * d)
hidden_states = self.linear_1(image_feature)
hidden_states = self.act(hidden_states)
hidden_states = self.linear_2(hidden_states)
processed_features.append(hidden_states)
return torch.cat(processed_features, dim=0)
class PaddleOCRVisionRotaryEmbedding(VisionRotaryEmbedding):
    # Rotary embedding for the vision tower, reused as-is under a
    # model-specific name (modular-converter convention).
    pass
class PaddleOCRRotaryEmbedding(Qwen2VLRotaryEmbedding):
    # Qwen2-VL multimodal rotary embedding reused as-is for the text decoder.
    pass
class PaddleOCRMLP(Ernie4_5MLP):
    def __init__(self, config: PaddleOCRTextConfig):
        # NOTE(review): bare super().__init__() — modular-file pattern where the
        # parent constructor body is inlined with this signature at codegen time;
        # confirm against the generated modeling file.
        super().__init__()
class PaddleOCRAttention(Qwen2_5OmniAttention):
    def __init__(self, config: PaddleOCRVLConfig, layer_idx: int | None = None):
        # NOTE(review): bare super().__init__() — modular-conversion pattern; the
        # generated class presumably sets hidden_size/num_heads/head_dim before
        # the projections below are rebuilt. Confirm against the generated file.
        super().__init__()
        # Attention dropout is disabled for this model.
        self.attention_dropout = 0.0
        # Rebuild the projections so bias usage follows `config.use_bias`.
        self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=config.use_bias)
        self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.use_bias)
        self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.use_bias)
        self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=config.use_bias)
class PaddleOCRRMSNorm(Ernie4_5RMSNorm):
    # Ernie 4.5 RMSNorm reused as-is under a model-specific name.
    pass
class PaddleOCRDecoderLayer(Ernie4_5DecoderLayer):
    def __init__(self, config: PaddleOCRTextConfig, layer_idx: int):
        # NOTE(review): bare super().__init__() — modular-file pattern; the parent
        # constructor body is inlined with this signature at codegen time.
        super().__init__()
@auto_docstring
class PaddleOCRVLPreTrainedModel(PreTrainedModel):
    # Shared base wiring PaddleOCR-VL modules into the transformers
    # loading / initialization machinery.
    config: PaddleOCRVLConfig
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    _no_split_modules = ["PaddleOCRDecoderLayer"]
    _skip_keys_device_placement = ["past_key_values"]
    _supports_flash_attn = True
    _supports_sdpa = True
    _supports_flex_attn = True
    _can_compile_fullgraph = True
    _supports_attention_backend = True
    # Modules whose intermediate outputs can be recorded via
    # output_hidden_states / output_attentions.
    _can_record_outputs = {
        "hidden_states": PaddleOCRDecoderLayer,
        "attentions": PaddleOCRAttention,
    }

    def _init_weights(self, module):
        # Generic init from PreTrainedModel, plus buffers the generic scheme
        # does not cover.
        super()._init_weights(module)
        if isinstance(module, PaddleOCRVisionEmbeddings):
            # position_ids buffer: [0, 1, ..., num_positions - 1] with a leading batch dim.
            init.copy_(module.position_ids, torch.arange(module.position_ids.shape[-1]).expand((1, -1)))
        elif isinstance(module, PaddleOCRVisionRotaryEmbedding):
            # Rotary inverse-frequency table: theta^(-2i/dim) for even indices i.
            inv_freq = 1.0 / (module.theta ** (torch.arange(0, module.dim, 2, dtype=torch.float) / module.dim))
            init.copy_(module.inv_freq, inv_freq)
class PaddleOCRTextModel(PaddleOCRVLPreTrainedModel, Ernie4_5Model):
    # Ernie 4.5 decoder stack reused under the PaddleOCR-VL pretrained base.
    def __init__(self, config: PaddleOCRTextConfig):
        super().__init__(config)

    @merge_with_config_defaults
    @capture_outputs
    @auto_docstring
    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: Cache | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        cache_position: torch.LongTensor | None = None,
        use_cache: bool | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> BaseModelOutputWithPast:
        # Exactly one of input_ids / inputs_embeds must be provided.
        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
        if inputs_embeds is None:
            inputs_embeds: torch.Tensor = self.embed_tokens(input_ids)
        if use_cache and past_key_values is None:
            past_key_values = DynamicCache(config=self.config)
        if cache_position is None:
            # Absolute positions of the current tokens, offset by what is cached.
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position: torch.Tensor = (
                torch.arange(inputs_embeds.shape[1], device=inputs_embeds.device) + past_seen_tokens
            )
        if position_ids is None:
            # Expand to 3 rows: one per rotary position stream
            # (see the Qwen2VL-style rotary embedding used by this model).
            position_ids = cache_position.view(1, 1, -1).expand(3, inputs_embeds.shape[0], -1)
        elif position_ids.ndim == 2:
            position_ids = position_ids[None, ...].expand(3, position_ids.shape[0], -1)
        # A 4-row position_ids carries plain text positions in row 0 (used for
        # mask construction) followed by the 3 rotary rows.
        if position_ids.ndim == 3 and position_ids.shape[0] == 4:
            text_position_ids = position_ids[0]
            position_ids = position_ids[1:]
        else:
            text_position_ids = None
        causal_mask = create_causal_mask(
            config=self.config,
            inputs_embeds=inputs_embeds,
            attention_mask=attention_mask,
            cache_position=cache_position,
            past_key_values=past_key_values,
            position_ids=text_position_ids,
        )
        hidden_states = inputs_embeds
        # cos/sin tables shared by every decoder layer.
        position_embeddings = self.rotary_emb(hidden_states, position_ids=position_ids)
        for decoder_layer in self.layers[: self.config.num_hidden_layers]:
            hidden_states = decoder_layer(
                hidden_states,
                attention_mask=causal_mask,
                position_embeddings=position_embeddings,
                position_ids=text_position_ids,
                past_key_values=past_key_values,
                use_cache=use_cache,
                cache_position=cache_position,
                **kwargs,
            )
        hidden_states = self.norm(hidden_states)
        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values,
        )
class PaddleOCRVisionEmbeddings(SiglipVisionEmbeddings):
    """Siglip-style patch embeddings whose learned position table is
    bilinearly resized per image, so arbitrary patch-grid shapes can be
    embedded."""

    def __init__(self, config: PaddleOCRVisionConfig):
        super().__init__()

    def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
        # The learned table covers a square grid of sqrt(num_positions) patches
        # per side; resize it to the requested (height, width) patch grid.
        table = self.position_embedding.weight.unsqueeze(0)
        num_positions = self.position_embedding.weight.shape[0]
        dim = embeddings.shape[-1]
        side = torch_int(num_positions**0.5)
        table = table.reshape(1, side, side, dim).permute(0, 3, 1, 2)
        table = nn.functional.interpolate(
            table,
            size=(height, width),
            mode="bilinear",
            align_corners=False,
        )
        return table.permute(0, 2, 3, 1).view(1, -1, dim)

    def forward(
        self,
        pixel_values: torch.FloatTensor,
        image_grid_thw: list[tuple[int, int, int] | list[tuple[int, int, int]]] | None = None,
    ) -> torch.Tensor:
        """
        Args:
            pixel_values (`torch.FloatTensor` of shape `(batch_size, sequence_length, image_channels, patch_size, patch_size)`):
                The tensors corresponding to the input images.
            image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*):
                The temporal, height and width of feature shape of each image in LLM.
        """
        batch_size, seq_len, channels, height, width = pixel_values.shape
        weight_dtype = self.patch_embedding.weight.dtype
        flat_pixels = pixel_values.reshape(batch_size * seq_len, channels, height, width)
        # Each patch_size x patch_size patch collapses to one embedding vector.
        patch_embeds = self.patch_embedding(flat_pixels.to(dtype=weight_dtype))
        embeddings = patch_embeds.flatten(-2).squeeze(-1).reshape(batch_size, seq_len, -1).squeeze(0)
        pieces = []
        offset = 0
        for t, h, w in image_grid_thw:
            count = t * h * w
            chunk = embeddings[offset : offset + count, :]
            # One (h, w) position grid per frame, repeated across t frames.
            positions = self.interpolate_pos_encoding(chunk, h, w).squeeze(0).repeat(t, 1)
            pieces.append(chunk + positions)
            offset = offset + count
        return torch.concat(pieces, dim=0)
class PaddleOCRVisionAttention(VideoLlama3VisionAttention):
    def __init__(self, config: PaddleOCRVisionConfig):
        # NOTE(review): bare super().__init__() — modular-file pattern; parent
        # constructor is inlined with this signature at codegen time.
        super().__init__()
class PaddleOCRVisionMLP(SiglipMLP):
    def __init__(self, config: PaddleOCRVisionConfig):
        # NOTE(review): bare super().__init__() — modular-file pattern; parent
        # constructor is inlined with this signature at codegen time.
        super().__init__()
class PaddleOCRVisionEncoderLayer(VideoLlama3VisionEncoderLayer):
    def __init__(self, config: PaddleOCRVisionConfig):
        # NOTE(review): bare super().__init__() — modular-file pattern; parent
        # constructor is inlined with this signature at codegen time.
        super().__init__()
class PaddleOCRVisionEncoder(VideoLlama3VisionEncoder):
    def __init__(self, config: PaddleOCRVisionConfig):
        super().__init__()
        embed_dim = config.hidden_size
        num_heads = config.num_attention_heads
        head_dim = embed_dim // num_heads
        # Rotary table over half the head dim; the height half and width half
        # are concatenated per position in forward().
        self.rotary_pos_emb = PaddleOCRVisionRotaryEmbedding(head_dim // 2)

    def forward(
        self,
        inputs_embeds: torch.FloatTensor,
        cu_seqlens: torch.Tensor,
        attention_mask: torch.Tensor | None = None,
        image_grid_thw: list[tuple[int, int, int] | list[tuple[int, int, int]]] | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> BaseModelOutput:
        r"""
        inputs_embeds (`torch.FloatTensor` of shape `(sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
            This is useful if you want more control over how to convert `input_ids` indices into associated vectors
            than the model's internal embedding lookup matrix.
        cu_seqlens (`torch.Tensor` of shape `(num_images + 1,)`):
            The cumulative sequence lengths of each image or video feature.
        attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            The attention_mask used in forward function shape [batch_size X sequence_length] if not None.
        image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*):
            The temporal, height and width of feature shape of each image in LLM.
        """
        device = inputs_embeds.device
        hidden_states = inputs_embeds
        # NOTE(review): the bidirectional mask built here is never passed to the
        # encoder layers below (they only receive cu_seqlens) — confirm whether
        # this is intentional (e.g. varlen attention path) or a dropped argument.
        attention_mask = create_bidirectional_mask(
            config=self.config,
            inputs_embeds=inputs_embeds,
            attention_mask=attention_mask,
        )
        # Per-patch (row, column) indices within each image, concatenated over images.
        split_hids = []
        split_wids = []
        for t, h, w in image_grid_thw:
            image_pids = torch.arange(t * h * w, device=device) % (h * w)
            sample_hids = image_pids // w
            sample_wids = image_pids % w
            split_hids.append(sample_hids)
            split_wids.append(sample_wids)
        width_position_ids = torch.concat(split_wids, dim=0)
        height_position_ids = torch.concat(split_hids, dim=0)
        pids = torch.stack([height_position_ids, width_position_ids], dim=-1)
        max_grid_size = pids.max() + 1
        # Look up rotary angles for every (row, col) pair; the repeat doubles the
        # table so cos/sin span the full head dimension.
        rotary_embeddings_max_grid = self.rotary_pos_emb(max_grid_size)
        rotary_embeddings = rotary_embeddings_max_grid[pids].flatten(1)
        rotary_embeddings = rotary_embeddings.repeat(1, 2)
        position_embeddings = (rotary_embeddings.cos(), rotary_embeddings.sin())
        for encoder_layer in self.layers:
            hidden_states = encoder_layer(
                hidden_states,
                cu_seqlens=cu_seqlens,
                position_embeddings=position_embeddings,
                **kwargs,
            )
        return BaseModelOutput(
            last_hidden_state=hidden_states,
        )
class PaddleOCRVisionTransformer(PaddleOCRVLPreTrainedModel):
    # Variable-resolution ViT backbone: embeddings -> encoder -> final LayerNorm.
    config: PaddleOCRVisionConfig
    main_input_name = "pixel_values"
    input_modalities = "image"
    _can_record_outputs = {
        "hidden_states": PaddleOCRVisionEncoderLayer,
        "attentions": PaddleOCRVisionAttention,
    }

    def __init__(self, config: PaddleOCRVisionConfig):
        super().__init__(config)
        self.config = config
        embed_dim = config.hidden_size
        self.embeddings = PaddleOCRVisionEmbeddings(config)
        self.encoder = PaddleOCRVisionEncoder(config)
        self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
        self.post_init()

    @merge_with_config_defaults
    @capture_outputs(tie_last_hidden_states=False)
    def forward(
        self,
        pixel_values: torch.FloatTensor,
        cu_seqlens: torch.Tensor,
        attention_mask: torch.Tensor | None = None,
        image_grid_thw: list[tuple[int, int, int] | list[tuple[int, int, int]]] | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> BaseModelOutputWithPooling:
        """
        Args:
            pixel_values (`torch.FloatTensor` of shape `(batch_size, sequence_length, patch_size * patch_size * image_channels)`):
                The tensors corresponding to the input images.
            cu_seqlens (`torch.Tensor` of shape `(num_images + 1,)`):
                The cumulative sequence lengths of each image or video feature.
            attention_mask (`torch.Tensor`, *optional*):
                The attention_mask used in forward function shape [batch_size X sequence_length] if not None.
            image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*):
                The temporal, height and width of feature shape of each image in LLM.
        """
        hidden_states = self.embeddings(pixel_values, image_grid_thw=image_grid_thw)
        encoder_outputs: BaseModelOutput = self.encoder(
            inputs_embeds=hidden_states,
            cu_seqlens=cu_seqlens,
            attention_mask=attention_mask,
            image_grid_thw=image_grid_thw,
            **kwargs,
        )
        last_hidden_state = encoder_outputs.last_hidden_state
        last_hidden_state = self.post_layernorm(last_hidden_state)
        # No pooling head: pooler_output is always None.
        return BaseModelOutputWithPooling(
            last_hidden_state=last_hidden_state,
            pooler_output=None,
        )
class PaddleOCRVisionModel(PaddleOCRVLPreTrainedModel):
    """Thin wrapper exposing `PaddleOCRVisionTransformer` as a standalone vision model."""

    config: PaddleOCRVisionConfig
    main_input_name = "pixel_values"
    input_modalities = "image"

    def __init__(self, config: PaddleOCRVisionConfig):
        super().__init__(config)
        self.vision_model = PaddleOCRVisionTransformer(config)
        # Initialize weights and apply final processing
        self.post_init()

    def forward(
        self,
        pixel_values: torch.FloatTensor,
        cu_seqlens: torch.Tensor,
        image_grid_thw: list[tuple[int, int, int] | list[tuple[int, int, int]]] | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple | BaseModelOutputWithPooling:
        """
        Args:
            pixel_values (`torch.FloatTensor` of shape `(batch_size, sequence_length, image_channels, patch_size, patch_size)`):
                The tensors corresponding to the input images.
            cu_seqlens (`torch.Tensor` of shape `(num_images + 1,)`):
                The cumulative sequence lengths of each image or video feature.
            image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*):
                The temporal, height and width of feature shape of each image in LLM.
        """
        # Delegate everything to the inner transformer; this class only exists so the
        # vision tower can be loaded/used independently of the full VL model.
        outputs = self.vision_model(
            pixel_values=pixel_values,
            cu_seqlens=cu_seqlens,
            image_grid_thw=image_grid_thw,
            **kwargs,
        )
        return outputs
class PaddleOCRVLModelOutputWithPast(Qwen2VLModelOutputWithPast):
    """Base-model output for PaddleOCR-VL; identical in structure to the Qwen2-VL variant."""

    pass
class PaddleOCRVLCausalLMOutputWithPast(Qwen2VLCausalLMOutputWithPast):
    """Causal-LM output for PaddleOCR-VL; identical in structure to the Qwen2-VL variant."""

    pass
class PaddleOCRVLModel(Qwen2VLModel):
    """PaddleOCR-VL backbone: vision tower + projector + text decoder (no LM head)."""

    # Original checkpoints store the decoder weights under `model.*`; remap to `language_model.*`.
    _checkpoint_conversion_mapping = {"^model": "language_model"}
    _keys_to_ignore_on_load_unexpected = ["packing_position_embedding", "vision_model.head"]

    def __init__(self, config: PaddleOCRVLConfig):
        super().__init__(config)
        self.visual = PaddleOCRVisionModel._from_config(config.vision_config)
        self.projector = PaddleOCRProjector(config)
        self.language_model = PaddleOCRTextModel._from_config(config.text_config)
        # Cached rope index offsets; read back in `forward` when building the output.
        self.rope_deltas = None
        self.post_init()

    def get_input_embeddings(self):
        """Return the decoder's token embedding module."""
        return self.language_model.embed_tokens

    def set_input_embeddings(self, value):
        """Replace the decoder's token embedding module."""
        self.language_model.embed_tokens = value

    def get_video_features(self):
        # PaddleOCR-VL is image-only; fail loudly instead of inheriting Qwen2-VL's video path.
        raise AttributeError("PaddleOCRVLModel does not support video.")

    @can_return_tuple
    @auto_docstring
    def get_image_features(
        self,
        pixel_values: torch.FloatTensor,
        image_grid_thw: torch.LongTensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple | BaseModelOutputWithPooling:
        r"""
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):
            The tensors corresponding to the input images.
        image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*):
            The temporal, height and width of feature shape of each image in LLM.
        """
        # Pack all image patches into one sequence (leading batch dim of 1); per-image
        # boundaries are conveyed to the vision tower through `cu_seqlens` below.
        pixel_values = pixel_values.type(self.visual.dtype).unsqueeze(0)
        cu_seqlens = torch.repeat_interleave(image_grid_thw[:, 1] * image_grid_thw[:, 2], image_grid_thw[:, 0]).cumsum(
            dim=0,
            # Select dtype based on the following factors:
            #  - FA2 requires that cu_seqlens_q must have dtype int32
            #  - torch.onnx.export requires that cu_seqlens_q must have same dtype as grid_thw
            # See https://github.com/huggingface/transformers/pull/34852 for more information
            dtype=image_grid_thw.dtype if torch.jit.is_tracing() else torch.int32,
        )
        # Prepend the leading 0 so cu_seqlens has `num_images + 1` entries.
        cu_seqlens = torch.nn.functional.pad(cu_seqlens, (1, 0), value=0)
        vision_outputs = self.visual(
            pixel_values=pixel_values,
            image_grid_thw=image_grid_thw,
            cu_seqlens=cu_seqlens,
            return_dict=True,
            **kwargs,
        )
        image_embeds = vision_outputs.last_hidden_state
        # Project vision features into the text embedding space.
        image_embeds = self.projector(image_embeds, image_grid_thw)
        # Stash the projected features in `pooler_output` so callers can grab them directly.
        vision_outputs.pooler_output = image_embeds
        return vision_outputs

    def get_placeholder_mask(
        self, input_ids: torch.LongTensor, inputs_embeds: torch.FloatTensor, image_features: torch.FloatTensor
    ):
        """
        Obtains multimodal placeholder mask from `input_ids` or `inputs_embeds`, and checks that the placeholder token count is
        equal to the length of multimodal features. If the lengths are different, an error is raised.
        """
        if input_ids is None:
            # No token ids available: detect placeholder positions by comparing embeddings
            # against the embedding of the image token.
            special_image_mask = inputs_embeds == self.get_input_embeddings()(
                torch.tensor(self.config.image_token_id, dtype=torch.long, device=inputs_embeds.device)
            )
            special_image_mask = special_image_mask.all(-1)
        else:
            special_image_mask = input_ids == self.config.image_token_id
        n_image_tokens = special_image_mask.sum()
        special_image_mask = special_image_mask.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device)
        n_image_features = image_features.shape[0] * image_features.shape[1]
        # Compile-friendly equivalent of a ValueError on token/feature count mismatch.
        torch_compilable_check(
            inputs_embeds[special_image_mask].numel() == image_features.numel(),
            f"Image features and image tokens do not match: tokens: {n_image_tokens}, features {n_image_features}",
        )
        return special_image_mask

    @can_return_tuple
    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: list[torch.FloatTensor] | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        use_cache: bool | None = None,
        pixel_values: torch.Tensor | None = None,
        image_grid_thw: torch.LongTensor | None = None,
        mm_token_type_ids: torch.IntTensor | None = None,
        rope_deltas: torch.LongTensor | None = None,
        cache_position: torch.LongTensor | None = None,
        **kwargs,
    ) -> tuple | PaddleOCRVLModelOutputWithPast:
        r"""
        image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*):
            The temporal, height and width of feature shape of each image in LLM.
        rope_deltas (`torch.LongTensor` of shape `(batch_size, )`, *optional*):
            The rope index difference between sequence length and multimodal rope.
        """
        if inputs_embeds is None:
            inputs_embeds = self.language_model.embed_tokens(input_ids)
        if pixel_values is not None:
            # Encode images and splice the projected features into the placeholder slots.
            image_embeds = self.get_image_features(pixel_values, image_grid_thw, return_dict=True).pooler_output
            image_embeds = image_embeds.to(inputs_embeds.device, inputs_embeds.dtype)
            image_mask = self.get_placeholder_mask(input_ids, inputs_embeds=inputs_embeds, image_features=image_embeds)
            inputs_embeds = inputs_embeds.masked_scatter(image_mask, image_embeds)
        if position_ids is None:
            # Build 3D (temporal/height/width-aware) rope position ids for multimodal input.
            position_ids = self.compute_3d_position_ids(
                input_ids=input_ids,
                image_grid_thw=image_grid_thw,
                inputs_embeds=inputs_embeds,
                attention_mask=attention_mask,
                past_key_values=past_key_values,
                mm_token_type_ids=mm_token_type_ids,
            )
        outputs = self.language_model(
            input_ids=None,
            position_ids=position_ids,
            attention_mask=attention_mask,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            cache_position=cache_position,
            **kwargs,
        )
        # NOTE(review): `self.rope_deltas` is only read here — presumably it is updated
        # inside `compute_3d_position_ids` (inherited machinery); confirm against Qwen2VLModel.
        output = PaddleOCRVLModelOutputWithPast(
            last_hidden_state=outputs.last_hidden_state,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            rope_deltas=self.rope_deltas,
        )
        return output
class PaddleOCRVLForConditionalGeneration(Qwen2VLForConditionalGeneration):
    """PaddleOCR-VL model with a language-modeling head, for OCR-style conditional generation."""

    # Remap original checkpoint prefixes onto the composed `model.*` layout; the negative
    # lookahead keeps already-mapped `model.visual/projector/language_model` keys untouched.
    _checkpoint_conversion_mapping = {
        "^visual": "model.visual",
        "^mlp_AR": "model.projector",
        r"^model(?!(\.visual|\.projector|\.language_model))": "model.language_model",
    }
    _keys_to_ignore_on_load_unexpected = ["packing_position_embedding", "vision_model.head"]

    def get_video_features(self):
        # Image-only model: refuse the inherited Qwen2-VL video path.
        raise AttributeError("PaddleOCRVLForConditionalGeneration does not support video.")

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: Cache | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        labels: torch.LongTensor | None = None,
        use_cache: bool | None = None,
        pixel_values: torch.Tensor | None = None,
        image_grid_thw: torch.LongTensor | None = None,
        rope_deltas: torch.LongTensor | None = None,
        mm_token_type_ids: torch.IntTensor | None = None,
        cache_position: torch.LongTensor | None = None,
        logits_to_keep: int | torch.Tensor = 0,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple | PaddleOCRVLCausalLMOutputWithPast:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
        image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*):
            The temporal, height and width of feature shape of each image in LLM.
        rope_deltas (`torch.LongTensor` of shape `(batch_size, )`, *optional*):
            The rope index difference between sequence length and multimodal rope.

        Example:

        ```python
        >>> from transformers import AutoProcessor, PaddleOCRVLForConditionalGeneration

        >>> model = PaddleOCRVLForConditionalGeneration.from_pretrained("PaddlePaddle/PaddleOCR-VL", dtype="bfloat16")
        >>> processor = AutoProcessor.from_pretrained("PaddlePaddle/PaddleOCR-VL")

        >>> messages = [
            {
                "role": "user",
                "content": [
                    {
                        "type": "image",
                        "image": "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/ocr_demo.jpg",
                    },
                    {"type": "text", "text": "OCR:"},
                ],
            }
        ]

        >>> inputs = processor.apply_chat_template(
            messages,
            tokenize=True,
            add_generation_prompt=True,
            return_dict=True,
            return_tensors="pt"
        ).to(model.device)

        >>> # Generate
        >>> generated_ids = model.generate(**inputs, max_new_tokens=1024)
        >>> generated_ids_trimmed = [out_ids[len(in_ids) :] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)]
        >>> output_text = processor.batch_decode(generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        >>> print(output_text)
        ```
        """
        outputs: PaddleOCRVLModelOutputWithPast = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            image_grid_thw=image_grid_thw,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            pixel_values=pixel_values,
            rope_deltas=rope_deltas,
            mm_token_type_ids=mm_token_type_ids,
            cache_position=cache_position,
            **kwargs,
        )
        hidden_states = outputs.last_hidden_state
        # Only project the positions we actually need (generation usually keeps just the last token).
        slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
        logits = self.lm_head(hidden_states[:, slice_indices, :])
        loss = None
        if labels is not None:
            loss = self.loss_function(
                logits=logits, labels=labels, vocab_size=self.config.text_config.vocab_size, **kwargs
            )
        return PaddleOCRVLCausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            rope_deltas=outputs.rope_deltas,
        )
# Public API of this modular file; the auto-generated modeling/configuration/processing
# files re-export these names (some are defined in sibling modules of the package).
__all__ = [
    "PaddleOCRVLForConditionalGeneration",
    "PaddleOCRVLModel",
    "PaddleOCRVLPreTrainedModel",
    "PaddleOCRVisionTransformer",
    "PaddleOCRVLConfig",
    "PaddleOCRTextModel",
    "PaddleOCRVisionModel",
    "PaddleOCRVisionConfig",
    "PaddleOCRTextConfig",
    "PaddleOCRVLImageProcessor",
    "PaddleOCRVLImageProcessorFast",
    "PaddleOCRVLProcessor",
]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/paddleocr_vl/modular_paddleocr_vl.py",
"license": "Apache License 2.0",
"lines": 1212,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:tests/models/paddleocr_vl/test_modeling_paddleocr_vl.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PaddleOCRVL model."""
import copy
import gc
import unittest
import pytest
from parameterized import parameterized
from transformers import (
AutoProcessor,
PaddleOCRVLConfig,
PaddleOCRVLForConditionalGeneration,
is_torch_available,
)
from transformers.testing_utils import (
backend_empty_cache,
require_flash_attn,
require_torch,
require_torch_accelerator,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import (
ModelTesterMixin,
floats_tensor,
ids_tensor,
)
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
class PaddleOCRVLVisionText2TextModelTester:
    """Builds a tiny PaddleOCR-VL config plus matching dummy inputs for the common model tests."""

    def __init__(
        self,
        parent,
        batch_size=7,
        seq_length=13,
        num_channels=3,
        image_height=28,
        image_width=28,
        text_config={
            "pad_token_id": 0,
            "bos_token_id": 1,
            "eos_token_id": 2,
            "vocab_size": 103424,
            "head_dim": 128,
            "hidden_act": "silu",
            "hidden_dropout_prob": 0.0,
            "hidden_size": 32,
            "ignored_index": -100,
            "image_token_id": 100295,
            "intermediate_size": 32,
            "max_position_embeddings": 512,
            "model_type": "paddleocr_vl",
            "num_attention_heads": 4,
            "num_hidden_layers": 2,
            "num_key_value_heads": 2,
            "rms_norm_eps": 1e-05,
            "rope_scaling": {"mrope_section": [16, 24, 24], "rope_type": "default", "type": "default"},
            "rope_theta": 500000,
            "tie_word_embeddings": False,
        },
        vision_start_token_id=101305,
        vision_end_token_id=101306,
        image_token_id=100295,
        is_training=True,
        vision_config={
            "hidden_act": "gelu_pytorch_tanh",
            "hidden_size": 144,
            "intermediate_size": 32,
            "layer_norm_eps": 1e-06,
            "model_type": "paddleocr_vl",
            "num_attention_heads": 4,
            "num_channels": 3,
            "num_hidden_layers": 2,
            "pad_token_id": 0,
            "patch_size": 14,
            "spatial_merge_size": 2,
        },
    ):
        self.parent = parent
        self.bos_token_id = text_config["bos_token_id"]
        self.eos_token_id = text_config["eos_token_id"]
        self.pad_token_id = text_config["pad_token_id"]
        self.num_hidden_layers = text_config["num_hidden_layers"]
        self.num_attention_heads = text_config["num_attention_heads"]
        self.hidden_size = text_config["hidden_size"]
        self.vision_start_token_id = vision_start_token_id
        self.vision_end_token_id = vision_end_token_id
        self.image_token_id = image_token_id
        self.text_config = text_config
        self.vision_config = vision_config
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_height = image_height
        self.image_width = image_width
        self.is_training = is_training
        self.vocab_size = text_config["vocab_size"]
        # One merged image placeholder per prompt; total length = text + image tokens.
        self.num_image_tokens = 1
        self.seq_length = seq_length + self.num_image_tokens

    def get_config(self):
        """Return a tiny `PaddleOCRVLConfig` wired from the tester's sub-configs."""
        return PaddleOCRVLConfig(
            text_config=self.text_config,
            vision_config=self.vision_config,
            vision_start_token_id=self.vision_start_token_id,
            image_token_id=self.image_token_id,
        )

    def prepare_config_and_inputs(self):
        """Return the config plus flattened per-patch pixel values for the whole batch."""
        config = self.get_config()
        patch_size = config.vision_config.patch_size
        # pixel_values are packed per patch: (batch * patches_per_image, C, patch, patch).
        pixel_values = floats_tensor(
            [
                self.batch_size * (self.image_height * self.image_width) // (patch_size**2),
                config.vision_config.num_channels,
                patch_size,
                patch_size,
            ]
        )
        return config, pixel_values

    def prepare_config_and_inputs_for_common(self):
        """Return `(config, inputs_dict)` with input_ids containing vision start/image/end markers."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
        # Fixed prompt prefix, then <vision_start><image...><vision_end> and a fixed suffix.
        input_ids[:, :4] = torch.tensor([100273, 2969, 93963, 93919], dtype=input_ids.dtype, device=input_ids.device)
        input_ids[:, 4] = self.vision_start_token_id
        input_ids[:, 5 : 5 + self.num_image_tokens] = self.image_token_id
        input_ids[:, -8] = self.vision_end_token_id
        input_ids[:, -7:] = torch.tensor(
            [93972, 2497, 93963, 23, 92267, 93963, 93919], dtype=input_ids.dtype, device=input_ids.device
        )
        inputs_dict = {
            "pixel_values": pixel_values,
            # Each image contributes a 1x2x2 feature grid (28/14 = 2 patches per side).
            "image_grid_thw": torch.tensor([[1, 2, 2]] * self.batch_size, device=torch_device),
            "input_ids": input_ids,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict
@require_torch
class PaddleOCRVLModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Model tester for `PaddleOCRVLForConditionalGeneration`.
    """

    all_model_classes = (PaddleOCRVLForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = {"image-text-to-text": PaddleOCRVLForConditionalGeneration}
    _is_composite = True

    def setUp(self):
        self.model_tester = PaddleOCRVLVisionText2TextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PaddleOCRVLConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mismatching_num_image_tokens(self):
        """
        Tests that an explicit error is thrown when the number of image tokens
        doesn't match the number of image placeholders in the text.
        We also test multi-image cases when one prompt has multiple image tokens.
        """
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            model.eval()
            curr_input_dict = copy.deepcopy(input_dict)  # in-place modifications further
            _ = model(**curr_input_dict)  # successful forward with no modifications

            # remove one image but leave all the image tokens in text
            patch_size = config.vision_config.patch_size
            one_img_length = (self.model_tester.image_height * self.model_tester.image_width) // (patch_size**2)
            curr_input_dict["pixel_values"] = curr_input_dict["pixel_values"][-one_img_length:, ...]
            curr_input_dict["image_grid_thw"] = curr_input_dict["image_grid_thw"][-1:, ...]
            with self.assertRaisesRegex(ValueError, "Image features and image tokens do not match"):
                _ = model(**curr_input_dict)

            # simulate multi-image case by concatenating inputs where each has exactly one image/image-token
            input_ids = curr_input_dict["input_ids"][:1]
            pixel_values = curr_input_dict["pixel_values"][:one_img_length]
            image_grid_thw = curr_input_dict["image_grid_thw"][:1]
            input_ids = torch.cat([input_ids, input_ids], dim=0)

            # one image and two image tokens raise an error
            with self.assertRaisesRegex(ValueError, "Image features and image tokens do not match"):
                _ = model(input_ids=input_ids, pixel_values=pixel_values, image_grid_thw=image_grid_thw)

            # two images and two image tokens don't raise an error
            pixel_values = torch.cat([pixel_values, pixel_values], dim=0)
            image_grid_thw = torch.cat([image_grid_thw, image_grid_thw], dim=0)
            _ = model(input_ids=input_ids, pixel_values=pixel_values, image_grid_thw=image_grid_thw)

    # PaddleOCRVL has pixel_values shaped as (bs*patch_len, image_channels, patch_size, patch_size) so we can't slice to batches in generate
    def prepare_config_and_inputs_for_generate(self, batch_size=2):
        """Like the generic helper, but slices the packed pixel_values by patch count, not batch."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        # We don't want a few model inputs in our model input dictionary for generation tests
        input_keys_to_ignore = [
            # we don't want encoder-decoder models to start from filled decoder ids
            "decoder_input_ids",
            "decoder_attention_mask",
            # we'll set cache use in each test differently
            "use_cache",
            # Ignore labels if it is in the input dict
            "labels",
            # model-specific exceptions should overload/overwrite this function
        ]

        # The diff from the general `prepare_config_and_inputs_for_generate` lies here
        patch_size = config.vision_config.patch_size
        filtered_image_length = (
            batch_size * (self.model_tester.image_height * self.model_tester.image_width) // (patch_size**2)
        )
        filtered_inputs_dict = {
            k: v[:batch_size, ...] if isinstance(v, torch.Tensor) else v
            for k, v in inputs_dict.items()
            if k not in input_keys_to_ignore
        }
        filtered_inputs_dict["pixel_values"] = inputs_dict["pixel_values"][:filtered_image_length]

        # It is important set `eos_token_id` to `None` to avoid early stopping (would break for length-based checks)
        text_gen_config = config.get_text_config(decoder=True)
        if text_gen_config.eos_token_id is not None and text_gen_config.pad_token_id is None:
            text_gen_config.pad_token_id = (
                text_gen_config.eos_token_id
                if isinstance(text_gen_config.eos_token_id, int)
                else text_gen_config.eos_token_id[0]
            )
        text_gen_config.eos_token_id = None
        text_gen_config.forced_eos_token_id = None

        return config, filtered_inputs_dict

    @unittest.skip(reason="PaddleOCRVL does not support.")
    def test_generate_compile_model_forward_fullgraph(self):
        pass

    @unittest.skip(reason="PaddleOCRVL does not support.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @pytest.mark.generate
    @unittest.skip(reason="PaddleOCRVL does not support beam search.")
    def test_beam_sample_generate(self):
        pass

    @pytest.mark.generate
    @unittest.skip(reason="PaddleOCRVL does not support beam search.")
    def test_beam_search_generate(self):
        pass

    @pytest.mark.generate
    @unittest.skip(reason="PaddleOCRVL does not support beam search.")
    def test_beam_search_generate_dict_output(self):
        pass

    @pytest.mark.generate
    @unittest.skip(reason="PaddleOCRVL does not support beam search.")
    def test_beam_search_generate_dict_outputs_use_cache(self):
        pass

    @pytest.mark.generate
    @unittest.skip(reason="PaddleOCRVL does not support beam search.")
    def test_beam_sample_generate_dict_output(self):
        pass

    @unittest.skip(reason="PaddleOCRVL needs to apply weight conversions.")
    def test_can_load_from_already_mapped_keys(self):
        pass

    @pytest.mark.generate
    @unittest.skip(reason="PaddleOCRVL does not support beam search.")
    def test_generate_from_inputs_embeds_1_beam_search(self, _, num_beams):
        pass

    @parameterized.expand([("random",), ("same",)])
    @pytest.mark.generate
    @unittest.skip(reason="PaddleOCRVL does not support assisted decoding.")
    def test_assisted_decoding_matches_greedy_search(self, assistant_type):
        pass

    @pytest.mark.generate
    @unittest.skip(reason="PaddleOCRVL does not support assisted decoding.")
    def test_assisted_decoding_sample(self):
        pass

    @unittest.skip("PaddleOCRVL does not support this test.")
    def test_model_is_small(self):
        pass
@require_torch
@slow
class PaddleOCRVLIntegrationTest(unittest.TestCase):
    """Slow end-to-end tests against the released `PaddlePaddle/PaddleOCR-VL` checkpoint."""

    def setUp(self):
        self.processor = AutoProcessor.from_pretrained("PaddlePaddle/PaddleOCR-VL")
        # Single-image OCR prompt reused by all tests below.
        self.messages = [
            {
                "role": "user",
                "content": [
                    {
                        "type": "image",
                        "url": "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/ocr_demo2.jpg",
                    },
                    {"type": "text", "text": "OCR:"},
                ],
            }
        ]

    def tearDown(self):
        # Free accelerator memory between tests (each test loads a full checkpoint).
        gc.collect()
        backend_empty_cache(torch_device)

    def test_small_model_integration_test(self):
        model = (
            PaddleOCRVLForConditionalGeneration.from_pretrained(
                "PaddlePaddle/PaddleOCR-VL",
                dtype="bfloat16",
            )
            .to(torch_device)
            .eval()
        )
        inputs = self.processor.apply_chat_template(
            self.messages,
            add_generation_prompt=True,
            tokenize=True,
            return_dict=True,
            return_tensors="pt",
        )

        # Verify tokenization: total length and the prompt prefix including image placeholders.
        expected_input_ids_length = 211
        assert expected_input_ids_length == len(inputs.input_ids[0])

        expected_input_ids = [100273, 2969, 93963, 93919, 101305, 100295, 100295, 100295, 100295, 100295]  # fmt: skip
        assert expected_input_ids == inputs.input_ids[0].tolist()[:10]

        # Verify image preprocessing on a small pixel slice.
        expected_pixel_slice = torch.tensor(
            [
                [1.0000, 1.0000, 1.0000],
                [1.0000, 1.0000, 1.0000],
                [0.9922, 0.9922, 0.9922],
                [1.0000, 1.0000, 1.0000],
                [1.0000, 1.0000, 1.0000],
            ],
            dtype=torch.float32,
            device="cpu",
        )
        assert torch.allclose(expected_pixel_slice, inputs.pixel_values[:5, :, 0, 0], atol=3e-3)

        # verify generation
        inputs = inputs.to(torch_device)

        output = model.generate(**inputs, max_new_tokens=30)

        result = self.processor.decode(output[0][inputs["input_ids"].shape[-1] : -1])
        EXPECTED_DECODED_TEXT = "生甘草"

        self.assertEqual(
            result,
            EXPECTED_DECODED_TEXT,
        )

    def test_small_model_integration_test_batch(self):
        model = (
            PaddleOCRVLForConditionalGeneration.from_pretrained("PaddlePaddle/PaddleOCR-VL", dtype="bfloat16")
            .to(torch_device)
            .eval()
        )
        inputs = self.processor.apply_chat_template(
            [self.messages, self.messages],
            add_generation_prompt=True,
            tokenize=True,
            return_dict=True,
            return_tensors="pt",
            padding=True,
            padding_side="left",
        ).to(torch_device)

        # it should not matter whether two images are the same size or not
        output = model.generate(**inputs, max_new_tokens=30)

        generated_ids_trimmed = [out_ids[len(in_ids) :] for in_ids, out_ids in zip(inputs.input_ids, output)]
        result = self.processor.batch_decode(
            generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )

        EXPECTED_DECODED_TEXT = ["生甘草", "生甘草"]
        self.assertEqual(
            result,
            EXPECTED_DECODED_TEXT,
        )

    @require_flash_attn
    @require_torch_accelerator
    @pytest.mark.flash_attn_test
    def test_small_model_integration_test_flashatt2(self):
        # Same checks as `test_small_model_integration_test`, but with FlashAttention-2.
        model = (
            PaddleOCRVLForConditionalGeneration.from_pretrained(
                "PaddlePaddle/PaddleOCR-VL", dtype="bfloat16", attn_implementation="flash_attention_2"
            )
            .to(torch_device)
            .eval()
        )
        inputs = self.processor.apply_chat_template(
            self.messages,
            add_generation_prompt=True,
            tokenize=True,
            return_dict=True,
            return_tensors="pt",
        )

        expected_input_ids_length = 211
        assert expected_input_ids_length == len(inputs.input_ids[0])

        expected_input_ids = [100273, 2969, 93963, 93919, 101305, 100295, 100295, 100295, 100295, 100295]  # fmt: skip
        assert expected_input_ids == inputs.input_ids[0].tolist()[:10]

        expected_pixel_slice = torch.tensor(
            [
                [1.0000, 1.0000, 1.0000],
                [1.0000, 1.0000, 1.0000],
                [0.9922, 0.9922, 0.9922],
                [1.0000, 1.0000, 1.0000],
                [1.0000, 1.0000, 1.0000],
            ],
            dtype=torch.float32,
            device="cpu",
        )
        assert torch.allclose(expected_pixel_slice, inputs.pixel_values[:5, :, 0, 0], atol=3e-3)

        # verify generation
        inputs = inputs.to(torch_device)

        output = model.generate(**inputs, max_new_tokens=30)

        result = self.processor.decode(output[0][inputs["input_ids"].shape[-1] : -1])
        EXPECTED_DECODED_TEXT = "生甘草"

        self.assertEqual(
            result,
            EXPECTED_DECODED_TEXT,
        )

    @require_flash_attn
    @require_torch_accelerator
    @pytest.mark.flash_attn_test
    def test_small_model_integration_test_batch_flashatt2(self):
        # Same checks as `test_small_model_integration_test_batch`, but with FlashAttention-2.
        model = (
            PaddleOCRVLForConditionalGeneration.from_pretrained(
                "PaddlePaddle/PaddleOCR-VL", dtype="bfloat16", attn_implementation="flash_attention_2"
            )
            .to(torch_device)
            .eval()
        )
        inputs = self.processor.apply_chat_template(
            [self.messages, self.messages],
            add_generation_prompt=True,
            tokenize=True,
            return_dict=True,
            return_tensors="pt",
            padding=True,
            padding_side="left",
        ).to(torch_device)

        # it should not matter whether two images are the same size or not
        output = model.generate(**inputs, max_new_tokens=30)

        generated_ids_trimmed = [out_ids[len(in_ids) :] for in_ids, out_ids in zip(inputs.input_ids, output)]
        result = self.processor.batch_decode(
            generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )

        EXPECTED_DECODED_TEXT = ["生甘草", "生甘草"]
        self.assertEqual(
            result,
            EXPECTED_DECODED_TEXT,
        )
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/paddleocr_vl/test_modeling_paddleocr_vl.py",
"license": "Apache License 2.0",
"lines": 436,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:src/transformers/models/lasr/feature_extraction_lasr.py | # Copyright 2025 The HuggingFace Inc. team and Google LLC. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ...audio_utils import hertz_to_mel
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
from ...utils.import_utils import requires
logger = logging.get_logger(__name__)
# TODO: @eustlb, we should be able to remove this and use mel_filter_bank from audio_utils
def linear_to_mel_weight_matrix(
    num_mel_bins: int,
    num_spectrogram_bins: int,
    sample_rate: float,
    lower_edge_hertz: float,
    upper_edge_hertz: float,
    dtype,
) -> np.ndarray:
    """Build a `(num_spectrogram_bins, num_mel_bins)` triangular mel filter bank.

    NumPy port of the JAX mel-weight-matrix logic; the spectrogram DC bin is
    zeroed out, matching HTK behavior.
    """
    # All intermediate math runs in float64 for precision, as in the JAX implementation.
    compute_dtype = np.float64
    # HTK excludes the spectrogram DC bin.
    skip_bins = 1

    nyquist = sample_rate / 2.0
    fft_freqs_hz = np.linspace(0.0, nyquist, num_spectrogram_bins, dtype=compute_dtype)[skip_bins:]
    fft_freqs_mel = hertz_to_mel(fft_freqs_hz, mel_scale="kaldi")[:, np.newaxis]

    # `num_mel_bins` triangles need `num_mel_bins + 2` evenly spaced mel-scale edges.
    band_edges = np.linspace(
        hertz_to_mel(lower_edge_hertz, mel_scale="kaldi"),
        hertz_to_mel(upper_edge_hertz, mel_scale="kaldi"),
        num_mel_bins + 2,
        dtype=compute_dtype,
    )
    left = band_edges[:-2][np.newaxis, :]
    center = band_edges[1:-1][np.newaxis, :]
    right = band_edges[2:][np.newaxis, :]

    # Rising and falling flanks of each triangle, clipped at zero outside the band.
    rising = (fft_freqs_mel - left) / (center - left)
    falling = (right - fft_freqs_mel) / (right - center)
    weights = np.maximum(0.0, np.minimum(rising, falling))

    # Re-insert the zeroed DC row and cast to the requested output dtype.
    return np.pad(weights, [[skip_bins, 0], [0, 0]]).astype(dtype)
@requires(backends=("torch",))
class LasrFeatureExtractor(SequenceFeatureExtractor):
r"""
Constructs a LASR feature extractor.
This feature extractor inherits from [`~feature_extraction_sequence_utils.SequenceFeatureExtractor`] which contains
most of the main methods. Users should refer to this superclass for more information regarding those methods.
This class extracts mel-filter bank features from raw speech using a custom numpy implementation of the `Short Time
Fourier Transform` which should match pytorch's `torch.stft` equivalent.
Args:
feature_size (`int`, *optional*, defaults to 128):
The feature dimension of the extracted features.
sampling_rate (`int`, *optional*, defaults to 16000):
The sampling rate at which the audio files should be digitalized expressed in hertz (Hz).
hop_length (`int`, *optional*, defaults to 160):
Length of the overlapping windows for the STFT used to obtain the Mel Frequency coefficients.
n_fft (`int`, *optional*, defaults to 512):
Size of the Fourier transform.
win_length (`int`, *optional*, defaults to 400):
The window length for the STFT computation.
padding_value (`float`, *optional*, defaults to 0.0):
Padding value used to pad the audio. Should correspond to silences.
"""
model_input_names = ["input_features", "attention_mask"]
def __init__(
self,
feature_size=128,
sampling_rate=16000,
hop_length=160,
n_fft=512,
win_length=400,
padding_value=0.0,
**kwargs,
):
super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
self.hop_length = hop_length
self.n_fft = n_fft
self.win_length = win_length
self.mel_filters = torch.from_numpy(
linear_to_mel_weight_matrix(
num_mel_bins=feature_size,
num_spectrogram_bins=n_fft // 2 + 1,
sample_rate=sampling_rate,
lower_edge_hertz=125.0,
upper_edge_hertz=7500.0,
dtype=np.float64,
)
)
def _torch_extract_fbank_features(self, waveform, device="cpu"):
# spectrogram
window = torch.hann_window(self.win_length, periodic=False, device=device, dtype=torch.float64)
waveform = waveform.to(torch.float64)
# TODO: @eustlb, to be standardized
# here we cannot use directly torch.stft because every fft frame is padded with zeros
# due to unfold then rfft, while torch.stft unfolds with the number of fft points
frames = waveform.unfold(-1, self.win_length, self.hop_length)
stft = torch.fft.rfft(window * frames, n=self.n_fft)
power_spec = torch.abs(stft) ** 2
# log mel spectrogram
mel_filters = self.mel_filters.to(device)
mel_spec = torch.clamp(power_spec @ mel_filters, min=1e-5)
mel_spec = torch.log(mel_spec)
return mel_spec
def __call__(
self,
raw_speech: np.ndarray | list[float] | list[np.ndarray] | list[list[float]],
truncation: bool = False,
pad_to_multiple_of: int | None = None,
return_tensors: str | TensorType | None = None,
return_attention_mask: bool | None = None,
padding: str | None = "longest",
max_length: int | None = None,
sampling_rate: int | None = None,
do_normalize: bool | None = None,
device: str | None = "cpu",
return_token_timestamps: bool | None = None,
**kwargs,
) -> BatchFeature:
"""
Main method to featurize and prepare for the model one or several sequence(s). Implementation uses PyTorch for
the STFT computation if available, otherwise a slower NumPy based one.
Args:
raw_speech (`np.ndarray`, `list[float]`, `list[np.ndarray]`, `list[list[float]]`):
The sequence or batch of sequences to be padded. Each sequence can be a numpy array, a list of float
values, a list of numpy arrays or a list of list of float values. Must be mono channel audio, not
stereo, i.e. single float per timestep.
truncation (`bool`, *optional*, default to `True`):
Activates truncation to cut input sequences longer than *max_length* to *max_length*.
pad_to_multiple_of (`int`, *optional*, defaults to None):
If set will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability
`>= 7.5` (Volta), or on TPUs which benefit from having sequence lengths be a multiple of 128.
return_attention_mask (`bool`, *optional*):
Whether to return the attention mask. If left to the default, will return the attention mask according
to the specific feature_extractor's default.
[What are attention masks?](../glossary#attention-mask)
<Tip>
For Parakeet models, `attention_mask` should always be passed for batched inference, to avoid subtle
bugs.
</Tip>
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
sampling_rate (`int`, *optional*):
The sampling rate at which the `raw_speech` input was sampled. It is strongly recommended to pass
`sampling_rate` at the forward call to prevent silent errors and allow automatic speech recognition
pipeline.
padding_value (`float`, *optional*, defaults to 0.0):
The value that is used to fill the padding values / vectors.
do_normalize (`bool`, *optional*, defaults to `False`):
Whether or not to zero-mean unit-variance normalize the input. Normalizing can help to significantly
improve the performance of the model.
device (`str`, *optional*, defaults to `'cpu'`):
Specifies the device for computation of the log-mel spectrogram of audio signals in the
`_torch_extract_fbank_features` method. (e.g., "cpu", "cuda")
return_token_timestamps (`bool`, *optional*, defaults to `None`):
Deprecated. Use `return_attention_mask` instead from which the number of frames can be inferred.
Whether or not to return the number of frames of the input raw_speech.
These num_frames can be used by the model to compute word level timestamps.
"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
f" was sampled with {self.sampling_rate} and not {sampling_rate}."
)
else:
logger.warning(
f"It is strongly recommended to pass the `sampling_rate` argument to `{self.__class__.__name__}()`. "
"Failing to do so can result in silent errors that might be hard to debug."
)
# Convert to torch tensor
if isinstance(raw_speech, np.ndarray):
raw_speech = torch.tensor(raw_speech)
elif isinstance(raw_speech, (list, tuple)):
if isinstance(raw_speech[0], (list, np.ndarray)):
raw_speech = [torch.tensor(speech) for speech in raw_speech]
else: # list[float]
raw_speech = torch.tensor(raw_speech)
is_batched_torch = isinstance(raw_speech, torch.Tensor) and len(raw_speech.shape) > 1
if is_batched_torch and len(raw_speech.shape) > 2:
logger.warning(
f"Only mono-channel audio is supported for input to {self.__class__.__name__}. "
"We will take the mean of the channels to convert to mono."
)
raw_speech = raw_speech.mean(-1)
is_batched_sequence = isinstance(raw_speech, (list, tuple))
if is_batched_sequence:
for speech in raw_speech:
if len(speech.shape) > 1:
logger.warning(
f"Only mono-channel audio is supported for input to {self.__class__.__name__}. "
"We will take the mean of the channels to convert to mono."
)
speech = speech.mean(-1)
if is_batched_torch or is_batched_sequence:
raw_speech = [speech[:, None].to(torch.float32) for speech in raw_speech]
else:
raw_speech = [raw_speech[:, None].to(torch.float32)]
batched_speech = BatchFeature({"input_features": raw_speech})
padded_inputs = self.pad(
batched_speech,
padding=padding,
max_length=max_length,
truncation=truncation,
pad_to_multiple_of=pad_to_multiple_of,
return_attention_mask=return_attention_mask,
return_tensors="pt",
)
input_features = padded_inputs.input_features.squeeze(-1)
input_features = self._torch_extract_fbank_features(input_features, device)
data = {
"input_features": input_features.to(torch.float32),
}
if return_attention_mask:
attention_mask = padded_inputs.attention_mask[:, self.win_length - 1 :: self.hop_length]
data["attention_mask"] = attention_mask.to(torch.bool)
return BatchFeature(data=data, tensor_type=return_tensors)
# Public API of this module.
__all__ = ["LasrFeatureExtractor"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/lasr/feature_extraction_lasr.py",
"license": "Apache License 2.0",
"lines": 234,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/lasr/modular_lasr.py | # Copyright 2025 The HuggingFace Inc. team and Google LLC. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
from collections.abc import Callable
import torch
from tokenizers import Tokenizer
from tokenizers.models import Unigram
from torch import nn
from ...masking_utils import create_bidirectional_mask
from ...modeling_outputs import BaseModelOutput
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...tokenization_utils_tokenizers import TokenizersBackend
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple
from ...utils.generic import merge_with_config_defaults
from ...utils.output_capturing import capture_outputs
from ..llama.modeling_llama import LlamaAttention, LlamaRotaryEmbedding, apply_rotary_pos_emb, eager_attention_forward
from ..parakeet.configuration_parakeet import ParakeetCTCConfig, ParakeetEncoderConfig
from ..parakeet.modeling_parakeet import (
ParakeetEncoderBlock,
ParakeetEncoderConvolutionModule,
ParakeetForCTC,
ParakeetPreTrainedModel,
)
from ..parakeet.processing_parakeet import ParakeetProcessor
from ..t5.tokenization_t5 import T5Tokenizer
class LasrTokenizer(T5Tokenizer, TokenizersBackend):
    """T5-style Unigram tokenizer for Lasr with CTC-aware decoding (collapse repeats, drop blanks)."""

    def __init__(
        self,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=100,
        additional_special_tokens=None,
        vocab=None,
        vocab_file=None,
        **kwargs,
    ):
        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            vocab=vocab,
            vocab_file=vocab_file,
            **kwargs,
        )
        # Rebuild the fast-tokenizer backend around a Unigram model over the
        # parent-provided vocabulary scores.
        unigram_model = Unigram(
            self._vocab_scores,
            unk_id=3,
            byte_fallback=False,
        )
        self._tokenizer = Tokenizer(unigram_model)

    def _decode(
        self,
        token_ids: int | list[int],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool | None = None,
        group_tokens: bool = True,
        **kwargs,
    ) -> str:
        """CTC decode: optionally collapse consecutive duplicates, drop blank (pad) tokens, then decode."""
        ids = [token_ids] if isinstance(token_ids, int) else token_ids
        if group_tokens:
            # Keep one representative per run of identical ids.
            ids = [run_key for run_key, _ in itertools.groupby(ids)]
        # For CTC we filter out the blank token, which is the pad token.
        ids = [token for token in ids if token != self.pad_token_id]
        return TokenizersBackend._decode(
            self,
            token_ids=ids,
            skip_special_tokens=skip_special_tokens,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )
# Identical to the Parakeet processor; subclassed only so Lasr models resolve a class of their own.
class LasrProcessor(ParakeetProcessor):
    pass
class LasrEncoderConfig(ParakeetEncoderConfig):
    r"""
    This is the configuration class to store the configuration of a [`LasrEncoder`]. It is used to instantiate a
    `LasrEncoder` model according to the specified arguments, defining the model architecture.
    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.
    Args:
        hidden_size (`int`, *optional*, defaults to 512):
            Dimension of the layers and the hidden states.
        num_hidden_layers (`int`, *optional*, defaults to 17):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 8):
            Number of attention heads for each attention layer in the Transformer encoder.
        intermediate_size (`int`, *optional*, defaults to 2048):
            Dimension of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the encoder and pooler.
        attention_bias (`bool`, *optional*, defaults to `False`):
            Whether to use bias in the attention layers.
        convolution_bias (`bool`, *optional*, defaults to `False`):
            Whether to use bias in convolutions of the conformer's convolution module.
        conv_kernel_size (`int`, *optional*, defaults to 32):
            The kernel size of the convolution layers in the Conformer block.
        subsampling_conv_channels (`int`, *optional*, defaults to 256):
            The number of channels in the subsampling convolution layers.
        subsampling_conv_kernel_size (`int`, *optional*, defaults to 5):
            The kernel size of the subsampling convolution layers.
        subsampling_conv_stride (`int`, *optional*, defaults to 2):
            The stride of the subsampling convolution layers.
        num_mel_bins (`int`, *optional*, defaults to 128):
            Number of mel features.
        dropout (`float`, *optional*, defaults to 0.1):
            The dropout ratio for all fully connected layers in the embeddings, encoder, and pooler.
        dropout_positions (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the positions in the input sequence.
        layerdrop (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the layers in the encoder.
        activation_dropout (`float`, *optional*, defaults to 0.1):
            The dropout ratio for activations inside the fully connected layer.
        attention_dropout (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the attention layers.
        max_position_embeddings (`int`, *optional*, defaults to 10000):
            The maximum sequence length that this model might ever be used with.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-06):
            The epsilon used by the layer normalization layers.
        feed_forward_residual_weights (`tuple[float, float]`, *optional*, defaults to `[1.5, 0.5]`):
            The residual weights for the feed forward layers.
        conv_residual_weights (`tuple[float, float]`, *optional*, defaults to `[2.0, 1.0]`):
            The residual weights for the convolution layers.
        batch_norm_momentum (`float`, *optional*, defaults to 0.01):
            The momentum for the batch normalization layers.
        rope_parameters (`RopeParameters`, *optional*):
            Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain
            a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE
            with longer `max_position_embeddings`.
    Example:
    ```python
    >>> from transformers import LasrEncoderModel, LasrEncoderConfig
    >>> # Initializing a `LasrEncoder` configuration
    >>> configuration = LasrEncoderConfig()
    >>> # Initializing a model from the configuration
    >>> model = LasrEncoderModel(configuration)
    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```
    This configuration class is based on the LasrEncoder architecture from Google Health AI. You can find more details
    and pre-trained models at [TODO/TODO](https://huggingface.co/TODO/TODO).
    """

    def __init__(
        self,
        hidden_size=512,
        num_hidden_layers=17,
        num_attention_heads=8,
        intermediate_size=2048,
        hidden_act="silu",
        attention_bias=False,
        convolution_bias=False,
        conv_kernel_size=32,
        subsampling_conv_channels=256,
        subsampling_conv_kernel_size=5,
        subsampling_conv_stride=2,
        num_mel_bins=128,
        dropout=0.1,
        dropout_positions=0.0,
        layerdrop=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=10000,
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        feed_forward_residual_weights=None,
        conv_residual_weights=None,
        batch_norm_momentum=0.01,
        rope_parameters=None,
        **kwargs,
    ):
        self.rope_parameters = rope_parameters
        self.layer_norm_eps = layer_norm_eps
        # Resolve the documented defaults here instead of using mutable (list) default
        # arguments: a literal list default is a single object shared by every call, so
        # in-place mutation of one config would silently leak into all future configs.
        self.feed_forward_residual_weights = (
            [1.5, 0.5] if feed_forward_residual_weights is None else feed_forward_residual_weights
        )
        self.conv_residual_weights = [2.0, 1.0] if conv_residual_weights is None else conv_residual_weights
        self.batch_norm_momentum = batch_norm_momentum
        super().__init__(
            hidden_size=hidden_size,
            num_hidden_layers=num_hidden_layers,
            num_attention_heads=num_attention_heads,
            intermediate_size=intermediate_size,
            hidden_act=hidden_act,
            attention_bias=attention_bias,
            convolution_bias=convolution_bias,
            conv_kernel_size=conv_kernel_size,
            subsampling_conv_channels=subsampling_conv_channels,
            num_mel_bins=num_mel_bins,
            subsampling_conv_kernel_size=subsampling_conv_kernel_size,
            subsampling_conv_stride=subsampling_conv_stride,
            dropout=dropout,
            dropout_positions=dropout_positions,
            layerdrop=layerdrop,
            activation_dropout=activation_dropout,
            attention_dropout=attention_dropout,
            max_position_embeddings=max_position_embeddings,
            initializer_range=initializer_range,
            **kwargs,
        )
        # The parent config defines these, but Lasr derives subsampling from the
        # kernel/stride settings and never scales inputs, so drop them from the config.
        del self.subsampling_factor
        del self.scale_input
class LasrCTCConfig(ParakeetCTCConfig):
    r"""
    This is the configuration class to store the configuration of a [`LasrForCTC`]. It is used to instantiate a
    Lasr CTC model according to the specified arguments, defining the model architecture.
    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.
    Args:
        vocab_size (`int`, *optional*, defaults to 512):
            Vocabulary size of the model.
        ctc_loss_reduction (`str`, *optional*, defaults to `"mean"`):
            Specifies the reduction to apply to the output of `torch.nn.CTCLoss`. Only relevant when training an
            instance of [`LasrForCTC`].
        ctc_zero_infinity (`bool`, *optional*, defaults to `True`):
            Whether to zero infinite losses and the associated gradients of `torch.nn.CTCLoss`. Infinite losses mainly
            occur when the inputs are too short to be aligned to the targets. Only relevant when training an instance
            of [`LasrForCTC`].
        encoder_config (`Union[dict, LasrEncoderConfig]`, *optional*):
            The config object or dictionary of the encoder.
        pad_token_id (`int`, *optional*, defaults to 0):
            Padding token id. Also used as blank token id.
    Example:
    ```python
    >>> from transformers import LasrForCTC, LasrCTCConfig
    >>> # Initializing a Lasr configuration
    >>> configuration = LasrCTCConfig()
    >>> # Initializing a model from the configuration
    >>> model = LasrForCTC(configuration)
    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```
    This configuration class is based on the Lasr CTC architecture from Google Health AI. You can find more details
    and pre-trained models at [TODO/TODO](https://huggingface.co/TODO/TODO).
    """

    def __init__(
        self,
        vocab_size=512,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=True,
        encoder_config: dict | LasrEncoderConfig = None,
        pad_token_id=0,
        **kwargs,
    ):
        # Pure pass-through: the Parakeet parent handles all validation and storage.
        super().__init__(
            vocab_size=vocab_size,
            ctc_loss_reduction=ctc_loss_reduction,
            ctc_zero_infinity=ctc_zero_infinity,
            encoder_config=encoder_config,
            pad_token_id=pad_token_id,
            **kwargs,
        )

    @property
    def inputs_to_logits_ratio(self):
        """Number of input feature frames consumed per output logit."""
        # Two stacked subsampling convolutions, each downsampling time by `stride`.
        stride = self.encoder_config.subsampling_conv_stride
        return stride * stride
class LasrEncoderSubsampling(nn.Module):
    """Time-subsampling front end: dense projection, two strided convolutions, dense projection."""

    def __init__(self, config: LasrEncoderConfig):
        super().__init__()
        hidden = config.hidden_size
        kernel = config.subsampling_conv_kernel_size
        stride = config.subsampling_conv_stride
        # NOTE: submodule creation order matches the checkpoint layout — do not reorder.
        self.dense_0 = nn.Linear(config.num_mel_bins, hidden)
        self.conv_0 = nn.Conv1d(hidden, hidden, kernel_size=kernel, stride=stride)
        self.conv_1 = nn.Conv1d(hidden, config.subsampling_conv_channels, kernel_size=kernel, stride=stride)
        self.dense_1 = nn.Linear(config.subsampling_conv_channels, hidden)
        self.act_fn = nn.ReLU()

    def forward(self, input_features: torch.Tensor) -> torch.Tensor:
        """Map (batch, time, num_mel_bins) to (batch, subsampled_time, hidden_size)."""
        features = self.act_fn(self.dense_0(input_features))
        # Conv1d expects channels-first layout.
        features = features.transpose(1, 2)
        features = self.act_fn(self.conv_0(features))
        features = self.act_fn(self.conv_1(features))
        features = features.transpose(1, 2)
        return self.dense_1(features)
# Unchanged Llama rotary embedding; subclassed so the modular converter emits a Lasr-named class.
class LasrEncoderRotaryEmbedding(LlamaRotaryEmbedding): ...
class LasrEncoderAttention(LlamaAttention):
    """Bidirectional (non-causal) multi-head self-attention with rotary position embeddings."""

    def __init__(self, config: LasrEncoderConfig, layer_idx: int):
        super().__init__(config, layer_idx)
        # The encoder attends over the full sequence in both directions.
        self.is_causal = False

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: tuple[torch.Tensor, torch.Tensor] | None = None,
        attention_mask: torch.Tensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple[torch.Tensor, torch.Tensor]:
        batch_dims = hidden_states.shape[:-1]
        head_shape = (*batch_dims, -1, self.head_dim)

        # Project then split into heads: (batch, heads, seq, head_dim).
        queries = self.q_proj(hidden_states).view(head_shape).transpose(1, 2)
        keys = self.k_proj(hidden_states).view(head_shape).transpose(1, 2)
        values = self.v_proj(hidden_states).view(head_shape).transpose(1, 2)

        cos, sin = position_embeddings
        queries, keys = apply_rotary_pos_emb(queries, keys, cos, sin)

        attention_interface: Callable = ALL_ATTENTION_FUNCTIONS.get_interface(
            self.config._attn_implementation, eager_attention_forward
        )
        attn_output, attn_weights = attention_interface(
            self,
            queries,
            keys,
            values,
            attention_mask,
            dropout=0.0 if not self.training else self.attention_dropout,
            scaling=self.scaling,
            **kwargs,
        )

        # Merge heads back and apply the output projection.
        attn_output = attn_output.reshape(*batch_dims, -1).contiguous()
        return self.o_proj(attn_output), attn_weights
class LasrEncoderConvolutionModule(ParakeetEncoderConvolutionModule):
    """Parakeet convolution module with BatchNorm and 'same' padding instead of the parent defaults."""

    def __init__(self, config: LasrEncoderConfig, module_config=None):
        super().__init__(config, module_config)
        # Override the parent's choices: BatchNorm normalization and length-preserving padding.
        self.norm = nn.BatchNorm1d(config.hidden_size, momentum=config.batch_norm_momentum)
        self.padding = "same"
class LasrEncoderBlock(ParakeetEncoderBlock):
    """Conformer block using weighted residual connections and bias-free LayerNorms."""

    def __init__(self, config: LasrEncoderConfig, layer_idx: int):
        super().__init__(config, layer_idx)
        self.feed_forward_residual_weights = config.feed_forward_residual_weights
        self.conv_residual_weights = config.conv_residual_weights
        # Replace the parent's norms with bias-free LayerNorms.
        self.norm_feed_forward1 = nn.LayerNorm(config.hidden_size, config.layer_norm_eps, bias=False)
        self.norm_self_att = nn.LayerNorm(config.hidden_size, config.layer_norm_eps, bias=False)
        self.norm_conv = nn.LayerNorm(config.hidden_size, config.layer_norm_eps, bias=False)
        self.norm_feed_forward2 = nn.LayerNorm(config.hidden_size, config.layer_norm_eps, bias=False)
        self.norm_out = nn.LayerNorm(config.hidden_size, config.layer_norm_eps, bias=False)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: torch.Tensor | None = None,
        position_embeddings: torch.Tensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> torch.Tensor:
        ff_weights = self.feed_forward_residual_weights
        conv_weights = self.conv_residual_weights

        # First feed-forward, combined with a weighted skip connection.
        ff_output = self.feed_forward1(self.norm_feed_forward1(hidden_states))
        hidden_states = ff_weights[0] * hidden_states + ff_weights[1] * ff_output

        # Self-attention with a plain (unweighted) residual.
        attn_output, _ = self.self_attn(
            hidden_states=self.norm_self_att(hidden_states),
            attention_mask=attention_mask,
            position_embeddings=position_embeddings,
            **kwargs,
        )
        hidden_states = hidden_states + attn_output

        # Convolution module with a weighted skip connection.
        conv_output = self.conv(self.norm_conv(hidden_states), attention_mask=attention_mask)
        hidden_states = conv_weights[0] * hidden_states + conv_weights[1] * conv_output

        # Second feed-forward with a weighted skip connection, then the output norm.
        ff_output = self.feed_forward2(self.norm_feed_forward2(hidden_states))
        hidden_states = ff_weights[0] * hidden_states + ff_weights[1] * ff_output
        return self.norm_out(hidden_states)
class LasrPreTrainedModel(ParakeetPreTrainedModel):
    """Base class providing weight init and subsampled-length computation for Lasr models."""

    # padding is incompatible with flex attention as the resulting mask cannot be used to apply padding
    _supports_flex_attn = False

    def _init_weights(self, module):
        # Bug fix: `PreTrainedModel._init_weights` is accessed unbound here, so `self`
        # must be passed explicitly. The previous `PreTrainedModel._init_weights(module)`
        # bound `module` as `self` and omitted the `module` argument (TypeError).
        PreTrainedModel._init_weights(self, module)

    def _get_subsampling_output_length(self, input_lengths: torch.Tensor):
        """Map raw feature-frame lengths to encoder output lengths after the subsampling convs."""
        # A CTC config nests the encoder config; a bare encoder config is used directly.
        encoder_config = self.config.encoder_config if isinstance(self.config, LasrCTCConfig) else self.config
        kernel_size = encoder_config.subsampling_conv_kernel_size
        stride = encoder_config.subsampling_conv_stride
        num_layers = 2  # LasrEncoderSubsampling applies exactly two strided Conv1d layers
        for _ in range(num_layers):
            # Standard conv output-length formula with no padding or dilation.
            input_lengths = (input_lengths - kernel_size) // stride + 1
        return input_lengths
@auto_docstring(
    custom_intro="""
    The LasrEncoder model, based on the [Conformer architecture](https://arxiv.org/abs/2005.08100).
    """
)
class LasrEncoder(LasrPreTrainedModel):
    config: LasrEncoderConfig
    base_model_prefix = "encoder"

    def __init__(self, config: LasrEncoderConfig):
        super().__init__(config)
        self.gradient_checkpointing = False
        self.dropout = config.dropout
        self.dropout_positions = config.dropout_positions
        self.layerdrop = config.layerdrop

        self.subsampler = LasrEncoderSubsampling(config)
        self.rotary_emb = LasrEncoderRotaryEmbedding(config)
        self.layers = nn.ModuleList(
            [LasrEncoderBlock(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        self.out_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps, bias=False)

        self.post_init()

    @auto_docstring
    @merge_with_config_defaults
    @capture_outputs
    @can_return_tuple
    def forward(
        self,
        input_features: torch.Tensor,
        attention_mask: torch.Tensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> BaseModelOutput:
        r"""
        Example:
        ```python
        >>> from transformers import AutoProcessor, LasrEncoder
        >>> from datasets import load_dataset, Audio
        >>> model_id = TODO
        >>> processor = AutoProcessor.from_pretrained(model_id)
        >>> encoder = LasrEncoder.from_pretrained(model_id)
        >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        >>> ds = ds.cast_column("audio", Audio(sampling_rate=processor.feature_extractor.sampling_rate))
        >>> inputs = processor(ds[0]["audio"]["array"])
        >>> encoder_outputs = encoder(**inputs)
        >>> print(encoder_outputs.last_hidden_state.shape)
        ```
        """
        hidden_states = self.subsampler(input_features)

        # Rotary embeddings are computed once over the subsampled sequence and shared by all layers.
        cos, sin = self.rotary_emb(
            hidden_states, torch.arange(hidden_states.shape[1], device=hidden_states.device).unsqueeze(0)
        )
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
        cos = nn.functional.dropout(cos, p=self.dropout_positions, training=self.training)
        sin = nn.functional.dropout(sin, p=self.dropout_positions, training=self.training)

        if attention_mask is not None:
            # Shorten the sample-level mask to the subsampled sequence length first.
            attention_mask = self._get_output_attention_mask(attention_mask, target_length=hidden_states.shape[1])
        attention_mask = create_bidirectional_mask(
            config=self.config,
            inputs_embeds=hidden_states,
            attention_mask=attention_mask,
        )

        for encoder_layer in self.layers:
            # add LayerDrop (see https://huggingface.co/papers/1909.11556 for description)
            to_drop = False
            if self.training:
                dropout_probability = torch.rand([])
                if dropout_probability < self.layerdrop:  # skip the layer
                    to_drop = True
            if not to_drop:
                hidden_states = encoder_layer(
                    hidden_states,
                    attention_mask=attention_mask,
                    position_embeddings=(cos, sin),
                    **kwargs,
                )

        hidden_states = self.out_norm(hidden_states)
        return BaseModelOutput(last_hidden_state=hidden_states)
class LasrForCTC(ParakeetForCTC):
    # NOTE(review): `generate(**super_kwargs)` without an explicit `self` looks like the
    # transformers modular-converter pattern used to attach a model-specific docstring to
    # an inherited method; presumably the converter rewrites it into a regular override —
    # confirm against other modular_*.py files before treating the signature as a bug.
    def generate(**super_kwargs):
        r"""
        Example:
        ```python
        >>> from transformers import AutoProcessor, LasrForCTC
        >>> from datasets import load_dataset, Audio
        >>> model_id = TODO
        >>> processor = AutoProcessor.from_pretrained(model_id)
        >>> model = LasrForCTC.from_pretrained(model_id)
        >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        >>> ds = ds.cast_column("audio", Audio(sampling_rate=processor.feature_extractor.sampling_rate))
        >>> inputs = processor(ds[0]["audio"]["array"], text=ds[0]["text"])
        >>> predicted_ids = model.generate(**inputs)
        >>> transcription = processor.batch_decode(predicted_ids, skip_special_tokens=True)
        >>> print(transcription)
        ```
        """
        return super().generate(**super_kwargs)
# Public symbols exported when this modular file is converted into the final modeling/config modules.
__all__ = [
    "LasrForCTC",
    "LasrEncoder",
    "LasrPreTrainedModel",
    "LasrProcessor",
    "LasrEncoderConfig",
    "LasrCTCConfig",
    "LasrTokenizer",
]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/lasr/modular_lasr.py",
"license": "Apache License 2.0",
"lines": 490,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:tests/test_training_mixin.py | # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Training overfit tester mixin for model tests."""
import logging
import time
from abc import ABC, abstractmethod
import torch
from transformers import set_seed
from transformers.testing_utils import Colors, build_cpu_memory_monitor, init_test_logger, is_training_test
logger = logging.getLogger("transformers.training_test")
class TrainingTesterMixin(ABC):
"""
Mixin for training overfit tests. Add to model test classes alongside ModelTesterMixin.
The model_tester (e.g., CausalLMModelTester) already provides:
- get_config() -> tiny model config
- prepare_config_and_inputs_for_common() -> config + input dict
- causal_lm_class, base_model_class, etc.
This mixin adds training-specific tests using that infrastructure.
"""
# ============================================================
# Training hyperparameters
# ============================================================
training_overfit_steps: int = 300
training_overfit_batch_size: int = 2
training_overfit_learning_rate: float = 1e-3
training_overfit_seq_length: int = 64
training_overfit_log_freq: int = 10
# Loss reduction and grad norm reduction thresholds for passing the test (i.e 95% reduction)
training_loss_reduction_threshold: float = 0.9
training_grad_norm_reduction_threshold: float = 0.9
    @property
    @abstractmethod
    def model_tester(self):
        """The model tester instance (e.g., CausalLMModelTester).

        Concrete test classes must provide this; it supplies tiny configs and dummy inputs.
        """
        ...
# ============================================================
# Modality detection
# ============================================================
def _get_model_modality(self) -> str:
"""Detect the modality of the model based on its input signature."""
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
if "input_ids" in inputs_dict:
return "text"
elif "pixel_values" in inputs_dict:
return "image"
elif "input_features" in inputs_dict or "input_values" in inputs_dict:
return "audio"
else:
raise ValueError(f"Unknown modality: {inputs_dict}")
# ============================================================
# Training data creation for each modality
# ============================================================
def _create_text_training_batch(
self,
batch_size: int,
seq_length: int,
vocab_size: int,
) -> dict[str, torch.Tensor]:
"""Create a simple text batch without needing a tokenizer."""
# Create a deterministic sequence (not random, so model can learn it)
pattern = list(range(1, min(20, vocab_size))) # tokens 1-19
num_repeats = (seq_length // len(pattern)) + 1
tokens = (pattern * num_repeats)[:seq_length]
input_ids = torch.tensor([tokens] * batch_size, dtype=torch.long)
return {"input_ids": input_ids, "labels": input_ids.clone()}
    def _create_image_training_batch(
        self,
        batch_size: int,
        num_channels: int,
        height: int,
        width: int,
    ) -> dict[str, torch.Tensor]:
        """Create fixed batch for image models using a deterministic pattern."""
        # NOTE(review): unimplemented stub — currently returns None, so image-model
        # training tests calling this will fail; presumably awaiting implementation.
        pass
    def _create_audio_training_batch(
        self,
        batch_size: int,
        audio_length: int,
        feature_size: int | None = None,
    ) -> dict[str, torch.Tensor]:
        """Create fixed batch for audio models using a deterministic waveform."""
        # NOTE(review): unimplemented stub — currently returns None, so audio-model
        # training tests calling this will fail; presumably awaiting implementation.
        pass
def _decode_text_tokens(self, tokens: list[int], max_display: int = 40) -> str:
"""Decode tokens to readable string (maps token IDs to letters: 1->a, 2->b, etc.)."""
decoded = "".join(chr(ord("a") + (t - 1) % 26) for t in tokens)
if len(decoded) > max_display:
return f"'{decoded[:max_display]}...'"
return f"'{decoded}'"
def _get_trainable_model_class(self):
"""Get the model class to use for training (prefers *ForCausalLM, *ForSequenceClassification, etc.)."""
# Prefer model classes with a head (for computing loss)
if hasattr(self.model_tester, "causal_lm_class") and self.model_tester.causal_lm_class is not None:
return self.model_tester.causal_lm_class
if (
hasattr(self.model_tester, "sequence_classification_class")
and self.model_tester.sequence_classification_class is not None
):
return self.model_tester.sequence_classification_class
# Fall back to first model class
return self.all_model_classes[0]
    @is_training_test
    def test_training_overfit(self):
        """Test that a tiny model can overfit on a fixed batch.

        End-to-end CPU sanity check: build the tester's tiny config, train on a single
        deterministic batch, then assert that (1) the loss drops by at least
        `training_loss_reduction_threshold`, (2) the gradient norm drops by at least
        `training_grad_norm_reduction_threshold`, and (3) for text models with a
        `generate` method, greedy decoding reproduces the memorized training sequence.
        """
        # --- Setup: logging and CPU memory monitoring ---
        # Initialize logging and memory monitoring
        init_test_logger()
        memory_monitor = build_cpu_memory_monitor(logger)
        logger.info("=" * 70)
        logger.info(f"Starting test: {self._testMethodName}")
        logger.info("=" * 70)
        # Skip if model doesn't support training
        if not getattr(self.model_tester, "is_training", True):
            logger.info(f"{Colors.YELLOW}Skipping: Model tester not configured for training tests{Colors.RESET}")
            self.skipTest("Model tester not configured for training tests")
        # Configuration
        logger.info(f"{Colors.BOLD}Job Configuration:{Colors.RESET}")
        logger.info(f" {Colors.CYAN}total_steps:{Colors.RESET} {self.training_overfit_steps}")
        logger.info(f" {Colors.CYAN}batch_size:{Colors.RESET} {self.training_overfit_batch_size}")
        logger.info(f" {Colors.CYAN}learning_rate:{Colors.RESET} {self.training_overfit_learning_rate}")
        logger.info(f" {Colors.CYAN}seq_length:{Colors.RESET} {self.training_overfit_seq_length}")
        logger.info(f" {Colors.CYAN}log_freq:{Colors.RESET} {self.training_overfit_log_freq}")
        logger.info(f" {Colors.CYAN}device:{Colors.RESET} cpu")
        # Fixed seed so weight init (and thus the whole run) is reproducible.
        set_seed(42)
        logger.info("-" * 70)
        logger.info(f"{Colors.BOLD}Building model{Colors.RESET}")
        load_start = time.perf_counter()
        # Get tiny config from existing infrastructure
        config = self.model_tester.get_config()
        model_class = self._get_trainable_model_class()
        model = model_class(config)
        model.train()
        load_time = time.perf_counter() - load_start
        logger.info(f"Model loaded in {Colors.GREEN}{load_time:.3f}s{Colors.RESET}")
        # Log model architecture
        # TODO(3outeille): make sure if there is other parameters to log
        logger.info(f"{Colors.BOLD}Model Architecture:{Colors.RESET}")
        logger.info(f" {Colors.CYAN}model_class:{Colors.RESET} {model_class.__name__}")
        # Each attribute is optional because configs differ across architectures.
        if hasattr(config, "hidden_size"):
            logger.info(f" {Colors.CYAN}hidden_size:{Colors.RESET} {config.hidden_size}")
        if hasattr(config, "num_hidden_layers"):
            logger.info(f" {Colors.CYAN}num_hidden_layers:{Colors.RESET} {config.num_hidden_layers}")
        if hasattr(config, "num_attention_heads"):
            logger.info(f" {Colors.CYAN}num_attention_heads:{Colors.RESET} {config.num_attention_heads}")
        if hasattr(config, "num_key_value_heads"):
            logger.info(f" {Colors.CYAN}num_key_value_heads:{Colors.RESET} {config.num_key_value_heads}")
        if hasattr(config, "intermediate_size"):
            logger.info(f" {Colors.CYAN}intermediate_size:{Colors.RESET} {config.intermediate_size}")
        if hasattr(config, "vocab_size"):
            logger.info(f" {Colors.CYAN}vocab_size:{Colors.RESET} {config.vocab_size}")
        if hasattr(config, "num_experts"):
            logger.info(f" {Colors.CYAN}num_experts:{Colors.RESET} {config.num_experts}")
        if hasattr(config, "num_experts_per_tok"):
            logger.info(f" {Colors.CYAN}num_experts_per_tok:{Colors.RESET} {config.num_experts_per_tok}")
        # Count parameters
        total_params = sum(p.numel() for p in model.parameters())
        trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
        logger.info(
            f"{Colors.CYAN}Model size:{Colors.RESET} {Colors.BRIGHT_GREEN}{total_params:,}{Colors.RESET} total parameters"
        )
        logger.info(
            f"{Colors.CYAN}Trainable parameters:{Colors.RESET} {Colors.BRIGHT_GREEN}{trainable_params:,}{Colors.RESET}"
        )
        # Memory after model load
        mem_stats = memory_monitor.get_stats()
        logger.info(
            f"{Colors.MAGENTA}Memory after model load:{Colors.RESET} {mem_stats.rss_gib:.2f} GiB ({mem_stats.rss_pct:.1f}%)"
        )
        # --- Phase: build the fixed training batch for the detected modality ---
        logger.info("-" * 70)
        logger.info(f"{Colors.BOLD}Creating fixed batch{Colors.RESET}")
        modality = self._get_model_modality()
        logger.info(f"{Colors.CYAN}Detected modality:{Colors.RESET} {modality}")
        # NOTE(review): sample_inputs is currently unused; kept for parity with the
        # tester API — confirm whether it can be dropped.
        _, sample_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        if modality == "text":
            # For text models, we need a tokenizer - use a simple one or create fake tokens
            batch = self._create_text_training_batch(
                batch_size=self.training_overfit_batch_size,
                seq_length=self.training_overfit_seq_length,
                vocab_size=config.vocab_size,
            )
            logger.info(f"{Colors.CYAN}Training pattern:{Colors.RESET} Repeating token sequence (1-19)")
        else:
            raise ValueError(f"Modality {modality} not supported yet for training overfit")
        tokens_per_batch = self.training_overfit_batch_size * self.training_overfit_seq_length
        logger.info(f" {Colors.CYAN}batch_size:{Colors.RESET} {self.training_overfit_batch_size}")
        logger.info(f" {Colors.CYAN}seq_length:{Colors.RESET} {self.training_overfit_seq_length}")
        logger.info(f" {Colors.CYAN}tokens_per_batch:{Colors.RESET} {tokens_per_batch:,}")
        logger.info(f"{Colors.DIM}Using same fixed batch every step (deterministic overfitting){Colors.RESET}")
        # --- Phase: optimizer ---
        logger.info("-" * 70)
        logger.info(f"{Colors.BOLD}Building optimizer{Colors.RESET}")
        optimizer = torch.optim.Adam(
            model.parameters(), lr=self.training_overfit_learning_rate, weight_decay=0.0, betas=(0.9, 0.999)
        )
        logger.info(f"{Colors.CYAN}Optimizer:{Colors.RESET} Adam")
        logger.info(f" {Colors.CYAN}learning_rate:{Colors.RESET} {self.training_overfit_learning_rate}")
        logger.info(f" {Colors.CYAN}weight_decay:{Colors.RESET} 0.0")
        logger.info(f" {Colors.CYAN}betas:{Colors.RESET} (0.9, 0.999)")
        # Training Loop
        logger.info("-" * 70)
        logger.info("Training starts at step 1")
        # First/last loss and grad-norm are tracked for the reduction assertions below.
        initial_loss = None
        final_loss = None
        initial_grad_norm = None
        final_grad_norm = None
        training_start = time.perf_counter()
        memory_monitor.reset_peak_stats()
        for step in range(1, self.training_overfit_steps + 1):
            step_start = time.perf_counter()
            optimizer.zero_grad()
            # The same fixed batch is fed at every step (deliberate overfitting).
            outputs = model(**batch)
            loss = outputs.loss
            if initial_loss is None:
                initial_loss = loss.item()
            final_loss = loss.item()
            loss.backward()
            # Clip returns the pre-clip total grad norm, used for the reduction metric.
            grad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
            if initial_grad_norm is None:
                initial_grad_norm = grad_norm.item()
            final_grad_norm = grad_norm.item()
            optimizer.step()
            step_time = time.perf_counter() - step_start
            # Log at frequency
            if step == 1 or step % self.training_overfit_log_freq == 0 or step == self.training_overfit_steps:
                tokens_per_sec = tokens_per_batch / step_time
                mem_stats = memory_monitor.get_stats()
                logger.info(
                    f"{Colors.CYAN}step:{Colors.RESET} {step} "
                    f"{Colors.GREEN}loss:{Colors.RESET} {loss.item():7.4f} "
                    f"{Colors.YELLOW}grad_norm:{Colors.RESET} {grad_norm.item():6.4f} "
                    f"{Colors.MAGENTA}memory:{Colors.RESET} {mem_stats.rss_gib:.2f}GiB({mem_stats.rss_pct:.1f}%) "
                    f"{Colors.BLUE}tok/s:{Colors.RESET} {tokens_per_sec:,.0f} "
                    f"{Colors.DIM}step_time:{Colors.RESET} {step_time:.3f}s"
                )
        training_time = time.perf_counter() - training_start
        # Training Summary
        total_tokens = self.training_overfit_steps * tokens_per_batch
        logger.info("-" * 70)
        logger.info(f"{Colors.BOLD}Training completed{Colors.RESET}")
        logger.info(f"Total training time: {training_time:.2f}s")
        logger.info(f"Total steps: {self.training_overfit_steps}")
        logger.info(f"Total tokens seen: {total_tokens:,}")
        logger.info(f"Average tokens/sec: {total_tokens / training_time:,.0f}")
        # Memory summary
        mem_stats = memory_monitor.get_stats()
        logger.info(f"{Colors.BOLD}Memory usage:{Colors.RESET}")
        logger.info(
            f" {Colors.CYAN}current_rss:{Colors.RESET} {mem_stats.rss_gib:.2f} GiB ({mem_stats.rss_pct:.1f}%)"
        )
        logger.info(
            f" {Colors.CYAN}peak_rss:{Colors.RESET} {mem_stats.peak_rss_gib:.2f} GiB ({mem_stats.peak_rss_pct:.1f}%)"
        )
        logger.info(
            f" {Colors.CYAN}available:{Colors.RESET} {mem_stats.available_gib:.2f} GiB / {mem_stats.total_gib:.2f} GiB"
        )
        # Loss analysis
        # NOTE(review): assumes initial_loss != 0; a zero first loss would divide by zero.
        loss_reduction = (initial_loss - final_loss) / initial_loss * 100
        logger.info(f"{Colors.BOLD}Loss metrics:{Colors.RESET}")
        logger.info(f" {Colors.CYAN}initial_loss:{Colors.RESET} {initial_loss:.4f}")
        logger.info(f" {Colors.CYAN}final_loss:{Colors.RESET} {final_loss:.4f}")
        logger.info(f" {Colors.CYAN}loss_reduction:{Colors.RESET} {loss_reduction:.1f}%")
        # Grad norm analysis
        grad_norm_reduction = (initial_grad_norm - final_grad_norm) / initial_grad_norm * 100
        logger.info(f"{Colors.BOLD}Grad norm metrics:{Colors.RESET}")
        logger.info(f" {Colors.CYAN}initial_grad_norm:{Colors.RESET} {initial_grad_norm:.4f}")
        logger.info(f" {Colors.CYAN}final_grad_norm:{Colors.RESET} {final_grad_norm:.4f}")
        logger.info(f" {Colors.CYAN}grad_norm_reduction:{Colors.RESET} {grad_norm_reduction:.1f}%")
        # Generation Test (only for text/causal LM models)
        # TODO(3outeille): handle audio and generate
        generation_matches = None
        if modality == "text" and hasattr(model, "generate"):
            logger.info("-" * 70)
            logger.info(f"{Colors.BOLD}Testing generation{Colors.RESET}")
            model.eval()
            # Get the expected token sequence (same pattern used in training)
            expected_tokens = batch["input_ids"][0].tolist()
            # Use first token as prompt
            prompt_ids = torch.tensor([[expected_tokens[0]]], dtype=torch.long)
            num_tokens_to_generate = len(expected_tokens) - 1
            logger.info(f"Prompt: {self._decode_text_tokens([expected_tokens[0]])}")
            model_type = getattr(config, "model_type", "")
            # RecurrentGemma is the one architecture run with caching enabled here.
            use_cache = model_type == "recurrent_gemma"
            if use_cache:
                logger.info("Only RecurrentGemmaModel is using use_cache=True. Other models run with use_cache=False")
            with torch.no_grad():
                # Greedy decoding (do_sample=False) so output is deterministic.
                generated_ids = model.generate(
                    prompt_ids,
                    max_new_tokens=num_tokens_to_generate,
                    do_sample=False,
                    pad_token_id=config.pad_token_id if hasattr(config, "pad_token_id") else 0,
                    eos_token_id=0,
                    use_cache=use_cache,
                )
            generated_tokens = generated_ids[0].tolist()
            # Compare generated tokens with expected tokens
            generation_matches = generated_tokens == expected_tokens
            # TODO(3outeille): handle audio and image generation
            if generation_matches:
                logger.info(f"Expected: {Colors.GREEN}{self._decode_text_tokens(expected_tokens)}{Colors.RESET}")
                logger.info(f"Generated: {Colors.GREEN}{self._decode_text_tokens(generated_tokens)}{Colors.RESET}")
                logger.info(f"{Colors.GREEN}✓ Generation matches training sequence!{Colors.RESET}")
            else:
                logger.info(f"Expected: {Colors.GREEN}{self._decode_text_tokens(expected_tokens)}{Colors.RESET}")
                logger.info(f"Generated: {Colors.RED}{self._decode_text_tokens(generated_tokens)}{Colors.RESET}")
                # Count matching tokens
                matches = sum(1 for g, e in zip(generated_tokens, expected_tokens) if g == e)
                logger.info(
                    f"{Colors.YELLOW}✗ Generation mismatch: {matches}/{len(expected_tokens)} tokens match{Colors.RESET}"
                )
        # Assertions
        logger.info("-" * 70)
        logger.info(f"{Colors.BOLD}Running assertions{Colors.RESET}")
        # Assert loss decreased significantly
        loss_reduction_ratio = (initial_loss - final_loss) / initial_loss
        self.assertGreater(
            loss_reduction_ratio,
            self.training_loss_reduction_threshold,
            f"Expected loss to decrease by at least {self.training_loss_reduction_threshold * 100:.0f}%, "
            f"got {loss_reduction:.1f}%",
        )
        logger.info(
            f"{Colors.GREEN}✓ Loss decreased by more than {self.training_loss_reduction_threshold * 100:.0f}%{Colors.RESET}"
        )
        # Assert grad_norm decreased significantly
        grad_norm_reduction_ratio = (initial_grad_norm - final_grad_norm) / initial_grad_norm
        self.assertGreater(
            grad_norm_reduction_ratio,
            self.training_grad_norm_reduction_threshold,
            f"Expected grad_norm to decrease by at least {self.training_grad_norm_reduction_threshold * 100:.0f}%, "
            f"got {grad_norm_reduction:.1f}%",
        )
        logger.info(
            f"{Colors.GREEN}✓ Grad norm decreased by more than {self.training_grad_norm_reduction_threshold * 100:.0f}%{Colors.RESET}"
        )
        # Assert generation matches (if applicable)
        if generation_matches is not None:
            self.assertTrue(generation_matches, "Expected model to generate the training sequence after overfitting")
            logger.info(f"{Colors.GREEN}✓ Generated sequence matches training sequence{Colors.RESET}")
        logger.info("=" * 70)
        logger.info(f"Finished test: {self._testMethodName}")
        logger.info("=" * 70)
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/test_training_mixin.py",
"license": "Apache License 2.0",
"lines": 349,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:src/transformers/trainer_jit_checkpoint.py | import os
import signal
import threading
from .trainer_callback import TrainerCallback
from .trainer_utils import PREFIX_CHECKPOINT_DIR
from .utils import logging
logger = logging.get_logger(__name__)
class CheckpointManager:
    """Coordinates Just-In-Time (JIT) checkpointing when the process receives SIGTERM.

    A SIGTERM starts a `kill_wait`-second timer; once it elapses, `is_checkpoint_requested`
    is set and the trainer callbacks (see `JITCheckpointCallback`) call
    `execute_jit_checkpoint` at the next safe point in the training loop.
    """

    def __init__(self, trainer, kill_wait: int = 3):
        """
        Initialize the CheckpointManager for Just-In-Time checkpoint handling.
        Args:
            trainer: The Trainer instance that will be used to save checkpoints when SIGTERM is received.
            kill_wait (`int`, *optional*, defaults to 3): Grace period to distinguish between SIGTERM and SIGKILL.
        """
        self.trainer = trainer
        # Flag polled by the callbacks; flipped to True only after the kill-wait timer fires.
        self.is_checkpoint_requested = False
        # Previous SIGTERM handler, restored in JITCheckpointCallback.on_train_end.
        self._original_sigterm_handler = None
        self.kill_wait = kill_wait

    def setup_signal_handler(self):
        """Install the SIGTERM handler, keeping the previous one so it can be restored later."""
        self._original_sigterm_handler = signal.signal(signal.SIGTERM, self._sigterm_handler)
        logger.info("JIT checkpoint signal handler registered for SIGTERM")

    def _sigterm_handler(self, signum, frame):
        """SIGTERM handler: schedule the checkpoint request after the grace period."""
        # Ignore further signals once a checkpoint has already been requested.
        if self.is_checkpoint_requested:
            return
        logger.info(f"SIGTERM received, will request JIT checkpoint after {self.kill_wait}s")
        # Delay the request by kill_wait seconds. NOTE(review): repeated SIGTERMs arriving
        # before the flag is set each start another timer; harmless because
        # _enable_checkpoint is idempotent, but worth confirming this is intended.
        threading.Timer(self.kill_wait, self._enable_checkpoint).start()

    def _enable_checkpoint(self):
        """Timer callback: mark that a JIT checkpoint should be taken."""
        logger.info("Kill wait period elapsed, requesting checkpoint")
        self.is_checkpoint_requested = True

    def execute_jit_checkpoint(self):
        """Save a checkpoint immediately, with a sentinel file marking an in-progress save.

        The sentinel file is created before saving and removed on success, so an
        interrupted save leaves behind `checkpoint-is-incomplete.txt` as a marker.

        Raises:
            Exception: Re-raises whatever `_save_checkpoint` raised, after logging it.
        """
        try:
            # Set checkpoint flag to False to avoid multiple checkpoints getting triggered by other callbacks
            self.is_checkpoint_requested = False
            logger.info("Starting JIT checkpointing...")
            current_step = self.trainer.state.global_step
            logger.info(f"Saving JIT checkpoint at step {current_step}")
            output_dir = self.trainer._get_output_dir(trial=None)
            checkpoint_folder = f"{PREFIX_CHECKPOINT_DIR}-{current_step}"
            checkpoint_path = os.path.join(output_dir, checkpoint_folder)
            # Create checkpoint directory
            os.makedirs(checkpoint_path, exist_ok=True)
            # Create a sentinel file to indicate checkpointing is in progress
            sentinel_file = os.path.join(output_dir, checkpoint_folder, "checkpoint-is-incomplete.txt")
            with open(sentinel_file, "w") as f:
                f.write(f"Checkpoint started at step {current_step} and in progress...")
            logger.info(f"Created checkpoint progress sentinel marker file: {sentinel_file}")
            # Invoke the trainer's checkpoint method directly
            self.trainer._save_checkpoint(self.trainer.model, trial=None)
            # Remove sentinel file upon successful checkpointing
            if os.path.exists(sentinel_file):
                os.remove(sentinel_file)
                logger.info("Sentinel marker file removed")
            logger.info("Immediate JIT checkpoint completed successfully")
        except Exception as e:
            logger.error(f"Failed to save JIT checkpoint: {e}")
            raise
class JITCheckpointCallback(TrainerCallback):
    """
    Callback for Just-In-Time checkpointing on SIGTERM signals.
    When SIGTERM is received, the checkpoint manager sets `is_checkpoint_requested=True`.
    The callbacks detect this flag and set `control.should_training_stop=True`, which signals
    the Trainer's training loop to exit gracefully after saving the checkpoint.
    """

    def __init__(self):
        self.trainer = None
        self.jit_manager: CheckpointManager | None = None

    def set_trainer(self, trainer):
        self.trainer = trainer
        if trainer.args.enable_jit_checkpoint:
            self.jit_manager = CheckpointManager(trainer=trainer)
            self.jit_manager.setup_signal_handler()
            logger.info("JIT checkpointing enabled")

    def _checkpoint_if_requested(self, control, suppress_regular_save):
        """Shared hook body: when a checkpoint was requested, save it and stop training."""
        if self.jit_manager is None or not self.jit_manager.is_checkpoint_requested:
            return
        if suppress_regular_save:
            # Skip the trainer's own save so the JIT checkpoint is not duplicated.
            control.should_save = False
        control.should_training_stop = True
        self.jit_manager.execute_jit_checkpoint()

    def on_pre_optimizer_step(self, args, state, control, **kwargs):
        self._checkpoint_if_requested(control, suppress_regular_save=False)

    def on_step_begin(self, args, state, control, **kwargs):
        self._checkpoint_if_requested(control, suppress_regular_save=False)

    def on_step_end(self, args, state, control, **kwargs):
        self._checkpoint_if_requested(control, suppress_regular_save=True)

    def on_epoch_end(self, args, state, control, **kwargs):
        self._checkpoint_if_requested(control, suppress_regular_save=True)

    def on_train_end(self, args, state, control, **kwargs):
        # Restore original SIGTERM handler
        if self.jit_manager and self.jit_manager._original_sigterm_handler is not None:
            signal.signal(signal.SIGTERM, self.jit_manager._original_sigterm_handler)
            logger.info("Restored original SIGTERM handler after training completion")
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/trainer_jit_checkpoint.py",
"license": "Apache License 2.0",
"lines": 96,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
huggingface/transformers:src/transformers/integrations/quark.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..core_model_loading import ConversionOps
from ..utils import is_torch_available
if is_torch_available():
import torch
class QuarkDeserialize(ConversionOps):
    """Conversion op that remaps Quark quantization-state keys onto quark's `QParamsLinear` layout."""

    def __init__(self, hf_quantizer):
        # Kept for interface parity with other ConversionOps; not read by `convert` itself.
        self.hf_quantizer = hf_quantizer

    def convert(
        self,
        # Fixed annotations: `input_dict` is a single-entry mapping (the original
        # `torch.Tensor` annotation was wrong — the body iterates `.items()`), and
        # `missing_keys` must be a set (`.discard` is a set method, not a list method).
        input_dict: dict[str, torch.Tensor],
        model: torch.nn.Module | None = None,
        missing_keys: set[str] | None = None,
        full_layer_name: str | None = None,
        **kwargs,
    ) -> dict[str, torch.Tensor]:
        """Rename one Quark quant-state entry to the key layout quark's QParamsLinear expects.

        Args:
            input_dict: Single-entry mapping from a quant-state suffix (one of
                ``{weight,bias,input,output}_{scale,zero_point}``) to its tensor, possibly
                wrapped in a one-element list.
            model: Unused; accepted for interface compatibility.
            missing_keys: Mutable set of not-yet-loaded keys; the handled key is discarded.
            full_layer_name: Fully qualified checkpoint key, e.g.
                ``model.layers.0.mlp.down_proj.weight_scale``.

        Returns:
            A dict mapping the renamed key (e.g.
            ``model.layers.0.mlp.down_proj.weight_quantizer.scale``) to the tensor.
        """
        # target_key should be in the form of weight_scale, bias_scale, input_scale, output_scale, weight_zero_point, bias_zero_point, input_zero_point, output_zero_point
        target_key, value = next(iter(input_dict.items()))
        value = value[0] if isinstance(value, list) else value
        # this will get the param name : weight, input, bias or output
        param = target_key.split("_", 1)[0]
        # quant_state should be in the form of scale, or zero_point
        quant_state = target_key.split("_", 1)[-1]
        # here we change the name for example from the form of :
        # model.layers.0.mlp.down_proj.weight_scale to model.layers.0.mlp.down_proj.weight_quantizer.scale to fit within
        # the QParamsLinear module of quark
        sub_module_state = full_layer_name.rsplit(".", 1)[0] + "." + param + "_quantizer" + "." + quant_state
        # since quark module was expecting keys in the form of model.layers.0.mlp.down_proj.weight_scale
        # we need to remove it from the missing_keys list
        missing_keys.discard(full_layer_name)
        return {sub_module_state: value}
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/integrations/quark.py",
"license": "Apache License 2.0",
"lines": 43,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/fast_vlm/convert_fastvlm_weights_to_hf.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import glob
import os
import re
from io import BytesIO
import httpx
import torch
from huggingface_hub import snapshot_download
from PIL import Image
from safetensors import safe_open
from transformers import (
AddedToken,
AutoConfig,
AutoTokenizer,
CLIPImageProcessor,
FastVlmConfig,
FastVlmForConditionalGeneration,
LlavaProcessor,
)
# Force regular (non-fused) attention in timm before any model is built: the original
# implementation uses regular attention, and fused attention makes logits diverge.
os.environ["TIMM_FUSED_ATTN"] = (
    "0"  # to avoid logits diverging, needed because the original implementation uses regular (not fused) attention
)
# Substring renames applied last in `convert_state_dict_to_hf`, mapping original
# checkpoint naming onto the HF module hierarchy.
KEYS_TO_MODIFY_MAPPING = {
    "model.vision_tower.vision_tower.model": "model.vision_tower.timm_model",
    "patch_embed": "stem",
    "layers": "language_model.layers",
    "embed_tokens": "language_model.embed_tokens",
    "layer_scale_1": "layer_scale_1.gamma",
    "layer_scale_2": "layer_scale_2.gamma",
    "mm_projector.0": "multi_modal_projector.linear_1",
    "mm_projector.2": "multi_modal_projector.linear_2",
    "conv_exp": "final_conv",
    "se.reduce": "se.fc1",
    "se.expand": "se.fc2",
    "convffn": "mlp",
    "lkb_reparam": "reparam_conv",
}
def map_to_stage(number):
    """Map an original `network.<i>` block index to its HF stage index.

    The original checkpoint flattens stages into a single `network` list; blocks
    0 / 1-2 / 3-4 / 5-7 / 8-10 belong to stages 0 through 4 respectively.

    Args:
        number: Block index (int or numeric string), expected in [0, 10].

    Returns:
        The stage index in [0, 4].

    Raises:
        ValueError: If `number` is outside the supported range. (The previous
            implementation fell through and returned None here, which would have
            silently produced corrupt key names like `stages.None`.)
    """
    stage_of_block = (0, 1, 1, 2, 2, 3, 3, 3, 4, 4, 4)
    number = int(number)
    if not 0 <= number < len(stage_of_block):
        raise ValueError(f"Unsupported network block index: {number}")
    return stage_of_block[number]
def load_original_state_dict(model_id):
    """Download the original checkpoint from the Hub and load all safetensors shards into one dict.

    The classification-head projection tensor has no counterpart in the HF model
    and is dropped when present.
    """
    ckpt_dir = snapshot_download(repo_id=model_id, allow_patterns=["*.safetensors"])
    state_dict = {}
    for shard_path in glob.glob(f"{ckpt_dir}/*.safetensors"):
        with safe_open(shard_path, framework="pt", device="cpu") as shard:
            for tensor_name in shard.keys():
                state_dict[tensor_name] = shard.get_tensor(tensor_name)
    # Drop the original classification head projection, which is unused in HF FastVLM.
    state_dict.pop("model.vision_tower.vision_tower.model.head.proj", None)
    return state_dict
def convert_state_dict_to_hf(state_dict):
    """Translate original FastVLM checkpoint keys to the HF naming scheme.

    Renames are applied in a fixed order per key: layer-scale / norm renames,
    `network.<i>.<j>` and `network.<i>` stage renames, positional-embedding
    nesting, and finally the static substring map in KEYS_TO_MODIFY_MAPPING.
    Tensor values are passed through untouched.
    """
    converted = {}
    single_pattern = r"network\.(\d{1,2})"
    double_pattern = r"network\.(\d{1,2})\.(\d{1,2})"
    pos_embedding_pattern = r"stages\.(\d{1,2})\.reparam_conv"
    for key, tensor in state_dict.items():
        if key.endswith("layer_scale"):
            key = key.replace("layer_scale", "layer_scale.gamma")
        if key.startswith("model.norm"):
            key = key.replace("model.norm", "model.language_model.norm")
        if "token_mixer" not in key:
            key = key.replace(".proj.", ".downsample.proj.")
        # Two-level index: network.<i>.<j> -> stages.<stage(i)>.blocks.<j>
        double_matches = re.findall(double_pattern, key)
        if len(double_matches) == 1:
            outer, inner = double_matches[0]
            key = key.replace(f"network.{outer}.{inner}", f"stages.{map_to_stage(outer)}.blocks.{inner}")
        # One-level index: network.<i> -> stages.<stage(i)>
        single_matches = re.findall(single_pattern, key)
        if len(single_matches) == 1:
            outer = single_matches[0]
            key = key.replace(f"network.{outer}", f"stages.{map_to_stage(outer)}")
        # Nest the reparameterized conv under the stage's pos_emb submodule.
        pos_matches = re.findall(pos_embedding_pattern, key)
        if len(pos_matches) == 1:
            stage_idx = pos_matches[0]
            key = key.replace(f"stages.{stage_idx}", f"stages.{stage_idx}.pos_emb")
        # Finally apply the static substring renames.
        for old_fragment, new_fragment in KEYS_TO_MODIFY_MAPPING.items():
            if old_fragment in key:
                key = key.replace(old_fragment, new_fragment)
        converted[key] = tensor
    return converted
def convert_fastvlm_to_hf(text_model_id, vision_model_id, output_hub_path, old_state_dict_id):
    """Convert an original Apple FastVLM checkpoint to the HF format, validate it, and push to the Hub.

    Args:
        text_model_id: Hub id of the Qwen2 text backbone used to build the text config.
        vision_model_id: Hub id of the timm FastViT backbone used to build the vision config.
        output_hub_path: Target Hub repo; also selects the reference logits for validation.
        old_state_dict_id: Hub repo holding the original (Apple) safetensors state dict.

    Raises:
        ValueError: If `output_hub_path` has no recorded reference logits.

    Requires a CUDA device for the logits sanity check before pushing.
    """
    torch.set_default_dtype(torch.bfloat16)
    text_config = AutoConfig.from_pretrained(text_model_id)
    vision_config = AutoConfig.from_pretrained(vision_model_id)
    # inference_mode folds the reparameterizable FastViT branches for inference.
    vision_config.model_args = {"inference_mode": True}
    vision_config.hidden_size = vision_config.num_features
    vision_config.label2id = {}
    vision_config.id2label = {}
    config = FastVlmConfig(
        text_config=text_config,
        vision_config=vision_config,
    )
    config.vision_feature_select_strategy = "full"
    config.vision_feature_layer = -1
    config.image_token_index = 151646
    config.image_seq_length = 256
    tokenizer = AutoTokenizer.from_pretrained(
        text_model_id,
        chat_template="{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n' }}{% endif %}{{'<|im_start|>' + message['role'] + '\n'}}{# Render all images first #}{% for content in message['content'] | selectattr('type', 'equalto', 'image') %}{{ '<image>' }}{% endfor %}{# Render all text next #}{% for content in message['content'] | selectattr('type', 'equalto', 'text') %}{{ '\n' + content['text'] }}{% endfor %}{{'<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
    )
    tokenizer.add_tokens(AddedToken("<image>", special=True, normalized=False), special_tokens=True)
    image_processor = CLIPImageProcessor(
        crop_size={"height": 1024, "width": 1024},
        image_mean=[0.0, 0.0, 0.0],
        image_std=[1.0, 1.0, 1.0],
        size={"shortest_edge": 1024},
    )
    processor = LlavaProcessor(tokenizer=tokenizer, image_processor=image_processor)
    processor.patch_size = 64  # effective patch size (2^6)
    model = FastVlmForConditionalGeneration(config)
    state_dict = load_original_state_dict(old_state_dict_id)
    state_dict = convert_state_dict_to_hf(state_dict)
    model.load_state_dict(state_dict, strict=True, assign=True)
    # Initialize the newly added <image> token row(s) by sampling from a Gaussian fitted
    # to the existing embedding distribution.
    pre_expansion_embeddings = model.language_model.embed_tokens.weight.data
    mu = torch.mean(pre_expansion_embeddings, dim=0).float()
    n = pre_expansion_embeddings.size()[0]
    sigma = ((pre_expansion_embeddings - mu).T @ (pre_expansion_embeddings - mu)) / n
    dist = torch.distributions.multivariate_normal.MultivariateNormal(mu, covariance_matrix=1e-5 * sigma)
    # We add an image token so we resize the model and pad to 64 for performance reasons
    pad_shape = 64
    vocab_size = config.text_config.vocab_size
    model.resize_token_embeddings(config.text_config.vocab_size + 1, pad_shape)
    model.language_model.embed_tokens.weight.data[vocab_size:] = torch.stack(
        tuple(dist.sample() for _ in range(model.language_model.embed_tokens.weight.data[vocab_size:].shape[0])),
        dim=0,
    )
    model.lm_head.weight.data[vocab_size:] = torch.stack(
        tuple(dist.sample() for _ in range(model.lm_head.weight.data[vocab_size:].shape[0])),
        dim=0,
    )
    # Sanity-check the converted model's logits on a reference image before pushing.
    conversation = [{"role": "user", "content": [{"type": "text", "text": "What are these?"}, {"type": "image"}]}]
    prompt = tokenizer.apply_chat_template(conversation, add_generation_prompt=True, tokenize=False)
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    with httpx.stream("GET", url) as response:
        raw_image = Image.open(BytesIO(response.read()))
    inputs = processor(images=raw_image, text=prompt, return_tensors="pt").to("cuda")
    inputs = {k: (v.to(torch.bfloat16) if v.dtype == torch.float32 else v) for k, v in inputs.items()}
    model = model.cuda()
    model.eval()
    with torch.no_grad():
        logits = model(**inputs).logits
    # in order to get the same logits as in the Apple repo, we need to manually replace the original (Apple) LayerNorm2D with Timm's LayerNorm2D or vice versa
    # otherwise numerical errors accumulate
    if output_hub_path == "KamilaMila/FastVLM-0.5B":
        expected_shape = torch.Size([1, 280, 152000])
        expected_slice = torch.tensor([4.1250, 9.6875, 11.1875], device="cuda")
    elif output_hub_path == "KamilaMila/FastVLM-1.5B":
        expected_shape = torch.Size([1, 280, 152000])
        expected_slice = torch.tensor([3.3750, 11.5000, 11.8125], device="cuda")
    elif output_hub_path == "KamilaMila/FastVLM-7B":
        expected_shape = torch.Size([1, 280, 152128])
        expected_slice = torch.tensor([3.8281, 9.0625, 7.9062], device="cuda")
    else:
        # Previously this fell through and crashed with a NameError on `expected_slice`;
        # fail with a clear message instead.
        raise ValueError(f"No reference logits recorded for output_hub_path '{output_hub_path}'")
    logits_slice = logits[0, -1, :3]
    assert torch.allclose(expected_slice, logits_slice, atol=1e-8)
    assert logits.shape == expected_shape
    model.push_to_hub(output_hub_path)
    processor.push_to_hub(output_hub_path)
    print("Successfully pushed to hub!")
def main():
    """Command-line entry point: parse the conversion arguments and run the conversion."""
    parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter)
    # Defaults convert the 1.5B variant end to end.
    parser.add_argument("--text_model_id", default="Qwen/Qwen2-1.5B", help="Hub location of the text model")
    parser.add_argument(
        "--vision_model_id",
        default="timm/fastvit_mci3.apple_mclip2_dfndr2b",
        help="Hub location of the vision model",
    )
    parser.add_argument(
        "--output_hub_path",
        default="KamilaMila/FastVLM-1.5B",
        help="Location on the hub of the converted model",
    )
    parser.add_argument(
        "--old_state_dict_id",
        default="apple/FastVLM-1.5B",
        help="Location on the hub of the raw state dict of the original model.",
    )
    args = parser.parse_args()
    convert_fastvlm_to_hf(args.text_model_id, args.vision_model_id, args.output_hub_path, args.old_state_dict_id)


if __name__ == "__main__":
    main()
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/fast_vlm/convert_fastvlm_weights_to_hf.py",
"license": "Apache License 2.0",
"lines": 208,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/fast_vlm/modular_fast_vlm.py | # Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from torch import nn
from ...activations import ACT2FN
from ...cache_utils import Cache
from ...configuration_utils import PreTrainedConfig
from ...modeling_outputs import BaseModelOutputWithPooling
from ...processing_utils import Unpack
from ...utils import TransformersKwargs, auto_docstring
from ...utils.generic import can_return_tuple, merge_with_config_defaults
from ..auto import CONFIG_MAPPING
from ..llava.configuration_llava import LlavaConfig
from ..llava.modeling_llava import (
LlavaCausalLMOutputWithPast,
LlavaForConditionalGeneration,
LlavaModel,
LlavaModelOutputWithPast,
LlavaMultiModalProjector,
LlavaPreTrainedModel,
)
class FastVlmConfig(LlavaConfig):
r"""
This is the configuration class to store the configuration of a [`FastVlmForConditionalGeneration`]. It is used to instantiate a
FastVLM model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield the same configuration as the one of FastVLM-7B.
e.g. [KamilaMila/FastVLM-7B](https://huggingface.co/KamilaMila/FastVLM-7B)
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vision_config (`Union[AutoConfig, dict]`, *optional*, defaults to `TimmWrapperConfig` for `fastvit_mci3`):
The config object or dictionary of the vision backbone.
text_config (`Union[AutoConfig, dict]`, *optional*, defaults to `Qwen2Config`):
The config object or dictionary of the text backbone.
image_token_id (`int`, *optional*, defaults to 151646):
The image token index to encode the image prompt.
projector_hidden_act (`str`, *optional*, defaults to `"gelu"`):
The activation function used by the multimodal projector.
vision_feature_select_strategy (`str`, *optional*, defaults to `"full"`):
The feature selection strategy used to select the vision feature from the vision backbone.
Only "full" supported.
vision_feature_layer (`Union[int, list[int]]`, *optional*, defaults to -1):
The index of the layer to select the vision feature. If multiple indices are provided,
the vision feature of the corresponding indices will be concatenated to form the
vision features. Only -1 supported.
multimodal_projector_bias (`bool`, *optional*, defaults to `True`):
Whether to use bias in the multimodal projector.
tie_word_embeddings (`bool`, *optional*, defaults to `False`):
Whether to tie weight embeddings
Example:
```python
>>> from transformers import FastVlmForConditionalGeneration, FastVlmConfig
>>> # Initializing a FastVLM-7B style configuration
>>> configuration = FastVlmConfig()
>>> # Initializing a model from the FastVLM-7B style configuration
>>> model = FastVlmForConditionalGeneration(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "fast_vlm"
def __init__(
self,
vision_config=None,
text_config=None,
image_token_id=151646,
projector_hidden_act="gelu",
vision_feature_select_strategy="full",
vision_feature_layer=-1,
multimodal_projector_bias=True,
tie_word_embeddings=False,
**kwargs,
):
self.image_token_id = image_token_id
self.projector_hidden_act = projector_hidden_act
if vision_feature_select_strategy != "full":
raise ValueError(
f"Unexpected select feature strategy: {vision_feature_select_strategy}. Only 'full' is supported in FastVLM."
)
if vision_feature_layer != -1:
raise ValueError(
f"Unexpected vision feature layer: {vision_feature_layer}. Only -1 is supported in FastVLM."
)
self.vision_feature_select_strategy = vision_feature_select_strategy
self.vision_feature_layer = vision_feature_layer
if isinstance(vision_config, dict):
vision_config["model_type"] = vision_config.get("model_type", "timm_wrapper")
vision_config = CONFIG_MAPPING[vision_config["model_type"]](**vision_config)
elif vision_config is None:
vision_config = CONFIG_MAPPING["timm_wrapper"](
architecture="fastvit_mci3",
do_pooling=True,
global_pool="avg",
hidden_size=3072,
initializer_range=0.02,
model_args={"inference_mode": True},
)
self.vision_config = vision_config
if isinstance(text_config, dict):
text_config["model_type"] = text_config.get("model_type", "qwen2")
text_config = CONFIG_MAPPING[text_config["model_type"]](**text_config)
elif text_config is None:
text_config = CONFIG_MAPPING["qwen2"](
hidden_size=3584,
vocab_size=152128,
intermediate_size=18944,
num_attention_heads=28,
num_key_value_heads=4,
num_hidden_layers=28,
)
self.text_config = text_config
self.multimodal_projector_bias = multimodal_projector_bias
self.tie_word_embeddings = tie_word_embeddings
# The default value is `False` but this config is used with many model types
# Attr `tie_word_embeddings` was saved in text config for those models, so we
# need an ugly workaround and forward-pass the attr from text config
if not tie_word_embeddings and self.text_config.tie_word_embeddings:
self.tie_word_embeddings = self.text_config.tie_word_embeddings
PreTrainedConfig.__init__(**kwargs)
class FastVlmMultiModalProjector(LlavaMultiModalProjector):
    """Two-layer MLP that projects vision-tower features into the text embedding space."""

    def __init__(self, config: FastVlmConfig):
        # Intentionally bypass LlavaMultiModalProjector.__init__ so its layers are not
        # built; FastVLM defines its own sizes below. Fix: `nn.Module.__init__` is an
        # unbound call and must be passed the instance explicitly, otherwise it raises
        # TypeError (missing `self`).
        nn.Module.__init__(self)
        # vision hidden size -> text hidden size
        self.linear_1 = nn.Linear(
            config.vision_config.hidden_size,
            config.text_config.hidden_size,
            bias=config.multimodal_projector_bias,
        )
        self.act = ACT2FN[config.projector_hidden_act]
        # text hidden size -> text hidden size
        self.linear_2 = nn.Linear(
            config.text_config.hidden_size, config.text_config.hidden_size, bias=config.multimodal_projector_bias
        )
class FastVlmPreTrainedModel(LlavaPreTrainedModel):
    """FastVLM base model class; inherits all weight-init and loading behavior from Llava unchanged."""
class FastVlmModelOutputWithPast(LlavaModelOutputWithPast):
    """Output container for `FastVlmModel`; identical fields to Llava's model output."""
class FastVlmModel(LlavaModel):
    """FastVLM multimodal backbone: a timm-wrapped hybrid vision tower feeding a text decoder."""

    # Checkpoints already use the expected key layout; no remapping needed.
    _checkpoint_conversion_mapping = {}

    def __init__(self, config: FastVlmConfig):
        super().__init__(config)

    @can_return_tuple
    @merge_with_config_defaults
    @auto_docstring(
        custom_intro="Obtains image last hidden states from the vision tower and apply multimodal projection."
    )
    def get_image_features(
        self,
        pixel_values: torch.FloatTensor,
        vision_feature_layer: int | list[int] | None = None,
        vision_feature_select_strategy: str | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple | BaseModelOutputWithPooling:
        r"""
        pixel_values (`torch.FloatTensor` of shape `(batch_size, channels, height, width)`):
            The tensors corresponding to the input images.
        vision_feature_layer (`Union[int, list[int]]`, *optional*):
            The index/indices of the layer to select the vision feature. Only -1 supported.
        vision_feature_select_strategy (`str`, *optional*):
            The feature selection strategy used to select the vision feature from the vision backbone.
            Only "full" supported.
        """
        # Drop unset kwargs so the vision tower only receives explicit overrides.
        kwargs = {k: v for k, v in kwargs.items() if v is not None}
        image_outputs = self.vision_tower(pixel_values, return_dict=True, **kwargs)
        # since the vision tower is hybrid in FastVLM, its output needs to be handled differently from Llava:
        # `flatten(2).permute(0, 2, 1)` turns a spatial feature map (presumably
        # (batch, hidden, H, W) — confirm against the timm backbone) into a token
        # sequence (batch, H*W, hidden) before projection.
        selected_image_feature = image_outputs.last_hidden_state
        selected_image_feature = selected_image_feature.flatten(2).permute(0, 2, 1)
        image_features = self.multi_modal_projector(selected_image_feature)
        # Expose the projected features as a per-image list via `pooler_output`.
        image_outputs.pooler_output = list(image_features)
        return image_outputs

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        pixel_values: torch.FloatTensor | None = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: Cache | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        vision_feature_layer: int | list[int] | None = None,
        vision_feature_select_strategy: str | None = None,
        cache_position: torch.LongTensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple | FastVlmModelOutputWithPast:
        r"""
        vision_feature_layer (`Union[int, list[int], NoneType]`, *optional*):
            The index of the layer to select the vision feature. If multiple indices are provided, the vision feature of the
            corresponding indices will be concatenated to form the vision features. Only -1 supported.
        vision_feature_select_strategy (`str`, *optional*):
            The feature selection strategy used to select the vision feature from the vision backbone. Only "full" supported.
        """
        # Exactly one of input_ids / inputs_embeds must be provided.
        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
        if inputs_embeds is None:
            inputs_embeds = self.get_input_embeddings()(input_ids)
        if pixel_values is not None:
            image_features = self.get_image_features(
                pixel_values=pixel_values,
                vision_feature_layer=vision_feature_layer,
                vision_feature_select_strategy=vision_feature_select_strategy,
                return_dict=True,
            ).pooler_output
            # Concatenate the per-image feature list and splice it into the text
            # embeddings at the image-placeholder positions.
            image_features = torch.cat(image_features, dim=0).to(inputs_embeds.device, inputs_embeds.dtype)
            special_image_mask = self.get_placeholder_mask(
                input_ids, inputs_embeds=inputs_embeds, image_features=image_features
            )
            inputs_embeds = inputs_embeds.masked_scatter(special_image_mask, image_features)
        outputs = self.language_model(
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            cache_position=cache_position,
            **kwargs,
        )
        # NOTE: `image_features` is only bound when pixel_values is not None; the
        # conditional expression below never evaluates it otherwise.
        return FastVlmModelOutputWithPast(
            last_hidden_state=outputs.last_hidden_state,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            image_hidden_states=image_features if pixel_values is not None else None,
        )
class FastVlmCausalLMOutputWithPast(LlavaCausalLMOutputWithPast):
    """Causal-LM output container for FastVLM; identical fields to Llava's causal-LM output."""
@auto_docstring(
    custom_intro="""
    The FastVlm model which consists of a vision backbone and a language model.
    """
)
class FastVlmForConditionalGeneration(LlavaForConditionalGeneration):
    # Checkpoints already use the expected key layout; no remapping needed.
    _checkpoint_conversion_mapping = {}

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        pixel_values: torch.FloatTensor | None = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: Cache | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        vision_feature_layer: int | list[int] | None = None,
        vision_feature_select_strategy: str | None = None,
        labels: torch.LongTensor | None = None,
        cache_position: torch.LongTensor | None = None,
        logits_to_keep: int | torch.Tensor = 0,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple | FastVlmCausalLMOutputWithPast:
        r"""
        vision_feature_layer (`Union[int, list[int], NoneType]`, *optional*):
            The index of the layer to select the vision feature. If multiple indices are provided, the vision feature of the
            corresponding indices will be concatenated to form the vision features. Only -1 supported.
        vision_feature_select_strategy (`str`, *optional*):
            The feature selection strategy used to select the vision feature from the vision backbone. Only "full" supported.
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
        Example:
        ```python
        >>> from PIL import Image
        >>> import httpx
        >>> from io import BytesIO
        >>> from transformers import AutoProcessor, AutoModelForImageTextToText
        >>> import torch
        >>> device = "cuda" if torch.cuda.is_available() else "cpu"
        >>> model = AutoModelForImageTextToText.from_pretrained("KamilaMila/FastVLM-0.5B").to(device)
        >>> processor = AutoProcessor.from_pretrained("KamilaMila/FastVLM-0.5B")
        >>> conversation = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "What are these?"},
                        {"type": "image"}
                    ]
                }
            ]
        >>> prompt = processor.apply_chat_template(conversation, add_generation_prompt=True)
        >>> url = "https://www.ilankelman.org/stopsigns/australia.jpg"
        >>> with httpx.stream("GET", url) as response:
        ...     image = Image.open(BytesIO(response.read()))
        >>> inputs = processor(images=image, text=prompt, return_tensors="pt").to(device)
        >>> # Generate
        >>> generated_ids = model.generate(**inputs, max_new_tokens=15)
        >>> print(processor.batch_decode(generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0])
        system\n You are a helpful assistant.\n user\n What are these?\n assistant\n The image depicts a traditional Chinese street...
        ```"""
        # Delegate multimodal fusion and decoding to the base model.
        outputs = self.model(
            input_ids=input_ids,
            pixel_values=pixel_values,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            vision_feature_layer=vision_feature_layer,
            vision_feature_select_strategy=vision_feature_select_strategy,
            cache_position=cache_position,
            **kwargs,
        )
        hidden_states = outputs[0]
        # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
        slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
        logits = self.lm_head(hidden_states[:, slice_indices, :])
        loss = None
        if labels is not None:
            loss = self.loss_function(
                logits=logits, labels=labels, vocab_size=self.config.text_config.vocab_size, **kwargs
            )
        return FastVlmCausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            image_hidden_states=outputs.image_hidden_states,
        )
__all__ = ["FastVlmForConditionalGeneration", "FastVlmModel", "FastVlmPreTrainedModel", "FastVlmConfig"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/fast_vlm/modular_fast_vlm.py",
"license": "Apache License 2.0",
"lines": 326,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:tests/models/fast_vlm/test_modeling_fast_vlm.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the FastVLM model."""
import copy
import unittest
import requests
from transformers import (
AutoProcessor,
FastVlmConfig,
FastVlmForConditionalGeneration,
FastVlmModel,
is_torch_available,
is_vision_available,
)
from transformers.testing_utils import (
cleanup,
require_torch,
require_vision,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
class FastVlmVisionText2TextModelTester:
    """Factory for tiny FastVLM configs and random inputs used by the unit tests."""

    def __init__(
        self,
        parent,
        ignore_index=-100,
        image_token_id=0,
        projector_hidden_act="gelu",
        seq_length=7,
        vision_feature_select_strategy="full",
        vision_feature_layer=-1,
        # Minimal Qwen2 text decoder so tests stay fast.
        text_config={
            "model_type": "qwen2",
            "is_training": True,
            "vocab_size": 99,
            "hidden_size": 32,
            "num_hidden_layers": 2,
            "num_attention_heads": 4,
            "num_key_value_heads": 4,
            "intermediate_size": 37,
            "hidden_act": "gelu",
            "hidden_dropout_prob": 0.1,
            "attention_probs_dropout_prob": 0.1,
            "max_position_embeddings": 512,
            "initializer_range": 0.02,
            "pad_token_id": 1,
        },
        is_training=True,
        # Tiny timm-wrapped FastViT backbone (2 stages) matching FastVLM's hybrid tower.
        vision_config={
            "image_size": 16,
            "patch_size": 8,
            "num_channels": 3,
            "hidden_size": 32,
            "initializer_range": 0.02,
            "architecture": "fastvit_mci3",
            "do_pooling": True,
            "global_pool": "avg",
            "model_args": {
                "inference_mode": True,
                "layers": (2, 2),
                "embed_dims": (8, 16),
                "mlp_ratios": (4, 4),
                "se_downsamples": (False, False),
                "downsamples": (False, True),
                "pos_embs": (None, None),
                "token_mixers": ("repmixer", "repmixer"),
                "lkc_use_act": True,
                "stem_use_scale_branch": False,
            },
        },
    ):
        self.parent = parent
        self.ignore_index = ignore_index
        self.image_token_id = image_token_id
        self.projector_hidden_act = projector_hidden_act
        self.vision_feature_select_strategy = vision_feature_select_strategy
        self.vision_feature_layer = vision_feature_layer
        self.text_config = text_config
        self.vision_config = vision_config
        self.pad_token_id = text_config["pad_token_id"]
        self.num_hidden_layers = text_config["num_hidden_layers"]
        self.vocab_size = text_config["vocab_size"]
        self.hidden_size = text_config["hidden_size"]
        self.num_attention_heads = text_config["num_attention_heads"]
        self.is_training = is_training
        self.batch_size = 3
        # One placeholder token per output patch of the vision tower.
        self.num_image_tokens = (self.vision_config["image_size"] // self.vision_config["patch_size"]) ** 2
        self.seq_length = seq_length + self.num_image_tokens

    def get_config(self):
        """Return a FastVlmConfig built from the tester's tiny sub-configs."""
        return FastVlmConfig(
            text_config=self.text_config,
            vision_config=self.vision_config,
            ignore_index=self.ignore_index,
            image_token_id=self.image_token_id,
            projector_hidden_act=self.projector_hidden_act,
            vision_feature_select_strategy=self.vision_feature_select_strategy,
            vision_feature_layer=self.vision_feature_layer,
        )

    def prepare_config_and_inputs(self):
        """Return (config, random pixel_values) for a batch of square RGB images."""
        pixel_values = floats_tensor(
            [
                self.batch_size,
                self.vision_config["num_channels"],
                self.vision_config["image_size"],
                self.vision_config["image_size"],
            ]
        )
        config = self.get_config()
        return config, pixel_values

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) with image placeholders prepended to each text sequence."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        input_ids = ids_tensor([self.batch_size, self.seq_length], config.text_config.vocab_size - 1) + 1
        # NOTE(review): `image_token_index` presumably resolves to `image_token_id`
        # through the config's attribute map — confirm against FastVlmConfig.
        input_ids[input_ids == config.image_token_index] = self.pad_token_id
        input_ids[:, : self.num_image_tokens] = config.image_token_index
        attention_mask = input_ids.ne(1).to(torch_device)
        inputs_dict = {
            "pixel_values": pixel_values,
            "input_ids": input_ids,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict
@require_torch
class FastVlmForConditionalGenerationModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
    """
    Model tester for `FastVlmForConditionalGeneration`.
    """

    all_model_classes = (
        (
            FastVlmModel,
            FastVlmForConditionalGeneration,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = {"image-text-to-text": FastVlmForConditionalGeneration} if is_torch_available() else {}
    skip_test_image_features_output_shape = True  # FastVLM uses index -3 for hidden_size instead of -1
    _is_composite = True

    def setUp(self):
        self.model_tester = FastVlmVisionText2TextModelTester(self)
        common_properties = ["image_token_id"]
        self.config_tester = ConfigTester(
            self, config_class=FastVlmConfig, has_text_modality=False, common_properties=common_properties
        )

    def test_enable_input_require_grads(self):
        self.skipTest("FastVLM relies on timm architectures unavailable in this test environment.")

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mismatching_num_image_tokens(self):
        """
        Tests that an explicit error is thrown when the number of image tokens
        doesn't match the number of image placeholders in the text.
        We also test multi-image cases when one prompt has multiple image tokens.
        """
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            model.eval()
            curr_input_dict = copy.deepcopy(input_dict)  # in-place modifications further
            _ = model(**curr_input_dict)  # successful forward with no modifications
            # remove one image but leave all the image tokens in text
            curr_input_dict["pixel_values"] = curr_input_dict["pixel_values"][-2:, ...]
            with self.assertRaisesRegex(ValueError, "Image features and image tokens do not match"):
                _ = model(**curr_input_dict)
            # simulate the multi-image/single set of placeholders case by concatenating
            input_ids = curr_input_dict["input_ids"][:1]
            pixel_values = curr_input_dict["pixel_values"][:1]
            pixel_values = torch.cat([pixel_values, pixel_values], dim=0)
            # two images and one set of image tokens raise an error
            with self.assertRaisesRegex(ValueError, "Image features and image tokens do not match"):
                _ = model(input_ids=input_ids, pixel_values=pixel_values)
            # two images and two sets of image tokens don't raise an error
            input_ids = torch.cat([input_ids, input_ids], dim=0)
            _ = model(input_ids=input_ids, pixel_values=pixel_values)

    @unittest.skip("Timm can't be initialized on meta")
    def test_can_be_initialized_on_meta(self):
        pass

    @unittest.skip("Cannot set output_attentions on timm models.")
    def test_get_image_features_attentions(self):
        pass

    def _image_features_get_expected_num_hidden_states(self, model_tester=None):
        # For models that rely on timm for their vision backend, it's hard to infer how many layers the model has
        # from the timm config alone. So, we're just hardcoding the expected number of hidden states here.
        return 2
@require_torch
@slow
class FastVlmForConditionalGenerationIntegrationTest(unittest.TestCase):
    """Slow end-to-end generation tests against the released FastVLM-0.5B checkpoint."""

    def setUp(self):
        self.processor = AutoProcessor.from_pretrained("KamilaMila/FastVLM-0.5B")

    def tearDown(self):
        # Free accelerator memory between tests.
        cleanup(torch_device, gc_collect=True)

    @require_vision
    def test_small_model_integration_test(self):
        """Single-image prompt reproduces a pinned greedy-generation transcript."""
        model = FastVlmForConditionalGeneration.from_pretrained(
            "KamilaMila/FastVLM-0.5B", device_map=torch_device, dtype=torch.bfloat16
        )
        prompt = "user\n<image>\nWhat are the things I should be cautious about when I visit this place?\nassistant"
        image_file = "https://llava-vl.github.io/static/images/view.jpg"
        raw_image = Image.open(requests.get(image_file, stream=True).raw)
        inputs = self.processor(images=raw_image, text=prompt, return_tensors="pt").to(torch_device, dtype=model.dtype)
        output = model.generate(**inputs, max_new_tokens=20)
        expected_decoded_texts = "user\n\nWhat are the things I should be cautious about when I visit this place?\nassistant\n\nWhen visiting this place, there are a few things you should be cautious about:\n\n1. **"  # fmt: skip
        EXPECTED_DECODED_TEXT = expected_decoded_texts
        self.assertEqual(
            self.processor.decode(output[0], skip_special_tokens=True),
            EXPECTED_DECODED_TEXT,
        )

    @require_vision
    def test_small_model_integration_test_batch(self):
        """Batched two-image prompts reproduce pinned transcripts (exercises padding)."""
        model = FastVlmForConditionalGeneration.from_pretrained(
            "KamilaMila/FastVLM-0.5B", device_map=torch_device, dtype=torch.bfloat16
        )
        prompts = [
            "user\n<image>\nWhat are the things I should be cautious about when I visit this place? What should I bring with me?\nassistant",
            "user\n<image>\nWhat is this?\nassistant",
        ]
        image1 = Image.open(requests.get("https://llava-vl.github.io/static/images/view.jpg", stream=True).raw)
        image2 = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
        inputs = self.processor(images=[image1, image2], text=prompts, return_tensors="pt", padding=True).to(
            torch_device,
            dtype=model.dtype,
        )
        output = model.generate(**inputs, max_new_tokens=20)
        EXPECTED_DECODED_TEXT = [
            "user\n\nWhat are the things I should be cautious about when I visit this place? What should I bring with me?\nassistant\n\nWhen visiting this serene place, it's essential to be mindful of the following:\n\n1. **",
            "user\n\nWhat is this?\nassistant\nThe image depicts two cats lying on a pink surface, which could be a couch or a"
        ]  # fmt: skip
        self.assertEqual(
            self.processor.batch_decode(output, skip_special_tokens=True),
            EXPECTED_DECODED_TEXT,
        )

    def test_generation_no_images(self):
        """Text-only prompts must still generate (no pixel_values path)."""
        model_id = "KamilaMila/FastVLM-0.5B"
        model = FastVlmForConditionalGeneration.from_pretrained(
            model_id, device_map=torch_device, dtype=torch.bfloat16
        )
        processor = AutoProcessor.from_pretrained(model_id)
        # Prepare inputs with no images
        inputs = processor(text="Hello, I am", return_tensors="pt").to(torch_device)
        # Make sure that `generate` works
        _ = model.generate(**inputs, max_new_tokens=20)
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/fast_vlm/test_modeling_fast_vlm.py",
"license": "Apache License 2.0",
"lines": 259,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:src/transformers/models/ministral3/configuration_ministral3.py | # Copyright 2025 Mistral AI and the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Ministral model configuration"""
from ...configuration_utils import PreTrainedConfig
from ...modeling_rope_utils import RopeParameters
from ...utils import logging
logger = logging.get_logger(__name__)
class Ministral3Config(PreTrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`Ministral3Model`]. It is used to instantiate an
    Mistral model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of the mistralai/Ministral-3-8B-Base-2512, mistralai/Ministral-3-8B-Instruct-2512 or mistralai/Ministral-3-8B-Reasoning-2512.
    [mistralai/Ministral-3-8B-Base-2512](https://huggingface.co/mistralai/Ministral-3-8B-Base-2512)
    [mistralai/Ministral-3-8B-Instruct-2512](https://huggingface.co/mistralai/Ministral-3-8B-Instruct-2512)
    [mistralai/Ministral-3-8B-Reasoning-2512](https://huggingface.co/mistralai/Ministral-3-8B-Reasoning-2512)
    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.
    Args:
        vocab_size (`int`, *optional*, defaults to 131072):
            Vocabulary size of the Ministral3 model. Defines the number of different tokens that can be represented by
            the `inputs_ids` passed when calling [`Ministral3Model`].
        hidden_size (`int`, *optional*, defaults to 4096):
            Dimensionality of the embeddings and hidden states.
        intermediate_size (`int`, *optional*, defaults to 14336):
            Dimensionality of the intermediate (feed-forward) layer.
        num_hidden_layers (`int`, *optional*, defaults to 34):
            Number of hidden layers in the Transformer decoder.
        num_attention_heads (`int`, *optional*, defaults to 32):
            Number of attention heads for each attention layer in the Transformer decoder.
        num_key_value_heads (`int`, *optional*, defaults to 8):
            This is the number of key_value heads that should be used to implement Grouped Query Attention. If
            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA); if
            `num_key_value_heads=1`, the model will use Multi Query Attention (MQA); otherwise GQA is used.
        head_dim (`int`, *optional*, defaults to 128):
            The attention head dimension. If not specified, will default to `hidden_size // num_attention_heads`.
        hidden_act (`str`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the decoder.
        max_position_embeddings (`int`, *optional*, defaults to 262144):
            The maximum sequence length that this model might ever be used with.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        rms_norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon used by the rms normalization layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        pad_token_id (`int`, *optional*, defaults to 11):
            The id of the padding token.
        bos_token_id (`int`, *optional*, defaults to 1):
            The id of the "beginning-of-sequence" token.
        eos_token_id (`int`, *optional*, defaults to 2):
            The id of the "end-of-sequence" token.
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether the model's input and output word embeddings should be tied.
        rope_parameters (`RopeParameters` or `dict`, *optional*, defaults to `{'type': 'yarn', 'rope_theta': 1000000.0, 'factor': 16.0, 'original_max_position_embeddings': 16384, 'beta_fast': 32.0, 'beta_slow': 1.0, 'mscale_all_dim': 1.0, 'mscale': 1.0, 'llama_4_scaling_beta': 0.1}`):
            Dictionary containing the configuration parameters for the RoPE embeddings, including optional Yarn scaling
            settings such as `factor`, `original_max_position_embeddings`, `mscale`, and `llama_4_scaling_beta`.
        sliding_window (`int`, *optional*):
            Sliding window attention window size. If `None`, full attention is used.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
    Example:
    ```python
    >>> from transformers import Ministral3Config, Ministral3ForCausalLM, Mistral3Config, Mistral3ForConditionalGeneration, PixtralVisionConfig
    >>> # Initializing a Pixtral-vision config
    >>> vision_config = PixtralVisionConfig()
    >>> # Initializing a Ministral3 config
    >>> text_config = Ministral3Config()
    >>> # Initializing a Mistral3 configuration
    >>> configuration = Mistral3Config(vision_config, text_config)
    >>> # Initializing a model from the Ministral3 configuration
    >>> text_model = Ministral3ForCausalLM(text_config)
    >>> # Initializing a model from the Mistral3 configuration
    >>> model = Mistral3ForConditionalGeneration(configuration)
    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "ministral3"
    keys_to_ignore_at_inference = ["past_key_values"]
    # Default tensor parallel plan for base model `MistralModel`
    base_model_tp_plan = {
        "layers.*.self_attn.q_proj": "colwise",
        "layers.*.self_attn.k_proj": "colwise",
        "layers.*.self_attn.v_proj": "colwise",
        "layers.*.self_attn.o_proj": "rowwise",
        "layers.*.mlp.gate_proj": "colwise",
        "layers.*.mlp.up_proj": "colwise",
        "layers.*.mlp.down_proj": "rowwise",
    }
    # Pipeline-parallel plan: stage name -> (input tensor names, output tensor names).
    base_model_pp_plan = {
        "embed_tokens": (["input_ids"], ["inputs_embeds"]),
        "layers": (["hidden_states", "attention_mask"], ["hidden_states"]),
        "norm": (["hidden_states"], ["hidden_states"]),
    }

    def __init__(
        self,
        vocab_size: int | None = 131072,
        hidden_size: int | None = 4096,
        intermediate_size: int | None = 14336,
        num_hidden_layers: int | None = 34,
        num_attention_heads: int | None = 32,
        num_key_value_heads: int | None = 8,
        head_dim: int | None = 128,
        hidden_act: str | None = "silu",
        max_position_embeddings: int | None = 262144,
        initializer_range: float | None = 0.02,
        rms_norm_eps: float | None = 1e-5,
        use_cache: bool | None = True,
        pad_token_id: int | None = 11,
        bos_token_id: int | None = 1,
        eos_token_id: int | None = 2,
        tie_word_embeddings: bool | None = False,
        rope_parameters: RopeParameters | dict[str, RopeParameters] | None = None,
        sliding_window: int | None = None,
        attention_dropout: float | None = 0.0,
        **kwargs,
    ):
        """Initialize a Ministral3 configuration (see class docstring for argument details)."""
        if rope_parameters is None:
            # Default YaRN long-context scaling parameters for the released checkpoints.
            rope_parameters = {
                "type": "yarn",
                "rope_theta": 1000000.0,
                "factor": 16.0,
                "original_max_position_embeddings": 16384,
                "max_position_embeddings": max_position_embeddings,
                "beta_fast": 32.0,
                "beta_slow": 1.0,
                "mscale_all_dim": 1.0,
                "mscale": 1.0,
                "llama_4_scaling_beta": 0.1,
            }
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.sliding_window = sliding_window
        self.head_dim = head_dim if head_dim is not None else hidden_size // num_attention_heads
        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        if "layer_types" in kwargs:
            # `layer_types` signals an alternating-attention Mistral checkpoint that
            # this plain-attention class does not model.
            logger.warning_once(
                "Detected Mistral model with layer_types. Consider using AutoModel or Ministral classes instead to enable alternating attention compatibility."
            )
        self.rope_parameters = rope_parameters
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.tie_word_embeddings = tie_word_embeddings
        # These two keys are Ministral-specific extensions and must be exempt from
        # the base class's standard RoPE-parameter validation.
        super().__init__(
            ignore_keys_at_rope_validation={"llama_4_scaling_beta", "max_position_embeddings"},
            **kwargs,
        )
__all__ = ["Ministral3Config"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/ministral3/configuration_ministral3.py",
"license": "Apache License 2.0",
"lines": 172,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/ministral3/convert_ministral3_weights_to_hf.py | # Copyright 2025 Mistral AI and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import os
import re
import torch
from safetensors.torch import load_file
from transformers import (
GenerationConfig,
Ministral3Config,
Ministral3ForCausalLM,
Mistral3Config,
Mistral3ForConditionalGeneration,
PixtralImageProcessorFast,
PixtralProcessor,
PixtralVisionConfig,
)
from transformers.integrations.finegrained_fp8 import replace_with_fp8_linear
from transformers.integrations.mistral import convert_tekken_tokenizer
from transformers.quantizers.auto import AutoQuantizationConfig
# fmt: off
def get_sd_mapping(has_vision: bool) -> dict:
    """Return the regex mapping from original checkpoint keys to HF state-dict keys.

    Args:
        has_vision: whether the checkpoint is a vision-language model. Text weights
            then live under `model.language_model` instead of `model`.
    """
    model_key = "model.language_model" if has_vision else "model"
    return {
        # Text model keys
        r"^output.weight": r"lm_head.weight",
        r"^norm.weight": rf"{model_key}.norm.weight",
        r"^tok_embeddings.weight": rf"{model_key}.embed_tokens.weight",
        r"^layers.(\d+).attention_norm.weight": rf"{model_key}.layers.\1.input_layernorm.weight",
        r"^layers.(\d+).ffn_norm.weight": rf"{model_key}.layers.\1.post_attention_layernorm.weight",
        r"^layers.(\d+).attention.w(q|k|v|o).weight": rf"{model_key}.layers.\1.self_attn.\2_proj.weight",
        r"^layers.(\d+).feed_forward.w1.weight": rf"{model_key}.layers.\1.mlp.gate_proj.weight",
        r"^layers.(\d+).feed_forward.w2.weight": rf"{model_key}.layers.\1.mlp.down_proj.weight",
        r"^layers.(\d+).feed_forward.w3.weight": rf"{model_key}.layers.\1.mlp.up_proj.weight",
        r"^layers.(\d+).attention.w(q|k|v|o).qscale_act": rf"{model_key}.layers.\1.self_attn.\2_proj.activation_scale",
        r"^layers.(\d+).feed_forward.w1.qscale_act": rf"{model_key}.layers.\1.mlp.gate_proj.activation_scale",
        r"^layers.(\d+).feed_forward.w2.qscale_act": rf"{model_key}.layers.\1.mlp.down_proj.activation_scale",
        r"^layers.(\d+).feed_forward.w3.qscale_act": rf"{model_key}.layers.\1.mlp.up_proj.activation_scale",
        r"^layers.(\d+).attention.w(q|k|v|o).qscale_weight": rf"{model_key}.layers.\1.self_attn.\2_proj.weight_scale_inv",
        r"^layers.(\d+).feed_forward.w1.qscale_weight": rf"{model_key}.layers.\1.mlp.gate_proj.weight_scale_inv",
        r"^layers.(\d+).feed_forward.w2.qscale_weight": rf"{model_key}.layers.\1.mlp.down_proj.weight_scale_inv",
        r"^layers.(\d+).feed_forward.w3.qscale_weight": rf"{model_key}.layers.\1.mlp.up_proj.weight_scale_inv",
        # Vision model keys
        # Fix: anchor with `^` like every other pattern (was previously unanchored).
        r"^vision_encoder.transformer.layers.(\d+).attention_norm.weight": r"model.vision_tower.transformer.layers.\1.attention_norm.weight",
        r"^vision_encoder.transformer.layers.(\d+).ffn_norm.weight": r"model.vision_tower.transformer.layers.\1.ffn_norm.weight",
        r"^vision_encoder.transformer.layers.(\d+).attention.w(q|k|v|o).weight": r"model.vision_tower.transformer.layers.\1.attention.\2_proj.weight",
        r"^vision_encoder.transformer.layers.(\d+).feed_forward.w1.weight": r"model.vision_tower.transformer.layers.\1.feed_forward.gate_proj.weight",
        r"^vision_encoder.transformer.layers.(\d+).feed_forward.w2.weight": r"model.vision_tower.transformer.layers.\1.feed_forward.down_proj.weight",
        r"^vision_encoder.transformer.layers.(\d+).feed_forward.w3.weight": r"model.vision_tower.transformer.layers.\1.feed_forward.up_proj.weight",
        r"^vision_language_adapter.w_in": r"model.multi_modal_projector.linear_1",
        r"^vision_language_adapter.w_out": r"model.multi_modal_projector.linear_2",
        r"^vision_encoder.ln_pre.weight": r"model.vision_tower.ln_pre.weight",
        r"^vision_encoder.patch_conv.weight": r"model.vision_tower.patch_conv.weight",
        r"^patch_merger.merging_layer.weight": r"model.multi_modal_projector.patch_merger.merging_layer.weight",
        r"^pre_mm_projector_norm.weight": r"model.multi_modal_projector.norm.weight",
    }
# fmt: on
def map_old_key_to_new(old_key, mapping):
    """Translate one original state-dict key to its HF equivalent via the regex mapping."""
    for pattern, replacement in mapping.items():
        converted, hits = re.subn(pattern, replacement, old_key)
        # First matching pattern wins.
        if hits:
            return converted
    raise ValueError(f"Key: {old_key} could not be mapped (check the mapping).")
def read_json(path):
    """Load and return the parsed JSON content of *path*."""
    with open(path) as fp:
        return json.load(fp)
def permute_for_rope(tensor, n_heads, dim1, dim2):
    """Permute the weights for the ROPE formulation.

    Interleaves the per-head rows so the (real, imaginary) pairs match the HF
    rotary-embedding layout; the output shape is the same as the input.
    """
    per_half = dim1 // n_heads // 2
    by_head = tensor.view(n_heads, per_half, 2, dim2)
    swapped = by_head.transpose(1, 2)
    return swapped.reshape(dim1, dim2)
def convert_state_dict(original_state_dict: dict, config: Mistral3Config):
    """Convert a state dict file, when a single `nn.Module` is never sharded in different files (usual case)."""
    new_dict = {}
    # Vision-language checkpoints carry a Mistral3Config; text-only ones a Ministral3Config.
    is_vision = isinstance(config, Mistral3Config)
    mapping = get_sd_mapping(is_vision)
    for old_key, tensor in original_state_dict.items():
        # Quantizer bookkeeping tensors have no HF equivalent; drop them.
        if "fake_quantizer" in old_key:
            continue
        new_key = map_old_key_to_new(old_key, mapping)
        if "vision" in old_key:
            # Vision tower uses plain multi-head attention (no GQA): kv heads == attention heads.
            num_attention_heads = config.vision_config.num_attention_heads
            num_key_value_heads = num_attention_heads
            hidden_size = config.vision_config.hidden_size
            head_dim = config.vision_config.head_dim
            key_value_dim = head_dim * num_attention_heads
            query_dim = head_dim * num_attention_heads
        else:
            # Text tower may use GQA, so key/value projections can be narrower than queries.
            text_config = config.text_config if is_vision else config
            num_attention_heads = text_config.num_attention_heads
            hidden_size = text_config.hidden_size
            head_dim = text_config.head_dim
            num_key_value_heads = text_config.num_key_value_heads
            key_value_dim = head_dim * num_key_value_heads
            query_dim = head_dim * num_attention_heads
        # Only q/k weight matrices need the RoPE permutation; scales and other tensors pass through.
        if "q_proj" in new_key and new_key.endswith("weight"):
            tensor = permute_for_rope(tensor, num_attention_heads, query_dim, hidden_size)
        elif "k_proj" in new_key and new_key.endswith("weight"):
            tensor = permute_for_rope(tensor, num_key_value_heads, key_value_dim, hidden_size)
        new_dict[new_key] = tensor
    return new_dict
def convert_config(original_config: dict, max_position_embeddings: int = 262144, is_vision: bool = True):
    """Convert the original `params.json` dict into a HF config object.

    Returns a `Mistral3Config` when the params contain a `vision_encoder` section,
    otherwise a `Ministral3Config`. Note that keys are popped from
    `original_config` in place.

    Args:
        original_config: parsed contents of the original `params.json`.
        max_position_embeddings: fallback when the original config does not define it.
        is_vision: whether a vision encoder is expected; validated against the params.

    Raises:
        ValueError: if `is_vision` disagrees with the params, or an unsupported
            activation-quantization scheme is found.
    """
    original_vision_config = original_config.pop("vision_encoder", None)
    # Explicit raise rather than `assert`: asserts are stripped under `python -O`.
    if is_vision != (original_vision_config is not None):
        raise ValueError(f"is_vision={is_vision} but original_vision_config={original_vision_config}")
    original_text_config = original_config
    # Text config
    text_key_mapping = {
        "hidden_size": "dim",
        "num_hidden_layers": "n_layers",
        "intermediate_size": "hidden_dim",
        "num_attention_heads": "n_heads",
        "num_key_value_heads": "n_kv_heads",
        "rms_norm_eps": "norm_eps",
    }
    similar_text_keys_to_keep = [
        "head_dim",
        "vocab_size",
    ]
    new_text_config_kwargs = {k: original_text_config[v] for k, v in text_key_mapping.items()}
    new_text_config_kwargs.update({k: v for k, v in original_text_config.items() if k in similar_text_keys_to_keep})
    tie_word_embeddings = original_text_config.get("tied_embeddings", False)
    new_text_config_kwargs["tie_word_embeddings"] = tie_word_embeddings
    new_text_config_kwargs["rope_parameters"] = {
        "type": "yarn",
        "rope_theta": original_config.get("rope_theta", 1000000.0),
        "factor": float(original_config["yarn"]["factor"]),
        "original_max_position_embeddings": original_config["yarn"]["original_max_position_embeddings"],
        "beta_fast": float(original_config["yarn"]["beta"]),
        "beta_slow": float(original_config["yarn"]["alpha"]),
        "mscale_all_dim": 1.0 if is_vision else 0.0,
        "mscale": 1.0,
        "llama_4_scaling_beta": original_config.get("llama_4_scaling", {}).get("beta", 0),
    }
    # These are not always defined depending on `params.json`
    new_text_config_kwargs["sliding_window"] = original_text_config.get("sliding_window", None)
    new_text_config_kwargs["max_position_embeddings"] = original_text_config.get(
        "max_position_embeddings", original_text_config.get("max_seq_len", max_position_embeddings)
    )
    # This may sometimes be a string in `params.json`
    if new_text_config_kwargs["sliding_window"] is not None:
        new_text_config_kwargs["sliding_window"] = int(new_text_config_kwargs["sliding_window"])

    def get_maybe_quant_config() -> dict:
        # Build the quantization kwargs only when the checkpoint is fp8-quantized.
        kwargs = {}
        if original_config.get("quantization", {}).get("qformat_weight") == "fp8_e4m3":
            if original_config["quantization"]["qscheme_act"] != "TENSOR":
                raise ValueError(
                    f"Unsupported activation quantization scheme: {original_config['quantization']['qscheme_act']}"
                )
            quantization_config = {
                "activation_scheme": "static",
                "modules_to_not_convert": ["model.vision_tower", "model.multi_modal_projector", "lm_head"],
                "quant_method": "fp8",
                "weight_block_size": None,
            }
            kwargs["quantization_config"] = AutoQuantizationConfig.from_dict(quantization_config)
        return kwargs

    # No vision
    if original_vision_config is None:
        new_text_config = Ministral3Config(**new_text_config_kwargs, **get_maybe_quant_config())
        return new_text_config
    else:
        new_text_config = Ministral3Config(**new_text_config_kwargs)
    # Vision config: strip keys that have no PixtralVisionConfig equivalent before forwarding.
    new_vision_config = original_vision_config
    adapter_bias = new_vision_config.pop("adapter_bias", False)
    _ = new_vision_config.pop("mm_projector_id", None)
    _ = new_vision_config.pop("add_pre_mm_projector_layer_norm", None)
    spatial_merge_size = new_vision_config.pop("spatial_merge_size")
    image_token_id = new_vision_config.pop("image_token_id", 10)
    _ = new_vision_config.pop("image_break_token_id", 12)
    _ = new_vision_config.pop("image_end_token_id", 13)
    _ = new_vision_config.pop("max_image_size")
    new_vision_config = PixtralVisionConfig(hidden_act="silu", **new_vision_config)
    new_config = Mistral3Config(
        vision_config=new_vision_config,
        text_config=new_text_config,
        multimodal_projector_bias=adapter_bias,
        image_token_id=image_token_id,
        spatial_merge_size=spatial_merge_size,
        vision_feature_layer=-1,
        **get_maybe_quant_config(),
    )
    return new_config
def convert_and_write_model(input_dir: str, output_dir: str, max_position_embeddings: int):
    """Convert the model and save it (this implicitly save the config as well)."""
    params = read_json(os.path.join(input_dir, "params.json"))
    # A `vision_encoder` section in params.json marks a vision-language checkpoint.
    is_vision = params.get("vision_encoder") is not None
    config = convert_config(params, max_position_embeddings, is_vision)
    full_state_dict = {}
    # The model may be split between different files, but a single nn.Module is always fully present in a single file
    shards = [file for file in os.listdir(input_dir) if file.endswith(".safetensors")]
    for shard_file in shards:
        original_state_dict = load_file(os.path.join(input_dir, shard_file))
        new_dict = convert_state_dict(original_state_dict, config)
        full_state_dict.update(new_dict)
    text_config = config.text_config if is_vision else config
    if text_config.tie_word_embeddings:
        # Tied checkpoints do not store the LM head separately; alias it to the input embeddings.
        model_key = "model.language_model" if is_vision else "model"
        full_state_dict["lm_head.weight"] = full_state_dict[f"{model_key}.embed_tokens.weight"]
    # Load weights into model and resave them
    # `meta` device avoids allocating memory at init; `assign=True` below attaches the real tensors.
    with torch.device("meta"):
        if isinstance(config, Mistral3Config):
            model = Mistral3ForConditionalGeneration(config)
        elif isinstance(config, Ministral3Config):
            model = Ministral3ForCausalLM(config)
        else:
            raise ValueError(f"Unknown config type {type(config)}.")
    # let's swap nn.Linear to FP8 Linear before loading
    if hasattr(model.config, "quantization_config"):
        model = replace_with_fp8_linear(
            model, model.config.quantization_config.modules_to_not_convert, model.config.quantization_config
        )
    model.load_state_dict(full_state_dict, strict=True, assign=True)
    model.save_pretrained(output_dir)
    return config
def convert_and_write_processor_and_tokenizer(
    input_dir: str, output_dir: str, model_config: Mistral3Config | Ministral3Config
):
    """Convert the tokenizer (and, for vision models, the processor and generation config) and save them."""
    # Local import: `mistral_common` is only needed for this conversion step.
    from mistral_common.tokens.tokenizers.tekken import Tekkenizer

    tokenizer_file = os.path.join(input_dir, "tekken.json")
    tokenizer = convert_tekken_tokenizer(tokenizer_file)
    # No vision
    if isinstance(model_config, Ministral3Config):
        tokenizer.save_pretrained(output_dir)
        return
    tekkenizer = Tekkenizer.from_file(tokenizer_file)
    config = read_json(os.path.join(input_dir, "params.json"))
    patch_size = config["vision_encoder"]["patch_size"]
    spatial_merge_size = config["vision_encoder"]["spatial_merge_size"]
    max_image_size = config["vision_encoder"]["max_image_size"]
    image_processor = PixtralImageProcessorFast(patch_size=patch_size, size={"longest_edge": max_image_size})
    processor = PixtralProcessor(
        tokenizer=tokenizer,
        image_processor=image_processor,
        image_token="[IMG]",
        patch_size=patch_size,
        spatial_merge_size=spatial_merge_size,
    )
    # Finally save it
    processor.save_pretrained(output_dir)
    # Special-token ids come from the tekken tokenizer, not the HF config.
    generation_config = GenerationConfig(
        eos_token_id=tekkenizer.eos_id,
        bos_token_id=tekkenizer.bos_id,
        pad_token_id=tekkenizer.pad_id,
        max_length=model_config.text_config.max_position_embeddings,
    )
    generation_config.save_pretrained(output_dir)
def main():
    """Parse CLI arguments and run the weight, then tokenizer/processor, conversion."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        "input_dir",
        help="Location of Mistral weights, which contains tokenizer.model and model folders",
    )
    arg_parser.add_argument(
        "output_dir",
        help="Location to write HF model and tokenizer",
    )
    arg_parser.add_argument(
        "--max_position_embeddings",
        default=262144,
        type=int,
        help="`max_position_embeddings` field in the config. This needs to be manually passed (not present anywhere otherwise).",
    )
    cli = arg_parser.parse_args()
    model_config = convert_and_write_model(cli.input_dir, cli.output_dir, cli.max_position_embeddings)
    convert_and_write_processor_and_tokenizer(cli.input_dir, cli.output_dir, model_config)


if __name__ == "__main__":
    main()
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/ministral3/convert_ministral3_weights_to_hf.py",
"license": "Apache License 2.0",
"lines": 283,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/ministral3/modular_ministral3.py | from collections.abc import Callable
import torch
from ...cache_utils import Cache
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from ...modeling_layers import (
GenericForQuestionAnswering,
GenericForSequenceClassification,
GenericForTokenClassification,
)
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS
from ...processing_utils import Unpack
from ...utils import auto_docstring, logging
from ..mistral.modeling_mistral import (
MistralAttention,
MistralDecoderLayer,
MistralForCausalLM,
MistralModel,
MistralPreTrainedModel,
apply_rotary_pos_emb,
eager_attention_forward,
)
logger = logging.get_logger(__name__)
def _get_llama_4_attn_scale(positions_ids: torch.Tensor, beta: float, max_position_embeddings: int) -> torch.Tensor:
scaling = 1 + beta * torch.log(1 + torch.floor(positions_ids / max_position_embeddings))
return scaling.unsqueeze(-1)
class Ministral3Attention(MistralAttention):
    """Mistral attention with an additional llama-4 style, position-dependent query scaling."""

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: tuple[torch.Tensor, torch.Tensor],
        attention_mask: torch.Tensor | None,
        past_key_values: Cache | None = None,
        cache_position: torch.LongTensor | None = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> tuple[torch.Tensor, torch.Tensor | None]:
        input_shape = hidden_states.shape[:-1]
        hidden_shape = (*input_shape, -1, self.head_dim)

        query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)

        cos, sin = position_embeddings
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)

        # Main difference with Mistral: scale queries by a factor that grows with the
        # absolute position (see `_get_llama_4_attn_scale`).
        # NOTE(review): assumes `rope_parameters` always contains `llama_4_scaling_beta`
        # and `original_max_position_embeddings`; `.get()` would silently pass None
        # through otherwise — confirm the config always sets them.
        query_states = query_states * _get_llama_4_attn_scale(
            cache_position,
            self.config.rope_parameters.get("llama_4_scaling_beta"),
            self.config.rope_parameters.get("original_max_position_embeddings"),
        ).to(query_states.dtype)

        if past_key_values is not None:
            # sin and cos are specific to RoPE models; cache_position needed for the static cache
            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
            key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)

        attention_interface: Callable = ALL_ATTENTION_FUNCTIONS.get_interface(
            self.config._attn_implementation, eager_attention_forward
        )

        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=0.0 if not self.training else self.attention_dropout,
            scaling=self.scaling,
            sliding_window=getattr(self.config, "sliding_window", None),  # main diff with Llama
            **kwargs,
        )

        attn_output = attn_output.reshape(*input_shape, -1).contiguous()
        attn_output = self.o_proj(attn_output)
        return attn_output, attn_weights
class Ministral3DecoderLayer(MistralDecoderLayer):
    # Inherits MistralDecoderLayer unchanged.
    pass
@auto_docstring
class Ministral3PreTrainedModel(MistralPreTrainedModel):
    # Inherits MistralPreTrainedModel unchanged.
    pass
@auto_docstring
class Ministral3Model(MistralModel):
    # Inherits MistralModel unchanged.
    pass
@auto_docstring
class Ministral3ForCausalLM(MistralForCausalLM):
    # Inherits MistralForCausalLM unchanged.
    pass
class Ministral3ForTokenClassification(GenericForTokenClassification, Ministral3PreTrainedModel):
    # Token-classification head provided by the generic mixin.
    pass
class Ministral3ForSequenceClassification(GenericForSequenceClassification, Ministral3PreTrainedModel):
    # Sequence-classification head provided by the generic mixin.
    pass
class Ministral3ForQuestionAnswering(GenericForQuestionAnswering, Ministral3PreTrainedModel):
    # Question-answering head provided by the generic mixin.
    pass
# Names exported when this module is imported via `from ... import *`.
__all__ = [
    "Ministral3ForCausalLM",
    "Ministral3ForQuestionAnswering",
    "Ministral3Model",
    "Ministral3PreTrainedModel",
    "Ministral3ForSequenceClassification",
    "Ministral3ForTokenClassification",
]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/ministral3/modular_ministral3.py",
"license": "Apache License 2.0",
"lines": 93,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
huggingface/transformers:tests/models/ministral3/test_modeling_ministral3.py | # Copyright 2025 the HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch Ministral3 model."""
import gc
import unittest
import pytest
from transformers import AutoTokenizer, Mistral3ForConditionalGeneration, is_torch_available
from transformers.testing_utils import (
Expectations,
backend_empty_cache,
cleanup,
require_deterministic_for_xpu,
require_flash_attn,
require_torch,
require_torch_accelerator,
slow,
torch_device,
)
if is_torch_available():
import torch
from transformers import (
Ministral3Model,
)
from ...causal_lm_tester import CausalLMModelTest, CausalLMModelTester
class Ministral3ModelTester(CausalLMModelTester):
    """Wires Ministral3 into the generic causal-LM test harness."""

    if is_torch_available():
        base_model_class = Ministral3Model
@require_torch
class Ministral3ModelTest(CausalLMModelTest, unittest.TestCase):
    """Generic model tests for Ministral3, driven by `CausalLMModelTest`."""

    _is_stateful = True
    model_split_percents = [0.5, 0.6]
    model_tester_class = Ministral3ModelTester

    # TODO (ydshieh): Check this. See https://app.circleci.com/pipelines/github/huggingface/transformers/79245/workflows/9490ef58-79c2-410d-8f51-e3495156cf9c/jobs/1012146
    def is_pipeline_test_to_skip(
        self,
        pipeline_test_case_name,
        config_class,
        model_architecture,
        tokenizer_name,
        image_processor_name,
        feature_extractor_name,
        processor_name,
    ):
        # Skip every pipeline test for now (see TODO above).
        return True

    @require_flash_attn
    @require_torch_accelerator
    @pytest.mark.flash_attn_test
    @slow
    def test_flash_attn_2_inference_equivalence_right_padding(self):
        self.skipTest(reason="Ministral3 flash attention does not support right padding")
@require_torch
class Ministral3IntegrationTest(unittest.TestCase):
    """Slow integration tests against the released 3B checkpoint."""

    def tearDown(self):
        cleanup(torch_device, gc_collect=True)

    @slow
    def test_model_3b_logits(self):
        # Checks the per-token mean logits against device-specific golden values.
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = Mistral3ForConditionalGeneration.from_pretrained(
            "mistralai/Ministral-3-3B-Instruct-2512", device_map="auto"
        )
        input_ids = torch.tensor([input_ids]).to(model.device)
        with torch.no_grad():
            out = model(input_ids).logits.float().cpu()
        # Expected mean on dim = -1
        # fmt: off
        EXPECTED_MEANS = Expectations(
            {
                ("cuda", None): torch.tensor([[-1.1503, -1.9935, -0.4457, -1.0717, -1.9182, -1.1431, -0.9697, -1.7098]]),
                ("xpu", None): torch.tensor([[-0.9800, -2.4773, -0.2386, -1.0664, -1.8994, -1.3792, -1.0531, -1.8832]]),
            }
        )
        # fmt: on
        EXPECTED_MEAN = EXPECTED_MEANS.get_expectation()
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, rtol=1e-2, atol=1e-2)
        del model
        backend_empty_cache(torch_device)
        gc.collect()

    @slow
    @require_deterministic_for_xpu
    def test_model_3b_generation(self):
        # Greedy generation must reproduce the device-specific golden completion exactly.
        # fmt: off
        EXPECTED_TEXTS = Expectations(
            {
                ("cuda", None): "My favourite condiment is 100% pure olive oil. It's a staple in my kitchen and I use it in",
                ("xpu", None): "My favourite condiment is iced tea. I love the way it makes me feel. It’s like a little bubble bath for",
            }
        )
        # fmt: on
        EXPECTED_TEXT = EXPECTED_TEXTS.get_expectation()
        prompt = "My favourite condiment is "
        tokenizer = AutoTokenizer.from_pretrained("mistralai/Ministral-3-3B-Instruct-2512")
        model = Mistral3ForConditionalGeneration.from_pretrained(
            "mistralai/Ministral-3-3B-Instruct-2512", device_map="auto"
        )
        input_ids = tokenizer.encode(prompt, return_tensors="pt").to(model.device)
        # greedy generation outputs
        generated_ids = model.generate(input_ids, max_new_tokens=20, temperature=0)
        text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
        self.assertEqual(text, EXPECTED_TEXT)
        del model
        backend_empty_cache(torch_device)
        gc.collect()
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/ministral3/test_modeling_ministral3.py",
"license": "Apache License 2.0",
"lines": 113,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:src/transformers/models/t5gemma2/modular_t5gemma2.py | # Copyright 2025 Google Inc. HuggingFace Inc. team. All rights reserved.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from collections.abc import Callable
from typing import Any, Optional
import torch
import torch.nn as nn
from ... import initialization as init
from ...cache_utils import DynamicCache, EncoderDecoderCache, StaticCache
from ...configuration_utils import PreTrainedConfig, layer_type_validation
from ...generation import GenerationConfig, GenerationMixin, GenerationMode
from ...masking_utils import create_bidirectional_mask
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from ...modeling_outputs import (
BaseModelOutput,
BaseModelOutputWithPastAndCrossAttentions,
BaseModelOutputWithPooling,
Seq2SeqLMOutput,
Seq2SeqModelOutput,
SequenceClassifierOutput,
TokenClassifierOutput,
)
from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS, RopeParameters
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...utils import (
TransformersKwargs,
auto_docstring,
can_return_tuple,
logging,
torch_compilable_check,
)
from ...utils.generic import merge_with_config_defaults
from ...utils.output_capturing import OutputRecorder, capture_outputs
from ..auto import AutoModel
from ..gemma3.configuration_gemma3 import Gemma3Config, Gemma3TextConfig
from ..gemma3.modeling_gemma3 import (
Gemma3Attention,
Gemma3MLP,
Gemma3MultiModalProjector,
Gemma3PreTrainedModel,
Gemma3RMSNorm,
Gemma3RotaryEmbedding,
Gemma3TextScaledWordEmbedding,
apply_rotary_pos_emb,
create_causal_mask,
create_sliding_window_causal_mask,
eager_attention_forward,
)
from ..siglip import SiglipVisionConfig
from ..t5gemma.modeling_t5gemma import (
T5GemmaClassificationHead,
T5GemmaEncoderLayer,
T5GemmaLMHead,
bidirectional_mask_function,
)
logger = logging.get_logger(__name__)
class T5Gemma2TextConfig(Gemma3TextConfig, PreTrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`T5Gemma2TextModel`]. It is used to instantiate the encoder's
    text model portion of the T5Gemma2 Model according to the specified arguments, defining the model architecture. Instantiating
    a configuration with the defaults will yield a similar configuration to that of the T5Gemma2Text-7B.
    e.g. [google/t5gemma2_text-7b](https://huggingface.co/google/t5gemma2_text-7b)
    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.
    Args:
        vocab_size (`int`, *optional*, defaults to 262208):
            Vocabulary size of the T5Gemma2Text model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`T5Gemma2TextModel`]
        hidden_size (`int`, *optional*, defaults to 2304):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 9216):
            Dimension of the MLP representations.
        num_hidden_layers (`int`, *optional*, defaults to 26):
            Number of hidden layers in the Transformer decoder.
        num_attention_heads (`int`, *optional*, defaults to 8):
            Number of attention heads for each attention layer in the Transformer decoder.
        num_key_value_heads (`int`, *optional*, defaults to 4):
            This is the number of key_value heads that should be used to implement Grouped Query Attention. If
            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
            `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
            by meanpooling all the original heads within that group. For more details, check out [this
            paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to
            `num_attention_heads`.
        head_dim (`int`, *optional*, defaults to 256):
            The attention head dimension.
        hidden_activation (`str` or `function`, *optional*, defaults to `"gelu_pytorch_tanh"`):
            The non-linear activation function (function or string) in the decoder. Will default to `"gelu_pytorch_tanh"`
            if not specified. `"gelu_pytorch_tanh"` uses an approximation of the `"gelu"` activation function.
        max_position_embeddings (`int`, *optional*, defaults to 131072):
            The maximum sequence length that this model might ever be used with.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        rms_norm_eps (`float`, *optional*, defaults to 1e-06):
            The epsilon used by the rms normalization layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        pad_token_id (`int`, *optional*, defaults to 0):
            Padding token id.
        eos_token_id (`int`, *optional*, defaults to 1):
            End of stream token id.
        bos_token_id (`int`, *optional*, defaults to 2):
            Beginning of stream token id.
        attention_bias (`bool`, defaults to `False`, *optional*, defaults to `False`):
            Whether to use a bias in the query, key, value and output projection layers during self-attention.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        query_pre_attn_scalar (`float`, *optional*, defaults to 256):
            Scaling factor used on the attention scores
        sliding_window (`int`, *optional*, defaults to 4096):
            In T5Gemma2Text, every other layer uses sliding window attention. This is the size of the sliding window.
        layer_types (`list`, *optional*):
            Attention pattern for each layer.
        final_logit_softcapping (`float`, *optional*):
            Scaling factor when applying tanh softcapping on the logits.
        attn_logit_softcapping (`float`, *optional*):
            Scaling factor when applying tanh softcapping on the attention scores.
        rope_parameters (`RopeParameters`, *optional*):
            Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain
            a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE
            with longer `max_position_embeddings`.
    """

    model_type = "t5gemma2_text"

    def __init__(
        self,
        vocab_size: int | None = 262_208,
        hidden_size: int | None = 2304,
        intermediate_size: int | None = 9216,
        num_hidden_layers: int | None = 26,
        num_attention_heads: int | None = 8,
        num_key_value_heads: int | None = 4,
        head_dim: int | None = 256,
        hidden_activation: str | None = "gelu_pytorch_tanh",
        max_position_embeddings: int | None = 131_072,
        initializer_range: float | None = 0.02,
        # Fix: annotate as float (default 1e-6 is a float, not an int).
        rms_norm_eps: float | None = 1e-6,
        use_cache: bool | None = True,
        pad_token_id: int | None = 0,
        eos_token_id: int | None = 1,
        bos_token_id: int | None = 2,
        attention_bias: bool | None = False,
        attention_dropout: float | None = 0.0,
        query_pre_attn_scalar: int | None = 256,
        sliding_window: int | None = 4096,
        layer_types: list[str] | None = None,
        final_logit_softcapping: float | None = None,
        attn_logit_softcapping: float | None = None,
        rope_parameters: RopeParameters | dict[str, RopeParameters] | None = None,
        **kwargs,
    ):
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.head_dim = head_dim
        self.num_key_value_heads = num_key_value_heads
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.attention_bias = attention_bias
        self.attention_dropout = attention_dropout
        self.hidden_activation = hidden_activation
        self.query_pre_attn_scalar = query_pre_attn_scalar
        self.sliding_window = sliding_window
        self.final_logit_softcapping = final_logit_softcapping
        self.attn_logit_softcapping = attn_logit_softcapping
        self.layer_types = layer_types

        # BC -> the pattern used to be a simple int, and it's still present in configs on the Hub
        self._sliding_window_pattern = kwargs.get("sliding_window_pattern", 6)

        if self.layer_types is None:
            self.layer_types = [
                "sliding_attention" if bool((i + 1) % self._sliding_window_pattern) else "full_attention"
                for i in range(self.num_hidden_layers)
            ]
        layer_type_validation(self.layer_types, self.num_hidden_layers)

        self.rope_parameters = rope_parameters

        # Fix: the unbound `__init__` must receive the instance explicitly —
        # `PreTrainedConfig.__init__(**kwargs)` raised TypeError (missing `self`).
        PreTrainedConfig.__init__(self, **kwargs)
class T5Gemma2EncoderConfig(Gemma3Config):
    """Encoder configuration: Gemma3 config whose text sub-config is `T5Gemma2TextConfig`."""

    model_type = "t5gemma2_encoder"
    sub_configs = {
        "text_config": T5Gemma2TextConfig,
        "vision_config": SiglipVisionConfig,
    }
class T5Gemma2DecoderConfig(Gemma3TextConfig, PreTrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`T5Gemma2DecoderModel`]. It is used to instantiate the decoder
    text model portion of the T5Gemma2 Model according to the specified arguments, defining the model architecture. Instantiating
    a configuration with the defaults will yield a similar configuration to that of the T5Gemma2Decoder-7B.
    e.g. [google/t5gemma2_text-7b](https://huggingface.co/google/t5gemma2_text-7b)
    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.
    Args:
        vocab_size (`int`, *optional*, defaults to 262208):
            Vocabulary size of the T5Gemma2Decoder model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`T5Gemma2DecoderModel`]
        hidden_size (`int`, *optional*, defaults to 2304):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 9216):
            Dimension of the MLP representations.
        num_hidden_layers (`int`, *optional*, defaults to 26):
            Number of hidden layers in the Transformer decoder.
        num_attention_heads (`int`, *optional*, defaults to 8):
            Number of attention heads for each attention layer in the Transformer decoder.
        num_key_value_heads (`int`, *optional*, defaults to 4):
            This is the number of key_value heads that should be used to implement Grouped Query Attention. If
            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
            `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
            by meanpooling all the original heads within that group. For more details, check out [this
            paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to
            `num_attention_heads`.
        head_dim (`int`, *optional*, defaults to 256):
            The attention head dimension.
        hidden_activation (`str` or `function`, *optional*, defaults to `"gelu_pytorch_tanh"`):
            The non-linear activation function (function or string) in the decoder. Will default to `"gelu_pytorch_tanh"`
            if not specified. `"gelu_pytorch_tanh"` uses an approximation of the `"gelu"` activation function.
        max_position_embeddings (`int`, *optional*, defaults to 131072):
            The maximum sequence length that this model might ever be used with.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        rms_norm_eps (`float`, *optional*, defaults to 1e-06):
            The epsilon used by the rms normalization layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        pad_token_id (`int`, *optional*, defaults to 0):
            Padding token id.
        eos_token_id (`int`, *optional*, defaults to 1):
            End of stream token id.
        bos_token_id (`int`, *optional*, defaults to 2):
            Beginning of stream token id.
        attention_bias (`bool`, *optional*, defaults to `False`):
            Whether to use a bias in the query, key, value and output projection layers during self-attention.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        query_pre_attn_scalar (`float`, *optional*, defaults to 256):
            Scaling factor used on the attention scores
        sliding_window (`int`, *optional*, defaults to 4096):
            In T5Gemma2Decoder, every other layer uses sliding window attention. This is the size of the sliding window.
        layer_types (`list`, *optional*):
            Attention pattern for each layer.
        final_logit_softcapping (`float`, *optional*):
            Scaling factor when applying tanh softcapping on the logits.
        attn_logit_softcapping (`float`, *optional*):
            Scaling factor when applying tanh softcapping on the attention scores.
        rope_parameters (`RopeParameters`, *optional*):
            Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain
            a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE
            with longer `max_position_embeddings`.
    """

    model_type = "t5gemma2_decoder"

    def __init__(
        self,
        vocab_size: int | None = 262_208,
        hidden_size: int | None = 2304,
        intermediate_size: int | None = 9216,
        num_hidden_layers: int | None = 26,
        num_attention_heads: int | None = 8,
        num_key_value_heads: int | None = 4,
        head_dim: int | None = 256,
        hidden_activation: str | None = "gelu_pytorch_tanh",
        max_position_embeddings: int | None = 131_072,
        initializer_range: float | None = 0.02,
        rms_norm_eps: float | None = 1e-6,
        use_cache: bool | None = True,
        pad_token_id: int | None = 0,
        eos_token_id: int | None = 1,
        bos_token_id: int | None = 2,
        attention_bias: bool | None = False,
        attention_dropout: float | None = 0.0,
        query_pre_attn_scalar: int | None = 256,
        sliding_window: int | None = 4096,
        layer_types: list[str] | None = None,
        final_logit_softcapping: float | None = None,
        attn_logit_softcapping: float | None = None,
        rope_parameters: RopeParameters | dict[str, RopeParameters] | None = None,
        **kwargs,
    ):
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.head_dim = head_dim
        self.num_key_value_heads = num_key_value_heads
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.attention_bias = attention_bias
        self.attention_dropout = attention_dropout
        self.hidden_activation = hidden_activation
        self.query_pre_attn_scalar = query_pre_attn_scalar
        self.sliding_window = sliding_window
        self.final_logit_softcapping = final_logit_softcapping
        self.attn_logit_softcapping = attn_logit_softcapping
        self.layer_types = layer_types
        # BC -> the pattern used to be a simple int, and it's still present in configs on the Hub
        self._sliding_window_pattern = kwargs.get("sliding_window_pattern", 6)
        if self.layer_types is None:
            # Every `_sliding_window_pattern`-th layer is full attention, the rest use sliding windows.
            self.layer_types = [
                "sliding_attention" if bool((i + 1) % self._sliding_window_pattern) else "full_attention"
                for i in range(self.num_hidden_layers)
            ]
        layer_type_validation(self.layer_types, self.num_hidden_layers)
        self.rope_parameters = rope_parameters
        # Call PreTrainedConfig directly (not via MRO) to skip Gemma3TextConfig's own init.
        # Fix: the unbound call must receive `self` explicitly or it raises a TypeError.
        PreTrainedConfig.__init__(self, **kwargs)
class T5Gemma2Config(PreTrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`T5Gemma2Model`]. It is used to instantiate an T5Gemma2
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
    defaults will yield a similar configuration to a hypothetical balanced Gemma3 encoder-decoder model.
    e.g. [google/t5gemma-2-270m-270m](https://huggingface.co/google/t5gemma-2-270m-270m)
    Configuration objects inherit from [PreTrainedConfig] and can be used to control the model outputs. Read the
    documentation from [PreTrainedConfig] for more information.
    Args:
        encoder (`Union[T5Gemma2EncoderConfig, dict]`, *optional*):
            Configuration for the encoder.
        decoder (`Union[T5Gemma2DecoderConfig, dict]`, *optional*):
            Configuration for the decoder.
        is_encoder_decoder (`bool`, *optional*, defaults to `True`):
            Whether the model is used as an encoder/decoder or not.
        dropout_rate (`float`, *optional*, defaults to 0.0):
            The ratio for all dropout layers (following T5).
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for attention.
        classifier_dropout_rate (`float`, *optional*, defaults to 0.0):
            The dropout ratio for classifier (following T5).
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        image_token_index (`int`, *optional*, defaults to 256001):
            The image token index to encode the image prompt. Defaults to 256001, which is right after the eoi_token_index.
            Note this is different from Gemma 3.
        tie_word_embeddings (`bool`, *optional*, defaults to `True`):
            Whether to tie weight embeddings
    ```python
    >>> from transformers import T5Gemma2Config, T5Gemma2Model
    >>> t5gemma2_config = T5Gemma2Config.from_pretrained("google/t5gemma-270m-270m")
    >>> model = T5Gemma2Model(t5gemma2_config)
    ```
    """

    model_type = "t5gemma2"
    keys_to_ignore_at_inference = ["past_key_values"]
    sub_configs = {
        "encoder": T5Gemma2EncoderConfig,
        "decoder": T5Gemma2DecoderConfig,
    }
    attribute_map = {
        "image_token_id": "image_token_index",
        "eoi_token_id": "eoi_token_index",
    }

    def __init__(
        self,
        encoder: T5Gemma2EncoderConfig | dict[str, Any] | None = None,
        decoder: T5Gemma2DecoderConfig | dict[str, Any] | None = None,
        is_encoder_decoder: bool = True,
        dropout_rate: float = 0.0,
        attention_dropout: float = 0.0,
        classifier_dropout_rate: float = 0.0,
        initializer_range: float = 0.02,
        image_token_index: int = 256_001,
        tie_word_embeddings: bool | None = True,
        **kwargs,
    ):
        # Normalize `encoder` to a T5Gemma2EncoderConfig (accepts dict / None / config instance).
        if isinstance(encoder, dict):
            encoder = T5Gemma2EncoderConfig(**encoder)
        elif encoder is None:
            encoder = T5Gemma2EncoderConfig()
            logger.info("encoder is None, using default T5Gemma2EncoderConfig encoder config.")
        else:
            if not isinstance(encoder, T5Gemma2EncoderConfig):
                raise ValueError(f"{type(encoder)} is not supported.")
        # Same normalization for `decoder`.
        if isinstance(decoder, dict):
            decoder = T5Gemma2DecoderConfig(**decoder)
        elif decoder is None:
            decoder = T5Gemma2DecoderConfig()
            logger.info("decoder is None, using default T5Gemma2DecoderConfig decoder config.")
        else:
            if not isinstance(decoder, T5Gemma2DecoderConfig):
                raise ValueError(f"{type(decoder)} is not supported.")
        # Only "balanced" encoder/decoder pairs are supported: widths and vocabularies must match.
        if encoder.text_config.hidden_size != decoder.hidden_size:
            raise ValueError(
                "Imbalanced encoder-decoder is not supported in T5Gemma2: "
                f"encoder ({encoder.text_config.hidden_size}) vs decoder ({decoder.hidden_size})."
            )
        if not is_encoder_decoder:
            raise ValueError("T5Gemma2Model only support encoder-decoder modeling.")
        if encoder.text_config.vocab_size != decoder.vocab_size:
            raise ValueError(
                "Imbalanced encoder-decoder vocabulary size is not supported in T5Gemma2: "
                f"encoder ({encoder.text_config.vocab_size}) vs decoder ({decoder.vocab_size})."
            )
        # Encoder.
        encoder.text_config.dropout_rate = dropout_rate
        encoder.text_config.attention_dropout = attention_dropout
        encoder.vision_config.attention_dropout = attention_dropout
        encoder.image_token_index = image_token_index
        self.encoder = encoder
        # Decoder.
        decoder.dropout_rate = dropout_rate
        decoder.attention_dropout = attention_dropout
        self.decoder = decoder
        # Surface the decoder's special tokens / vocab size at the top level unless overridden.
        for special_token_key in ["bos_token_id", "pad_token_id", "eos_token_id", "vocab_size"]:
            if special_token_key not in kwargs:
                kwargs[special_token_key] = getattr(decoder, special_token_key)
        self.classifier_dropout_rate = classifier_dropout_rate
        self.initializer_range = initializer_range
        self.eoi_token_index = encoder.eoi_token_index
        self.image_token_index = image_token_index
        self.tie_word_embeddings = tie_word_embeddings
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
class T5Gemma2RMSNorm(Gemma3RMSNorm):
    """RMS normalization for T5Gemma2; identical to the Gemma3 implementation."""

    pass
class T5Gemma2MLP(Gemma3MLP):
    """Gemma3 gated MLP with an extra T5-style dropout between the gate and the down projection."""

    def __init__(self, config: T5Gemma2TextConfig):
        super().__init__(config)
        self.dropout = nn.Dropout(config.dropout_rate)

    def forward(self, x):
        # gate -> activation, elementwise product with the up projection, dropout, then project down.
        gated = self.act_fn(self.gate_proj(x)) * self.up_proj(x)
        return self.down_proj(self.dropout(gated))
class T5Gemma2RotaryEmbedding(Gemma3RotaryEmbedding):
    # Thin wrapper over Gemma3RotaryEmbedding; exists so the modular converter emits a
    # T5Gemma2-named class with an overridable default-RoPE entry point.
    def __init__(self, config: T5Gemma2TextConfig, device=None):
        super().__init__(config, device)

    @staticmethod
    def compute_default_rope_parameters(
        config: T5Gemma2TextConfig | None = None,
        device: Optional["torch.device"] = None,
        seq_len: int | None = None,
        layer_type: str | None = None,
    ) -> tuple["torch.Tensor", float]:
        # NOTE(review): zero-argument super() inside a @staticmethod binds against the frame's
        # first argument (`config`) at runtime — presumably the modular conversion tooling
        # rewrites this call; confirm it is never invoked directly on this class.
        return super().compute_default_rope_parameters(config, device, seq_len, layer_type)
class T5Gemma2SelfAttention(Gemma3Attention):
    # Bidirectional variant of Gemma3 attention used by the encoder stack.
    def __init__(self, config: T5Gemma2TextConfig, layer_idx: int):
        super().__init__(config, layer_idx)
        self.is_causal = False  # Only used by the encoder
class T5Gemma2MergedAttention(Gemma3Attention):
    """Merged self-attention and cross-attention for decoder."""

    def __init__(self, config: T5Gemma2TextConfig, layer_idx: int):
        super().__init__(config, layer_idx)
        self.is_causal = False  # Fused causal and encoder mask

    def forward(
        self,
        # decoder self-attention inputs
        hidden_states: torch.Tensor,
        position_embeddings: tuple[torch.Tensor, torch.Tensor],
        merged_attention_mask: torch.Tensor | None,
        # cross-attention inputs
        encoder_hidden_states: torch.Tensor,
        # cache inputs
        past_key_values: EncoderDecoderCache | None = None,
        cache_position: torch.LongTensor | None = None,
        # others
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> tuple[torch.Tensor, torch.Tensor | None, tuple[torch.Tensor] | None]:
        """Run decoder self-attention and cross-attention as one fused attention call.

        Decoder and encoder keys/values are concatenated along the key-length axis so a
        single attention pass over `merged_attention_mask` covers both streams; the
        resulting weights are split back into self- and cross-attention components.

        Returns:
            `(attn_output, self_attn_weights, cross_attn_weights)`; the weight tensors
            are `None` when the attention backend does not return probabilities.
        """
        # attention shapes.
        input_shape = hidden_states.shape[:-1]
        hidden_shape = (*input_shape, -1, self.head_dim)
        cross_input_shape = encoder_hidden_states.shape[:-1]
        cross_hidden_shape = (*cross_input_shape, -1, self.head_dim)
        # self-attention: projections, QK-norm, then RoPE on the decoder stream.
        query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        query_states = self.q_norm(query_states)
        key_states = self.k_norm(key_states)
        cos, sin = position_embeddings
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
        if past_key_values is not None:
            # self-attention.
            # sin and cos are specific to RoPE models; cache_position needed for the static cache
            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
            self_attention_cache = past_key_values.self_attention_cache
            key_states, value_states = self_attention_cache.update(
                key_states, value_states, self.layer_idx, cache_kwargs
            )
        # cross-attention: encoder keys/values are computed once per sequence and cached
        # (no RoPE on the encoder stream). Guard the cache lookups so a cache-less call
        # (past_key_values=None, e.g. training) does not dereference None.
        is_updated = past_key_values.is_updated.get(self.layer_idx) if past_key_values is not None else False
        if past_key_values is None or not is_updated:
            cross_key_states = self.k_proj(encoder_hidden_states).view(cross_hidden_shape).transpose(1, 2)
            cross_value_states = self.v_proj(encoder_hidden_states).view(cross_hidden_shape).transpose(1, 2)
            cross_key_states = self.k_norm(cross_key_states)
            if past_key_values is not None:
                cross_key_states, cross_value_states = past_key_values.cross_attention_cache.update(
                    cross_key_states, cross_value_states, self.layer_idx
                )
                past_key_values.is_updated[self.layer_idx] = True
        else:
            cross_key_states = past_key_values.cross_attention_cache.layers[self.layer_idx].keys
            cross_value_states = past_key_values.cross_attention_cache.layers[self.layer_idx].values
        # merged attention: concatenate decoder and encoder keys/values on the kv-length axis.
        cross_key_size = cross_input_shape[1]
        key_states = torch.cat([key_states, cross_key_states], dim=2)
        value_states = torch.cat([value_states, cross_value_states], dim=2)
        attention_interface: Callable = ALL_ATTENTION_FUNCTIONS.get_interface(
            self.config._attn_implementation, eager_attention_forward
        )
        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            merged_attention_mask,
            dropout=self.attention_dropout if self.training else 0.0,
            scaling=self.scaling,
            **kwargs,
        )
        attn_output = attn_output.reshape(*input_shape, -1).contiguous()
        attn_output = self.o_proj(attn_output)
        # decompose merged attention weights into self & cross attention weights
        if attn_weights is not None:
            self_attn_weights = attn_weights[..., :-cross_key_size]
            cross_attn_weights = attn_weights[..., -cross_key_size:]
        else:
            self_attn_weights, cross_attn_weights = None, None
        return attn_output, self_attn_weights, cross_attn_weights
def sliding_window_mask_function(sliding_window: int, is_causal=True) -> Callable:
    """
    Build a mask predicate implementing sliding-window attention.

    Causal mode keeps `sliding_window` positions ending at the query (past only);
    bidirectional mode splits the window roughly evenly before and after the query.
    The returned callable has the standard `(batch_idx, head_idx, q_idx, kv_idx)`
    mask-function signature and answers whether `kv_idx` is visible from `q_idx`.
    """
    # The window geometry only depends on the arguments, so resolve it once up front.
    if is_causal:
        left_span, right_span = sliding_window, 0
    else:
        left_span = (sliding_window + 1) // 2
        right_span = sliding_window // 2 + 1

    def inner_mask(batch_idx: int, head_idx: int, q_idx: int, kv_idx: int) -> bool:
        offset = q_idx - kv_idx
        in_left_window = (offset >= 0) & (offset < left_span)
        in_right_window = (offset < 0) & (-offset < right_span)
        return in_left_window | in_right_window

    return inner_mask
class T5Gemma2EncoderLayer(T5GemmaEncoderLayer):
    """Encoder layer; reuses the T5Gemma encoder layer unchanged."""

    pass
class T5Gemma2DecoderLayer(T5GemmaEncoderLayer):
    """Decoder sub-layer: merged attention instead of vanilla self-attention."""

    def __init__(self, config, layer_idx: int):
        super().__init__(config, layer_idx)
        # Swap the vanilla self-attention for merged attention so self- and
        # cross-attention run as a single fused call.
        self.self_attn = T5Gemma2MergedAttention(
            config=config,
            layer_idx=layer_idx,
        )

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: tuple[torch.Tensor, torch.Tensor],
        merged_attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: EncoderDecoderCache | None = None,
        use_cache: bool | None = False,
        cache_position: torch.LongTensor | None = None,
        encoder_hidden_states: torch.Tensor | None = None,
        **kwargs,
    ) -> torch.FloatTensor:
        # Attention sub-block: pre-norm -> merged attention -> post-norm -> dropout + residual.
        attn_input = self.pre_self_attn_layernorm(hidden_states)
        attn_output, _, _ = self.self_attn(
            hidden_states=attn_input,
            position_embeddings=position_embeddings,
            merged_attention_mask=merged_attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            use_cache=use_cache,
            cache_position=cache_position,
            encoder_hidden_states=encoder_hidden_states,
            **kwargs,
        )
        hidden_states = hidden_states + self.dropout(self.post_self_attn_layernorm(attn_output))

        # Feed-forward sub-block: pre-norm -> MLP -> post-norm -> dropout + residual.
        mlp_output = self.post_feedforward_layernorm(self.mlp(self.pre_feedforward_layernorm(hidden_states)))
        hidden_states = hidden_states + self.dropout(mlp_output)
        return hidden_states
class T5Gemma2LMHead(T5GemmaLMHead):
    """Language-modeling head; reuses the T5Gemma implementation unchanged."""

    pass
class T5Gemma2ClassificationHead(T5GemmaClassificationHead):
    """Sequence-classification head; reuses the T5Gemma implementation unchanged."""

    pass
class T5Gemma2MultiModalProjector(Gemma3MultiModalProjector):
    # Projects vision-tower features into the text embedding space; Gemma3 implementation as-is.
    def __init__(self, config: T5Gemma2EncoderConfig):
        super().__init__(config)
class T5Gemma2TextScaledWordEmbedding(Gemma3TextScaledWordEmbedding):
    """T5Gemma2 Embedding: override to add eoi token embedding separately."""

    def __init__(
        self,
        num_embeddings: int,
        embedding_dim: int,
        padding_idx: int,
        embed_scale: float = 1.0,
        eoi_token_index: int = 256_000,
    ):
        super().__init__(num_embeddings, embedding_dim, padding_idx, embed_scale)
        # Token id whose embedding lives in a dedicated learned parameter (end-of-image token).
        self.eoi_token_index = eoi_token_index
        self.eoi_embedding = nn.Parameter(torch.zeros(self.embedding_dim))

    def forward(self, input_ids: torch.Tensor):
        # NOTE(review): a "ScaledWordEmbedding" parent presumably applies `embed_scale` already,
        # so the extra multiply here may double-scale — confirm against Gemma3TextScaledWordEmbedding.
        input_embeddings = super().forward(input_ids) * self.embed_scale.to(self.weight.dtype)
        # Positions holding the eoi token are overwritten (after scaling) with the dedicated parameter.
        input_embeddings[input_ids == self.eoi_token_index] = self.eoi_embedding.to(input_embeddings.dtype)
        return input_embeddings
@auto_docstring
class T5Gemma2PreTrainedModel(Gemma3PreTrainedModel):
    # Base class wiring config type, weight initialization, and output recording
    # for every T5Gemma2 model head.
    config: T5Gemma2Config
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    # Mask creation is incompatible
    # FA due to non-default creation / SWA
    _supports_flash_attn = False
    # Flex due to custom masks not compatible to be merged after creation
    _supports_flex_attn = False
    _no_split_modules = [
        "T5Gemma2EncoderLayer",
        "T5Gemma2DecoderLayer",
        "SiglipVisionEmbeddings",
        "SiglipEncoderLayer",
        "SiglipMultiheadAttentionPoolingHead",
    ]
    _can_record_outputs = {
        "hidden_states": [T5Gemma2EncoderLayer, T5Gemma2DecoderLayer],
        "attentions": [
            OutputRecorder(T5Gemma2SelfAttention, index=1, layer_name="self_attn"),
            OutputRecorder(T5Gemma2MergedAttention, index=1, layer_name="self_attn"),
            OutputRecorder(T5Gemma2MergedAttention, index=2, layer_name="cross_attn"),
        ],
    }

    def _init_weights(self, module):
        # Generic init first; then handle the T5Gemma2-specific modules below.
        PreTrainedModel._init_weights(self, module)
        if isinstance(module, T5Gemma2MultiModalProjector):
            init.zeros_(module.mm_input_projection_weight)
        elif isinstance(module, T5Gemma2TextScaledWordEmbedding):
            # eoi embedding starts at zero; embed_scale is reset to its configured constant.
            init.zeros_(module.eoi_embedding)
            init.constant_(module.embed_scale, module.scalar_embed_scale)
        elif isinstance(module, T5Gemma2ClassificationHead):
            # Shrink the init std by 1/sqrt(num_labels) for the output projection.
            scale = module.out_proj.weight.shape[0] ** -0.5
            init.normal_(module.out_proj.weight, mean=0.0, std=self.config.initializer_range * scale)
            if hasattr(module.out_proj, "bias") and module.out_proj.bias is not None:
                init.zeros_(module.out_proj.bias)
        # We initialize with 0s to be 1 centered as the RMSNorm here does (1 + weight)
        elif "RMSNorm" in module.__class__.__name__:
            init.zeros_(module.weight)
        elif isinstance(module, T5Gemma2RotaryEmbedding):
            # Recompute and copy the inverse RoPE frequencies for each layer type.
            for layer_type in module.layer_types:
                rope_init_fn = module.compute_default_rope_parameters
                if module.rope_type[layer_type] != "default":
                    rope_init_fn = ROPE_INIT_FUNCTIONS[module.rope_type[layer_type]]
                curr_inv_freq, _ = rope_init_fn(module.config, layer_type=layer_type)
                init.copy_(getattr(module, f"{layer_type}_inv_freq"), curr_inv_freq)
                init.copy_(getattr(module, f"{layer_type}_original_inv_freq"), curr_inv_freq)

    def prepare_decoder_input_ids_from_labels(self, input_ids):
        """
        Shifts input_ids to the right, prepends the decoder_start_token_id, and handles
        pad_token_id replacement for labels that were -100.
        This is a common preparation step for decoder inputs in sequence-to-sequence models.
        """
        decoder_config = self.config.decoder
        decoder_start_token_id = decoder_config.bos_token_id
        pad_token_id = decoder_config.pad_token_id
        if decoder_start_token_id is None:
            raise ValueError("self.model.config.decoder.bos_token_id has to be defined. ")
        # shift inputs to the right
        shifted_input_ids = input_ids.new_zeros(input_ids.shape)
        shifted_input_ids[..., 1:] = input_ids[..., :-1].clone()
        shifted_input_ids[..., 0] = decoder_start_token_id
        if pad_token_id is None:
            raise ValueError("self.model.config.decoder.pad_token_id has to be defined.")
        # Is this T5 specific?
        # replace possible -100 values in labels by `pad_token_id`
        shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)
        return shifted_input_ids
class T5Gemma2TextEncoder(T5Gemma2PreTrainedModel):
    # Text-only bidirectional encoder stack (embeddings + encoder layers + final norm).
    config: T5Gemma2TextConfig
    _can_record_outputs = {
        "attentions": T5Gemma2SelfAttention,
        "hidden_states": T5Gemma2EncoderLayer,
    }

    def __init__(
        self,
        config: T5Gemma2TextConfig,
        eoi_token_index: int = 256_000,
    ):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size
        # Scaled embeddings (scale = sqrt(hidden_size)) with a dedicated eoi-token parameter.
        self.embed_tokens = T5Gemma2TextScaledWordEmbedding(
            config.vocab_size,
            config.hidden_size,
            self.padding_idx,
            embed_scale=config.hidden_size**0.5,
            eoi_token_index=eoi_token_index,
        )
        self.norm = T5Gemma2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.gradient_checkpointing = False
        self.layers = nn.ModuleList(
            [T5Gemma2EncoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        self.dropout = nn.Dropout(config.dropout_rate)
        self.rotary_emb = T5Gemma2RotaryEmbedding(config)
        # Initialize weights and apply final processing
        self.post_init()

    @merge_with_config_defaults
    @capture_outputs
    @auto_docstring
    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        # Unused for processor compatibility kept in signature.
        token_type_ids: torch.Tensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> BaseModelOutput:
        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
        # As we want to pass `past_key_values=None` explicitly everywhere, we need to pop them from kwargs if present
        kwargs.pop("past_key_values", None)
        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)
        if position_ids is None:
            position_ids = torch.arange(0, inputs_embeds.shape[1], device=inputs_embeds.device).unsqueeze(0)
        # `attention_mask` may already be a {layer_type -> mask} dict (walrus keeps either form);
        # otherwise build bidirectional masks, with a windowed variant for sliding layers.
        if not isinstance(self_attn_mask_mapping := attention_mask, dict):
            mask_kwargs = {
                "config": self.config,
                "inputs_embeds": inputs_embeds,
                "attention_mask": attention_mask,
            }
            self_attn_mask_mapping = {
                "full_attention": create_bidirectional_mask(**mask_kwargs),
                "sliding_attention": create_bidirectional_mask(
                    **mask_kwargs,
                    and_mask_function=sliding_window_mask_function(self.config.sliding_window, is_causal=False),
                ),
            }
        # input layer
        hidden_states = inputs_embeds
        # global and local position embeddings
        position_embeddings = {}
        for layer_type in self.config.layer_types:
            position_embeddings[layer_type] = self.rotary_emb(hidden_states, position_ids, layer_type)
        # dropout
        hidden_states = self.dropout(hidden_states)
        for layer_module in self.layers[: self.config.num_hidden_layers]:
            hidden_states = layer_module(
                hidden_states,
                position_embeddings[layer_module.attention_type],
                self_attn_mask_mapping[layer_module.attention_type],
                position_ids,
                **kwargs,
            )
        # Final RMS norm + T5-style output dropout.
        hidden_states = self.norm(hidden_states)
        hidden_states = self.dropout(hidden_states)
        return BaseModelOutput(
            last_hidden_state=hidden_states,
        )
class T5Gemma2Encoder(T5Gemma2PreTrainedModel):
    # Multimodal encoder: SigLIP vision tower + projector feeding image features into
    # the text encoder's embedding stream at image-placeholder positions.
    config: T5Gemma2EncoderConfig

    def __init__(
        self,
        config: T5Gemma2EncoderConfig,
        eoi_token_index: int = 256_000,
    ):
        super().__init__(config)
        self.text_model = T5Gemma2TextEncoder._from_config(config.text_config, eoi_token_index=eoi_token_index)
        self.vision_tower = AutoModel.from_config(config=config.vision_config)
        self.multi_modal_projector = T5Gemma2MultiModalProjector(config)
        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.text_model.get_input_embeddings()

    def set_input_embeddings(self, new_embeddings):
        return self.text_model.set_input_embeddings(new_embeddings)

    @can_return_tuple
    @auto_docstring
    def get_image_features(
        self, pixel_values: torch.Tensor, **kwargs: Unpack[TransformersKwargs]
    ) -> tuple | BaseModelOutputWithPooling:
        # pixel_values: (batch_size, channels, height, width)
        # image_features: Image feature tensor of shape (num_images, image_length, embed_dim).
        vision_outputs = self.vision_tower(pixel_values=pixel_values, return_dict=True, **kwargs)
        last_hidden_state = vision_outputs.last_hidden_state
        image_features = self.multi_modal_projector(last_hidden_state)
        # The projected features are returned through `pooler_output`.
        vision_outputs.pooler_output = image_features
        return vision_outputs

    def get_image_placeholder_mask(
        self,
        input_ids: torch.LongTensor | None,
        inputs_embeds: torch.FloatTensor | None,
        image_features: torch.FloatTensor,
    ):
        """
        Obtains multimodal placeholder mask from `input_ids` or `inputs_embeds`, and checks that the placeholder token count is
        equal to the length of multimodal features. If the lengths are different, an error is raised.
        """
        image_token_id = self.config.image_token_id
        if input_ids is None:
            if inputs_embeds is None:
                raise ValueError("Either `input_ids` or `inputs_embeds` has to be provided.")
            # Without ids, detect placeholders by comparing embeddings to the image-token embedding.
            special_image_mask = inputs_embeds == self.get_input_embeddings()(
                torch.tensor(image_token_id, dtype=torch.long, device=inputs_embeds.device)
            )
            special_image_mask = special_image_mask.all(-1)
        else:
            special_image_mask = input_ids == image_token_id
        n_image_tokens = special_image_mask.sum()
        special_image_mask = special_image_mask.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device)
        n_image_features = image_features.shape[0] * image_features.shape[1]
        torch_compilable_check(
            inputs_embeds[special_image_mask].numel() == image_features.numel(),
            f"Image features and image tokens do not match: tokens: {n_image_tokens}, features {n_image_features}",
        )
        return special_image_mask

    @auto_docstring
    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        pixel_values: torch.FloatTensor | None = None,
        # Unused for processor compatibility kept in signature.
        token_type_ids: torch.Tensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> BaseModelOutput:
        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
        if inputs_embeds is None:
            inputs_embeds = self.text_model.embed_tokens(input_ids)
        if pixel_values is not None:
            # Project image features and scatter them into the placeholder positions.
            image_features = self.get_image_features(pixel_values, return_dict=True).pooler_output
            image_features = image_features.to(inputs_embeds.device, inputs_embeds.dtype)
            image_mask = self.get_image_placeholder_mask(
                input_ids, inputs_embeds=inputs_embeds, image_features=image_features
            )
            inputs_embeds = inputs_embeds.masked_scatter(image_mask, image_features)
        outputs = self.text_model(
            inputs_embeds=inputs_embeds,
            attention_mask=attention_mask,
            position_ids=position_ids,
            **kwargs,
        )
        return outputs
class T5Gemma2Decoder(T5Gemma2PreTrainedModel):
    # Decoder stack built from merged-attention layers; self- and cross-attention masks
    # are concatenated so each layer performs a single fused attention pass.
    config: T5Gemma2DecoderConfig
    _can_record_outputs = {
        "attentions": OutputRecorder(T5Gemma2MergedAttention, index=1),
        "cross_attentions": OutputRecorder(T5Gemma2MergedAttention, index=2),
        "hidden_states": T5Gemma2DecoderLayer,
    }

    def __init__(self, config: T5Gemma2DecoderConfig, eoi_token_index: int = 256_000):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size
        self.embed_tokens = T5Gemma2TextScaledWordEmbedding(
            config.vocab_size,
            config.hidden_size,
            config.pad_token_id,
            embed_scale=config.hidden_size**0.5,
            eoi_token_index=eoi_token_index,
        )
        self.norm = T5Gemma2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.gradient_checkpointing = False
        self.layers = nn.ModuleList(
            [T5Gemma2DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        self.dropout = nn.Dropout(config.dropout_rate)
        self.rotary_emb = T5Gemma2RotaryEmbedding(config)
        self.post_init()

    @merge_with_config_defaults
    @capture_outputs
    @auto_docstring
    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: EncoderDecoderCache | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        use_cache: bool | None = None,
        cache_position: torch.LongTensor | None = None,
        encoder_hidden_states: torch.Tensor | None = None,
        encoder_attention_mask: torch.Tensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> BaseModelOutputWithPastAndCrossAttentions:
        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
        if encoder_hidden_states is None:
            raise ValueError("`encoder_hidden_states` must be given in decoder")
        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)
        # Lazily create a joint self/cross cache for inference.
        if not self.training and use_cache and past_key_values is None:
            past_key_values = EncoderDecoderCache(DynamicCache(config=self.config), DynamicCache())
        if cache_position is None:
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position = torch.arange(
                past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
            )
        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)
        # Self-attention masks: `attention_mask` may already be a {layer_type -> mask} dict.
        if not isinstance(self_attn_mask_mapping := attention_mask, dict):
            mask_kwargs = {
                "config": self.config,
                "inputs_embeds": inputs_embeds,
                "attention_mask": attention_mask,
                "cache_position": cache_position,
                "past_key_values": past_key_values.self_attention_cache if past_key_values is not None else None,
                "position_ids": position_ids,
            }
            # this masking function did nothing to masking but forces `allow_is_causal_skip` to be False
            # as we always need a mask during decoding for merged attention.
            mask_kwargs["and_mask_function"] = lambda *args: torch.tensor(True, dtype=torch.bool)
            self_attn_mask_mapping = {
                "full_attention": create_causal_mask(**mask_kwargs),
                "sliding_attention": create_sliding_window_causal_mask(**mask_kwargs),
            }
        # Cross-attention mask over the encoder sequence (bidirectional over valid tokens).
        if not isinstance(cross_attn_mask_mapping := encoder_attention_mask, dict):
            mask_kwargs = {
                "config": self.config,
                "inputs_embeds": encoder_hidden_states,
                "attention_mask": encoder_attention_mask,
                "cache_position": cache_position,
                "past_key_values": None,
                "position_ids": None,
            }
            cross_attn_mask_mapping = {
                "full_attention": create_causal_mask(
                    **mask_kwargs,
                    or_mask_function=bidirectional_mask_function(encoder_attention_mask),
                ),
            }
        # Concatenate self and cross masks along the kv axis to match the merged attention layout.
        merged_attn_mask_mapping = {
            "full_attention": torch.cat(
                [self_attn_mask_mapping["full_attention"], cross_attn_mask_mapping["full_attention"]], dim=-1
            ),
            "sliding_attention": torch.cat(
                [self_attn_mask_mapping["sliding_attention"], cross_attn_mask_mapping["full_attention"]], dim=-1
            ),
        }
        # input layer
        hidden_states = inputs_embeds
        # global and local position embeddings
        position_embeddings = {}
        for layer_type in self.config.layer_types:
            position_embeddings[layer_type] = self.rotary_emb(hidden_states, position_ids, layer_type)
        # dropout
        hidden_states = self.dropout(hidden_states)
        for layer_module in self.layers[: self.config.num_hidden_layers]:
            hidden_states = layer_module(
                hidden_states,
                position_embeddings[layer_module.attention_type],
                merged_attn_mask_mapping[layer_module.attention_type],
                position_ids,
                past_key_values,
                use_cache,
                cache_position,
                encoder_hidden_states,
                **kwargs,
            )
        # Final RMS norm + T5-style output dropout.
        hidden_states = self.norm(hidden_states)
        hidden_states = self.dropout(hidden_states)
        return BaseModelOutputWithPastAndCrossAttentions(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values,
        )
@auto_docstring
class T5Gemma2Model(T5Gemma2PreTrainedModel):
    # The decoder's token embedding is tied to the encoder text tower's embedding.
    _tied_weights_keys = {
        "decoder.embed_tokens.weight": "encoder.text_model.embed_tokens.weight",
        "decoder.embed_tokens.eoi_embedding": "encoder.text_model.embed_tokens.eoi_embedding",
    }

    def __init__(self, config: T5Gemma2Config):
        super().__init__(config)
        # Encoder (text + vision) and decoder are each built from their own sub-config.
        self.encoder = T5Gemma2Encoder(config.encoder, config.eoi_token_index)
        self.decoder = T5Gemma2Decoder(config.decoder, config.eoi_token_index)
        self.post_init()

    def get_encoder(self):
        return self.encoder

    def get_decoder(self):
        return self.decoder

    def get_input_embeddings(self):
        return self.encoder.get_input_embeddings()

    def set_input_embeddings(self, new_embeddings):
        return self.encoder.set_input_embeddings(new_embeddings)

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        # encoder inputs
        input_ids: torch.LongTensor | None = None,
        pixel_values: torch.FloatTensor | None = None,
        attention_mask: torch.FloatTensor | None = None,
        position_ids: torch.LongTensor | None = None,
        # decoder inputs
        decoder_input_ids: torch.LongTensor | None = None,
        decoder_attention_mask: torch.BoolTensor | None = None,
        decoder_position_ids: torch.LongTensor | None = None,
        # others (mainly inference or cache related)
        encoder_outputs: BaseModelOutput | None = None,
        past_key_values: EncoderDecoderCache | None = None,
        inputs_embeds: torch.Tensor | None = None,
        decoder_inputs_embeds: torch.Tensor | None = None,
        use_cache: bool | None = None,
        cache_position: torch.LongTensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> Seq2SeqModelOutput:
        r"""
        decoder_position_ids (`torch.LongTensor` of shape `(batch_size, decoder_sequence_length)`, *optional*):
            Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the range `[0,
            config.decoder.n_positions - 1]`. [What are position IDs?](../glossary#position-ids)
        """
        # Run the encoder only when precomputed encoder outputs were not supplied
        # (they are reused across decoding steps at generation time).
        if encoder_outputs is None:
            encoder_outputs = self.encoder(
                input_ids=input_ids,
                pixel_values=pixel_values,
                attention_mask=attention_mask,
                position_ids=position_ids,
                inputs_embeds=inputs_embeds,
                return_dict=True,
                **kwargs,
            )
        memory = encoder_outputs.last_hidden_state

        # The decoder cross-attends to the encoder states; the encoder's attention
        # mask doubles as the cross-attention mask.
        decoder_outputs = self.decoder(
            input_ids=decoder_input_ids,
            attention_mask=decoder_attention_mask,
            position_ids=decoder_position_ids,
            inputs_embeds=decoder_inputs_embeds,
            past_key_values=past_key_values,
            encoder_hidden_states=memory,
            encoder_attention_mask=attention_mask,
            use_cache=use_cache,
            cache_position=cache_position,
            return_dict=True,
            **kwargs,
        )

        return Seq2SeqModelOutput(
            last_hidden_state=decoder_outputs.last_hidden_state,
            past_key_values=decoder_outputs.past_key_values,
            decoder_hidden_states=decoder_outputs.hidden_states,
            decoder_attentions=decoder_outputs.attentions,
            cross_attentions=decoder_outputs.cross_attentions,
            encoder_last_hidden_state=encoder_outputs.last_hidden_state,
            encoder_hidden_states=encoder_outputs.hidden_states,
            encoder_attentions=encoder_outputs.attentions,
        )
class T5Gemma2ForConditionalGeneration(T5Gemma2PreTrainedModel, GenerationMixin):
    """T5Gemma2 encoder-decoder model with a language-modeling head, for conditional generation."""

    # The LM head projection is tied to the encoder text tower's token embedding.
    _tied_weights_keys = {
        "lm_head.out_proj.weight": "model.encoder.text_model.embed_tokens.weight",
    }
    _tp_plan = {"lm_head.out_proj": "colwise_gather_output"}
    _pp_plan = {"lm_head.out_proj": (["hidden_states"], ["logits"])}

    def __init__(self, config: T5Gemma2Config):
        super().__init__(config)
        self.model = T5Gemma2Model(config)
        self.vocab_size = config.decoder.vocab_size
        self.lm_head = T5Gemma2LMHead(config.decoder.hidden_size, self.vocab_size)
        # Decoder inputs are right-shifted before the loss (see `forward`), so the
        # generic masked-LM loss applies directly.
        self.loss_type = "ForMaskedLM"
        self.post_init()

    def set_output_embeddings(self, new_embeddings):
        self.lm_head.out_proj = new_embeddings

    def get_output_embeddings(self):
        return self.lm_head.out_proj

    def get_input_embeddings(self):
        return self.model.get_input_embeddings()

    def set_input_embeddings(self, value):
        self.model.set_input_embeddings(value)

    def get_encoder(self):
        return self.model.get_encoder()

    def get_decoder(self):
        return self.model.get_decoder()

    @can_return_tuple
    @auto_docstring
    def get_image_features(
        self, pixel_values: torch.Tensor, **kwargs: Unpack[TransformersKwargs]
    ) -> tuple | BaseModelOutputWithPooling:
        # Delegates to the encoder's vision pathway.
        return self.get_encoder().get_image_features(pixel_values, **kwargs)

    @property
    def vision_tower(self):
        # Convenience accessor for the encoder's vision backbone.
        return self.get_encoder().vision_tower

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        # encoder inputs
        input_ids: torch.LongTensor | None = None,
        pixel_values: torch.FloatTensor | None = None,
        attention_mask: torch.FloatTensor | None = None,
        position_ids: torch.LongTensor | None = None,
        # decoder inputs
        decoder_input_ids: torch.LongTensor | None = None,
        decoder_attention_mask: torch.BoolTensor | None = None,
        decoder_position_ids: torch.LongTensor | None = None,
        # others (mainly inference or cache related)
        encoder_outputs: BaseModelOutput | None = None,
        past_key_values: EncoderDecoderCache | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        decoder_inputs_embeds: torch.FloatTensor | None = None,
        labels: torch.LongTensor | None = None,
        use_cache: bool | None = None,
        cache_position: torch.LongTensor | None = None,
        logits_to_keep: int | torch.Tensor = 0,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple[torch.FloatTensor] | Seq2SeqLMOutput:
        r"""
        decoder_position_ids (`torch.LongTensor` of shape `(batch_size, decoder_sequence_length)`, *optional*):
            Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the range `[0,
            config.decoder.n_positions - 1]`. [What are position IDs?](../glossary#position-ids)
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
        """
        if labels is not None and decoder_input_ids is None and decoder_inputs_embeds is None:
            # get decoder inputs from shifting lm labels to the right
            decoder_input_ids = self.prepare_decoder_input_ids_from_labels(labels)
        decoder_outputs: Seq2SeqModelOutput = self.model(
            input_ids=input_ids,
            pixel_values=pixel_values,
            attention_mask=attention_mask,
            position_ids=position_ids,
            decoder_input_ids=decoder_input_ids,
            decoder_attention_mask=decoder_attention_mask,
            decoder_position_ids=decoder_position_ids,
            encoder_outputs=encoder_outputs,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            decoder_inputs_embeds=decoder_inputs_embeds,
            use_cache=use_cache,
            cache_position=cache_position,
            **kwargs,
        )
        hidden_states = decoder_outputs.last_hidden_state
        # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
        slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
        logits = self.lm_head(hidden_states[:, slice_indices, :])
        decoder_config = self.config.decoder
        if decoder_config.final_logit_softcapping is not None:
            # Gemma-style soft cap: cap * tanh(logits / cap) bounds logits to (-cap, cap).
            logits = logits / decoder_config.final_logit_softcapping
            logits = torch.tanh(logits)
            logits = logits * decoder_config.final_logit_softcapping
        loss = None
        if labels is not None:
            # Input has right-shifted so we directly perform masked lm loss
            loss = self.loss_function(logits, labels, self.vocab_size, **kwargs)
        return Seq2SeqLMOutput(
            loss=loss,
            logits=logits,
            past_key_values=decoder_outputs.past_key_values,
            decoder_hidden_states=decoder_outputs.decoder_hidden_states,
            decoder_attentions=decoder_outputs.decoder_attentions,
            cross_attentions=decoder_outputs.cross_attentions,
            encoder_last_hidden_state=decoder_outputs.encoder_last_hidden_state,
            encoder_hidden_states=decoder_outputs.encoder_hidden_states,
            encoder_attentions=decoder_outputs.encoder_attentions,
        )

    def _prepare_cache_for_generation(
        self,
        generation_config: GenerationConfig,
        model_kwargs: dict,
        generation_mode: GenerationMode,
        batch_size: int,
        max_cache_length: int,
    ) -> bool:
        """Override cache preparation to support T5Gemma2-specific EncoderDecoder Cache.

        NOTE(review): annotated ``-> bool`` but every path returns ``None`` — confirm
        against the `GenerationMixin` base signature.
        """
        # Build cache and past_key_values structure first and then override as needed.
        super()._prepare_cache_for_generation(
            generation_config,
            model_kwargs,
            generation_mode,
            batch_size,
            max_cache_length,
        )
        # If use_cache is False, do not prepare the cache.
        if generation_config.use_cache is False:
            return
        cache_implementation = generation_config.cache_implementation
        if cache_implementation is None:
            offload_cache = False
        else:
            offload_cache = "offloaded" in generation_config.cache_implementation
        # Main change: use full cache for cross-attention.
        cross_attn_config = copy.deepcopy(self.config.get_text_config(decoder=True))
        # cross-attention does not use sliding window
        del cross_attn_config.sliding_window
        del cross_attn_config.layer_types
        cross_attn_cache_kwargs = {
            "config": cross_attn_config,
            "offloading": offload_cache,
        }
        past_key_values = model_kwargs.get("past_key_values")
        if past_key_values is not None:
            if not isinstance(past_key_values, EncoderDecoderCache):
                raise ValueError(
                    "The `past_key_values` in `model_kwargs` must be of type `EncoderDecoderCache` for T5Gemma2 model."
                )
            # Cache already established, no need to re-initialize.
            if len(past_key_values.is_updated) > 0 and past_key_values.is_updated.get(0):
                return
            cross_attn_cls = type(past_key_values.cross_attention_cache)
            if cross_attn_cls == StaticCache:
                # Static caches need an explicit length: use the encoder sequence length.
                cross_attn_cache_kwargs["max_cache_len"] = model_kwargs["encoder_outputs"][0].shape[1]
            # Update cross-attention cache only (switch from sliding_window to full).
            past_key_values.cross_attention_cache = cross_attn_cls(**cross_attn_cache_kwargs)
        else:
            # Initialize new cache.
            model_kwargs["past_key_values"] = EncoderDecoderCache(
                DynamicCache(
                    **{
                        "config": self.config.get_text_config(decoder=True),
                        "offloading": offload_cache,
                    }
                ),  # self-attention cache
                DynamicCache(),  # cross-attention cache
            )
            # Keep the internal cache handle in sync with the freshly created one.
            if hasattr(self, "_cache") and self._cache is not None:
                if not isinstance(self._cache, EncoderDecoderCache):
                    raise ValueError("The internal cache must be of type `EncoderDecoderCache` for T5Gemma2 model.")
                self._cache = model_kwargs["past_key_values"]
@auto_docstring
class T5Gemma2ForSequenceClassification(T5Gemma2PreTrainedModel):
    def __init__(self, config: T5Gemma2Config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.hidden_size = config.decoder.hidden_size
        self.model = T5Gemma2Model(config)
        # Default to 0.1 dropout when the config does not define `classifier_dropout_rate`.
        dropout_rate = getattr(config, "classifier_dropout_rate", 0.1)
        self.score = T5Gemma2ClassificationHead(self.hidden_size, self.num_labels, dropout_rate)
        self.post_init()

    def get_input_embeddings(self):
        return self.model.get_input_embeddings()

    def set_input_embeddings(self, value):
        self.model.set_input_embeddings(value)

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        pixel_values: torch.FloatTensor | None = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        decoder_input_ids: torch.LongTensor | None = None,
        decoder_attention_mask: torch.Tensor | None = None,
        decoder_position_ids: torch.LongTensor | None = None,
        encoder_outputs: BaseModelOutput | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        decoder_inputs_embeds: torch.FloatTensor | None = None,
        labels: torch.LongTensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> SequenceClassifierOutput:
        r"""
        decoder_position_ids (`torch.LongTensor` of shape `(batch_size, decoder_sequence_length)`, *optional*):
            Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the range `[0,
            config.decoder.n_positions - 1]`. [What are position IDs?](../glossary#position-ids)
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        # Guard clauses: embeddings input is unsupported, token ids are mandatory.
        if inputs_embeds is not None or decoder_inputs_embeds is not None:
            raise NotImplementedError(
                f"Passing input embeddings is currently not supported for {self.__class__.__name__}."
            )
        if input_ids is None:
            raise ValueError("You have to specify input_ids")
        if decoder_input_ids is None:
            # Synthesize decoder inputs by right-shifting the encoder inputs.
            decoder_input_ids = self.prepare_decoder_input_ids_from_labels(input_ids)

        seq2seq_out: Seq2SeqModelOutput = self.model(
            input_ids,
            pixel_values=pixel_values,
            attention_mask=attention_mask,
            position_ids=position_ids,
            decoder_input_ids=decoder_input_ids,
            decoder_attention_mask=decoder_attention_mask,
            decoder_position_ids=decoder_position_ids,
            encoder_outputs=encoder_outputs,
            inputs_embeds=inputs_embeds,
            decoder_inputs_embeds=decoder_inputs_embeds,
            use_cache=False,
            **kwargs,
        )

        token_logits = self.score(seq2seq_out.last_hidden_state)

        # Pool on the rightmost non-pad decoder token, which works for both
        # left- and right-padded batches.
        n_rows = input_ids.shape[0]
        keep_mask = (decoder_input_ids != self.config.pad_token_id).to(token_logits.device, torch.int32)
        positions = torch.arange(decoder_input_ids.shape[-1], device=token_logits.device, dtype=torch.int32)
        pool_idx = (positions * keep_mask).argmax(-1)
        pool_idx = torch.clamp(pool_idx, max=decoder_input_ids.shape[-1] - 1)
        pooled_logits = token_logits[torch.arange(n_rows, device=token_logits.device), pool_idx]

        loss = None
        if labels is not None:
            loss = self.loss_function(
                logits=token_logits, labels=labels, pooled_logits=pooled_logits, config=self.config
            )

        return SequenceClassifierOutput(
            loss=loss,
            logits=pooled_logits,
            hidden_states=seq2seq_out.decoder_hidden_states,
            attentions=seq2seq_out.decoder_attentions,
        )
@auto_docstring
class T5Gemma2ForTokenClassification(T5Gemma2PreTrainedModel):
    # Encoder-decoder backbone with a per-position classification head on decoder outputs.

    def __init__(self, config: T5Gemma2Config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.hidden_size = config.decoder.hidden_size
        self.model = T5Gemma2Model(config)
        # Default to 0.1 dropout when the config does not define `classifier_dropout_rate`.
        classifier_dropout = getattr(config, "classifier_dropout_rate", 0.1)
        self.score = T5Gemma2ClassificationHead(self.hidden_size, self.num_labels, classifier_dropout)
        self.post_init()

    def get_input_embeddings(self):
        return self.model.get_input_embeddings()

    def set_input_embeddings(self, value):
        self.model.set_input_embeddings(value)

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        pixel_values: torch.FloatTensor | None = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        decoder_input_ids: torch.LongTensor | None = None,
        decoder_attention_mask: torch.Tensor | None = None,
        decoder_position_ids: torch.LongTensor | None = None,
        encoder_outputs: BaseModelOutput | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        decoder_inputs_embeds: torch.FloatTensor | None = None,
        labels: torch.LongTensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> TokenClassifierOutput:
        r"""
        decoder_position_ids (`torch.LongTensor` of shape `(batch_size, decoder_sequence_length)`, *optional*):
            Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the range `[0,
            config.decoder.n_positions - 1]`. [What are position IDs?](../glossary#position-ids)
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the token classification loss. Indices should be in `[0, ...,
            config.num_labels - 1]`.
        """
        if inputs_embeds is not None or decoder_inputs_embeds is not None:
            raise NotImplementedError(
                f"Passing input embeddings is currently not supported for {self.__class__.__name__}."
            )
        if input_ids is None:
            raise ValueError("You have to specify input_ids")
        if decoder_input_ids is None:
            # Synthesize decoder inputs by right-shifting the encoder inputs.
            decoder_input_ids = self.prepare_decoder_input_ids_from_labels(input_ids)
        outputs: Seq2SeqModelOutput = self.model(
            input_ids,
            pixel_values=pixel_values,
            attention_mask=attention_mask,
            position_ids=position_ids,
            decoder_input_ids=decoder_input_ids,
            decoder_attention_mask=decoder_attention_mask,
            decoder_position_ids=decoder_position_ids,
            encoder_outputs=encoder_outputs,
            inputs_embeds=inputs_embeds,
            decoder_inputs_embeds=decoder_inputs_embeds,
            use_cache=False,
            **kwargs,
        )
        last_hidden_state = outputs.last_hidden_state
        hidden_states = outputs.decoder_hidden_states
        attentions = outputs.decoder_attentions
        # One logit vector per decoder position.
        logits = self.score(last_hidden_state)
        loss = None
        if labels is not None:
            loss = self.loss_function(logits, labels, self.config)
        return TokenClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=hidden_states,
            attentions=attentions,
        )
# Public symbols exported from this (auto-generated) modeling module.
__all__ = [
    "T5Gemma2Config",
    "T5Gemma2TextConfig",
    "T5Gemma2EncoderConfig",
    "T5Gemma2DecoderConfig",
    "T5Gemma2ForConditionalGeneration",
    "T5Gemma2Model",
    "T5Gemma2Encoder",
    "T5Gemma2PreTrainedModel",
    "T5Gemma2ForSequenceClassification",
    "T5Gemma2ForTokenClassification",
]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/t5gemma2/modular_t5gemma2.py",
"license": "Apache License 2.0",
"lines": 1411,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:tests/models/t5gemma2/test_modeling_t5gemma2.py | # Copyright 2025 Google Inc. HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch T5Gemma2 model."""
import copy
import unittest
import pytest
import requests
from transformers import (
AutoProcessor,
T5Gemma2Config,
T5Gemma2DecoderConfig,
T5Gemma2EncoderConfig,
T5Gemma2TextConfig,
is_torch_available,
is_vision_available,
)
from transformers.testing_utils import (
Expectations,
cleanup,
require_torch,
require_torch_accelerator,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin, assert_similar_generate_outputs
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
if is_torch_available():
import torch
import torch.nn.functional as F
from transformers import (
T5Gemma2ForConditionalGeneration,
T5Gemma2ForSequenceClassification,
T5Gemma2ForTokenClassification,
T5Gemma2Model,
)
if is_vision_available():
from PIL import Image
class T5Gemma2ModelTester:
config_class = T5Gemma2Config
text_config_class = T5Gemma2TextConfig
encoder_config_class = T5Gemma2EncoderConfig
decoder_config_class = T5Gemma2DecoderConfig
if is_torch_available():
model_class = T5Gemma2Model
causal_lm_class = T5Gemma2ForConditionalGeneration
sequence_classification_class = T5Gemma2ForSequenceClassification
token_classification_class = T5Gemma2ForTokenClassification
    def __init__(
        self,
        parent,
        batch_size=13,
        is_training=True,
        use_attention_mask=True,
        use_labels=True,
        vocab_size=99,
        # decoder-specific
        seq_length=7,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        num_key_value_heads=2,
        intermediate_size=37,
        # encoder-specific
        encoder_seq_length=7,
        encoder_hidden_size=32,
        encoder_num_hidden_layers=2,
        encoder_num_attention_heads=4,
        encoder_num_key_value_heads=2,
        encoder_intermediate_size=37,
        # vision-specific
        mm_tokens_per_image=2,
        image_token_index=4,
        boi_token_index=5,
        eoi_token_index=6,
        siglip_config={
            "use_labels": True,
            "image_size": 20,
            "patch_size": 5,
            "num_channels": 3,
            "is_training": True,
            "hidden_size": 32,
            "num_key_value_heads": 1,
            "num_hidden_layers": 2,
            "num_attention_heads": 4,
            "intermediate_size": 37,
            "dropout": 0.1,
            "attention_dropout": 0.1,
            "initializer_range": 0.02,
        },
        # common
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        layer_types=["full_attention", "sliding_attention"],
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        # special ids
        eos_token_id=1,
        pad_token_id=0,
        bos_token_id=2,
    ):
        """Store tiny-model hyperparameters for the test suite.

        `parent` is the unittest.TestCase that owns this tester; all assertions are
        routed through it. Encoder/decoder widths may differ, hence the parallel
        `encoder_*` attribute set. Defaults are deliberately tiny to keep tests fast.
        """
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        # decoder
        self.seq_length = seq_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.intermediate_size = intermediate_size
        # encoder
        self.encoder_seq_length = encoder_seq_length
        self.encoder_hidden_size = encoder_hidden_size
        self.encoder_num_hidden_layers = encoder_num_hidden_layers
        self.encoder_num_attention_heads = encoder_num_attention_heads
        self.encoder_num_key_value_heads = encoder_num_key_value_heads
        self.encoder_intermediate_size = encoder_intermediate_size
        # vision
        self.mm_tokens_per_image = mm_tokens_per_image
        self.image_token_index = image_token_index
        self.boi_token_index = boi_token_index
        self.eoi_token_index = eoi_token_index
        self.siglip_config = siglip_config
        self.num_channels = siglip_config["num_channels"]
        self.image_size = siglip_config["image_size"]
        # common
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.layer_types = layer_types
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        # head_dim is derived so Q/K/V projections tile evenly across heads
        self.head_dim = self.hidden_size // self.num_attention_heads
        # special ids
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
def get_encoder_config(self):
return self.encoder_config_class(
text_config=self.text_config_class(
vocab_size=self.vocab_size,
hidden_size=self.encoder_hidden_size,
num_hidden_layers=self.encoder_num_hidden_layers,
num_attention_heads=self.encoder_num_attention_heads,
num_key_value_heads=self.encoder_num_key_value_heads,
intermediate_size=self.encoder_intermediate_size,
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
layer_types=self.layer_types,
type_vocab_size=self.type_vocab_size,
is_decoder=False,
initializer_range=self.initializer_range,
head_dim=self.head_dim,
bos_token_id=self.bos_token_id,
eos_token_id=self.eos_token_id,
pad_token_id=self.pad_token_id,
),
# vision.
vision_config=self.siglip_config,
image_token_index=self.image_token_index,
boi_token_index=self.boi_token_index,
eoi_token_index=self.eoi_token_index,
mm_tokens_per_image=self.mm_tokens_per_image,
hidden_size=self.encoder_hidden_size,
)
def get_decoder_config(self):
return self.decoder_config_class(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
num_key_value_heads=self.num_key_value_heads,
intermediate_size=self.intermediate_size,
cross_attention_hidden_size=self.encoder_hidden_size,
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
layer_types=self.layer_types,
type_vocab_size=self.type_vocab_size,
is_decoder=True,
initializer_range=self.initializer_range,
head_dim=self.head_dim,
bos_token_id=self.bos_token_id,
eos_token_id=self.eos_token_id,
pad_token_id=self.pad_token_id,
)
def get_config(self, is_encoder_decoder=True):
return self.config_class(
encoder=self.get_encoder_config(),
decoder=self.get_decoder_config(),
is_encoder_decoder=is_encoder_decoder,
# vision.
image_token_index=self.image_token_index,
# Used for generation test.
num_attention_heads=self.num_attention_heads,
num_key_value_heads=self.num_key_value_heads,
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
)
    def prepare_config_and_inputs(self):
        """Create a config plus random token ids, pixel values, masks and labels.

        NOTE: `ids_tensor`/`floats_tensor` draw from the global RNG, so the call
        order here is part of the fixture's reproducibility.
        """
        config = self.get_config()
        # `vocab_size - 1` then `+ 1` keeps ids in [1, vocab_size), i.e. never PAD (0).
        input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size - 1) + 1
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size - 1) + 1
        # Vision inputs.
        pixel_values = floats_tensor(
            [
                self.batch_size,
                self.siglip_config["num_channels"],
                self.siglip_config["image_size"],
                self.siglip_config["image_size"],
            ]
        )
        # Remove BOS symbols from inputs (replaced by an arbitrary non-special id).
        input_ids = torch.where(input_ids == self.bos_token_id, 42, input_ids)
        decoder_input_ids = torch.where(decoder_input_ids == self.bos_token_id, 42, decoder_input_ids)
        # Avoid leading PAD tokens from inputs.
        decoder_input_ids[:, 0] = self.pad_token_id + 1
        # Mark only the first position as the image token and ensure no other position
        # carries it. Do not change this unless you modified image size or patch size.
        input_ids[input_ids == config.encoder.image_token_index] = self.pad_token_id
        input_ids[:, :1] = config.encoder.image_token_index
        attention_mask = None
        decoder_attention_mask = None
        if self.use_attention_mask:
            # Random 0/1 masks for both sides.
            attention_mask = ids_tensor([self.batch_size, self.encoder_seq_length], vocab_size=2)
            decoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        return (
            config,
            input_ids,
            decoder_input_ids,
            attention_mask,
            decoder_attention_mask,
            lm_labels,
            pixel_values,
        )
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
decoder_input_ids,
attention_mask,
decoder_attention_mask,
lm_labels,
pixel_values,
) = config_and_inputs
inputs_dict = {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"pixel_values": pixel_values,
}
return config, inputs_dict
def create_and_check_model(
self,
config,
input_ids,
decoder_input_ids,
attention_mask,
decoder_attention_mask,
lm_labels,
pixel_values,
):
model = self.model_class(config=config).to(torch_device).eval()
result = model(
input_ids=input_ids,
decoder_input_ids=decoder_input_ids,
pixel_values=pixel_values,
attention_mask=attention_mask,
decoder_attention_mask=decoder_attention_mask,
)
decoder_output = result.last_hidden_state
decoder_past = result.past_key_values
encoder_output = result.encoder_last_hidden_state
self.parent.assertEqual(
encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.encoder_hidden_size)
)
self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertIsNotNone(decoder_past)
self.parent.assertEqual(len(decoder_past.self_attention_cache), config.decoder.num_hidden_layers)
self.parent.assertEqual(len(decoder_past.cross_attention_cache), config.decoder.num_hidden_layers)
def check_prepare_lm_labels_via_shift_left(
self,
config,
input_ids,
decoder_input_ids,
attention_mask,
decoder_attention_mask,
lm_labels,
pixel_values,
):
model = self.model_class(config=config).to(torch_device).eval()
# _shift_right should be called on labels
shifted_labels = model.prepare_decoder_input_ids_from_labels(lm_labels)
# first token should be decoder_start_token_id
self.parent.assertTrue(torch.all(shifted_labels[:, 0] == config.decoder.bos_token_id))
# the rest should be the labels shifted by one, with -100 replaced by pad_token_id
labels_without_ignore_index = lm_labels.masked_fill(lm_labels == -100, config.decoder.pad_token_id)
self.parent.assertTrue(torch.all(shifted_labels[:, 1:] == labels_without_ignore_index[:, :-1]))
def create_and_check_with_lm_head(
self,
config,
input_ids,
decoder_input_ids,
attention_mask,
decoder_attention_mask,
lm_labels,
pixel_values,
):
model = self.causal_lm_class(config=config).to(torch_device).eval()
outputs = model(
input_ids=input_ids,
decoder_input_ids=decoder_input_ids,
attention_mask=attention_mask,
decoder_attention_mask=decoder_attention_mask,
labels=lm_labels,
pixel_values=pixel_values,
)
self.parent.assertEqual(len(outputs), 4)
self.parent.assertEqual(outputs["logits"].size(), (self.batch_size, self.seq_length, self.vocab_size))
self.parent.assertEqual(outputs["loss"].size(), ())
def create_and_check_with_sequence_classification_head(
self,
config,
input_ids,
decoder_input_ids,
attention_mask,
decoder_attention_mask,
lm_labels,
pixel_values,
):
labels = torch.tensor([1] * self.batch_size, dtype=torch.long, device=torch_device)
model = self.sequence_classification_class(config=config).to(torch_device).eval()
outputs = model(
input_ids=input_ids,
pixel_values=pixel_values,
decoder_input_ids=decoder_input_ids,
labels=labels,
)
self.parent.assertEqual(outputs["logits"].size(), (self.batch_size, config.num_labels))
self.parent.assertEqual(outputs["loss"].size(), ())
def create_and_check_with_token_classification_head(
self,
config,
input_ids,
decoder_input_ids,
attention_mask,
decoder_attention_mask,
lm_labels,
pixel_values,
):
labels = torch.tensor([1] * self.seq_length * self.batch_size, dtype=torch.long, device=torch_device)
model = self.token_classification_class(config=config)
model = model.to(torch_device).eval()
outputs = model(
input_ids=input_ids,
pixel_values=pixel_values,
decoder_input_ids=decoder_input_ids,
labels=labels,
)
self.parent.assertEqual(outputs["logits"].size(), (self.batch_size, self.seq_length, config.num_labels))
self.parent.assertEqual(outputs["loss"].size(), ())
    def create_and_check_decoder_model_past(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask,
        decoder_attention_mask,
        lm_labels,
        pixel_values,
    ):
        """Check that cached single-step decoding matches the uncached full pass."""
        model = self.model_class(config=config).get_decoder().to(torch_device).eval()
        # Constant encoder states: this test only exercises the decoder's self-attention cache.
        encoder_hidden_states = torch.ones(
            (self.batch_size, self.encoder_seq_length, self.encoder_hidden_size), dtype=torch.float32
        ).to(torch_device)
        # first forward pass
        outputs = model(decoder_input_ids, encoder_hidden_states=encoder_hidden_states, use_cache=True)
        outputs_use_cache_conf = model(decoder_input_ids, encoder_hidden_states=encoder_hidden_states)
        outputs_no_past = model(decoder_input_ids, encoder_hidden_states=encoder_hidden_states, use_cache=False)
        # Caching on (explicitly or via config default) adds exactly one output field.
        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extend next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)
        # append to next input_ids
        next_input_ids = torch.cat([decoder_input_ids, next_tokens], dim=-1)
        # Full re-run vs. cached single-token step over the same extended sequence.
        output_from_no_past = model(next_input_ids, encoder_hidden_states=encoder_hidden_states)["last_hidden_state"]
        output_from_past = model(
            next_tokens, encoder_hidden_states=encoder_hidden_states, past_key_values=past_key_values
        )["last_hidden_state"]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
def create_and_check_decoder_model_attention_mask_past(
    self,
    config,
    input_ids,
    decoder_input_ids,
    attention_mask,
    decoder_attention_mask,
    lm_labels,
    pixel_values,
):
    """Check that cached decoding agrees with uncached decoding when an attention mask is used.

    Half of the decoder sequence is masked out; tokens in the masked region are then
    randomly perturbed, so agreement between the two paths also confirms the mask is honored.
    """
    model = self.model_class(config=config).get_decoder().to(torch_device).eval()
    # Dummy encoder output; only decoder-side masking/caching is under test here.
    encoder_hidden_states = torch.ones(
        (self.batch_size, self.encoder_seq_length, self.encoder_hidden_size), dtype=torch.float32
    ).to(torch_device)
    # create attention mask: keep the first half, mask out the second half
    attn_mask = torch.ones(decoder_input_ids.shape, dtype=torch.long, device=torch_device)
    half_seq_length = decoder_input_ids.shape[-1] // 2
    attn_mask[:, half_seq_length:] = 0
    # first forward pass (builds the KV cache under the mask)
    output, past_key_values = model(
        decoder_input_ids, encoder_hidden_states=encoder_hidden_states, attention_mask=attn_mask, use_cache=True
    ).to_tuple()
    # create hypothetical next token and extend to next_input_ids
    next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)
    # change a random masked slice from input_ids; "+ 1" keeps the index inside the masked tail
    random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1
    random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1)
    decoder_input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens
    # append to next input_ids and attn_mask (new token is unmasked)
    next_input_ids = torch.cat([decoder_input_ids, next_tokens], dim=-1)
    attn_mask = torch.cat(
        [attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)],
        dim=1,
    )
    # get two different outputs: full-sequence (no cache) vs. single-step (with cache)
    output_from_no_past = model(
        next_input_ids, encoder_hidden_states=encoder_hidden_states, attention_mask=attn_mask
    )["last_hidden_state"]
    output_from_past = model(
        next_tokens,
        encoder_hidden_states=encoder_hidden_states,
        past_key_values=past_key_values,
        attention_mask=attn_mask,
    )["last_hidden_state"]
    # select random slice (a single random feature channel) to compare
    random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
    output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
    output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
    # test that outputs are equal for slice
    self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
def create_and_check_decoder_model_past_large_inputs(
    self,
    config,
    input_ids,
    decoder_input_ids,
    attention_mask,
    decoder_attention_mask,
    lm_labels,
    pixel_values,
):
    """Check cached decoding when several (3) new tokens are appended in a single step."""
    model = self.model_class(config=config).get_decoder().to(torch_device).eval()
    # Dummy encoder output; only decoder-side caching is under test here.
    encoder_hidden_states = torch.ones(
        (self.batch_size, self.encoder_seq_length, self.encoder_hidden_size), dtype=torch.float32
    ).to(torch_device)
    # first forward pass (builds the KV cache)
    outputs = model(
        decoder_input_ids,
        encoder_hidden_states=encoder_hidden_states,
        attention_mask=attention_mask,
        use_cache=True,
    )
    output, past_key_values = outputs.to_tuple()
    # create hypothetical multiple next tokens (and their mask bits) and extend to next_input_ids
    next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
    next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)
    # append to next input_ids and attention mask
    next_input_ids = torch.cat([decoder_input_ids, next_tokens], dim=-1)
    next_attention_mask = torch.cat([attention_mask, next_mask], dim=-1)
    # Uncached path sees the whole extended sequence; cached path sees only the 3 new tokens.
    output_from_no_past = model(
        next_input_ids, encoder_hidden_states=encoder_hidden_states, attention_mask=next_attention_mask
    )["last_hidden_state"]
    output_from_past = model(
        next_tokens,
        encoder_hidden_states=encoder_hidden_states,
        attention_mask=next_attention_mask,
        past_key_values=past_key_values,
    )["last_hidden_state"]
    # select random slice (a single random feature channel) to compare
    random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
    output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
    output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
    # the cached path must emit exactly one hidden state per new token
    self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
    # test that outputs are equal for slice
    self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
def create_and_check_generate_with_past_key_values(
    self,
    config,
    input_ids,
    decoder_input_ids,
    attention_mask,
    decoder_attention_mask,
    lm_labels,
    pixel_values,
):
    """Sampling with and without KV caching must yield identical sequences under the same seed."""
    model = self.causal_lm_class(config=config).to(torch_device).eval()

    # Shared generation settings for both calls; only `use_cache` differs.
    shared_kwargs = {
        "pixel_values": pixel_values,
        "num_beams": 2,
        "max_length": 5,
        "do_sample": True,
    }

    # Reset the RNG before each call so the sampled tokens are comparable.
    torch.manual_seed(0)
    output_without_past_cache = model.generate(input_ids, use_cache=False, **shared_kwargs)

    torch.manual_seed(0)
    output_with_past_cache = model.generate(input_ids, **shared_kwargs)

    self.parent.assertTrue(torch.all(output_with_past_cache == output_without_past_cache))
def create_and_check_model_fp16_forward(
    self,
    config,
    input_ids,
    decoder_input_ids,
    attention_mask,
    decoder_attention_mask,
    lm_labels,
    pixel_values,
):
    """A half-precision forward pass must not produce any NaN in the last hidden state."""
    model = self.model_class(config=config).to(torch_device).half().eval()

    forward_kwargs = {
        "pixel_values": pixel_values,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
    last_hidden_state = model(input_ids, **forward_kwargs)["last_hidden_state"]

    self.parent.assertFalse(torch.isnan(last_hidden_state).any().item())
def create_and_create_and_check_forward_full_mask(
    self,
    config,
    input_ids,
    decoder_input_ids,
    attention_mask,
    decoder_attention_mask,
    lm_labels,
    pixel_values,
):
    """
    Checks whether we can use the shortcuts in our mask generation (SDPA) properly,
    these rely on the `is_causal` flag to function properly
    """
    model = self.model_class(config=config).to(torch_device).eval()
    # Force full mask (all true) which can be shortcircuited to `None`
    attention_mask = torch.ones_like(attention_mask)
    decoder_attention_mask = torch.ones_like(decoder_attention_mask)
    # Eager path: the all-ones masks may be short-circuited away before attention.
    output_full_mask = model(
        input_ids,
        pixel_values=pixel_values,
        decoder_input_ids=decoder_input_ids,
        attention_mask=attention_mask,
        decoder_attention_mask=decoder_attention_mask,
    )["last_hidden_state"]
    # Compile forces the mask creation to happen at any time, so this path cannot
    # take the shortcut; both paths must still agree numerically.
    model.forward = torch.compile(model.forward)
    output_full_mask_no_shortcut = model(
        input_ids,
        pixel_values=pixel_values,
        decoder_input_ids=decoder_input_ids,
        attention_mask=attention_mask,
        decoder_attention_mask=decoder_attention_mask,
    )["last_hidden_state"]
    self.parent.assertTrue(torch.allclose(output_full_mask, output_full_mask_no_shortcut, atol=1e-3, rtol=1e-3))
@require_torch
class T5Gemma2ModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
    """Common model and generation test-suite wiring for T5Gemma2, driven by `T5Gemma2ModelTester`."""

    all_model_classes = (
        (
            T5Gemma2Model,
            T5Gemma2ForConditionalGeneration,
            T5Gemma2ForSequenceClassification,
            T5Gemma2ForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    _is_stateful = True
    is_encoder_decoder = True
    # MP works but offload doesn't work when the SigLIP MultiheadAttention is offloaded
    test_cpu_offload = False
    test_disk_offload_safetensors = False
    test_disk_offload_bin = False

    def setUp(self):
        self.model_tester = T5Gemma2ModelTester(self)
        self.config_tester = ConfigTester(
            self,
            config_class=T5Gemma2Config,
            # For faking the testing.
            hidden_size=37,
            vocab_size=self.model_tester.vocab_size,
            num_attention_heads=self.model_tester.num_attention_heads,
            num_hidden_layers=self.model_tester.num_hidden_layers,
        )

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_shift_right(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.check_prepare_lm_labels_via_shift_left(*config_and_inputs)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    # Based on tests.models.t5.test_modeling_t5.T5ModelTest.test_inputs_embeds
    def test_inputs_embeds(self):
        """Feed `inputs_embeds`/`decoder_inputs_embeds` instead of ids; the forward pass must not fail."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in (T5Gemma2Model, T5Gemma2ForConditionalGeneration):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))
            if not self.is_encoder_decoder:
                input_ids = inputs["input_ids"]
                del inputs["input_ids"]
            else:
                encoder_input_ids = inputs["input_ids"]
                decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids)
                del inputs["input_ids"]
                inputs.pop("decoder_input_ids", None)
            wte = model.get_input_embeddings()
            if not self.is_encoder_decoder:
                inputs["inputs_embeds"] = wte(input_ids)
            else:
                inputs["inputs_embeds"] = wte(encoder_input_ids)
                inputs["decoder_inputs_embeds"] = wte(decoder_input_ids)
            with torch.no_grad():
                model(**inputs)[0]

    def test_with_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_with_lm_head(*config_and_inputs)

    def test_with_sequence_classification_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_with_sequence_classification_head(*config_and_inputs)

    def test_with_token_classification_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_with_token_classification_head(*config_and_inputs)

    def test_decoder_model_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past(*config_and_inputs)

    def test_decoder_model_past_with_attn_mask(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_attention_mask_past(*config_and_inputs)

    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_generate_with_past_key_values(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_generate_with_past_key_values(*config_and_inputs)

    @unittest.skipIf(torch_device == "cpu", "Can't do half precision")
    def test_model_fp16_forward(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_fp16_forward(*config_and_inputs)

    # Failing job for ref: https://github.com/huggingface/transformers/pull/43633/checks?check_run_id=62485281160
    @unittest.skip("Fails in CI run and isn't reproducible locally/in A10 runners. FIXME @raushan")
    def test_forward_full_mask(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_create_and_check_forward_full_mask(*config_and_inputs)

    # Based on tests.models.gemma.test_modeling_gemma.GemmaModelTest.test_Gemma_sequence_classification_model with Gemma -> T5Gemma2 (Add is_encoder_decoder option)
    def test_T5Gemma2_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        # Run once without and once with pixel values (text-only and multimodal paths).
        for pixel_values in [None, input_dict["pixel_values"]]:
            model = self.model_tester.sequence_classification_class(config).to(torch_device).eval()
            result = model(input_ids, pixel_values=pixel_values, attention_mask=attention_mask, labels=sequence_labels)
            self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    # Based on tests.models.gemma.test_modeling_gemma.GemmaModelTest.test_Gemma_sequence_classification_model_for_single_label with Gemma -> T5Gemma2 (Add is_encoder_decoder option)
    def test_T5Gemma2_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        for pixel_values in [None, input_dict["pixel_values"]]:
            model = self.model_tester.sequence_classification_class(config).to(torch_device).eval()
            result = model(input_ids, pixel_values=pixel_values, attention_mask=attention_mask, labels=sequence_labels)
            self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    # Based on tests.models.gemma.test_modeling_gemma.GemmaModelTest.test_Gemma_sequence_classification_model_for_multi_label with Gemma -> T5Gemma2 (Add is_encoder_decoder option)
    def test_T5Gemma2_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        for pixel_values in [None, input_dict["pixel_values"]]:
            model = self.model_tester.sequence_classification_class(config).to(torch_device).eval()
            result = model(input_ids, pixel_values=pixel_values, attention_mask=attention_mask, labels=sequence_labels)
            self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    # Based on tests.models.gemma.test_modeling_gemma.GemmaModelTest.test_Gemma_token_classification_model with Gemma -> T5Gemma2 (Add is_encoder_decoder option)
    def test_T5Gemma2_token_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        decoder_input_ids = input_dict["decoder_input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        token_labels = ids_tensor([self.model_tester.batch_size, self.model_tester.seq_length], config.num_labels)
        for pixel_values in [None, input_dict["pixel_values"]]:
            model = self.model_tester.token_classification_class(config).to(torch_device).eval()
            result = model(
                input_ids,
                decoder_input_ids=decoder_input_ids,
                pixel_values=pixel_values,
                attention_mask=attention_mask,
                labels=token_labels,
            )
            self.assertEqual(
                result.logits.shape,
                (self.model_tester.batch_size, self.model_tester.seq_length, self.model_tester.num_labels),
            )

    @unittest.skip("This was not properly written, submodules need the attribute to be overwritten")
    def test_attention_outputs(self):
        pass

    @unittest.skip("Mismatch issue doesn't exist in T5Gemma2.")
    def test_load_with_mismatched_shapes(self):
        pass

    # Based on tests.generation.test_utils.GenerationTesterMixin.test_generate_continue_from_past_key_values
    # Updated decoder_attention_mask to consider the appended bos token
    @pytest.mark.generate
    def test_generate_continue_from_past_key_values(self):
        # Tests that we can continue generating from past key values, returned from a previous `generate` call
        for model_class in self.all_generative_model_classes:
            # Token classification has no generation head to continue from.
            if model_class == self.model_tester.token_classification_class:
                continue
            if any(model_name in model_class.__name__.lower() for model_name in ["imagegpt", "mllama"]):
                self.skipTest(reason="Won't fix: old model with unique inputs/caches/other")
            if any(model_name in model_class.__name__.lower() for model_name in ["umt5"]):
                self.skipTest(reason="TODO: needs modeling or test input preparation fixes for compatibility")

            config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

            if not hasattr(config.get_text_config(), "use_cache"):
                self.skipTest(reason=f"{model_class.__name__} doesn't support caching")

            # Let's make it always:
            # 1. use cache (for obvious reasons)
            # 2. generate to max length (which can be achieved by setting the eos token to an invalid value), which
            #    would make the test flaky (e.g. EOS is generated on iteration 1 on both generations, but the
            #    continuation would force it to generate beyond an EOS token)
            # 3. ignore `token_type_ids` for simplicity
            # 4. ignore `forced_eos_token_id`, which requires further manipulation of the continuation inputs and is
            #    active by default on some models
            # 5. ignore `encoder_no_repeat_ngram_size`, which is set by default in some encoder-decoder models. When
            #    we use their decoder as a stand-alone model, `encoder_no_repeat_ngram_size` actually prevents
            #    repetition exclusively from the prompt. This test relies on comparing one call vs 2 calls
            #    with cache, what is considered a prompt is different in the two cases.
            if "token_type_ids" in inputs:
                del inputs["token_type_ids"]

            model = model_class(config).to(torch_device)
            model.eval()

            # If "past_key_values" is not returned, skip the test (e.g. RWKV uses a different cache name and format)
            outputs = model(**inputs)
            if "past_key_values" not in outputs:
                self.skipTest(reason="This model doesn't return `past_key_values`")

            generate_kwargs = {
                "pad_token_id": -1,
                "eos_token_id": -1,
                "forced_eos_token_id": None,
                "encoder_no_repeat_ngram_size": 0,
                "use_cache": True,
                "do_sample": False,
                "return_dict_in_generate": True,
                "output_scores": True,
            }

            # Traditional way of generating text, with `return_dict_in_generate` to return the past key values
            outputs = model.generate(**inputs, **generate_kwargs, max_new_tokens=4)

            # Let's generate again, but passing the past key values in between (3 + 1 = 4 tokens). Note that the
            # inputs may need to be tweaked across `generate` calls (like the attention mask).
            outputs_cached = model.generate(**inputs, **generate_kwargs, max_new_tokens=3)

            # Continue from the tokens generated above, preparing the inputs accordingly
            inputs["past_key_values"] = outputs_cached.past_key_values
            new_attention_len = outputs_cached.sequences.shape[-1]
            # It must be encoder-decoder models
            self.assertTrue(config.is_encoder_decoder)
            inputs["decoder_input_ids"] = outputs_cached.sequences
            if "decoder_attention_mask" in inputs:
                decoder_attention_mask = inputs["decoder_attention_mask"]
                # Add BOS mask: the new sequence comes with a new BOS token, which is not included in the original inputs
                padding_tensor = torch.ones_like(decoder_attention_mask[:, :1])
                decoder_attention_mask = torch.cat([padding_tensor, decoder_attention_mask], dim=1)
                inputs["decoder_attention_mask"] = torch.nn.functional.pad(
                    decoder_attention_mask,
                    (0, new_attention_len - decoder_attention_mask.shape[1]),
                    mode="constant",
                    value=1,
                )
            first_caches_scores = outputs_cached.scores
            outputs_cached = model.generate(**inputs, **generate_kwargs, max_new_tokens=1)
            full_cached_scores = first_caches_scores + outputs_cached.scores
            outputs_cached.scores = full_cached_scores

            # The two sets of generated text and past kv should be equal to each other
            assert_similar_generate_outputs(outputs, outputs_cached)
            self._check_caches_are_equal(outputs.past_key_values, outputs_cached.past_key_values)

    @unittest.skip("T5Gemma 2 only support final layer hidden states.")
    def test_hidden_states_output(self):
        pass

    # Based on tests.models.t5.test_modeling_t5.T5ModelTest.test_custom_4d_attention_mask
    # Excluding the final token from input_ids
    def test_custom_4d_attention_mask(self):
        for model_class in self.all_generative_model_classes:
            config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
            model = model_class(config).to(device=torch_device, dtype=torch.float32)

            (
                input_ids,
                position_ids,
                input_ids_shared_prefix,
                mask_shared_prefix,
                position_ids_shared_prefix,
            ) = self._get_custom_4d_mask_test_data()
            # convert the additive float mask to a boolean "may attend" mask
            mask_shared_prefix = mask_shared_prefix == 0.0

            outputs = model.forward(
                decoder_input_ids=input_ids,
                input_ids=input_ids[:, :-1],
                decoder_position_ids=position_ids,
            )
            logits = outputs.logits
            # logits.shape == torch.Size([3, 4, ...])

            outputs_shared_prefix = model(
                input_ids=input_ids[:1, :-1],
                decoder_input_ids=input_ids_shared_prefix,
                decoder_attention_mask=mask_shared_prefix,
                decoder_position_ids=position_ids_shared_prefix,
            )
            logits_shared_prefix = outputs_shared_prefix.logits
            # logits_shared_prefix.shape == torch.Size([1, 6, ...])

            torch.testing.assert_close(
                outputs.encoder_last_hidden_state[0], outputs_shared_prefix.encoder_last_hidden_state[0]
            )

            out_last_tokens = logits[:, -1, :]  # last tokens in each batch line
            out_shared_prefix_last_tokens = logits_shared_prefix[0, -3:, :]  # last three tokens

            # comparing softmax-normalized logits:
            normalized_0 = F.softmax(out_last_tokens)
            normalized_1 = F.softmax(out_shared_prefix_last_tokens)
            torch.testing.assert_close(normalized_0[2], normalized_1[2], rtol=1e-3, atol=1e-4)
            torch.testing.assert_close(normalized_0, normalized_1, rtol=1e-3, atol=1e-4)

    @pytest.mark.xfail(reason="This architecture seems to not compute gradients for some layer.")
    def test_training_gradient_checkpointing(self):
        super().test_training_gradient_checkpointing()

    @pytest.mark.xfail(reason="This architecture seems to not compute gradients for some layer.")
    def test_training_gradient_checkpointing_use_reentrant_false(self):
        super().test_training_gradient_checkpointing_use_reentrant_false()

    @pytest.mark.xfail(reason="This architecture seems to not compute gradients for some layer.")
    def test_training_gradient_checkpointing_use_reentrant_true(self):
        super().test_training_gradient_checkpointing_use_reentrant_true()

    @unittest.skip(reason="SiglipVisionModel (vision backbone) does not support standalone training")
    def test_torch_compile_for_training(self):
        pass

    @unittest.skip(reason="Self&cross attention are splited after the merged attention")
    def test_retain_grad_hidden_states_attentions(self):
        pass

    @unittest.skip(
        reason="Merged attention module will always require a mask which is incompatible with the FA backend"
    )
    def test_sdpa_can_dispatch_on_flash(self):
        pass
@require_torch_accelerator
@slow
class T5Gemma2IntegrationTest(unittest.TestCase):
    """Slow end-to-end generation tests against the released `google/t5gemma-2-270m-270m` checkpoint.

    Requires an accelerator and network access (checkpoint download + test image fetch).
    """

    def setUp(self):
        # Free accelerator memory between tests to avoid OOM on shared runners.
        cleanup(torch_device, gc_collect=True)

    def tearDown(self):
        cleanup(torch_device, gc_collect=True)

    def test_model_generation_270m(self):
        """Greedy single-image generation must match the recorded expectation."""
        expected_texts = Expectations(
            {
                ("cuda", None): ' a bumble bee in a flower bed.',
            }
        )  # fmt: skip
        EXPECTED_TEXT = expected_texts.get_expectation()

        model = T5Gemma2ForConditionalGeneration.from_pretrained(
            "google/t5gemma-2-270m-270m", device_map="auto", dtype=torch.bfloat16
        )
        processor = AutoProcessor.from_pretrained("google/t5gemma-2-270m-270m")

        url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/bee.jpg"
        image = Image.open(requests.get(url, stream=True).raw)
        prompt = "<start_of_image> in this image, there is"
        model_inputs = processor(text=prompt, images=image, return_tensors="pt").to(model.device)
        generated_ids = model.generate(**model_inputs, max_new_tokens=30, do_sample=False)
        generated_text = processor.decode(generated_ids[0], skip_special_tokens=True)
        self.assertEqual(generated_text, EXPECTED_TEXT)

    def test_model_generation_batch_270m(self):
        """Greedy batched (padded) generation must match the recorded per-sample expectations."""
        expected_texts = Expectations(
            {
                ("cuda", None): [' a bumble bee in a flower bed.', ', a bumblebee is seen in the garden of a house in the UK.'],
            }
        )  # fmt: skip
        EXPECTED_TEXT = expected_texts.get_expectation()

        model = T5Gemma2ForConditionalGeneration.from_pretrained(
            "google/t5gemma-2-270m-270m", device_map="auto", dtype=torch.bfloat16
        )
        processor = AutoProcessor.from_pretrained("google/t5gemma-2-270m-270m")

        url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/bee.jpg"
        image = Image.open(requests.get(url, stream=True).raw)
        prompt = ["<start_of_image> in this image, there is", "<start_of_image> in this image"]
        model_inputs = processor(text=prompt, images=[[image], [image]], padding=True, return_tensors="pt").to(
            model.device
        )
        generated_ids = model.generate(**model_inputs, max_new_tokens=30, do_sample=False)
        generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)
        self.assertEqual(generated_text, EXPECTED_TEXT)
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/t5gemma2/test_modeling_t5gemma2.py",
"license": "Apache License 2.0",
"lines": 935,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
# Copyright 2025 Arcee AI and the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""AFMoE model configuration"""
from ...configuration_utils import PreTrainedConfig, layer_type_validation
from ...modeling_rope_utils import RopeParameters
from ...utils import logging
logger = logging.get_logger(__name__)
class AfmoeConfig(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`AfmoeModel`]. It is used to instantiate an
AFMoE model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of [arcee-ai/Trinity-Mini](https://huggingface.co/arcee-ai/Trinity-Mini).
AFMoE is an Adaptive Feedforward MoE (Mixture of Experts) model with token-choice routing, shared experts, and a
hybrid attention mechanism combining sliding window and full attention patterns.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 200192):
Vocabulary size of the AFMoE model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`AfmoeModel`].
hidden_size (`int`, *optional*, defaults to 2048):
Dimension of the hidden representations.
intermediate_size (`int`, *optional*, defaults to 6144):
Dimension of the dense MLP representations.
moe_intermediate_size (`int`, *optional*, defaults to 1408):
Intermediate size of the routed expert MLPs.
num_hidden_layers (`int`, *optional*, defaults to 32):
Number of hidden layers in the Transformer decoder.
num_dense_layers (`int`, *optional*, defaults to 1):
Number of initial dense layers before MoE layers begin. Layers with index < num_dense_layers will use
standard dense MLPs instead of MoE.
num_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer decoder.
num_key_value_heads (`int`, *optional*):
This is the number of key_value heads that should be used to implement Grouped Query Attention. If
`num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
`num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
by meanpooling all the original heads within that group. For more details, check out [this
paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to
`num_attention_heads`.
head_dim (`int`, *optional*, defaults to 128):
The dimension of each attention head.
hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
The non-linear activation function (function or string) in the MLP blocks.
max_position_embeddings (`int`, *optional*, defaults to 16384):
The maximum sequence length that this model might ever be used with.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
rms_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the RMS normalization layers.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
tie_word_embeddings (`bool`, *optional*, defaults to `False`):
Whether the model's input and output word embeddings should be tied.
rope_theta (`float`, *optional*, defaults to 10000.0):
The base period of the RoPE embeddings.
rope_parameters (`RopeParameters`, *optional*):
Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain
a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE
with longer `max_position_embeddings`.
num_experts (`int`, *optional*, defaults to 64):
Number of routed experts in MoE layers.
num_experts_per_tok (`int`, *optional*, defaults to 6):
Number of experts to route each token to. This is the top-k value for the token-choice routing.
num_shared_experts (`int`, *optional*, defaults to 2):
Number of shared experts that are always activated for all tokens.
route_scale (`float`, *optional*, defaults to 1.0):
Scaling factor applied to routing weights.
global_attn_every_n_layers (`int`, *optional*, defaults to 4):
The frequency of full attention layers. Every Nth layer will use full attention, while others use sliding
window attention.
sliding_window (`int`, *optional*, defaults to 1024):
Sliding window size for local attention layers.
layer_types (`list[str]`, *optional*):
A list that explicitly maps each layer index with its attention type. Each element should be either
"sliding_attention" or "full_attention". If not provided, it will be automatically generated based on
`global_attn_every_n_layers`.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
mup_enabled (`bool`, *optional*, defaults to `False`):
Whether to enable muP (Maximal Update Parametrization) input scaling. When enabled, input embeddings
are scaled by `sqrt(hidden_size)`.
eos_token_id (`int`, *optional*):
End of stream token id.
pad_token_id (`int`, *optional*):
Padding token id.
bos_token_id (`int`, *optional*):
Beginning of stream token id.
Example:
```python
>>> from transformers import AfmoeModel, AfmoeConfig
>>> # Initializing an AFMoE configuration
>>> configuration = AfmoeConfig()
>>> # Initializing a model from the afmoe-small-sft-v1 style configuration
>>> model = AfmoeModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```
"""
model_type = "afmoe"
keys_to_ignore_at_inference = ["past_key_values"]
# Default pipeline parallel plan for base model
base_model_pp_plan = {
"embed_tokens": (["input_ids"], ["inputs_embeds"]),
"layers": (["hidden_states", "attention_mask"], ["hidden_states"]),
"norm": (["hidden_states"], ["hidden_states"]),
}
    def __init__(
        self,
        vocab_size: int | None = 200192,
        hidden_size: int | None = 2048,
        intermediate_size: int | None = 6144,
        moe_intermediate_size: int | None = 1408,
        num_hidden_layers: int | None = 32,
        num_dense_layers: int | None = 1,
        num_attention_heads: int | None = 16,
        num_key_value_heads: int | None = None,
        head_dim: int | None = 128,
        hidden_act: str | None = "silu",
        max_position_embeddings: int | None = 16384,
        initializer_range: float | None = 0.02,
        rms_norm_eps: float | None = 1e-5,
        use_cache: bool | None = True,
        tie_word_embeddings: bool | None = False,
        rope_theta: float | None = 10000.0,
        rope_parameters: RopeParameters | dict[str, RopeParameters] | None = None,
        num_experts: int | None = 64,
        num_experts_per_tok: int | None = 6,
        num_shared_experts: int | None = 2,
        route_scale: float | None = 1.0,
        global_attn_every_n_layers: int | None = 4,
        sliding_window: int | None = 1024,
        layer_types: list[str] | None = None,
        attention_dropout: float | None = 0.0,
        mup_enabled: bool | None = False,
        # NOTE(review): these three are token *ids*; previously annotated `bool | None`,
        # which looked like a copy/paste slip — corrected to `int | None`.
        eos_token_id: int | None = None,
        pad_token_id: int | None = None,
        bos_token_id: int | None = None,
        **kwargs,
    ):
        """Build an AFMoE configuration. See the class docstring for the meaning of each argument."""
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_dense_layers = num_dense_layers
        self.num_attention_heads = num_attention_heads
        self.head_dim = head_dim
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.rope_theta = rope_theta
        self.rope_parameters = rope_parameters
        # MoE specific
        self.moe_intermediate_size = moe_intermediate_size
        self.num_experts_per_tok = num_experts_per_tok
        self.num_experts = num_experts
        self.num_shared_experts = num_shared_experts
        self.route_scale = route_scale
        # Attention projections never carry a bias in AFMoE; hard-coded rather than configurable.
        self.attention_bias = False
        # Attention specific
        self.attention_dropout = attention_dropout
        self.global_attn_every_n_layers = global_attn_every_n_layers
        self.sliding_window = sliding_window
        self.mup_enabled = mup_enabled
        self.layer_types = layer_types
        if self.layer_types is None:
            # Every `global_attn_every_n_layers`-th layer (1-indexed) uses full attention,
            # all others use sliding-window attention.
            self.layer_types = [
                "sliding_attention" if bool((i + 1) % global_attn_every_n_layers) else "full_attention"
                for i in range(self.num_hidden_layers)
            ]
        layer_type_validation(self.layer_types)
        # Default to multi-head attention (one KV head per query head) when unspecified.
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.tie_word_embeddings = tie_word_embeddings
        super().__init__(**kwargs)
__all__ = ["AfmoeConfig"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/afmoe/configuration_afmoe.py",
"license": "Apache License 2.0",
"lines": 195,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/afmoe/modular_afmoe.py | # Copyright 2025 Arcee AI and the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch AFMoE model."""
from collections.abc import Callable
import torch
from torch import nn
from ... import initialization as init
from ...cache_utils import Cache, DynamicCache
from ...generation import GenerationMixin
from ...masking_utils import create_causal_mask, create_sliding_window_causal_mask
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import MoeModelOutputWithPast
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...utils import TransformersKwargs, auto_docstring, logging
from ...utils.generic import merge_with_config_defaults
from ...utils.output_capturing import capture_outputs
from ..gpt_oss.modeling_gpt_oss import GptOssRMSNorm
from ..llama.modeling_llama import (
LlamaAttention,
LlamaForCausalLM,
LlamaRotaryEmbedding,
apply_rotary_pos_emb,
eager_attention_forward,
)
from ..qwen2_moe.modeling_qwen2_moe import Qwen2MoeMLP
from .configuration_afmoe import AfmoeConfig
logger = logging.get_logger(__name__)
class AfmoeRotaryEmbedding(LlamaRotaryEmbedding):
    """Rotary position embedding for AFMoE; reuses Llama's implementation unchanged."""

    pass
class AfmoeRMSNorm(GptOssRMSNorm):
    """RMS normalization for AFMoE; reuses the GPT-OSS implementation unchanged."""

    pass
class AfmoeMLP(Qwen2MoeMLP):
    """Feed-forward block for AFMoE; reuses the Qwen2-MoE MLP implementation unchanged."""

    pass
class AfmoeTokenChoiceRouter(nn.Module):
    """
    Token-choice top-K router for the AFMoE sparse MoE blocks.

    Every token is scored against every expert with a sigmoid gate. The top-K experts are
    picked after adding a load-balancing bias, and the *unbiased* scores of the winners are
    renormalized and rescaled. Matches the released checkpoints.
    """

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.top_k = config.num_experts_per_tok
        self.num_experts = config.num_experts
        self.route_scale = config.route_scale
        # One logit per expert; no bias term, as in the reference weights.
        self.gate = nn.Linear(config.hidden_size, config.num_experts, bias=False)

    def forward(self, hidden_states: torch.Tensor, expert_bias: torch.Tensor):
        _bsz, _seq_len, feature_dim = hidden_states.shape
        flat_tokens = hidden_states.view(-1, feature_dim)
        # Sigmoid gating, computed in float32 for stability.
        gate_probs = torch.sigmoid(self.gate(flat_tokens).to(torch.float32))
        # The bias only influences *which* experts win, never the returned weights.
        selected_experts = torch.topk(gate_probs + expert_bias, k=self.top_k, dim=1).indices
        routing_weights = gate_probs.gather(dim=1, index=selected_experts)
        # Renormalize over the selected experts; the epsilon guards an all-zero row.
        routing_weights = routing_weights / (routing_weights.sum(dim=-1, keepdim=True) + 1e-20)
        return routing_weights * self.route_scale, selected_experts
class AfmoeExperts(nn.ModuleList):
    """
    Container holding the routed experts.

    This mirrors the Experts pattern used across other MoE models to ease checkpoint conversion.
    The forward pass groups token->expert assignments so each expert runs once on a contiguous
    slice of its assigned tokens, then scatters the weighted results back to token order.
    """

    def __init__(self, config: AfmoeConfig):
        super().__init__()
        self.top_k = config.num_experts_per_tok
        self.num_experts = config.num_experts
        # One MLP per routed expert, all with the (narrower) MoE intermediate size.
        for _ in range(self.num_experts):
            self.append(AfmoeMLP(config, intermediate_size=config.moe_intermediate_size))

    def forward(
        self, hidden_states: torch.Tensor, selected_experts: torch.Tensor, routing_weights: torch.Tensor
    ) -> torch.Tensor:
        """
        Args:
            hidden_states: (batch, seq, hidden)
            selected_experts: (batch, seq, top_k)
            routing_weights: (batch, seq, top_k)

        Returns:
            (batch, seq, hidden) weighted sum of each token's top-k expert outputs.
        """
        batch_size, seq_len, hidden_dim = hidden_states.shape
        # Empty sequence: nothing to route.
        if seq_len == 0:
            return hidden_states.new_zeros(batch_size, 0, hidden_dim)
        hidden_states_flat = hidden_states.view(-1, hidden_dim)
        top_k = selected_experts.shape[-1]
        # Map every token routing decision to a unique position so we can process expert by expert.
        token_indices = torch.arange(
            hidden_states_flat.shape[0], device=hidden_states.device, dtype=torch.long
        ).repeat_interleave(top_k)
        expert_indices = selected_experts.reshape(-1)
        routing_weights = routing_weights.reshape(-1)
        # Stable sort by expert id so each expert's tokens form one contiguous run
        # (stability keeps a deterministic token order within each run).
        sorting = torch.argsort(expert_indices, stable=True)
        token_indices = token_indices[sorting]
        expert_indices = expert_indices[sorting]
        routing_weights = routing_weights[sorting]
        dispatched_tokens = hidden_states_flat.index_select(0, token_indices)
        expert_outputs = torch.zeros_like(dispatched_tokens)
        # After sorting, consecutive runs correspond to single experts; walk run by run.
        unique_experts, counts = torch.unique_consecutive(expert_indices, return_counts=True)
        start = 0
        for expert_id, count in zip(unique_experts.tolist(), counts.tolist()):
            if count == 0:
                continue
            end = start + count
            expert_input = dispatched_tokens[start:end]
            expert_output = self[expert_id](expert_input)
            expert_outputs[start:end] = expert_output
            start = end
        # Weight in float32, then cast back to the activation dtype.
        weighted_outputs = (expert_outputs.to(torch.float32) * routing_weights.unsqueeze(-1)).to(hidden_states.dtype)
        # Scatter-add the k weighted outputs of each token back to its original row.
        aggregated = torch.zeros_like(hidden_states_flat)
        scatter_indices = token_indices.unsqueeze(-1).expand_as(weighted_outputs)
        aggregated.scatter_add_(0, scatter_indices, weighted_outputs)
        return aggregated.view(batch_size, seq_len, hidden_dim)
class AfmoeMoE(nn.Module):
    """
    AFMoE sparse mixture-of-experts block.

    Combines shared experts (always active, fused into one wide MLP) with routed experts
    selected per token by a token-choice router; the two contributions are summed.
    """

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.router = AfmoeTokenChoiceRouter(config)
        # All shared experts are fused into a single MLP of combined width.
        self.shared_experts = AfmoeMLP(config, config.moe_intermediate_size * config.num_shared_experts)
        self.experts = AfmoeExperts(config)
        # Load-balancing bias fed to the router; adjusted outside gradient descent, hence frozen.
        self.expert_bias = nn.Parameter(torch.zeros(config.num_experts), requires_grad=False)

    def forward(self, hidden_states):
        batch_size, seq_len, hidden_dim = hidden_states.shape
        flat_input = hidden_states.view(-1, hidden_dim)
        # Routing decisions come back flattened over tokens; restore (batch, seq, top_k).
        routing_weights, chosen_experts = self.router(hidden_states, self.expert_bias)
        top_k = self.config.num_experts_per_tok
        routing_weights = routing_weights.view(batch_size, seq_len, top_k)
        chosen_experts = chosen_experts.view(batch_size, seq_len, top_k)
        # Dense path: shared experts see every token.
        shared_out = self.shared_experts(flat_input).view(batch_size, seq_len, hidden_dim)
        # Sparse path: routed experts see only their assigned tokens.
        routed_out = self.experts(hidden_states, chosen_experts, routing_weights)
        return shared_out + routed_out
class AfmoeAttention(LlamaAttention):
    """
    Multi-headed attention module with optional sliding window and gating.

    This attention mechanism supports both full attention and sliding window attention,
    and includes Q/K normalization and gating of the output. It inherits from [`LlamaAttention`] to minimize the amount
    of custom logic we need to maintain.
    """

    def __init__(self, config: AfmoeConfig, layer_idx: int):
        super().__init__(config, layer_idx)
        # Parent LlamaAttention already sets: layer_idx, num_heads, num_key_value_heads, num_key_value_groups, head_dim
        # We only add AFMoE-specific attributes
        self.is_local_attention = config.layer_types[layer_idx] == "sliding_attention"
        self.sliding_window = config.sliding_window if self.is_local_attention else None
        # Per-head-dim RMS norm on queries and keys, applied before RoPE.
        self.q_norm = AfmoeRMSNorm(self.head_dim, eps=config.rms_norm_eps)
        self.k_norm = AfmoeRMSNorm(self.head_dim, eps=config.rms_norm_eps)
        # Projection producing a per-channel sigmoid gate applied to the attention output.
        self.gate_proj = nn.Linear(config.hidden_size, config.num_attention_heads * self.head_dim, bias=False)

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: tuple[torch.Tensor, torch.Tensor],
        attention_mask: torch.Tensor | None,
        past_key_value: Cache | None = None,
        cache_position: torch.LongTensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple[torch.Tensor, torch.Tensor]:
        input_shape = hidden_states.shape[:-1]
        hidden_shape = (*input_shape, -1, self.head_dim)
        query_states = self.q_proj(hidden_states).view(hidden_shape)
        key_states = self.k_proj(hidden_states).view(hidden_shape)
        value_states = self.v_proj(hidden_states).view(hidden_shape)
        gate_states = self.gate_proj(hidden_states)
        # Normalize Q/K per head, then move the head axis in front of the sequence axis.
        query_states = self.q_norm(query_states).transpose(1, 2)
        key_states = self.k_norm(key_states).transpose(1, 2)
        value_states = value_states.transpose(1, 2)
        # NOTE(review): rotary embeddings are only applied on sliding-window layers; global
        # layers get no positional rotation here — presumably intentional (NoPE-style global
        # attention), confirm against the reference implementation.
        if self.is_local_attention:
            cos, sin = position_embeddings
            query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
        if past_key_value is not None:
            cache_kwargs = {"cache_position": cache_position}
            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
        # Dispatch to the configured attention backend (eager fallback).
        attention_interface: Callable = ALL_ATTENTION_FUNCTIONS.get_interface(
            self.config._attn_implementation, eager_attention_forward
        )
        output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask=attention_mask,
            dropout=0.0 if not self.training else self.attention_dropout,
            scaling=self.scaling,
            sliding_window=self.sliding_window,
            **kwargs,
        )
        output = output.view(*input_shape, -1).contiguous()
        # Element-wise sigmoid gating of the attention output before the final projection.
        output = output * torch.sigmoid(gate_states)
        attn_output = self.o_proj(output)
        return attn_output, attn_weights
class AfmoeDecoderLayer(GradientCheckpointingLayer):
    """
    Single AFMoE transformer layer.

    Applies gated self-attention and a feed-forward block (dense for the first
    `num_dense_layers` layers, MoE afterwards), each wrapped in *dual* RMS
    normalization: one norm before the sublayer and one after it, inside the
    residual connection.
    """

    def __init__(self, config: AfmoeConfig, layer_idx: int):
        super().__init__()
        self.hidden_size = config.hidden_size
        self.layer_idx = layer_idx
        self.self_attn = AfmoeAttention(config=config, layer_idx=layer_idx)
        self.attention_type = config.layer_types[layer_idx]
        # Dual normalization around the attention sublayer.
        self.input_layernorm = AfmoeRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.post_attention_layernorm = AfmoeRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        # Dual normalization around the feed-forward sublayer.
        self.pre_mlp_layernorm = AfmoeRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.post_mlp_layernorm = AfmoeRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        # Only layers past the first `num_dense_layers` use a mixture of experts.
        self.moe_enabled = layer_idx >= config.num_dense_layers
        self.mlp = AfmoeMoE(config) if self.moe_enabled else AfmoeMLP(config)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_value: Cache | None = None,
        use_cache: bool | None = None,
        cache_position: torch.LongTensor | None = None,
        position_embeddings: tuple[torch.Tensor, torch.Tensor] | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> torch.FloatTensor:
        # Attention sublayer: norm -> attention -> norm -> residual add.
        shortcut = hidden_states
        normed = self.input_layernorm(hidden_states)
        attn_out, _ = self.self_attn(
            hidden_states=normed,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_value=past_key_value,
            use_cache=use_cache,
            cache_position=cache_position,
            position_embeddings=position_embeddings,
            **kwargs,
        )
        hidden_states = shortcut + self.post_attention_layernorm(attn_out)

        # Feed-forward sublayer: norm -> (dense or MoE) MLP -> norm -> residual add.
        shortcut = hidden_states
        ffn_out = self.mlp(self.pre_mlp_layernorm(hidden_states))
        hidden_states = shortcut + self.post_mlp_layernorm(ffn_out)
        return hidden_states
class AfmoePreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config: AfmoeConfig
    base_model_prefix = "model"
    # Keep each decoder layer whole when sharding across devices with `device_map`.
    _no_split_modules = ["AfmoeDecoderLayer"]
    _skip_keys_device_placement = ["past_key_values"]
    # Modules whose outputs can be recorded by the output-capturing utilities.
    _can_record_outputs = {
        "hidden_states": AfmoeDecoderLayer,
        "attentions": AfmoeAttention,
    }
    # Norm layers and the routing bias are kept in fp32 under half-precision loading.
    _keep_in_fp32_modules = [
        "input_layernorm",
        "post_attention_layernorm",
        "pre_mlp_layernorm",
        "post_mlp_layernorm",
        "q_norm",
        "k_norm",
        "norm",
        "expert_bias",
    ]
    _supports_sdpa = True
    _supports_flash_attn = True
    _supports_flex_attn = True
    _supports_attention_backend = True
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        """Initialize the weights"""
        super()._init_weights(module)
        # Router gate and load-balancing bias start at zero so initial routing is uniform.
        if isinstance(module, AfmoeTokenChoiceRouter):
            init.zeros_(module.gate.weight)
        elif isinstance(module, AfmoeMoE):
            init.zeros_(module.expert_bias)
@auto_docstring
class AfmoeModel(AfmoePreTrainedModel):
    """
    Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`AfmoeDecoderLayer`]

    Args:
        config: AfmoeConfig
    """

    def __init__(self, config: AfmoeConfig):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size
        self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        self.layers = nn.ModuleList(
            [AfmoeDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        self.norm = AfmoeRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.rotary_emb = AfmoeRotaryEmbedding(config=config)
        self.gradient_checkpointing = False
        # Weight initialization and final setup shared by all transformers models.
        self.post_init()

    @auto_docstring
    @merge_with_config_defaults
    @capture_outputs
    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        attention_mask: torch.Tensor | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: Cache | None = None,
        cache_position: torch.LongTensor | None = None,
        use_cache: bool | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple | MoeModelOutputWithPast:
        # Exactly one of `input_ids` / `inputs_embeds` must be provided.
        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
        if use_cache and past_key_values is None:
            past_key_values = DynamicCache(config=self.config)
        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)
        if cache_position is None:
            # Positions of the incoming tokens, offset by whatever is already cached.
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position = torch.arange(
                past_seen_tokens,
                past_seen_tokens + inputs_embeds.shape[1],
                device=inputs_embeds.device,
            )
        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)
        # It may already have been prepared by e.g. `generate`
        if not isinstance(causal_mask_mapping := attention_mask, dict):
            mask_kwargs = {
                "config": self.config,
                "inputs_embeds": inputs_embeds,
                "attention_mask": attention_mask,
                "cache_position": cache_position,
                "past_key_values": past_key_values,
            }
            # One mask per attention flavor; each layer picks the one matching its type.
            causal_mask_mapping = {
                "full_attention": create_causal_mask(**mask_kwargs),
                "sliding_attention": create_sliding_window_causal_mask(**mask_kwargs),
            }
        hidden_states = inputs_embeds
        # Apply muP input scaling if enabled
        if self.config.mup_enabled:
            hidden_states = hidden_states * (self.config.hidden_size**0.5)
        position_embeddings = self.rotary_emb(hidden_states, position_ids)
        for decoder_layer in self.layers:
            hidden_states = decoder_layer(
                hidden_states,
                attention_mask=causal_mask_mapping[decoder_layer.attention_type],
                position_ids=position_ids,
                past_key_value=past_key_values,
                use_cache=use_cache,
                cache_position=cache_position,
                position_embeddings=position_embeddings,
                **kwargs,
            )
        hidden_states = self.norm(hidden_states)
        return MoeModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values if use_cache else None,
        )
class AfmoeForCausalLM(LlamaForCausalLM, AfmoePreTrainedModel, GenerationMixin):
    """AFMoE model with a language-modeling head; reuses `LlamaForCausalLM`'s forward pass."""

    def __init__(self, config):
        # Deliberately call AfmoePreTrainedModel.__init__ (not LlamaForCausalLM's) so the
        # backbone is built here as an AfmoeModel.
        AfmoePreTrainedModel.__init__(self, config)
        self.model = AfmoeModel(config)
        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        self.post_init()
__all__ = [
"AfmoeForCausalLM",
"AfmoeModel",
"AfmoePreTrainedModel",
]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/afmoe/modular_afmoe.py",
"license": "Apache License 2.0",
"lines": 387,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:tests/models/afmoe/test_modeling_afmoe.py | # Copyright 2025 Arcee AI and the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers import AfmoeForCausalLM, AfmoeModel
from ...causal_lm_tester import CausalLMModelTest, CausalLMModelTester
class AfmoeModelTester(CausalLMModelTester):
    """Builds tiny AFMoE configs/models for the shared causal-LM test harness."""

    if is_torch_available():
        base_model_class = AfmoeModel

    def __init__(
        self,
        parent,
        batch_size=4,
        seq_length=12,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=64,
        hidden_size=32,
        intermediate_size=16,
        moe_intermediate_size=16,
        num_hidden_layers=2,
        num_dense_layers=1,
        num_attention_heads=16,
        num_key_value_heads=16,
        head_dim=128,
        hidden_act="silu",
        max_position_embeddings=128,
        initializer_range=0.02,
        rms_norm_eps=1e-5,
        use_cache=False,
        rope_theta=10000.0,
        rope_parameters=None,
        num_experts=4,
        num_experts_per_tok=2,
        num_shared_experts=2,
        route_norm=True,
        route_scale=1.0,
        global_attn_every_n_layers=2,
        sliding_window=128,
        attention_dropout=0.0,
    ):
        # Generic sizes are handled by the shared tester base class.
        super().__init__(
            parent=parent,
            batch_size=batch_size,
            seq_length=seq_length,
            is_training=is_training,
            use_input_mask=use_input_mask,
            use_token_type_ids=use_token_type_ids,
            use_labels=use_labels,
            vocab_size=vocab_size,
            hidden_size=hidden_size,
            num_hidden_layers=num_hidden_layers,
            num_attention_heads=num_attention_heads,
            num_key_value_heads=num_key_value_heads,
            intermediate_size=intermediate_size,
            hidden_act=hidden_act,
            max_position_embeddings=max_position_embeddings,
            initializer_range=initializer_range,
        )
        # AFMoE-specific knobs are stored directly for config construction by the harness.
        self.use_cache = use_cache
        self.head_dim = head_dim
        self.rms_norm_eps = rms_norm_eps
        self.rope_theta = rope_theta
        self.moe_intermediate_size = moe_intermediate_size
        self.num_dense_layers = num_dense_layers
        self.num_experts = num_experts
        self.num_experts_per_tok = num_experts_per_tok
        self.num_shared_experts = num_shared_experts
        # NOTE(review): `route_norm` is not a visible AfmoeConfig field — possibly a leftover
        # from an earlier config version; verify it is still consumed anywhere.
        self.route_norm = route_norm
        self.route_scale = route_scale
        self.global_attn_every_n_layers = global_attn_every_n_layers
        self.sliding_window = sliding_window
        self.attention_dropout = attention_dropout
@require_torch
class AfmoeModelTest(CausalLMModelTest, unittest.TestCase):
    """Runs the shared causal-LM test suite against AFMoE, skipping incompatible cases."""

    model_tester_class = AfmoeModelTester
    all_model_classes = (AfmoeModel, AfmoeForCausalLM) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": AfmoeModel, "text-generation": AfmoeForCausalLM} if is_torch_available() else {}
    )

    @unittest.skip("Afmoe applies key/query norm which doesn't work with packing")
    def test_eager_padding_matches_padding_free_with_position_ids(self):
        pass

    @unittest.skip("Afmoe applies key/query norm which doesn't work with packing")
    def test_sdpa_padding_matches_padding_free_with_position_ids(self):
        pass

    @unittest.skip("Afmoe applies key/query norm which doesn't work with packing")
    def test_model_rope_scaling_frequencies(self):
        pass

    @unittest.skip("Afmoe has moe, output can be different")
    def test_model_outputs_equivalence(self, **kwargs):
        pass

    # TODO: Add integration tests once we have a checkpoint on the Hub
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/afmoe/test_modeling_afmoe.py",
"license": "Apache License 2.0",
"lines": 108,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:src/transformers/models/gpt_neox/tokenization_gpt_neox.py | # Copyright 2022 EleutherAI and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes for GPTNeoX."""
from tokenizers import Tokenizer, decoders, normalizers, pre_tokenizers
from tokenizers.models import BPE
from ...tokenization_utils_tokenizers import TokenizersBackend
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
class GPTNeoXTokenizer(TokenizersBackend):
    """
    Construct a GPT-NeoX-20B tokenizer (backed by HuggingFace's tokenizers library). Based on byte-level
    Byte-Pair-Encoding.

    This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will
    be encoded differently whether it is at the beginning of the sentence (without space) or not:

    ```python
    >>> from transformers import GPTNeoXTokenizer
    >>> tokenizer = GPTNeoXTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
    >>> tokenizer("Hello world")["input_ids"]
    [15496, 995]
    >>> tokenizer(" Hello world")["input_ids"]
    [18435, 995]
    ```

    You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer, but since
    the model was not pretrained this way, it might yield a decrease in performance.

    <Tip>
    When used with `is_split_into_words=True`, this tokenizer needs to be instantiated with `add_prefix_space=True`.
    </Tip>

    This tokenizer inherits from [`TokenizersBackend`] which contains most of the main methods. Users should
    refer to this superclass for more information regarding those methods.

    Args:
        vocab (`str` or `dict[str, int]`, *optional*):
            Custom vocabulary dictionary. If not provided, a minimal vocabulary containing only `unk_token` and
            `pad_token` is used.
        merges (`str` or `list[str]`, *optional*):
            Custom merges list. If not provided, an empty merge list is used.
        errors (`str`, *optional*, defaults to `"replace"`):
            Paradigm to follow when decoding bytes to UTF-8. See
            [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
        unk_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
            token instead.
        bos_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
            The beginning of sequence token.
        eos_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
            The end of sequence token.
        pad_token (`str`, *optional*, defaults to `"<|padding|>"`):
            Token for padding a sequence.
        add_prefix_space (`bool`, *optional*, defaults to `False`):
            Whether or not to add an initial space to the input. This allows to treat the leading word just as any
            other word. (GPTNeoX tokenizer detect beginning of words by the preceding space).
        trim_offsets (`bool`, *optional*, defaults to `True`):
            Whether or not the post-processing step should trim offsets to avoid including whitespaces.

    Additional keyword arguments (e.g. `vocab_file`, `merges_file`, `tokenizer_file`) are forwarded to
    [`TokenizersBackend`] — presumably file-based loading happens there; verify against the base class.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    model_input_names = ["input_ids", "attention_mask"]
    # Underlying `tokenizers` model class used by this backend.
    model = BPE

    def __init__(
        self,
        vocab: str | dict[str, int] | None = None,
        merges: str | list[str] | None = None,
        errors: str = "replace",
        unk_token: str = "<|endoftext|>",
        bos_token: str = "<|endoftext|>",
        eos_token: str = "<|endoftext|>",
        pad_token: str = "<|padding|>",
        add_prefix_space: bool = False,
        trim_offsets: bool = True,
        **kwargs,
    ):
        self.add_prefix_space = add_prefix_space
        self.trim_offsets = trim_offsets
        # Fall back to a minimal vocabulary so the BPE model can always be constructed.
        self._vocab = vocab if vocab is not None else {str(unk_token): 0, str(pad_token): 1}
        self._merges = merges or []
        self._tokenizer = Tokenizer(
            BPE(
                vocab=self._vocab,
                merges=self._merges,
                dropout=None,
                continuing_subword_prefix="",
                end_of_word_suffix="",
                fuse_unk=False,
            )
        )
        # Unicode NFC normalization, then GPT-2-style byte-level pre-tokenization.
        self._tokenizer.normalizer = normalizers.NFC()
        self._tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(
            add_prefix_space=add_prefix_space, trim_offsets=trim_offsets
        )
        # Decoding never re-adds a prefix space, regardless of the encode-side setting.
        self._tokenizer.decoder = decoders.ByteLevel(add_prefix_space=False, trim_offsets=True)
        super().__init__(
            errors=errors,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/gpt_neox/tokenization_gpt_neox.py",
"license": "Apache License 2.0",
"lines": 120,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/tokenization_utils_sentencepiece.py | # Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
SentencePiece-based tokenization class for loading from sentencepiece.model files.
"""
import os
from shutil import copyfile
try:
import sentencepiece as spm
except ImportError:
spm = None
from .convert_slow_tokenizer import import_protobuf
from .tokenization_python import PreTrainedTokenizer
from .tokenization_utils_base import (
INIT_TOKENIZER_DOCSTRING,
AddedToken,
generate_merges,
)
from .utils import add_end_docstrings, logging, requires_backends
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "tokenizer.model"}
SPIECE_UNDERLINE = "▁"
@add_end_docstrings(INIT_TOKENIZER_DOCSTRING)
class SentencePieceBackend(PreTrainedTokenizer):
"""
Base class for SentencePiece-based tokenizers that load from sentencepiece.model files.
Inherits from [`~tokenization_utils.PreTrainedTokenizer`].
Handle all the shared methods for tokenization and special tokens as well as methods downloading/caching/loading
pretrained tokenizers as well as adding tokens to the vocabulary.
This class also contain the added tokens in a unified way on top of all tokenizers so we don't have to handle the
specific vocabulary augmentation methods of the various underlying dictionary structures (BPE, sentencepiece...).
"""
vocab_files_names = VOCAB_FILES_NAMES
    def __init__(self, **kwargs):
        """
        Load a SentencePiece model and initialize the tokenizer base class.

        Relevant keyword arguments:
            vocab_file (`str`): path to the `sentencepiece.model` file. Read with `get` (not popped)
                so it remains available to the base class.
            legacy (`bool`, defaults to `True`): when `False`, the model's `add_dummy_prefix`
                normalizer flag is disabled so no dummy leading space is injected.
            sp_model_kwargs (`dict`): extra arguments for `sentencepiece.SentencePieceProcessor`.
        Remaining kwargs are forwarded to `PreTrainedTokenizer.__init__`.
        """
        # Ensure optional dependency is available before loading
        requires_backends(self, "sentencepiece")
        # Extract sentencepiece-specific parameters
        self.vocab_file = kwargs.get("vocab_file")
        self.legacy = kwargs.get("legacy", True)
        self.sp_model_kwargs = kwargs.pop("sp_model_kwargs", {})
        # Set backend to "sentencepiece" if not already set
        if "backend" not in kwargs:
            kwargs["backend"] = "sentencepiece"
        # Load the SentencePiece model before calling parent __init__
        # This is needed because parent __init__ may call methods that depend on sp_model
        tokenizer = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        tokenizer.Load(self.vocab_file)
        if not self.legacy:
            # Rewrite the serialized proto with `add_dummy_prefix` turned off, then reload.
            model_pb2 = import_protobuf()
            proto = model_pb2.ModelProto.FromString(tokenizer.serialized_model_proto())
            if proto.normalizer_spec.add_dummy_prefix:
                proto.normalizer_spec.add_dummy_prefix = False
            tokenizer.LoadFromSerializedProto(proto.SerializeToString())
        self.sp_model = tokenizer
        # Initialize total_vocab_size before parent __init__ (which may call _add_tokens -> len(self))
        self.total_vocab_size = self.sp_model.get_piece_size()
        # Add sp_model_kwargs back to kwargs so it gets stored in init_kwargs
        kwargs["sp_model_kwargs"] = self.sp_model_kwargs
        # Call parent class __init__ (PreTrainedTokenizer)
        # This handles tokens_trie, _added_tokens_decoder, _added_tokens_encoder,
        # token_type_ids_pattern, special_tokens_pattern, and adds special tokens
        super().__init__(**kwargs)
        self._update_trie()
    @property
    def vocab_size(self) -> int:
        """Size of the base SentencePiece vocabulary (runtime-added tokens are not counted)."""
        return self.sp_model.get_piece_size()
def get_vocab(self):
"""Returns vocab as a dict"""
vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def _add_tokens(self, new_tokens: list[str] | list[AddedToken], special_tokens: bool = False) -> int:
    """
    Add a list of new tokens to the tokenizer class. If the new tokens are not in the vocabulary, they are added to
    it with indices starting from length of the current vocabulary. Special tokens are sometimes already in the
    vocab which is why they have to be handled specifically.

    Args:
        new_tokens (`list[str]`or `list[tokenizers.AddedToken]`):
            Token(s) to add in vocabulary. A token is counted as added if it's not already in the vocabulary
            (tested by checking if the tokenizer assign the index of the `unk_token` to them). If a token is part
            of the vocabulary then we simply mark this token as an `AddedToken` which allows to control the
            stripping and normalization of this token. This is NOT possible in `tokenizers`.
        special_tokens (`bool`, *optional*, defaults to `False`):
            Whether or not the tokens should be added as special tokens.

    Returns:
        `int`: The number of tokens actually added to the vocabulary.

    Examples:

    ```python
    # Let's see how to increase the vocabulary of Bert model and tokenizer
    tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-uncased")
    model = BertModel.from_pretrained("google-bert/bert-base-uncased")

    num_added_toks = tokenizer.add_tokens(["new_tok1", "my_new-tok2"])
    print("We have added", num_added_toks, "tokens")
    # Note: resize_token_embeddings expects to receive the full size of the new vocabulary, i.e. the length of the tokenizer.
    model.resize_token_embeddings(len(tokenizer))
    ```"""
    if not new_tokens:
        return 0
    # Fresh ids are assigned past the end of base + previously added tokens.
    next_index = len(self)  # total size (base + added)
    num_added = 0
    for token in new_tokens:
        if not isinstance(token, (str, AddedToken)):
            raise TypeError(f"Token {token} is not a string but a {type(token)}.")
        if str(token) == "":
            continue
        if isinstance(token, str):
            if token in self._added_tokens_encoder:
                continue
            is_special = token in self.all_special_tokens or special_tokens
            token = AddedToken(token, rstrip=False, lstrip=False, normalized=not is_special, special=is_special)
        elif special_tokens:
            # doing token.special=True changes the normalization! will fix in rust
            # this is important and the only reason why the AddedTokens in each class are normalized by default
            token.__setstate__({"special": True, "normalized": token.normalized})
        if token in self._added_tokens_decoder.values():
            continue
        if not token.special and token.normalized and getattr(self, "do_lower_case", False):
            token.content = token.content.lower()
        # Check if token already exists in the SentencePiece base vocab
        tok_id = self.sp_model.piece_to_id(token.content)
        in_base_vocab = (
            tok_id < self.sp_model.get_piece_size() and self.sp_model.IdToPiece(tok_id) == token.content
        )
        if in_base_vocab:
            # Reuse the base-vocab id; this does NOT count towards the returned total.
            token_index = tok_id
        else:
            token_index = next_index
            next_index += 1
            num_added += 1
        if token.special and str(token) not in self.all_special_tokens:
            self._extra_special_tokens.append(token)
        # the setter automatically updates the reverse map
        self._added_tokens_decoder[token_index] = token
        self._added_tokens_encoder[token.content] = token_index
        if self.verbose:
            logger.info(f"Adding {token} to the vocabulary")
    # Refresh the no-split trie and the cached total size once, after all insertions.
    self._update_trie()
    self._update_total_vocab_size()
    return num_added
def _update_trie(self, unique_no_split_tokens: list[str] | None = None):
# Add all added tokens
for token in self._added_tokens_decoder.values():
if token.content not in self.tokens_trie._tokens:
self.tokens_trie.add(token.content)
# Also add all special tokens (even if they're in base vocab) so they get split during tokenization
for token in self.all_special_tokens:
if token not in self.tokens_trie._tokens:
self.tokens_trie.add(token)
# Add any additional no-split tokens
for token in unique_no_split_tokens or []:
if token not in self.tokens_trie._tokens:
self.tokens_trie.add(token)
def _tokenize(self, text, **kwargs):
    """
    Returns a tokenized string.

    We de-activated the `add_dummy_prefix` option, thus the sentencepiece internals will always strip any
    SPIECE_UNDERLINE. For example: `self.sp_model.encode(f"{SPIECE_UNDERLINE}Hey", out_type = str)` will give
    `['H', 'e', 'y']` instead of `['▁He', 'y']`. Thus we always encode `f"{unk_token}text"` and strip the
    `unk_token`. Here is an example with `unk_token = "<unk>"` and `unk_token_length = 4`.
    `self.tokenizer.sp_model.encode("<unk> Hey", out_type = str)[4:]`.
    """
    # Legacy mode, or text that does not start with a space/underline, can use the raw encoder.
    if self.legacy or not text.startswith((SPIECE_UNDERLINE, " ")):
        return self.sp_model.encode(text, out_type=str)

    # 1. Encode string + prefix ex: "<unk> Hey"
    tokens = self.sp_model.encode(self.unk_token + text, out_type=str)
    # 2. Remove self.unk_token from ['<','unk','>', '▁Hey']
    # Recomputed on each call since the unk token may change after init.
    unk_token_length = len(self.sp_model.encode(str(self.unk_token)))
    return tokens[unk_token_length:] if len(tokens) >= unk_token_length else tokens
def _convert_token_to_id(self, token):
"""Converts a token (str) to an id using the vocab."""
return self.sp_model.piece_to_id(token)
def _convert_id_to_token(self, index):
"""Converts an index (integer) in a token (str) using the vocab."""
token = self.sp_model.IdToPiece(index)
return token
def convert_tokens_to_string(self, tokens: list[str]) -> str:
    """Join a sequence of pieces back into text, mapping the SP underline to spaces."""
    joined = "".join(tokens)
    return joined.replace(SPIECE_UNDERLINE, " ").strip()
def save_vocabulary(self, save_directory: str, filename_prefix: str | None = None) -> tuple[str]:
"""
Save the sentencepiece vocabulary (copy original file) to a directory.
Args:
save_directory (`str`):
The directory in which to save the vocabulary.
filename_prefix (`str`, *optional*):
An optional prefix to add to the named of the saved files.
Returns:
`tuple(str)`: Paths to the files saved.
"""
if not os.path.isdir(save_directory):
logger.error(f"Vocabulary path ({save_directory}) should be a directory")
return
out_vocab_file = os.path.join(
save_directory, (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
)
if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file, out_vocab_file)
elif not os.path.isfile(self.vocab_file):
with open(out_vocab_file, "wb") as fi:
content_spiece_model = self.sp_model.serialized_model_proto()
fi.write(content_spiece_model)
return (out_vocab_file,)
def _decode(
    self,
    token_ids: int | list[int],
    skip_special_tokens: bool = False,
    clean_up_tokenization_spaces: bool | None = None,
    spaces_between_special_tokens: bool = False,
    **kwargs,
) -> str:
    """
    Decode token ids to string.

    Uses the generic decode path from PreTrainedTokenizer which works for all vocabularies,
    including custom vocabularies that override _convert_id_to_token.

    NOTE(review): `spaces_between_special_tokens` is accepted for signature compatibility
    but is deliberately not forwarded to the parent implementation.
    """
    # Use parent class's generic decode method - it's simpler and works for all cases
    return super()._decode(
        token_ids=token_ids,
        skip_special_tokens=skip_special_tokens,
        clean_up_tokenization_spaces=clean_up_tokenization_spaces,
        **kwargs,
    )
class SentencePieceExtractor:
    """
    Extractor implementation for SentencePiece trained models. https://github.com/google/sentencepiece

    Loads a trained SentencePiece model and exposes its vocabulary, piece scores, and the
    reconstructed BPE merges.
    """

    def __init__(self, model: str):
        """
        Args:
            model (`str`): Path to the trained SentencePiece model file.
        """
        requires_backends(self, "sentencepiece")
        from sentencepiece import SentencePieceProcessor

        self.sp = SentencePieceProcessor()
        self.sp.Load(model)

    def extract(self, vocab_scores=None) -> tuple[dict[str, int], list[tuple[str, float]], list[tuple]]:
        """
        By default will return vocab and merges with respect to their order, by sending `vocab_scores` we're going to
        order the merges with respect to the piece scores instead.

        Args:
            vocab_scores (*optional*): Iterable of `(piece, score)` pairs that, when provided,
                overrides the scores read from the model for ordering the merges.

        Returns:
            `tuple`: `(vocab_ids, vocab_scores_list, merges)` where `vocab_ids` maps piece -> id,
            `vocab_scores_list` is `(piece, model_score)` in id order, and `merges` are the BPE merges.
        """
        sp = self.sp
        piece_count = sp.GetPieceSize()
        vocab_ids = {sp.id_to_piece(index): index for index in range(piece_count)}
        model_scores = {sp.id_to_piece(i): sp.get_score(i) for i in range(piece_count)}
        # Bug fix: `vocab_scores` used to be accepted but silently ignored, contradicting the
        # docstring. Honor it when provided, falling back to the scores stored in the model.
        score_map = dict(vocab_scores) if vocab_scores is not None else model_scores
        merges = generate_merges(vocab_ids, score_map)
        vocab_scores_list = [(piece, model_scores[piece]) for piece in vocab_ids]
        return vocab_ids, vocab_scores_list, merges
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/tokenization_utils_sentencepiece.py",
"license": "Apache License 2.0",
"lines": 255,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:tests/test_sentencepiece_backend_mixin.py | # Sentencepiece backend layer tests
import shutil
import tempfile
from typing import TYPE_CHECKING
from transformers import AutoTokenizer, PythonBackend, TokenizersBackend
from transformers.tokenization_python import AddedToken
if TYPE_CHECKING:
pass
class SentencePieceBackendTesterMixin:
    """
    Tests that specifically test the SentencePiece backend.

    Subclasses set ``tokenizer_class`` / ``rust_tokenizer_class`` and a ``from_pretrained_id``
    checkpoint; the ``test_*`` flags below control which backends each test exercises.
    """

    tokenizer_class = None  # slow (Python/SentencePiece) tokenizer class under test
    rust_tokenizer_class = None  # fast (tokenizers) counterpart, if any
    test_sentencepiece = True
    test_sentencepiece_ignore_case = False
    test_slow_tokenizer = True
    test_rust_tokenizer = False
    from_pretrained_id = "huggyllama/llama-7b"
    from_pretrained_kwargs = {"use_fast": False}

    @classmethod
    def setUpClass(cls) -> None:
        # Shared scratch directory for tests that save/reload tokenizers.
        cls.tmpdirname = tempfile.mkdtemp()

    @classmethod
    def tearDownClass(cls):
        shutil.rmtree(cls.tmpdirname, ignore_errors=True)

    @classmethod
    def get_tokenizer(cls, **kwargs) -> PythonBackend:
        # Per-call kwargs override the class-level from_pretrained_kwargs.
        merged_kwargs = {}
        if cls.from_pretrained_kwargs is not None:
            merged_kwargs.update(cls.from_pretrained_kwargs)
        merged_kwargs.update(kwargs)
        return AutoTokenizer.from_pretrained(cls.from_pretrained_id, **merged_kwargs)

    @classmethod
    def get_rust_tokenizer(cls, **kwargs) -> TokenizersBackend:
        return cls.rust_tokenizer_class.from_pretrained(cls.from_pretrained_id, **kwargs)

    def get_tokenizers(self, fast=True, **kwargs):
        # Return every tokenizer flavor enabled by the test_* flags.
        if fast and self.test_rust_tokenizer and self.test_slow_tokenizer:
            return [self.get_tokenizer(**kwargs), self.get_rust_tokenizer(**kwargs)]
        elif fast and self.test_rust_tokenizer:
            return [self.get_rust_tokenizer(**kwargs)]
        elif self.test_slow_tokenizer:
            return [self.get_tokenizer(**kwargs)]
        else:
            raise ValueError("This tokenizer class has no tokenizer to be tested.")

    def test_sentencepiece_tokenize_and_convert_tokens_to_string(self):
        """Test ``_tokenize`` and ``convert_tokens_to_string``."""
        if not self.test_sentencepiece:
            self.skipTest(reason="test_sentencepiece is set to False")
        tokenizer = self.get_tokenizer()
        text = "This is text to test the tokenizer."
        if self.test_sentencepiece_ignore_case:
            text = text.lower()
        tokens = tokenizer.tokenize(text)
        self.assertTrue(len(tokens) > 0)
        # check if converting back to original text works
        reverse_text = tokenizer.convert_tokens_to_string(tokens)
        if self.test_sentencepiece_ignore_case:
            reverse_text = reverse_text.lower()
        self.assertEqual(reverse_text, text)
        # Every special token must survive a tokens -> string round trip.
        special_tokens = tokenizer.all_special_tokens
        special_tokens_string = tokenizer.convert_tokens_to_string(special_tokens)
        for special_token in special_tokens:
            self.assertIn(special_token, special_tokens_string)
        if self.test_rust_tokenizer:
            rust_tokenizer = self.get_rust_tokenizer()
            special_tokens_string_rust = rust_tokenizer.convert_tokens_to_string(special_tokens)
            self.assertEqual(special_tokens_string, special_tokens_string_rust)

    def test_sentencepiece_tokenize_and_decode(self):
        # Slow and fast backends must produce identical ids and decoded text.
        if not self.test_sentencepiece:
            self.skipTest(reason="test_sentencepiece is set to False")
        text = "This is text to test the tokenizer."
        if self.test_rust_tokenizer:
            tokenizer = self.get_tokenizer()
            rust_tokenizer = self.get_rust_tokenizer()
            slow_ids = tokenizer(text).input_ids
            fast_ids = rust_tokenizer(text).input_ids
            self.assertEqual(slow_ids, fast_ids)
            slow_decoded = tokenizer.decode(slow_ids)
            fast_decoded = rust_tokenizer.decode(slow_ids)
            self.assertEqual(slow_decoded, fast_decoded)

    def test_save_sentencepiece_tokenizer(self) -> None:
        # A save/load/save/load round trip must not change encodings.
        text = "This is text to test the tokenizer."
        tokenizer_slow_1 = self.get_tokenizer()
        encoding_tokenizer_slow_1 = tokenizer_slow_1(text)
        tmpdirname_1 = tempfile.mkdtemp()
        tmpdirname_2 = tempfile.mkdtemp()
        tokenizer_slow_1.save_pretrained(tmpdirname_1)
        tokenizer_slow_2 = self.tokenizer_class.from_pretrained(tmpdirname_1)
        encoding_tokenizer_slow_2 = tokenizer_slow_2(text)
        shutil.rmtree(tmpdirname_1)
        tokenizer_slow_2.save_pretrained(tmpdirname_2)
        tokenizer_slow_3 = self.tokenizer_class.from_pretrained(tmpdirname_2)
        encoding_tokenizer_slow_3 = tokenizer_slow_3(text)
        shutil.rmtree(tmpdirname_2)
        self.assertEqual(encoding_tokenizer_slow_1, encoding_tokenizer_slow_2)
        self.assertEqual(encoding_tokenizer_slow_1, encoding_tokenizer_slow_3)

    def test_added_token_are_matched_longest_first(self):
        tokenizers = self.get_tokenizers(fast=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                try:
                    tokenizer.add_tokens([AddedToken("extra_id_1")])
                    tokenizer.add_tokens([AddedToken("extra_id_100")])
                except Exception:
                    # Canine cannot add tokens which are not codepoints
                    self.skipTest(reason="Cannot add those Added tokens")
                # XXX: This used to split on `extra_id_1` first we're matching
                # longest first now.
                tokens = tokenizer.tokenize("This is some extra_id_100")
                self.assertIn("extra_id_100", tokens)
        # Same check with the insertion order reversed: order must not matter.
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokenizer.add_tokens([AddedToken("extra_id_100")])
                tokenizer.add_tokens([AddedToken("extra_id_1")])
                tokens = tokenizer.tokenize("This is some extra_id_100")
                self.assertIn("extra_id_100", tokens)

    def test_added_tokens_do_lower_case(self):
        tokenizer = self.get_tokenizer(do_lower_case=True)
        if not hasattr(tokenizer, "do_lower_case") or not tokenizer.do_lower_case:
            self.skipTest(reason="Tokenizer does not support do_lower_case")
        special_token = tokenizer.all_special_tokens[0]
        text = special_token + " aaaaa bbbbbb low cccccccccdddddddd l " + special_token
        text2 = special_token + " AAAAA BBBBBB low CCCCCCCCCDDDDDDDD l " + special_token
        toks_before_adding = tokenizer.tokenize(text)  # toks before adding new_toks
        new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd", "AAAAA BBBBBB", "CCCCCCCCCDDDDDDDD"]
        added = tokenizer.add_tokens([AddedToken(tok, lstrip=True, rstrip=True) for tok in new_toks])
        toks_after_adding = tokenizer.tokenize(text)
        toks_after_adding2 = tokenizer.tokenize(text2)
        # Rust tokenizers don't lowercase added tokens at the time calling `tokenizer.add_tokens`,
        # while python tokenizers do, so new_toks 0 and 2 would be treated as the same, so do new_toks 1 and 3.
        self.assertIn(added, [2, 4])
        self.assertListEqual(toks_after_adding, toks_after_adding2)
        self.assertTrue(
            len(toks_before_adding) > len(toks_after_adding),  # toks_before_adding should be longer
        )
        # Check that none of the special tokens are lowercased
        sequence_with_special_tokens = "A " + " yEs ".join(tokenizer.all_special_tokens) + " B"
        # Convert the tokenized list to str as some special tokens are tokenized like normal tokens
        # which have a prefix spacee e.g. the mask token of Albert, and cannot match the original
        # special tokens exactly.
        tokenized_sequence = "".join(tokenizer.tokenize(sequence_with_special_tokens))
        for special_token in tokenizer.all_special_tokens:
            self.assertTrue(special_token in tokenized_sequence or special_token.lower() in tokenized_sequence)

    def test_add_tokens_tokenizer(self):
        tokenizer = self.get_tokenizer(do_lower_case=False)
        vocab_size = tokenizer.vocab_size
        all_size = len(tokenizer)
        self.assertNotEqual(vocab_size, 0)
        new_toks = [
            AddedToken("aaaaa bbbbbb", rstrip=True, lstrip=True),
            AddedToken("cccccccccdddddddd", rstrip=True, lstrip=True),
        ]
        added_toks = tokenizer.add_tokens(new_toks)
        vocab_size_2 = tokenizer.vocab_size
        all_size_2 = len(tokenizer)
        self.assertNotEqual(vocab_size_2, 0)
        # Base vocab size is untouched; only the total length grows.
        self.assertEqual(vocab_size, vocab_size_2)
        self.assertEqual(added_toks, len(new_toks))
        self.assertEqual(all_size_2, all_size + len(new_toks))
        tokens = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l", add_special_tokens=False)
        self.assertGreaterEqual(len(tokens), 4)
        # Added tokens get ids past the end of the base vocabulary.
        self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
        self.assertGreater(tokens[-2], tokenizer.vocab_size - 1)
        new_toks_2 = {
            "eos_token": AddedToken(">>>>|||<||<<|<<", rstrip=True, lstrip=True),
            "pad_token": AddedToken("<<<<<|||>|>>>>|>", rstrip=True, lstrip=True),
        }
        added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
        vocab_size_3 = tokenizer.vocab_size
        all_size_3 = len(tokenizer)
        self.assertNotEqual(vocab_size_3, 0)
        self.assertEqual(vocab_size, vocab_size_3)
        self.assertEqual(added_toks_2, len(new_toks_2))
        self.assertEqual(all_size_3, all_size_2 + len(new_toks_2))
        tokens = tokenizer.encode(
            ">>>>|||<||<<|<< aaaaa bbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l", add_special_tokens=False
        )
        self.assertGreaterEqual(len(tokens), 6)
        self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
        self.assertGreater(tokens[0], tokens[1])
        self.assertGreater(tokens[-2], tokenizer.vocab_size - 1)
        self.assertGreater(tokens[-2], tokens[-3])
        self.assertEqual(tokens[0], tokenizer.eos_token_id)
        self.assertEqual(tokens[-2], tokenizer.pad_token_id)

    def test_add_special_tokens(self):
        self.skipTest(reason="Redundant with test_add_tokens_tokenizer")

    def test_add_tokens(self):
        if not self.test_rust_tokenizer:
            self.skipTest(reason="test_rust_tokenizer is set to False")
        tokenizer_r = self.get_rust_tokenizer()
        vocab_size = len(tokenizer_r)
        self.assertEqual(tokenizer_r.add_tokens(""), 0)
        self.assertEqual(tokenizer_r.add_tokens("testoken"), 1)
        self.assertEqual(tokenizer_r.add_tokens(["testoken1", "testtoken2"]), 2)
        self.assertEqual(len(tokenizer_r), vocab_size + 3)
        self.assertEqual(tokenizer_r.add_special_tokens({}), 0)
        self.assertEqual(tokenizer_r.add_special_tokens({"bos_token": "[BOS]", "eos_token": "[EOS]"}), 2)
        # additional_special_tokens must be a list, not a bare string.
        self.assertRaises(
            AssertionError, tokenizer_r.add_special_tokens, {"additional_special_tokens": "<testtoken1>"}
        )
        self.assertEqual(tokenizer_r.add_special_tokens({"additional_special_tokens": ["<testtoken2>"]}), 1)
        self.assertEqual(
            tokenizer_r.add_special_tokens({"additional_special_tokens": ["<testtoken3>", "<testtoken4>"]}), 2
        )
        self.assertIn("<testtoken3>", tokenizer_r.special_tokens_map["additional_special_tokens"])
        self.assertIsInstance(tokenizer_r.special_tokens_map["additional_special_tokens"], list)
        self.assertGreaterEqual(len(tokenizer_r.special_tokens_map["additional_special_tokens"]), 2)
        self.assertEqual(len(tokenizer_r), vocab_size + 8)

    def test_compare_add_special_tokens(self):
        if not self.test_rust_tokenizer:
            self.skipTest(reason="test_rust_tokenizer is set to False")
        tokenizer_r = self.get_rust_tokenizer()
        simple_num_special_tokens_to_add = tokenizer_r.num_special_tokens_to_add(pair=False)
        for text in ["", " "]:
            # tokenize()
            no_special_tokens = tokenizer_r.tokenize(text, add_special_tokens=False)
            with_special_tokens = tokenizer_r.tokenize(text, add_special_tokens=True)
            self.assertEqual(len(no_special_tokens), len(with_special_tokens) - simple_num_special_tokens_to_add)
            # Single input
            no_special_tokens = tokenizer_r(text, add_special_tokens=False)
            with_special_tokens = tokenizer_r(text, add_special_tokens=True)
            for key in no_special_tokens:
                self.assertEqual(
                    len(no_special_tokens[key]),
                    len(with_special_tokens[key]) - simple_num_special_tokens_to_add,
                )
            # Batched input
            no_special_tokens = tokenizer_r([text, text], add_special_tokens=False)
            with_special_tokens = tokenizer_r([text, text], add_special_tokens=True)
            for key in no_special_tokens:
                for i_no, i_with in zip(no_special_tokens[key], with_special_tokens[key]):
                    self.assertEqual(len(i_no), len(i_with) - simple_num_special_tokens_to_add)

    def test_special_tokens_initialization(self):
        if not self.test_rust_tokenizer:
            self.skipTest(reason="test_rust_tokenizer is set to False")
        added_tokens = [AddedToken("<special>", lstrip=True)]
        tokenizer_r = self.get_rust_tokenizer(additional_special_tokens=added_tokens)
        r_output = tokenizer_r.encode("Hey this is a <special> token")
        special_token_id = tokenizer_r.encode("<special>", add_special_tokens=False)[0]
        self.assertTrue(special_token_id in r_output)

    def test_special_token_addition(self):
        tokenizer = self.get_tokenizer()
        # Create tokenizer and add an extra special token
        tokenizer.add_special_tokens({"extra_special_tokens": ["<tok>"]})
        self.assertEqual(tokenizer.extra_special_tokens, ["<tok>"])
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir)
            # Load the above tokenizer and add the same special token a second time
            tokenizer_2 = self.tokenizer_class.from_pretrained(tmp_dir)
            tokenizer_2.add_special_tokens({"extra_special_tokens": ["<tok>"]})
            self.assertEqual(tokenizer_2.extra_special_tokens, ["<tok>"])
            # By default, adding extra_special_tokens REPLACES the previous list.
            tokenizer_2.add_special_tokens({"extra_special_tokens": ["<tok>", "<other>"]})
            self.assertEqual(tokenizer_2.extra_special_tokens, ["<tok>", "<other>"])
            tokenizer_2.add_special_tokens({"extra_special_tokens": ["<other>", "<another>"]})
            self.assertEqual(tokenizer_2.extra_special_tokens, ["<other>", "<another>"])
            # With replace_extra_special_tokens=False the new tokens are appended instead.
            tokenizer_2.add_special_tokens(
                {"extra_special_tokens": ["<tok>"]},
                replace_extra_special_tokens=False,
            )
            self.assertEqual(tokenizer_2.extra_special_tokens, ["<other>", "<another>", "<tok>"])

    def test_alignment_methods(self):
        tokenizer_r = self.get_tokenizer()
        words = ["Wonderful", "no", "inspiration", "example", "with", "subtoken"]
        text = " ".join(words)
        batch_size = 3
        encoding = tokenizer_r(text, add_special_tokens=False)
        batch_encoding = tokenizer_r([text] * batch_size, add_special_tokens=False)
        num_tokens = len(encoding["input_ids"])
        last_word_index = len(words) - 1
        last_token_index = num_tokens - 1
        last_batch_index = batch_size - 1
        last_char_index = len(text) - 1
        # words, tokens
        self.assertEqual(len(encoding.words(0)), num_tokens)
        self.assertEqual(max(encoding.words(0)), last_word_index)
        self.assertEqual(min(encoding.words(0)), 0)
        self.assertEqual(len(batch_encoding.words(last_batch_index)), num_tokens)
        self.assertEqual(max(batch_encoding.words(last_batch_index)), last_word_index)
        self.assertEqual(min(batch_encoding.words(last_batch_index)), 0)
        self.assertEqual(len(encoding.tokens(0)), num_tokens)
        # Assert token_to_word
        self.assertEqual(encoding.token_to_word(0), 0)
        self.assertEqual(encoding.token_to_word(0, 0), 0)
        self.assertEqual(encoding.token_to_word(last_token_index), last_word_index)
        self.assertEqual(encoding.token_to_word(0, last_token_index), last_word_index)
        self.assertEqual(batch_encoding.token_to_word(1, 0), 0)
        self.assertEqual(batch_encoding.token_to_word(0, last_token_index), last_word_index)
        self.assertEqual(batch_encoding.token_to_word(last_batch_index, last_token_index), last_word_index)
        # Assert word_to_tokens
        self.assertEqual(encoding.word_to_tokens(0).start, 0)
        self.assertEqual(encoding.word_to_tokens(0, 0).start, 0)
        self.assertEqual(encoding.word_to_tokens(last_word_index).end, last_token_index + 1)
        self.assertEqual(encoding.word_to_tokens(0, last_word_index).end, last_token_index + 1)
        self.assertEqual(batch_encoding.word_to_tokens(1, 0).start, 0)
        self.assertEqual(batch_encoding.word_to_tokens(0, last_word_index).end, last_token_index + 1)
        self.assertEqual(batch_encoding.word_to_tokens(last_batch_index, last_word_index).end, last_token_index + 1)
        # Assert token_to_chars
        self.assertEqual(encoding.token_to_chars(0).start, 0)
        self.assertEqual(encoding.token_to_chars(0, 0).start, 0)
        self.assertEqual(encoding.token_to_chars(last_token_index).end, last_char_index + 1)
        self.assertEqual(encoding.token_to_chars(0, last_token_index).end, last_char_index + 1)
        self.assertEqual(batch_encoding.token_to_chars(1, 0).start, 0)
        self.assertEqual(batch_encoding.token_to_chars(0, last_token_index).end, last_char_index + 1)
        self.assertEqual(batch_encoding.token_to_chars(last_batch_index, last_token_index).end, last_char_index + 1)
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/test_sentencepiece_backend_mixin.py",
"license": "Apache License 2.0",
"lines": 305,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:tests/test_tokenizers_backend_mixin.py | # Optionally test tokenizers-backend API in transformers
import inspect
import shutil
import tempfile
from typing import TYPE_CHECKING
from parameterized import parameterized
from transformers import TokenizersBackend
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
# Tiny two-batch corpus for train_new_from_iterator tests; the content is arbitrary
# but deliberately mixes punctuation, symbols and digits.
SMALL_TRAINING_CORPUS = [
    ["This is the first sentence.", "This is the second one."],
    ["This sentence (contains #) over symbols and numbers 12 3.", "But not this one."],
]

# Placeholder for typing-only imports.
if TYPE_CHECKING:
    pass
class TokenizersBackendTesterMixin:
"""
Tests that specifically test the tokenizers-backend.
These tests don't need to be run for every model, just once to verify the backend works correctly.
"""
tokenizer_class = None
rust_tokenizer_class = None
from_pretrained_id = None
from_pretrained_kwargs = None
@classmethod
def setUpClass(cls) -> None:
    """Normalize class attributes and pre-save the first checkpoint to a temp dir."""
    # Allow subclasses to declare a single checkpoint id as a plain string.
    cls.from_pretrained_id = (
        [cls.from_pretrained_id] if isinstance(cls.from_pretrained_id, str) else cls.from_pretrained_id
    )
    # Use rust_tokenizer_class if set, otherwise fall back to tokenizer_class
    tokenizer_class = getattr(cls, "rust_tokenizer_class", None) or getattr(cls, "tokenizer_class", None)
    # (class, checkpoint id, kwargs) triples iterated by the individual tests.
    cls.tokenizers_list = [
        (
            tokenizer_class,
            pretrained_id,
            cls.from_pretrained_kwargs if cls.from_pretrained_kwargs is not None else {},
        )
        for pretrained_id in (cls.from_pretrained_id or [])
    ]
    cls.tmpdirname = tempfile.mkdtemp()
    # save the first pretrained tokenizer to tmpdirname for tests to use
    if cls.from_pretrained_id and tokenizer_class is not None:
        try:
            from transformers import AutoTokenizer

            tokenizer = AutoTokenizer.from_pretrained(
                cls.from_pretrained_id[0],
                **(cls.from_pretrained_kwargs if cls.from_pretrained_kwargs is not None else {}),
            )
            tokenizer.save_pretrained(cls.tmpdirname)
        except Exception as e:
            # Best effort: tests relying on the local checkpoint will fail loudly later.
            print(f"Could not setup tokenizer: {e}")
@classmethod
def tearDownClass(cls):
    """Remove the temporary checkpoint directory created in ``setUpClass``."""
    tmp_path = cls.tmpdirname
    # ignore_errors: the directory may already be partially gone.
    shutil.rmtree(tmp_path, ignore_errors=True)
@classmethod
def get_rust_tokenizer(cls, pretrained_name=None, **kwargs) -> TokenizersBackend:
    """Instantiate the fast tokenizer, defaulting to the class-level temp checkpoint."""
    target = pretrained_name if pretrained_name else cls.tmpdirname
    backend_cls = getattr(cls, "rust_tokenizer_class", None) or getattr(cls, "tokenizer_class", None)
    return backend_cls.from_pretrained(target, **kwargs)
def test_alignment_methods(self):
    """Exercise the token/word/char alignment helpers of the fast backend's BatchEncoding."""
    for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
        with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
            tokenizer_r = self.get_rust_tokenizer(pretrained_name, **kwargs)
            words = ["Wonderful", "no", "inspiration", "example", "with", "subtoken"]
            text = " ".join(words)
            batch_size = 3
            encoding = tokenizer_r(text, add_special_tokens=False)
            batch_encoding = tokenizer_r([text] * batch_size, add_special_tokens=False)
            num_tokens = len(encoding["input_ids"])
            last_word_index = len(words) - 1
            last_token_index = num_tokens - 1
            last_batch_index = batch_size - 1
            last_char_index = len(text) - 1
            # words, tokens
            self.assertEqual(len(encoding.word_ids(0)), num_tokens)
            # word_ids may contain None for special tokens, hence the filtering.
            word_ids = [w for w in encoding.word_ids(0) if w is not None]
            self.assertEqual(max(word_ids), last_word_index)
            self.assertEqual(min(word_ids), 0)
            batch_word_ids = [w for w in batch_encoding.word_ids(last_batch_index) if w is not None]
            self.assertEqual(len(batch_encoding.word_ids(last_batch_index)), num_tokens)
            self.assertEqual(max(batch_word_ids), last_word_index)
            self.assertEqual(min(batch_word_ids), 0)
            self.assertEqual(len(encoding.tokens(0)), num_tokens)
            # Assert token_to_word
            self.assertEqual(encoding.token_to_word(0), 0)
            self.assertEqual(encoding.token_to_word(0, 0), 0)
            self.assertEqual(encoding.token_to_word(last_token_index), last_word_index)
            self.assertEqual(encoding.token_to_word(0, last_token_index), last_word_index)
            self.assertEqual(batch_encoding.token_to_word(1, 0), 0)
            self.assertEqual(batch_encoding.token_to_word(0, last_token_index), last_word_index)
            self.assertEqual(batch_encoding.token_to_word(last_batch_index, last_token_index), last_word_index)
            # Assert word_to_tokens
            self.assertEqual(encoding.word_to_tokens(0).start, 0)
            self.assertEqual(encoding.word_to_tokens(0, 0).start, 0)
            self.assertEqual(encoding.word_to_tokens(last_word_index).end, last_token_index + 1)
            self.assertEqual(encoding.word_to_tokens(0, last_word_index).end, last_token_index + 1)
            self.assertEqual(batch_encoding.word_to_tokens(1, 0).start, 0)
            self.assertEqual(batch_encoding.word_to_tokens(0, last_word_index).end, last_token_index + 1)
            self.assertEqual(
                batch_encoding.word_to_tokens(last_batch_index, last_word_index).end, last_token_index + 1
            )
            # Assert token_to_chars
            self.assertEqual(encoding.token_to_chars(0).start, 0)
            self.assertEqual(encoding.token_to_chars(0, 0).start, 0)
            self.assertEqual(encoding.token_to_chars(last_token_index).end, last_char_index + 1)
            self.assertEqual(encoding.token_to_chars(0, last_token_index).end, last_char_index + 1)
            self.assertEqual(batch_encoding.token_to_chars(1, 0).start, 0)
            self.assertEqual(batch_encoding.token_to_chars(0, last_token_index).end, last_char_index + 1)
            self.assertEqual(
                batch_encoding.token_to_chars(last_batch_index, last_token_index).end, last_char_index + 1
            )
            # Assert char_to_token
            self.assertEqual(encoding.char_to_token(0), 0)
            self.assertEqual(encoding.char_to_token(0, 0), 0)
            self.assertEqual(encoding.char_to_token(last_char_index), last_token_index)
            self.assertEqual(encoding.char_to_token(0, last_char_index), last_token_index)
            self.assertEqual(batch_encoding.char_to_token(1, 0), 0)
            self.assertEqual(batch_encoding.char_to_token(0, last_char_index), last_token_index)
            self.assertEqual(batch_encoding.char_to_token(last_batch_index, last_char_index), last_token_index)
            # Assert char_to_word
            self.assertEqual(encoding.char_to_word(0), 0)
            self.assertEqual(encoding.char_to_word(0, 0), 0)
            self.assertEqual(encoding.char_to_word(last_char_index), last_word_index)
            self.assertEqual(encoding.char_to_word(0, last_char_index), last_word_index)
            self.assertEqual(batch_encoding.char_to_word(1, 0), 0)
            self.assertEqual(batch_encoding.char_to_word(0, last_char_index), last_word_index)
            self.assertEqual(batch_encoding.char_to_word(last_batch_index, last_char_index), last_word_index)
            # Assert word_to_chars
            self.assertEqual(encoding.word_to_chars(0).start, 0)
            self.assertEqual(encoding.word_to_chars(0, 0).start, 0)
            self.assertEqual(encoding.word_to_chars(last_word_index).end, last_char_index + 1)
            self.assertEqual(encoding.word_to_chars(0, last_word_index).end, last_char_index + 1)
            self.assertEqual(batch_encoding.word_to_chars(1, 0).start, 0)
            self.assertEqual(batch_encoding.word_to_chars(0, last_word_index).end, last_char_index + 1)
            self.assertEqual(
                batch_encoding.word_to_chars(last_batch_index, last_word_index).end, last_char_index + 1
            )
            # Assert token_to_sequence
            self.assertEqual(encoding.token_to_sequence(num_tokens // 2), 0)
            self.assertEqual(encoding.token_to_sequence(0, num_tokens // 2), 0)
            self.assertEqual(batch_encoding.token_to_sequence(1, num_tokens // 2), 0)
            self.assertEqual(batch_encoding.token_to_sequence(0, num_tokens // 2), 0)
            self.assertEqual(batch_encoding.token_to_sequence(last_batch_index, num_tokens // 2), 0)
            # Pair of input sequences
            words = ["Wonderful", "no", "inspiration", "example", "with", "subtoken"]
            text = " ".join(words)
            pair_words = ["Amazing", "example", "full", "of", "inspiration"]
            pair_text = " ".join(pair_words)
            batch_size = 3
            # "inspiration" appears in both sequences: alignment must disambiguate by sequence_index.
            index_word_in_first_seq = words.index("inspiration")
            index_word_in_pair_seq = pair_words.index("inspiration")
            index_char_in_first_seq = text.find("inspiration")
            index_char_in_pair_seq = pair_text.find("inspiration")
            pair_encoding = tokenizer_r(text, pair_text, add_special_tokens=False)
            pair_batch_encoding = tokenizer_r(
                [text] * batch_size, [pair_text] * batch_size, add_special_tokens=False
            )
            num_tokens = len(encoding["input_ids"])
            last_word_index = len(words) - 1
            last_token_index = num_tokens - 1
            last_batch_index = batch_size - 1
            last_char_index = len(text) - 1
            # Assert word_to_tokens
            self.assertNotEqual(
                pair_encoding.word_to_tokens(index_word_in_first_seq, sequence_index=0).start,
                pair_encoding.word_to_tokens(index_word_in_pair_seq, sequence_index=1).start,
            )
            self.assertEqual(
                pair_encoding["input_ids"][
                    pair_encoding.word_to_tokens(index_word_in_first_seq, sequence_index=0).start
                ],
                pair_encoding["input_ids"][
                    pair_encoding.word_to_tokens(index_word_in_pair_seq, sequence_index=1).start
                ],
            )
            self.assertNotEqual(
                pair_batch_encoding.word_to_tokens(1, index_word_in_first_seq, sequence_index=0).start,
                pair_batch_encoding.word_to_tokens(1, index_word_in_pair_seq, sequence_index=1).start,
            )
            self.assertEqual(
                pair_batch_encoding["input_ids"][1][
                    pair_batch_encoding.word_to_tokens(1, index_word_in_first_seq, sequence_index=0).start
                ],
                pair_batch_encoding["input_ids"][1][
                    pair_batch_encoding.word_to_tokens(1, index_word_in_pair_seq, sequence_index=1).start
                ],
            )
            # Assert char_to_token
            self.assertNotEqual(
                pair_encoding.char_to_token(index_char_in_first_seq, sequence_index=0),
                pair_encoding.char_to_token(index_char_in_pair_seq, sequence_index=1),
            )
            self.assertEqual(
                pair_encoding["input_ids"][pair_encoding.char_to_token(index_char_in_first_seq, sequence_index=0)],
                pair_encoding["input_ids"][pair_encoding.char_to_token(index_char_in_pair_seq, sequence_index=1)],
            )
            self.assertNotEqual(
                pair_batch_encoding.char_to_token(1, index_char_in_first_seq, sequence_index=0),
                pair_batch_encoding.char_to_token(1, index_char_in_pair_seq, sequence_index=1),
            )
            self.assertEqual(
                pair_batch_encoding["input_ids"][1][
                    pair_batch_encoding.char_to_token(1, index_char_in_first_seq, sequence_index=0)
                ],
                pair_batch_encoding["input_ids"][1][
                    pair_batch_encoding.char_to_token(1, index_char_in_pair_seq, sequence_index=1)
                ],
            )
            # Assert char_to_word
            self.assertNotEqual(
                pair_encoding.char_to_word(index_char_in_first_seq, sequence_index=0),
                pair_encoding.char_to_word(index_char_in_pair_seq, sequence_index=1),
            )
            self.assertEqual(
                words[pair_encoding.char_to_word(index_char_in_first_seq, sequence_index=0)],
                pair_words[pair_encoding.char_to_word(index_char_in_pair_seq, sequence_index=1)],
            )
            self.assertNotEqual(
                pair_batch_encoding.char_to_word(1, index_char_in_first_seq, sequence_index=0),
                pair_batch_encoding.char_to_word(1, index_char_in_pair_seq, sequence_index=1),
            )
            self.assertEqual(
                words[pair_batch_encoding.char_to_word(1, index_char_in_first_seq, sequence_index=0)],
                pair_words[pair_batch_encoding.char_to_word(1, index_char_in_pair_seq, sequence_index=1)],
            )
            # Assert word_to_chars
            self.assertNotEqual(
                pair_encoding.word_to_chars(index_word_in_first_seq, sequence_index=0).start,
                pair_encoding.word_to_chars(index_word_in_pair_seq, sequence_index=1).start,
            )
            self.assertEqual(
                text[pair_encoding.word_to_chars(index_word_in_first_seq, sequence_index=0).start],
                pair_text[pair_encoding.word_to_chars(index_word_in_pair_seq, sequence_index=1).start],
            )
            self.assertNotEqual(
                pair_batch_encoding.word_to_chars(1, index_word_in_first_seq, sequence_index=0).start,
                pair_batch_encoding.word_to_chars(1, index_word_in_pair_seq, sequence_index=1).start,
            )
            self.assertEqual(
                text[pair_batch_encoding.word_to_chars(1, index_word_in_first_seq, sequence_index=0).start],
                pair_text[pair_batch_encoding.word_to_chars(1, index_word_in_pair_seq, sequence_index=1).start],
            )
            # Assert token_to_sequence
            pair_encoding = tokenizer_r(text, pair_text, add_special_tokens=True)
            pair_sequence_ids = [
                pair_encoding.token_to_sequence(i) for i in range(len(pair_encoding["input_ids"]))
            ]
            self.assertIn(0, pair_sequence_ids)
            self.assertIn(1, pair_sequence_ids)
            # Special tokens belong to no sequence, so None must appear when any are added.
            if tokenizer_r.num_special_tokens_to_add(pair=True):
                self.assertIn(None, pair_sequence_ids)
            pair_batch_encoding = tokenizer_r(
                [text] * batch_size, [pair_text] * batch_size, add_special_tokens=True
            )
            pair_batch_sequence_ids = [
                pair_batch_encoding.token_to_sequence(1, i)
                for i in range(len(pair_batch_encoding["input_ids"][0]))
            ]
            self.assertIn(0, pair_batch_sequence_ids)
            self.assertIn(1, pair_batch_sequence_ids)
            if tokenizer_r.num_special_tokens_to_add(pair=True):
                self.assertIn(None, pair_batch_sequence_ids)
def test_offsets_mapping(self):
    """Check that offset mappings and special-token masks are consistent for single and paired inputs."""
    for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
        with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
            tokenizer_r = self.get_rust_tokenizer(pretrained_name, **kwargs)

            sample_text = "Wonderful no inspiration example with subtoken"
            sample_pair = "Along with an awesome pair"

            # Exercise both the single-sequence and the sequence-pair paths.
            for encode_args, is_pair in (((sample_text,), False), ((sample_text, sample_pair), True)):
                tokens_with_offsets = tokenizer_r(
                    *encode_args,
                    return_special_tokens_mask=True,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )
                expected_added = tokenizer_r.num_special_tokens_to_add(is_pair)
                offsets = tokens_with_offsets["offset_mapping"]
                # Every token must carry exactly one offset pair ...
                self.assertEqual(len(offsets), len(tokens_with_offsets["input_ids"]))
                # ... and the mask must flag only the added special tokens.
                self.assertEqual(sum(tokens_with_offsets["special_tokens_mask"]), expected_added)
def test_training_new_tokenizer(self):
    """Train a new fast tokenizer from a small corpus and check it keeps the original's parameters."""
    # This feature only exists for fast tokenizers.
    base_tokenizer = self.get_rust_tokenizer()
    trained_tokenizer = base_tokenizer.train_new_from_iterator(SMALL_TRAINING_CORPUS, 100)

    # The trained tokenizer must handle text that never appeared in the training corpus.
    encoded = trained_tokenizer(["This is the first sentence", "This sentence is different 🤗."])
    self.assertEqual(len(encoded["input_ids"]), 2)

    round_trip = trained_tokenizer.decode(encoded["input_ids"][0], skip_special_tokens=True)
    expected_result = "This is the first sentence"
    if base_tokenizer.backend_tokenizer.normalizer is not None:
        expected_result = base_tokenizer.backend_tokenizer.normalizer.normalize_str(expected_result)
    self.assertEqual(expected_result, round_trip)

    # Training must not alter the tokenizer's structural parameters:
    # number of added special tokens for both single and paired inputs ...
    self.assertEqual(base_tokenizer.num_special_tokens_to_add(False), trained_tokenizer.num_special_tokens_to_add(False))
    self.assertEqual(base_tokenizer.num_special_tokens_to_add(True), trained_tokenizer.num_special_tokens_to_add(True))
    # ... the maximum lengths ...
    self.assertEqual(base_tokenizer.max_len_single_sentence, trained_tokenizer.max_len_single_sentence)
    self.assertEqual(base_tokenizer.max_len_sentences_pair, trained_tokenizer.max_len_sentences_pair)
    # ... and the special tokens themselves, since we did not ask to change them.
    self.assertSequenceEqual(base_tokenizer.all_special_tokens, trained_tokenizer.all_special_tokens)
    self.assertDictEqual(base_tokenizer.special_tokens_map, trained_tokenizer.special_tokens_map)
def test_training_new_tokenizer_with_special_tokens_change(self):
    """Check that `train_new_from_iterator` honors a `special_tokens_map` remapping.

    Covers: remapping a single token (cls), remapping every defined special token
    at once, and verifying the trained tokenizer still encodes/decodes unseen text.
    """
    # This feature only exists for fast tokenizers
    tokenizer = self.get_rust_tokenizer()

    # Test with a special tokens map
    class_signature = inspect.signature(tokenizer.__class__)
    if "cls_token" in class_signature.parameters:
        # Remap only the CLS token and verify both the token string and its vocab id.
        new_tokenizer = tokenizer.train_new_from_iterator(
            SMALL_TRAINING_CORPUS, 100, special_tokens_map={tokenizer.cls_token: "<cls>"}
        )
        cls_id = new_tokenizer.get_vocab()["<cls>"]
        self.assertEqual(new_tokenizer.cls_token, "<cls>")
        self.assertEqual(new_tokenizer.cls_token_id, cls_id)

    # Create a new mapping from the special tokens defined in the original tokenizer
    special_tokens_list = PreTrainedTokenizerBase.SPECIAL_TOKENS_ATTRIBUTES.copy()
    if "additional_special_tokens" in special_tokens_list:
        special_tokens_list.remove("additional_special_tokens")
    special_tokens_map = {}
    for token in special_tokens_list:
        if getattr(tokenizer, token) is not None:
            special_token = getattr(tokenizer, token)
            # Map every defined special token "<x>" to "<x>a".
            special_tokens_map[special_token] = f"{special_token}a"

    # Train new tokenizer
    new_tokenizer = tokenizer.train_new_from_iterator(
        SMALL_TRAINING_CORPUS, 100, special_tokens_map=special_tokens_map
    )

    # Check the changes
    for token in special_tokens_list:
        # Get the private one to avoid unnecessary warnings.
        if getattr(tokenizer, token) is None:
            continue
        special_token = getattr(tokenizer, token)
        if special_token in special_tokens_map:
            # Both the attribute value and the corresponding vocab id must reflect the remap.
            new_special_token = getattr(new_tokenizer, token)
            self.assertEqual(special_tokens_map[special_token], new_special_token)

            new_id = new_tokenizer.get_vocab()[new_special_token]
            self.assertEqual(getattr(new_tokenizer, f"{token}_id"), new_id)

    # Check if the special tokens have been kept (all_special_tokens returns strings)
    for special_token in tokenizer.all_special_tokens:
        if special_token not in special_tokens_map:
            # The special token must appear identically in the list of the new tokenizer.
            self.assertTrue(
                special_token in new_tokenizer.all_special_tokens,
                f"'{special_token}' should be in {new_tokenizer.all_special_tokens}",
            )
        else:
            # The special token must appear in the list of the new tokenizer with the new mapping.
            self.assertTrue(special_tokens_map[special_token] in new_tokenizer.all_special_tokens)

    # Test we can use the new tokenizer with something not seen during training
    inputs = new_tokenizer(["This is the first sentence", "This sentence is different 🤗."])
    self.assertEqual(len(inputs["input_ids"]), 2)
    decoded_input = new_tokenizer.decode(inputs["input_ids"][0], skip_special_tokens=True)
    expected_result = "This is the first sentence"
    if tokenizer.backend_tokenizer.normalizer is not None:
        expected_result = tokenizer.backend_tokenizer.normalizer.normalize_str(expected_result)
    self.assertEqual(expected_result, decoded_input)
@parameterized.expand([(True,), (False,)])
def test_rust_tokenizer_add_prefix_space(self, add_prefix_space):
    """Check that `add_prefix_space` is propagated to the tokenizer and its backend pre-tokenizer."""
    for tokenizer, pretrained_name, _ in self.tokenizers_list:
        loaded = tokenizer.from_pretrained(pretrained_name, add_prefix_space=add_prefix_space)
        # The kwarg must be reflected on the tokenizer object itself ...
        self.assertEqual(loaded.add_prefix_space, add_prefix_space)
        # ... and on the backend pre-tokenizer when it exposes the attribute
        # (only the ByteLevel pre-tokenizer has `add_prefix_space`).
        backend_pre_tokenizer = loaded.backend_tokenizer.pre_tokenizer
        if hasattr(backend_pre_tokenizer, "add_prefix_space"):
            self.assertEqual(backend_pre_tokenizer.add_prefix_space, add_prefix_space)
def test_add_bos_token_without_bos_token(self):
    """
    Test that setting add_bos_token=True when bos_token=None silently disables add_bos_token.
    """
    tokenizer_r = self.get_rust_tokenizer()

    # Round-trip through disk so the tokenizer can be reloaded with bos_token removed.
    with tempfile.TemporaryDirectory() as tmpdir:
        tokenizer_r.save_pretrained(tmpdir)
        tokenizer_class = getattr(self, "rust_tokenizer_class", None) or getattr(self, "tokenizer_class", None)
        tokenizer_no_bos = tokenizer_class.from_pretrained(tmpdir, bos_token=None)

        self.assertIsNone(tokenizer_no_bos.bos_token)

        # Enabling the flag without a BOS token must be a silent no-op.
        tokenizer_no_bos.add_bos_token = True
        self.assertFalse(tokenizer_no_bos.add_bos_token)

        # The tokenizer must remain fully usable for encode/decode.
        encoded = tokenizer_no_bos("Hello world")
        self.assertIsNotNone(encoded["input_ids"])
        decoded = tokenizer_no_bos.decode(encoded["input_ids"], skip_special_tokens=True)
        self.assertIsInstance(decoded, str)
def test_local_files_only(self):
    """Check that a cached tokenizer loads identically with local_files_only=True."""
    from transformers import AutoTokenizer

    for pretrained_name in getattr(self, "from_pretrained_id", []) or []:
        with self.subTest(f"AutoTokenizer ({pretrained_name})"):
            try:
                # Populate the cache first, then force a cache-only load.
                tokenizer_cached = AutoTokenizer.from_pretrained(pretrained_name)
                tokenizer_local = AutoTokenizer.from_pretrained(pretrained_name, local_files_only=True)
                # Both load paths must yield identical tokenizers.
                self.assertEqual(tokenizer_cached.get_vocab(), tokenizer_local.get_vocab())
                self.assertEqual(
                    tokenizer_cached.all_special_tokens_extended,
                    tokenizer_local.all_special_tokens_extended,
                )
            except Exception as e:
                # if the pretrained model is not loadable how could it pass locally :)
                print(f"Could not load tokenizer model: {e}")
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/test_tokenizers_backend_mixin.py",
"license": "Apache License 2.0",
"lines": 412,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:src/transformers/models/nanochat/configuration_nanochat.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ...configuration_utils import PretrainedConfig
from ...modeling_rope_utils import RopeParameters
class NanoChatConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`NanoChatModel`]. It is used to instantiate a
    NanoChat model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of the [karpathy/nanochat-d32](https://huggingface.co/karpathy/nanochat-d32).

    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 50304):
            Vocabulary size of the NanoChat model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`NanoChatModel`].
        hidden_size (`int`, *optional*, defaults to 768):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 8192):
            Dimension of the MLP representations. If `None`, it will be computed based on the model architecture.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer decoder.
        num_attention_heads (`int`, *optional*, defaults to 6):
            Number of attention heads for each attention layer in the Transformer decoder.
        num_key_value_heads (`int`, *optional*):
            This is the number of key_value heads that should be used to implement Grouped Query Attention. If
            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
            `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
            by meanpooling all the original heads within that group. For more details, check out [this
            paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to
            `num_attention_heads`.
        max_position_embeddings (`int`, *optional*, defaults to 2048):
            The maximum sequence length that this model might ever be used with.
        hidden_act (`str` or `function`, *optional*, defaults to `"relu2"`):
            The non-linear activation function (function or string) in the decoder.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        rms_norm_eps (`float`, *optional*, defaults to 1e-06):
            The epsilon used by the rms normalization layers.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        rope_parameters (`RopeParameters`, *optional*):
            Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain
            a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE
            with longer `max_position_embeddings`.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        final_logit_softcapping (`float`, *optional*, defaults to 15.0):
            scaling factor when applying tanh softcapping on the logits.
        attention_bias (`bool`, *optional*, defaults to `False`):
            Whether to use a bias in the query, key, and value projection layers during self-attention.
        bos_token_id (`int`, *optional*, defaults to 0):
            Beginning of stream token id.
        eos_token_id (`int`, *optional*, defaults to 1):
            End of stream token id.
        pad_token_id (`int`, *optional*, defaults to 1):
            Padding token id.
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether to tie weight embeddings

    ```python
    >>> from transformers import NanoChatModel, NanoChatConfig

    >>> # Initializing a NanoChat style configuration
    >>> configuration = NanoChatConfig()

    >>> # Initializing a model from the NanoChat style configuration
    >>> model = NanoChatModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "nanochat"
    keys_to_ignore_at_inference = ["past_key_values"]
    # Tensor-parallel sharding plan for the decoder sub-modules.
    base_model_tp_plan = {
        "layers.*.self_attn.q_proj": "colwise",
        "layers.*.self_attn.k_proj": "colwise",
        "layers.*.self_attn.v_proj": "colwise",
        "layers.*.self_attn.o_proj": "rowwise",
        "layers.*.mlp.fc1": "colwise",
        "layers.*.mlp.fc2": "rowwise",
    }

    def __init__(
        self,
        vocab_size: int = 50304,
        hidden_size: int = 768,
        intermediate_size: int | None = 8192,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 6,
        num_key_value_heads: int | None = None,
        max_position_embeddings: int = 2048,
        hidden_act: str = "relu2",
        attention_dropout: float = 0.0,
        rms_norm_eps: float = 1e-6,
        initializer_range: float = 0.02,
        rope_parameters: RopeParameters | dict | None = None,
        use_cache: bool = True,
        final_logit_softcapping: float | None = 15.0,
        attention_bias: bool = False,
        bos_token_id: int = 0,
        eos_token_id: int = 1,
        pad_token_id: int = 1,
        tie_word_embeddings: bool = False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.max_position_embeddings = max_position_embeddings
        self.hidden_act = hidden_act
        self.attention_dropout = attention_dropout
        self.rms_norm_eps = rms_norm_eps
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.final_logit_softcapping = final_logit_softcapping
        self.attention_bias = attention_bias
        self.rope_parameters = rope_parameters
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.tie_word_embeddings = tie_word_embeddings
        # All attributes must be set before delegating to the base class, which
        # consumes the remaining kwargs.
        super().__init__(**kwargs)


__all__ = ["NanoChatConfig"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/nanochat/configuration_nanochat.py",
"license": "Apache License 2.0",
"lines": 137,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/nanochat/convert_nanochat_checkpoints.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import gc
import json
import os
from pathlib import Path
import torch
from transformers import AutoTokenizer, NanoChatConfig, NanoChatForCausalLM
def infer_kv_heads(config: NanoChatConfig, state_dict: dict[str, torch.Tensor]) -> int:
    """Derive the number of key/value heads from the first layer's key projection weight.

    Falls back to `config.num_key_value_heads` when the weight is missing or its row
    count is not a multiple of the head dimension. Never returns less than 1.
    """
    key_weight = state_dict.get("transformer.h.0.attn.c_k.weight")
    if key_weight is None:
        return config.num_key_value_heads

    head_dim = config.hidden_size // config.num_attention_heads
    num_rows = key_weight.shape[0]
    # A non-integral row/head_dim ratio means the weight layout is unexpected;
    # keep the configured value in that case.
    if num_rows % head_dim:
        return config.num_key_value_heads

    inferred = num_rows // head_dim
    print(f"Inferred {inferred} key_value heads from checkpoint")
    return max(inferred, 1)
def convert_layer(old_prefix: str, new_prefix: str) -> dict[str, str]:
    """Build the rename map for one decoder layer's weights (original name -> HF name)."""
    # (original sub-key, HF sub-key) pairs for the attention and MLP projections.
    weight_pairs = (
        ("attn.c_q.weight", "self_attn.q_proj.weight"),
        ("attn.c_k.weight", "self_attn.k_proj.weight"),
        ("attn.c_v.weight", "self_attn.v_proj.weight"),
        ("attn.c_proj.weight", "self_attn.o_proj.weight"),
        ("mlp.c_fc.weight", "mlp.fc1.weight"),
        ("mlp.c_proj.weight", "mlp.fc2.weight"),
    )
    return {f"{old_prefix}.{old_sub}": f"{new_prefix}.{new_sub}" for old_sub, new_sub in weight_pairs}
def load_config_from_checkpoint(input_path: Path) -> NanoChatConfig:
    """Load config from either meta_*.json or config.json in the checkpoint directory.

    Resolution order:
      1. meta_*.json (original nanochat training metadata), optionally augmented
         with extra fields from a sibling config.json;
      2. config.json alone (HF-style config);
      3. otherwise raise ValueError.
    """
    # Try to find meta_*.json first
    meta_files = list(input_path.glob("meta_*.json"))
    if meta_files:
        # NOTE(review): if several meta_*.json files exist, the first glob hit is
        # used — glob order is not guaranteed to be deterministic across platforms.
        meta_file = meta_files[0]
        print(f"Loading config from {meta_file.name}")
        with open(meta_file, "r") as f:
            meta_config = json.load(f)
        # Extract model config from meta file
        if "model_config" in meta_config:
            model_config = meta_config["model_config"]
        else:
            model_config = meta_config
        # Map to NanoChat config parameters (original names: n_embd, n_layer, ...)
        config_kwargs = {
            "vocab_size": model_config.get("vocab_size", 50304),
            "hidden_size": model_config.get("n_embd", 768),
            "num_hidden_layers": model_config.get("n_layer", 12),
            "num_attention_heads": model_config.get("n_head", 6),
            "num_key_value_heads": model_config.get("n_kv_head"),
            "max_position_embeddings": model_config.get("sequence_len", 2048),
            "intermediate_size": model_config.get("intermediate_size", model_config.get("n_embd", 768) * 4),
        }
        # Try to load existing config.json for additional parameters
        config_file = input_path / "config.json"
        if config_file.exists():
            print("Loading additional config from config.json")
            with open(config_file, "r") as f:
                extra_config = json.load(f)
            # Add additional parameters from config.json; values here override
            # nothing from meta (the keys are disjoint from the mapping above,
            # except intermediate_size which config.json wins).
            for key in [
                "hidden_act",
                "attention_dropout",
                "rms_norm_eps",
                "initializer_range",
                "logits_soft_cap",
                "attention_bias",
                "intermediate_size",
                "bos_token_id",
                "eos_token_id",
                "pad_token_id",
            ]:
                if key in extra_config:
                    config_kwargs[key] = extra_config[key]
                # Handle legacy qkv_bias -> attention_bias conversion
                elif key == "attention_bias" and "qkv_bias" in extra_config:
                    config_kwargs[key] = extra_config["qkv_bias"]
            # Handle rope_theta as a direct kwarg for the rope_parameters processing
            if "rope_theta" in extra_config:
                config_kwargs["rope_theta"] = extra_config["rope_theta"]
            # Handle rope_parameters or rope_scaling if present
            if "rope_parameters" in extra_config:
                config_kwargs["rope_parameters"] = extra_config["rope_parameters"]
            elif "rope_scaling" in extra_config and extra_config["rope_scaling"] is not None:
                config_kwargs["rope_parameters"] = extra_config["rope_scaling"]
        config = NanoChatConfig(**config_kwargs)
    else:
        # Fallback to loading from config.json if it exists
        config_file = input_path / "config.json"
        if config_file.exists():
            print("Loading config from config.json")
            config = NanoChatConfig.from_pretrained(input_path)
            # Handle legacy qkv_bias -> attention_bias conversion
            if hasattr(config, "qkv_bias") and not hasattr(config, "attention_bias"):
                config.attention_bias = config.qkv_bias
        else:
            raise ValueError(f"No config file found in {input_path}. Expected meta_*.json or config.json")
    return config
def write_model(input_dir, output_dir):
    """Convert NanoChat model from original checkpoint format to HuggingFace format.

    Steps: load config (meta_*.json or config.json), load the raw state dict,
    infer key/value heads from the weights, rename every tensor to the HF layout,
    load into a NanoChatForCausalLM built on the meta device, save, and reload
    the saved model as a sanity check.
    """
    print("Converting the model.")
    os.makedirs(output_dir, exist_ok=True)
    input_path = Path(input_dir)

    # Load config
    config = load_config_from_checkpoint(input_path)
    print(f"Loaded config hidden_size={config.hidden_size} num_layers={config.num_hidden_layers}")

    # Load checkpoint - try model_*.pt first, then pytorch_model.bin
    checkpoint_files = list(input_path.glob("model_*.pt"))
    if checkpoint_files:
        checkpoint_path = checkpoint_files[0]
    else:
        checkpoint_path = input_path / "pytorch_model.bin"

    print(f"Fetching all parameters from the checkpoint at {checkpoint_path}...")
    old_state = torch.load(checkpoint_path, map_location="cpu")

    # Original nanochat weights are in bfloat16
    for key in old_state:
        if old_state[key].dtype == torch.float32:
            old_state[key] = old_state[key].to(torch.bfloat16)

    # Infer key-value heads from checkpoint
    inferred_kv = infer_kv_heads(config, old_state)
    config.num_key_value_heads = inferred_kv
    # If the configured head count is not divisible by the inferred kv heads,
    # force them equal (falls back to plain multi-head attention).
    if config.num_attention_heads % config.num_key_value_heads != 0:
        print(f"Adjusting num_attention_heads from {config.num_attention_heads} to {config.num_key_value_heads}")
        config.num_attention_heads = config.num_key_value_heads

    print("Converting model...")
    state_dict = {}
    rename_map = {}

    def assign(
        old_key: str,
        new_key: str,
        old_state: dict[str, torch.Tensor],
        state_dict: dict[str, torch.Tensor],
        rename_map: dict[str, str],
    ) -> None:
        # Copy one tensor under its HF name; silently skip missing keys and
        # record the rename so leftovers can be reported afterwards.
        tensor = old_state.get(old_key)
        if tensor is None:
            return
        state_dict[new_key] = tensor.clone()
        rename_map[old_key] = new_key

    # Convert embeddings and head
    assign("transformer.wte.weight", "model.embed_tokens.weight", old_state, state_dict, rename_map)
    assign("lm_head.weight", "lm_head.weight", old_state, state_dict, rename_map)

    # Convert layers
    for layer_idx in range(config.num_hidden_layers):
        old_prefix = f"transformer.h.{layer_idx}"
        new_prefix = f"model.layers.{layer_idx}"
        mapping = convert_layer(old_prefix, new_prefix)
        for old_key, new_key in mapping.items():
            assign(old_key, new_key, old_state, state_dict, rename_map)

    # Anything not renamed has no HF counterpart — report, don't fail.
    missing = [key for key in old_state.keys() if key not in rename_map]
    if missing:
        print(f"Skipped {len(missing)} legacy entries that have no equivalent in the shared implementation")

    del old_state
    gc.collect()

    # Update config
    config.torch_dtype = torch.bfloat16
    config.tie_word_embeddings = False

    # Load the checkpoint into the model
    print("Loading the checkpoint in a NanoChat model.")
    # Meta-device construction avoids allocating random init weights; assign=True
    # then adopts the converted tensors directly.
    with torch.device("meta"):
        model = NanoChatForCausalLM(config)
    model.load_state_dict(state_dict, strict=True, assign=True)
    print("Checkpoint loaded successfully.")
    if hasattr(model.config, "_name_or_path"):
        del model.config._name_or_path

    print("Saving the model.")
    model.save_pretrained(output_dir)
    del state_dict, model

    # Safety check: reload the converted model
    gc.collect()
    print("Reloading the model to check if it's saved correctly.")
    NanoChatForCausalLM.from_pretrained(output_dir, torch_dtype=torch.bfloat16, device_map="auto")
    print("Model reloaded successfully.")
def write_tokenizer(input_dir, output_dir):
    """Convert the tokenizer to HF format, falling back to copying raw tokenizer files."""
    input_path = Path(input_dir)

    def _copy_existing(filenames):
        # Best-effort byte copy of any tokenizer files already present.
        for filename in filenames:
            src = input_path / filename
            if src.exists():
                (Path(output_dir) / filename).write_bytes(src.read_bytes())

    tokenizer_pkl = input_path / "tokenizer.pkl"
    if tokenizer_pkl.exists():
        try:
            import pickle

            from transformers.integrations.tiktoken import convert_tiktoken_to_fast

            with open(tokenizer_pkl, "rb") as f:
                tok_pkl = pickle.load(f)
            convert_tiktoken_to_fast(tok_pkl, output_dir)
            print("Converted tokenizer.pkl to HuggingFace format")
        except Exception as e:
            print(f"Warning: Failed to convert tokenizer.pkl: {e}")
            # Fallback: copy tokenizer files if they exist
            _copy_existing(("tokenizer.json", "tokenizer_config.json"))
    else:
        # No pickle tokenizer, copy JSON files
        _copy_existing(("tokenizer.json", "tokenizer_config.json", "special_tokens_map.json"))
    print("Tokenizer saved successfully.")
def run_test(output_dir: str, prompt: str, max_new_tokens: int = 64) -> None:
    """Run a quick generation test to verify the converted model works correctly."""
    print(f"Running quick generation test with prompt: {prompt}")
    tokenizer = AutoTokenizer.from_pretrained(output_dir)
    model = NanoChatForCausalLM.from_pretrained(output_dir, torch_dtype=torch.bfloat16)
    model.eval()

    target_device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = model.to(target_device)
    inputs = tokenizer(prompt, return_tensors="pt").to(target_device)

    with torch.no_grad():
        output = model.generate(**inputs, max_new_tokens=max_new_tokens)

    # Decode only the freshly generated tokens, skipping the prompt prefix.
    prompt_length = inputs.input_ids.shape[1]
    generated = tokenizer.decode(output[0, prompt_length:], skip_special_tokens=True)
    print(f"Generated text: {generated}")
def main():
    """CLI entry point: convert model + tokenizer, optionally smoke-test generation."""
    parser = argparse.ArgumentParser(description="Convert NanoChat checkpoints to HuggingFace format")
    parser.add_argument(
        "--input_dir",
        type=str,
        required=True,
        help="Path to the original checkpoint directory",
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        required=True,
        help="Location to write HF model and tokenizer",
    )
    parser.add_argument(
        "--test_prompt",
        type=str,
        default=None,
        help="Optional prompt for a quick generation test",
    )
    args = parser.parse_args()

    write_model(args.input_dir, args.output_dir)
    write_tokenizer(args.input_dir, args.output_dir)
    if args.test_prompt:
        run_test(args.output_dir, args.test_prompt)


if __name__ == "__main__":
    main()
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/nanochat/convert_nanochat_checkpoints.py",
"license": "Apache License 2.0",
"lines": 254,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/nanochat/modular_nanochat.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from collections.abc import Callable
import torch
import torch.nn as nn
from ... import initialization as init
from ...cache_utils import Cache, DynamicCache
from ...masking_utils import create_causal_mask
from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...utils import TransformersKwargs, auto_docstring
from ..clip.modeling_clip import CLIPMLP
from ..gemma2.modeling_gemma2 import Gemma2ForCausalLM
from ..llama.modeling_llama import (
LlamaDecoderLayer,
LlamaModel,
LlamaPreTrainedModel,
LlamaRotaryEmbedding,
apply_rotary_pos_emb,
eager_attention_forward,
)
from ..llama4.modeling_llama4 import Llama4TextL2Norm
from ..qwen3.modeling_qwen3 import Qwen3Attention
from .configuration_nanochat import NanoChatConfig
class NanoChatRMSNorm(Llama4TextL2Norm):
    # Parameter-free RMS normalization, inherited unchanged from Llama4TextL2Norm.
    pass
class NanoChatRotaryEmbedding(LlamaRotaryEmbedding):
    # Rotary position embedding, inherited unchanged from LlamaRotaryEmbedding.
    pass
def rotate_half(x):
    """Rotates half the hidden dims of the input with flipped signs for NanoChat.

    Maps [a, b] (split along the last dim) to [b, -a] — note the sign is flipped on
    the FIRST half, unlike the usual Llama rotate_half which negates the second half.
    """
    half = x.shape[-1] // 2
    first_half = x[..., :half]
    second_half = x[..., half:]
    return torch.cat((second_half, -first_half), dim=-1)
class NanoChatAttention(Qwen3Attention):
    """Qwen3-style attention where q/k normalization is applied AFTER RoPE."""

    def __init__(self, config: NanoChatConfig, layer_idx: int):
        super().__init__(config, layer_idx)
        # NanoChat has no sliding-window / per-layer attention-type distinction.
        del self.sliding_window
        del self.layer_type
        # Replace the parent's q/k norms with NanoChat's parameter-free RMS norm.
        self.q_norm = NanoChatRMSNorm(eps=config.rms_norm_eps)
        self.k_norm = NanoChatRMSNorm(eps=config.rms_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: tuple[torch.Tensor, torch.Tensor] | None = None,
        attention_mask: torch.Tensor | None = None,
        past_key_values: Cache | None = None,
        cache_position: torch.LongTensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple[torch.Tensor, torch.Tensor | None]:
        # Project to (batch, heads, seq, head_dim).
        input_shape = hidden_states.shape[:-1]
        hidden_shape = (*input_shape, -1, self.head_dim)

        query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)

        cos, sin = position_embeddings
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)

        # RoPE -> Norm (instead of usual Norm -> RoPE)
        query_states = self.q_norm(query_states)
        key_states = self.k_norm(key_states)

        if past_key_values is not None:
            # sin and cos are specific to RoPE models; cache_position needed for the static cache
            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
            key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)

        # Dispatch to the configured attention backend (eager/sdpa/flash...).
        attention_interface: Callable = ALL_ATTENTION_FUNCTIONS.get_interface(
            self.config._attn_implementation, eager_attention_forward
        )
        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=0.0 if not self.training else self.attention_dropout,
            scaling=self.scaling,
            **kwargs,
        )

        attn_output = attn_output.reshape(*input_shape, -1).contiguous()
        attn_output = self.o_proj(attn_output)
        return attn_output, attn_weights
class NanoChatMLP(CLIPMLP):
    """CLIP-style MLP with the bias removed from both projections."""

    def __init__(self, config):
        super().__init__(config)
        # Override the parent's projections to drop the bias terms.
        self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size, bias=False)
        self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size, bias=False)
class NanoChatDecoderLayer(LlamaDecoderLayer):
    """Llama decoder layer using NanoChat's parameter-free RMS norms."""

    def __init__(self, config: NanoChatConfig, layer_idx: int):
        super().__init__()
        # Replace the parent's layer norms with the parameter-free variant.
        self.input_layernorm = NanoChatRMSNorm(eps=config.rms_norm_eps)
        self.post_attention_layernorm = NanoChatRMSNorm(eps=config.rms_norm_eps)
@auto_docstring
class NanoChatPreTrainedModel(LlamaPreTrainedModel):
    def _init_weights(self, module: nn.Module) -> None:
        """Default init, plus residual-scaled init for attention output projections."""
        PreTrainedModel._init_weights(self, module)
        if isinstance(module, NanoChatAttention):
            # Shrink o_proj's init std by sqrt(2 * num_layers) so residual-branch
            # contributions don't grow with depth.
            init.normal_(
                module.o_proj.weight,
                mean=0.0,
                std=self.config.initializer_range / math.sqrt(2 * self.config.num_hidden_layers),
            )
@auto_docstring
class NanoChatModel(LlamaModel):
    def __init__(self, config: NanoChatConfig):
        super().__init__(config)
        # Final norm is NanoChat's parameter-free RMS norm.
        self.norm = NanoChatRMSNorm(eps=config.rms_norm_eps)

    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: Cache | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        cache_position: torch.LongTensor | None = None,
        use_cache: bool | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> BaseModelOutputWithPast:
        """Decoder forward pass; note the extra norm applied BEFORE the layer stack."""
        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

        if inputs_embeds is None:
            inputs_embeds: torch.Tensor = self.embed_tokens(input_ids)

        # Lazily create a cache when caching is requested but none was supplied.
        if use_cache and past_key_values is None:
            past_key_values = DynamicCache(config=self.config)

        # Positions of the new tokens, offset by what's already in the cache.
        if cache_position is None:
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position: torch.Tensor = (
                torch.arange(inputs_embeds.shape[1], device=inputs_embeds.device) + past_seen_tokens
            )

        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)

        causal_mask = create_causal_mask(
            config=self.config,
            inputs_embeds=inputs_embeds,
            attention_mask=attention_mask,
            cache_position=cache_position,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )

        hidden_states = inputs_embeds
        position_embeddings = self.rotary_emb(hidden_states, position_ids=position_ids)

        hidden_states = self.norm(hidden_states)  # Additional norm before the layers

        for decoder_layer in self.layers[: self.config.num_hidden_layers]:
            hidden_states = decoder_layer(
                hidden_states,
                attention_mask=causal_mask,
                position_embeddings=position_embeddings,
                position_ids=position_ids,
                past_key_values=past_key_values,
                cache_position=cache_position,
                **kwargs,
            )

        # Final norm shares the same (parameter-free) module as the pre-layer norm.
        hidden_states = self.norm(hidden_states)

        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values,
        )
@auto_docstring
class NanoChatForCausalLM(Gemma2ForCausalLM):
    # Shard the LM head column-wise and gather outputs when using tensor parallelism.
    _tp_plan = {"lm_head": "colwise_gather_output"}

    def forward(self, **super_kwargs) -> CausalLMOutputWithPast:
        r"""
        Example:

        ```python
        >>> import torch
        >>> from transformers import AutoTokenizer, AutoModelForCausalLM

        >>> model = AutoModelForCausalLM.from_pretrained("karpathy/nanochat-d32")
        >>> tokenizer = AutoTokenizer.from_pretrained("karpathy/nanochat-d32")

        >>> conversation = [
        ...     {"role": "user", "content": "What is the capital of France?"},
        ... ]
        >>> inputs = tokenizer.apply_chat_template(
        ...     conversation, add_generation_prompt=True, tokenize=True, return_dict=True, return_tensors="pt"
        ... ).to(model.device)

        >>> with torch.no_grad():
        ...     outputs = model.generate(**inputs, max_new_tokens=64, do_sample=False)
        >>> generated_tokens = outputs[0, inputs["input_ids"].shape[1] :]
        >>> output = tokenizer.decode(generated_tokens, skip_special_tokens=True)
        ```"""
        # Fix: propagate the parent's output. Without `return`, this docstring-only
        # override would always yield None despite the CausalLMOutputWithPast annotation.
        return super().forward(**super_kwargs)
# Public API of this modular file; these names are re-exported by the generated
# modeling module (NanoChatPreTrainedModel is defined earlier in the file).
__all__ = [
    "NanoChatPreTrainedModel",
    "NanoChatModel",
    "NanoChatForCausalLM",
]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/nanochat/modular_nanochat.py",
"license": "Apache License 2.0",
"lines": 194,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:tests/models/nanochat/test_modeling_nanochat.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch NanoChat model."""
import unittest
from transformers import AutoTokenizer, NanoChatConfig, is_torch_available
from transformers.testing_utils import (
cleanup,
require_torch,
slow,
torch_device,
)
if is_torch_available():
import torch
from transformers import (
NanoChatForCausalLM,
NanoChatModel,
)
from ...causal_lm_tester import CausalLMModelTest, CausalLMModelTester
class NanoChatModelTester(CausalLMModelTester):
    # Wires the NanoChat config and model classes into the shared causal-LM tester.
    config_class = NanoChatConfig
    if is_torch_available():
        # Torch-backed classes are only referenced when torch can be imported.
        base_model_class = NanoChatModel
        causal_lm_class = NanoChatForCausalLM
@require_torch
class NanoChatModelTest(CausalLMModelTest, unittest.TestCase):
    # Runs the common causal-LM test suite against the NanoChat architecture.
    model_tester_class = NanoChatModelTester
@require_torch
class NanoChatIntegrationTest(unittest.TestCase):
    """Integration tests for NanoChat models using real checkpoints.

    The d20 and d32 checks share the same logic and only differ in the checkpoint,
    the expected reference values, and (for d32) the hub revision, so the shared
    steps live in the `_check_logits` / `_check_generation` helpers below.
    """

    def setUp(self):
        cleanup(torch_device, gc_collect=True)

    def tearDown(self):
        cleanup(torch_device, gc_collect=True)

    def _check_logits(self, model_id, expected_mean, expected_slice, revision=None):
        """Load `model_id` (optionally at `revision`) and compare logits on a fixed prompt.

        `expected_mean` is the per-token mean over the vocab dimension; `expected_slice`
        holds the first 10 logits at position [0, 0].
        """
        model = NanoChatForCausalLM.from_pretrained(
            model_id, device_map="auto", torch_dtype=torch.bfloat16, revision=revision
        )
        tokenizer = AutoTokenizer.from_pretrained(model_id, revision=revision)
        # Simple test input - "Hello world"
        input_ids = tokenizer.encode("Hello world", return_tensors="pt").to(model.device)
        with torch.no_grad():
            outputs = model(input_ids)
        logits = outputs.logits.float().cpu()
        # Basic shape checks: (batch, sequence, vocab).
        self.assertEqual(logits.shape[0], 1)
        self.assertEqual(logits.shape[1], input_ids.shape[1])
        self.assertEqual(logits.shape[2], model.config.vocab_size)
        # Logits must be finite.
        self.assertFalse(torch.isnan(logits).any())
        self.assertFalse(torch.isinf(logits).any())
        # Compare against reference values (tolerances allow for numerical variation).
        torch.testing.assert_close(logits.mean(-1), expected_mean, rtol=1e-3, atol=1e-3)
        torch.testing.assert_close(logits[0, 0, :10], expected_slice, rtol=1e-3, atol=1e-3)

    def _check_generation(self, model_id, expected_completions, revision=None):
        """Greedy-generate continuations for two fixed chats and compare to `expected_completions`."""
        tokenizer = AutoTokenizer.from_pretrained(model_id, revision=revision)
        model = NanoChatForCausalLM.from_pretrained(
            model_id, device_map="auto", torch_dtype=torch.bfloat16, revision=revision
        )
        conversation = [
            [{"role": "user", "content": "What is the capital of France?"}],
            [{"role": "user", "content": "Tell me something."}],
        ]
        inputs = tokenizer.apply_chat_template(
            conversation,
            add_generation_prompt=True,
            tokenize=True,
            return_dict=True,
            padding=True,
            tokenizer_kwargs={"padding_side": "left"},
            return_tensors="pt",
        ).to(model.device)
        # Greedy decoding keeps the output deterministic across runs.
        with torch.no_grad():
            generated_ids = model.generate(**inputs, max_new_tokens=32, do_sample=False)
        # Decode only the newly generated tokens (prompts are left-padded to equal length).
        prompt_length = inputs["input_ids"].shape[1]
        generated_text = [
            tokenizer.decode(generated_ids[i, prompt_length:], skip_special_tokens=True)
            for i in range(len(expected_completions))
        ]
        self.assertEqual(expected_completions, generated_text)

    @slow
    def test_model_d20_logits(self):
        """Test that d20 model logits are computed correctly."""
        self._check_logits(
            "nanochat-students/nanochat-d20",
            expected_mean=torch.tensor([[-6.6607, -7.8095]]),
            expected_slice=torch.tensor(
                [-12.8750, -13.0625, -13.1875, -13.1875, -13.1875, -13.1875, -13.1875, -13.1875, -12.6250, -4.4062]
            ),
        )

    @slow
    def test_model_d20_generation(self):
        """Test that d20 model generates text correctly."""
        self._check_generation(
            "nanochat-students/nanochat-d20",
            [
                "The capital of France is Paris.",
                "I'm ready to help. What's the first thing you'd like to know or discuss?",
            ],
        )

    @slow
    def test_model_d32_logits(self):
        """Test that d32 model logits are computed correctly."""
        self._check_logits(
            "karpathy/nanochat-d32",
            expected_mean=torch.tensor([[-5.5791, -8.3456]]),
            expected_slice=torch.tensor(
                [-12.3125, -13.1250, -12.8125, -13.1250, -13.1250, -13.1250, -13.1250, -13.1250, -11.8125, -1.4688]
            ),
            revision="refs/pr/1",  # TODO: update when merged to hub
        )

    @slow
    def test_model_d32_generation(self):
        """Test that d32 model generates text correctly."""
        self._check_generation(
            "karpathy/nanochat-d32",
            [
                "The capital of France is Paris.",
                "I'm here to help you explore your creative writing endeavors. What's been on your mind lately? Do you have a story idea you'd like to develop,",
            ],
            revision="refs/pr/1",  # TODO: update when merged to hub
        )
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/nanochat/test_modeling_nanochat.py",
"license": "Apache License 2.0",
"lines": 188,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:src/transformers/pipelines/any_to_any.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import enum
from typing import Any, Union, overload
import numpy as np
from ..audio_utils import AudioInput
from ..generation import GenerationConfig
from ..image_utils import ImageInput
from ..processing_utils import ProcessingKwargs, Unpack
from ..utils import (
add_end_docstrings,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from ..video_utils import VideoInput
from .base import Pipeline, build_pipeline_init_args
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MULTIMODAL_LM_MAPPING_NAMES
from .pt_utils import KeyDataset
if is_vision_available():
from PIL import Image
logger = logging.get_logger(__name__)
class ReturnType(enum.Enum):
    # Controls what `postprocess` emits for text generation:
    #   TENSORS   -> raw generated token ids ("generated_token_ids")
    #   NEW_TEXT  -> only the newly generated text, input text stripped
    #   FULL_TEXT -> prompt (or updated chat) plus the newly generated text
    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2
class Chat:
    """Internal marker wrapping a list of chat messages as one conversation.

    The pipeline otherwise assumes that a list of dicts is a batch of independent
    samples; wrapping the messages in this class signals that they form a single
    chat. Not part of the public API.
    """

    def __init__(self, messages: list[dict]):
        # Every message must carry both a 'role' and a 'content' key.
        for entry in messages:
            if "role" not in entry or "content" not in entry:
                raise ValueError("When passing chat dicts as input, each dict must have a 'role' and 'content' key.")
        self.messages = messages
@add_end_docstrings(build_pipeline_init_args(has_processor=True))
class AnyToAnyPipeline(Pipeline):
    """
    Multimodal Generation pipeline using an `AutoModelForMultimodalLM`. This pipeline generates text given any
    combination of multimodal data and text. When the underlying model is a conversational model, it can also
    accept one or more chats, in which case the pipeline will operate in chat mode and will continue the
    chat(s) by adding its response(s). Each chat takes the form of a list of dicts, where each dict contains
    "role" and "content" keys.

    Unless the model you're using explicitly sets these generation parameters in its configuration files
    (`generation_config.json`), the following default values will be used:
    - max_new_tokens: 256

    Example:

    ```python
    >>> from transformers import pipeline

    >>> pipe = pipeline(task="any-to-any", model="google/gemma-3n-E4B-it")
    >>> pipe("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png", text="A photo of")
    [{'generated_text': 'a photo of two birds'}]
    ```

    ```python
    >>> from transformers import pipeline

    >>> pipe = pipeline("any-to-any", model="google/gemma-3n-E4B-it")
    >>> messages = [
    >>>     {
    >>>         "role": "user",
    >>>         "content": [
    >>>             {
    >>>                 "type": "image",
    >>>                 "url": "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-VL/assets/demo.jpeg",
    >>>             },
    >>>             {"type": "text", "text": "Describe this image."},
    >>>         ],
    >>>     },
    >>>     {
    >>>         "role": "assistant",
    >>>         "content": [
    >>>             {"type": "text", "text": "There is a dog and"},
    >>>         ],
    >>>     },
    >>> ]
    >>> pipe(text=messages, max_new_tokens=20, return_full_text=False)
    [{'input_text': [{'role': 'user',
        'content': [{'type': 'image',
           'url': 'https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-VL/assets/demo.jpeg'},
          {'type': 'text', 'text': 'Describe this image.'}]},
       {'role': 'assistant',
        'content': [{'type': 'text', 'text': 'There is a dog and'}]}],
      'generated_text': ' a person in the image. The dog is sitting on the sand, and the person is sitting on'}]
    ```

    Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial)

    This multimodal pipeline can currently be loaded from pipeline() using the following task identifier:
    "any-to-any".

    See the list of available models on
    [huggingface.co/models](https://huggingface.co/models?pipeline_tag=any-to-any).
    """

    # Only the combined processor is loaded; the tokenizer / image processor / feature
    # extractor are accessed through it instead of being loaded individually.
    _load_processor = True
    _load_image_processor = False
    _load_feature_extractor = False
    _load_tokenizer = False

    _pipeline_calls_generate = True
    # Make sure the docstring is updated when the default generation config is changed
    _default_generation_config = GenerationConfig(
        max_new_tokens=256,
    )

    def __init__(self, *args, **kwargs):
        # Require the optional backends needed for the model's declared input
        # modalities, then validate the model class against the multimodal-LM mapping.
        super().__init__(*args, **kwargs)
        if "image" in self.model.input_modalities or "video" in self.model.input_modalities:
            requires_backends(self, "vision")
            requires_backends(self, "torchvision")
        if "audio" in self.model.input_modalities:
            requires_backends(self, "librosa")
        self.check_model_type(MODEL_FOR_MULTIMODAL_LM_MAPPING_NAMES)

    def _sanitize_parameters(
        self,
        max_new_tokens=None,
        generate_kwargs=None,
        timeout=None,
        return_full_text=None,
        return_tensors=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        stop_sequence=None,
        continue_final_message=None,
        skip_special_tokens=None,
        generation_mode=None,
        **kwargs: Unpack[ProcessingKwargs],
    ):
        """Route call-time kwargs into the preprocess / forward / postprocess parameter dicts.

        Also validates mutually exclusive options (`return_full_text` vs `return_tensors`,
        `max_new_tokens` passed both directly and in `generate_kwargs`) and that
        `generation_mode` is one the model's output modalities support.
        """
        forward_kwargs = {}
        preprocess_params = {}
        postprocess_params = {}

        # Preprocess params — remaining kwargs are forwarded to the processor.
        preprocess_params.update(kwargs)
        if timeout is not None:
            preprocess_params["timeout"] = timeout
        if continue_final_message is not None:
            preprocess_params["continue_final_message"] = continue_final_message

        # Forward kwargs
        forward_kwargs["generate_kwargs"] = generate_kwargs or {}
        if generation_mode is not None and generation_mode != "text":
            forward_kwargs["generate_kwargs"]["generation_mode"] = generation_mode
        if kwargs.get("load_audio_from_video"):
            forward_kwargs["generate_kwargs"]["use_audio_in_video"] = True
        if stop_sequence is not None:
            if isinstance(stop_sequence, str):
                stop_sequence = [stop_sequence]
            # `stop_strings` requires passing the tokenizer along to `generate`.
            forward_kwargs["generate_kwargs"]["stop_strings"] = stop_sequence
            forward_kwargs["generate_kwargs"]["tokenizer"] = self.processor.tokenizer
        if max_new_tokens is not None:
            if generate_kwargs is not None and "max_new_tokens" in generate_kwargs:
                raise ValueError(
                    "'max_new_tokens' is defined twice, once in 'generate_kwargs' and "
                    "once as a direct argument. Please use only one."
                )
            forward_kwargs["generate_kwargs"]["max_new_tokens"] = max_new_tokens

        if return_full_text is not None and return_type is None:
            if return_tensors is not None:
                raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
        elif return_tensors is not None and return_type is None:
            return_type = ReturnType.TENSORS
        # We don't want to set the global default to FULLTEXT at init time. That is why
        # `_postprocess_params` is checked before setting the default value
        elif return_type is None and generation_mode in [None, "text"] and hasattr(self, "_postprocess_params"):
            return_type = ReturnType.FULL_TEXT

        # Postprocess params
        if generation_mode not in [None, "text"] and return_type is not None:
            raise ValueError(
                f"`return_type` cannot be set to {return_type} when generation_mode={generation_mode}. "
                "Set `return_type=None` or generation_mode='text'"
            )
        if generation_mode not in [None, "text", "image", "audio"]:
            raise ValueError(
                f"`generation_mode` can be only one of the `text`, `audio`, `image` but got generation_mode[={generation_mode}]"
            )
        elif generation_mode is not None and generation_mode not in self.model.output_modalities:
            raise ValueError(
                f"`generation_mode={generation_mode}` is not supported for {self.model.__class__.__name__}. "
                f"The model can only output the following modalities: {self.model.output_modalities}"
            )
        if return_type is not None:
            postprocess_params["return_type"] = return_type
        if continue_final_message is not None:
            postprocess_params["continue_final_message"] = continue_final_message
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces
        if skip_special_tokens is not None:
            postprocess_params["skip_special_tokens"] = skip_special_tokens
        postprocess_params["generation_mode"] = generation_mode
        return preprocess_params, forward_kwargs, postprocess_params

    @overload
    def __call__(
        self,
        text: str | None = None,
        images: Union[str, "Image.Image"] | None = None,
        videos: Union[str, "np.ndarray", "torch.Tensor"] | None = None,
        audio: Union[str, "np.ndarray"] | None = None,
        **kwargs: Any,
    ) -> list[dict[str, Any]]: ...

    @overload
    def __call__(
        self,
        text: list[str] | None = None,
        images: list[str] | list["Image.Image"] | None = None,
        videos: list[str] | list["np.ndarray"] | list["torch.Tensor"] | None = None,
        audio: list[str] | list["np.ndarray"] | None = None,
        **kwargs: Any,
    ) -> list[list[dict[str, Any]]]: ...

    def __call__(
        self,
        text: str | list[str] | list[dict],
        images: str | list[str] | list[list[str]] | ImageInput | None = None,
        videos: str | list[str] | VideoInput | None = None,
        audio: str | list[str] | AudioInput | None = None,
        **kwargs,
    ) -> list[dict[str, Any]] | list[list[dict[str, Any]]]:
        """
        Generate a text given text and optionally multimodal data passed as inputs.

        Args:
            text (`str`, `list[str]`, `list[dict]`):
                The text to be used for generation. If a list of strings is passed, the length of the list should be
                the same as the number of images. Text can also follow the chat format: a list of dictionaries where
                each dictionary represents a message in a conversation. Each dictionary should have two keys: 'role'
                and 'content'. 'role' should be one of 'user', 'system' or 'assistant'. 'content' should be a list of
                dictionary containing the text of the message and the type of the message.
            images (`str`, `list[str]`, `ImageInput`):
                The pipeline handles three types of images:

                - A string containing a HTTP(s) link pointing to an image
                - A string containing a local path to an image
                - An image loaded in PIL directly

                The pipeline accepts either a single image or a batch of images. Finally, this pipeline also supports
                the chat format (see `text`) containing images and text in this argument.
            videos (`str`, `list[str]`, `VideoInput`):
                The pipeline handles three types of videos:

                - A string containing a HTTP(s) link pointing to a video
                - A string containing a local path to a video
                - A video loaded and decoded to array format

                The pipeline accepts either a single video or a batch of videos. Finally, this pipeline also supports
                the chat format (see `text`) containing videos and text in this argument.
            audio (`str`, `list[str]`, `AudioInput`):
                The pipeline handles three types of audios:

                - A string containing a HTTP(s) link pointing to an audio
                - A string containing a local path to an audio
                - An audio loaded and decoded to array format

                The pipeline accepts either a single audio or a batch of audios. Finally, this pipeline also supports
                the chat format (see `text`) containing audios and text in this argument.
            return_tensors (`bool`, *optional*, defaults to `False`):
                Returns the tensors of predictions (as token indices) in the outputs. If set to
                `True`, the decoded text is not returned.
            return_text (`bool`, *optional*):
                Returns the decoded texts in the outputs.
            return_full_text (`bool`, *optional*, defaults to `True`):
                If set to `False` only added text is returned, otherwise the full text is returned. Cannot be
                specified at the same time as `return_text`.
            clean_up_tokenization_spaces (`bool`, *optional*, defaults to `True`):
                Whether or not to clean up the potential extra spaces in the text output.
            continue_final_message( `bool`, *optional*): This indicates that you want the model to continue the
                last message in the input chat rather than starting a new one, allowing you to "prefill" its response.
                By default this is `True` when the final message in the input chat has the `assistant` role and
                `False` otherwise, but you can manually override that behaviour by setting this flag.

        Return:
            A list or a list of list of `dict`: Each result comes as a dictionary with the following key (cannot
            return a combination of both `generated_text` and `generated_token_ids`):

            - **generated_text** (`str`, present when `return_text=True` and `generation_mode="text"`) -- The generated text.
            - **generated_audio** (`np.ndarray`, present when `generation_mode="audio"`) -- The generated audio.
            - **generated_image** (`PIL.Image.Image`, present when `generation_mode="image"`) -- The generated image.
            - **generated_token_ids** (`torch.Tensor`, present when `return_tensors=True` and `generation_mode="text"`) -- The token
              ids of the generated text.
            - **input_text** (`str`) -- The input text.
        """
        if images is None and text is None:
            raise ValueError("You must at least provide either text or images.")
        if isinstance(text, (list, tuple, KeyDataset)) and isinstance(text[0], (list, tuple, dict)):
            # We have one or more prompts in list-of-dicts format, so this is chat mode
            if isinstance(text[0], dict) and "role" in text[0]:
                return super().__call__(Chat(text), **kwargs)
            elif isinstance(text[0], (list, tuple)) and isinstance(text[0][0], dict) and "role" in text[0][0]:
                chats = [Chat(chat) for chat in text]  # 🐈 🐈 🐈
                return super().__call__(chats, **kwargs)
        if text is not None and not (isinstance(text, str) or (isinstance(text, list) and isinstance(text[0], str))):
            """
            Supports the following format
            - {"text": text, "image": image, "video": video, "audio": audio}
            - [{"text": text, "image": image, "video": video, "audio": audio}]
            - Generator and datasets
            This is a common pattern in other multimodal pipelines, so we support it here as well.
            """
            return super().__call__(text, **kwargs)

        # encourage the user to use the chat format if supported
        if getattr(self.processor, "chat_template", None) is not None:
            logger.warning_once(
                "The input data was not formatted as a chat with dicts containing 'role' and 'content' keys, even "
                "though this model supports chat. Consider using the chat format for better results. For more "
                "information, see https://huggingface.co/docs/transformers/en/chat_templating"
            )
        return super().__call__({"text": text, "images": images, "video": videos, "audio": audio}, **kwargs)

    def preprocess(self, inputs=None, timeout=None, continue_final_message=None, **processing_kwargs):
        """Turn raw pipeline input (a `Chat`, plain text, or an input dict) into model tensors."""
        if isinstance(inputs, Chat):
            # If the user passes a chat that ends in an assistant message, we treat it as a prefill by default
            # because very few models support multiple separate, consecutive assistant messages
            if continue_final_message is None:
                continue_final_message = inputs.messages[-1]["role"] == "assistant"
            # Handle Mistral tokenizer which does not accept processing kwargs
            chat_template_kwargs = {"add_generation_prompt": not continue_final_message, **processing_kwargs}
            if self.processor.tokenizer.__class__.__name__ == "MistralCommonBackend":
                chat_template_kwargs = {
                    k: v for k, v in chat_template_kwargs.items() if k in ["padding", "truncation", "max_length"]
                }
            model_inputs = self.processor.apply_chat_template(
                inputs.messages,
                continue_final_message=continue_final_message,
                return_tensors="pt",
                tokenize=True,
                return_dict=True,
                **chat_template_kwargs,
            ).to(dtype=self.dtype)
            # Keep the original chat around so `postprocess` can echo it back.
            model_inputs["text"] = inputs
            return model_inputs

        # In case we only have text inputs
        if isinstance(inputs, (list, tuple, str)):
            text = inputs
            inputs = {}
        else:
            inputs = inputs.copy()  # avoid in-place changes if users passed dict
            text = inputs.pop("text")

        # Feature extractor do not load audio files and expect a decoded array
        if inputs.get("audio", None) is not None and hasattr(self.processor, "feature_extractor"):
            inputs["audio"] = self.processor.feature_extractor.fetch_audio(inputs["audio"])

        # If batched text inputs, we set padding to True unless specified otherwise
        if isinstance(text, (list, tuple)) and len(text) > 1:
            processing_kwargs.setdefault("padding", True)

        # Multimodal data is loaded in preprocessors so we pass all inputs directly to `self.processor`
        model_inputs = self.processor(text=text, **inputs, return_tensors="pt", **processing_kwargs).to(
            dtype=self.dtype
        )
        model_inputs["text"] = text
        return model_inputs

    def _forward(self, model_inputs, generate_kwargs=None):
        """Run `model.generate` and carry the prompt text / input ids through to `postprocess`."""
        generate_kwargs = {} if generate_kwargs is None else generate_kwargs
        prompt_text = model_inputs.pop("text")
        input_ids = model_inputs.get("input_ids", model_inputs.get("decoder_input_ids"))
        # User-defined `generation_config` passed to the pipeline call take precedence
        if "generation_config" not in generate_kwargs:
            generate_kwargs["generation_config"] = self.generation_config
        generated_sequence = self.model.generate(**model_inputs, **generate_kwargs)
        return {"generated_sequence": generated_sequence, "prompt_text": prompt_text, "input_ids": input_ids}

    def postprocess(
        self,
        model_outputs,
        return_type=None,
        continue_final_message=None,
        skip_special_tokens=None,
        **postprocess_kwargs,
    ):
        """Decode generated sequences into output records, honouring `return_type` and the generation mode."""
        input_texts = model_outputs["prompt_text"]
        input_texts = [input_texts] if isinstance(input_texts, (str, Chat)) else input_texts
        generated_sequence = model_outputs["generated_sequence"]
        input_ids = model_outputs["input_ids"]
        if return_type == ReturnType.TENSORS:
            return [
                {"input_text": input_texts[i], "generated_token_ids": generated_sequence[i]}
                for i in range(len(input_texts))
            ]

        # Decode inputs and outputs the same way to remove input text from generated text if present
        skip_special_tokens = skip_special_tokens if skip_special_tokens is not None else True
        generation_mode = postprocess_kwargs["generation_mode"] or "text"
        if generation_mode == "image" and hasattr(self.model, "decode_image_tokens"):
            generated_sequence = self.model.decode_image_tokens(generated_sequence.to(self.model.device))
        generated_outputs = self.processor.post_process_multimodal_output(
            generated_sequence, skip_special_tokens=skip_special_tokens, **postprocess_kwargs
        )

        # Force consistent behavior for including the input text in the output
        if return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
            # Remove the input text from the generated text if the generated text starts with the input text
            # (accounting for the possibility of a space between the input and generated text)
            new_generated_texts = []
            postprocess_kwargs["generation_mode"] = "text"
            decoded_inputs = self.processor.post_process_multimodal_output(
                input_ids, skip_special_tokens=skip_special_tokens, **postprocess_kwargs
            )
            for text_generated, decoded_input in zip(generated_outputs, decoded_inputs):
                # There can be added characters before the input text, so we need to find the beginning of the input text in the generated text
                index_input_text = text_generated.find(decoded_input)
                # Limit the search to 2 residual characters, like spaces or new lines, to avoid removing a large part of the answer
                if 0 <= index_input_text <= 2:
                    # If the input text is found, we remove it
                    new_generated_texts.append(text_generated[index_input_text + len(decoded_input) :])
                else:
                    new_generated_texts.append(text_generated)
            generated_outputs = new_generated_texts
        if return_type == ReturnType.FULL_TEXT:
            full_texts = []
            for prompt_text, generated_text in zip(input_texts, generated_outputs):
                if isinstance(prompt_text, str):
                    generated_text = prompt_text + generated_text
                elif isinstance(prompt_text, Chat):
                    if continue_final_message is None:
                        # If the user passes a chat ending in an assistant message, we treat it as a prefill by
                        # default because very few models support multiple separate, consecutive assistant messages
                        continue_final_message = prompt_text.messages[-1]["role"] == "assistant"
                    if continue_final_message:
                        # With assistant prefill, concat onto the end of the last message
                        new_text = dict(prompt_text.messages[-1]["content"][-1].items())
                        new_text["text"] += generated_text
                        generated_text = list(prompt_text.messages)[:-1] + [
                            {
                                "role": prompt_text.messages[-1]["role"],
                                "content": prompt_text.messages[-1]["content"][:-1] + [new_text],
                            }
                        ]
                    else:
                        # When we're not starting from a prefill, the output is a new assistant message
                        generated_text = list(prompt_text.messages) + [
                            {"role": "assistant", "content": generated_text}
                        ]
                full_texts.append(generated_text)
            generated_outputs = full_texts

        records = [
            {
                "input_text": input_text.messages if isinstance(input_text, Chat) else input_text,
                f"generated_{generation_mode}": generated_output,
            }
            for input_text, generated_output in zip(input_texts, generated_outputs)
        ]
        return records
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/pipelines/any_to_any.py",
"license": "Apache License 2.0",
"lines": 432,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/layoutxlm/modular_layoutxlm.py | # Copyright Microsoft Research and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..layoutlmv2.configuration_layoutlmv2 import LayoutLMv2Config
# Modular-transformers definition: LayoutXLM reuses LayoutLMv2's configuration
# unchanged, so the class body is empty — only the class name and the user-facing
# documentation differ from the parent.
class LayoutXLMConfig(LayoutLMv2Config):
    r"""
    This is the configuration class to store the configuration of a [`LayoutXLMModel`]. It is used to instantiate an
    LayoutXLM model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the LayoutXLM
    [microsoft/layoutxlm-base](https://huggingface.co/microsoft/layoutxlm-base) architecture.

    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 30522):
            Vocabulary size of the LayoutXLM model. Defines the number of different tokens that can be represented by
            the `inputs_ids` passed when calling [`LayoutXLMModel`].
        hidden_size (`int`, *optional*, defaults to 768):
            Dimension of the encoder layers and the pooler layer.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        intermediate_size (`int`, *optional*, defaults to 3072):
            Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"` and `"gelu_new"` are supported.
        hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the attention probabilities.
        max_position_embeddings (`int`, *optional*, defaults to 512):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        type_vocab_size (`int`, *optional*, defaults to 2):
            The vocabulary size of the `token_type_ids` passed when calling [`LayoutXLMModel`].
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-12):
            The epsilon used by the layer normalization layers.
        pad_token_id (`int`, *optional*, defaults to 0):
            Padding token id.
        max_2d_position_embeddings (`int`, *optional*, defaults to 1024):
            The maximum value that the 2D position embedding might ever be used with. Typically set this to something
            large just in case (e.g., 1024).
        max_rel_pos (`int`, *optional*, defaults to 128):
            The maximum number of relative positions to be used in the self-attention mechanism.
        rel_pos_bins (`int`, *optional*, defaults to 32):
            The number of relative position bins to be used in the self-attention mechanism.
        fast_qkv (`bool`, *optional*, defaults to `True`):
            Whether or not to use a single matrix for the queries, keys, values in the self-attention layers.
        max_rel_2d_pos (`int`, *optional*, defaults to 256):
            The maximum number of relative 2D positions in the self-attention mechanism.
        rel_2d_pos_bins (`int`, *optional*, defaults to 64):
            The number of 2D relative position bins in the self-attention mechanism.
        convert_sync_batchnorm (`bool`, *optional*, defaults to `True`):
            Whether or not to convert batch normalization layers to synchronized batch normalization layers.
        image_feature_pool_shape (`list[int]`, *optional*, defaults to `[7, 7, 256]`):
            The shape of the average-pooled feature map.
        coordinate_size (`int`, *optional*, defaults to 128):
            Dimension of the coordinate embeddings.
        shape_size (`int`, *optional*, defaults to 128):
            Dimension of the width and height embeddings.
        has_relative_attention_bias (`bool`, *optional*, defaults to `True`):
            Whether or not to use a relative attention bias in the self-attention mechanism.
        has_spatial_attention_bias (`bool`, *optional*, defaults to `True`):
            Whether or not to use a spatial attention bias in the self-attention mechanism.
        has_visual_segment_embedding (`bool`, *optional*, defaults to `False`):
            Whether or not to add visual segment embeddings.
        detectron2_config_args (`dict`, *optional*):
            Dictionary containing the configuration arguments of the Detectron2 visual backbone. Refer to [this
            file](https://github.com/microsoft/unilm/blob/master/layoutlmft/layoutlmft/models/layoutxlm/detectron2_config.py)
            for details regarding default values.

    Example:

    ```python
    >>> from transformers import LayoutXLMConfig, LayoutXLMModel

    >>> # Initializing a LayoutXLM microsoft/layoutxlm-base style configuration
    >>> configuration = LayoutXLMConfig()

    >>> # Initializing a model (with random weights) from the microsoft/layoutxlm-base style configuration
    >>> model = LayoutXLMModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    pass


__all__ = ["LayoutXLMConfig"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/layoutxlm/modular_layoutxlm.py",
"license": "Apache License 2.0",
"lines": 95,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/vitpose/image_processing_vitpose_fast.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fast Image processor class for VitPose."""
import itertools
from typing import TYPE_CHECKING
import numpy as np
import torch
from ...image_processing_utils import BatchFeature
from ...image_processing_utils_fast import BaseImageProcessorFast
from ...image_transforms import group_images_by_shape, reorder_images
from ...image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ImageInput, SizeDict
from ...processing_utils import Unpack
from ...utils import TensorType, auto_docstring
from .image_processing_vitpose import (
VitPoseImageProcessorKwargs,
box_to_center_and_scale,
coco_to_pascal_voc,
get_keypoint_predictions,
get_warp_matrix,
post_dark_unbiased_data_processing,
scipy_warp_affine,
transform_preds,
)
if TYPE_CHECKING:
from .modeling_vitpose import VitPoseEstimatorOutput
@auto_docstring
class VitPoseImageProcessorFast(BaseImageProcessorFast):
    # Defaults mirror the slow VitPose processor: ImageNet statistics and a fixed
    # 256x192 (height x width) pose-crop resolution.
    image_mean = IMAGENET_DEFAULT_MEAN
    image_std = IMAGENET_DEFAULT_STD
    size = {"height": 256, "width": 192}
    do_rescale = True
    do_normalize = True
    do_affine_transform = True
    # Divisor used when converting pixel-space box sizes into the normalized "scale"
    # unit expected by box_to_center_and_scale.
    normalize_factor = 200.0
    valid_kwargs = VitPoseImageProcessorKwargs
    model_input_names = ["pixel_values"]

    def torch_affine_transform(
        self,
        image: torch.Tensor,
        center: tuple[float],
        scale: tuple[float],
        rotation: float,
        size: SizeDict,
    ) -> torch.Tensor:
        """
        Apply an affine transformation to a torch tensor image.

        Args:
            image (`torch.Tensor`):
                Image tensor of shape (C, H, W) to transform.
            center (`tuple[float]`):
                Center of the bounding box (x, y).
            scale (`tuple[float]`):
                Scale of the bounding box with respect to height/width.
            rotation (`float`):
                Rotation angle in degrees.
            size (`SizeDict`):
                Size of the destination image.

        Returns:
            `torch.Tensor`: The transformed image.
        """
        # NOTE(review): the scale is multiplied by a hard-coded 200.0 here, while
        # _preprocess computes (center, scale) with self.normalize_factor (also 200.0 by
        # default); confirm the two should stay in sync if normalize_factor is overridden.
        transformation = get_warp_matrix(
            rotation, center * 2.0, np.array((size.width, size.height)) - 1.0, scale * 200.0
        )
        # The warp itself runs on CPU through scipy, so this path is not pure torch:
        # convert to numpy (channels last) for scipy_warp_affine
        image_np = image.permute(1, 2, 0).cpu().numpy()
        transformed_np = scipy_warp_affine(src=image_np, M=transformation, size=(size.height, size.width))
        # Convert back to torch tensor (channels first) on the original device
        transformed = torch.from_numpy(transformed_np).permute(2, 0, 1).to(image.device)
        return transformed

    @auto_docstring
    def preprocess(
        self,
        images: ImageInput,
        boxes: list[list[float]] | np.ndarray,
        **kwargs: Unpack[VitPoseImageProcessorKwargs],
    ) -> BatchFeature:
        r"""
        boxes (`list[list[list[float]]]` or `np.ndarray`):
            List or array of bounding boxes for each image. Each box should be a list of 4 floats representing the
            bounding box coordinates in COCO format (top_left_x, top_left_y, width, height).
        """
        # Thin wrapper so `boxes` becomes a documented positional argument; the base
        # class forwards everything to _preprocess below.
        return super().preprocess(images, boxes, **kwargs)

    def _preprocess(
        self,
        images: list[torch.Tensor],
        boxes: list | np.ndarray,
        do_affine_transform: bool,
        size: SizeDict,
        do_rescale: bool,
        rescale_factor: float,
        do_normalize: bool,
        image_mean: float | tuple[float],
        image_std: float | tuple[float],
        disable_grouping: bool,
        return_tensors: str | TensorType | None,
        **kwargs,
    ) -> BatchFeature:
        """
        Preprocess images with affine transformations based on bounding boxes.

        One cropped pose image is produced per bounding box, so the output batch size is
        the total number of boxes across all input images, not the number of images.
        """
        if len(images) != len(boxes):
            raise ValueError(f"Number of images and boxes must match: {len(images)} != {len(boxes)}")
        # Apply affine transformation for each image and each box
        if do_affine_transform:
            transformed_images = []
            for image, image_boxes in zip(images, boxes):
                for box in image_boxes:
                    center, scale = box_to_center_and_scale(
                        box,
                        image_width=size.width,
                        image_height=size.height,
                        normalize_factor=self.normalize_factor,
                    )
                    transformed_image = self.torch_affine_transform(image, center, scale, rotation=0, size=size)
                    transformed_images.append(transformed_image)
            images = transformed_images
        # Group images by shape for efficient batch processing
        grouped_images, grouped_images_index = group_images_by_shape(images, disable_grouping=disable_grouping)
        processed_images_grouped = {}
        for shape, stacked_images in grouped_images.items():
            # Apply rescale and normalize on the whole stack at once
            stacked_images = self.rescale_and_normalize(
                stacked_images, do_rescale, rescale_factor, do_normalize, image_mean, image_std
            )
            processed_images_grouped[shape] = stacked_images
        # Restore the original (box) ordering before stacking into a batch tensor
        processed_images = reorder_images(processed_images_grouped, grouped_images_index)
        return BatchFeature(data={"pixel_values": processed_images}, tensor_type=return_tensors)

    def keypoints_from_heatmaps(
        self,
        heatmaps: np.ndarray,
        center: np.ndarray,
        scale: np.ndarray,
        kernel: int = 11,
    ):
        """
        Get final keypoint predictions from heatmaps and transform them back to
        the image.

        Args:
            heatmaps (`np.ndarray` of shape `(batch_size, num_keypoints, height, width])`):
                Model predicted heatmaps.
            center (`np.ndarray` of shape `(batch_size, 2)`):
                Center of the bounding box (x, y).
            scale (`np.ndarray` of shape `(batch_size, 2)`):
                Scale of the bounding box wrt original images of width and height.
            kernel (int, *optional*, defaults to 11):
                Gaussian kernel size (K) for modulation, which should match the heatmap gaussian sigma when training.
                K=17 for sigma=3 and k=11 for sigma=2.

        Returns:
            tuple: A tuple containing keypoint predictions and scores.

            - preds (`np.ndarray` of shape `(batch_size, num_keypoints, 2)`):
                Predicted keypoint location in images.
            - scores (`np.ndarray` of shape `(batch_size, num_keypoints, 1)`):
                Scores (confidence) of the keypoints.
        """
        batch_size, _, height, width = heatmaps.shape
        # Coarse peak locations and their confidences
        coords, scores = get_keypoint_predictions(heatmaps)
        # Refine the coarse peaks with the unbiased post-processing helper
        preds = post_dark_unbiased_data_processing(coords, heatmaps, kernel=kernel)
        # Transform back to the image
        for i in range(batch_size):
            preds[i] = transform_preds(preds[i], center=center[i], scale=scale[i], output_size=[height, width])
        return preds, scores

    def post_process_pose_estimation(
        self,
        outputs: "VitPoseEstimatorOutput",
        boxes: list[list[list[float]]] | np.ndarray,
        kernel_size: int = 11,
        threshold: float | None = None,
        target_sizes: TensorType | list[tuple] | None = None,
    ):
        """
        Transform the heatmaps into keypoint predictions and transform them back to the image.

        Args:
            outputs (`VitPoseEstimatorOutput`):
                VitPoseForPoseEstimation model outputs.
            boxes (`list[list[list[float]]]` or `np.ndarray`):
                List or array of bounding boxes for each image. Each box should be a list of 4 floats representing the bounding
                box coordinates in COCO format (top_left_x, top_left_y, width, height).
            kernel_size (`int`, *optional*, defaults to 11):
                Gaussian kernel size (K) for modulation.
            threshold (`float`, *optional*, defaults to None):
                Score threshold to keep object detection predictions.
            target_sizes (`torch.Tensor` or `list[tuple[int, int]]`, *optional*):
                Tensor of shape `(batch_size, 2)` or list of tuples (`tuple[int, int]`) containing the target size
                `(height, width)` of each image in the batch. If unset, predictions will be resize with the default value.

        Returns:
            `list[list[Dict]]`: A list of dictionaries, each dictionary containing the keypoints and boxes for an image
            in the batch as predicted by the model.
        """
        # First compute centers and scales for each bounding box
        batch_size, num_keypoints, _, _ = outputs.heatmaps.shape
        if target_sizes is not None:
            if batch_size != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )
        # One heatmap per bounding box: flatten the per-image box lists so index i
        # lines up with heatmap i.
        centers = np.zeros((batch_size, 2), dtype=np.float32)
        scales = np.zeros((batch_size, 2), dtype=np.float32)
        flattened_boxes = list(itertools.chain(*boxes))
        for i in range(batch_size):
            if target_sizes is not None:
                # NOTE(review): when target_sizes is given, boxes appear to be treated as
                # normalized to [0, 1] and rescaled to pixels here — confirm with callers.
                image_width, image_height = target_sizes[i][0], target_sizes[i][1]
                scale_factor = np.array([image_width, image_height, image_width, image_height])
                flattened_boxes[i] = flattened_boxes[i] * scale_factor
            width, height = self.size["width"], self.size["height"]
            center, scale = box_to_center_and_scale(flattened_boxes[i], image_width=width, image_height=height)
            centers[i, :] = center
            scales[i, :] = scale
        preds, scores = self.keypoints_from_heatmaps(
            outputs.heatmaps.cpu().numpy(), centers, scales, kernel=kernel_size
        )
        # Pack (center, scale) per box so coco_to_pascal_voc can emit corner-format boxes
        all_boxes = np.zeros((batch_size, 4), dtype=np.float32)
        all_boxes[:, 0:2] = centers[:, 0:2]
        all_boxes[:, 2:4] = scales[:, 0:2]
        poses = torch.tensor(preds)
        scores = torch.tensor(scores)
        labels = torch.arange(0, num_keypoints)
        bboxes_xyxy = torch.tensor(coco_to_pascal_voc(all_boxes))
        # Regroup the flat per-box results back into one list per input image
        results: list[list[dict[str, torch.Tensor]]] = []
        pose_bbox_pairs = zip(poses, scores, bboxes_xyxy)
        for image_bboxes in boxes:
            image_results: list[dict[str, torch.Tensor]] = []
            for _ in image_bboxes:
                # Unpack the next pose and bbox_xyxy from the iterator
                pose, score, bbox_xyxy = next(pose_bbox_pairs)
                score = score.squeeze()
                keypoints_labels = labels
                if threshold is not None:
                    # Keep only keypoints whose confidence exceeds the threshold
                    keep = score > threshold
                    pose = pose[keep]
                    score = score[keep]
                    keypoints_labels = keypoints_labels[keep]
                pose_result = {"keypoints": pose, "scores": score, "labels": keypoints_labels, "bbox": bbox_xyxy}
                image_results.append(pose_result)
            results.append(image_results)
        return results


__all__ = ["VitPoseImageProcessorFast"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/vitpose/image_processing_vitpose_fast.py",
"license": "Apache License 2.0",
"lines": 248,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:tests/deepspeed/test_alst_ulysses_sp.py | # Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import sys
from transformers import is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
read_json_file,
require_accelerate,
require_torch_multi_accelerator,
slow,
write_file,
)
if is_torch_available():
import torch
from transformers import (
AutoModelForCausalLM,
AutoTokenizer,
DataCollatorForLanguageModeling,
HfArgumentParser,
Trainer,
TrainingArguments,
)
class TestTrainerALSTUlyssesSP(TestCasePlus):
    """Test Trainer with ALST/Ulysses sequence parallelism enabled via accelerate's ParallelismConfig."""

    @require_torch_multi_accelerator
    @require_accelerate
    @slow
    def test_sp_equivalence(self):
        """Test that ALST/Ulysses sequence parallelism produces the same losses as without it.

        Launches this file's ``__main__`` training script twice through ``accelerate launch``:
        once with sequence parallelism (sp_size == world_size) and once without, then compares
        the per-step training losses and checks that evaluation succeeds under SP.
        """
        # shared setup
        world_size = 2
        # This test file doubles as the training script executed by the subprocesses below.
        script_path = __file__
        ds_config_path = self.test_file_dir / "ds_config_zero2.json"

        # step 1. Run with SP enabled (sp_size=world_size)
        sp_yes_output_dir = self.get_auto_remove_tmp_dir(return_pathlib_obj=True)
        sp_yes_accelerate_config_path = sp_yes_output_dir / "context_parallel_config.yaml"
        sp_yes_losses_path = sp_yes_output_dir / "sp_yes_losses.json"
        write_file(
            sp_yes_accelerate_config_path,
            f"""
distributed_type: DEEPSPEED
deepspeed_config:
  deepspeed_config_file: {ds_config_path}
machine_rank: 0
num_machines: 1
num_processes: {world_size}
parallelism_config:
  parallelism_config_sp_size: {world_size}
  parallelism_config_sp_backend: deepspeed
  parallelism_config_sp_seq_length_is_variable: true
  parallelism_config_sp_attn_implementation: sdpa
""",
        )
        sp_yes_eval_metrics_path = sp_yes_output_dir / "sp_yes_eval_metrics.json"
        cmd_sp_yes = f"""
            accelerate launch
            --config_file {sp_yes_accelerate_config_path}
            {script_path}
            --output_dir {sp_yes_output_dir}
            --report_to none
            --max_steps 10
            --per_device_train_batch_size 1
            --gradient_accumulation_steps 1
            --logging_steps 1
            --remove_unused_columns False
            --seed 42
            --per_device_eval_batch_size 1
            --loss_output_file {sp_yes_losses_path}
            --eval_output_file {sp_yes_eval_metrics_path}
        """.split()
        execute_subprocess_async(cmd_sp_yes, env=self.get_env())

        # step 2. Run without SP (plain DeepSpeed data parallelism) as the baseline
        sp_no_output_dir = self.get_auto_remove_tmp_dir(return_pathlib_obj=True)
        sp_no_accelerate_config_path = sp_no_output_dir / "context_parallel_config.yaml"
        # Fixed copy-paste: the baseline losses file was previously named "sp_yes_losses.json"
        sp_no_losses_path = sp_no_output_dir / "sp_no_losses.json"
        write_file(
            sp_no_accelerate_config_path,
            f"""
distributed_type: DEEPSPEED
deepspeed_config:
  deepspeed_config_file: {ds_config_path}
machine_rank: 0
num_machines: 1
num_processes: {world_size}
""",
        )
        cmd_sp_no = f"""
            accelerate launch
            --config_file {sp_no_accelerate_config_path}
            {script_path}
            --output_dir {sp_no_output_dir}
            --report_to none
            --max_steps 10
            --per_device_train_batch_size 1
            --gradient_accumulation_steps 1
            --logging_steps 1
            --remove_unused_columns False
            --seed 42
            --loss_output_file {sp_no_losses_path}
        """.split()
        execute_subprocess_async(cmd_sp_no, env=self.get_env())

        # Compare losses - should be very close since SP just splits sequence computation
        sp_yes_losses = read_json_file(sp_yes_losses_path)
        sp_no_losses = read_json_file(sp_no_losses_path)
        assert len(sp_yes_losses) == len(sp_no_losses), (
            f"Different number of losses: SP has {len(sp_yes_losses)}, no-SP has {len(sp_no_losses)}"
        )
        # ALST/UlyssesSP should produce very similar results (small numerical differences expected)
        # The differences come from:
        # - Different gradient reduction patterns in distributed training
        # - BF16 mixed precision accumulated differences
        sp_yes_losses_tensor = torch.tensor(sp_yes_losses)
        sp_no_losses_tensor = torch.tensor(sp_no_losses)
        torch.testing.assert_close(
            sp_yes_losses_tensor,
            sp_no_losses_tensor,
            atol=2e-2,
            rtol=2e-5,
            msg=f"SP-enabled losses {sp_yes_losses} do not match SP-disabled losses {sp_no_losses}",
        )
        # Eval should succeed even though eval sequences are not divisible by sp_size,
        # because SP is disabled in eval mode.
        eval_metrics = read_json_file(sp_yes_eval_metrics_path)
        assert "eval_loss" in eval_metrics, f"Expected eval_loss in metrics, got keys: {list(eval_metrics.keys())}"
        assert torch.isfinite(torch.tensor(eval_metrics["eval_loss"])), f"Eval loss is not finite: {eval_metrics}"
if __name__ == "__main__":
    # Training-script entry point: this block runs inside the worker processes spawned
    # by TestTrainerALSTUlyssesSP via `accelerate launch <this file> ...`.
    model_name = "hf-internal-testing/tiny-random-LlamaForCausalLM"
    # Parse custom arguments (not TrainingArguments parameters)
    loss_output_file = None
    eval_output_file = None
    # Strip the custom flags from argv before HfArgumentParser sees them; each flag
    # removes two entries (the option name and its value).
    if "--loss_output_file" in sys.argv:
        idx = sys.argv.index("--loss_output_file")
        loss_output_file = sys.argv[idx + 1]
        sys.argv.pop(idx)
        sys.argv.pop(idx)
    if "--eval_output_file" in sys.argv:
        idx = sys.argv.index("--eval_output_file")
        eval_output_file = sys.argv[idx + 1]
        sys.argv.pop(idx)
        sys.argv.pop(idx)
    parser = HfArgumentParser((TrainingArguments,))
    training_args = parser.parse_args_into_dataclasses()[0]
    training_args.do_eval = True
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    if tokenizer.pad_token is None:
        tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(
        model_name,
        attn_implementation="sdpa",  # SP requires SDPA or FA
    )
    # fix the outdated testing model config
    model.generation_config.pad_token_id = 1
    # Create simple dataset: just tokenize some text
    texts = [
        "The quick brown fox jumps over the lazy dog. " * 10,
        "Hello world, this is a test sentence for training. " * 10,
    ] * 4  # 8 samples total

    def tokenize_function(examples):
        # Fixed-length padding keeps every training sample at 128 tokens, which is
        # divisible by sp_size * 2 for sp_size=2.
        return tokenizer(examples, max_length=128, truncation=True, padding="max_length")

    def tokenize_eval_function(examples):
        # Use an odd max_length to ensure it's not divisible by sp_size (2),
        # so eval would fail if SP isn't disabled in eval mode.
        return tokenizer(examples, max_length=127, truncation=True, padding="max_length")

    train_dataset = [tokenize_function(text) for text in texts]
    eval_dataset = [tokenize_eval_function(text) for text in texts]
    # Use standard DataCollatorForLanguageModeling for causal LM
    # pad_to_multiple_of=4 ensures sequences are divisible by sp_size * 2 (for sp_size=2)
    # Trainer will automatically generate position_ids and shift_labels as needed
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer,
        mlm=False,  # Causal language modeling
        pad_to_multiple_of=4,
    )
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        data_collator=data_collator,
    )
    # Train for a few steps
    trainer.train()
    # Verify training completed
    assert trainer.state.global_step > 0, "Training should have completed at least one step"
    if training_args.do_eval:
        eval_metrics = trainer.evaluate()
        # Only rank 0 writes the metrics file, to avoid concurrent writes.
        if eval_output_file and training_args.process_index == 0:
            with open(eval_output_file, "w") as f:
                json.dump(eval_metrics, f)
    # Save losses to file if requested (for equivalence testing); rank 0 only.
    if loss_output_file and training_args.process_index == 0:
        losses = [log["loss"] for log in trainer.state.log_history if "loss" in log]
        with open(loss_output_file, "w") as f:
            json.dump(losses, f)
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/deepspeed/test_alst_ulysses_sp.py",
"license": "Apache License 2.0",
"lines": 207,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:src/transformers/models/pix2struct/image_processing_pix2struct_fast.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fast Image processor class for Pix2Struct."""
import torch
import torchvision.transforms.v2.functional as tvF
from PIL import Image
from ...image_processing_utils import BatchFeature, get_size_dict
from ...image_processing_utils_fast import BaseImageProcessorFast
from ...image_transforms import group_images_by_shape, reorder_images
from ...image_utils import ChannelDimension, ImageInput, SizeDict
from ...processing_utils import Unpack
from ...utils import TensorType, auto_docstring
from .image_processing_pix2struct import Pix2StructImageProcessorKwargs, render_text
# Disable as it causes issues with torch.compile
@torch.compiler.disable
def torch_extract_patches(image_tensor, patch_height, patch_width):
"""
Extract patches from image tensor. Returns tensor of shape (batch, rows, columns, patch_height*patch_width*channels).
Args:
image_tensor (`torch.Tensor`):
Image tensor of shape (batch, channels, height, width).
patch_height (`int`):
Height of patches to extract.
patch_width (`int`):
Width of patches to extract.
"""
batch_size, channels, height, width = image_tensor.shape
patches = torch.nn.functional.unfold(image_tensor, (patch_height, patch_width), stride=(patch_height, patch_width))
patches = patches.reshape(batch_size, channels, patch_height, patch_width, -1)
patches = patches.permute(0, 4, 2, 3, 1).reshape(
batch_size, height // patch_height, width // patch_width, channels * patch_height * patch_width
)
return patches
@auto_docstring
class Pix2StructImageProcessorFast(BaseImageProcessorFast):
    # No fixed rescale factor: Pix2Struct standardizes per image (see `normalize`)
    # instead of rescaling by a constant.
    rescale_factor = None
    do_normalize = True
    do_convert_rgb = True
    # Images are cut into 16x16 patches, capped at `max_patches` patches per image.
    patch_size = {"height": 16, "width": 16}
    max_patches = 2048
    # When True, a text header (the question) is rendered on top of each image.
    is_vqa = False
    valid_kwargs = Pix2StructImageProcessorKwargs
    model_input_names = ["flattened_patches", "attention_mask"]
def _further_process_kwargs(
self,
patch_size: dict[str, int] | None = None,
**kwargs,
) -> dict:
"""
Process custom Pix2Struct kwargs, specifically converting patch_size to SizeDict.
"""
# Call super to handle standard kwargs processing (like converting patch_size to SizeDict)
kwargs = super()._further_process_kwargs(**kwargs)
kwargs["patch_size"] = SizeDict(**get_size_dict(size=patch_size, param_name="patch_size"))
return kwargs
def _validate_preprocess_kwargs(self, **kwargs):
"""
Skip standard validation as Pix2Struct uses custom preprocessing.
"""
# Pix2Struct doesn't use standard resize/rescale/normalize parameters
# so we skip the default validation
pass
    def render_header(
        self,
        image: torch.Tensor,
        header: str,
        font_bytes: bytes | None = None,
        font_path: str | None = None,
    ) -> torch.Tensor:
        """
        Render header text on image using torch tensors.

        Args:
            image (`torch.Tensor`):
                Image tensor in channel-first format (C, H, W).
            header (`str`):
                Header text to render.
            font_bytes (`bytes`, *optional*):
                Font bytes to use for rendering.
            font_path (`str`, *optional*):
                Path to font file to use for rendering.

        Returns:
            `torch.Tensor`: Image with header in channel-first format (C, H, W).
        """
        device = image.device
        dtype = image.dtype
        # Convert tensor to PIL (channel-first to channel-last for PIL)
        if image.dtype == torch.uint8:
            image_pil = tvF.to_pil_image(image)
        else:
            # If float, convert to uint8 first
            # (assumes float images are in [0, 1]; out-of-range values are clamped)
            image_uint8 = (image * 255).clamp(0, 255).to(torch.uint8)
            image_pil = tvF.to_pil_image(image_uint8)
        # Render header text as PIL image
        header_image = render_text(header, font_bytes=font_bytes, font_path=font_path)
        # Calculate new dimensions: keep the wider of the two widths and rescale both
        # parts to it, preserving each part's aspect ratio
        new_width = max(header_image.width, image_pil.width)
        new_height = int(image_pil.height * (new_width / image_pil.width))
        new_header_height = int(header_image.height * (new_width / header_image.width))
        # Create new image and paste header (top) and original image (below it)
        new_image = Image.new("RGB", (new_width, new_height + new_header_height), "white")
        new_image.paste(header_image.resize((new_width, new_header_height)), (0, 0))
        new_image.paste(image_pil.resize((new_width, new_height)), (0, new_header_height))
        # Convert back to tensor (channel-first) on the original device
        result = tvF.pil_to_tensor(new_image).to(device)
        # Convert back to original dtype if needed (re-normalizing floats to [0, 1])
        if dtype != torch.uint8:
            result = result.float() / 255.0
        return result
def normalize(self, images: torch.Tensor) -> torch.Tensor:
"""
Normalize batched images using per-image mean and standard deviation.
Args:
images (`torch.Tensor`):
Batched float image tensor of shape (B, C, H, W).
Returns:
`torch.Tensor`: Normalized images of shape (B, C, H, W).
"""
# Compute mean and std per image along spatial and channel dimensions
mean = images.mean(dim=(1, 2, 3), keepdim=True) # Shape: (B, 1, 1, 1)
std = images.std(dim=(1, 2, 3), keepdim=True) # Shape: (B, 1, 1, 1)
num_elements_per_image = images.shape[1] * images.shape[2] * images.shape[3]
min_std = 1.0 / num_elements_per_image**0.5
adjusted_stddev = torch.maximum(std, torch.tensor(min_std, device=std.device))
return (images - mean) / adjusted_stddev
    def extract_flattened_patches(
        self,
        images: torch.Tensor,
        max_patches: int,
        patch_size: SizeDict,
    ) -> torch.Tensor:
        """
        Extract flattened patches from a batch of images.

        Args:
            images (`torch.Tensor`):
                Batched images tensor of shape (batch, channels, height, width).
            max_patches (`int`):
                Maximum number of patches to extract.
            patch_size (`dict[str, int]`):
                Dictionary containing patch height and width.

        Returns:
            `torch.Tensor`: Batched flattened patches with row/column IDs of shape (batch, max_patches, patch_dim).
        """
        patch_height, patch_width = patch_size.height, patch_size.width
        batch_size, channels, image_height, image_width = images.shape
        # Calculate scale to maximize patches while respecting max_patches:
        # scale is chosen so (scale*H/ph) * (scale*W/pw) ~= max_patches, and the int()
        # truncations below keep rows * columns <= max_patches in the normal case.
        scale = (max_patches * (patch_height / image_height) * (patch_width / image_width)) ** 0.5
        num_feasible_rows = max(min(int(scale * image_height / patch_height), max_patches), 1)
        num_feasible_cols = max(min(int(scale * image_width / patch_width), max_patches), 1)
        resized_height = max(num_feasible_rows * patch_height, 1)
        resized_width = max(num_feasible_cols * patch_width, 1)
        # Resize images (batched) using parent class method so dimensions become exact
        # multiples of the patch size
        resize_size = SizeDict(height=resized_height, width=resized_width)
        images = self.resize(
            image=images, size=resize_size, interpolation=tvF.InterpolationMode.BILINEAR, antialias=True
        )
        # Extract patches: [batch, rows, columns, patch_height * patch_width * channels]
        patches = torch_extract_patches(images, patch_height, patch_width)
        batch_size, rows, columns, depth = patches.shape
        # Reshape to [batch, rows * columns, depth]
        patches = patches.reshape(batch_size, rows * columns, depth)
        # Create row and column IDs (row-major patch order)
        row_ids = (
            torch.arange(rows, device=images.device).reshape(rows, 1).repeat(1, columns).reshape(1, rows * columns, 1)
        )
        col_ids = (
            torch.arange(columns, device=images.device)
            .reshape(1, columns)
            .repeat(rows, 1)
            .reshape(1, rows * columns, 1)
        )
        # Expand to batch size
        row_ids = row_ids.expand(batch_size, -1, -1)
        col_ids = col_ids.expand(batch_size, -1, -1)
        # Offset by 1 so IDs don't contain zeros (which represent padding)
        row_ids = (row_ids + 1).float()
        col_ids = (col_ids + 1).float()
        # Concatenate row_ids, col_ids, and patches: [batch, rows * columns, 2 + depth]
        result = torch.cat([row_ids, col_ids, patches], dim=-1)
        # Pad to max_patches: [batch, max_patches, 2 + depth]
        # NOTE(review): assumes rows * columns <= max_patches (ensured by the truncated
        # scale above in normal cases); a negative pad here would silently crop patches.
        result = torch.nn.functional.pad(result, [0, 0, 0, max_patches - (rows * columns)]).float()
        return result
    @auto_docstring
    def preprocess(
        self,
        images: ImageInput,
        header_text: str | list[str] | None = None,
        **kwargs: Unpack[Pix2StructImageProcessorKwargs],
    ) -> BatchFeature:
        r"""
        header_text (`Union[str, list[str]]`, *optional*):
            Text to render as a header. Only has an effect if `image_processor.is_vqa` is `True`.
        """
        # Thin wrapper: the base-class preprocess dispatches to
        # _preprocess_image_like_inputs / _preprocess defined on this class.
        return super().preprocess(images, header_text=header_text, **kwargs)
def _preprocess_image_like_inputs(
    self,
    images: ImageInput,
    header_text: str | list[str] | None = None,
    do_convert_rgb: bool = True,
    input_data_format: ChannelDimension = ChannelDimension.FIRST,
    device: str | torch.device | None = None,
    **kwargs: Unpack[Pix2StructImageProcessorKwargs],
) -> BatchFeature:
    """
    Preprocess images for Pix2Struct.

    Converts the raw inputs to torch tensors and, when VQA mode is active, renders the
    header text onto every image before patch extraction.
    """
    # Convert raw inputs (PIL images, arrays, ...) into torch tensors.
    tensor_images = self._prepare_image_like_inputs(
        images=images,
        do_convert_rgb=do_convert_rgb,
        input_data_format=input_data_format,
        device=device,
    )

    # VQA mode renders the question text as a header on top of each image.
    if kwargs.get("is_vqa", self.is_vqa):
        if header_text is None:
            raise ValueError("A header text must be provided for VQA models.")
        font_bytes = kwargs.pop("font_bytes", None)
        font_path = kwargs.pop("font_path", None)
        # A single string is broadcast to every image in the batch.
        headers = [header_text] * len(tensor_images) if isinstance(header_text, str) else header_text
        tensor_images = [
            self.render_header(image, headers[index], font_bytes=font_bytes, font_path=font_path)
            for index, image in enumerate(tensor_images)
        ]

    return self._preprocess(tensor_images, **kwargs)
def _preprocess(
    self,
    images: list[torch.Tensor],
    do_normalize: bool,
    max_patches: int,
    patch_size: SizeDict,
    return_tensors: str | TensorType | None,
    disable_grouping: bool,
    **kwargs,
) -> BatchFeature:
    """
    Preprocess images to extract flattened patches.

    Images are grouped by shape so each group can be processed as one batched tensor,
    then results are restored to the original input order.
    """
    grouped, group_index = group_images_by_shape(images, disable_grouping=disable_grouping)

    patches_by_shape = {}
    masks_by_shape = {}
    for shape, batch in grouped.items():
        # Resize / normalization below require floating point inputs.
        if batch.dtype == torch.uint8:
            batch = batch.float()
        # Per-image mean/std normalization.
        if do_normalize:
            batch = self.normalize(batch)
        flat = self.extract_flattened_patches(images=batch, max_patches=max_patches, patch_size=patch_size)
        patches_by_shape[shape] = flat
        # Padded patch rows are all-zero, so a zero sum marks padding.
        masks_by_shape[shape] = (flat.sum(dim=-1) != 0).float()

    flattened_patches = reorder_images(patches_by_shape, group_index)
    attention_masks = reorder_images(masks_by_shape, group_index)

    # Stack into single tensors only when a tensor return type was requested.
    if return_tensors:
        flattened_patches = torch.stack(flattened_patches, dim=0)
        attention_masks = torch.stack(attention_masks, dim=0)

    return BatchFeature(
        data={"flattened_patches": flattened_patches, "attention_mask": attention_masks},
        tensor_type=return_tensors,
    )
# Public API of this module.
__all__ = ["Pix2StructImageProcessorFast"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/pix2struct/image_processing_pix2struct_fast.py",
"license": "Apache License 2.0",
"lines": 279,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/sam3/configuration_sam3.py | # Copyright 2025 Meta AI and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""SAM3 model configuration"""
from transformers import CLIPTextConfig
from ...configuration_utils import PreTrainedConfig
from ..auto import CONFIG_MAPPING, AutoConfig
class Sam3ViTConfig(PreTrainedConfig):
    r"""
    Configuration for the ViT backbone of the SAM3 vision encoder.

    Instantiating a configuration with the defaults will yield a configuration similar to that of SAM 3
    [facebook/sam3](https://huggingface.co/facebook/sam3) architecture.

    Args:
        hidden_size (`int`, *optional*, defaults to 1024):
            Dimensionality of the encoder layers.
        intermediate_size (`int`, *optional*, defaults to 4736):
            Dimensionality of the feedforward (MLP) layers.
        num_hidden_layers (`int`, *optional*, defaults to 32):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 16):
            Number of attention heads for each attention layer.
        num_channels (`int`, *optional*, defaults to 3):
            Number of input image channels.
        image_size (`int`, *optional*, defaults to 1008):
            Expected input image size.
        patch_size (`int`, *optional*, defaults to 14):
            Size of the image patches.
        hidden_act (`str`, *optional*, defaults to `"gelu"`):
            The non-linear activation function.
        layer_norm_eps (`float`, *optional*, defaults to 1e-06):
            The epsilon used by the layer normalization layers.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        rope_theta (`float`, *optional*, defaults to 10000.0):
            Base frequency for the rotary position embeddings (RoPE).
        window_size (`int`, *optional*, defaults to 24):
            Window size used by the windowed-attention layers.
        global_attn_indexes (`list[int]`, *optional*, defaults to `[7, 15, 23, 31]`):
            Indexes of the layers that use global (non-windowed) attention.
        layer_scale_init_value (`float`, *optional*):
            Initial value for layer scale. `None` disables layer scale.
        pretrain_image_size (`int`, *optional*, defaults to 336):
            Image size the model was pretrained with, used to initialize the position embeddings.
        hidden_dropout (`float`, *optional*, defaults to 0.0):
            Dropout probability for hidden states.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing weight matrices.
    """

    base_config_key = "backbone_config"
    model_type = "sam3_vit_model"

    def __init__(
        self,
        hidden_size=1024,
        intermediate_size=4736,
        num_hidden_layers=32,
        num_attention_heads=16,
        num_channels=3,
        image_size=1008,
        patch_size=14,
        hidden_act="gelu",
        layer_norm_eps=1e-6,
        attention_dropout=0.0,
        rope_theta=10000.0,
        window_size=24,
        global_attn_indexes=None,
        layer_scale_init_value=None,
        pretrain_image_size=336,
        hidden_dropout=0.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        # Core transformer dimensions.
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        # Image / patch geometry.
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        # Positional encoding and attention windowing.
        self.rope_theta = rope_theta
        self.window_size = window_size
        # Layers listed here attend globally; the mutable default is created per instance.
        self.global_attn_indexes = [7, 15, 23, 31] if global_attn_indexes is None else global_attn_indexes
        self.layer_scale_init_value = layer_scale_init_value
        self.pretrain_image_size = pretrain_image_size
        self.hidden_dropout = hidden_dropout
        self.initializer_range = initializer_range
class Sam3VisionConfig(PreTrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`Sam3VisionModel`]. It is used to instantiate a SAM
    vision encoder according to the specified arguments, defining the model architecture. Instantiating a configuration
    defaults will yield a similar configuration to that of SAM 3
    [facebook/sam3](https://huggingface.co/facebook/sam3) architecture.

    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.

    Args:
        backbone_config (`Union[dict, "PreTrainedConfig"]`, *optional*, defaults to `Sam3ViTConfig()`):
            Configuration for the vision backbone. This is used to instantiate the backbone using
            `AutoModel.from_config`.
        fpn_hidden_size (`int`, *optional*, defaults to 256):
            The hidden dimension of the FPN.
        backbone_feature_sizes (`List[List[int]]`, *optional*, defaults to `[[288, 288], [144, 144], [72, 72]]`):
            The spatial sizes (height, width) of the feature maps from the backbone at different scales.
        scale_factors (`list[float]`, *optional*, defaults to `[4.0, 2.0, 1.0, 0.5]`):
            Scale factors for FPN multi-scale features. List of scaling factors for each FPN level.
        hidden_act (`str`, *optional*, defaults to `"gelu"`):
            The non-linear activation function in the neck.
        layer_norm_eps (`float`, *optional*, defaults to 1e-06):
            The epsilon for the layer normalization.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
    """

    base_config_key = "vision_config"
    model_type = "sam3_vision_model"
    sub_configs = {
        "backbone_config": AutoConfig,
    }

    def __init__(
        self,
        backbone_config=None,
        fpn_hidden_size=256,
        backbone_feature_sizes=None,
        scale_factors=None,
        hidden_act="gelu",
        layer_norm_eps=1e-6,
        initializer_range=0.02,
        **kwargs,
    ):
        scale_factors = [4.0, 2.0, 1.0, 0.5] if scale_factors is None else scale_factors
        if backbone_feature_sizes is None:
            backbone_feature_sizes = [[288, 288], [144, 144], [72, 72]]
        if isinstance(backbone_config, dict):
            # Copy before injecting the default model_type so the caller's dict is not mutated.
            backbone_config = {**backbone_config}
            backbone_config.setdefault("model_type", "sam3_vit_model")
            backbone_config = CONFIG_MAPPING[backbone_config["model_type"]](**backbone_config)
        elif backbone_config is None:
            backbone_config = CONFIG_MAPPING["sam3_vit_model"]()
        self.backbone_config = backbone_config

        # Neck
        self.fpn_hidden_size = fpn_hidden_size
        self.scale_factors = scale_factors
        self.backbone_feature_sizes = backbone_feature_sizes
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        super().__init__(**kwargs)

    @property
    def image_size(self):
        """Image size for the vision encoder (delegates to the backbone config)."""
        return self.backbone_config.image_size

    @image_size.setter
    def image_size(self, value):
        """Set the image size and propagate to the backbone config."""
        self.backbone_config.image_size = value
class Sam3GeometryEncoderConfig(PreTrainedConfig):
    r"""
    Configuration for the SAM3 geometry encoder, which processes geometry prompts with a small transformer.

    Args:
        hidden_size (`int`, *optional*, defaults to 256):
            Dimensionality of the encoder layers.
        num_layers (`int`, *optional*, defaults to 3):
            Number of transformer encoder layers used for processing geometry prompts.
        num_attention_heads (`int`, *optional*, defaults to 8):
            Number of attention heads per layer.
        intermediate_size (`int`, *optional*, defaults to 2048):
            Dimensionality of the feedforward layers.
        dropout (`float`, *optional*, defaults to 0.1):
            Dropout probability.
        hidden_act (`str`, *optional*, defaults to `"relu"`):
            Activation function used in the feedforward network.
        hidden_dropout (`float`, *optional*, defaults to 0.0):
            Dropout probability applied to hidden states.
        layer_norm_eps (`float`, *optional*, defaults to 1e-06):
            Epsilon used by the layer normalization layers.
        roi_size (`int`, *optional*, defaults to 7):
            ROI size used for box pooling operations.
        initializer_range (`float`, *optional*, defaults to 0.02):
            Standard deviation of the truncated normal initializer for weight matrices.
    """

    model_type = "sam3_geometry_encoder"

    def __init__(
        self,
        hidden_size=256,
        num_layers=3,
        num_attention_heads=8,
        intermediate_size=2048,
        dropout=0.1,
        hidden_act="relu",
        hidden_dropout=0.0,
        layer_norm_eps=1e-6,
        roi_size=7,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        # Transformer dimensions.
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        # Regularization / activation.
        self.dropout = dropout
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.layer_norm_eps = layer_norm_eps
        # Box pooling and weight initialization.
        self.roi_size = roi_size
        self.initializer_range = initializer_range
class Sam3DETREncoderConfig(PreTrainedConfig):
    r"""
    Configuration for the SAM3 DETR encoder (vision-text fusion encoder).

    Args:
        hidden_size (`int`, *optional*, defaults to 256):
            Dimensionality of the encoder layers.
        num_layers (`int`, *optional*, defaults to 6):
            Number of encoder layers.
        num_attention_heads (`int`, *optional*, defaults to 8):
            Number of attention heads per layer.
        intermediate_size (`int`, *optional*, defaults to 2048):
            Dimensionality of the feedforward layers.
        dropout (`float`, *optional*, defaults to 0.1):
            Dropout probability.
        hidden_act (`str`, *optional*, defaults to `"relu"`):
            Activation function used in the feedforward network.
        hidden_dropout (`float`, *optional*, defaults to 0.0):
            Dropout probability applied to hidden states.
        layer_norm_eps (`float`, *optional*, defaults to 1e-06):
            Epsilon used by the layer normalization layers.
        initializer_range (`float`, *optional*, defaults to 0.02):
            Standard deviation of the truncated normal initializer for weight matrices.
    """

    model_type = "sam3_detr_encoder"

    def __init__(
        self,
        hidden_size=256,
        num_layers=6,
        num_attention_heads=8,
        intermediate_size=2048,
        dropout=0.1,
        hidden_act="relu",
        hidden_dropout=0.0,
        layer_norm_eps=1e-6,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        # Transformer dimensions.
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        # Regularization / activation.
        self.dropout = dropout
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
class Sam3DETRDecoderConfig(PreTrainedConfig):
    r"""
    Configuration for the SAM3 DETR decoder (object query decoder).

    Args:
        hidden_size (`int`, *optional*, defaults to 256):
            Dimensionality of the decoder layers.
        num_layers (`int`, *optional*, defaults to 6):
            Number of decoder layers.
        num_queries (`int`, *optional*, defaults to 200):
            Number of object queries.
        num_attention_heads (`int`, *optional*, defaults to 8):
            Number of attention heads per layer.
        intermediate_size (`int`, *optional*, defaults to 2048):
            Dimensionality of the feedforward layers.
        dropout (`float`, *optional*, defaults to 0.1):
            Dropout probability.
        hidden_act (`str`, *optional*, defaults to `"relu"`):
            Activation function used in the feedforward network.
        hidden_dropout (`float`, *optional*, defaults to 0.0):
            Dropout probability applied to hidden states.
        layer_norm_eps (`float`, *optional*, defaults to 1e-06):
            Epsilon used by the layer normalization layers.
        initializer_range (`float`, *optional*, defaults to 0.02):
            Standard deviation of the truncated normal initializer for weight matrices.
    """

    model_type = "sam3_detr_decoder"

    def __init__(
        self,
        hidden_size=256,
        num_layers=6,
        num_queries=200,
        num_attention_heads=8,
        intermediate_size=2048,
        dropout=0.1,
        hidden_act="relu",
        hidden_dropout=0.0,
        layer_norm_eps=1e-6,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        # Transformer dimensions and query budget.
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_queries = num_queries
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        # Regularization / activation.
        self.dropout = dropout
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
class Sam3MaskDecoderConfig(PreTrainedConfig):
    r"""
    Configuration for the SAM3 mask decoder (pixel-level mask prediction).

    Args:
        hidden_size (`int`, *optional*, defaults to 256):
            Dimensionality of the mask decoder.
        num_upsampling_stages (`int`, *optional*, defaults to 3):
            Number of upsampling stages in the pixel decoder (FPN).
        layer_norm_eps (`float`, *optional*, defaults to 1e-06):
            Epsilon used by the layer normalization layers.
        dropout (`float`, *optional*, defaults to 0.0):
            Dropout probability for the prompt cross-attention.
        num_attention_heads (`int`, *optional*, defaults to 8):
            Number of attention heads used by the prompt cross-attention.
        initializer_range (`float`, *optional*, defaults to 0.02):
            Standard deviation of the truncated normal initializer for weight matrices.
    """

    model_type = "sam3_mask_decoder"

    def __init__(
        self,
        hidden_size=256,
        num_upsampling_stages=3,
        layer_norm_eps=1e-6,
        dropout=0.0,
        num_attention_heads=8,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        # Pixel decoder geometry.
        self.hidden_size = hidden_size
        self.num_upsampling_stages = num_upsampling_stages
        self.layer_norm_eps = layer_norm_eps
        # Prompt cross-attention settings.
        self.dropout = dropout
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
class Sam3Config(PreTrainedConfig):
    r"""
    Configuration class to store the configuration of a [`Sam3Model`].

    Instantiating a configuration with the defaults will yield a similar configuration to that of SAM 3
    [facebook/sam3](https://huggingface.co/facebook/sam3) architecture.

    This is the main configuration class that combines all sub-configurations for the SAM3 model.

    <Tip>

    SAM3 checkpoints with `model_type="sam3_video"` are compatible with `Sam3Model` since the video variant weights
    are a superset of the image-only model weights. You may see a warning about model type mismatch when loading
    such checkpoints, which can be safely ignored in this case.

    </Tip>

    Args:
        vision_config (`dict` or `Sam3VisionConfig`, *optional*):
            Configuration for the vision encoder.
        text_config (`dict` or `Sam3TextConfig`, *optional*):
            Configuration for the text encoder.
        geometry_encoder_config (`dict` or `Sam3GeometryEncoderConfig`, *optional*):
            Configuration for the geometry encoder.
        detr_encoder_config (`dict` or `Sam3DETREncoderConfig`, *optional*):
            Configuration for the DETR encoder.
        detr_decoder_config (`dict` or `Sam3DETRDecoderConfig`, *optional*):
            Configuration for the DETR decoder.
        mask_decoder_config (`dict` or `Sam3MaskDecoderConfig`, *optional*):
            Configuration for the mask decoder.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing weight matrices.

    Example:

    ```python
    >>> from transformers import Sam3Config, Sam3Model

    >>> # Initializing a SAM3 configuration
    >>> configuration = Sam3Config()

    >>> # Initializing a model from the configuration
    >>> model = Sam3Model(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```
    """

    model_type = "sam3"
    is_composition = True
    sub_configs = {
        "vision_config": Sam3VisionConfig,
        "text_config": CLIPTextConfig,
        "geometry_encoder_config": Sam3GeometryEncoderConfig,
        "detr_encoder_config": Sam3DETREncoderConfig,
        "detr_decoder_config": Sam3DETRDecoderConfig,
        "mask_decoder_config": Sam3MaskDecoderConfig,
    }

    @staticmethod
    def _resolve_sub_config(config_class, value):
        """Normalize a sub-config argument: None -> default config, dict -> config built from it, config -> as-is."""
        if value is None:
            value = {}
        if isinstance(value, dict):
            return config_class(**value)
        return value

    def __init__(
        self,
        vision_config=None,
        text_config=None,
        geometry_encoder_config=None,
        detr_encoder_config=None,
        detr_decoder_config=None,
        mask_decoder_config=None,
        initializer_range=0.02,
        **kwargs,
    ):
        # Vision config
        self.vision_config = self._resolve_sub_config(Sam3VisionConfig, vision_config)

        # Text config (CLIPTextModelWithProjection defaults)
        if text_config is None:
            text_config = {
                "vocab_size": 49408,
                "hidden_size": 1024,
                "intermediate_size": 4096,  # hidden_size * mlp_ratio (1024 * 4)
                "projection_dim": 512,  # CLIP's internal projection dimension
                "num_hidden_layers": 24,
                "num_attention_heads": 16,
                "max_position_embeddings": 32,
                "hidden_act": "gelu",
            }
        self.text_config = self._resolve_sub_config(CLIPTextConfig, text_config)

        # Remaining sub-configs share the same normalization rules.
        self.geometry_encoder_config = self._resolve_sub_config(Sam3GeometryEncoderConfig, geometry_encoder_config)
        self.detr_encoder_config = self._resolve_sub_config(Sam3DETREncoderConfig, detr_encoder_config)
        self.detr_decoder_config = self._resolve_sub_config(Sam3DETRDecoderConfig, detr_decoder_config)
        self.mask_decoder_config = self._resolve_sub_config(Sam3MaskDecoderConfig, mask_decoder_config)

        self.initializer_range = initializer_range
        super().__init__(**kwargs)

    @property
    def image_size(self):
        """Image size for the SAM3 model (delegates to the vision config)."""
        return self.vision_config.image_size

    @image_size.setter
    def image_size(self, value):
        """Set the image size and propagate to the vision config."""
        self.vision_config.image_size = value
# Public API of this module.
__all__ = [
    "Sam3Config",
    "Sam3ViTConfig",
    "Sam3VisionConfig",
    "Sam3GeometryEncoderConfig",
    "Sam3DETREncoderConfig",
    "Sam3DETRDecoderConfig",
    "Sam3MaskDecoderConfig",
]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/sam3/configuration_sam3.py",
"license": "Apache License 2.0",
"lines": 479,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/sam3/convert_sam3_to_hf.py | # Copyright 2025 The Meta AI Authors and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Convert SAM3 checkpoints from the original implementation to HuggingFace format.
Original repository: https://github.com/facebookresearch/segment-anything-3
"""
import argparse
import gc
import os
import regex as re
import torch
from transformers import CLIPTokenizerFast, Sam3Config, Sam3ImageProcessorFast, Sam3Model, Sam3Processor
from transformers.utils import logging
# Emit INFO-level progress messages while the conversion script runs.
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# fmt: off
# Regex rules mapping original checkpoint key prefixes to HF names. The rules are
# applied SEQUENTIALLY in insertion order (see convert_old_keys_to_new_keys), so a
# later rule may rewrite the output of an earlier one — e.g. the first rule strips
# the "sam3_model." wrapper so the remaining rules match the bare keys.
ORIGINAL_TO_CONVERTED_KEY_MAPPING = {
    r"^sam3_model\.": r"",
    # ============================================================================
    # Vision Encoder - ViT Backbone
    # ============================================================================
    r"^backbone\.vision_backbone\.trunk\.": r"vision_encoder.backbone.",
    r"^vision_encoder\.backbone\.pos_embed": r"vision_encoder.backbone.embeddings.position_embeddings",
    r"^vision_encoder\.backbone\.patch_embed\.proj\.": r"vision_encoder.backbone.embeddings.patch_embeddings.projection.",
    r"^vision_encoder\.backbone\.ln_pre\.": r"vision_encoder.backbone.layer_norm.",
    r"^vision_encoder\.backbone\.blocks\.(\d+)\.norm1\.": r"vision_encoder.backbone.layers.\1.layer_norm1.",
    r"^vision_encoder\.backbone\.blocks\.(\d+)\.norm2\.": r"vision_encoder.backbone.layers.\1.layer_norm2.",
    r"^vision_encoder\.backbone\.blocks\.(\d+)\.attn\.qkv\.": r"vision_encoder.backbone.layers.\1.attention.qkv.",
    r"^vision_encoder\.backbone\.blocks\.(\d+)\.attn\.proj\.": r"vision_encoder.backbone.layers.\1.attention.o_proj.",
    r"^vision_encoder\.backbone\.blocks\.(\d+)\.attn\.freqs_cis": r"vision_encoder.backbone.layers.\1.rotary_emb.rope_embeddings",
    r"^vision_encoder\.backbone\.blocks\.(\d+)\.mlp\.fc1\.": r"vision_encoder.backbone.layers.\1.mlp.fc1.",
    r"^vision_encoder\.backbone\.blocks\.(\d+)\.mlp\.fc2\.": r"vision_encoder.backbone.layers.\1.mlp.fc2.",
    # Vision Encoder - FPN Neck
    r"^backbone\.vision_backbone\.neck\.fpn\.(\d+)\.": r"vision_encoder.neck.fpn_layers.\1.",
    r"^backbone\.vision_backbone\.convs\.(\d+)\.dconv_2x2_0\.": r"vision_encoder.neck.fpn_layers.\1.scale_layers.0.",
    r"^backbone\.vision_backbone\.convs\.(\d+)\.dconv_2x2_1\.": r"vision_encoder.neck.fpn_layers.\1.scale_layers.2.",
    r"^backbone\.vision_backbone\.convs\.(\d+)\.dconv_2x2\.": r"vision_encoder.neck.fpn_layers.\1.scale_layers.0.",
    r"^backbone\.vision_backbone\.convs\.(\d+)\.maxpool_2x2\.": r"vision_encoder.neck.fpn_layers.\1.scale_layers.0.",
    r"^backbone\.vision_backbone\.convs\.(\d+)\.conv_1x1\.": r"vision_encoder.neck.fpn_layers.\1.proj1.",
    r"^backbone\.vision_backbone\.convs\.(\d+)\.conv_3x3\.": r"vision_encoder.neck.fpn_layers.\1.proj2.",
    # ============================================================================
    # Text Encoder (CLIP)
    # ============================================================================
    r"^backbone\.language_backbone\.encoder\.": r"text_encoder.",
    r"^text_encoder\.token_embedding\.": r"text_encoder.text_model.embeddings.token_embedding.",
    r"^text_encoder\.positional_embedding": r"text_encoder.text_model.embeddings.position_embedding.weight",
    r"^text_encoder\.ln_final\.": r"text_encoder.text_model.final_layer_norm.",
    r"^text_encoder\.text_projection": r"text_encoder.text_projection.weight",
    r"^text_encoder\.transformer\.resblocks\.(\d+)\.attn\.in_proj_": r"text_encoder.text_model.encoder.layers.\1.self_attn.in_proj_",
    r"^text_encoder\.transformer\.resblocks\.(\d+)\.attn\.out_proj\.": r"text_encoder.text_model.encoder.layers.\1.self_attn.out_proj.",
    r"^text_encoder\.transformer\.resblocks\.(\d+)\.ln_1\.": r"text_encoder.text_model.encoder.layers.\1.layer_norm1.",
    r"^text_encoder\.transformer\.resblocks\.(\d+)\.ln_2\.": r"text_encoder.text_model.encoder.layers.\1.layer_norm2.",
    r"^text_encoder\.transformer\.resblocks\.(\d+)\.mlp\.c_fc\.": r"text_encoder.text_model.encoder.layers.\1.mlp.fc1.",
    r"^text_encoder\.transformer\.resblocks\.(\d+)\.mlp\.c_proj\.": r"text_encoder.text_model.encoder.layers.\1.mlp.fc2.",
    r"^backbone\.language_backbone\.resizer\.": r"text_projection.",
    # ============================================================================
    # Geometry Encoder
    # ============================================================================
    r"^geometry_encoder\.encode\.(\d+)\.cross_attn_image\.out_proj\.": r"geometry_encoder.layers.\1.cross_attn.o_proj.",
    r"^geometry_encoder\.encode\.(\d+)\.cross_attn_image\.": r"geometry_encoder.layers.\1.cross_attn.",
    r"^geometry_encoder\.encode\.(\d+)\.self_attn\.out_proj\.": r"geometry_encoder.layers.\1.self_attn.o_proj.",
    r"^geometry_encoder\.encode\.(\d+)\.self_attn\.": r"geometry_encoder.layers.\1.self_attn.",
    r"^geometry_encoder\.encode\.(\d+)\.linear1\.": r"geometry_encoder.layers.\1.mlp.fc1.",
    r"^geometry_encoder\.encode\.(\d+)\.linear2\.": r"geometry_encoder.layers.\1.mlp.fc2.",
    r"^geometry_encoder\.encode\.(\d+)\.norm1\.": r"geometry_encoder.layers.\1.layer_norm1.",
    r"^geometry_encoder\.encode\.(\d+)\.norm2\.": r"geometry_encoder.layers.\1.layer_norm2.",
    r"^geometry_encoder\.encode\.(\d+)\.norm3\.": r"geometry_encoder.layers.\1.layer_norm3.",
    r"^geometry_encoder\.img_pre_norm\.": r"geometry_encoder.vision_layer_norm.",
    r"^geometry_encoder\.norm\.": r"geometry_encoder.prompt_layer_norm.",
    r"^geometry_encoder\.encode_norm\.": r"geometry_encoder.output_layer_norm.",
    # ============================================================================
    # DETR Encoder
    # ============================================================================
    r"^transformer\.encoder\.layers\.(\d+)\.cross_attn_image\.out_proj\.": r"detr_encoder.layers.\1.cross_attn.o_proj.",
    r"^transformer\.encoder\.layers\.(\d+)\.cross_attn_image\.": r"detr_encoder.layers.\1.cross_attn.",
    r"^transformer\.encoder\.layers\.(\d+)\.self_attn\.out_proj\.": r"detr_encoder.layers.\1.self_attn.o_proj.",
    r"^transformer\.encoder\.layers\.(\d+)\.self_attn\.": r"detr_encoder.layers.\1.self_attn.",
    r"^transformer\.encoder\.layers\.(\d+)\.cross_attn\.out_proj\.": r"detr_encoder.layers.\1.cross_attn.o_proj.",
    r"^transformer\.encoder\.layers\.(\d+)\.cross_attn\.": r"detr_encoder.layers.\1.cross_attn.",
    r"^transformer\.encoder\.layers\.(\d+)\.linear1\.": r"detr_encoder.layers.\1.mlp.fc1.",
    r"^transformer\.encoder\.layers\.(\d+)\.linear2\.": r"detr_encoder.layers.\1.mlp.fc2.",
    r"^transformer\.encoder\.layers\.(\d+)\.norm1\.": r"detr_encoder.layers.\1.layer_norm1.",
    r"^transformer\.encoder\.layers\.(\d+)\.norm2\.": r"detr_encoder.layers.\1.layer_norm2.",
    r"^transformer\.encoder\.layers\.(\d+)\.norm3\.": r"detr_encoder.layers.\1.layer_norm3.",
    # ============================================================================
    # DETR Decoder
    # ============================================================================
    r"^transformer\.decoder\.query_embed\.": r"detr_decoder.query_embed.",
    r"^transformer\.decoder\.reference_points\.": r"detr_decoder.reference_points.",
    r"^transformer\.decoder\.instance_query_embed\.": r"detr_decoder.instance_query_embed.",
    r"^transformer\.decoder\.instance_reference_points\.": r"detr_decoder.instance_reference_points.",
    r"^transformer\.decoder\.presence_token\.": r"detr_decoder.presence_token.",
    r"^transformer\.decoder\.presence_token_head\.layers\.0\.": r"detr_decoder.presence_head.layer1.",
    r"^transformer\.decoder\.presence_token_head\.layers\.1\.": r"detr_decoder.presence_head.layer2.",
    r"^transformer\.decoder\.presence_token_head\.layers\.2\.": r"detr_decoder.presence_head.layer3.",
    r"^transformer\.decoder\.presence_token_out_norm\.": r"detr_decoder.presence_layer_norm.",
    r"^transformer\.decoder\.norm\.": r"detr_decoder.output_layer_norm.",
    r"^transformer\.decoder\.bbox_embed\.layers\.0\.": r"detr_decoder.box_head.layer1.",
    r"^transformer\.decoder\.bbox_embed\.layers\.1\.": r"detr_decoder.box_head.layer2.",
    r"^transformer\.decoder\.bbox_embed\.layers\.2\.": r"detr_decoder.box_head.layer3.",
    r"^transformer\.decoder\.instance_bbox_embed\.layers\.0\.": r"detr_decoder.instance_box_head.layer1.",
    r"^transformer\.decoder\.instance_bbox_embed\.layers\.1\.": r"detr_decoder.instance_box_head.layer2.",
    r"^transformer\.decoder\.instance_bbox_embed\.layers\.2\.": r"detr_decoder.instance_box_head.layer3.",
    r"^transformer\.decoder\.ref_point_head\.layers\.0\.": r"detr_decoder.ref_point_head.layer1.",
    r"^transformer\.decoder\.ref_point_head\.layers\.1\.": r"detr_decoder.ref_point_head.layer2.",
    r"^transformer\.decoder\.boxRPB_embed_x\.layers\.0\.": r"detr_decoder.box_rpb_embed_x.layer1.",
    r"^transformer\.decoder\.boxRPB_embed_x\.layers\.1\.": r"detr_decoder.box_rpb_embed_x.layer2.",
    r"^transformer\.decoder\.boxRPB_embed_y\.layers\.0\.": r"detr_decoder.box_rpb_embed_y.layer1.",
    r"^transformer\.decoder\.boxRPB_embed_y\.layers\.1\.": r"detr_decoder.box_rpb_embed_y.layer2.",
    r"^transformer\.decoder\.layers\.(\d+)\.self_attn\.out_proj\.": r"detr_decoder.layers.\1.self_attn.o_proj.",
    r"^transformer\.decoder\.layers\.(\d+)\.self_attn\.": r"detr_decoder.layers.\1.self_attn.",
    r"^transformer\.decoder\.layers\.(\d+)\.ca_text\.out_proj\.": r"detr_decoder.layers.\1.text_cross_attn.o_proj.",
    r"^transformer\.decoder\.layers\.(\d+)\.ca_text\.": r"detr_decoder.layers.\1.text_cross_attn.",
    r"^transformer\.decoder\.layers\.(\d+)\.cross_attn\.out_proj\.": r"detr_decoder.layers.\1.vision_cross_attn.o_proj.",
    r"^transformer\.decoder\.layers\.(\d+)\.cross_attn\.": r"detr_decoder.layers.\1.vision_cross_attn.",
    r"^transformer\.decoder\.layers\.(\d+)\.linear1\.": r"detr_decoder.layers.\1.mlp.fc1.",
    r"^transformer\.decoder\.layers\.(\d+)\.linear2\.": r"detr_decoder.layers.\1.mlp.fc2.",
    r"^transformer\.decoder\.layers\.(\d+)\.norm1\.": r"detr_decoder.layers.\1.vision_cross_attn_layer_norm.",
    r"^transformer\.decoder\.layers\.(\d+)\.catext_norm\.": r"detr_decoder.layers.\1.text_cross_attn_layer_norm.",
    r"^transformer\.decoder\.layers\.(\d+)\.norm2\.": r"detr_decoder.layers.\1.self_attn_layer_norm.",
    r"^transformer\.decoder\.layers\.(\d+)\.norm3\.": r"detr_decoder.layers.\1.mlp_layer_norm.",
    # ============================================================================
    # Dot Product Scoring
    # ============================================================================
    r"^dot_prod_scoring\.prompt_mlp\.layers\.0\.": r"dot_product_scoring.text_mlp.layer1.",
    r"^dot_prod_scoring\.prompt_mlp\.layers\.1\.": r"dot_product_scoring.text_mlp.layer2.",
    r"^dot_prod_scoring\.prompt_mlp\.out_norm\.": r"dot_product_scoring.text_mlp_out_norm.",
    r"^dot_prod_scoring\.prompt_proj\.": r"dot_product_scoring.text_proj.",
    r"^dot_prod_scoring\.hs_proj\.": r"dot_product_scoring.query_proj.",
    # ============================================================================
    # Mask Decoder
    # ============================================================================
    r"^segmentation_head\.pixel_decoder\.conv_layers\.(\d+)\.": r"mask_decoder.pixel_decoder.conv_layers.\1.",
    r"^segmentation_head\.pixel_decoder\.norms\.(\d+)\.": r"mask_decoder.pixel_decoder.norms.\1.",
    r"^segmentation_head\.mask_embed\.layers\.(\d+)\.": r"mask_decoder.mask_embedder.layers.\1.",
    r"^segmentation_head\.mask_predictor\.mask_embed\.layers\.(\d+)\.": r"mask_decoder.mask_embedder.layers.\1.",
    r"^segmentation_head\.instance_seg_head\.": r"mask_decoder.instance_projection.",
    r"^segmentation_head\.semantic_seg_head\.": r"mask_decoder.semantic_projection.",
    r"^segmentation_head\.cross_attend_prompt\.out_proj\.": r"mask_decoder.prompt_cross_attn.o_proj.",
    r"^segmentation_head\.cross_attend_prompt\.": r"mask_decoder.prompt_cross_attn.",
    r"^segmentation_head\.cross_attn_norm\.": r"mask_decoder.prompt_cross_attn_norm.",
}
# fmt: on
def convert_old_keys_to_new_keys(state_dict_keys: list[str]) -> dict[str, str]:
    """
    Map original SAM3 checkpoint key names to their HuggingFace equivalents.

    All keys are joined into a single newline-separated string so every regex in
    ``ORIGINAL_TO_CONVERTED_KEY_MAPPING`` is applied once over the whole batch
    rather than once per key.

    Args:
        state_dict_keys: Keys of the original checkpoint.

    Returns:
        Dictionary mapping each original key to its converted name.
    """
    if state_dict_keys is None:
        return {}
    joined_old = "\n".join(state_dict_keys)
    joined_new = joined_old
    # Apply every rename pattern over the full key list at once.
    for source_pattern, target in ORIGINAL_TO_CONVERTED_KEY_MAPPING.items():
        joined_new = re.sub(source_pattern, target, joined_new, flags=re.MULTILINE)
    return dict(zip(joined_old.split("\n"), joined_new.split("\n")))
def split_qkv(state_dict: dict) -> dict:
    """
    Replace fused QKV projection tensors with separate Q, K and V tensors.

    The original SAM3 vision backbone stores fused ``.attention.qkv.*`` tensors,
    while the text encoder, DETR decoder cross-attention and mask decoder store
    ``.in_proj_weight`` / ``.in_proj_bias`` tensors; the refactored model expects
    separate ``q_proj`` / ``k_proj`` / ``v_proj`` entries for all of them.

    Args:
        state_dict: State dictionary containing fused QKV tensors.

    Returns:
        The state dictionary (mutated in place) with fused entries split.
    """
    # Vision backbone: .attention.qkv.* -> .attention.{q,k,v}_proj.*
    for fused_key in [name for name in state_dict if ".attention.qkv." in name]:
        # The fused tensor stacks Q, K, V along the output dimension (dim 0).
        query, key_tensor, value = torch.chunk(state_dict.pop(fused_key), 3, dim=0)
        state_dict[fused_key.replace(".qkv.", ".q_proj.")] = query
        state_dict[fused_key.replace(".qkv.", ".k_proj.")] = key_tensor
        state_dict[fused_key.replace(".qkv.", ".v_proj.")] = value
    # All other attention modules: .in_proj_{weight,bias} -> .{q,k,v}_proj.{weight,bias}
    for fused_key in [name for name in state_dict if ".in_proj_" in name]:
        query, key_tensor, value = torch.chunk(state_dict.pop(fused_key), 3, dim=0)
        if fused_key.endswith("in_proj_weight"):
            prefix, suffix = fused_key.replace("in_proj_weight", ""), "weight"
        elif fused_key.endswith("in_proj_bias"):
            prefix, suffix = fused_key.replace("in_proj_bias", ""), "bias"
        else:
            # Mirrors the original behavior: keys matching ".in_proj_" but
            # ending in neither suffix are dropped.
            continue
        state_dict[prefix + "q_proj." + suffix] = query
        state_dict[prefix + "k_proj." + suffix] = key_tensor
        state_dict[prefix + "v_proj." + suffix] = value
    return state_dict
def load_original_state_dict(checkpoint_path: str) -> dict[str, torch.Tensor]:
    """
    Load the original SAM3 checkpoint and unwrap its state dict.

    Checkpoints may store the weights directly, or nest them under a
    ``"model"`` or ``"state_dict"`` key; all three layouts are handled.
    """
    print(f"Loading original checkpoint from {checkpoint_path}")
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    # Unwrap the first known container key, falling back to the raw mapping.
    for wrapper_key in ("model", "state_dict"):
        if wrapper_key in checkpoint:
            state_dict = checkpoint[wrapper_key]
            break
    else:
        state_dict = checkpoint
    print(f"Loaded {len(state_dict)} keys from checkpoint")
    return state_dict
def get_sam3_config(
    vision_config: dict | None = None,
    text_config: dict | None = None,
) -> Sam3Config:
    """
    Create a SAM3 configuration, optionally overriding sub-config attributes.

    Args:
        vision_config: Optional attribute overrides applied to ``config.vision_config``.
        text_config: Optional attribute overrides applied to ``config.text_config``
            (a CLIPTextConfig).

    Returns:
        Sam3Config instance with any requested overrides applied.
    """
    config = Sam3Config()
    # Apply each override dict onto its corresponding sub-config, if given.
    for sub_config, overrides in (
        (config.vision_config, vision_config),
        (config.text_config, text_config),
    ):
        if overrides is None:
            continue
        for attribute, value in overrides.items():
            setattr(sub_config, attribute, value)
    return config
def convert_sam3_checkpoint(
    checkpoint_path: str,
    output_path: str,
    config: Sam3Config | None = None,
    push_to_hub: bool = False,
    repo_id: str | None = None,
):
    """
    Convert SAM3 checkpoint from original format to HuggingFace format.

    The steps are order-sensitive: rename keys, strip the cls-token position
    embedding, split fused QKV tensors, transpose the CLIP text projection,
    and only then load the result into `Sam3Model` non-strictly (a few
    parameters intentionally have no counterpart; see the NOTE comment below).
    Finally the model and a matching processor are saved to `output_path`,
    optionally pushed to the Hub, and the saved model is reloaded once as a
    sanity check.

    Args:
        checkpoint_path: Path to the original checkpoint file
        output_path: Path to save the converted checkpoint
        config: Optional Sam3Config to use (otherwise creates default)
        push_to_hub: Whether to push the model to the Hub
        repo_id: Repository ID for pushing to Hub

    Raises:
        ValueError: If `push_to_hub` is True but `repo_id` is None.
    """
    # Create output directory
    os.makedirs(output_path, exist_ok=True)
    # Load configuration
    if config is None:
        config = get_sam3_config()
    config.architectures = ["Sam3Model"]
    config.save_pretrained(output_path)
    print("Model config saved successfully")
    # Load and convert weights
    print("Loading original checkpoint...")
    state_dict_old = load_original_state_dict(checkpoint_path)
    print("Converting checkpoint keys...")
    all_keys = list(state_dict_old.keys())
    key_mapping = convert_old_keys_to_new_keys(all_keys)
    # Create new state dict with converted keys
    state_dict_new = {}
    for old_key in all_keys:
        # Keys not matched by any regex pattern keep their original name.
        new_key = key_mapping.get(old_key, old_key)
        # Special handling: Strip cls token from vision backbone position embeddings
        if new_key == "vision_encoder.backbone.embeddings.position_embeddings":
            # Original has [1, 577, 1024] with cls token, but refactored expects [1, 576, 1024] without cls token
            # Strip the first position (cls token position)
            state_dict_new[new_key] = state_dict_old[old_key][:, 1:, :]
        else:
            state_dict_new[new_key] = state_dict_old[old_key]
    # Free the original copy before allocating more tensors to keep peak memory down.
    del state_dict_old
    gc.collect()
    # Split combined QKV projections into separate Q, K, V projections
    print("Splitting QKV projections...")
    state_dict_new = split_qkv(state_dict_new)
    # Transpose CLIP text projection (stored transposed in original)
    if "text_encoder.text_projection.weight" in state_dict_new:
        print("Transposing CLIP text_projection...")
        state_dict_new["text_encoder.text_projection.weight"] = state_dict_new["text_encoder.text_projection.weight"].T
    # Load into HF model
    print("Loading weights into Sam3Model...")
    model = Sam3Model(config)
    # strict=False because some keys are intentionally absent on either side (see NOTE below).
    missing_keys, unexpected_keys = model.load_state_dict(state_dict_new, strict=False)
    if missing_keys:
        logger.warning(f"Missing keys ({len(missing_keys)}):")
        for key in missing_keys:  # Show more keys for debugging
            logger.warning(f" - {key}")
    if unexpected_keys:
        logger.warning(f"Unexpected keys ({len(unexpected_keys)}):")
        for key in unexpected_keys:  # Show more keys for debugging
            logger.warning(f" - {key}")
    # Note: Some missing/unexpected keys are expected:
    # - vision_encoder.backbone.embeddings.patch_embeddings.projection.bias: patch projection has bias=False
    # - geometry_encoder.mask_encoder.projection.*: this is nn.Identity() in original (no weights)
    # - rotary_emb.rope_embeddings: pre-computed in original, computed on-the-fly in refactored
    # - text_encoder.text_projection.bias: projection layer might not have bias
    # Save model
    print(f"Saving converted model to {output_path}")
    model.save_pretrained(
        output_path,
    )
    # Save processor
    print("Creating and saving processor...")
    image_processor = Sam3ImageProcessorFast()
    tokenizer = CLIPTokenizerFast.from_pretrained("openai/clip-vit-base-patch32", max_length=32, model_max_length=32)
    processor = Sam3Processor(image_processor=image_processor, tokenizer=tokenizer)
    processor.save_pretrained(output_path)
    # Push to hub if requested
    if push_to_hub:
        if repo_id is None:
            raise ValueError("repo_id must be provided when push_to_hub=True")
        print(f"Pushing model to Hub: {repo_id}")
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)
    print("Conversion complete!")
    print(f"Model saved successfully to: {output_path}")
    # Cleanup
    del state_dict_new, model
    gc.collect()
    # Verify the conversion by reloading
    print("\nVerifying converted checkpoint can be loaded...")
    try:
        model = Sam3Model.from_pretrained(output_path)
        param_count = sum(p.numel() for p in model.parameters())
        print(f"✓ Successfully loaded model with {param_count:,} parameters")
        del model
        gc.collect()
    except Exception as e:
        # Best-effort sanity check: report the failure but do not abort —
        # the converted files are already on disk at this point.
        print(f"✗ Failed to reload model: {e}")
    print("\n" + "=" * 80)
    print("Conversion finished!")
    print("=" * 80)
    print(f"Output directory: {output_path}")
    print("\nTo test the model, you can run:")
    print(">>> from transformers import Sam3Model")
    print(f">>> model = Sam3Model.from_pretrained('{output_path}')")
    print("=" * 80)
def main():
    """Parse command-line arguments and run the SAM3 checkpoint conversion."""
    cli = argparse.ArgumentParser(description="Convert SAM3 checkpoint to HuggingFace format")
    cli.add_argument(
        "--checkpoint_path",
        type=str,
        required=True,
        help="Path to the original SAM3 checkpoint file",
    )
    cli.add_argument(
        "--output_path",
        type=str,
        required=True,
        help="Path to save the converted checkpoint",
    )
    cli.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the converted model to the Hugging Face Hub",
    )
    cli.add_argument(
        "--repo_id",
        type=str,
        default=None,
        help="Repository ID for pushing to Hub (e.g., 'facebook/sam3-large')",
    )
    options = cli.parse_args()
    convert_sam3_checkpoint(
        checkpoint_path=options.checkpoint_path,
        output_path=options.output_path,
        push_to_hub=options.push_to_hub,
        repo_id=options.repo_id,
    )
# Entry point when executed as a standalone script.
if __name__ == "__main__":
    main()
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/sam3/convert_sam3_to_hf.py",
"license": "Apache License 2.0",
"lines": 389,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/sam3/modeling_sam3.py | # Copyright 2025 The Meta AI Authors and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from collections.abc import Callable, Iterable
from dataclasses import dataclass
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from torch import Tensor
from transformers import CLIPTextModelWithProjection
from ... import initialization as init
from ...activations import ACT2FN
from ...masking_utils import create_bidirectional_mask
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import (
BaseModelOutput,
BaseModelOutputWithPooling,
ModelOutput,
)
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...pytorch_utils import compile_compatible_method_lru_cache
from ...utils import auto_docstring, can_return_tuple, logging
from ...utils.generic import (
TransformersKwargs,
is_flash_attention_requested,
merge_with_config_defaults,
)
from ...utils.output_capturing import capture_outputs
from ..auto import AutoModel
from .configuration_sam3 import (
Sam3Config,
Sam3DETRDecoderConfig,
Sam3DETREncoderConfig,
Sam3GeometryEncoderConfig,
Sam3MaskDecoderConfig,
Sam3VisionConfig,
Sam3ViTConfig,
)
logger = logging.get_logger(__name__)
@dataclass
@auto_docstring
class Sam3VisionEncoderOutput(BaseModelOutputWithPooling):
    r"""
    fpn_hidden_states (`tuple[torch.FloatTensor]`):
        Tuple of multi-level FPN feature maps.
    fpn_position_encoding (`tuple[torch.FloatTensor]`):
        Tuple of position encodings for each FPN level.
    """

    # Fields default to None so the output can be constructed incrementally;
    # annotations marked optional accordingly.
    fpn_hidden_states: tuple[torch.FloatTensor, ...] | None = None
    fpn_position_encoding: tuple[torch.FloatTensor, ...] | None = None
@dataclass
@auto_docstring
class Sam3GeometryEncoderOutput(ModelOutput):
    r"""
    last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_prompts, hidden_size)`):
        Encoded geometry prompt features (boxes).
    attention_mask (`torch.BoolTensor` of shape `(batch_size, num_prompts)`, *optional*):
        Attention mask for geometry prompts where True indicates valid positions and False indicates padding.
    """

    # Defaults to None so the output can be constructed incrementally.
    last_hidden_state: torch.FloatTensor | None = None
    attention_mask: torch.BoolTensor | None = None
@dataclass
@auto_docstring
class Sam3DETREncoderOutput(ModelOutput):
    r"""
    last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
        Encoded vision features (flattened from multi-level features).
    pos_embeds_flattened (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
        Flattened position embeddings for the vision features.
    text_features (`torch.FloatTensor` of shape `(batch_size, text_seq_len, hidden_size)`, *optional*):
        Text features (may be pooled after encoder processing).
    spatial_shapes (`torch.LongTensor` of shape `(num_levels, 2)`, *optional*):
        Spatial shapes (height, width) for each feature pyramid level.
    hidden_states (`tuple[torch.FloatTensor]`, *optional*):
        Tuple of hidden states from all encoder layers.
    attentions (`tuple[torch.FloatTensor]`, *optional*):
        Tuple of attention weights from all encoder layers.
    """

    # All fields default to None so the output can be constructed partially.
    last_hidden_state: torch.FloatTensor | None = None
    pos_embeds_flattened: torch.FloatTensor | None = None
    text_features: torch.FloatTensor | None = None
    spatial_shapes: torch.LongTensor | None = None
    hidden_states: tuple[torch.FloatTensor] | None = None
    attentions: tuple[torch.FloatTensor] | None = None
@dataclass
@auto_docstring
class Sam3DETRDecoderOutput(ModelOutput):
    r"""
    intermediate_hidden_states (`torch.FloatTensor` of shape `(num_layers, batch_size, num_queries, hidden_size)`):
        Decoder hidden states from all layers.
    reference_boxes (`torch.FloatTensor` of shape `(num_layers, batch_size, num_queries, 4)`):
        Predicted reference boxes from all decoder layers in (cx, cy, w, h) format.
    presence_logits (`torch.FloatTensor` of shape `(num_layers, batch_size, 1)`):
        Presence logits from all decoder layers indicating object presence confidence.
    hidden_states (`tuple[torch.FloatTensor]`, *optional*):
        Tuple of hidden states from all decoder layers.
    attentions (`tuple[torch.FloatTensor]`, *optional*):
        Tuple of attention weights from all decoder layers (self-attention and cross-attention).
    """

    # All fields default to None so the output can be constructed partially.
    intermediate_hidden_states: torch.FloatTensor | None = None
    reference_boxes: torch.FloatTensor | None = None
    presence_logits: torch.FloatTensor | None = None
    hidden_states: tuple[torch.FloatTensor] | None = None
    attentions: tuple[torch.FloatTensor] | None = None
@dataclass
@auto_docstring
class Sam3MaskDecoderOutput(ModelOutput):
    r"""
    pred_masks (`torch.FloatTensor` of shape `(batch_size, num_queries, height, width)`):
        Predicted segmentation masks for each query.
    semantic_seg (`torch.FloatTensor` of shape `(batch_size, 1, height, width)`, *optional*):
        Semantic segmentation output.
    attentions (`tuple[torch.FloatTensor]`, *optional*):
        Tuple of attention weights from mask decoder cross-attention layers.
    """

    # All fields default to None so the output can be constructed partially.
    pred_masks: torch.FloatTensor | None = None
    semantic_seg: torch.FloatTensor | None = None
    attentions: tuple[torch.FloatTensor] | None = None
@dataclass
@auto_docstring
class Sam3ImageSegmentationOutput(ModelOutput):
    r"""
    pred_masks (`torch.FloatTensor` of shape `(batch_size, num_queries, height, width)`):
        Predicted segmentation masks for each query.
    pred_boxes (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`):
        Predicted bounding boxes in (x1, y1, x2, y2) format.
    pred_logits (`torch.FloatTensor` of shape `(batch_size, num_queries)`, *optional*):
        Classification confidence scores for each query, computed via dot product between
        decoder query features and text features.
    presence_logits (`torch.FloatTensor` of shape `(batch_size, 1)`, *optional*):
        Presence logits from the DETR decoder presence token (last layer only). These indicate whether objects
        are present in the scene. Can be used to compute final scores by multiplying with pred_logits:
        `final_scores = pred_logits.sigmoid() * presence_logits.sigmoid()`.
    semantic_seg (`torch.FloatTensor` of shape `(batch_size, 1, height, width)`, *optional*):
        Semantic segmentation output.
    decoder_hidden_states (`tuple[torch.FloatTensor]`, *optional*):
        Tuple of hidden states from all DETR decoder layers. Each tensor has shape `(batch_size, num_queries, hidden_size)`.
    decoder_reference_boxes (`torch.FloatTensor` of shape `(num_layers, batch_size, num_queries, 4)`, *optional*):
        Reference boxes from all DETR decoder layers.
    encoder_hidden_states (`tuple[torch.FloatTensor]`, *optional*):
        Tuple of hidden states from all DETR encoder layers.
    vision_hidden_states (`tuple[torch.FloatTensor]`, *optional*):
        Tuple of hidden states from all vision encoder (ViT) layers.
    vision_attentions (`tuple[torch.FloatTensor]`, *optional*):
        Attention weights from vision encoder (ViT) layers.
    detr_encoder_attentions (`tuple[torch.FloatTensor]`, *optional*):
        Attention weights from DETR encoder layers.
    detr_decoder_attentions (`tuple[torch.FloatTensor]`, *optional*):
        Attention weights from DETR decoder layers (self-attention and cross-attention).
    mask_decoder_attentions (`tuple[torch.FloatTensor]`, *optional*):
        Attention weights from mask decoder layers.
    """

    # All fields default to None so the output can be constructed partially.
    pred_masks: torch.FloatTensor | None = None
    pred_boxes: torch.FloatTensor | None = None
    pred_logits: torch.FloatTensor | None = None
    presence_logits: torch.FloatTensor | None = None
    semantic_seg: torch.FloatTensor | None = None
    decoder_hidden_states: tuple[torch.FloatTensor] | None = None
    decoder_reference_boxes: torch.FloatTensor | None = None
    encoder_hidden_states: tuple[torch.FloatTensor] | None = None
    vision_hidden_states: tuple[torch.FloatTensor] | None = None
    vision_attentions: tuple[torch.FloatTensor] | None = None
    detr_encoder_attentions: tuple[torch.FloatTensor] | None = None
    detr_decoder_attentions: tuple[torch.FloatTensor] | None = None
    mask_decoder_attentions: tuple[torch.FloatTensor] | None = None
def inverse_sigmoid(x: torch.Tensor, eps: float = 1e-3) -> torch.Tensor:
    """Compute the logit (inverse sigmoid) of `x`, clamped by `eps` for numerical safety."""
    x = x.clamp(min=0, max=1)
    numerator = x.clamp(min=eps)
    denominator = (1 - x).clamp(min=eps)
    return torch.log(numerator / denominator)
def concat_padded_sequences(seq1, mask1, seq2, mask2, return_index: bool = False):
    """
    Concatenates two right-padded sequences, such that the resulting sequence
    is contiguous and also right-padded.
    Tensors are batch-first, masks are batch-first with True=valid, False=padding.

    Args:
        seq1: A tensor of shape (batch_size, seq1_length, hidden_size).
        mask1: A tensor of shape (batch_size, seq1_length) with True=valid, False=padding.
        seq2: A tensor of shape (batch_size, seq2_length, hidden_size).
        mask2: A tensor of shape (batch_size, seq2_length) with True=valid, False=padding.
        return_index: If True, also returns the index of the ids of the element of seq2
            in the concatenated sequence. This can be used to retrieve the elements of seq2.

    Returns:
        A tuple (concatenated_sequence, concatenated_mask) if return_index is False,
        otherwise (concatenated_sequence, concatenated_mask, index).
        The concatenated_mask uses True=valid, False=padding convention.

    Raises:
        ValueError: If batch sizes, hidden sizes or mask lengths are inconsistent.
    """
    batch_size, seq1_length, hidden_size = seq1.shape
    batch_size2, seq2_length, hidden_size2 = seq2.shape
    # Validate with explicit exceptions: `assert` is stripped under `python -O`.
    if not (batch_size == batch_size2 == mask1.size(0) == mask2.size(0)):
        raise ValueError("seq1, seq2, mask1 and mask2 must share the same batch size")
    if hidden_size != hidden_size2:
        raise ValueError("seq1 and seq2 must share the same hidden size")
    if seq1_length != mask1.size(1) or seq2_length != mask2.size(1):
        raise ValueError("each mask must match the sequence length of its sequence")
    actual_seq1_lengths = mask1.sum(dim=-1)
    actual_seq2_lengths = mask2.sum(dim=-1)
    final_lengths = actual_seq1_lengths + actual_seq2_lengths
    max_length = seq1_length + seq2_length
    # Valid positions are the first `final_lengths` slots of each row.
    concatenated_mask = (
        torch.arange(max_length, device=seq2.device)[None].repeat(batch_size, 1) < final_lengths[:, None]
    )
    concatenated_sequence = torch.zeros((batch_size, max_length, hidden_size), device=seq2.device, dtype=seq2.dtype)
    concatenated_sequence[:, :seq1_length, :] = seq1
    # Shift seq2 elements to start at the end of valid seq1
    index = torch.arange(seq2_length, device=seq2.device)[None].repeat(batch_size, 1)
    index = index + actual_seq1_lengths[:, None]
    # Scatter seq2 into the right positions
    concatenated_sequence = concatenated_sequence.scatter(1, index[:, :, None].expand(-1, -1, hidden_size), seq2)
    if return_index:
        return concatenated_sequence, concatenated_mask, index
    return concatenated_sequence, concatenated_mask
def box_cxcywh_to_xyxy(x):
    """Convert boxes from (cx, cy, w, h) format to (x1, y1, x2, y2) format."""
    center_x, center_y, width, height = x.unbind(-1)
    half_w = 0.5 * width
    half_h = 0.5 * height
    corners = (center_x - half_w, center_y - half_h, center_x + half_w, center_y + half_h)
    return torch.stack(corners, dim=-1)
class Sam3MLP(nn.Module):
    """Feed-forward block of the SAM3 ViT: Linear -> dropout -> activation -> Linear."""

    def __init__(self, config: Sam3ViTConfig):
        super().__init__()
        self.config = config
        self.activation_fn = ACT2FN[config.hidden_act]
        self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
        self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)
        self.dropout = nn.Dropout(config.hidden_dropout)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        """Expand to the intermediate size, apply dropout then the activation, and project back."""
        # NOTE: dropout is applied before the activation, matching the original model.
        return self.fc2(self.activation_fn(self.dropout(self.fc1(hidden_states))))
def eager_attention_forward(
    module: nn.Module,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attention_mask: torch.Tensor | None,
    scaling: float | None = None,
    dropout: float = 0.0,
    **kwargs: Unpack[TransformersKwargs],
):
    """
    Reference (pure PyTorch) scaled dot-product attention.

    Expects head-major tensors of shape [batch, num_heads, seq_len, head_dim]
    and returns the context transposed back to [batch, seq_len, num_heads, head_dim]
    together with the attention probabilities.
    """
    scale = query.size(-1) ** -0.5 if scaling is None else scaling
    # Raw attention scores: QK^T, scaled.
    scores = torch.matmul(query, key.transpose(2, 3)) * scale
    if attention_mask is not None:
        # The mask is additive (e.g. -inf at disallowed positions or a bias).
        scores = scores + attention_mask
    probs = nn.functional.softmax(scores, dim=-1)
    probs = nn.functional.dropout(probs, p=dropout, training=module.training)
    context = torch.matmul(probs, value)
    context = context.transpose(1, 2).contiguous()
    return context, probs
class Sam3Attention(nn.Module):
    """
    Multi-head attention.

    Handles standard [batch_size, seq_len, hidden_size] tensors. Query, key and
    value are separate arguments, so the same module serves both self- and
    cross-attention.
    """

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.hidden_size = config.hidden_size
        self.num_attention_heads = config.num_attention_heads
        self.head_dim = self.hidden_size // config.num_attention_heads
        self.scaling = self.head_dim**-0.5
        self.is_causal = False
        # Separate Q/K/V projections (fused checkpoints are split at conversion time).
        self.q_proj = nn.Linear(self.hidden_size, self.hidden_size)
        self.k_proj = nn.Linear(self.hidden_size, self.hidden_size)
        self.v_proj = nn.Linear(self.hidden_size, self.hidden_size)
        self.o_proj = nn.Linear(self.hidden_size, self.hidden_size)

    def forward(
        self,
        query: torch.Tensor,
        key: torch.Tensor,
        value: torch.Tensor,
        attention_mask: torch.Tensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple[torch.Tensor, torch.Tensor]:
        """
        Args:
            query: [batch_size, query_len, hidden_size]
            key: [batch_size, key_len, hidden_size]
            value: [batch_size, value_len, hidden_size]
            attention_mask: [batch_size, num_heads, query_len, key_len] or broadcastable
        Returns:
            Tuple of (output, attention_weights)
            output: [batch_size, query_len, hidden_size]
            attention_weights: [batch_size, num_heads, query_len, key_len]
        """
        batch_size = query.shape[0]
        query_len = query.shape[1]
        key_len = key.shape[1]
        # Project and split into heads: [batch, heads, seq, head_dim].
        query = self.q_proj(query).view(batch_size, query_len, self.num_attention_heads, self.head_dim).transpose(1, 2)
        key = self.k_proj(key).view(batch_size, key_len, self.num_attention_heads, self.head_dim).transpose(1, 2)
        value = self.v_proj(value).view(batch_size, key_len, self.num_attention_heads, self.head_dim).transpose(1, 2)
        attention_interface: Callable = ALL_ATTENTION_FUNCTIONS.get_interface(
            self.config._attn_implementation, eager_attention_forward
        )
        if (
            is_flash_attention_requested(self.config)
            and attention_mask is not None
            and attention_mask.dtype != torch.bool
        ):
            # Relative position bias tensors are represented as float masks and are incompatible with Flash Attention
            # Fallback to SDPA for this call only so the rest of the model can still benefit from FA
            attention_interface = ALL_ATTENTION_FUNCTIONS["sdpa"]
            logger.warning_once(
                "Sam3Attention: falling back to SDPA for relative-position cross-attention because "
                "Flash Attention does not support additive bias masks."
            )
        attn_output, attn_weights = attention_interface(
            self,
            query,
            key,
            value,
            attention_mask=attention_mask,
            dropout=0.0,
            scaling=self.scaling,
            is_causal=self.is_causal,
            **kwargs,
        )
        # Merge heads back into [batch_size, query_len, hidden_size].
        attn_output = attn_output.reshape(batch_size, query_len, self.num_attention_heads * self.head_dim).contiguous()
        attn_output = self.o_proj(attn_output)
        return attn_output, attn_weights
class Sam3ViTRotaryEmbedding(nn.Module):
    """
    2D (axial) rotary position embedding for the SAM3 vision transformer.

    Since the feature-map size is fixed per stage, the cos/sin tables for an
    `end_x` x `end_y` grid are precomputed once and registered as
    non-persistent buffers.
    """

    def __init__(self, config: Sam3ViTConfig, end_x: int, end_y: int, scale: float = 1.0):
        super().__init__()
        head_dim = config.hidden_size // config.num_attention_heads
        # Axial RoPE splits channels evenly between x and y, in rotation pairs.
        if head_dim % 4 != 0:
            raise ValueError("Dimension must be divisible by 4 for axial RoPE")
        self.end_x, self.end_y = end_x, end_y
        self.dim = head_dim
        self.rope_theta = config.rope_theta
        self.scale = scale
        base_freqs = 1.0 / (config.rope_theta ** (torch.arange(0, head_dim, 4)[: (head_dim // 4)].float() / head_dim))
        # Recover (x, y) coordinates from the flattened grid index.
        grid = torch.arange(end_x * end_y, dtype=torch.long)
        coords_x = (grid % end_x) * scale
        coords_y = torch.div(grid, end_x, rounding_mode="floor") * scale
        angles = torch.cat(
            [torch.outer(coords_x, base_freqs).float(), torch.outer(coords_y, base_freqs).float()], dim=-1
        )
        # Duplicate each angle for the two channels of a rotation pair.
        angles = angles.repeat_interleave(2, dim=-1)
        # Fixed feature shape -> cache cos/sin directly instead of recomputing per forward.
        self.register_buffer("rope_embeddings_cos", angles.cos(), persistent=False)
        self.register_buffer("rope_embeddings_sin", angles.sin(), persistent=False)

    @torch.no_grad()
    def forward(self) -> tuple[torch.Tensor, torch.Tensor]:
        """Return the precomputed (cos, sin) tables for the fixed grid."""
        return self.rope_embeddings_cos, self.rope_embeddings_sin
def rotate_pairwise(x):
    """
    Rotate the last dimension pairwise: (x0, x1, x2, x3, ...) -> (-x1, x0, -x3, x2, ...).

    This differs from the Llama-style half-tensor rotation, and is an optimized
    equivalent of:

    ```python
    x_rotated = torch.zeros_like(x, dtype=x.dtype, device=x.device)
    x_rotated[..., ::2] = -x[..., 1::2]
    x_rotated[..., 1::2] = x[..., ::2]
    return x_rotated
    ```
    """
    pairs = x.view(*x.shape[:-1], -1, 2)
    even, odd = pairs.unbind(dim=-1)
    rotated = torch.stack((-odd, even), dim=-1)
    return rotated.flatten(start_dim=-2)
def apply_rotary_pos_emb_2d(
    q: torch.Tensor,
    k: torch.Tensor,
    cos: torch.Tensor,
    sin: torch.Tensor,
) -> tuple[torch.Tensor, torch.Tensor]:
    """
    Apply 2D rotary position embeddings to query and key tensors for self-attention.

    The rotation is computed in float32 for numerical stability and the results
    are cast back to the inputs' dtypes.

    Args:
        q: Query tensor of shape (batch_size, num_windows, seq_len, num_heads, head_dim)
        k: Key tensor of shape (batch_size, num_windows, seq_len, num_heads, head_dim)
        cos: Cosine position embedding of shape (seq_len, head_dim)
        sin: Sine position embedding of shape (seq_len, head_dim)

    Returns:
        Rotated (q, k) tensors
    """

    def _rotate(tensor: torch.Tensor) -> torch.Tensor:
        as_float = tensor.float()
        return (as_float * cos) + (rotate_pairwise(as_float) * sin)

    return _rotate(q).type_as(q), _rotate(k).type_as(k)
class Sam3ViTRoPEAttention(nn.Module):
    """Self-attention over a 2D feature map, with rotary position encoding."""

    def __init__(self, config: Sam3ViTConfig):
        super().__init__()
        self.config = config
        self.hidden_size = config.hidden_size
        self.num_attention_heads = config.num_attention_heads
        self.head_dim = self.hidden_size // config.num_attention_heads
        self.scaling = self.head_dim**-0.5
        self.attention_dropout = config.attention_dropout
        self.is_causal = False
        self.q_proj = nn.Linear(self.hidden_size, self.hidden_size)
        self.k_proj = nn.Linear(self.hidden_size, self.hidden_size)
        self.v_proj = nn.Linear(self.hidden_size, self.hidden_size)
        self.o_proj = nn.Linear(self.hidden_size, self.hidden_size)

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: tuple[torch.Tensor, torch.Tensor],
        **kwargs: Unpack[TransformersKwargs],
    ) -> Tensor:
        """Attend over the flattened (height * width) token grid with RoPE applied to Q/K."""
        batch_size, height, width, _ = hidden_states.shape
        num_tokens = height * width
        head_shape = (batch_size, num_tokens, self.num_attention_heads, self.head_dim)
        # Project and split into heads: [batch, heads, tokens, head_dim].
        query_states = self.q_proj(hidden_states).view(*head_shape).transpose(1, 2)
        key_states = self.k_proj(hidden_states).view(*head_shape).transpose(1, 2)
        value_states = self.v_proj(hidden_states).view(*head_shape).transpose(1, 2)
        cos, sin = position_embeddings
        query_states, key_states = apply_rotary_pos_emb_2d(query_states, key_states, cos=cos, sin=sin)
        attention_interface: Callable = ALL_ATTENTION_FUNCTIONS.get_interface(
            self.config._attn_implementation, eager_attention_forward
        )
        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask=None,
            dropout=self.attention_dropout if self.training else 0.0,
            scaling=self.scaling,
            is_causal=self.is_causal,
            **kwargs,
        )
        # Restore the spatial layout before the output projection.
        attn_output = attn_output.reshape(batch_size, height, width, -1).contiguous()
        attn_output = self.o_proj(attn_output)
        return attn_output, attn_weights
class Sam3ViTPatchEmbeddings(nn.Module):
    """
    This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial
    `hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a
    Transformer, using a strided convolution.
    """

    def __init__(self, config: Sam3ViTConfig):
        super().__init__()

        def _as_pair(value):
            # Normalize scalar sizes to (height, width) pairs.
            return value if isinstance(value, Iterable) else (value, value)

        image_size = _as_pair(config.pretrain_image_size)
        patch_size = _as_pair(config.patch_size)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = config.num_channels
        self.num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        # bias=False: the original checkpoint's patch projection has no bias term.
        self.projection = nn.Conv2d(
            config.num_channels, config.hidden_size, kernel_size=patch_size, stride=patch_size, bias=False
        )

    def forward(self, pixel_values: torch.Tensor) -> torch.Tensor:
        """Project pixels to patch embeddings and flatten the spatial grid into a sequence."""
        projected = self.projection(pixel_values.to(self.projection.weight.dtype))
        return projected.flatten(2).transpose(1, 2)
class Sam3ViTEmbeddings(nn.Module):
    """
    Patch and position embeddings for the SAM3 ViT.

    When the input resolution differs from the pretraining resolution, the
    learned position embeddings are tiled (not interpolated) across the larger
    grid and cropped to the target size.
    """

    def __init__(self, config: Sam3ViTConfig):
        super().__init__()
        self.patch_embeddings = Sam3ViTPatchEmbeddings(config)
        # One embedding per patch; the cls-token position is stripped during
        # weight conversion.
        self.position_embeddings = nn.Parameter(
            torch.randn(1, self.patch_embeddings.num_patches, config.hidden_size)
        )
        self.dropout = nn.Dropout(config.hidden_dropout)
        self.patch_size = config.patch_size

    def _tile_position_embeddings(
        self,
        position_embeddings: torch.Tensor,
        height: int,
        width: int,
    ) -> torch.Tensor:
        """
        Tile `position_embeddings` ([1, num_pretrain_patches, hidden_size]) over a
        `height` x `width` patch grid.

        Returns:
            Tensor of shape [1, height * width, hidden_size].
        """
        pretrain_size = int(position_embeddings.shape[1] ** 0.5)
        # When the grid already matches, tiling is skipped — except while
        # tracing, where a single consistent graph is required.
        if not torch.jit.is_tracing() and pretrain_size == height == width:
            return position_embeddings.reshape(1, height * width, -1)
        hidden_size = position_embeddings.shape[-1]
        grid = position_embeddings.reshape(1, pretrain_size, pretrain_size, hidden_size).permute(0, 3, 1, 2)
        tiles_h = height // pretrain_size + 1
        tiles_w = width // pretrain_size + 1
        tiled = grid.tile([1, 1, tiles_h, tiles_w])[:, :, :height, :width]
        return tiled.permute(0, 2, 3, 1).reshape(1, height * width, hidden_size)

    def forward(
        self,
        pixel_values: torch.Tensor,
        interpolate_pos_encoding: bool = False,
    ) -> torch.Tensor:
        """Embed pixels into patch tokens and add the (possibly tiled) position embeddings."""
        height, width = pixel_values.shape[-2:]
        patch_tokens = self.patch_embeddings(pixel_values)
        positions = self._tile_position_embeddings(
            self.position_embeddings,
            height // self.patch_size,
            width // self.patch_size,
        )
        return self.dropout(patch_tokens + positions)
def window_partition(hidden_state, window_size):
    """
    Split a spatial feature map into non-overlapping square windows, padding on the
    bottom/right when the grid is not a multiple of the window size.

    Args:
        hidden_state (`torch.Tensor`):
            Input tokens with [batch_size, height, width, num_channels].
        window_size (`int`):
            Window size.

    Returns:
        `tuple(torch.FloatTensor)` comprising various elements:
        - windows: windows after partition with [batch_size * num_windows, window_size, window_size, num_channels].
        - (padded_height, padded_width): padded height and width before partition
    """
    batch_size, height, width, num_channels = hidden_state.shape
    # Amount of bottom/right padding needed to reach a multiple of window_size (may be zero).
    pad_bottom = (window_size - height % window_size) % window_size
    pad_right = (window_size - width % window_size) % window_size
    padded = nn.functional.pad(hidden_state, (0, 0, 0, pad_right, 0, pad_bottom))
    padded_height = height + pad_bottom
    padded_width = width + pad_right
    rows = padded_height // window_size
    cols = padded_width // window_size
    # Expose the window grid, then move the two window axes next to each other.
    grid = padded.view(batch_size, rows, window_size, cols, window_size, num_channels)
    windows = grid.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, num_channels)
    return windows, (padded_height, padded_width)
def window_unpartition(windows, window_size, pad_height_width, height_width):
    """
    Reassemble windows produced by `window_partition` into one feature map and crop
    away any padding that was added.

    Args:
        windows (`torch.Tensor`):
            Input tokens with [batch_size * num_windows, window_size, window_size, num_channels].
        window_size (`int`):
            Window size.
        pad_height_width (`tuple[int]`):
            Padded height and width (padded_height, padded_width).
        height_width (`tuple[int]`):
            Original height and width before padding.

    Returns:
        hidden_state: unpartitioned sequences with [batch_size, height, width, num_channels].
    """
    padded_height, padded_width = pad_height_width
    height, width = height_width
    rows = padded_height // window_size
    cols = padded_width // window_size
    # Recover the batch size from the total window count.
    batch_size = windows.shape[0] // (rows * cols)
    grid = windows.view(batch_size, rows, cols, window_size, window_size, -1)
    # Inverse of the partition permute: interleave window-grid and in-window axes.
    grid = grid.permute(0, 1, 3, 2, 4, 5).contiguous()
    full = grid.view(batch_size, padded_height, padded_width, -1)
    # Crop back to the pre-padding size (height <= padded_height, width <= padded_width).
    return full[:, :height, :width, :].contiguous()
class Sam3ViTLayerScale(nn.Module):
    """Learnable per-channel scaling of a residual branch (LayerScale)."""

    def __init__(self, config) -> None:
        super().__init__()
        # One learnable gain per hidden dimension, initialized to the configured constant.
        initial_gains = config.layer_scale_init_value * torch.ones(config.hidden_size)
        self.lambda1 = nn.Parameter(initial_gains)

    def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
        """Scale every channel of `hidden_state` by its learned gain."""
        return self.lambda1 * hidden_state
class Sam3ViTLayer(GradientCheckpointingLayer):
    """Vision Transformer layer with rotary position embeddings and optional windowed attention."""

    def __init__(self, config: Sam3ViTConfig, window_size: int = 0) -> None:
        super().__init__()
        hidden_size = config.hidden_size
        # Normalize image and patch sizes to (height, width) pairs.
        image_size = config.image_size
        if not isinstance(image_size, (list, tuple)):
            image_size = (image_size, image_size)
        patch_size = config.patch_size
        if not isinstance(patch_size, (list, tuple)):
            patch_size = (patch_size, patch_size)
        input_size = (image_size[0] // patch_size[0], image_size[1] // patch_size[1])
        self.layer_norm1 = nn.LayerNorm(hidden_size, eps=config.layer_norm_eps)
        # Global layers (window_size == 0) build rotary tables over the full patch grid;
        # windowed layers only need tables covering a single window.
        if window_size == 0:
            rotary_input_size = input_size
        else:
            rotary_input_size = (window_size, window_size)
        rotary_scale = config.window_size / rotary_input_size[0]
        self.rotary_emb = Sam3ViTRotaryEmbedding(
            config, end_x=rotary_input_size[0], end_y=rotary_input_size[1], scale=rotary_scale
        )
        self.attention = Sam3ViTRoPEAttention(config)
        self.layer_norm2 = nn.LayerNorm(hidden_size, eps=config.layer_norm_eps)
        self.mlp = Sam3MLP(config)
        self.dropout = nn.Dropout(config.hidden_dropout)
        self.window_size = window_size

    def forward(
        self,
        hidden_states: torch.Tensor,
        **kwargs: Unpack[TransformersKwargs],
    ) -> torch.Tensor:
        """Pre-norm RoPE attention (optionally over local windows) followed by a pre-norm MLP."""
        shortcut = hidden_states
        hidden_states = self.layer_norm1(hidden_states)
        use_windows = self.window_size > 0
        if use_windows:
            # Remember the unpadded grid so the partition can be undone afterwards.
            height, width = hidden_states.shape[1], hidden_states.shape[2]
            hidden_states, pad_height_width = window_partition(hidden_states, self.window_size)
        position_embeddings = self.rotary_emb()
        hidden_states, _ = self.attention(hidden_states, position_embeddings, **kwargs)
        if use_windows:
            # Restore the original spatial layout and drop the padding.
            hidden_states = window_unpartition(hidden_states, self.window_size, pad_height_width, (height, width))
        hidden_states = shortcut + hidden_states
        shortcut = hidden_states
        hidden_states = self.mlp(self.layer_norm2(hidden_states))
        return shortcut + self.dropout(hidden_states)
@auto_docstring
class Sam3PreTrainedModel(PreTrainedModel):
    config_class = Sam3Config
    base_model_prefix = "sam3"
    main_input_name = "pixel_values"
    # Accepts both image and text inputs.
    input_modalities = ["image", "text"]
    # Attention backends this implementation supports.
    _supports_sdpa = True
    _supports_flash_attn = True
    _supports_flex_attn = True
    _supports_attention_backend = True

    def _init_weights(self, module):
        """Initialize Sam3-specific modules on top of the generic `PreTrainedModel` init."""
        super()._init_weights(module)
        if isinstance(module, Sam3ViTEmbeddings):
            # Learned absolute position table: normal init with the configured std.
            init.normal_(module.position_embeddings, mean=0.0, std=self.config.initializer_range)
        elif isinstance(module, Sam3ViTRotaryEmbedding):
            # Precompute the 2D (axial) rotary cos/sin tables for an end_x x end_y grid.
            end_x, end_y = module.end_x, module.end_y
            dim = module.dim
            # dim/4 base frequencies; x and y each consume half of the final dimension.
            freqs = 1.0 / (module.rope_theta ** (torch.arange(0, dim, 4)[: (dim // 4)].float() / dim))
            # Recover (x, y) grid coordinates from the flattened patch index, scaled by module.scale.
            flattened_indices = torch.arange(end_x * end_y, dtype=torch.long)
            x_positions = (flattened_indices % end_x) * module.scale
            y_positions = torch.div(flattened_indices, end_x, rounding_mode="floor") * module.scale
            freqs_x = torch.outer(x_positions, freqs).float()
            freqs_y = torch.outer(y_positions, freqs).float()
            inv_freq = torch.cat([freqs_x, freqs_y], dim=-1)
            # Duplicate each angle — presumably to match interleaved rotary feature pairs
            # in Sam3ViTRoPEAttention (TODO confirm against the attention implementation).
            inv_freq = inv_freq.repeat_interleave(2, dim=-1)
            init.copy_(module.rope_embeddings_cos, inv_freq.cos())
            init.copy_(module.rope_embeddings_sin, inv_freq.sin())
@auto_docstring
class Sam3ViTModel(Sam3PreTrainedModel):
    # Sub-modules whose intermediate outputs can be recorded by `capture_outputs`.
    _can_record_outputs = {
        "hidden_states": Sam3ViTLayer,
        "attentions": Sam3ViTRoPEAttention,
    }

    def __init__(self, config: Sam3ViTConfig):
        super().__init__(config)
        self.config = config
        self.embeddings = Sam3ViTEmbeddings(config)
        self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        blocks = []
        for layer_idx in range(config.num_hidden_layers):
            # Layers listed in `global_attn_indexes` attend globally; all others use windows.
            is_global = layer_idx in config.global_attn_indexes
            blocks.append(Sam3ViTLayer(config, window_size=0 if is_global else config.window_size))
        self.layers = nn.ModuleList(blocks)
        self.post_init()

    def get_input_embeddings(self) -> Sam3ViTPatchEmbeddings:
        """Return the patch-embedding module."""
        return self.embeddings.patch_embeddings

    @merge_with_config_defaults
    @capture_outputs(tie_last_hidden_states=False)
    @auto_docstring
    def forward(
        self,
        pixel_values: torch.Tensor,
        **kwargs: Unpack[TransformersKwargs],
    ) -> BaseModelOutput:
        # [batch_size, num_patches, hidden_size]
        hidden_states = self.embeddings(pixel_values)
        batch_size = hidden_states.shape[0]
        hidden_size = hidden_states.shape[-1]
        # Patch-grid extent derived from the input resolution.
        grid_h = pixel_values.shape[-2] // self.config.patch_size
        grid_w = pixel_values.shape[-1] // self.config.patch_size
        # Windowed attention layers need the spatial layout [batch_size, height, width, hidden_size].
        hidden_states = hidden_states.view(batch_size, grid_h, grid_w, hidden_size)
        hidden_states = self.layer_norm(hidden_states)
        for block in self.layers:
            hidden_states = block(hidden_states, **kwargs)
        # Back to sequence layout [batch_size, height * width, hidden_size].
        hidden_states = hidden_states.view(batch_size, grid_h * grid_w, hidden_size)
        return BaseModelOutput(last_hidden_state=hidden_states)
class Sam3SinePositionEmbedding(nn.Module):
    """
    This is a more standard version of the position embedding, very similar to the one used by the Attention is all you
    need paper, generalized to work on images.
    """

    def __init__(
        self, num_pos_feats: int = 64, temperature: int = 10000, normalize: bool = False, scale: float | None = None
    ):
        """
        Args:
            num_pos_feats: Number of sine/cosine features produced per spatial axis.
            temperature: Base of the geometric frequency progression.
            normalize: Whether to normalize coordinates to [0, scale] before encoding.
            scale: Coordinate scale (defaults to 2*pi); only valid together with `normalize=True`.
        """
        super().__init__()
        if scale is not None and not normalize:
            raise ValueError("normalize should be True if scale is passed")
        self.num_pos_feats = num_pos_feats
        self.temperature = temperature
        self.normalize = normalize
        self.scale = 2 * math.pi if scale is None else scale

    def encode_1d_positions(self, x: torch.Tensor, y: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
        """
        Encode 1D coordinate pairs using sine/cosine positional embeddings.

        Args:
            x: 1D tensor of x coordinates (flattened)
            y: 1D tensor of y coordinates (flattened)

        Returns:
            Tuple of (pos_x, pos_y) positional embeddings, each [len(x), num_pos_feats].
        """
        x_embed = x * self.scale
        y_embed = y * self.scale
        dim_t = torch.arange(self.num_pos_feats, dtype=torch.int64, device=x.device).to(x.dtype)
        dim_t = self.temperature ** (2 * (dim_t // 2) / self.num_pos_feats)
        pos_x = x_embed[:, None] / dim_t
        pos_y = y_embed[:, None] / dim_t
        # Interleave sin (even indices) and cos (odd indices) over the feature axis.
        pos_x = torch.stack((pos_x[:, 0::2].sin(), pos_x[:, 1::2].cos()), dim=2).flatten(1)
        pos_y = torch.stack((pos_y[:, 0::2].sin(), pos_y[:, 1::2].cos()), dim=2).flatten(1)
        return pos_x, pos_y

    def encode_boxes(self, boxes: torch.Tensor) -> torch.Tensor:
        """
        Encode 4D box coordinates (x, y, w, h) for decoder conditioning using sine/cosine embeddings.

        Args:
            boxes: Box coordinates [batch_size, num_queries, 4] in (x, y, w, h) format

        Returns:
            Position embeddings [batch_size, num_queries, num_pos_feats*4]

        Raises:
            ValueError: If the last dimension of `boxes` is not 4.
        """
        # Raise instead of `assert`: asserts are stripped when Python runs with -O.
        if boxes.size(-1) != 4:
            raise ValueError(f"Expected 4D box coordinates (x, y, w, h), got shape {boxes.shape}")
        dim_t = torch.arange(self.num_pos_feats, dtype=torch.int64, device=boxes.device).to(boxes.dtype)
        dim_t = self.temperature ** (2 * torch.div(dim_t, 2, rounding_mode="floor") / self.num_pos_feats)
        x_embed = boxes[:, :, 0] * self.scale
        y_embed = boxes[:, :, 1] * self.scale
        w_embed = boxes[:, :, 2] * self.scale
        h_embed = boxes[:, :, 3] * self.scale
        pos_x = x_embed[:, :, None] / dim_t
        pos_y = y_embed[:, :, None] / dim_t
        pos_w = w_embed[:, :, None] / dim_t
        pos_h = h_embed[:, :, None] / dim_t
        pos_x = torch.stack((pos_x[:, :, 0::2].sin(), pos_x[:, :, 1::2].cos()), dim=3).flatten(2)
        pos_y = torch.stack((pos_y[:, :, 0::2].sin(), pos_y[:, :, 1::2].cos()), dim=3).flatten(2)
        pos_w = torch.stack((pos_w[:, :, 0::2].sin(), pos_w[:, :, 1::2].cos()), dim=3).flatten(2)
        pos_h = torch.stack((pos_h[:, :, 0::2].sin(), pos_h[:, :, 1::2].cos()), dim=3).flatten(2)
        # y before x matches the ordering used by `forward` below.
        pos = torch.cat((pos_y, pos_x, pos_w, pos_h), dim=2)
        return pos

    @compile_compatible_method_lru_cache(maxsize=4)
    def forward(
        self,
        shape: torch.Size,
        device: torch.device | str,
        dtype: torch.dtype,
        mask: Tensor | None = None,
    ) -> Tensor:
        """
        Build a 2D sine/cosine position encoding grid.

        Args:
            shape: Feature-map shape `(batch, channels, height, width)`; channels are ignored.
            device: Device of the returned tensor.
            dtype: Dtype of the returned tensor.
            mask: Optional `[batch, height, width]` boolean mask; True positions are excluded
                from the coordinate count.

        Returns:
            Position encoding of shape `[batch, 2 * num_pos_feats, height, width]`.
        """
        if mask is None:
            mask = torch.zeros((shape[0], shape[2], shape[3]), device=device, dtype=torch.bool)
        not_mask = (~mask).to(dtype)
        # Cumulative sums yield 1-based (row, col) coordinates over unmasked positions.
        y_embed = not_mask.cumsum(1)
        x_embed = not_mask.cumsum(2)
        if self.normalize:
            eps = 1e-6
            y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale
            x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale
        dim_t = torch.arange(self.num_pos_feats, dtype=torch.int64, device=device).to(dtype)
        dim_t = self.temperature ** (2 * torch.div(dim_t, 2, rounding_mode="floor") / self.num_pos_feats)
        pos_x = x_embed[:, :, :, None] / dim_t
        pos_y = y_embed[:, :, :, None] / dim_t
        # Interleave sin/cos over the feature dimension.
        pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3)
        pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3)
        pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
        return pos
class Sam3FPNLayer(nn.Module):
    """One FPN level: rescale backbone features by `scale_factor`, then project to `fpn_dim`."""

    def __init__(self, in_channels: int, fpn_dim: int, scale_factor: float):
        super().__init__()
        self.scale_factor = scale_factor
        self.scale_layers = nn.ModuleList()
        if scale_factor == 4.0:
            # 4x upsampling: two stride-2 transposed convs with a GELU in between,
            # halving the channel count at each step.
            self.scale_layers.append(nn.ConvTranspose2d(in_channels, in_channels // 2, kernel_size=2, stride=2))
            self.scale_layers.append(nn.GELU())
            self.scale_layers.append(nn.ConvTranspose2d(in_channels // 2, in_channels // 4, kernel_size=2, stride=2))
            proj_in_channels = in_channels // 4
        elif scale_factor == 2.0:
            # 2x upsampling: a single stride-2 transposed conv.
            self.scale_layers.append(nn.ConvTranspose2d(in_channels, in_channels // 2, kernel_size=2, stride=2))
            proj_in_channels = in_channels // 2
        elif scale_factor == 1.0:
            # Identity resolution: no rescaling layers at all.
            proj_in_channels = in_channels
        elif scale_factor == 0.5:
            # 2x downsampling via max pooling (channel count unchanged).
            self.scale_layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
            proj_in_channels = in_channels
        else:
            raise NotImplementedError(f"scale_factor={scale_factor} is not supported yet.")
        # 1x1 projection into the FPN width, then a 3x3 refinement conv.
        self.proj1 = nn.Conv2d(in_channels=proj_in_channels, out_channels=fpn_dim, kernel_size=1)
        self.proj2 = nn.Conv2d(in_channels=fpn_dim, out_channels=fpn_dim, kernel_size=3, padding=1)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        """Rescale `hidden_states` and project them to the FPN dimension."""
        # Match the projection weights' dtype before running any conv.
        features = hidden_states.to(self.proj1.weight.dtype)
        for scale_layer in self.scale_layers:
            features = scale_layer(features)
        return self.proj2(self.proj1(features))
class Sam3VisionNeck(nn.Module):
    """FPN neck: turn single-scale backbone features into multi-scale maps plus sine position encodings."""

    def __init__(self, config: Sam3VisionConfig):
        super().__init__()
        self.config = config
        self.position_encoding = Sam3SinePositionEmbedding(num_pos_feats=config.fpn_hidden_size // 2, normalize=True)
        # One FPN layer per configured scale factor.
        backbone_channels = config.backbone_config.hidden_size
        self.fpn_layers = nn.ModuleList(
            [
                Sam3FPNLayer(in_channels=backbone_channels, fpn_dim=config.fpn_hidden_size, scale_factor=scale)
                for scale in config.scale_factors
            ]
        )

    def forward(self, hidden_states: torch.Tensor) -> tuple[tuple[torch.Tensor, ...], tuple[torch.Tensor, ...]]:
        """Return per-level feature maps and matching position encodings as two tuples."""
        fpn_hidden_states = ()
        fpn_position_encoding = ()
        for fpn_layer in self.fpn_layers:
            feature_map = fpn_layer(hidden_states)
            # Position encoding is computed per level so it matches that level's resolution.
            encoding = self.position_encoding(feature_map.shape, feature_map.device, feature_map.dtype)
            fpn_hidden_states = fpn_hidden_states + (feature_map,)
            fpn_position_encoding = fpn_position_encoding + (encoding,)
        return fpn_hidden_states, fpn_position_encoding
@auto_docstring(
    custom_intro="""
    The vision model from Sam without any head or projection on top.
    """
)
class Sam3VisionModel(Sam3PreTrainedModel):
    config_class = Sam3VisionConfig
    main_input_name = "pixel_values"

    def __init__(self, config: Sam3VisionConfig):
        super().__init__(config)
        self.config = config
        # Backbone is instantiated from its own sub-config (a Sam3 ViT by default).
        self.backbone = AutoModel.from_config(config.backbone_config)
        self.neck = Sam3VisionNeck(config)
        self.post_init()

    def get_input_embeddings(self):
        """Delegate to the backbone's input embeddings."""
        return self.backbone.get_input_embeddings()

    @can_return_tuple
    def forward(
        self,
        pixel_values: torch.FloatTensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple | Sam3VisionEncoderOutput:
        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")
        backbone_output = self.backbone(pixel_values, **kwargs)
        # [batch_size, seq_len, hidden_size]
        sequence_output = backbone_output.last_hidden_state
        # The FPN neck expects [batch_size, hidden_size, height, width].
        batch_size = sequence_output.shape[0]
        patch_size = self.config.backbone_config.patch_size
        grid_h = pixel_values.shape[-2] // patch_size
        grid_w = pixel_values.shape[-1] // patch_size
        spatial_features = sequence_output.view(batch_size, grid_h, grid_w, -1).permute(0, 3, 1, 2)
        fpn_hidden_states, fpn_position_encoding = self.neck(spatial_features)
        return Sam3VisionEncoderOutput(
            last_hidden_state=sequence_output,
            fpn_hidden_states=fpn_hidden_states,
            fpn_position_encoding=fpn_position_encoding,
            hidden_states=backbone_output.hidden_states,
            attentions=backbone_output.attentions,
        )
class Sam3GeometryEncoderLayer(nn.Module):
    """Pre-norm transformer layer: prompt self-attention, cross-attention into vision features, MLP."""

    def __init__(self, config: Sam3GeometryEncoderConfig):
        super().__init__()
        self.layer_norm1 = nn.LayerNorm(config.hidden_size)
        self.self_attn = Sam3Attention(config)
        self.dropout = nn.Dropout(config.dropout)
        self.cross_attn = Sam3Attention(config)
        self.layer_norm2 = nn.LayerNorm(config.hidden_size)
        self.mlp = Sam3MLP(config)
        self.layer_norm3 = nn.LayerNorm(config.hidden_size)

    def forward(
        self,
        prompt_feats: Tensor,
        vision_feats: Tensor,
        vision_pos_encoding: Tensor,
        prompt_mask: Tensor,
        **kwargs: Unpack[TransformersKwargs],
    ):
        """Update `prompt_feats` via self-attention, vision cross-attention, and a feed-forward block."""
        # Self-attention over the prompt tokens (masked by `prompt_mask`).
        attn_input = self.layer_norm1(prompt_feats)
        attn_output, _ = self.self_attn(
            query=attn_input, key=attn_input, value=attn_input, attention_mask=prompt_mask, **kwargs
        )
        hidden_states = prompt_feats + self.dropout(attn_output)
        # Cross-attention: prompts attend to vision features; positions are added to keys only.
        attn_input = self.layer_norm2(hidden_states)
        keys = vision_feats + vision_pos_encoding
        attn_output, _ = self.cross_attn(query=attn_input, key=keys, value=vision_feats, **kwargs)
        hidden_states = hidden_states + self.dropout(attn_output)
        # Feed-forward block.
        mlp_output = self.mlp(self.layer_norm3(hidden_states))
        return hidden_states + self.dropout(mlp_output)
class Sam3GeometryEncoder(nn.Module):
    """
    Encoder for geometric prompts (boxes).
    Boxes are encoded using three approaches:
    - Direct projection: linear projection from coordinate space to hidden_size
    - Pooling: pool features from the backbone at the specified location (ROI align for boxes)
    - Position encoding: use position encoding of the box center
    These encodings are combined additively and further processed with transformer layers.
    """

    def __init__(self, config: Sam3GeometryEncoderConfig):
        super().__init__()
        self.config = config
        self.hidden_size = config.hidden_size
        self.roi_size = config.roi_size
        self.position_encoding = Sam3SinePositionEmbedding(num_pos_feats=config.hidden_size // 2, normalize=True)
        # Two label embeddings (positive/negative prompts) and one learned CLS token.
        self.label_embed = nn.Embedding(2, self.hidden_size)
        self.cls_embed = nn.Embedding(1, self.hidden_size)
        # Box encoding layers
        self.boxes_direct_project = nn.Linear(4, self.hidden_size)
        # Kernel of size roi_size collapses each pooled ROI patch to one vector per box.
        self.boxes_pool_project = nn.Conv2d(self.hidden_size, self.hidden_size, self.roi_size)
        # Input: sine encoding of the box center (hidden_size) plus raw height and width (+2).
        self.boxes_pos_enc_project = nn.Linear(self.hidden_size + 2, self.hidden_size)
        # Image feature normalization
        self.vision_layer_norm = nn.LayerNorm(self.hidden_size)
        # Prompt projection and normalization
        self.final_proj = nn.Linear(self.hidden_size, self.hidden_size)
        self.prompt_layer_norm = nn.LayerNorm(self.hidden_size)
        # Transformer layers
        self.layers = nn.ModuleList([Sam3GeometryEncoderLayer(config) for _ in range(config.num_layers)])
        self.output_layer_norm = nn.LayerNorm(self.hidden_size)

    def _encode_box_coordinates(
        self, center_x: torch.Tensor, center_y: torch.Tensor, width: torch.Tensor, height: torch.Tensor
    ) -> torch.Tensor:
        """
        Encode box coordinates by combining position-encoded centers with raw width/height.
        Args:
            center_x: 1D tensor of box center x coordinates
            center_y: 1D tensor of box center y coordinates
            width: 1D tensor of box widths
            height: 1D tensor of box heights
        Returns:
            Encoded box coordinates [N, embedding_dim]
        """
        pos_x, pos_y = self.position_encoding.encode_1d_positions(center_x, center_y)
        # Width/height are appended raw (unencoded), after the (y, x) sine features.
        pos = torch.cat((pos_y, pos_x, height[:, None], width[:, None]), dim=1)
        return pos

    def _encode_boxes(self, boxes, boxes_mask, boxes_labels, vision_features):
        """Encode box prompts. Mask convention: True=valid, False=padding."""
        batch_size, num_boxes = boxes.shape[:2]
        height, width = vision_features.shape[-2:]
        # (1) Direct linear projection of the raw (cx, cy, w, h) coordinates.
        boxes_embed = self.boxes_direct_project(boxes)
        # Pool features using ROI align
        # Convert boxes from CxCyWH to xyxy format and denormalize
        boxes_xyxy = box_cxcywh_to_xyxy(boxes)
        scale = torch.tensor([width, height, width, height], dtype=boxes_xyxy.dtype, device=boxes_xyxy.device)
        scale = scale.view(1, 1, 4)
        boxes_xyxy = boxes_xyxy * scale
        # ROI align expects list of boxes per batch element,
        # convert from bfloat16 to float16 as roi_align only supports float16 and float32
        dtype = torch.float16 if vision_features.dtype == torch.bfloat16 else vision_features.dtype
        sampled_features = torchvision.ops.roi_align(
            vision_features.to(dtype), boxes_xyxy.to(dtype).unbind(0), self.roi_size
        ).to(vision_features.dtype)
        # (2) Pooled-feature projection: collapse each ROI patch to a single vector.
        pooled_projection = self.boxes_pool_project(sampled_features)
        pooled_projection = pooled_projection.view(batch_size, num_boxes, self.hidden_size)
        boxes_embed = boxes_embed + pooled_projection
        # Add position encoding
        # (3) Sine encoding of the box center combined with raw width/height.
        center_x, center_y, box_width, box_height = boxes.unbind(-1)
        pos_enc = self._encode_box_coordinates(
            center_x.flatten(), center_y.flatten(), box_width.flatten(), box_height.flatten()
        )
        pos_enc = pos_enc.view(batch_size, num_boxes, pos_enc.shape[-1])
        pos_projection = self.boxes_pos_enc_project(pos_enc)
        boxes_embed = boxes_embed + pos_projection
        # Add label embeddings (positive/negative)
        label_embed = self.label_embed(boxes_labels.long())
        return label_embed + boxes_embed, boxes_mask

    def forward(
        self,
        box_embeddings: torch.Tensor,
        box_mask: torch.Tensor,
        box_labels: torch.Tensor,
        img_feats: tuple[torch.Tensor, ...],
        img_pos_embeds: tuple[torch.Tensor, ...] | None = None,
    ):
        """
        Forward pass for encoding geometric prompts.
        Args:
            box_embeddings: Box coordinates in CxCyWH format [batch_size, num_boxes, 4]
            box_mask: Attention mask for boxes [batch_size, num_boxes]
            box_labels: Labels for boxes (positive/negative) [batch_size, num_boxes]
            img_feats: Image features from vision encoder
            img_pos_embeds: Optional position embeddings for image features
        Returns:
            Sam3GeometryEncoderOutput containing encoded geometry features and attention mask.
        """
        batch_size = box_embeddings.shape[0]
        # Prepare vision features for cross-attention: flatten spatial dimensions
        vision_feats = img_feats[-1]  # [B, C, H, W]
        # Fall back to zero position embeddings when none are provided.
        vision_pos_embeds = img_pos_embeds[-1] if img_pos_embeds is not None else torch.zeros_like(vision_feats)
        vision_feats_flat = vision_feats.flatten(2).transpose(1, 2)  # [B, H*W, C]
        vision_pos_embeds_flat = vision_pos_embeds.flatten(2).transpose(1, 2)  # [B, H*W, C]
        # Normalize image features for pooling operations
        img_feats_last = img_feats[-1]  # [B, C, H, W]
        img_feats_last = img_feats_last.permute(0, 2, 3, 1)  # [B, H, W, C]
        normalized_img_feats = self.vision_layer_norm(img_feats_last)
        normalized_img_feats = normalized_img_feats.permute(0, 3, 1, 2)  # [B, C, H, W]
        prompt_embeds, prompt_mask = self._encode_boxes(box_embeddings, box_mask, box_labels, normalized_img_feats)
        # Add CLS token (always valid)
        cls_embed = self.cls_embed.weight.view(1, self.hidden_size).unsqueeze(0).expand(batch_size, -1, -1)
        cls_mask = torch.ones(batch_size, 1, dtype=prompt_mask.dtype, device=prompt_mask.device)
        prompt_embeds, prompt_mask = concat_padded_sequences(prompt_embeds, prompt_mask, cls_embed, cls_mask)
        prompt_embeds = self.prompt_layer_norm(self.final_proj(prompt_embeds))
        # Create bidirectional attention mask for transformer layers
        prompt_attention_mask = None
        if prompt_mask is not None:
            prompt_attention_mask = create_bidirectional_mask(
                config=self.config,
                inputs_embeds=prompt_embeds,
                attention_mask=prompt_mask,
            )
        # Apply transformer layers with cross-attention to vision features
        for layer in self.layers:
            prompt_embeds = layer(
                prompt_feats=prompt_embeds,
                vision_feats=vision_feats_flat,
                vision_pos_encoding=vision_pos_embeds_flat,
                prompt_mask=prompt_attention_mask,
            )
        # Final output normalization
        prompt_embeds = self.output_layer_norm(prompt_embeds)
        return Sam3GeometryEncoderOutput(
            last_hidden_state=prompt_embeds,
            attention_mask=prompt_mask,
        )
class Sam3DetrEncoderLayer(nn.Module):
    """DETR encoder layer: vision self-attention, cross-attention into prompt features, then an MLP."""

    def __init__(self, config: Sam3DETREncoderConfig):
        super().__init__()
        self.config = config
        self.layer_norm1 = nn.LayerNorm(config.hidden_size)
        self.self_attn = Sam3Attention(config)
        self.dropout = nn.Dropout(config.dropout)
        self.cross_attn = Sam3Attention(config)
        self.layer_norm2 = nn.LayerNorm(config.hidden_size)
        self.mlp = Sam3MLP(config)
        self.layer_norm3 = nn.LayerNorm(config.hidden_size)

    def forward(
        self,
        vision_feats: Tensor,
        prompt_feats: Tensor,
        vision_pos_encoding: Tensor,
        prompt_cross_attn_mask: Tensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ):
        """
        Update vision features with self-attention and prompt cross-attention.

        Args:
            vision_feats: Vision features [batch_size, vision_len, hidden_size] (main hidden states)
            prompt_feats: Text prompt features [batch_size, text_len, hidden_size]
            vision_pos_encoding: Position encoding for vision [batch_size, vision_len, hidden_size]
            prompt_cross_attn_mask: Cross-attention mask for prompt features

        Returns:
            Updated vision features [batch_size, vision_len, hidden_size]
        """
        # Self-attention: positions are added to queries/keys but not to values.
        normed = self.layer_norm1(vision_feats)
        query_key = normed + vision_pos_encoding
        attn_output, _ = self.self_attn(
            query=query_key,
            key=query_key,
            value=normed,
            **kwargs,
        )
        hidden_states = vision_feats + self.dropout(attn_output)
        # Cross-attention: vision queries attend to the text/prompt features.
        normed = self.layer_norm2(hidden_states)
        attn_output, _ = self.cross_attn(
            query=normed,
            key=prompt_feats,
            value=prompt_feats,
            attention_mask=prompt_cross_attn_mask,
            **kwargs,
        )
        hidden_states = hidden_states + self.dropout(attn_output)
        # Feed-forward block.
        mlp_output = self.mlp(self.layer_norm3(hidden_states))
        return hidden_states + self.dropout(mlp_output)
class Sam3DetrEncoder(Sam3PreTrainedModel):
    """
    DETR-style encoder that processes multi-level vision features with text fusion.
    This encoder processes vision features from multiple levels (e.g., FPN features at different
    resolutions) and fuses them with text prompts through a stack of transformer encoder layers.
    """

    # Sub-modules whose intermediate outputs can be recorded by `capture_outputs`.
    _can_record_outputs = {
        "hidden_states": Sam3DetrEncoderLayer,
        "attentions": Sam3Attention,
    }

    def __init__(self, config: Sam3DETREncoderConfig):
        super().__init__(config)
        self.config = config
        self.hidden_size = config.hidden_size
        self.layers = nn.ModuleList([Sam3DetrEncoderLayer(config) for _ in range(config.num_layers)])
        self.post_init()

    def _prepare_multilevel_features(
        self,
        vision_features: list[torch.Tensor],
        vision_pos_embeds: list[torch.Tensor],
    ):
        """
        Prepare multi-level vision features by flattening spatial dimensions and adding level embeddings.
        Args:
            vision_features: List of vision features at different levels [batch_size, channels, height, width]
            vision_pos_embeds: List of position embeddings for each level [batch_size, channels, height, width]
        Returns:
            Tuple containing flattened features, position embeddings, and spatial metadata
        """
        features_flattened = []
        pos_embeds_flattened = []
        spatial_shapes = []
        for features, pos_embed in zip(vision_features, vision_pos_embeds):
            # Record each level's (height, width) so downstream code can undo the flattening.
            height, width = features.shape[-2:]
            spatial_shapes.append((height, width))
            # Flatten spatial dimensions: [batch_size, channels, height, width] -> [batch_size, height*width, channels]
            features = features.flatten(2).transpose(1, 2)
            pos_embed = pos_embed.flatten(2).transpose(1, 2)
            features_flattened.append(features)
            pos_embeds_flattened.append(pos_embed)
        # Concatenate all levels into single sequence
        features_flattened = torch.cat(features_flattened, dim=1)
        pos_embeds_flattened = torch.cat(pos_embeds_flattened, dim=1)
        spatial_shapes = torch.tensor(spatial_shapes, dtype=torch.long, device=features_flattened.device)
        return (
            features_flattened,
            pos_embeds_flattened,
            spatial_shapes,
        )

    @merge_with_config_defaults
    @capture_outputs
    def forward(
        self,
        vision_features: list[torch.Tensor],
        text_features: torch.Tensor,
        vision_pos_embeds: list[torch.Tensor] | None = None,
        text_mask: torch.Tensor | None = None,
        spatial_sizes: list[tuple[int, int]] | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple | Sam3DETREncoderOutput:
        """
        Forward pass for the DETR encoder.
        Args:
            vision_features: List of vision features at different levels
            text_features: Text prompt features [batch_size, seq_len, hidden_size]
            vision_pos_embeds: Optional list of position embeddings for each level
            text_mask: Optional text padding mask [batch_size, seq_len]
            spatial_sizes: Optional list of (height, width) tuples for reshaping
        Returns:
            Sam3DETREncoderOutput containing encoded features and metadata.
        """
        # Features may be batch-first 4D [B, C, H, W] or flattened [H*W, B, C]; pick the batch axis accordingly.
        batch_size = vision_features[0].shape[0] if vision_features[0].dim() == 4 else vision_features[0].shape[1]
        # TODO: See if we can remove that reshaping and just use the features as is.
        # NOTE(review): this branch mutates the caller's `vision_features`/`vision_pos_embeds` lists in place.
        if spatial_sizes is not None:
            for i, (height, width) in enumerate(spatial_sizes):
                # Reshape from [height*width, batch_size, channels] to [batch_size, channels, height, width]
                vision_features[i] = vision_features[i].reshape(height, width, batch_size, -1).permute(2, 3, 0, 1)
                vision_pos_embeds[i] = vision_pos_embeds[i].reshape(height, width, batch_size, -1).permute(2, 3, 0, 1)
        # Flatten multi-level features for encoder processing
        (
            features_flattened,
            pos_embeds_flattened,
            spatial_shapes,
        ) = self._prepare_multilevel_features(vision_features, vision_pos_embeds)
        # Expand the text padding mask into a cross-attention mask over the flattened vision sequence.
        prompt_cross_attn_mask = None
        if text_mask is not None:
            prompt_cross_attn_mask = create_bidirectional_mask(
                config=self.config,
                inputs_embeds=features_flattened,
                attention_mask=text_mask,
                encoder_hidden_states=text_features,
            )
        hidden_states = features_flattened
        for layer in self.layers:
            hidden_states = layer(
                hidden_states,
                prompt_feats=text_features,
                vision_pos_encoding=pos_embeds_flattened,
                prompt_cross_attn_mask=prompt_cross_attn_mask,
                **kwargs,
            )
        return Sam3DETREncoderOutput(
            last_hidden_state=hidden_states,
            pos_embeds_flattened=pos_embeds_flattened,
            text_features=text_features,
            spatial_shapes=spatial_shapes,
        )
class Sam3DecoderMLP(nn.Module):
    """Small feed-forward head: 2 or 3 linear layers with ReLU between them."""

    def __init__(self, input_dim: int, hidden_dim: int, output_dim: int, num_layers: int = 2):
        super().__init__()
        if num_layers not in (2, 3):
            raise ValueError(f"Only 2 or 3 layers supported, got {num_layers}")
        self.layer1 = nn.Linear(input_dim, hidden_dim)
        if num_layers == 2:
            self.layer2 = nn.Linear(hidden_dim, output_dim)
            # Sentinel: `forward` checks layer3 to pick the 2- or 3-layer path.
            self.layer3 = None
        else:
            self.layer2 = nn.Linear(hidden_dim, hidden_dim)
            self.layer3 = nn.Linear(hidden_dim, output_dim)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Apply the linear stack; ReLU follows every layer except the last."""
        hidden = F.relu(self.layer1(x))
        if self.layer3 is None:
            return self.layer2(hidden)
        hidden = F.relu(self.layer2(hidden))
        return self.layer3(hidden)
class Sam3DetrDecoderLayer(nn.Module):
"""DETR decoder layer with self-attention, text cross-attention, and vision cross-attention."""
    def __init__(self, config: Sam3DETRDecoderConfig):
        """Build the three attention sub-blocks (self, text cross, vision cross) and the MLP.

        Each sub-block has its own dropout and LayerNorm; the norms are applied after the
        residual additions in `forward` (post-norm layout).
        """
        super().__init__()
        self.config = config
        # Self-attention over the decoder query tokens.
        self.self_attn = Sam3Attention(config)
        self.self_attn_dropout = nn.Dropout(config.dropout)
        self.self_attn_layer_norm = nn.LayerNorm(config.hidden_size)
        # Cross-attention from queries to the text features.
        self.text_cross_attn = Sam3Attention(config)
        self.text_cross_attn_dropout = nn.Dropout(config.dropout)
        self.text_cross_attn_layer_norm = nn.LayerNorm(config.hidden_size)
        # Cross-attention from queries to the vision features.
        self.vision_cross_attn = Sam3Attention(config)
        self.vision_cross_attn_dropout = nn.Dropout(config.dropout)
        self.vision_cross_attn_layer_norm = nn.LayerNorm(config.hidden_size)
        # Feed-forward block.
        self.mlp = Sam3MLP(config)
        self.mlp_layer_norm = nn.LayerNorm(config.hidden_size)
        self.mlp_dropout = nn.Dropout(config.dropout)
def forward(
self,
hidden_states: torch.Tensor,
query_pos: torch.Tensor,
text_features: torch.Tensor,
vision_features: torch.Tensor,
vision_pos_encoding: torch.Tensor,
text_cross_attn_mask: torch.Tensor | None = None,
vision_cross_attn_mask: torch.Tensor | None = None,
**kwargs: Unpack[TransformersKwargs],
) -> torch.Tensor:
"""
Forward pass for decoder layer.
Args:
hidden_states: Query features [batch_size, num_queries + 1, hidden_size] (includes presence token at position 0)
query_pos: Query position embeddings [batch_size, num_queries, hidden_size]
text_features: Text features [batch_size, seq_len, hidden_size]
vision_features: Vision features [batch_size, height*width, hidden_size]
vision_pos_encoding: Vision position encoding [batch_size, height*width, hidden_size]
text_cross_attn_mask: Text cross-attention mask
vision_cross_attn_mask: Vision cross-attention mask, already expanded for presence token
Returns:
Updated hidden states (including presence token at position 0)
"""
# Prepend zeros to query_pos for presence token
query_pos = F.pad(query_pos, (0, 0, 1, 0), mode="constant", value=0)
# Self-attention with query position encoding
residual = hidden_states
query_with_pos = hidden_states + query_pos
attn_output, _ = self.self_attn(
query=query_with_pos,
key=query_with_pos,
value=hidden_states,
attention_mask=None,
**kwargs,
)
hidden_states = residual + self.self_attn_dropout(attn_output)
hidden_states = self.self_attn_layer_norm(hidden_states)
# Text cross-attention: queries attend to text features
residual = hidden_states
query_with_pos = hidden_states + query_pos
attn_output, _ = self.text_cross_attn(
query=query_with_pos,
key=text_features,
value=text_features,
attention_mask=text_cross_attn_mask,
**kwargs,
)
hidden_states = residual + self.text_cross_attn_dropout(attn_output)
hidden_states = self.text_cross_attn_layer_norm(hidden_states)
# Vision cross-attention: queries attend to vision features (with RPB)
residual = hidden_states
query_with_pos = hidden_states + query_pos
key_with_pos = vision_features + vision_pos_encoding
attn_output, _ = self.vision_cross_attn(
query=query_with_pos,
key=key_with_pos,
value=vision_features,
attention_mask=vision_cross_attn_mask,
**kwargs,
)
hidden_states = residual + self.vision_cross_attn_dropout(attn_output)
hidden_states = self.vision_cross_attn_layer_norm(hidden_states)
# MLP
residual = hidden_states
hidden_states = self.mlp(hidden_states)
hidden_states = residual + self.mlp_dropout(hidden_states)
hidden_states = self.mlp_layer_norm(hidden_states)
return hidden_states
class Sam3DetrDecoder(Sam3PreTrainedModel):
    """
    DETR-style decoder with box refinement and presence token.
    Simplified version that assumes:
    - Box refinement is always enabled
    - Intermediate outputs are always returned
    - BoxRPB (relative position bias) with log-scale encoding
    - Presence token is used
    """
    _can_record_outputs = {
        "hidden_states": Sam3DetrDecoderLayer,
        "attentions": Sam3Attention,
    }
    def __init__(
        self,
        config: Sam3DETRDecoderConfig,
    ):
        super().__init__(config)
        self.config = config
        self.hidden_size = config.hidden_size
        self.layers = nn.ModuleList([Sam3DetrDecoderLayer(config) for _ in range(config.num_layers)])
        self.output_layer_norm = nn.LayerNorm(config.hidden_size)
        # 3-layer MLP predicting per-layer (cx, cy, w, h) box deltas in inverse-sigmoid space
        self.box_head = Sam3DecoderMLP(config.hidden_size, config.hidden_size, 4, 3)
        # Learned object queries and their initial reference boxes (pre-sigmoid)
        self.query_embed = nn.Embedding(config.num_queries, config.hidden_size)
        self.reference_points = nn.Embedding(config.num_queries, 4)
        # Single learned token prepended to the queries; scored by presence_head
        self.presence_token = nn.Embedding(1, config.hidden_size)
        self.presence_head = Sam3DecoderMLP(config.hidden_size, config.hidden_size, 1, 3)
        self.presence_layer_norm = nn.LayerNorm(config.hidden_size)
        self.clamp_presence_logit_max_val = 10.0
        # Maps sine-encoded reference boxes (2 * hidden_size) to query position embeddings
        self.ref_point_head = Sam3DecoderMLP(2 * config.hidden_size, config.hidden_size, config.hidden_size, 2)
        # Per-axis MLPs producing one bias value per attention head for box RPB
        self.box_rpb_embed_x = Sam3DecoderMLP(2, config.hidden_size, config.num_attention_heads, 2)
        self.box_rpb_embed_y = Sam3DecoderMLP(2, config.hidden_size, config.num_attention_heads, 2)
        self.position_encoding = Sam3SinePositionEmbedding(num_pos_feats=config.hidden_size // 2, normalize=False)
        self.post_init()
    @compile_compatible_method_lru_cache(maxsize=1)
    def _get_coords(
        self, height: torch.Tensor, width: torch.Tensor, dtype: torch.dtype, device: torch.device
    ) -> tuple[torch.Tensor, torch.Tensor]:
        """Generate normalized coordinate grids."""
        # Cached (maxsize=1) since the spatial shape is constant across decoder layers
        coords_h = torch.arange(0, height, device=device, dtype=dtype) / height
        coords_w = torch.arange(0, width, device=device, dtype=dtype) / width
        return coords_h, coords_w
    def _get_rpb_matrix(
        self, reference_boxes: torch.Tensor, spatial_shape: tuple[torch.Tensor, torch.Tensor]
    ) -> torch.Tensor:
        """
        Compute box relative position bias (RPB) matrix using log-scale encoding.
        RPB helps the decoder attend to relevant spatial locations based on predicted box positions.
        Args:
            reference_boxes: Reference boxes [batch_size, num_queries, 4] in sigmoid space
            spatial_shape: (height, width) of the vision features as tensors
        Returns:
            RPB matrix [batch_size, num_heads, num_queries, height*width]
        """
        height, width = spatial_shape
        boxes_xyxy = box_cxcywh_to_xyxy(reference_boxes)
        batch_size, num_queries, _ = boxes_xyxy.shape
        # Generate coordinate grids
        coords_h, coords_w = self._get_coords(
            height, width, dtype=reference_boxes.dtype, device=reference_boxes.device
        )
        # Compute deltas between coordinates and box boundaries
        # [:, :, 1:4:2] selects (y1, y2); [:, :, 0:3:2] selects (x1, x2)
        deltas_y = coords_h.view(1, -1, 1) - boxes_xyxy.reshape(-1, 1, 4)[:, :, 1:4:2]
        deltas_y = deltas_y.view(batch_size, num_queries, -1, 2)
        deltas_x = coords_w.view(1, -1, 1) - boxes_xyxy.reshape(-1, 1, 4)[:, :, 0:3:2]
        deltas_x = deltas_x.view(batch_size, num_queries, -1, 2)
        # Apply log-scale encoding (signed log2 compression, scaled by 8)
        deltas_x_log = deltas_x * 8
        deltas_x_log = torch.sign(deltas_x_log) * torch.log2(torch.abs(deltas_x_log) + 1.0) / math.log2(8)
        deltas_y_log = deltas_y * 8
        deltas_y_log = torch.sign(deltas_y_log) * torch.log2(torch.abs(deltas_y_log) + 1.0) / math.log2(8)
        # Embed deltas
        deltas_x = self.box_rpb_embed_x(deltas_x_log)  # [batch_size, num_queries, width, num_heads]
        deltas_y = self.box_rpb_embed_y(deltas_y_log)  # [batch_size, num_queries, height, num_heads]
        # Combine into 2D bias matrix via broadcast addition over (height, width)
        rpb_matrix = deltas_y.unsqueeze(3) + deltas_x.unsqueeze(
            2
        )  # [batch_size, num_queries, height, width, num_heads]
        rpb_matrix = rpb_matrix.flatten(2, 3)  # [batch_size, num_queries, height*width, num_heads]
        rpb_matrix = rpb_matrix.permute(0, 3, 1, 2).contiguous()  # [batch_size, num_heads, num_queries, height*width]
        return rpb_matrix
    @merge_with_config_defaults
    @capture_outputs
    def forward(
        self,
        vision_features: torch.Tensor,
        text_features: torch.Tensor,
        vision_pos_encoding: torch.Tensor,
        text_mask: torch.Tensor | None = None,
        spatial_shapes: torch.Tensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple | Sam3DETRDecoderOutput:
        """
        Forward pass for the DETR decoder.
        Args:
            vision_features: Vision features [batch_size, height*width, hidden_size]
            text_features: Text features [batch_size, seq_len, hidden_size]
            vision_pos_encoding: Vision position encoding [batch_size, height*width, hidden_size]
            text_mask: Text padding mask [batch_size, seq_len] where True=valid, False=padding
            spatial_shapes: Spatial shapes [num_levels, 2]
        Returns:
            Sam3DETRDecoderOutput containing decoder outputs from all layers.
        """
        batch_size = vision_features.shape[0]
        query_embeds = self.query_embed.weight.unsqueeze(0).expand(batch_size, -1, -1)
        reference_boxes = self.reference_points.weight.unsqueeze(0).expand(batch_size, -1, -1)
        reference_boxes = reference_boxes.sigmoid()
        presence_token = self.presence_token.weight.unsqueeze(0).expand(batch_size, -1, -1)
        # Concatenate presence token with query embeddings
        hidden_states = torch.cat([presence_token, query_embeds], dim=1)
        text_cross_attn_mask = None
        if text_mask is not None:
            text_cross_attn_mask = create_bidirectional_mask(
                config=self.config,
                inputs_embeds=hidden_states,
                attention_mask=text_mask,
                encoder_hidden_states=text_features,
            )
        intermediate_outputs = []
        intermediate_boxes = [reference_boxes]
        intermediate_presence_logits = []
        for layer in self.layers:
            # Generate sine embeddings for conditional queries
            reference_points_input = reference_boxes.unsqueeze(2)
            query_sine_embed = self.position_encoding.encode_boxes(reference_points_input[:, :, 0, :])
            query_pos = self.ref_point_head(query_sine_embed)
            # Compute box relative position bias (RPB) attention mask
            # (only for the single-level case; multi-level input gets no RPB)
            vision_cross_attn_mask = None
            if spatial_shapes is not None and spatial_shapes.shape[0] == 1:
                spatial_shape = (spatial_shapes[0, 0], spatial_shapes[0, 1])
                rpb_matrix = self._get_rpb_matrix(reference_boxes, spatial_shape)
                # Prepend zeros row for presence token (it attends to all vision tokens equally)
                vision_cross_attn_mask = F.pad(rpb_matrix, (0, 0, 1, 0), mode="constant", value=0)
            hidden_states = layer(
                hidden_states,
                query_pos=query_pos,
                text_features=text_features,
                vision_features=vision_features,
                vision_pos_encoding=vision_pos_encoding,
                text_cross_attn_mask=text_cross_attn_mask,
                vision_cross_attn_mask=vision_cross_attn_mask,
                **kwargs,
            )
            # Extract query hidden states (without presence token) for box refinement
            query_hidden_states = hidden_states[:, 1:]
            # Box refinement: predict delta and update reference boxes
            # (detach stops gradients flowing through the iterative refinement chain)
            reference_boxes_before_sigmoid = inverse_sigmoid(reference_boxes)
            delta_boxes = self.box_head(self.output_layer_norm(query_hidden_states))
            new_reference_boxes = (delta_boxes + reference_boxes_before_sigmoid).sigmoid()
            reference_boxes = new_reference_boxes.detach()
            intermediate_outputs.append(self.output_layer_norm(query_hidden_states))
            intermediate_boxes.append(new_reference_boxes)
            # Process presence token
            presence_hidden = hidden_states[:, :1]
            presence_logits = self.presence_head(self.presence_layer_norm(presence_hidden)).squeeze(-1)
            presence_logits = presence_logits.clamp(
                min=-self.clamp_presence_logit_max_val, max=self.clamp_presence_logit_max_val
            )
            intermediate_presence_logits.append(presence_logits)
        # Stack outputs from all layers
        # [:-1] drops the final refined boxes so reference_boxes[i] are the boxes *fed into* layer i
        intermediate_outputs = torch.stack(intermediate_outputs)
        intermediate_boxes = torch.stack(intermediate_boxes[:-1])
        intermediate_presence_logits = torch.stack(intermediate_presence_logits)
        return Sam3DETRDecoderOutput(
            intermediate_hidden_states=intermediate_outputs,
            reference_boxes=intermediate_boxes,
            presence_logits=intermediate_presence_logits,
        )
class Sam3DotProductScoring(nn.Module):
    """
    Scores decoder queries against the pooled text prompt with a scaled dot product.
    The resulting logits serve as per-query classification / confidence scores.
    """
    def __init__(self, config: Sam3Config):
        super().__init__()
        self.config = config
        hidden_size = config.detr_decoder_config.hidden_size
        projection_dim = config.detr_decoder_config.hidden_size
        # Residual MLP refining text features before pooling
        self.text_mlp = Sam3DecoderMLP(
            input_dim=hidden_size,
            hidden_dim=config.detr_decoder_config.intermediate_size,
            output_dim=hidden_size,
            num_layers=2,
        )
        self.text_mlp_dropout = nn.Dropout(config.detr_decoder_config.dropout)
        self.text_mlp_out_norm = nn.LayerNorm(hidden_size)
        # Linear projections into the shared scoring space
        self.text_proj = nn.Linear(hidden_size, projection_dim)
        self.query_proj = nn.Linear(hidden_size, projection_dim)
        # 1/sqrt(d) scaling, as in standard attention
        self.scale = float(1.0 / np.sqrt(projection_dim))
        # Clamp logits to keep them numerically well-behaved
        self.clamp_logits = True
        self.clamp_max_val = 12.0
    def _pool_text_features(self, text_features: torch.Tensor, text_mask: torch.Tensor | None) -> torch.Tensor:
        """
        Mean-pool the text sequence, ignoring padded positions.
        Args:
            text_features: [batch_size, seq_len, hidden_size]
            text_mask: [batch_size, seq_len] where True indicates valid tokens, False indicates padding
        Returns:
            pooled_text: [batch_size, hidden_size]
        """
        if text_mask is None:
            # No padding information: plain mean over the sequence
            return text_features.mean(dim=1)
        valid = text_mask.to(text_features.dtype).unsqueeze(-1)  # [batch_size, seq_len, 1]
        # clamp avoids division by zero for fully-padded rows
        denom = valid.sum(dim=1).clamp(min=1.0)  # [batch_size, 1]
        return (text_features * valid).sum(dim=1) / denom  # [batch_size, hidden_size]
    def forward(
        self,
        decoder_hidden_states: torch.Tensor,
        text_features: torch.Tensor,
        text_mask: torch.Tensor | None = None,
    ) -> torch.Tensor:
        """
        Compute classification scores via dot product.
        Args:
            decoder_hidden_states: [num_layers, batch_size, num_queries, hidden_size]
            text_features: [batch_size, seq_len, hidden_size]
            text_mask: [batch_size, seq_len] where True=valid, False=padding
        Returns:
            scores: [num_layers, batch_size, num_queries, 1]
        """
        # Refine text features through a residual MLP block
        refined = self.text_mlp(text_features)
        refined = self.text_mlp_dropout(refined)
        refined = self.text_mlp_out_norm(refined + text_features)
        pooled_text = self._pool_text_features(refined, text_mask)
        # Project both sides, then dot-product each query against the pooled text vector
        projected_text = self.text_proj(pooled_text).unsqueeze(-1)  # [batch, proj_dim, 1]
        projected_queries = self.query_proj(decoder_hidden_states)
        scores = torch.matmul(projected_queries, projected_text.unsqueeze(0)) * self.scale
        if self.clamp_logits:
            scores = scores.clamp(min=-self.clamp_max_val, max=self.clamp_max_val)
        return scores
class Sam3MaskEmbedder(nn.Module):
    """
    Three-layer MLP mapping object queries to mask embeddings (MaskFormer-style).
    ReLU is applied after every layer except the final one.
    """
    def __init__(self, config: Sam3MaskDecoderConfig):
        super().__init__()
        self.config = config
        hidden_size = config.hidden_size
        self.layers = nn.ModuleList(nn.Linear(hidden_size, hidden_size) for _ in range(3))
        self.activation = nn.ReLU()
    def forward(self, queries: torch.Tensor) -> torch.Tensor:
        """
        Args:
            queries: Query embeddings [batch_size, num_queries, hidden_size]
        Returns:
            Mask embeddings [batch_size, num_queries, hidden_size]
        """
        embeddings = queries
        final_idx = len(self.layers) - 1
        for idx, linear in enumerate(self.layers):
            embeddings = linear(embeddings)
            if idx != final_idx:
                embeddings = self.activation(embeddings)
        return embeddings
class Sam3PixelDecoder(nn.Module):
    """
    FPN-style pixel decoder producing pixel-level features, inspired by MaskFormer.
    Walks the backbone pyramid from coarse to fine, merging skip connections.
    """
    def __init__(self, config: Sam3MaskDecoderConfig):
        super().__init__()
        self.config = config
        hidden_size = config.hidden_size
        num_upsampling_stages = config.num_upsampling_stages
        # One 3x3 conv + GroupNorm pair per upsampling stage
        self.conv_layers = nn.ModuleList(
            [
                nn.Conv2d(hidden_size, hidden_size, kernel_size=3, stride=1, padding=1)
                for _ in range(num_upsampling_stages)
            ]
        )
        self.norms = nn.ModuleList([nn.GroupNorm(8, hidden_size) for _ in range(num_upsampling_stages)])
        self.out_channels = hidden_size
    def forward(self, backbone_features: list[torch.Tensor]) -> torch.Tensor:
        """
        Args:
            backbone_features: Backbone features ordered low to high resolution, each
                [batch_size, hidden_size, H_i, W_i] (assumed already projected to hidden_size)
        Returns:
            Pixel embeddings [batch_size, hidden_size, H, W] at the finest resolution
        """
        # Seed the pyramid with the coarsest map (last entry), then walk toward finer maps.
        fpn_state = backbone_features[-1]
        for stage, skip_feature in enumerate(reversed(backbone_features[:-1])):
            # Nearest-neighbor upsample to the skip connection's resolution, add, then conv+norm+ReLU.
            fpn_state = F.interpolate(fpn_state, size=skip_feature.shape[-2:], mode="nearest")
            fpn_state = self.norms[stage](self.conv_layers[stage](fpn_state + skip_feature))
            fpn_state = F.relu(fpn_state)
        return fpn_state
class Sam3MaskDecoder(Sam3PreTrainedModel):
    """
    Mask decoder that combines object queries with pixel-level features to predict instance masks.
    Also produces a semantic segmentation output and supports cross-attention to prompts.
    """
    _can_record_outputs = {
        "attentions": Sam3Attention,
    }
    def __init__(self, config: Sam3MaskDecoderConfig):
        super().__init__(config)
        self.config = config
        hidden_size = config.hidden_size
        # Pixel decoder (FPN)
        self.pixel_decoder = Sam3PixelDecoder(config)
        # Mask embedder (MLP to transform queries)
        self.mask_embedder = Sam3MaskEmbedder(config)
        # Projection from pixel decoder output to mask embedding space
        self.instance_projection = nn.Conv2d(self.pixel_decoder.out_channels, hidden_size, kernel_size=1)
        # Semantic segmentation head (always present in UniversalSegmentationHead)
        self.semantic_projection = nn.Conv2d(self.pixel_decoder.out_channels, 1, kernel_size=1)
        # Pre-norm cross-attention letting encoder features attend to the prompt
        self.prompt_cross_attn = Sam3Attention(config)
        self.prompt_cross_attn_norm = nn.LayerNorm(hidden_size)
        self.prompt_cross_attn_dropout = nn.Dropout(config.dropout)
        self.post_init()
    @merge_with_config_defaults
    @capture_outputs
    def forward(
        self,
        decoder_queries: torch.Tensor,
        backbone_features: list[torch.Tensor],
        encoder_hidden_states: torch.Tensor,
        prompt_features: torch.Tensor | None = None,
        prompt_mask: torch.Tensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple | Sam3MaskDecoderOutput:
        """
        Args:
            decoder_queries: Decoder output queries [batch_size, num_queries, hidden_size]
            backbone_features: List of backbone features to process through FPN
            encoder_hidden_states: Encoder outputs [batch_size, seq_len, hidden_size]
            prompt_features: Prompt features (text + geometry) for cross-attention [batch_size, prompt_len, hidden_size]
            prompt_mask: Padding mask [batch_size, prompt_len] where True=valid, False=padding
        Returns:
            Sam3MaskDecoderOutput containing predicted masks and semantic segmentation.
        """
        if prompt_features is not None:
            # Cross-attention: encoder features attend to prompt features
            # (pre-norm residual: the norm is applied to the query side only)
            residual = encoder_hidden_states
            normed_hidden_states = self.prompt_cross_attn_norm(encoder_hidden_states)
            cross_attn_mask = None
            if prompt_mask is not None:
                cross_attn_mask = create_bidirectional_mask(
                    config=self.config,
                    inputs_embeds=normed_hidden_states,
                    encoder_hidden_states=prompt_features,
                    attention_mask=prompt_mask,
                )
            attn_output, _ = self.prompt_cross_attn(
                query=normed_hidden_states,
                key=prompt_features,
                value=prompt_features,
                attention_mask=cross_attn_mask,
                **kwargs,
            )
            encoder_hidden_states = residual + self.prompt_cross_attn_dropout(attn_output)
        # Process backbone features through FPN to get pixel embeddings
        pixel_embed = self._embed_pixels(
            backbone_features=backbone_features,
            encoder_hidden_states=encoder_hidden_states,
        )
        # Predict instance masks via dot product between query embeddings and pixel embeddings
        instance_embeds = self.instance_projection(pixel_embed)
        mask_embeddings = self.mask_embedder(decoder_queries)
        pred_masks = torch.einsum("bqc,bchw->bqhw", mask_embeddings, instance_embeds)
        # Generate semantic segmentation
        semantic_seg = self.semantic_projection(pixel_embed)
        return Sam3MaskDecoderOutput(
            pred_masks=pred_masks,
            semantic_seg=semantic_seg,
        )
    def _embed_pixels(
        self,
        backbone_features: list[torch.Tensor],
        encoder_hidden_states: torch.Tensor,
    ) -> torch.Tensor:
        """
        Embed pixels by combining backbone FPN features with encoder vision features.
        The encoder vision features replace the finest-resolution backbone feature.
        Args:
            backbone_features: List of backbone features [batch_size, C, H_i, W_i]
            encoder_hidden_states: Encoder outputs [batch_size, seq_len, hidden_size]
        Returns:
            Pixel embeddings [batch_size, hidden_size, H, W]
        """
        # Clone to avoid mutating the caller's feature list/tensors
        backbone_visual_feats = [feat.clone() for feat in backbone_features]
        # Extract vision features from encoder output and reshape to spatial format
        # (the first spatial_dim encoder tokens are assumed to be the vision tokens — matches the encoder layout)
        spatial_dim = backbone_features[-1].shape[-2] * backbone_features[-1].shape[-1]
        encoder_visual_embed = encoder_hidden_states[:, :spatial_dim, :]
        batch_size, _, hidden_size = encoder_visual_embed.shape
        height, width = backbone_features[-1].shape[-2:]
        encoder_visual_embed = encoder_visual_embed.transpose(1, 2).reshape(batch_size, hidden_size, height, width)
        # Replace finest backbone feature with encoder vision features
        backbone_visual_feats[-1] = encoder_visual_embed
        # Process through FPN decoder
        pixel_embed = self.pixel_decoder(backbone_visual_feats)
        return pixel_embed
class Sam3Model(Sam3PreTrainedModel):
    """Promptable segmentation model: vision + text encoders, geometry prompts, DETR encoder/decoder, mask decoder."""
    input_modalities = ["image", "text"]
    _checkpoint_conversion_mapping = {
        r"detector_model.(.+)": r"\1" # the regex allows to remove the prefix, and add it back in revert mode
    }
    _keys_to_ignore_on_load_unexpected = [
        r"^tracker_model.",
        r"^tracker_neck.",
    ]
    def __init__(self, config: Sam3Config):
        # loading from a sam3_video config
        if hasattr(config, "detector_config") and config.detector_config is not None:
            detector_config = config.detector_config
            if isinstance(detector_config, dict):
                detector_config = Sam3Config(**detector_config)
            config = detector_config
        super().__init__(config)
        self.vision_encoder = Sam3VisionModel(config.vision_config)
        self.text_encoder = CLIPTextModelWithProjection(config.text_config)
        self.vocab_size = config.text_config.vocab_size
        # Project text features from text encoder hidden size to model hidden size
        # CLIP text encoder outputs 1024-dim features, but we need 256-dim for DETR
        self.text_projection = nn.Linear(config.text_config.hidden_size, config.detr_encoder_config.hidden_size)
        # Pass _attn_implementation to subconfigs BEFORE creating modules
        config.geometry_encoder_config._attn_implementation = config._attn_implementation
        config.detr_encoder_config._attn_implementation = config._attn_implementation
        config.detr_decoder_config._attn_implementation = config._attn_implementation
        config.mask_decoder_config._attn_implementation = config._attn_implementation
        self.geometry_encoder = Sam3GeometryEncoder(config.geometry_encoder_config)
        self.detr_encoder = Sam3DetrEncoder(config.detr_encoder_config)
        self.detr_decoder = Sam3DetrDecoder(config.detr_decoder_config)
        self.mask_decoder = Sam3MaskDecoder(config.mask_decoder_config)
        # Dot product scoring to compute classification scores
        self.dot_product_scoring = Sam3DotProductScoring(config)
        self.post_init()
    @can_return_tuple
    @auto_docstring
    def get_text_features(
        self,
        input_ids: torch.LongTensor,
        attention_mask: torch.Tensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple | BaseModelOutputWithPooling:
        r"""
        Example:
        ```python
        >>> from transformers import Sam3Model, Sam3Processor
        >>> from PIL import Image
        >>> import httpx
        >>> from io import BytesIO
        >>> model = Sam3Model.from_pretrained("facebook/sam3")
        >>> processor = Sam3Processor.from_pretrained("facebook/sam3")
        >>> # Pre-compute text embeddings
        >>> text_inputs = processor(text="cat", return_tensors="pt")
        >>> text_embeds = model.get_text_features(**text_inputs).pooler_output
        >>> # Reuse text embeddings for multiple images
        >>> url = "http://images.cocodataset.org/val2017/000000077595.jpg"
        >>> with httpx.stream("GET", url) as response:
        ...     image = Image.open(BytesIO(response.read()))
        >>> img_inputs = processor(images=image, return_tensors="pt")
        >>> outputs = model(pixel_values=img_inputs.pixel_values, text_embeds=text_embeds)
        ```
        """
        text_outputs = self.text_encoder(
            input_ids=input_ids, attention_mask=attention_mask, return_dict=True, **kwargs
        )
        last_hidden_state = text_outputs.last_hidden_state
        # pooler_output is repurposed to carry the full projected sequence (not a single pooled vector)
        text_outputs.pooler_output = self.text_projection(last_hidden_state)
        return text_outputs
    @auto_docstring
    def get_vision_features(
        self,
        pixel_values: torch.FloatTensor,
        **kwargs: Unpack[TransformersKwargs],
    ) -> Sam3VisionEncoderOutput:
        r"""
        Example:
        ```python
        >>> from transformers import Sam3Model, Sam3Processor
        >>> from PIL import Image
        >>> import httpx
        >>> from io import BytesIO
        >>> model = Sam3Model.from_pretrained("facebook/sam3")
        >>> processor = Sam3Processor.from_pretrained("facebook/sam3")
        >>> # Pre-compute vision embeddings
        >>> url = "http://images.cocodataset.org/val2017/000000077595.jpg"
        >>> with httpx.stream("GET", url) as response:
        ...     image = Image.open(BytesIO(response.read()))
        >>> img_inputs = processor(images=image, return_tensors="pt")
        >>> vision_embeds = model.get_vision_features(pixel_values=img_inputs.pixel_values)
        >>> # Reuse vision embeddings for multiple text prompts
        >>> text_inputs = processor(text="cat", return_tensors="pt")
        >>> outputs = model(vision_embeds=vision_embeds, input_ids=text_inputs.input_ids)
        ```
        """
        vision_outputs = self.vision_encoder(pixel_values, **kwargs)
        return vision_outputs
    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        pixel_values: torch.FloatTensor | None = None,
        vision_embeds: Sam3VisionEncoderOutput | None = None,
        input_ids: torch.LongTensor | None = None,
        attention_mask: torch.Tensor | None = None,
        text_embeds: torch.FloatTensor | None = None,
        input_boxes: torch.FloatTensor | None = None,
        input_boxes_labels: torch.LongTensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> Sam3ImageSegmentationOutput:
        r"""
        vision_embeds (`Sam3VisionEncoderOutput`, *optional*):
            Pre-computed vision embeddings. Can be used to easily reuse vision embeddings. If provided, `pixel_values`
            should not be passed. Mutually exclusive with `pixel_values`.
        text_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Pre-computed text embeddings. Can be used to easily reuse text embeddings. If provided, `input_ids`
            should not be passed. Mutually exclusive with `input_ids`.
        input_boxes (`torch.FloatTensor` of shape `(batch_size, num_boxes, 4)`, *optional*):
            Normalized box coordinates in [0, 1] range, in (cx, cy, w, h) format.
        input_boxes_labels (`torch.LongTensor` of shape `(batch_size, num_boxes)`, *optional*):
            Labels for boxes: 1 (positive), 0 (negative).
        Example:
        ```python
        >>> from PIL import Image
        >>> import httpx
        >>> from io import BytesIO
        >>> from transformers import AutoModel, AutoProcessor
        >>> model = AutoModel.from_pretrained("facebook/sam3")
        >>> processor = AutoProcessor.from_pretrained("facebook/sam3")
        >>> url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/sam-car.png"
        >>> with httpx.stream("GET", url) as response:
        ...     image = Image.open(BytesIO(response.read())).convert("RGB")
        >>> text = "car"
        >>> inputs = processor(images=image, text=text, return_tensors="pt")
        >>> # Get segmentation output
        >>> outputs = model(**inputs)
        >>> pred_masks = outputs.pred_masks
        >>> pred_boxes = outputs.pred_boxes
        ```
        """
        # Exactly one vision input and exactly one text input must be provided
        if (pixel_values is None) == (vision_embeds is None):
            raise ValueError("You must specify exactly one of pixel_values or vision_embeds")
        if (input_ids is None) == (text_embeds is None):
            raise ValueError("You must specify exactly one of input_ids or text_embeds")
        if pixel_values is not None:
            batch_size = pixel_values.shape[0]
            device = pixel_values.device
        else:
            batch_size = vision_embeds.fpn_hidden_states[0].shape[0]
            device = vision_embeds.fpn_hidden_states[0].device
        if vision_embeds is None:
            vision_outputs = self.vision_encoder(pixel_values, **kwargs)
        else:
            vision_outputs = vision_embeds
        # Drop the last (coarsest/extra) FPN level; the rest feed geometry encoder + mask decoder
        fpn_hidden_states = vision_outputs.fpn_hidden_states[:-1]
        fpn_position_encoding = vision_outputs.fpn_position_encoding[:-1]
        if text_embeds is None:
            text_features = self.get_text_features(
                input_ids=input_ids, attention_mask=attention_mask, return_dict=True
            ).pooler_output
        else:
            text_features = text_embeds
        text_mask = attention_mask.bool() if attention_mask is not None else None
        has_geometry_prompts = input_boxes is not None and input_boxes.numel() > 0
        geometry_prompt_features = None
        geometry_prompt_mask = None
        if has_geometry_prompts:
            # NOTE(review): this inner check duplicates has_geometry_prompts, so the else
            # branch below is unreachable as written — kept as-is
            if input_boxes is not None and input_boxes.numel() > 0:
                box_embeddings = input_boxes  # [batch_size, num_boxes, 4]
                # Label -10 marks padded boxes: excluded via box_mask, then remapped to 0 below
                box_labels = (
                    input_boxes_labels
                    if input_boxes_labels is not None
                    else torch.ones_like(box_embeddings[..., 0], dtype=torch.long)
                )
                box_mask = (
                    (input_boxes_labels != -10)
                    if input_boxes_labels is not None
                    else torch.ones(batch_size, input_boxes.shape[1], dtype=torch.bool, device=device)
                )
                box_labels = torch.where(box_labels == -10, 0, box_labels)
            else:
                box_embeddings = torch.zeros(batch_size, 0, 4, dtype=text_features.dtype, device=device)
                box_labels = torch.zeros(batch_size, 0, dtype=torch.long, device=device)
                box_mask = torch.zeros(batch_size, 0, dtype=torch.bool, device=device)
            geometry_outputs = self.geometry_encoder(
                box_embeddings=box_embeddings,
                box_mask=box_mask,
                box_labels=box_labels,
                img_feats=fpn_hidden_states,
                img_pos_embeds=fpn_position_encoding,
            )
            geometry_prompt_features = geometry_outputs.last_hidden_state
            geometry_prompt_mask = geometry_outputs.attention_mask
        if geometry_prompt_features is not None:
            # Repeat text_features for all geometry prompts
            if text_features.shape[0] == 1 and geometry_prompt_features.shape[0] > 1:
                text_features = text_features.repeat(geometry_prompt_features.shape[0], 1, 1)
            combined_prompt_features = torch.cat([text_features, geometry_prompt_features], dim=1)
            if text_mask is not None and text_mask.shape[0] == 1 and geometry_prompt_mask.shape[0] > 1:
                text_mask = text_mask.repeat(geometry_prompt_mask.shape[0], 1)
            # Build the combined (text + geometry) validity mask, treating a missing
            # mask on either side as "all valid"
            if text_mask is not None and geometry_prompt_mask is not None:
                combined_prompt_mask = torch.cat([text_mask, geometry_prompt_mask], dim=1)
            elif text_mask is not None:
                geo_valid_mask = torch.ones(
                    batch_size, geometry_prompt_features.shape[1], dtype=torch.bool, device=device
                )
                combined_prompt_mask = torch.cat([text_mask, geo_valid_mask], dim=1)
            elif geometry_prompt_mask is not None:
                text_valid_mask = torch.ones(batch_size, text_features.shape[1], dtype=torch.bool, device=device)
                combined_prompt_mask = torch.cat([text_valid_mask, geometry_prompt_mask], dim=1)
            else:
                combined_prompt_mask = None
        else:
            combined_prompt_features = text_features
            combined_prompt_mask = text_mask
        # DETR encoder fuses the finest FPN level with the combined prompt
        encoder_outputs = self.detr_encoder(
            vision_features=[fpn_hidden_states[-1]],
            text_features=combined_prompt_features,
            vision_pos_embeds=[fpn_position_encoding[-1]],
            text_mask=combined_prompt_mask,
            **kwargs,
        )
        decoder_outputs = self.detr_decoder(
            vision_features=encoder_outputs.last_hidden_state,
            text_features=encoder_outputs.text_features,
            vision_pos_encoding=encoder_outputs.pos_embeds_flattened,
            text_mask=combined_prompt_mask,
            spatial_shapes=encoder_outputs.spatial_shapes,
            **kwargs,
        )
        # Refine boxes from decoder: apply the box head to every layer's hidden states
        all_box_offsets = self.detr_decoder.box_head(decoder_outputs.intermediate_hidden_states)
        reference_boxes_inv_sig = inverse_sigmoid(decoder_outputs.reference_boxes)
        all_pred_boxes_cxcywh = (reference_boxes_inv_sig + all_box_offsets).sigmoid()
        all_pred_boxes = box_cxcywh_to_xyxy(all_pred_boxes_cxcywh)
        all_pred_logits = self.dot_product_scoring(
            decoder_hidden_states=decoder_outputs.intermediate_hidden_states,
            text_features=encoder_outputs.text_features,
            text_mask=combined_prompt_mask,
        ).squeeze(-1)
        # Keep only the last decoder layer's predictions as the final outputs
        pred_logits = all_pred_logits[-1]
        pred_boxes = all_pred_boxes[-1]
        decoder_hidden_states = decoder_outputs.intermediate_hidden_states[-1]
        presence_logits = decoder_outputs.presence_logits[-1]
        mask_outputs = self.mask_decoder(
            decoder_queries=decoder_hidden_states,
            backbone_features=list(fpn_hidden_states),
            encoder_hidden_states=encoder_outputs.last_hidden_state,
            prompt_features=combined_prompt_features,
            prompt_mask=combined_prompt_mask,
            **kwargs,
        )
        return Sam3ImageSegmentationOutput(
            pred_masks=mask_outputs.pred_masks,
            pred_boxes=pred_boxes,
            pred_logits=pred_logits,
            presence_logits=presence_logits,
            semantic_seg=mask_outputs.semantic_seg,
            decoder_hidden_states=decoder_outputs.hidden_states,
            decoder_reference_boxes=decoder_outputs.reference_boxes,
            encoder_hidden_states=encoder_outputs.hidden_states,
            vision_hidden_states=vision_outputs.hidden_states,
            vision_attentions=vision_outputs.attentions,
            detr_encoder_attentions=encoder_outputs.attentions,
            detr_decoder_attentions=decoder_outputs.attentions,
            mask_decoder_attentions=mask_outputs.attentions,
        )
# Public API of this module.
__all__ = ["Sam3Model", "Sam3VisionModel", "Sam3ViTModel", "Sam3PreTrainedModel"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/sam3/modeling_sam3.py",
"license": "Apache License 2.0",
"lines": 1996,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/sam3/modular_sam3.py | # Copyright 2025 The Meta AI Authors and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
)
from ..sam2.image_processing_sam2_fast import Sam2ImageProcessorFast
def _scale_boxes(boxes, target_sizes):
"""
Scale batch of bounding boxes to the target sizes.
Args:
boxes (`torch.Tensor` of shape `(batch_size, num_boxes, 4)`):
Bounding boxes to scale. Each box is expected to be in (x1, y1, x2, y2) format.
target_sizes (`list[tuple[int, int]]` or `torch.Tensor` of shape `(batch_size, 2)`):
Target sizes to scale the boxes to. Each target size is expected to be in (height, width) format.
Returns:
`torch.Tensor` of shape `(batch_size, num_boxes, 4)`: Scaled bounding boxes.
"""
if isinstance(target_sizes, (list, tuple)):
image_height = torch.tensor([i[0] for i in target_sizes])
image_width = torch.tensor([i[1] for i in target_sizes])
elif isinstance(target_sizes, torch.Tensor):
image_height, image_width = target_sizes.unbind(1)
else:
raise TypeError("`target_sizes` must be a list, tuple or torch.Tensor")
scale_factor = torch.stack([image_width, image_height, image_width, image_height], dim=1)
scale_factor = scale_factor.unsqueeze(1).to(boxes.device)
boxes = boxes * scale_factor
return boxes
class Sam3ImageProcessorFast(Sam2ImageProcessorFast):
    # Fast image processor for SAM3. Reuses the SAM2 fast-processing pipeline;
    # only the normalization statistics and the input/mask resolutions differ.
    image_mean = IMAGENET_STANDARD_MEAN
    image_std = IMAGENET_STANDARD_STD
    # SAM3 expects square 1008x1008 inputs.
    size = {"height": 1008, "width": 1008}
    # Resolution of the segmentation-map side of the pipeline.
    mask_size = {"height": 288, "width": 288}
    def post_process_semantic_segmentation(
        self, outputs, target_sizes: list[tuple] | None = None, threshold: float = 0.5
    ):
        """
        Converts the output of [`Sam3Model`] into semantic segmentation maps.
        Args:
            outputs ([`Sam3ImageSegmentationOutput`]):
                Raw outputs of the model containing semantic_seg.
            target_sizes (`list[tuple]` of length `batch_size`, *optional*):
                List of tuples corresponding to the requested final size (height, width) of each prediction. If unset,
                predictions will not be resized.
            threshold (`float`, *optional*, defaults to 0.5):
                Threshold for binarizing the semantic segmentation masks.
        Returns:
            semantic_segmentation: `list[torch.Tensor]` of length `batch_size`, where each item is a semantic
            segmentation map of shape (height, width) corresponding to the target_sizes entry (if `target_sizes` is
            specified). Each entry is a binary mask (0 or 1).
        """
        # Get semantic segmentation output
        # semantic_seg has shape (batch_size, 1, height, width)
        semantic_logits = outputs.semantic_seg
        if semantic_logits is None:
            raise ValueError(
                "Semantic segmentation output is not available in the model outputs. "
                "Make sure the model was run with semantic segmentation enabled."
            )
        # Apply sigmoid to convert logits to probabilities
        semantic_probs = semantic_logits.sigmoid()
        # Resize and binarize semantic segmentation maps
        if target_sizes is not None:
            if len(semantic_logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )
            semantic_segmentation = []
            for idx in range(len(semantic_logits)):
                # Interpolate probabilities (not raw logits) to the requested size,
                # then threshold; this is done per image since target sizes may differ.
                resized_probs = torch.nn.functional.interpolate(
                    semantic_probs[idx].unsqueeze(dim=0),
                    size=target_sizes[idx],
                    mode="bilinear",
                    align_corners=False,
                )
                # Binarize: values > threshold become 1, otherwise 0
                semantic_map = (resized_probs[0, 0] > threshold).to(torch.long)
                semantic_segmentation.append(semantic_map)
        else:
            # Binarize without resizing
            semantic_segmentation = (semantic_probs[:, 0] > threshold).to(torch.long)
            # Return a list of per-image maps for a uniform return type in both branches.
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
        return semantic_segmentation
    def post_process_object_detection(self, outputs, threshold: float = 0.3, target_sizes: list[tuple] | None = None):
        """
        Converts the raw output of [`Sam3Model`] into final bounding boxes in (top_left_x, top_left_y,
        bottom_right_x, bottom_right_y) format.
        Args:
            outputs ([`Sam3ImageSegmentationOutput`]):
                Raw outputs of the model containing pred_boxes, pred_logits, and optionally presence_logits.
            threshold (`float`, *optional*, defaults to 0.3):
                Score threshold to keep object detection predictions.
            target_sizes (`list[tuple[int, int]]`, *optional*):
                List of tuples (`tuple[int, int]`) containing the target size `(height, width)` of each image in the
                batch. If unset, predictions will not be resized.
        Returns:
            `list[dict]`: A list of dictionaries, each dictionary containing the following keys:
            - **scores** (`torch.Tensor`): The confidence scores for each predicted box on the image.
            - **boxes** (`torch.Tensor`): Image bounding boxes in (top_left_x, top_left_y, bottom_right_x,
              bottom_right_y) format.
        """
        pred_logits = outputs.pred_logits  # (batch_size, num_queries)
        pred_boxes = outputs.pred_boxes  # (batch_size, num_queries, 4) in xyxy format
        presence_logits = outputs.presence_logits  # (batch_size, 1) or None
        batch_size = pred_logits.shape[0]
        if target_sizes is not None and len(target_sizes) != batch_size:
            raise ValueError("Make sure that you pass in as many target sizes as images")
        # Compute scores: combine pred_logits with presence_logits if available
        batch_scores = pred_logits.sigmoid()
        if presence_logits is not None:
            # Per-image presence probability gates all query scores of that image.
            presence_scores = presence_logits.sigmoid()  # (batch_size, 1)
            batch_scores = batch_scores * presence_scores  # Broadcast multiplication
        # Boxes are already in xyxy format from the model
        batch_boxes = pred_boxes
        # Convert from relative [0, 1] to absolute [0, height/width] coordinates
        if target_sizes is not None:
            batch_boxes = _scale_boxes(batch_boxes, target_sizes)
        results = []
        for scores, boxes in zip(batch_scores, batch_boxes):
            # Keep only detections whose (possibly presence-gated) score clears the threshold.
            keep = scores > threshold
            scores = scores[keep]
            boxes = boxes[keep]
            results.append({"scores": scores, "boxes": boxes})
        return results
    def post_process_instance_segmentation(
        self,
        outputs,
        threshold: float = 0.3,
        mask_threshold: float = 0.5,
        target_sizes: list[tuple] | None = None,
    ):
        """
        Converts the raw output of [`Sam3Model`] into instance segmentation predictions with bounding boxes and masks.
        Args:
            outputs ([`Sam3ImageSegmentationOutput`]):
                Raw outputs of the model containing pred_boxes, pred_logits, pred_masks, and optionally
                presence_logits.
            threshold (`float`, *optional*, defaults to 0.3):
                Score threshold to keep instance predictions.
            mask_threshold (`float`, *optional*, defaults to 0.5):
                Threshold for binarizing the predicted masks.
            target_sizes (`list[tuple[int, int]]`, *optional*):
                List of tuples (`tuple[int, int]`) containing the target size `(height, width)` of each image in the
                batch. If unset, predictions will not be resized.
        Returns:
            `list[dict]`: A list of dictionaries, each dictionary containing the following keys:
            - **scores** (`torch.Tensor`): The confidence scores for each predicted instance on the image.
            - **boxes** (`torch.Tensor`): Image bounding boxes in (top_left_x, top_left_y, bottom_right_x,
              bottom_right_y) format.
            - **masks** (`torch.Tensor`): Binary segmentation masks for each instance, shape (num_instances,
              height, width).
        """
        pred_logits = outputs.pred_logits  # (batch_size, num_queries)
        pred_boxes = outputs.pred_boxes  # (batch_size, num_queries, 4) in xyxy format
        pred_masks = outputs.pred_masks  # (batch_size, num_queries, height, width)
        presence_logits = outputs.presence_logits  # (batch_size, 1) or None
        batch_size = pred_logits.shape[0]
        if target_sizes is not None and len(target_sizes) != batch_size:
            raise ValueError("Make sure that you pass in as many target sizes as images")
        # Compute scores: combine pred_logits with presence_logits if available
        batch_scores = pred_logits.sigmoid()
        if presence_logits is not None:
            presence_scores = presence_logits.sigmoid()  # (batch_size, 1)
            batch_scores = batch_scores * presence_scores  # Broadcast multiplication
        # Apply sigmoid to mask logits
        batch_masks = pred_masks.sigmoid()
        # Boxes are already in xyxy format from the model
        batch_boxes = pred_boxes
        # Scale boxes to target sizes if provided
        if target_sizes is not None:
            batch_boxes = _scale_boxes(batch_boxes, target_sizes)
        results = []
        for idx, (scores, boxes, masks) in enumerate(zip(batch_scores, batch_boxes, batch_masks)):
            # Filter by score threshold
            keep = scores > threshold
            scores = scores[keep]
            boxes = boxes[keep]
            masks = masks[keep]  # (num_keep, height, width)
            # Resize masks to target size if provided
            if target_sizes is not None:
                target_size = target_sizes[idx]
                # interpolate requires a non-empty batch; skip resize when nothing was kept.
                if len(masks) > 0:
                    masks = torch.nn.functional.interpolate(
                        masks.unsqueeze(0),  # (1, num_keep, height, width)
                        size=target_size,
                        mode="bilinear",
                        align_corners=False,
                    ).squeeze(0)  # (num_keep, target_height, target_width)
            # Binarize masks
            masks = (masks > mask_threshold).to(torch.long)
            results.append({"scores": scores, "boxes": boxes, "masks": masks})
        return results
# Public API of this module.
__all__ = ["Sam3ImageProcessorFast"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/sam3/modular_sam3.py",
"license": "Apache License 2.0",
"lines": 206,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/sam3/processing_sam3.py | # Copyright 2025 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Processor class for SAM3.
"""
from copy import deepcopy
import numpy as np
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PreTokenizedInput, TextInput
from ...utils import TensorType, auto_docstring, is_torch_available, logging
from ...utils.import_utils import requires
# Module-level logger, named after this module per transformers convention.
logger = logging.get_logger(__name__)
# torch is an optional dependency of transformers; import it only when present.
if is_torch_available():
    import torch
def box_cxcywh_to_xyxy(x):
    """Convert boxes from (center_x, center_y, width, height) to (x1, y1, x2, y2)."""
    center_x, center_y, width, height = x.unbind(-1)
    half_w = 0.5 * width
    half_h = 0.5 * height
    corners = [center_x - half_w, center_y - half_h, center_x + half_w, center_y + half_h]
    return torch.stack(corners, dim=-1)
def box_cxcywh_to_xywh(x):
    """Convert boxes from (center_x, center_y, width, height) to (top_left_x, top_left_y, width, height)."""
    center_x, center_y, width, height = x.unbind(-1)
    components = [center_x - 0.5 * width, center_y - 0.5 * height, width, height]
    return torch.stack(components, dim=-1)
def box_xywh_to_xyxy(x):
    """Convert boxes from (top_left_x, top_left_y, width, height) to (x1, y1, x2, y2)."""
    left, top, width, height = x.unbind(-1)
    return torch.stack([left, top, left + width, top + height], dim=-1)
def box_xywh_to_cxcywh(x):
    """Convert boxes from (top_left_x, top_left_y, width, height) to (center_x, center_y, width, height)."""
    left, top, width, height = x.unbind(-1)
    centered = [left + 0.5 * width, top + 0.5 * height, width, height]
    return torch.stack(centered, dim=-1)
def box_xyxy_to_xywh(x):
    """Convert boxes from (x1, y1, x2, y2) to (top_left_x, top_left_y, width, height)."""
    left, top, right, bottom = x.unbind(-1)
    return torch.stack([left, top, right - left, bottom - top], dim=-1)
def box_xyxy_to_cxcywh(x):
    """Convert boxes from (x1, y1, x2, y2) to (center_x, center_y, width, height)."""
    left, top, right, bottom = x.unbind(-1)
    centered = [(left + right) / 2, (top + bottom) / 2, right - left, bottom - top]
    return torch.stack(centered, dim=-1)
def box_area(boxes):
    """
    Batched version of box area. Boxes should be in [x0, y0, x1, y1] format.
    Inputs:
    - boxes: Tensor of shape (..., 4)
    Returns:
    - areas: Tensor of shape (...,)
    """
    left, top, right, bottom = boxes.unbind(-1)
    return (right - left) * (bottom - top)
@requires(backends=("torch",))
@auto_docstring
class Sam3Processor(ProcessorMixin):
    # Wraps a SAM3 image processor and a tokenizer; converts ragged user-supplied
    # box/label prompts into padded, normalized tensors the model consumes.
    def __init__(
        self, image_processor, tokenizer, target_size: int | None = None, point_pad_value: int = -10, **kwargs
    ):
        r"""
        target_size (`int`, *optional*):
            The target size (target_size, target_size) to which the image will be resized.
        point_pad_value (`int`, *optional*, defaults to -10):
            The value used for padding input boxes.
        """
        super().__init__(image_processor, tokenizer, **kwargs)
        # Sentinel used when padding ragged box/label inputs to rectangular tensors.
        self.point_pad_value = point_pad_value
        # Default to the image processor's (square) input height when not overridden.
        self.target_size = target_size if target_size is not None else self.image_processor.size["height"]
    @auto_docstring
    def __call__(
        self,
        images: ImageInput | None = None,
        text: TextInput | PreTokenizedInput | list[TextInput] | list[PreTokenizedInput] | None = None,
        segmentation_maps: ImageInput | None = None,
        input_boxes: list[list[list[float]]] | torch.Tensor | None = None,
        input_boxes_labels: list[list[list[int]]] | torch.Tensor | None = None,
        original_sizes: list[list[float]] | torch.Tensor | None = None,
        return_tensors: str | TensorType | None = None,
        **kwargs,
    ) -> BatchEncoding:
        r"""
        images (`ImageInput`, *optional*):
            The image(s) to process.
        text (`str`, `list[str]`, `list[list[str]]`, *optional*):
            The text to process.
        segmentation_maps (`ImageInput`, *optional*):
            The segmentation maps to process.
        input_boxes (`list[list[list[float]]]`, `torch.Tensor`, *optional*):
            The bounding boxes to process.
        input_boxes_labels (`list[list[int]]`, `torch.Tensor`, *optional*):
            The labels for the bounding boxes.
        original_sizes (`list[list[float]]`, `torch.Tensor`, *optional*):
            The original sizes of the images.
        Returns:
            A [`BatchEncoding`] with the following fields:
            - `pixel_values` (`torch.Tensor`): The processed image(s).
            - `original_sizes` (`list[list[float]]`): The original sizes of the images.
            - `labels` (`torch.Tensor`): The processed segmentation maps (if provided).
            - `input_boxes_labels` (`torch.Tensor`): The processed labels for the bounding boxes.
            - `input_boxes` (`torch.Tensor`): The processed bounding boxes.
        """
        encoding = None
        if images is not None:
            encoding = self.image_processor(
                images,
                segmentation_maps=segmentation_maps,
                return_tensors=return_tensors,
                **kwargs,
            )
        elif original_sizes is not None:
            # No images: original_sizes alone is enough to normalize box prompts.
            if isinstance(original_sizes, torch.Tensor):
                original_sizes = original_sizes.cpu().tolist()
            encoding = BatchEncoding({"original_sizes": original_sizes}, tensor_type=return_tensors)
        elif input_boxes is not None:
            raise ValueError("Either images or original_sizes must be provided if input_boxes is not None")
        text = self._resolve_text_prompts(text, input_boxes)
        if text is not None:
            # Fixed-length padding so text prompts batch uniformly across images.
            text_inputs = self.tokenizer(text, return_tensors=return_tensors, padding="max_length", max_length=32)
            if encoding is not None:
                encoding.update(text_inputs)
            else:
                encoding = text_inputs
        # Process input boxes if provided
        if input_boxes is not None:
            original_sizes = encoding["original_sizes"]
            # Validate and convert inputs to standardized format
            processed_boxes = self._validate_single_input(
                input_boxes,
                expected_depth=3,
                input_name="boxes",
                expected_format="[image level, box level, box coordinates]",
                expected_coord_size=4,
            )
            processed_boxes_labels = self._validate_single_input(
                input_boxes_labels,
                expected_depth=2,
                input_name="labels",
                expected_format="[image level, box level]",
            )
            # Get padding requirements for all inputs
            if processed_boxes is not None:
                boxes_max_dims = self._get_nested_dimensions(processed_boxes)[:2]
            if processed_boxes_labels is not None:
                boxes_labels_max_dims = self._get_nested_dimensions(processed_boxes_labels)[:2]
            # Ensure boxes and labels have consistent dimensions
            if processed_boxes is not None and processed_boxes_labels is not None:
                if boxes_max_dims != boxes_labels_max_dims:
                    raise ValueError(
                        "Input boxes and labels have inconsistent dimensions. Please ensure they have the same dimensions."
                    )
            # Pad and normalize all inputs to final tensor format
            if processed_boxes is not None:
                padded_boxes = self._pad_nested_list(processed_boxes, boxes_max_dims + [4])
                final_boxes = torch.tensor(padded_boxes, dtype=torch.float32)
                # Normalize in place to [0, 1], leaving pad sentinels untouched.
                self._normalize_tensor_coordinates(
                    final_boxes, original_sizes, is_bounding_box=True, preserve_padding=True
                )
                # Model consumes boxes in (center_x, center_y, width, height) format.
                final_boxes = box_xyxy_to_cxcywh(final_boxes)
                encoding.update({"input_boxes": final_boxes})
            if processed_boxes_labels is not None:
                padded_boxes_labels = self._pad_nested_list(processed_boxes_labels, boxes_labels_max_dims)
                final_boxes_labels = torch.tensor(padded_boxes_labels, dtype=torch.int64)
                encoding.update({"input_boxes_labels": final_boxes_labels})
        return encoding
    def _normalize_coordinates(self, coords: "torch.Tensor", original_size, is_bounding_box=False) -> "torch.Tensor":
        """
        Normalize coordinates to [0, 1] relative to the original image size.
        Expects 2 values in the final dimension (4 when `is_bounding_box`).
        Args:
            coords (`torch.Tensor`):
                The coordinates to be normalized.
            original_size (`tuple`):
                The original size of the image in (H, W) format.
            is_bounding_box (`bool`, *optional*, defaults to `False`):
                Whether the coordinates are bounding boxes.
        """
        old_h, old_w = original_size
        # deepcopy: work on a copy so the caller's tensor is not modified here.
        coords = deepcopy(coords).float()
        if is_bounding_box:
            # View each box as two (x, y) points so the same divide applies.
            coords = coords.reshape(-1, 2, 2)
        coords[..., 0] = coords[..., 0] / old_w
        coords[..., 1] = coords[..., 1] / old_h
        if is_bounding_box:
            coords = coords.reshape(-1, 4)
        return coords
    def _convert_to_nested_list(self, data, expected_depth, current_depth=0):
        """
        Recursively convert various input formats (tensors, numpy arrays, lists) to nested lists.
        Preserves None values within lists.
        Args:
            data: Input data in any format (may be None or contain None values)
            expected_depth: Expected nesting depth
            current_depth: Current depth in recursion
        Returns:
            Nested list representation of the data (or None)
        """
        if data is None:
            return None
        # Convert tensor/numpy to list if we're at a leaf level or if it's a multi-dimensional array
        if isinstance(data, torch.Tensor):  # PyTorch tensor
            if current_depth == expected_depth - 2 or len(data.shape) <= 2:  # At coordinate level or small tensor
                return data.numpy().tolist()
            else:
                return [self._convert_to_nested_list(item, expected_depth, current_depth + 1) for item in data]
        elif isinstance(data, np.ndarray):  # NumPy array
            if current_depth == expected_depth - 2 or len(data.shape) <= 2:  # At coordinate level or small array
                return data.tolist()
            else:
                return [self._convert_to_nested_list(item, expected_depth, current_depth + 1) for item in data]
        elif isinstance(data, list):
            if current_depth == expected_depth:
                # We've reached the expected depth, return as is
                return data
            else:
                # Continue recursion, preserving None values
                return [
                    self._convert_to_nested_list(item, expected_depth, current_depth + 1) if item is not None else None
                    for item in data
                ]
        elif isinstance(data, (int, float)):
            return data
        else:
            raise ValueError(f"Unsupported data type: {type(data)}")
    def _resolve_text_prompts(self, text, input_boxes):
        """
        Resolve text prompts by setting defaults based on prompt types.
        """
        # NOTE(review): truthiness checks on `input_boxes` below assume a list or
        # None; a multi-element torch.Tensor (allowed by __call__'s annotation)
        # would raise on `if input_boxes` — confirm callers always pass lists here.
        # If no text provided, infer default based on prompt type
        if text is None:
            return "visual" if input_boxes else None
        if not isinstance(text, (list, tuple)):
            return text
        # Validate list/tuple length matches both prompt types if provided
        text = list(text)  # Convert to list to allow modification
        if input_boxes and len(text) != len(input_boxes):
            raise ValueError(
                f"The number of text prompts must match the number of input boxes. "
                f"Got {len(text)} text prompts and {len(input_boxes)} input boxes."
            )
        # Fill in None values with defaults based on corresponding prompt
        for i, text_value in enumerate(text):
            if text_value is None and input_boxes and input_boxes[i] is not None:
                text[i] = "visual"
        return text
    def _get_nested_dimensions(self, nested_list, max_dims=None):
        """
        Get the maximum dimensions at each level of nesting, skipping None values.
        Args:
            nested_list (`list`):
                Nested list structure (may contain None values).
            max_dims (`list`, *optional*):
                Current maximum dimensions (for recursion).
        Returns:
            `list`: A list of maximum dimensions for each nesting level.
        """
        if max_dims is None:
            max_dims = []
        if not isinstance(nested_list, list):
            return max_dims
        if len(max_dims) == 0:
            max_dims.append(len(nested_list))
        else:
            max_dims[0] = max(max_dims[0], len(nested_list))
        if len(nested_list) > 0:
            for item in nested_list:
                # Skip None values
                if item is None:
                    continue
                if isinstance(item, list):
                    sub_dims = self._get_nested_dimensions(item)
                    # Merge sub_dims into max_dims
                    for i, dim in enumerate(sub_dims):
                        if i + 1 >= len(max_dims):
                            max_dims.append(dim)
                        else:
                            max_dims[i + 1] = max(max_dims[i + 1], dim)
        return max_dims
    def _pad_nested_list(self, nested_list, target_dims, current_level=0, pad_value=None):
        """
        Recursively pad a nested list to match target dimensions. Replaces None values with padded structures.
        Args:
            nested_list (`list`):
                Nested list to pad (may contain None values).
            target_dims (`list`):
                Target dimensions for each level.
            current_level (`int`, *optional*, defaults to 0):
                Current nesting level.
            pad_value (`int`, *optional*):
                Value to use for padding.
        Returns:
            `list`: The padded nested list.
        """
        if pad_value is None:
            pad_value = self.point_pad_value
        if current_level >= len(target_dims):
            return nested_list
        # Ensure we have a list
        if not isinstance(nested_list, list):
            nested_list = [nested_list]
        # Pad current level
        current_size = len(nested_list)
        target_size = target_dims[current_level]
        # Pad with appropriate values
        if current_level == len(target_dims) - 1:
            # At the coordinate level, pad with pad_value
            nested_list.extend([pad_value] * (target_size - current_size))
        else:
            # At higher levels, pad with nested structures
            if current_size > 0:
                # Create appropriately sized template
                if current_level < len(target_dims) - 2:
                    # For non-coordinate levels, create empty nested structure
                    template_dims = target_dims[current_level + 1 :]
                    template = self._create_empty_nested_structure(template_dims, pad_value)
                else:
                    # For coordinate level, create list of pad_values
                    template = [pad_value] * target_dims[current_level + 1]
                # deepcopy so padded entries don't share the same nested object.
                nested_list.extend([deepcopy(template) for _ in range(target_size - current_size)])
            else:
                # Create from scratch
                template_dims = target_dims[current_level + 1 :]
                template = self._create_empty_nested_structure(template_dims, pad_value)
                nested_list.extend([deepcopy(template) for _ in range(target_size)])
        # Recursively pad sublists, replacing None with padded structures
        if current_level < len(target_dims) - 1:
            for i in range(len(nested_list)):
                if nested_list[i] is None:
                    # Replace None with fully padded structure
                    template_dims = target_dims[current_level + 1 :]
                    nested_list[i] = self._create_empty_nested_structure(template_dims, pad_value)
                elif isinstance(nested_list[i], list):
                    nested_list[i] = self._pad_nested_list(nested_list[i], target_dims, current_level + 1, pad_value)
        return nested_list
    def _create_empty_nested_structure(self, dims, pad_value):
        """
        Create an empty nested structure with given dimensions filled with pad_value.
        Args:
            dims (`list`):
                The dimensions of the nested structure.
            pad_value (`int`):
                The value to fill the structure with.
        """
        if len(dims) == 1:
            return [pad_value] * dims[0]
        else:
            return [self._create_empty_nested_structure(dims[1:], pad_value) for _ in range(dims[0])]
    def _get_nesting_level(self, input_list):
        """
        Get the nesting level of a list structure, skipping None values.
        Args:
            input_list (`list`):
                The list to get the nesting level of.
        """
        if isinstance(input_list, list):
            if len(input_list) == 0:
                return 1
            # Find first non-None element to determine nesting level
            for item in input_list:
                if item is not None:
                    return 1 + self._get_nesting_level(item)
            # All elements are None, treat as single level
            return 1
        elif isinstance(input_list, (np.ndarray, torch.Tensor)):
            # For arrays/tensors, the nesting level is the number of dimensions
            return len(input_list.shape)
        return 0
    def _validate_single_input(
        self,
        data: torch.Tensor | np.ndarray | list,
        expected_depth: int,
        input_name: str,
        expected_format: str,
        expected_coord_size: int | None = None,
    ) -> list:
        """
        Validate a single input by ensuring proper nesting and raising an error if the input is not valid.
        Args:
            data (`torch.Tensor`, `np.ndarray`, or `list`):
                Input data to process.
            expected_depth (`int`):
                Expected nesting depth.
            input_name (`str`):
                Name of the input for error messages.
            expected_format (`str`):
                The expected format of the input.
            expected_coord_size (`int`, *optional*):
                Expected coordinate size (4 for boxes, None for labels).
        """
        if data is None:
            return None
        # Handle tensors and numpy arrays first
        if isinstance(data, (torch.Tensor, np.ndarray)):
            # For tensors/arrays, we can directly check the number of dimensions
            if data.ndim != expected_depth:
                raise ValueError(
                    f"Input {input_name} must be a tensor/array with {expected_depth} dimensions. The expected nesting format is {expected_format}. Got {data.ndim} dimensions."
                )
            elif expected_coord_size is not None:
                if data.shape[-1] != expected_coord_size:
                    raise ValueError(
                        f"Input {input_name} must be a tensor/array with {expected_coord_size} as the last dimension, got {data.shape[-1]}."
                    )
            return self._convert_to_nested_list(data, expected_depth)
        # Handle nested lists
        if isinstance(data, list):
            current_depth = self._get_nesting_level(data)
            if current_depth != expected_depth:
                raise ValueError(
                    f"Input {input_name} must be a nested list with {expected_depth} levels. The expected nesting format is {expected_format}. Got {current_depth} levels."
                )
            return self._convert_to_nested_list(data, expected_depth)
        # NOTE(review): inputs that are neither None, tensor/array, nor list fall
        # through and return None implicitly — confirm this is intended rather
        # than raising a TypeError.
    def _normalize_tensor_coordinates(self, tensor, original_sizes, is_bounding_box=False, preserve_padding=False):
        """
        Helper method to normalize coordinates in a tensor across multiple images.
        Mutates `tensor` in place.
        Args:
            tensor (`torch.Tensor`):
                Input tensor with coordinates.
            original_sizes (`list`):
                Original image sizes.
            is_bounding_box (`bool`, *optional*, defaults to `False`):
                Whether coordinates are bounding boxes.
            preserve_padding (`bool`, *optional*, defaults to `False`):
                Whether to preserve padding values (for boxes).
        """
        if preserve_padding:
            # For boxes: avoid normalizing pad values
            mask = tensor != self.point_pad_value
            # A coordinate row counts as padding unless all of its values are real.
            coord_mask = mask.all(dim=-1, keepdim=True)
        for img_idx in range(len(original_sizes)):
            if img_idx < tensor.shape[0]:
                original_size = original_sizes[img_idx] if img_idx < len(original_sizes) else original_sizes[0]
                normalized_coords = self._normalize_coordinates(
                    tensor[img_idx], original_size, is_bounding_box=is_bounding_box
                )
                if preserve_padding:
                    # Only update non-padded values
                    img_mask = coord_mask[img_idx]
                    tensor[img_idx] = torch.where(
                        img_mask.expand_as(tensor[img_idx]), normalized_coords, tensor[img_idx]
                    )
                else:
                    tensor[img_idx] = normalized_coords
    def post_process_semantic_segmentation(self, outputs, target_sizes=None, threshold=0.5):
        """
        Converts the output of [`Sam3Model`] into semantic segmentation maps.
        Delegates to the image processor's implementation.
        Args:
            outputs ([`Sam3ImageSegmentationOutput`]):
                Raw outputs of the model containing semantic_seg.
            target_sizes (`list[tuple]` of length `batch_size`, *optional*):
                List of tuples corresponding to the requested final size (height, width) of each prediction. If unset,
                predictions will not be resized.
            threshold (`float`, *optional*, defaults to 0.5):
                Threshold for binarizing the semantic segmentation masks.
        Returns:
            semantic_segmentation: `list[torch.Tensor]` of length `batch_size`, where each item is a semantic
            segmentation map of shape (height, width) corresponding to the target_sizes entry (if `target_sizes` is
            specified). Each entry is a binary mask (0 or 1).
        """
        return self.image_processor.post_process_semantic_segmentation(outputs, target_sizes, threshold)
    def post_process_object_detection(self, outputs, threshold=0.3, target_sizes=None):
        """
        Converts the raw output of [`Sam3Model`] into final bounding boxes in (top_left_x, top_left_y,
        bottom_right_x, bottom_right_y) format. This is a convenience wrapper around the image processor method.
        Args:
            outputs ([`Sam3ImageSegmentationOutput`]):
                Raw outputs of the model containing pred_boxes, pred_logits, and optionally presence_logits.
            threshold (`float`, *optional*, defaults to 0.3):
                Score threshold to keep object detection predictions.
            target_sizes (`list[tuple[int, int]]`, *optional*):
                List of tuples (`tuple[int, int]`) containing the target size `(height, width)` of each image in the
                batch. If unset, predictions will not be resized.
        Returns:
            `list[dict]`: A list of dictionaries, each dictionary containing the following keys:
            - **scores** (`torch.Tensor`): The confidence scores for each predicted box on the image.
            - **boxes** (`torch.Tensor`): Image bounding boxes in (top_left_x, top_left_y, bottom_right_x,
              bottom_right_y) format.
        Example:
        ```python
        >>> from transformers import AutoModel, AutoProcessor
        >>> from PIL import Image
        >>> import httpx
        >>> from io import BytesIO
        >>> model = AutoModel.from_pretrained("facebook/sam3-base")
        >>> processor = AutoProcessor.from_pretrained("facebook/sam3-base")
        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> with httpx.stream("GET", url) as response:
        ...     image = Image.open(BytesIO(response.read()))
        >>> inputs = processor(images=image, text="cat", return_tensors="pt")
        >>> outputs = model(**inputs)
        >>> # Post-process to get bounding boxes
        >>> results = processor.post_process_object_detection(outputs, threshold=0.3, target_sizes=[image.size[::-1]])
        >>> boxes = results[0]["boxes"]
        >>> scores = results[0]["scores"]
        ```
        """
        return self.image_processor.post_process_object_detection(outputs, threshold, target_sizes)
    def post_process_instance_segmentation(
        self,
        outputs,
        threshold=0.3,
        mask_threshold=0.5,
        target_sizes=None,
    ):
        """
        Converts the raw output of [`Sam3Model`] into instance segmentation predictions with bounding boxes and masks.
        This is a convenience wrapper around the image processor method.
        Args:
            outputs ([`Sam3ImageSegmentationOutput`]):
                Raw outputs of the model containing pred_boxes, pred_logits, pred_masks, and optionally
                presence_logits.
            threshold (`float`, *optional*, defaults to 0.3):
                Score threshold to keep instance predictions.
            mask_threshold (`float`, *optional*, defaults to 0.5):
                Threshold for binarizing the predicted masks.
            target_sizes (`list[tuple[int, int]]`, *optional*):
                List of tuples (`tuple[int, int]`) containing the target size `(height, width)` of each image in the
                batch. If unset, predictions will not be resized.
        Returns:
            `list[dict]`: A list of dictionaries, each dictionary containing the following keys:
            - **scores** (`torch.Tensor`): The confidence scores for each predicted instance on the image.
            - **boxes** (`torch.Tensor`): Image bounding boxes in (top_left_x, top_left_y, bottom_right_x,
              bottom_right_y) format.
            - **masks** (`torch.Tensor`): Binary segmentation masks for each instance, shape (num_instances,
              height, width).
        Example:
        ```python
        >>> from transformers import AutoModel, AutoProcessor
        >>> from PIL import Image
        >>> import httpx
        >>> from io import BytesIO
        >>> model = AutoModel.from_pretrained("facebook/sam3-base")
        >>> processor = AutoProcessor.from_pretrained("facebook/sam3-base")
        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> with httpx.stream("GET", url) as response:
        ...     image = Image.open(BytesIO(response.read()))
        >>> inputs = processor(images=image, text="cat", return_tensors="pt")
        >>> outputs = model(**inputs)
        >>> # Post-process to get instance segmentation
        >>> results = processor.post_process_instance_segmentation(
        ...     outputs, threshold=0.3, target_sizes=[image.size[::-1]]
        ... )
        >>> masks = results[0]["masks"]
        >>> boxes = results[0]["boxes"]
        >>> scores = results[0]["scores"]
        ```
        """
        return self.image_processor.post_process_instance_segmentation(
            outputs, threshold, mask_threshold, target_sizes
        )
# Public API of this module.
__all__ = ["Sam3Processor"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/sam3/processing_sam3.py",
"license": "Apache License 2.0",
"lines": 559,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/sam3_tracker/modular_sam3_tracker.py | # Copyright 2025 the HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ... import initialization as init
from ...configuration_utils import PreTrainedConfig
from ...modeling_utils import PreTrainedModel
from ...utils import auto_docstring
from ..auto import CONFIG_MAPPING, AutoModel
from ..sam2.configuration_sam2 import (
Sam2Config,
Sam2MaskDecoderConfig,
Sam2PromptEncoderConfig,
)
from ..sam2.modeling_sam2 import (
Sam2Attention,
Sam2FeedForward,
Sam2ImageSegmentationOutput,
Sam2LayerNorm,
Sam2MaskDecoder,
Sam2MaskEmbedding,
Sam2Model,
Sam2PositionalEmbedding,
Sam2PreTrainedModel,
Sam2PromptEncoder,
Sam2TwoWayAttentionBlock,
Sam2TwoWayTransformer,
)
from ..sam2.processing_sam2 import Sam2Processor
class Sam3TrackerPromptEncoderConfig(Sam2PromptEncoderConfig):
    r"""
    This is the configuration class to store the configuration of a [`Sam3TrackerPromptEncoder`]. The [`Sam3TrackerPromptEncoder`]
    module is used to encode the input 2D points and bounding boxes.

    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.

    Args:
        hidden_size (`int`, *optional*, defaults to 256):
            Dimensionality of the hidden states.
        image_size (`int`, *optional*, defaults to 1008):
            The expected output resolution of the image.
        patch_size (`int`, *optional*, defaults to 14):
            The size (resolution) of each patch.
        mask_input_channels (`int`, *optional*, defaults to 16):
            The number of channels to be fed to the `MaskDecoder` module.
        num_point_embeddings (`int`, *optional*, defaults to 4):
            The number of point embeddings to be used.
        hidden_act (`str`, *optional*, defaults to `"gelu"`):
            The non-linear activation function in the encoder and pooler.
        layer_norm_eps (`float`, *optional*, defaults to 1e-06):
            The epsilon used by the layer normalization layers.
        scale (`float`, *optional*, defaults to 1):
            The scale factor for the prompt encoder.
    """

    base_config_key = "prompt_encoder_config"

    def __init__(
        self,
        hidden_size=256,
        image_size=1008,
        patch_size=14,
        mask_input_channels=16,
        num_point_embeddings=4,
        hidden_act="gelu",
        layer_norm_eps=1e-6,
        scale=1,
        **kwargs,
    ):
        # Forward every explicit argument to the Sam2 prompt-encoder config so the
        # SAM3-specific defaults documented above (e.g. image_size=1008, patch_size=14)
        # actually take effect. Previously only **kwargs was forwarded, silently
        # discarding all named parameters and falling back to the Sam2 defaults.
        super().__init__(
            hidden_size=hidden_size,
            image_size=image_size,
            patch_size=patch_size,
            mask_input_channels=mask_input_channels,
            num_point_embeddings=num_point_embeddings,
            hidden_act=hidden_act,
            layer_norm_eps=layer_norm_eps,
            scale=scale,
            **kwargs,
        )
class Sam3TrackerProcessor(Sam2Processor):
    """Processor for the SAM3 tracker; behaviorally identical to `Sam2Processor` (modular re-export)."""

    pass


class Sam3TrackerMaskDecoderConfig(Sam2MaskDecoderConfig):
    """Mask-decoder config for the SAM3 tracker; identical to `Sam2MaskDecoderConfig` (modular re-export)."""

    pass
class Sam3TrackerConfig(Sam2Config):
    r"""
    [`Sam3TrackerConfig`] is the configuration class to store the configuration of a [`Sam3TrackerModel`]. It is used to instantiate a
    SAM3_TRACKER model according to the specified arguments, defining the memory attention, memory encoder, and image encoder
    configs. Instantiating a configuration defaults will yield a similar configuration to that of the SAM 2.1 Hiera-tiny
    [facebook/sam3_tracker.1-hiera-tiny](https://huggingface.co/facebook/sam3_tracker.1-hiera-tiny) architecture.

    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.

    <Tip>

    SAM3 Tracker checkpoints with `model_type="sam3_tracker_video"` are compatible with `Sam3TrackerModel` since the
    video variant weights are a superset of the image-only model weights. You may see a warning about model type
    mismatch when loading such checkpoints, which can be safely ignored in this case.

    </Tip>

    Args:
        vision_config (Union[`dict`, `Sam3TrackerVisionConfig`], *optional*):
            Dictionary of configuration options used to initialize [`Sam3TrackerVisionConfig`].
        prompt_encoder_config (Union[`dict`, `Sam3TrackerPromptEncoderConfig`], *optional*):
            Dictionary of configuration options used to initialize [`Sam3TrackerPromptEncoderConfig`].
        mask_decoder_config (Union[`dict`, `Sam3TrackerMaskDecoderConfig`], *optional*):
            Dictionary of configuration options used to initialize [`Sam3TrackerMaskDecoderConfig`].
        initializer_range (`float`, *optional*, defaults to 0.02):
            Standard deviation for parameter initialization.

    Example:

    ```python
    >>> from transformers import (
    ...     Sam3TrackerVisionConfig,
    ...     Sam3TrackerPromptEncoderConfig,
    ...     Sam3TrackerMaskDecoderConfig,
    ...     Sam3TrackerModel,
    ... )

    >>> # Initializing a Sam3TrackerConfig with `"facebook/sam3_tracker.1_hiera_tiny"` style configuration
    >>> configuration = Sam3TrackerConfig()

    >>> # Initializing a Sam3TrackerModel (with random weights) from the `"facebook/sam3_tracker.1_hiera_tiny"` style configuration
    >>> model = Sam3TrackerModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config

    >>> # We can also initialize a Sam3TrackerConfig from a Sam3TrackerVisionConfig, Sam3TrackerPromptEncoderConfig, and Sam3TrackerMaskDecoderConfig

    >>> # Initializing SAM3_TRACKER vision encoder, memory attention, and memory encoder configurations
    >>> vision_config = Sam3TrackerVisionConfig()
    >>> prompt_encoder_config = Sam3TrackerPromptEncoderConfig()
    >>> mask_decoder_config = Sam3TrackerMaskDecoderConfig()

    >>> config = Sam3TrackerConfig(vision_config, prompt_encoder_config, mask_decoder_config)
    ```
    """

    def __init__(
        self,
        vision_config=None,
        prompt_encoder_config=None,
        mask_decoder_config=None,
        initializer_range=0.02,
        **kwargs,
    ):
        # Default backbone feature map sizes; presumably strides 4/8/16 of a 1008px
        # input (288/144/72) — TODO confirm against Sam3VisionConfig defaults.
        vision_config = (
            vision_config
            if vision_config is not None
            else {"backbone_feature_sizes": [[288, 288], [144, 144], [72, 72]]}
        )
        prompt_encoder_config = prompt_encoder_config if prompt_encoder_config is not None else {}
        mask_decoder_config = mask_decoder_config if mask_decoder_config is not None else {}
        if isinstance(vision_config, dict):
            vision_config["model_type"] = vision_config.get("model_type", "sam3_vision_model")
            vision_config = CONFIG_MAPPING[vision_config["model_type"]](**vision_config)
        # Normalize config objects back to dicts so they can be rebuilt with the
        # Sam3Tracker-specific classes below.
        if isinstance(prompt_encoder_config, Sam3TrackerPromptEncoderConfig):
            prompt_encoder_config = prompt_encoder_config.to_dict()
        if isinstance(mask_decoder_config, Sam3TrackerMaskDecoderConfig):
            mask_decoder_config = mask_decoder_config.to_dict()
        self.vision_config = vision_config
        self.prompt_encoder_config = Sam3TrackerPromptEncoderConfig(**prompt_encoder_config)
        self.mask_decoder_config = Sam3TrackerMaskDecoderConfig(**mask_decoder_config)
        self.initializer_range = initializer_range
        # Deliberately bypass Sam2Config.__init__ and call the base config directly.
        # Fixed: the unbound call must pass `self` explicitly; previously
        # `PreTrainedConfig.__init__(**kwargs)` raised a TypeError (missing `self`).
        PreTrainedConfig.__init__(self, **kwargs)
class Sam3TrackerImageSegmentationOutput(Sam2ImageSegmentationOutput):
    """Output type for SAM3 tracker image segmentation; identical to `Sam2ImageSegmentationOutput` (modular re-export)."""

    pass


class Sam3TrackerFeedForward(Sam2FeedForward):
    """Feed-forward block for the SAM3 tracker; identical to `Sam2FeedForward` (modular re-export)."""

    pass
@auto_docstring(
    custom_intro="""
    Segment Anything Model 3 (SAM 3) for generating segmentation masks, given an input image and
    input points and labels, boxes, or masks.
    """
)
class Sam3TrackerPreTrainedModel(Sam2PreTrainedModel):
    @torch.no_grad()
    def _init_weights(self, module):
        """Initialize `module`'s weights: generic init first, then SAM3-tracker-specific parameters."""
        # Fixed: the unbound base-class call must pass `self` explicitly. Previously
        # `PreTrainedModel._init_weights(module)` put `module` in the `self` slot and
        # raised a TypeError for the missing `module` argument.
        PreTrainedModel._init_weights(self, module)
        if isinstance(module, Sam3TrackerModel):
            # `no_memory_embedding` is a learned placeholder; start it at zero.
            if module.no_memory_embedding is not None:
                init.zeros_(module.no_memory_embedding)
        elif isinstance(module, Sam3TrackerPositionalEmbedding):
            init.normal_(module.positional_embedding, std=module.scale)
# The following classes are modular re-exports: each inherits its full behavior from
# the corresponding Sam2 class unchanged and only exists so the SAM3 tracker has its
# own class names (and checkpoint/auto-class mapping entries).
class Sam3TrackerPositionalEmbedding(Sam2PositionalEmbedding):
    pass


class Sam3TrackerMaskEmbedding(Sam2MaskEmbedding):
    pass


class Sam3TrackerPromptEncoder(Sam2PromptEncoder):
    pass


class Sam3TrackerAttention(Sam2Attention):
    pass


class Sam3TrackerTwoWayAttentionBlock(Sam2TwoWayAttentionBlock):
    pass


class Sam3TrackerTwoWayTransformer(Sam2TwoWayTransformer):
    pass


class Sam3TrackerLayerNorm(Sam2LayerNorm):
    pass


class Sam3TrackerMaskDecoder(Sam2MaskDecoder):
    pass
class Sam3TrackerModel(Sam2Model):
    _checkpoint_conversion_mapping = {
        r"tracker_model.(.+)": r"\1",  # the regex allows to remove the prefix, and add it back in revert mode
        "detector_model.vision_encoder.backbone.": "vision_encoder.backbone.",
        "tracker_neck.": "vision_encoder.neck.",
    }
    # Weights that exist in sam3_video checkpoints but are not used by the
    # image-only tracker (detector and memory/temporal components).
    _keys_to_ignore_on_load_unexpected = [
        r"^detector_model.",
        r"^memory_.*",
        r"^mask_downsample.*",
        r"^object_pointer_proj.*",
        r"^temporal_positional_encoding_projection_layer.*",
        "no_memory_positional_encoding",
        "no_object_pointer",
        "occlusion_spatial_embedding_parameter",
    ]

    def __init__(self, config: Sam3TrackerConfig):
        """Build the image-only SAM3 tracker: vision encoder, prompt encoder, and mask decoder."""
        # loading from a sam3_video config: unwrap the nested tracker config
        if hasattr(config, "tracker_config") and config.tracker_config is not None:
            if isinstance(config.tracker_config, dict):
                config.tracker_config = Sam3TrackerConfig(**config.tracker_config)
            config = config.tracker_config
        # Deliberately bypass Sam2Model.__init__ and initialize from the PreTrainedModel
        # base. Fixed: the unbound call must pass `self` explicitly; previously
        # `Sam3TrackerPreTrainedModel.__init__(config)` raised a TypeError (missing `self`).
        Sam3TrackerPreTrainedModel.__init__(self, config)
        self.shared_image_embedding = Sam3TrackerPositionalEmbedding(config.prompt_encoder_config)
        self.vision_encoder = AutoModel.from_config(config.vision_config)
        self.prompt_encoder = Sam3TrackerPromptEncoder(config.prompt_encoder_config)
        # The module using it is not a PreTrainedModel subclass so we need this
        config.mask_decoder_config._attn_implementation = config._attn_implementation
        self.mask_decoder = Sam3TrackerMaskDecoder(config.mask_decoder_config)
        self.backbone_feature_sizes = config.vision_config.backbone_feature_sizes
        # a single token to indicate no memory embedding from previous frames
        self.hidden_dim = config.vision_config.fpn_hidden_size
        self.no_memory_embedding = torch.nn.Parameter(torch.zeros(1, 1, self.hidden_dim))

        self.post_init()
# Public API of this modular file; the modular converter turns these into the
# generated configuration/modeling/processing modules.
__all__ = [
    "Sam3TrackerConfig",
    "Sam3TrackerPromptEncoderConfig",
    "Sam3TrackerMaskDecoderConfig",
    "Sam3TrackerProcessor",
    "Sam3TrackerModel",
    "Sam3TrackerPreTrainedModel",
]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/sam3_tracker/modular_sam3_tracker.py",
"license": "Apache License 2.0",
"lines": 229,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/sam3_tracker_video/modular_sam3_tracker_video.py | # Copyright 2025 the HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ...configuration_utils import PreTrainedConfig
from ...processing_utils import Unpack
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple
from ..auto import CONFIG_MAPPING, AutoConfig, AutoModel
from ..sam2_video.configuration_sam2_video import Sam2VideoMaskDecoderConfig, Sam2VideoPromptEncoderConfig
from ..sam2_video.modeling_sam2_video import (
Sam2VideoAttention,
Sam2VideoFeedForward,
Sam2VideoImageSegmentationOutput,
Sam2VideoInferenceCache,
Sam2VideoInferenceSession,
Sam2VideoLayerNorm,
Sam2VideoMaskDecoder,
Sam2VideoMaskDownSampler,
Sam2VideoMaskDownSamplerLayer,
Sam2VideoMaskEmbedding,
Sam2VideoMemoryAttention,
Sam2VideoMemoryAttentionLayer,
Sam2VideoMemoryEncoder,
Sam2VideoMemoryFuser,
Sam2VideoMemoryFuserCXBlock,
Sam2VideoModel,
Sam2VideoPositionalEmbedding,
Sam2VideoPositionEmbeddingSine,
Sam2VideoPreTrainedModel,
Sam2VideoPromptEncoder,
Sam2VideoRoPEAttention,
Sam2VideoSegmentationOutput,
Sam2VideoTwoWayAttentionBlock,
Sam2VideoTwoWayTransformer,
Sam2VideoVisionEncoderOutput,
Sam2VideoVisionRotaryEmbedding,
)
from ..sam2_video.processing_sam2_video import Sam2VideoProcessor
class Sam3TrackerVideoPromptEncoderConfig(Sam2VideoPromptEncoderConfig):
    r"""
    This is the configuration class to store the configuration of a [`Sam3TrackerVideoPromptEncoder`]. The [`Sam3TrackerVideoPromptEncoder`]
    module is used to encode the input 2D points and bounding boxes.

    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.

    Args:
        hidden_size (`int`, *optional*, defaults to 256):
            Dimensionality of the hidden states.
        image_size (`int`, *optional*, defaults to 1008):
            The expected output resolution of the image.
        patch_size (`int`, *optional*, defaults to 14):
            The size (resolution) of each patch.
        mask_input_channels (`int`, *optional*, defaults to 16):
            The number of channels to be fed to the `MaskDecoder` module.
        num_point_embeddings (`int`, *optional*, defaults to 4):
            The number of point embeddings to be used.
        hidden_act (`str`, *optional*, defaults to `"gelu"`):
            The non-linear activation function in the encoder and pooler.
        layer_norm_eps (`float`, *optional*, defaults to 1e-06):
            The epsilon used by the layer normalization layers.
        scale (`float`, *optional*, defaults to 1):
            The scale factor for the prompt encoder.
    """

    base_config_key = "prompt_encoder_config"

    def __init__(
        self,
        hidden_size=256,
        image_size=1008,
        patch_size=14,
        mask_input_channels=16,
        num_point_embeddings=4,
        hidden_act="gelu",
        layer_norm_eps=1e-6,
        scale=1,
        **kwargs,
    ):
        # Forward every explicit argument to the Sam2 video prompt-encoder config so
        # the SAM3-specific defaults documented above (e.g. image_size=1008,
        # patch_size=14) actually take effect. Previously only **kwargs was forwarded,
        # silently discarding all named parameters.
        super().__init__(
            hidden_size=hidden_size,
            image_size=image_size,
            patch_size=patch_size,
            mask_input_channels=mask_input_channels,
            num_point_embeddings=num_point_embeddings,
            hidden_act=hidden_act,
            layer_norm_eps=layer_norm_eps,
            scale=scale,
            **kwargs,
        )
class Sam3TrackerVideoProcessor(Sam2VideoProcessor):
    """Processor for the SAM3 video tracker; behaviorally identical to `Sam2VideoProcessor` (modular re-export)."""

    pass


class Sam3TrackerVideoMaskDecoderConfig(Sam2VideoMaskDecoderConfig):
    """Mask-decoder config for the SAM3 video tracker; identical to `Sam2VideoMaskDecoderConfig` (modular re-export)."""

    pass
class Sam3TrackerVideoConfig(PreTrainedConfig):
    r"""
    [`Sam3TrackerVideoConfig`] is the configuration class to store the configuration of a [`Sam3TrackerVideoModel`]. It is used to instantiate a
    SAM3 tracker video model according to the specified arguments, defining the memory attention, memory encoder, and image encoder
    configs. Instantiating a configuration defaults will yield a similar configuration to that of the SAM 3
    [facebook/sam3](https://huggingface.co/facebook/sam3) architecture.

    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.

    Args:
        vision_config (Union[`dict`, `Sam3TrackerVideoVisionConfig`], *optional*):
            Dictionary of configuration options used to initialize [`Sam3TrackerVideoVisionConfig`].
        prompt_encoder_config (Union[`dict`, `Sam3TrackerVideoPromptEncoderConfig`], *optional*):
            Dictionary of configuration options used to initialize [`Sam3TrackerVideoPromptEncoderConfig`].
        mask_decoder_config (Union[`dict`, `Sam3TrackerVideoMaskDecoderConfig`], *optional*):
            Dictionary of configuration options used to initialize [`Sam3TrackerVideoMaskDecoderConfig`].
        initializer_range (`float`, *optional*, defaults to 0.02):
            Standard deviation for parameter initialization.
        num_maskmem (`int`, *optional*, defaults to 7):
            The number of memory slots for the mask memory.
        image_size (`int`, *optional*, defaults to 1008):
            The size of the input images.
        sigmoid_scale_for_mem_enc (`float`, *optional*, defaults to 20.0):
            Scale factor for the sigmoid function in the memory encoder.
        sigmoid_bias_for_mem_enc (`float`, *optional*, defaults to -10.0):
            Bias for the sigmoid function in the memory encoder.
        enable_occlusion_spatial_embedding (`bool`, *optional*, defaults to `True`):
            Whether to enable spatial embedding for occlusions.
        multimask_output_in_sam (`bool`, *optional*, defaults to `True`):
            Whether to output multiple masks from the SAM head.
        multimask_min_pt_num (`int`, *optional*, defaults to 0):
            The minimum number of points to trigger multimask output.
        multimask_max_pt_num (`int`, *optional*, defaults to 1):
            The maximum number of points to trigger multimask output.
        multimask_output_for_tracking (`bool`, *optional*, defaults to `True`):
            Whether to use multimask output for tracking.
        max_object_pointers_in_encoder (`int`, *optional*, defaults to 16):
            The maximum number of object pointers in the encoder.
        max_cond_frame_num (`int`, *optional*, defaults to 4):
            Maximum number of conditioning frames to use in memory attention.
        enable_temporal_pos_encoding_for_object_pointers (`bool`, *optional*, defaults to `True`):
            Whether to enable temporal positional encoding for object pointers.
        memory_attention_hidden_size (`int`, *optional*, defaults to 256):
            Dimensionality of the memory attention hidden states.
        memory_attention_num_layers (`int`, *optional*, defaults to 4):
            The number of layers in the memory attention module.
        memory_attention_num_attention_heads (`int`, *optional*, defaults to 1):
            Number of attention heads for each attention layer in the memory attention.
        memory_attention_downsample_rate (`int`, *optional*, defaults to 1):
            The downsample rate for the attention layers.
        memory_attention_feed_forward_hidden_size (`int`, *optional*, defaults to 2048):
            The dimension of the feedforward network in the memory attention module.
        memory_attention_feed_forward_hidden_act (`str`, *optional*, defaults to `"relu"`):
            The non-linear activation function in the feedforward network in the memory attention module.
        memory_attention_dropout (`float`, *optional*, defaults to 0.1):
            The dropout rate for the memory attention module.
        memory_attention_rope_theta (`float`, *optional*, defaults to 10000):
            The Rope theta parameter.
        memory_attention_rope_feat_sizes (`list[int]`, *optional*, defaults to `[72, 72]`):
            The feature sizes for the Rope positional encoding.
        memory_attention_rope_dropout (`float`, *optional*, defaults to 0.1):
            The dropout rate for the Rope positional encoding.
        memory_encoder_hidden_size (`int`, *optional*, defaults to 256):
            Dimensionality of the memory encoder hidden states.
        memory_encoder_output_channels (`int`, *optional*, defaults to 64):
            The number of output channels for the memory encoder.
        mask_downsampler_embed_dim (`int`, *optional*, defaults to 256):
            The dimension of the mask downsampler embedding.
        mask_downsampler_kernel_size (`int`, *optional*, defaults to 3):
            The kernel size for the mask downsampler.
        mask_downsampler_stride (`int`, *optional*, defaults to 2):
            The stride for the mask downsampler.
        mask_downsampler_padding (`int`, *optional*, defaults to 1):
            The padding for the mask downsampler.
        mask_downsampler_total_stride (`int`, *optional*, defaults to 16):
            The total stride for the mask downsampler.
        mask_downsampler_hidden_act (`str`, *optional*, defaults to `"gelu"`):
            The non-linear activation function in the mask downsampler.
        memory_fuser_num_layers (`int`, *optional*, defaults to 2):
            The number of layers in the memory fuser.
        memory_fuser_embed_dim (`int`, *optional*, defaults to 256):
            The dimension of the embedding layer in the memory fuser.
        memory_fuser_intermediate_dim (`int`, *optional*, defaults to 1024):
            The dimension of the intermediate layer in the memory fuser.
        memory_fuser_kernel_size (`int`, *optional*, defaults to 7):
            The kernel size for the memory fuser.
        memory_fuser_padding (`int`, *optional*, defaults to 3):
            The padding for the memory fuser.
        memory_fuser_layer_scale_init_value (`float`, *optional*, defaults to 1e-06):
            The initial value for the layer scale in the memory fuser.
        memory_fuser_hidden_act (`str`, *optional*, defaults to `"gelu"`):
            The non-linear activation function in the memory fuser.
        kwargs (*optional*):
            Dictionary of keyword arguments.

    Example:

    ```python
    >>> from transformers import (
    ...     Sam3VisionConfig,
    ...     Sam3TrackerVideoPromptEncoderConfig,
    ...     Sam3TrackerVideoMaskDecoderConfig,
    ...     Sam3TrackerVideoModel,
    ... )

    >>> # Initializing a Sam3TrackerVideoConfig with `"facebook/sam3"` style configuration
    >>> configuration = Sam3TrackerVideoConfig()

    >>> # Initializing a Sam3TrackerVideoModel (with random weights) from the `"facebook/sam3"` style configuration
    >>> model = Sam3TrackerVideoModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config

    >>> # We can also initialize a Sam3TrackerVideoConfig from a Sam3TrackerVideoVisionConfig, Sam3TrackerVideoPromptEncoderConfig, and Sam3TrackerVideoMaskDecoderConfig

    >>> # Initializing SAM3 tracker video vision encoder, memory attention, and memory encoder configurations
    >>> vision_config = Sam3TrackerVideoVisionConfig()
    >>> prompt_encoder_config = Sam3TrackerVideoPromptEncoderConfig()
    >>> mask_decoder_config = Sam3TrackerVideoMaskDecoderConfig()

    >>> config = Sam3TrackerVideoConfig(vision_config, prompt_encoder_config, mask_decoder_config)
    ```"""

    model_type = "sam3_tracker_video"
    sub_configs = {
        "vision_config": AutoConfig,
        "prompt_encoder_config": Sam3TrackerVideoPromptEncoderConfig,
        "mask_decoder_config": Sam3TrackerVideoMaskDecoderConfig,
    }

    def __init__(
        self,
        vision_config=None,
        prompt_encoder_config=None,
        mask_decoder_config=None,
        initializer_range=0.02,
        num_maskmem=7,
        image_size=1008,
        sigmoid_scale_for_mem_enc=20.0,
        sigmoid_bias_for_mem_enc=-10.0,
        enable_occlusion_spatial_embedding=True,
        multimask_output_in_sam=True,
        multimask_min_pt_num=0,
        multimask_max_pt_num=1,
        multimask_output_for_tracking=True,
        max_object_pointers_in_encoder=16,
        max_cond_frame_num=4,
        enable_temporal_pos_encoding_for_object_pointers=True,
        # memory attention
        memory_attention_hidden_size=256,
        memory_attention_num_layers=4,
        memory_attention_num_attention_heads=1,
        memory_attention_downsample_rate=1,
        memory_attention_feed_forward_hidden_size=2048,
        memory_attention_feed_forward_hidden_act="relu",
        memory_attention_dropout=0.1,
        memory_attention_rope_theta=10000,
        memory_attention_rope_feat_sizes=None,
        memory_attention_rope_dropout=0.1,
        # memory encoder
        memory_encoder_hidden_size=256,
        memory_encoder_output_channels=64,
        mask_downsampler_embed_dim=256,
        mask_downsampler_kernel_size=3,
        mask_downsampler_stride=2,
        mask_downsampler_padding=1,
        mask_downsampler_total_stride=16,
        mask_downsampler_hidden_act="gelu",
        memory_fuser_num_layers=2,
        memory_fuser_embed_dim=256,
        memory_fuser_intermediate_dim=1024,
        memory_fuser_kernel_size=7,
        memory_fuser_padding=3,
        memory_fuser_layer_scale_init_value=1e-6,
        memory_fuser_hidden_act="gelu",
        **kwargs,
    ):
        # Default backbone feature map sizes; presumably strides 4/8/16 of a 1008px
        # input (288/144/72) — see the image_size setter below for the formula.
        vision_config = (
            vision_config
            if vision_config is not None
            else {"backbone_feature_sizes": [[288, 288], [144, 144], [72, 72]]}
        )
        prompt_encoder_config = prompt_encoder_config if prompt_encoder_config is not None else {}
        mask_decoder_config = mask_decoder_config if mask_decoder_config is not None else {}
        memory_attention_rope_feat_sizes = (
            [72, 72] if memory_attention_rope_feat_sizes is None else memory_attention_rope_feat_sizes
        )
        if isinstance(vision_config, dict):
            vision_config["model_type"] = vision_config.get("model_type", "sam3_vision_model")
            vision_config = CONFIG_MAPPING[vision_config["model_type"]](**vision_config)
        # Normalize config objects back to dicts so they are rebuilt with this file's classes.
        if isinstance(prompt_encoder_config, Sam3TrackerVideoPromptEncoderConfig):
            prompt_encoder_config = prompt_encoder_config.to_dict()
        if isinstance(mask_decoder_config, Sam3TrackerVideoMaskDecoderConfig):
            mask_decoder_config = mask_decoder_config.to_dict()
        self.vision_config = vision_config
        self.prompt_encoder_config = Sam3TrackerVideoPromptEncoderConfig(**prompt_encoder_config)
        self.mask_decoder_config = Sam3TrackerVideoMaskDecoderConfig(**mask_decoder_config)
        self.initializer_range = initializer_range
        self.num_maskmem = num_maskmem  # default 1 input frame + 6 previous frames
        # NOTE: this goes through the `image_size` property setter below, which also
        # recomputes backbone_feature_sizes and memory_attention_rope_feat_sizes
        # (an explicit memory_attention_rope_feat_sizes argument wins — it is
        # assigned again further down).
        self.image_size = image_size
        self.sigmoid_scale_for_mem_enc = sigmoid_scale_for_mem_enc
        self.sigmoid_bias_for_mem_enc = sigmoid_bias_for_mem_enc
        self.multimask_output_in_sam = multimask_output_in_sam
        self.multimask_min_pt_num = multimask_min_pt_num
        self.multimask_max_pt_num = multimask_max_pt_num
        self.multimask_output_for_tracking = multimask_output_for_tracking
        self.max_object_pointers_in_encoder = max_object_pointers_in_encoder
        self.max_cond_frame_num = max_cond_frame_num
        # These two flags are True for sam2.1 and False for sam2
        self.enable_occlusion_spatial_embedding = enable_occlusion_spatial_embedding
        self.enable_temporal_pos_encoding_for_object_pointers = enable_temporal_pos_encoding_for_object_pointers
        # memory attention
        self.memory_attention_hidden_size = memory_attention_hidden_size
        self.memory_attention_num_layers = memory_attention_num_layers
        self.memory_attention_num_attention_heads = memory_attention_num_attention_heads
        self.memory_attention_downsample_rate = memory_attention_downsample_rate
        self.memory_attention_feed_forward_hidden_size = memory_attention_feed_forward_hidden_size
        self.memory_attention_feed_forward_hidden_act = memory_attention_feed_forward_hidden_act
        self.memory_attention_dropout = memory_attention_dropout
        self.memory_attention_rope_theta = memory_attention_rope_theta
        self.memory_attention_rope_feat_sizes = memory_attention_rope_feat_sizes
        self.memory_attention_rope_dropout = memory_attention_rope_dropout
        # memory encoder
        self.memory_encoder_hidden_size = memory_encoder_hidden_size
        self.memory_encoder_output_channels = memory_encoder_output_channels
        self.mask_downsampler_embed_dim = mask_downsampler_embed_dim
        self.mask_downsampler_kernel_size = mask_downsampler_kernel_size
        self.mask_downsampler_stride = mask_downsampler_stride
        self.mask_downsampler_padding = mask_downsampler_padding
        self.mask_downsampler_total_stride = mask_downsampler_total_stride
        self.mask_downsampler_hidden_act = mask_downsampler_hidden_act
        self.memory_fuser_num_layers = memory_fuser_num_layers
        self.memory_fuser_embed_dim = memory_fuser_embed_dim
        self.memory_fuser_intermediate_dim = memory_fuser_intermediate_dim
        self.memory_fuser_kernel_size = memory_fuser_kernel_size
        self.memory_fuser_padding = memory_fuser_padding
        self.memory_fuser_layer_scale_init_value = memory_fuser_layer_scale_init_value
        self.memory_fuser_hidden_act = memory_fuser_hidden_act

        super().__init__(**kwargs)

    @property
    def image_size(self):
        """Image size for the tracker video model."""
        return self.vision_config.image_size

    @image_size.setter
    def image_size(self, value):
        """Set the image size and propagate to sub-configs. Calculates feature sizes based on patch_size."""
        self.prompt_encoder_config.image_size = value
        self.vision_config.image_size = value
        patch_size = self.vision_config.backbone_config.patch_size
        # FPN feature maps at 4x / 2x / 1x the patch-grid resolution.
        self.vision_config.backbone_feature_sizes = [
            [4 * value // patch_size, 4 * value // patch_size],
            [2 * value // patch_size, 2 * value // patch_size],
            [value // patch_size, value // patch_size],
        ]
        self.memory_attention_rope_feat_sizes = [
            value // patch_size,
            value // patch_size,
        ]
        # keep the image_size in the __dict__ to save the value in the config file (backward compatibility)
        self.__dict__["image_size"] = value
# The following classes are modular re-exports: each inherits its full behavior from
# the corresponding Sam2Video class unchanged and only exists so the SAM3 video
# tracker has its own class names (and checkpoint/auto-class mapping entries).
class Sam3TrackerVideoInferenceCache(Sam2VideoInferenceCache):
    pass


class Sam3TrackerVideoInferenceSession(Sam2VideoInferenceSession):
    pass


class Sam3TrackerVideoLayerNorm(Sam2VideoLayerNorm):
    pass


class Sam3TrackerVideoPositionEmbeddingSine(Sam2VideoPositionEmbeddingSine):
    pass


class Sam3TrackerVideoAttention(Sam2VideoAttention):
    pass


class Sam3TrackerVideoTwoWayAttentionBlock(Sam2VideoTwoWayAttentionBlock):
    pass


class Sam3TrackerVideoFeedForward(Sam2VideoFeedForward):
    pass


class Sam3TrackerVideoImageSegmentationOutput(Sam2VideoImageSegmentationOutput):
    pass


class Sam3TrackerVideoSegmentationOutput(Sam2VideoSegmentationOutput):
    pass


class Sam3TrackerVideoPreTrainedModel(Sam2VideoPreTrainedModel):
    pass


class Sam3TrackerVideoVisionRotaryEmbedding(Sam2VideoVisionRotaryEmbedding):
    pass


class Sam3TrackerVideoRoPEAttention(Sam2VideoRoPEAttention):
    pass


class Sam3TrackerVideoMemoryAttentionLayer(Sam2VideoMemoryAttentionLayer):
    pass


class Sam3TrackerVideoMemoryAttention(Sam2VideoMemoryAttention):
    pass


class Sam3TrackerVideoMemoryFuserCXBlock(Sam2VideoMemoryFuserCXBlock):
    pass


class Sam3TrackerVideoMemoryFuser(Sam2VideoMemoryFuser):
    pass


class Sam3TrackerVideoMaskDownSamplerLayer(Sam2VideoMaskDownSamplerLayer):
    pass


class Sam3TrackerVideoMaskDownSampler(Sam2VideoMaskDownSampler):
    pass


class Sam3TrackerVideoMemoryEncoder(Sam2VideoMemoryEncoder):
    pass


class Sam3TrackerVideoVisionEncoderOutput(Sam2VideoVisionEncoderOutput):
    pass


class Sam3TrackerVideoPositionalEmbedding(Sam2VideoPositionalEmbedding):
    pass


class Sam3TrackerVideoMaskEmbedding(Sam2VideoMaskEmbedding):
    pass


class Sam3TrackerVideoPromptEncoder(Sam2VideoPromptEncoder):
    pass


class Sam3TrackerVideoTwoWayTransformer(Sam2VideoTwoWayTransformer):
    pass


class Sam3TrackerVideoMaskDecoder(Sam2VideoMaskDecoder):
    pass
class Sam3TrackerVideoModel(Sam2VideoModel):
    _checkpoint_conversion_mapping = {
        r"tracker_model.(.+)": r"\1",  # the regex allows to remove the prefix, and add it back in revert mode
        "detector_model.vision_encoder.backbone.": "vision_encoder.backbone.",
        "tracker_neck.": "vision_encoder.neck.",
    }
    _keys_to_ignore_on_load_unexpected = [r"^detector_model."]

    def __init__(self, config: Sam3TrackerVideoConfig, remove_vision_encoder: bool = False):
        r"""
        remove_vision_encoder (`bool`, *optional*, defaults to `False`):
            Whether to remove the vision encoder. If True, the vision encoder will be set to None.
        """
        # loading from a sam3_video config: unwrap the nested tracker config
        if hasattr(config, "tracker_config") and config.tracker_config is not None:
            tracker_config = config.tracker_config
            if isinstance(tracker_config, dict):
                tracker_config = Sam3TrackerVideoConfig(**tracker_config)
            config = tracker_config
        # Deliberately bypass Sam2VideoModel.__init__ and initialize from the
        # PreTrainedModel base. Fixed: the unbound call must pass `self` explicitly;
        # previously `Sam3TrackerVideoPreTrainedModel.__init__(config)` raised a
        # TypeError (missing `self`).
        Sam3TrackerVideoPreTrainedModel.__init__(self, config)
        self.shared_image_embedding = Sam3TrackerVideoPositionalEmbedding(config.prompt_encoder_config)
        self.vision_encoder = AutoModel.from_config(config.vision_config) if not remove_vision_encoder else None
        self.prompt_encoder = Sam3TrackerVideoPromptEncoder(config.prompt_encoder_config)
        # The module using it is not a PreTrainedModel subclass so we need this
        config.mask_decoder_config._attn_implementation = config._attn_implementation
        self.mask_decoder = Sam3TrackerVideoMaskDecoder(config.mask_decoder_config)
        self.backbone_feature_sizes = config.vision_config.backbone_feature_sizes
        # a single token to indicate no memory embedding from previous frames
        self.hidden_dim = config.vision_config.fpn_hidden_size
        self.no_memory_embedding = torch.nn.Parameter(torch.zeros(1, 1, self.hidden_dim))
        self.config = config

        # For video sequence inference
        self.image_size = config.image_size
        self.memory_attention = Sam3TrackerVideoMemoryAttention(config)
        self.memory_encoder = Sam3TrackerVideoMemoryEncoder(config)
        self.no_memory_positional_encoding = torch.nn.Parameter(
            torch.zeros(1, 1, config.vision_config.fpn_hidden_size)
        )
        self.mem_dim = config.memory_encoder_output_channels
        self.num_maskmem = config.num_maskmem  # Number of memories accessible
        # Temporal encoding of the memories
        self.memory_temporal_positional_encoding = torch.nn.Parameter(
            torch.zeros(self.num_maskmem, 1, 1, self.mem_dim)
        )
        self.no_object_pointer = torch.nn.Parameter(torch.zeros(1, self.hidden_dim))
        # A conv layer to downsample the mask prompt to stride 4 (the same stride as
        # low-res SAM mask logits) and to change its scales from 0~1 to SAM logit scale,
        # so that it can be fed into the SAM mask decoder to generate a pointer.
        self.mask_downsample = torch.nn.Conv2d(1, 1, kernel_size=4, stride=4)
        # a feedforward layer on SAM output tokens to turn them into object pointers
        self.object_pointer_proj = Sam3TrackerVideoFeedForward(self.hidden_dim, self.hidden_dim, self.hidden_dim, 3)

        if self.config.enable_temporal_pos_encoding_for_object_pointers:
            # a linear projection on temporal positional encoding in object pointers to
            # avoid potential interference with spatial positional encoding
            self.temporal_positional_encoding_projection_layer = torch.nn.Linear(self.hidden_dim, self.mem_dim)
        else:
            self.temporal_positional_encoding_projection_layer = torch.nn.Identity()

        self.occlusion_spatial_embedding_parameter = None  # compatibility with Sam2
        if config.enable_occlusion_spatial_embedding:
            self.occlusion_spatial_embedding_parameter = torch.nn.Parameter(torch.zeros(1, self.mem_dim))

        self.post_init()

    @can_return_tuple
    @auto_docstring
    def get_image_features(
        self,
        pixel_values: torch.FloatTensor,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple | Sam3TrackerVideoVisionEncoderOutput:
        r"""
        pixel_values (`torch.FloatTensor`):
            Input pixel values of shape `(batch_size, num_channels, height, width)`.
        """
        vision_outputs: Sam3TrackerVideoVisionEncoderOutput = self.vision_encoder(
            pixel_values, return_dict=True, **kwargs
        )
        feature_maps = vision_outputs.fpn_hidden_states
        feature_maps_position_embeddings = vision_outputs.fpn_position_encoding

        # precompute projected level 0 and level 1 features in SAM decoder
        # to avoid running it again on every SAM click
        feature_maps = list(feature_maps[:-1])
        feature_maps[0] = self.mask_decoder.conv_s0(feature_maps[0])
        feature_maps[1] = self.mask_decoder.conv_s1(feature_maps[1])

        # flatten NxCxHxW to HWxNxC
        feature_maps = [feature_map.flatten(2).permute(2, 0, 1) for feature_map in feature_maps]
        feature_maps_position_embeddings = [
            feature_map_position_embedding.flatten(2).permute(2, 0, 1)
            for feature_map_position_embedding in feature_maps_position_embeddings[:-1]
        ]
        vision_outputs.fpn_hidden_states = feature_maps
        vision_outputs.fpn_position_encoding = feature_maps_position_embeddings
        return vision_outputs
# Public symbols re-exported by this module.
__all__ = [
    "Sam3TrackerVideoMaskDecoderConfig",
    "Sam3TrackerVideoPromptEncoderConfig",
    "Sam3TrackerVideoConfig",
    "Sam3TrackerVideoModel",
    "Sam3TrackerVideoInferenceSession",
    "Sam3TrackerVideoPreTrainedModel",
    "Sam3TrackerVideoProcessor",
]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/sam3_tracker_video/modular_sam3_tracker_video.py",
"license": "Apache License 2.0",
"lines": 489,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/sam3_video/configuration_sam3_video.py | # Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""SAM3 Video model configuration"""
from ...configuration_utils import PreTrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING, AutoConfig
logger = logging.get_logger(__name__)
class Sam3VideoConfig(PreTrainedConfig):
    r"""
    Configuration class for [`Sam3VideoModel`]. This combines configurations for the detector (Sam3) and tracker
    (Sam3TrackerVideo) components, along with detection-tracking fusion hyperparameters.

    Instantiating a configuration with the defaults will yield a similar configuration to that of SAM 3
    [facebook/sam3](https://huggingface.co/facebook/sam3) architecture.

    This model integrates detection and tracking with various fusion heuristics including NMS, association,
    hotstart, reconditioning, and occlusion handling.

    Args:
        detector_config (`dict` or `Sam3Config`, *optional*):
            Configuration for the Sam3 detector model. If not provided, default Sam3Config will be used.
        tracker_config (`dict` or `Sam3TrackerVideoConfig`, *optional*):
            Configuration for the Sam3TrackerVideo tracker model. If not provided, default
            Sam3TrackerVideoConfig will be used.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing weight matrices.
        low_res_mask_size (`int`, *optional*, defaults to 288):
            Size (height and width) of the low-resolution mask outputs from the tracker before upsampling to video resolution.
        score_threshold_detection (`float`, *optional*, defaults to 0.5):
            Probability threshold for detection outputs - only keep detections above this threshold.
        det_nms_thresh (`float`, *optional*, defaults to 0.1):
            IoU threshold for detection NMS (Non-Maximum Suppression).
        assoc_iou_thresh (`float`, *optional*, defaults to 0.1):
            IoU threshold for detection-to-track matching. A detection is considered "matched" to a tracklet if
            it overlaps with the tracklet above this threshold. Often a loose threshold like 0.1.
        trk_assoc_iou_thresh (`float`, *optional*, defaults to 0.5):
            IoU threshold for detection-to-track matching, used to determine whether a masklet is "unmatched"
            by any detections. Often a stricter threshold like 0.5.
        new_det_thresh (`float`, *optional*, defaults to 0.7):
            Probability threshold for a detection to be added as a new object.
        recondition_on_trk_masks (`bool`, *optional*, defaults to `True`):
            Whether to use tracked masks (True) or detection masks (False) for reconditioning. Use True when tracked
            masks are higher quality and detector serves as validation signal to strengthen memory and prevent drift.
        hotstart_delay (`int`, *optional*, defaults to 15):
            Number of frames to buffer outputs during hotstart. We hold off the outputs for `hotstart_delay`
            frames and remove tracklets based on hotstart heuristics.
        hotstart_unmatch_thresh (`int`, *optional*, defaults to 8):
            Number of unmatched frames required to remove a tracklet during hotstart period.
        hotstart_dup_thresh (`int`, *optional*, defaults to 8):
            Number of overlapping frames required to remove a duplicate tracklet during hotstart period.
        suppress_unmatched_only_within_hotstart (`bool`, *optional*, defaults to `True`):
            Whether to suppress masks only within hotstart period. If False, we can suppress masks even if
            they start before hotstart period.
        init_trk_keep_alive (`int`, *optional*, defaults to 30):
            Initial keep-alive counter for new tracks.
        max_trk_keep_alive (`int`, *optional*, defaults to 30):
            Maximum keep-alive counter value. Tracks with matched detections get their counter increased up to this value.
        min_trk_keep_alive (`int`, *optional*, defaults to -1):
            Minimum keep-alive counter value. Tracks with unmatched detections get their counter decreased to this value.
        suppress_overlapping_based_on_recent_occlusion_threshold (`float`, *optional*, defaults to 0.7):
            Threshold for suppressing overlapping objects based on recent occlusion. Overlapping masks with
            IoU above this threshold are suppressed based on which was most recently occluded.
        decrease_trk_keep_alive_for_empty_masklets (`bool`, *optional*, defaults to `False`):
            Whether to decrease keep-alive counter for masklets with zero area in SAM2 prediction.
        fill_hole_area (`int`, *optional*, defaults to 16):
            Minimum area (in pixels) for filling holes in masks and removing small sprinkles.
        max_num_objects (`int`, *optional*, defaults to 10000):
            Maximum number of objects to track. Default 10000 effectively turns off this limit.
        recondition_every_nth_frame (`int`, *optional*, defaults to 16):
            Frequency of mask reconditioning (in frames). Set to 0 to disable reconditioning.
        high_conf_thresh (`float`, *optional*, defaults to 0.8):
            High confidence threshold for reconditioning. Only detections above this threshold can recondition tracklets.
        high_iou_thresh (`float`, *optional*, defaults to 0.8):
            High IoU threshold for reconditioning. Only detections with IoU above this threshold can recondition tracklets.

    Example:

    ```python
    >>> from transformers import Sam3VideoConfig, Sam3VideoModel

    >>> # Initializing a SAM3 Video configuration with default detector and tracker
    >>> configuration = Sam3VideoConfig()

    >>> # Changing image size for custom resolution inference (automatically propagates to all nested configs)
    >>> configuration.image_size = 560

    >>> # Initializing a model from the configuration
    >>> model = Sam3VideoModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    >>> detector_config = configuration.detector_config
    >>> tracker_config = configuration.tracker_config
    ```
    """

    model_type = "sam3_video"
    is_composition = True
    # Both sub-configs are resolved through CONFIG_MAPPING by model_type in __init__.
    sub_configs = {
        "detector_config": AutoConfig,
        "tracker_config": AutoConfig,
    }

    def __init__(
        self,
        detector_config=None,
        tracker_config=None,
        initializer_range=0.02,
        low_res_mask_size=288,
        # Detection-tracking fusion hyperparameters
        score_threshold_detection=0.5,
        det_nms_thresh=0.1,
        assoc_iou_thresh=0.1,
        trk_assoc_iou_thresh=0.5,
        new_det_thresh=0.7,
        recondition_on_trk_masks=True,
        # Hotstart parameters
        hotstart_delay=15,
        hotstart_unmatch_thresh=8,
        hotstart_dup_thresh=8,
        suppress_unmatched_only_within_hotstart=True,
        # Keep-alive parameters
        init_trk_keep_alive=30,
        max_trk_keep_alive=30,
        min_trk_keep_alive=-1,
        # Occlusion and overlap handling
        suppress_overlapping_based_on_recent_occlusion_threshold=0.7,
        decrease_trk_keep_alive_for_empty_masklets=False,
        # Mask post-processing
        fill_hole_area=16,
        # Object tracking limits
        max_num_objects=10000,
        # Reconditioning parameters
        recondition_every_nth_frame=16,
        high_conf_thresh=0.8,
        high_iou_thresh=0.8,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        # Initialize detector config (Sam3); dicts are materialized through CONFIG_MAPPING.
        if detector_config is None:
            detector_config = {}
            logger.info("detector_config is None. Initializing the Sam3Config with default values.")
        if isinstance(detector_config, dict):
            detector_config["model_type"] = detector_config.get("model_type", "sam3")
            self.detector_config = CONFIG_MAPPING[detector_config["model_type"]](**detector_config)
        elif isinstance(detector_config, PreTrainedConfig):
            self.detector_config = detector_config
        else:
            raise ValueError(f"detector_config must be a dict or Sam3Config, got {type(detector_config)}")

        # Initialize tracker config (Sam3TrackerVideo); dicts are materialized through CONFIG_MAPPING.
        if tracker_config is None:
            tracker_config = {}
            logger.info("tracker_config is None. Initializing the Sam3TrackerVideoConfig with default values.")
        if isinstance(tracker_config, dict):
            tracker_config["model_type"] = tracker_config.get("model_type", "sam3_tracker_video")
            self.tracker_config = CONFIG_MAPPING[tracker_config["model_type"]](**tracker_config)
        elif isinstance(tracker_config, PreTrainedConfig):
            self.tracker_config = tracker_config
        else:
            raise ValueError(f"tracker_config must be a dict or Sam3TrackerVideoConfig, got {type(tracker_config)}")

        # Model initialization
        self.initializer_range = initializer_range
        self.low_res_mask_size = low_res_mask_size

        # Detection-tracking fusion hyperparameters
        self.score_threshold_detection = score_threshold_detection
        self.det_nms_thresh = det_nms_thresh
        self.assoc_iou_thresh = assoc_iou_thresh
        self.trk_assoc_iou_thresh = trk_assoc_iou_thresh
        self.new_det_thresh = new_det_thresh
        self.recondition_on_trk_masks = recondition_on_trk_masks

        # Hotstart parameters: both thresholds count frames inside the hotstart
        # window, so they cannot exceed the window length itself.
        if hotstart_delay > 0:
            if hotstart_unmatch_thresh > hotstart_delay:
                raise ValueError(
                    f"hotstart_unmatch_thresh ({hotstart_unmatch_thresh}) must be <= hotstart_delay ({hotstart_delay})"
                )
            if hotstart_dup_thresh > hotstart_delay:
                raise ValueError(
                    f"hotstart_dup_thresh ({hotstart_dup_thresh}) must be <= hotstart_delay ({hotstart_delay})"
                )
        self.hotstart_delay = hotstart_delay
        self.hotstart_unmatch_thresh = hotstart_unmatch_thresh
        self.hotstart_dup_thresh = hotstart_dup_thresh
        self.suppress_unmatched_only_within_hotstart = suppress_unmatched_only_within_hotstart

        # Keep-alive parameters
        self.init_trk_keep_alive = init_trk_keep_alive
        self.max_trk_keep_alive = max_trk_keep_alive
        self.min_trk_keep_alive = min_trk_keep_alive

        # Occlusion and overlap handling
        self.suppress_overlapping_based_on_recent_occlusion_threshold = (
            suppress_overlapping_based_on_recent_occlusion_threshold
        )
        self.decrease_trk_keep_alive_for_empty_masklets = decrease_trk_keep_alive_for_empty_masklets

        # Mask post-processing
        self.fill_hole_area = fill_hole_area

        # Object tracking limits
        self.max_num_objects = max_num_objects

        # Reconditioning parameters
        self.recondition_every_nth_frame = recondition_every_nth_frame
        self.high_conf_thresh = high_conf_thresh
        self.high_iou_thresh = high_iou_thresh

    @property
    def image_size(self):
        """Image size for the video model (delegates to the detector config)."""
        return self.detector_config.image_size

    @image_size.setter
    def image_size(self, value):
        """Recursively propagate the image size to detector and tracker configs."""
        self.detector_config.image_size = value
        self.tracker_config.image_size = value
# Public API of this module.
__all__ = ["Sam3VideoConfig"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/sam3_video/configuration_sam3_video.py",
"license": "Apache License 2.0",
"lines": 211,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/sam3_video/convert_sam3_video_to_hf.py | # Copyright 2025 The Meta AI Authors and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Convert SAM3 checkpoints from the original implementation to HuggingFace format.
"""
import argparse
import gc
import os
import regex as re
import torch
from transformers import CLIPTokenizerFast
from transformers.models.sam2_video.video_processing_sam2_video import Sam2VideoVideoProcessor
from transformers.models.sam3.image_processing_sam3_fast import Sam3ImageProcessorFast
from transformers.models.sam3.modeling_sam3 import Sam3Model
from transformers.models.sam3_tracker.modeling_sam3_tracker import Sam3TrackerModel
from transformers.models.sam3_tracker_video.modeling_sam3_tracker_video import Sam3TrackerVideoModel
from transformers.models.sam3_video.configuration_sam3_video import Sam3VideoConfig
from transformers.models.sam3_video.modeling_sam3_video import Sam3VideoModel
from transformers.models.sam3_video.processing_sam3_video import Sam3VideoProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# fmt: off
# Regex patterns mapping original checkpoint parameter names to their converted
# (HF) names. Group backreferences (\1) carry layer indices through the rename.
ORIGINAL_TO_CONVERTED_KEY_MAPPING = {
    r"^sam3_model\.": r"detector_model.",
    r"^sam2_predictor\.model\.": r"tracker_model.",
    # ============================================================================
    # Vision Encoder - ViT Backbone
    # ============================================================================
    r"backbone\.vision_backbone\.trunk\.": r"vision_encoder.backbone.",
    r"vision_encoder\.backbone\.pos_embed": r"vision_encoder.backbone.embeddings.position_embeddings",
    r"vision_encoder\.backbone\.patch_embed\.proj\.": r"vision_encoder.backbone.embeddings.patch_embeddings.projection.",
    r"vision_encoder\.backbone\.ln_pre\.": r"vision_encoder.backbone.layer_norm.",
    r"vision_encoder\.backbone\.blocks\.(\d+)\.norm1\.": r"vision_encoder.backbone.layers.\1.layer_norm1.",
    r"vision_encoder\.backbone\.blocks\.(\d+)\.norm2\.": r"vision_encoder.backbone.layers.\1.layer_norm2.",
    r"vision_encoder\.backbone\.blocks\.(\d+)\.attn\.qkv\.": r"vision_encoder.backbone.layers.\1.attention.qkv.",
    r"vision_encoder\.backbone\.blocks\.(\d+)\.attn\.proj\.": r"vision_encoder.backbone.layers.\1.attention.o_proj.",
    r"vision_encoder\.backbone\.blocks\.(\d+)\.attn\.freqs_cis": r"vision_encoder.backbone.layers.\1.rotary_emb.rope_embeddings",
    r"vision_encoder\.backbone\.blocks\.(\d+)\.mlp\.fc1\.": r"vision_encoder.backbone.layers.\1.mlp.fc1.",
    r"vision_encoder\.backbone\.blocks\.(\d+)\.mlp\.fc2\.": r"vision_encoder.backbone.layers.\1.mlp.fc2.",
    # Vision Encoder - FPN Neck
    r"backbone\.vision_backbone\.neck\.fpn\.(\d+)\.": r"vision_encoder.neck.fpn_layers.\1.",
    r"backbone\.vision_backbone\.convs\.(\d+)\.dconv_2x2_0\.": r"vision_encoder.neck.fpn_layers.\1.scale_layers.0.",
    r"backbone\.vision_backbone\.convs\.(\d+)\.dconv_2x2_1\.": r"vision_encoder.neck.fpn_layers.\1.scale_layers.2.",
    r"backbone\.vision_backbone\.convs\.(\d+)\.dconv_2x2\.": r"vision_encoder.neck.fpn_layers.\1.scale_layers.0.",
    r"backbone\.vision_backbone\.convs\.(\d+)\.maxpool_2x2\.": r"vision_encoder.neck.fpn_layers.\1.scale_layers.0.",
    r"backbone\.vision_backbone\.convs\.(\d+)\.conv_1x1\.": r"vision_encoder.neck.fpn_layers.\1.proj1.",
    r"backbone\.vision_backbone\.convs\.(\d+)\.conv_3x3\.": r"vision_encoder.neck.fpn_layers.\1.proj2.",
    # Vision Encoder - Tracker FPN Neck
    r"backbone\.vision_backbone\.sam2_convs\.(\d+)\.dconv_2x2_0\.": r"vision_encoder.tracker_neck.fpn_layers.\1.scale_layers.0.",
    r"backbone\.vision_backbone\.sam2_convs\.(\d+)\.dconv_2x2_1\.": r"vision_encoder.tracker_neck.fpn_layers.\1.scale_layers.2.",
    r"backbone\.vision_backbone\.sam2_convs\.(\d+)\.dconv_2x2\.": r"vision_encoder.tracker_neck.fpn_layers.\1.scale_layers.0.",
    r"backbone\.vision_backbone\.sam2_convs\.(\d+)\.maxpool_2x2\.": r"vision_encoder.tracker_neck.fpn_layers.\1.scale_layers.0.",
    r"backbone\.vision_backbone\.sam2_convs\.(\d+)\.conv_1x1\.": r"vision_encoder.tracker_neck.fpn_layers.\1.proj1.",
    r"backbone\.vision_backbone\.sam2_convs\.(\d+)\.conv_3x3\.": r"vision_encoder.tracker_neck.fpn_layers.\1.proj2.",
    # ===========================================================================
    # Text Encoder (CLIP)
    # ===========================================================================
    r"backbone\.language_backbone\.encoder\.": r"text_encoder.",
    r"text_encoder\.token_embedding\.": r"text_encoder.text_model.embeddings.token_embedding.",
    r"text_encoder\.positional_embedding": r"text_encoder.text_model.embeddings.position_embedding.weight",
    r"text_encoder\.ln_final\.": r"text_encoder.text_model.final_layer_norm.",
    r"text_encoder\.text_projection": r"text_encoder.text_projection.weight",
    r"text_encoder\.transformer\.resblocks\.(\d+)\.attn\.in_proj_": r"text_encoder.text_model.encoder.layers.\1.self_attn.in_proj_",
    r"text_encoder\.transformer\.resblocks\.(\d+)\.attn\.out_proj\.": r"text_encoder.text_model.encoder.layers.\1.self_attn.out_proj.",
    r"text_encoder\.transformer\.resblocks\.(\d+)\.ln_1\.": r"text_encoder.text_model.encoder.layers.\1.layer_norm1.",
    r"text_encoder\.transformer\.resblocks\.(\d+)\.ln_2\.": r"text_encoder.text_model.encoder.layers.\1.layer_norm2.",
    r"text_encoder\.transformer\.resblocks\.(\d+)\.mlp\.c_fc\.": r"text_encoder.text_model.encoder.layers.\1.mlp.fc1.",
    r"text_encoder\.transformer\.resblocks\.(\d+)\.mlp\.c_proj\.": r"text_encoder.text_model.encoder.layers.\1.mlp.fc2.",
    r"backbone\.language_backbone\.resizer\.": r"text_projection.",
    # ===========================================================================
    # Geometry Encoder
    # ===========================================================================
    r"geometry_encoder\.encode\.(\d+)\.cross_attn_image\.out_proj\.": r"geometry_encoder.layers.\1.cross_attn.o_proj.",
    r"geometry_encoder\.encode\.(\d+)\.cross_attn_image\.": r"geometry_encoder.layers.\1.cross_attn.",
    r"geometry_encoder\.encode\.(\d+)\.self_attn\.out_proj\.": r"geometry_encoder.layers.\1.self_attn.o_proj.",
    r"geometry_encoder\.encode\.(\d+)\.self_attn\.": r"geometry_encoder.layers.\1.self_attn.",
    r"geometry_encoder\.encode\.(\d+)\.linear1\.": r"geometry_encoder.layers.\1.mlp.fc1.",
    r"geometry_encoder\.encode\.(\d+)\.linear2\.": r"geometry_encoder.layers.\1.mlp.fc2.",
    r"geometry_encoder\.encode\.(\d+)\.norm1\.": r"geometry_encoder.layers.\1.layer_norm1.",
    r"geometry_encoder\.encode\.(\d+)\.norm2\.": r"geometry_encoder.layers.\1.layer_norm2.",
    r"geometry_encoder\.encode\.(\d+)\.norm3\.": r"geometry_encoder.layers.\1.layer_norm3.",
    r"geometry_encoder\.img_pre_norm\.": r"geometry_encoder.vision_layer_norm.",
    r"geometry_encoder\.norm\.": r"geometry_encoder.prompt_layer_norm.",
    r"geometry_encoder\.encode_norm\.": r"geometry_encoder.output_layer_norm.",
    # ===========================================================================
    # DETR Encoder
    # ===========================================================================
    r"detector_model.transformer\.encoder\.layers\.(\d+)\.cross_attn_image\.out_proj\.": r"detector_model.detr_encoder.layers.\1.cross_attn.o_proj.",
    r"detector_model.transformer\.encoder\.layers\.(\d+)\.cross_attn_image\.": r"detector_model.detr_encoder.layers.\1.cross_attn.",
    r"detector_model.transformer\.encoder\.layers\.(\d+)\.self_attn\.out_proj\.": r"detector_model.detr_encoder.layers.\1.self_attn.o_proj.",
    r"detector_model.transformer\.encoder\.layers\.(\d+)\.self_attn\.": r"detector_model.detr_encoder.layers.\1.self_attn.",
    r"detector_model.transformer\.encoder\.layers\.(\d+)\.cross_attn\.out_proj\.": r"detector_model.detr_encoder.layers.\1.cross_attn.o_proj.",
    r"detector_model.transformer\.encoder\.layers\.(\d+)\.cross_attn\.": r"detector_model.detr_encoder.layers.\1.cross_attn.",
    r"detector_model.transformer\.encoder\.layers\.(\d+)\.linear1\.": r"detector_model.detr_encoder.layers.\1.mlp.fc1.",
    r"detector_model.transformer\.encoder\.layers\.(\d+)\.linear2\.": r"detector_model.detr_encoder.layers.\1.mlp.fc2.",
    r"detector_model.transformer\.encoder\.layers\.(\d+)\.norm1\.": r"detector_model.detr_encoder.layers.\1.layer_norm1.",
    r"detector_model.transformer\.encoder\.layers\.(\d+)\.norm2\.": r"detector_model.detr_encoder.layers.\1.layer_norm2.",
    r"detector_model.transformer\.encoder\.layers\.(\d+)\.norm3\.": r"detector_model.detr_encoder.layers.\1.layer_norm3.",
    # ===========================================================================
    # DETR Decoder
    # ===========================================================================
    r"transformer\.decoder\.query_embed\.": r"detr_decoder.query_embed.",
    r"transformer\.decoder\.reference_points\.": r"detr_decoder.reference_points.",
    r"transformer\.decoder\.instance_query_embed\.": r"detr_decoder.instance_query_embed.",
    r"transformer\.decoder\.instance_reference_points\.": r"detr_decoder.instance_reference_points.",
    r"transformer\.decoder\.presence_token\.": r"detr_decoder.presence_token.",
    r"transformer\.decoder\.presence_token_head\.layers\.0\.": r"detr_decoder.presence_head.layer1.",
    r"transformer\.decoder\.presence_token_head\.layers\.1\.": r"detr_decoder.presence_head.layer2.",
    r"transformer\.decoder\.presence_token_head\.layers\.2\.": r"detr_decoder.presence_head.layer3.",
    r"transformer\.decoder\.presence_token_out_norm\.": r"detr_decoder.presence_layer_norm.",
    r"transformer\.decoder\.norm\.": r"detr_decoder.output_layer_norm.",
    r"transformer\.decoder\.bbox_embed\.layers\.0\.": r"detr_decoder.box_head.layer1.",
    r"transformer\.decoder\.bbox_embed\.layers\.1\.": r"detr_decoder.box_head.layer2.",
    r"transformer\.decoder\.bbox_embed\.layers\.2\.": r"detr_decoder.box_head.layer3.",
    r"transformer\.decoder\.instance_bbox_embed\.layers\.0\.": r"detr_decoder.instance_box_head.layer1.",
    r"transformer\.decoder\.instance_bbox_embed\.layers\.1\.": r"detr_decoder.instance_box_head.layer2.",
    r"transformer\.decoder\.instance_bbox_embed\.layers\.2\.": r"detr_decoder.instance_box_head.layer3.",
    r"transformer\.decoder\.ref_point_head\.layers\.0\.": r"detr_decoder.ref_point_head.layer1.",
    r"transformer\.decoder\.ref_point_head\.layers\.1\.": r"detr_decoder.ref_point_head.layer2.",
    r"transformer\.decoder\.boxRPB_embed_x\.layers\.0\.": r"detr_decoder.box_rpb_embed_x.layer1.",
    r"transformer\.decoder\.boxRPB_embed_x\.layers\.1\.": r"detr_decoder.box_rpb_embed_x.layer2.",
    r"transformer\.decoder\.boxRPB_embed_y\.layers\.0\.": r"detr_decoder.box_rpb_embed_y.layer1.",
    r"transformer\.decoder\.boxRPB_embed_y\.layers\.1\.": r"detr_decoder.box_rpb_embed_y.layer2.",
    r"transformer\.decoder\.layers\.(\d+)\.self_attn\.out_proj\.": r"detr_decoder.layers.\1.self_attn.o_proj.",
    r"transformer\.decoder\.layers\.(\d+)\.self_attn\.": r"detr_decoder.layers.\1.self_attn.",
    r"transformer\.decoder\.layers\.(\d+)\.ca_text\.out_proj\.": r"detr_decoder.layers.\1.text_cross_attn.o_proj.",
    r"transformer\.decoder\.layers\.(\d+)\.ca_text\.": r"detr_decoder.layers.\1.text_cross_attn.",
    r"transformer\.decoder\.layers\.(\d+)\.cross_attn\.out_proj\.": r"detr_decoder.layers.\1.vision_cross_attn.o_proj.",
    r"transformer\.decoder\.layers\.(\d+)\.cross_attn\.": r"detr_decoder.layers.\1.vision_cross_attn.",
    r"transformer\.decoder\.layers\.(\d+)\.linear1\.": r"detr_decoder.layers.\1.mlp.fc1.",
    r"transformer\.decoder\.layers\.(\d+)\.linear2\.": r"detr_decoder.layers.\1.mlp.fc2.",
    r"transformer\.decoder\.layers\.(\d+)\.norm1\.": r"detr_decoder.layers.\1.vision_cross_attn_layer_norm.",
    r"transformer\.decoder\.layers\.(\d+)\.catext_norm\.": r"detr_decoder.layers.\1.text_cross_attn_layer_norm.",
    r"transformer\.decoder\.layers\.(\d+)\.norm2\.": r"detr_decoder.layers.\1.self_attn_layer_norm.",
    r"transformer\.decoder\.layers\.(\d+)\.norm3\.": r"detr_decoder.layers.\1.mlp_layer_norm.",
    # ===========================================================================
    # Dot Product Scoring
    # ===========================================================================
    r"dot_prod_scoring\.prompt_mlp\.layers\.0\.": r"dot_product_scoring.text_mlp.layer1.",
    r"dot_prod_scoring\.prompt_mlp\.layers\.1\.": r"dot_product_scoring.text_mlp.layer2.",
    r"dot_prod_scoring\.prompt_mlp\.out_norm\.": r"dot_product_scoring.text_mlp_out_norm.",
    r"dot_prod_scoring\.prompt_proj\.": r"dot_product_scoring.text_proj.",
    r"dot_prod_scoring\.hs_proj\.": r"dot_product_scoring.query_proj.",
    # ===========================================================================
    # Mask Decoder
    # ===========================================================================
    r"segmentation_head\.pixel_decoder\.conv_layers\.(\d+)\.": r"mask_decoder.pixel_decoder.conv_layers.\1.",
    r"segmentation_head\.pixel_decoder\.norms\.(\d+)\.": r"mask_decoder.pixel_decoder.norms.\1.",
    r"segmentation_head\.mask_embed\.layers\.(\d+)\.": r"mask_decoder.mask_embedder.layers.\1.",
    r"segmentation_head\.mask_predictor\.mask_embed\.layers\.(\d+)\.": r"mask_decoder.mask_embedder.layers.\1.",
    r"segmentation_head\.instance_seg_head\.": r"mask_decoder.instance_projection.",
    r"segmentation_head\.semantic_seg_head\.": r"mask_decoder.semantic_projection.",
    r"segmentation_head\.cross_attend_prompt\.out_proj\.": r"mask_decoder.prompt_cross_attn.o_proj.",
    r"segmentation_head\.cross_attend_prompt\.": r"mask_decoder.prompt_cross_attn.",
    r"segmentation_head\.cross_attn_norm\.": r"mask_decoder.prompt_cross_attn_norm.",
    r"^detector_model\.vision_encoder\.tracker_neck\.": r"tracker_neck.",
}
# fmt: on
# Plain-substring replacements applied to tracker (sam2_predictor.*) keys in
# replace_keys(); each entry rewrites an original submodule name to its HF name.
KEYS_TO_MODIFY_MAPPING = {
    "iou_prediction_head.layers.0": "iou_prediction_head.proj_in",
    "iou_prediction_head.layers.1": "iou_prediction_head.layers.0",
    "iou_prediction_head.layers.2": "iou_prediction_head.proj_out",
    "mask_decoder.output_upscaling.0": "mask_decoder.upscale_conv1",
    "mask_decoder.output_upscaling.1": "mask_decoder.upscale_layer_norm",
    "mask_decoder.output_upscaling.3": "mask_decoder.upscale_conv2",
    "mask_downscaling.0": "mask_embed.conv1",
    "mask_downscaling.1": "mask_embed.layer_norm1",
    "mask_downscaling.3": "mask_embed.conv2",
    "mask_downscaling.4": "mask_embed.layer_norm2",
    "mask_downscaling.6": "mask_embed.conv3",
    "dwconv": "depthwise_conv",
    "pwconv": "pointwise_conv",
    "fuser": "memory_fuser",
    "point_embeddings": "point_embed",
    "pe_layer.positional_encoding_gaussian_matrix": "shared_embedding.positional_embedding",
    "obj_ptr_tpos_proj": "temporal_positional_encoding_projection_layer",
    "no_obj_embed_spatial": "occlusion_spatial_embedding_parameter",
    "sam_prompt_encoder": "prompt_encoder",
    "sam_mask_decoder": "mask_decoder",
    "maskmem_tpos_enc": "memory_temporal_positional_encoding",
    "gamma": "scale",
    "image_encoder.neck": "vision_encoder.neck",
    "image_encoder": "vision_encoder.backbone",
    "neck.0": "neck.conv1",
    "neck.1": "neck.layer_norm1",
    "neck.2": "neck.conv2",
    "neck.3": "neck.layer_norm2",
    "pix_feat_proj": "feature_projection",
    "patch_embed.proj": "patch_embed.projection",
    "no_mem_embed": "no_memory_embedding",
    "no_mem_pos_enc": "no_memory_positional_encoding",
    "obj_ptr": "object_pointer",
    ".norm": ".layer_norm",
    "trunk.": "",
    "out_proj": "o_proj",
}
def adapt_internal_ckpt(ov_sd):
    """Rename internal-checkpoint state-dict keys to the public naming scheme.

    Bug fix: the previous implementation applied every substitution to the
    state-dict *values* (``{k: v.replace(...)}``). The values are checkpoint
    tensors, so that both crashed (tensors have no ``.replace``) and never
    renamed any parameter. The substitutions must rewrite the *keys*.

    The substitutions are applied to each key in order; later entries operate
    on the result of earlier ones (e.g. ``mlp.lin1`` -> ``mlp.layers.0`` ->
    ``mlp.proj_in``), so the ordering below is significant.

    Args:
        ov_sd (`dict`): Original state dict mapping parameter names to tensors.

    Returns:
        `dict`: State dict with renamed keys and unchanged values.
    """
    # (old, new) substring substitutions, applied to every key in this exact order.
    replacements = [
        ("backbone.vision_backbone.trunk", "image_encoder.trunk"),
        ("backbone.vision_backbone.convs", "image_encoder.neck.convs"),
        # rename components to be consistent with paper and public release
        ("transformer.encoder", "memory_attention"),
        ("maskmem_backbone", "memory_encoder"),
        # mask downsampler: flat nn.Sequential indices -> structured conv/layer_norm layers
        ("memory_encoder.mask_downsampler.encoder.0.", "memory_encoder.mask_downsampler.layers.0.conv."),
        ("memory_encoder.mask_downsampler.encoder.1.", "memory_encoder.mask_downsampler.layers.0.layer_norm."),
        ("memory_encoder.mask_downsampler.encoder.3.", "memory_encoder.mask_downsampler.layers.1.conv."),
        ("memory_encoder.mask_downsampler.encoder.4.", "memory_encoder.mask_downsampler.layers.1.layer_norm."),
        ("memory_encoder.mask_downsampler.encoder.6.", "memory_encoder.mask_downsampler.layers.2.conv."),
        ("memory_encoder.mask_downsampler.encoder.7.", "memory_encoder.mask_downsampler.layers.2.layer_norm."),
        ("memory_encoder.mask_downsampler.encoder.9.", "memory_encoder.mask_downsampler.layers.3.conv."),
        ("memory_encoder.mask_downsampler.encoder.10.", "memory_encoder.mask_downsampler.layers.3.layer_norm."),
        ("memory_encoder.mask_downsampler.encoder.12.", "memory_encoder.mask_downsampler.final_conv."),
        ("memory_encoder.o_proj.", "memory_encoder.projection."),
        # MLPBlock to MLP: lin1/lin2 -> layers.0/layers.1 -> proj_in/proj_out
        ("mask_decoder.transformer.layers.0.mlp.lin1", "mask_decoder.transformer.layers.0.mlp.layers.0"),
        ("mask_decoder.transformer.layers.0.mlp.lin2", "mask_decoder.transformer.layers.0.mlp.layers.1"),
        ("mask_decoder.transformer.layers.1.mlp.lin1", "mask_decoder.transformer.layers.1.mlp.layers.0"),
        ("mask_decoder.transformer.layers.1.mlp.lin2", "mask_decoder.transformer.layers.1.mlp.layers.1"),
        ("mask_decoder.transformer.layers.0.mlp.layers.0.", "mask_decoder.transformer.layers.0.mlp.proj_in."),
        ("mask_decoder.transformer.layers.0.mlp.layers.1.", "mask_decoder.transformer.layers.0.mlp.proj_out."),
        ("mask_decoder.transformer.layers.1.mlp.layers.0.", "mask_decoder.transformer.layers.1.mlp.proj_in."),
        ("mask_decoder.transformer.layers.1.mlp.layers.1.", "mask_decoder.transformer.layers.1.mlp.proj_out."),
    ]
    sam2_sd = {}
    for key, value in ov_sd.items():
        for old, new in replacements:
            key = key.replace(old, new)
        sam2_sd[key] = value
    return sam2_sd
def replace_keys(key_mapping: dict):
    """
    Post-process an old-key -> new-key mapping for tracker checkpoint entries.

    Only entries whose ORIGINAL key starts with "sam2_predictor." are rewritten; all
    other entries are left untouched. For the matching entries, the converted key is
    passed through `KEYS_TO_MODIFY_MAPPING` and then through a series of regex-gated
    renames (MLP layers -> proj_in/proj_out, neck conv flattening, memory-encoder
    mask-downsampler re-indexing, ...). Mutates `key_mapping` in place and returns it.
    """
    # NOTE(review): the dots in these patterns are unescaped, so they match any
    # character as well — harmless for real checkpoint keys, but worth knowing.
    output_hypernetworks_mlps_pattern = r".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"
    output_mask_decoder_mlps_pattern = r"tracker_model.mask_decoder.transformer.layers.(\d+).mlp.layers.(\d+).*"
    output_mask_decoder_score_head_pattern = r"tracker_model.mask_decoder.pred_obj_score_head.layers.(\d+).*"
    output_vision_encoder_mlps_pattern = r"vision_encoder.backbone.blocks.(\d+).mlp.layers.(\d+).*"
    output_vision_encoder_neck_pattern = r"tracker_model.vision_encoder.neck.convs.(\d+).conv"
    output_memory_encoder_projection_pattern = r"tracker_model.memory_encoder.o_proj.*"
    output_object_pointer_proj_pattern = r"tracker_model.object_pointer_proj.layers.(\d+).*"
    output_memory_encoder_mask_downsampler_pattern = r"tracker_model.memory_encoder.mask_downsampler.encoder.(\d+).*"
    key_mapping_copy = key_mapping.copy()
    # NOTE: `value` is the ORIGINAL checkpoint key and `key` is the CONVERTED key —
    # the naming is inverted relative to the usual (key, value) convention.
    for value, key in key_mapping_copy.items():
        if not value.startswith("sam2_predictor."):
            continue
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)
        # vision_encoder.blocks.0.mlp.layers.1.weight -> vision_encoder.blocks.0.mlp.proj_out.weight
        if re.match(output_vision_encoder_mlps_pattern, key):
            layer_nb = int(re.match(output_vision_encoder_mlps_pattern, key).group(2))
            if layer_nb == 0:
                key = key.replace("layers.0", "proj_in")
            elif layer_nb == 1:
                key = key.replace("layers.1", "proj_out")
        # mask_decoder.transformer.layers.0.mlp.layers.1.weight -> mask_decoder.transformer.layers.1.mlp.proj_out.weight
        if re.match(output_mask_decoder_mlps_pattern, key):
            layer_nb = int(re.match(output_mask_decoder_mlps_pattern, key).group(2))
            if layer_nb == 0:
                key = key.replace("mlp.layers.0", "mlp.proj_in")
            elif layer_nb == 1:
                key = key.replace("mlp.layers.1", "mlp.proj_out")
        # mask_decoder.pred_obj_score_head.layers.1.weight -> mask_decoder.pred_obj_score_head.proj_in.weight
        # 3-layer MLPs map layer 0 -> proj_in, middle layer 1 -> layers.0, layer 2 -> proj_out.
        if re.match(output_mask_decoder_score_head_pattern, key):
            layer_nb = int(re.match(output_mask_decoder_score_head_pattern, key).group(1))
            if layer_nb == 0:
                key = key.replace("layers.0", "proj_in")
            elif layer_nb == 1:
                key = key.replace("layers.1", "layers.0")
            elif layer_nb == 2:
                key = key.replace("layers.2", "proj_out")
        if re.match(output_hypernetworks_mlps_pattern, key):
            layer_nb = int(re.match(output_hypernetworks_mlps_pattern, key).group(2))
            if layer_nb == 0:
                key = key.replace("layers.0", "proj_in")
            elif layer_nb == 1:
                key = key.replace("layers.1", "layers.0")
            elif layer_nb == 2:
                key = key.replace("layers.2", "proj_out")
        # vision_encoder.neck.convs.1.conv.bias -> vision_encoder.neck.convs.1.bias
        if re.match(output_vision_encoder_neck_pattern, key):
            key = key.replace(".conv.", ".")
        # memory_encoder.o_proj.weight -> memory_encoder.projection.weight
        if re.match(output_memory_encoder_projection_pattern, key):
            key = key.replace(".o_proj.", ".projection.")
        if re.match(output_object_pointer_proj_pattern, key):
            layer_nb = int(re.match(output_object_pointer_proj_pattern, key).group(1))
            if layer_nb == 0:
                key = key.replace("layers.0", "proj_in")
            elif layer_nb == 1:
                key = key.replace("layers.1", "layers.0")
            elif layer_nb == 2:
                key = key.replace("layers.2", "proj_out")
        # mask_downsampler: sequential index 12 is the final conv; otherwise index
        # i maps to layers.(i // 3) with i % 3 == 0 -> conv, i % 3 == 1 -> layer_norm
        # (i % 3 == 2, the activation, carries no weights and is left unmapped).
        if re.match(output_memory_encoder_mask_downsampler_pattern, key):
            layer_nb = int(re.match(output_memory_encoder_mask_downsampler_pattern, key).group(1))
            if layer_nb == 12:
                key = key.replace(f"encoder.{layer_nb}", "final_conv")
            elif layer_nb % 3 == 0:
                key = key.replace(f"encoder.{layer_nb}", f"layers.{layer_nb // 3}.conv")
            elif layer_nb % 3 == 1:
                key = key.replace(f"encoder.{layer_nb}", f"layers.{layer_nb // 3}.layer_norm")
        key_mapping[value] = key
    return key_mapping
def convert_old_keys_to_new_keys(state_dict_keys: list[str]) -> dict[str, str]:
    """
    Convert original SAM3 checkpoint keys to HuggingFace format.

    Applies the bulk regex renames from `ORIGINAL_TO_CONVERTED_KEY_MAPPING`, then the
    per-key fix-ups of `replace_keys` and `adapt_internal_ckpt`.

    Args:
        state_dict_keys: List of original checkpoint keys

    Returns:
        Dictionary mapping original keys to new keys (empty when `state_dict_keys` is None)
    """
    if state_dict_keys is None:
        return {}
    # Join all keys into one newline-separated blob so each regex runs once
    # over every key instead of once per key.
    joined_old = "\n".join(state_dict_keys)
    joined_new = joined_old
    for pattern, replacement in ORIGINAL_TO_CONVERTED_KEY_MAPPING.items():
        joined_new = re.sub(pattern, replacement, joined_new, flags=re.MULTILINE)
    key_mapping = dict(zip(joined_old.split("\n"), joined_new.split("\n")))
    key_mapping = replace_keys(key_mapping)
    return adapt_internal_ckpt(key_mapping)
def split_qkv(state_dict: dict) -> dict:
    """
    Split combined QKV weights/biases into separate Q, K, V projections.

    Both the vision backbone and text encoder in the original SAM3 use combined QKV
    projections, but the refactored model uses separate Q, K, V projections.

    Args:
        state_dict: State dictionary with combined QKV weights

    Returns:
        State dictionary with split Q, K, V weights (mutated in place and returned)
    """
    # Vision backbone: .attention.qkv.* -> .attention.{q,k,v}_proj.*
    for combined_key in [name for name in state_dict.keys() if ".attention.qkv." in name]:
        fused = state_dict.pop(combined_key)
        # The fused projection stacks Q, K and V along dim 0 (the output dimension).
        query, key, value = torch.chunk(fused, 3, dim=0)
        state_dict[combined_key.replace(".qkv.", ".q_proj.")] = query
        state_dict[combined_key.replace(".qkv.", ".k_proj.")] = key
        state_dict[combined_key.replace(".qkv.", ".v_proj.")] = value
    # Text encoder, DETR decoder cross-attention and mask decoder:
    # .{attn}.in_proj_{weight,bias} -> .{attn}.{q,k,v}_proj.{weight,bias}
    for combined_key in [name for name in state_dict.keys() if ".in_proj_" in name]:
        fused = state_dict.pop(combined_key)
        query, key, value = torch.chunk(fused, 3, dim=0)
        for suffix in ("weight", "bias"):
            if combined_key.endswith(f"in_proj_{suffix}"):
                stem = combined_key.replace(f"in_proj_{suffix}", "")
                state_dict[f"{stem}q_proj.{suffix}"] = query
                state_dict[f"{stem}k_proj.{suffix}"] = key
                state_dict[f"{stem}v_proj.{suffix}"] = value
                break
    return state_dict
def load_original_state_dict(checkpoint_path: str) -> dict[str, torch.Tensor]:
    """Load the original SAM3 checkpoint and return its raw state dict."""
    print(f"Loading original checkpoint from {checkpoint_path}")
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    # Checkpoints may nest the weights under "model" or "state_dict"
    # (checked in that order), or be a bare state dict.
    for wrapper_key in ("model", "state_dict"):
        if wrapper_key in checkpoint:
            state_dict = checkpoint[wrapper_key]
            break
    else:
        state_dict = checkpoint
    print(f"Loaded {len(state_dict)} keys from checkpoint")
    return state_dict
def get_sam3_video_config(
    vision_config: dict | None = None,
    text_config: dict | None = None,
) -> Sam3VideoConfig:
    """
    Create a SAM3 video configuration, optionally overriding sub-config attributes.

    Args:
        vision_config: Optional vision encoder configuration overrides
        text_config: Optional text encoder (CLIPTextConfig) configuration overrides

    Returns:
        Sam3VideoConfig instance
    """
    config = Sam3VideoConfig()
    # Apply caller-provided overrides onto the nested sub-configs.
    override_pairs = (
        (vision_config, config.vision_config),
        (text_config, config.text_config),
    )
    for overrides, target in override_pairs:
        if overrides is not None:
            for attr_name, attr_value in overrides.items():
                setattr(target, attr_name, attr_value)
    return config
def convert_sam3_checkpoint(
    checkpoint_path: str,
    output_path: str,
    config: Sam3VideoConfig | None = None,
    push_to_hub: bool = False,
    repo_id: str | None = None,
):
    """
    Convert SAM3 checkpoint from original format to HuggingFace format.

    Saves the config, converted weights, and processor to `output_path`, then
    verifies the result by reloading it under every supported model class.

    Args:
        checkpoint_path: Path to the original checkpoint file
        output_path: Path to save the converted checkpoint
        config: Optional Sam3VideoConfig to use (otherwise creates default)
        push_to_hub: Whether to push the model to the Hub
        repo_id: Repository ID for pushing to Hub
    """
    # Create output directory
    os.makedirs(output_path, exist_ok=True)
    # Load configuration
    if config is None:
        config = get_sam3_video_config()
    config.architectures = ["Sam3VideoModel"]
    config.save_pretrained(output_path)
    print("Model config saved successfully")
    # Load and convert weights
    print("Loading original checkpoint...")
    state_dict_old = load_original_state_dict(checkpoint_path)
    print("Converting checkpoint keys...")
    all_keys = list(state_dict_old.keys())
    key_mapping = convert_old_keys_to_new_keys(all_keys)
    # Create new state dict with converted keys
    state_dict_new = {}
    for old_key in all_keys:
        new_key = key_mapping.get(old_key, old_key)
        # Special handling: Strip cls token from vision backbone position embeddings
        if new_key == "detector_model.vision_encoder.backbone.embeddings.position_embeddings":
            # Original has [1, 577, 1024] with cls token, but refactored expects [1, 576, 1024] without cls token
            # Strip the first position (cls token position)
            state_dict_new[new_key] = state_dict_old[old_key][:, 1:, :]
        else:
            state_dict_new[new_key] = state_dict_old[old_key]
    # NOTE(review): the prompt-encoder copy of this embedding is kept as well
    # (assignment, not pop) — both modules appear to share the same weights.
    state_dict_new["tracker_model.shared_image_embedding.positional_embedding"] = state_dict_new[
        "tracker_model.prompt_encoder.shared_embedding.positional_embedding"
    ]
    # The four separate point embeddings are fused into one embedding matrix.
    state_dict_new["tracker_model.prompt_encoder.point_embed.weight"] = torch.cat(
        [state_dict_new.pop(f"tracker_model.prompt_encoder.point_embed.{i}.weight") for i in range(4)],
        dim=0,
    )
    # Free the original state dict before building the model to limit peak memory.
    del state_dict_old
    gc.collect()
    # Split combined QKV projections into separate Q, K, V projections
    print("Splitting QKV projections...")
    state_dict_new = split_qkv(state_dict_new)
    # Transpose CLIP text projection (stored transposed in original)
    if "detector_model.text_encoder.text_projection.weight" in state_dict_new:
        print("Transposing CLIP text_projection...")
        state_dict_new["detector_model.text_encoder.text_projection.weight"] = state_dict_new[
            "detector_model.text_encoder.text_projection.weight"
        ].T
    # Load into HF models
    print("Loading weights into Sam3VideoModel...")
    model = Sam3VideoModel(config)
    missing_keys, unexpected_keys = model.load_state_dict(state_dict_new, strict=False)
    if missing_keys:
        logger.warning(f"Missing keys ({len(missing_keys)}):")
        for key in missing_keys:  # Show more keys for debugging
            logger.warning(f"  - {key}")
    if unexpected_keys:
        logger.warning(f"Unexpected keys ({len(unexpected_keys)}):")
        for key in unexpected_keys:  # Show more keys for debugging
            logger.warning(f"  - {key}")
    # Note: Some missing/unexpected keys are expected:
    # - vision_encoder.backbone.embeddings.patch_embeddings.projection.bias: patch projection has bias=False
    # - geometry_encoder.mask_encoder.projection.*: this is nn.Identity() in original (no weights)
    # - rotary_emb.rope_embeddings: pre-computed in original, computed on-the-fly in refactored
    # - text_encoder.text_projection.bias: projection layer might not have bias
    # Save model
    print(f"Saving converted model to {output_path}")
    model.save_pretrained(
        output_path,
    )
    # Save processor
    print("Creating and saving processor...")
    image_processor = Sam3ImageProcessorFast()
    video_processor = Sam2VideoVideoProcessor(
        image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], size={"height": 1008, "width": 1008}
    )
    tokenizer = CLIPTokenizerFast.from_pretrained("openai/clip-vit-base-patch32", max_length=32, model_max_length=32)
    processor = Sam3VideoProcessor(
        image_processor=image_processor, video_processor=video_processor, tokenizer=tokenizer
    )
    processor.save_pretrained(output_path)
    # Push to hub if requested
    if push_to_hub:
        if repo_id is None:
            raise ValueError("repo_id must be provided when push_to_hub=True")
        print(f"Pushing model to Hub: {repo_id}")
        model.push_to_hub(repo_id, private=True)
        processor.push_to_hub(repo_id, private=True)
    print("Conversion complete!")
    print(f"Model saved successfully to: {output_path}")
    # Cleanup
    del state_dict_new, model
    gc.collect()
    # Verify the conversion by reloading under each supported model class; the
    # first three checks re-raise so a broken export fails the script.
    print("Loading saved weights into Sam3TrackerVideoModel...")
    try:
        model = Sam3TrackerVideoModel.from_pretrained(output_path)
        param_count = sum(p.numel() for p in model.parameters())
        print(f"✓ Successfully loaded model with {param_count:,} parameters")
        del model
        gc.collect()
    except Exception as e:
        print(f"✗ Failed to reload model: {e}")
        raise e
    print("Loading saved weights into Sam3TrackerModel...")
    try:
        model = Sam3TrackerModel.from_pretrained(output_path)
        param_count = sum(p.numel() for p in model.parameters())
        print(f"✓ Successfully loaded model with {param_count:,} parameters")
        del model
        gc.collect()
    except Exception as e:
        print(f"✗ Failed to reload model: {e}")
        raise e
    print("Loading saved weights into Sam3Model...")
    try:
        model = Sam3Model.from_pretrained(output_path)
        param_count = sum(p.numel() for p in model.parameters())
        print(f"✓ Successfully loaded model with {param_count:,} parameters")
        del model
        gc.collect()
    except Exception as e:
        print(f"✗ Failed to reload model: {e}")
        raise e
    print("\nVerifying converted checkpoint can be loaded...")
    try:
        model = Sam3VideoModel.from_pretrained(output_path)
        param_count = sum(p.numel() for p in model.parameters())
        print(f"✓ Successfully loaded model with {param_count:,} parameters")
        del model
        gc.collect()
    except Exception as e:
        # NOTE(review): unlike the three checks above, this one does not re-raise
        # on failure — confirm whether that is intentional.
        print(f"✗ Failed to reload model: {e}")
    print("\n" + "=" * 80)
    print("Conversion finished!")
    print("=" * 80)
    print(f"Output directory: {output_path}")
    print("\nTo test the model, you can run:")
    print(">>> from transformers import Sam3Model")
    print(f">>> model = Sam3Model.from_pretrained('{output_path}')")
    print("=" * 80)
def main():
    """Command-line entry point for the SAM3 checkpoint conversion."""
    parser = argparse.ArgumentParser(description="Convert SAM3 checkpoint to HuggingFace format")
    parser.add_argument(
        "--checkpoint_path", type=str, required=True, help="Path to the original SAM3 checkpoint file"
    )
    parser.add_argument("--output_path", type=str, required=True, help="Path to save the converted checkpoint")
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the converted model to the Hugging Face Hub",
    )
    parser.add_argument(
        "--repo_id",
        type=str,
        default=None,
        help="Repository ID for pushing to Hub (e.g., 'facebook/sam3-large')",
    )
    args = parser.parse_args()
    convert_sam3_checkpoint(
        checkpoint_path=args.checkpoint_path,
        output_path=args.output_path,
        push_to_hub=args.push_to_hub,
        repo_id=args.repo_id,
    )
# Script entry point: run the conversion with command-line arguments.
if __name__ == "__main__":
    main()
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/sam3_video/convert_sam3_video_to_hf.py",
"license": "Apache License 2.0",
"lines": 686,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/sam3_video/modeling_sam3_video.py | # Copyright 2025 The Meta AI Authors and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict, defaultdict
from collections.abc import Iterator
from copy import deepcopy
from dataclasses import dataclass
from typing import Any
import torch
import torch.nn.functional as F
from torch import Tensor
from tqdm.auto import tqdm
from transformers.models.sam3.modeling_sam3 import Sam3VisionNeck
from ...modeling_utils import PreTrainedModel
from ...utils import ModelOutput, auto_docstring, is_kernels_available, logging
from ..auto import AutoModel
from .configuration_sam3_video import Sam3VideoConfig
if is_kernels_available():
from ...integrations.hub_kernels import get_kernel
logger = logging.get_logger(__name__)
cv_utils_kernel = None  # None = not attempted, False = failed, kernel object = success


def _load_cv_utils_kernel_once():
    """Load cv_utils_kernel once on first use."""
    global cv_utils_kernel
    # Anything other than None means a load was already attempted (whether it
    # succeeded or not) — never retry.
    if cv_utils_kernel is not None:
        return
    if is_kernels_available():
        try:
            cv_utils_kernel = get_kernel("kernels-community/cv-utils")
        except Exception as e:
            logger.warning_once(
                f"Failed to load cv_utils kernel (your torch/cuda setup may not be supported): {e}. "
                "NMS post-processing, hole filling, and sprinkle removal will be skipped."
            )
            cv_utils_kernel = False
    else:
        logger.warning_once(
            "kernels library is not installed. NMS post-processing, hole filling, and sprinkle removal will be skipped. "
            "Install it with `pip install kernels` for better mask quality."
        )
        cv_utils_kernel = False
class Sam3VideoInferenceCache:
"""Cache for vision features and model constants."""
def __init__(
self,
inference_device: torch.device | str = "cpu",
inference_state_device: torch.device | str = "cpu",
max_vision_features_cache_size: int = 1,
):
self.inference_device = inference_device
self.inference_state_device = inference_state_device
self.max_vision_features_cache_size = max_vision_features_cache_size
self._vision_features = {}
def cache_vision_features(self, frame_idx: int, features: dict):
"""Cache vision features with automatic device management."""
cached = {}
if len(self._vision_features) >= self.max_vision_features_cache_size:
# remove the oldest frame
self._vision_features.pop(min(self._vision_features.keys()))
for key, value in features.items():
if isinstance(value, torch.Tensor):
cached[key] = value.to(self.inference_state_device, non_blocking=True)
elif isinstance(value, (list, tuple)) and value and isinstance(value[0], torch.Tensor):
cached[key] = [v.to(self.inference_state_device, non_blocking=True) for v in value]
else:
cached[key] = value
self._vision_features[frame_idx] = cached
def get_vision_features(self, frame_idx: int) -> dict | None:
"""Get cached vision features, automatically moved to inference device."""
if frame_idx not in self._vision_features:
return None
cached = self._vision_features[frame_idx]
moved = {}
for key, value in cached.items():
if isinstance(value, torch.Tensor):
moved[key] = value.to(self.inference_device, non_blocking=True)
elif isinstance(value, (list, tuple)) and value and isinstance(value[0], torch.Tensor):
moved[key] = [v.to(self.inference_device, non_blocking=True) for v in value]
else:
moved[key] = value
return moved
def clear_all(self):
"""Clear all cached data."""
self._vision_features.clear()
class Sam3VideoInferenceSession:
r"""
Manages video inference session parameters, state and cache.
Args:
video (`torch.FloatTensor`, *optional*):
The video to process. No need to provide when streaming.
video_height (`int`, *optional*):
The height of the video.
video_width (`int`, *optional*):
The width of the video.
inference_device (`torch.device`, *optional*, defaults to `"cpu"`):
The device to use for inference.
inference_state_device (`torch.device`, *optional*, defaults to `"cpu"`):
The device to store the inference state on.
video_storage_device (`torch.device`, *optional*, defaults to `"cpu"`):
The device to store the video on.
dtype (`torch.dtype`, *optional*, defaults to `"float32"`):
The dtype to use for the video.
max_vision_features_cache_size (`int`, *optional*, defaults to 1):
The maximum number of vision features to cache.
"""
def __init__(
self,
video: torch.FloatTensor | None = None,
video_height: int | None = None,
video_width: int | None = None,
inference_device: torch.device | str = "cpu",
inference_state_device: torch.device | str = "cpu",
video_storage_device: torch.device | str = "cpu",
dtype: torch.dtype | str = "float32",
max_vision_features_cache_size: int = 1,
):
# store as a dictionary to avoid double memory allocation with torch.cat when adding new frames
self.processed_frames = (
dict(enumerate(video.to(video_storage_device, dtype=dtype))) if video is not None else None
)
self.video_height = video_height
self.video_width = video_width
self.inference_device = inference_device
self.inference_state_device = inference_state_device
self.video_storage_device = video_storage_device
self.dtype = dtype
self.max_vision_features_cache_size = max_vision_features_cache_size
# Cache for computed features
self.cache = Sam3VideoInferenceCache(
inference_device=self.inference_device,
inference_state_device=self.inference_state_device,
max_vision_features_cache_size=self.max_vision_features_cache_size,
)
# Persistent object tracking state
self._obj_id_to_idx = OrderedDict()
self._obj_idx_to_id = OrderedDict()
self.obj_ids = []
self.mask_inputs_per_obj = {}
self.point_inputs_per_obj = {}
# Persistent model outputs/history
self.output_dict_per_obj = {}
self.frames_tracked_per_obj = {}
# Multi-prompt support
self.prompts = {} # prompt_id -> prompt_text
self.prompt_input_ids = {} # prompt_id -> input_ids
self.prompt_embeddings = {} # prompt_id -> text embeddings
self.prompt_attention_masks = {} # prompt_id -> attention_mask
self.obj_id_to_prompt_id = {} # obj_id -> prompt_id (assigned at detection time)
# Tracking metadata for detection-tracking fusion
self.obj_id_to_score = {} # Detection scores per object
self.obj_id_to_tracker_score_frame_wise = defaultdict(dict) # Frame-wise tracker scores
self.obj_id_to_last_occluded = {} # Last occlusion frame per object
self.max_obj_id = -1 # Maximum object ID assigned so far (-1 means no object has been assigned yet)
# Hotstart metadata
self.obj_first_frame_idx = {} # First frame index per object
self.unmatched_frame_inds = defaultdict(list) # Unmatched frame indices per object
self.overlap_pair_to_frame_inds = defaultdict(list) # Overlap tracking for duplicate detection
self.trk_keep_alive = {} # Keep-alive counters per object
self.removed_obj_ids = set() # Set of removed object IDs
self.suppressed_obj_ids = defaultdict(set) # Suppressed object IDs per frame
self.hotstart_removed_obj_ids = set() # Set of removed object IDs during hotstart
# Output buffering for hotstart delay
self.output_buffer = []
@property
def num_frames(self) -> int | None:
"""Number of frames in the video."""
return len(self.processed_frames) if self.processed_frames is not None else None
def add_prompt(self, prompt_text: str) -> int:
"""
Add a text prompt to the session and return its unique ID.
If the prompt already exists, returns the existing ID.
"""
for prompt_id, text in self.prompts.items():
if text == prompt_text:
return prompt_id
prompt_id = len(self.prompts)
self.prompts[prompt_id] = prompt_text
return prompt_id
# Object management
def obj_id_to_idx(self, obj_id: int) -> int:
"""Map object ID to index, creating new entry if needed."""
if obj_id not in self._obj_id_to_idx:
obj_idx = len(self._obj_id_to_idx)
self._obj_id_to_idx[obj_id] = obj_idx
self._obj_idx_to_id[obj_idx] = obj_id
self.obj_ids.append(obj_id)
self.mask_inputs_per_obj[obj_idx] = {}
self.point_inputs_per_obj[obj_idx] = {}
self.output_dict_per_obj[obj_idx] = {
"cond_frame_outputs": {},
"non_cond_frame_outputs": {},
}
self.frames_tracked_per_obj[obj_idx] = {}
return self._obj_id_to_idx[obj_id]
# Video Inference specific functions
def obj_idx_to_id(self, obj_idx: int) -> int:
"""Map model-side object index to client-side object id."""
return self._obj_idx_to_id[obj_idx]
def get_obj_num(self) -> int:
"""Get the total number of unique object ids received so far in this session."""
return len(self._obj_idx_to_id)
def add_mask_inputs(self, obj_idx: int, frame_idx: int, inputs: torch.Tensor):
"""Add mask inputs with automatic device placement."""
self.mask_inputs_per_obj[obj_idx][frame_idx] = inputs.to(
self.inference_device, dtype=self.dtype, non_blocking=True
)
def remove_mask_inputs(self, obj_idx: int, frame_idx: int):
"""Remove mask inputs."""
self.mask_inputs_per_obj[obj_idx].pop(frame_idx, None)
def remove_object(self, obj_id: int, strict: bool = False):
"""
Remove an object from the inference session. This would remove the object from
all frames in the video.
Args:
obj_id (`int`): The object ID to remove.
strict (`bool`, *optional*, defaults to `False`): Whether to raise an error if the object doesn't exist.
"""
old_obj_idx_to_rm = self._obj_id_to_idx.get(obj_id, None)
# Check whether this object_id to remove actually exists and possibly raise an error.
if old_obj_idx_to_rm is None:
if not strict:
return
raise RuntimeError(
f"Cannot remove object id {obj_id} as it doesn't exist. All existing object ids: {self.obj_ids}."
)
# Clean up prompt mapping
self.obj_id_to_prompt_id.pop(obj_id, None)
# If this is the only remaining object id, we simply reset the state.
if len(self._obj_id_to_idx) == 1:
self.reset_inference_session()
return
# Step 1: Update the object id mapping (note that it must be done after Step 0,
# since Step 0 still requires the old object id mappings in inference_state)
old_obj_ids = self.obj_ids
old_obj_inds = list(range(len(old_obj_ids)))
remain_old_obj_inds = old_obj_inds.copy()
remain_old_obj_inds.remove(old_obj_idx_to_rm)
new_obj_ids = [old_obj_ids[old_idx] for old_idx in remain_old_obj_inds]
new_obj_inds = list(range(len(new_obj_ids)))
# build new mappings
old_idx_to_new_idx = dict(zip(remain_old_obj_inds, new_obj_inds))
self._obj_id_to_idx = dict(zip(new_obj_ids, new_obj_inds))
self._obj_idx_to_id = dict(zip(new_obj_inds, new_obj_ids))
self.obj_ids = new_obj_ids
# Step 2: For per-object tensor storage, we shift their obj_idx in the dict keys.
def _map_keys(container):
new_kvs = []
for k in old_obj_inds:
v = container.pop(k)
if k in old_idx_to_new_idx:
new_kvs.append((old_idx_to_new_idx[k], v))
container.update(new_kvs)
_map_keys(self.point_inputs_per_obj)
_map_keys(self.mask_inputs_per_obj)
_map_keys(self.output_dict_per_obj)
_map_keys(self.frames_tracked_per_obj)
# Output management with smart device placement
def store_output(
self,
obj_idx: int,
frame_idx: int,
output_key: str | None = None,
output_value: torch.Tensor | dict | None = None,
is_conditioning_frame: bool = True,
):
"""
Store output with smart device management.
If output_key is None, the output is stored as a dictionary.
Args:
obj_idx (int): The index of the object.
frame_idx (int): The index of the frame.
output_key (Optional[str]): The key of the output. If None, the output is stored as a dictionary.
output_value (Optional[Union[torch.Tensor, dict]]): The value of the output.
is_conditioning_frame (bool): Whether the output is for a conditioning frame.
"""
storage_key = "cond_frame_outputs" if is_conditioning_frame else "non_cond_frame_outputs"
if output_key is None and isinstance(output_value, dict):
self.output_dict_per_obj[obj_idx][storage_key][frame_idx] = {}
for key, value in output_value.items():
self.store_output(obj_idx, frame_idx, key, value, is_conditioning_frame)
return
# Device placement: small tensors stay on inference device, large ones go to inference state device
if output_key in ["object_pointer", "object_score_logits"]: # Small tensors
self.output_dict_per_obj[obj_idx][storage_key][frame_idx][output_key] = output_value
elif isinstance(output_value, torch.Tensor): # Large tensors like masks, features
self.output_dict_per_obj[obj_idx][storage_key][frame_idx][output_key] = output_value.to(
self.inference_state_device, non_blocking=True
)
else:
self.output_dict_per_obj[obj_idx][storage_key][frame_idx][output_key] = output_value
def get_output(
self,
obj_idx: int,
frame_idx: int,
output_key: str,
is_conditioning_frame: bool = True,
):
"""
Get output with smart device management.
Args:
obj_idx (int): The index of the object.
frame_idx (int): The index of the frame.
output_key (str): The key of the output.
is_conditioning_frame (bool): Whether the output is for a conditioning frame.
"""
storage_key = "cond_frame_outputs" if is_conditioning_frame else "non_cond_frame_outputs"
out = self.output_dict_per_obj[obj_idx][storage_key].get(frame_idx, None)
# move to inference device if needed
if out is None:
return None
value = out[output_key]
if isinstance(value, torch.Tensor):
value = value.to(self.inference_device, non_blocking=True)
return value
# Video frame management
def add_new_frame(self, pixel_values: torch.Tensor, frame_idx: int | None = None) -> int:
"""Add new frame with automatic device placement."""
pixel_values = pixel_values.to(self.video_storage_device, dtype=self.dtype, non_blocking=True)
if pixel_values.dim() == 4:
pixel_values = pixel_values.squeeze(0)
if frame_idx is None:
frame_idx = len(self.processed_frames) if self.processed_frames is not None else 0
if self.processed_frames is None:
self.processed_frames = {frame_idx: pixel_values}
else:
self.processed_frames[frame_idx] = pixel_values
return frame_idx
def get_frame(self, frame_idx: int) -> torch.Tensor:
"""Get frame from video."""
return self.processed_frames[frame_idx].to(self.inference_device, non_blocking=True)
def reset_tracking_data(self):
"""Reset tracking data but keep cache."""
self._obj_id_to_idx.clear()
self._obj_idx_to_id.clear()
self.obj_ids.clear()
self.output_dict_per_obj.clear()
self.frames_tracked_per_obj.clear()
# Note: cache and video data are preserved
# Reset prompt mappings for objects (but keep prompts themselves)
self.obj_id_to_prompt_id.clear()
def reset_inference_session(self):
"""Reset tracking data and cache."""
self._obj_id_to_idx.clear()
self._obj_idx_to_id.clear()
self.obj_ids.clear()
self.output_dict_per_obj.clear()
self.frames_tracked_per_obj.clear()
self.cache.clear_all()
# Reset prompt mappings for objects (but keep prompts themselves)
self.obj_id_to_prompt_id.clear()
def reset_state(self):
"""Reset the inference session state."""
self._obj_id_to_idx = OrderedDict()
self._obj_idx_to_id = OrderedDict()
self.obj_ids = []
self.output_dict_per_obj = {}
self.frames_tracked_per_obj = {}
# Reset detection-tracking fusion state
self.obj_id_to_score = {}
self.obj_id_to_tracker_score_frame_wise = defaultdict(dict)
self.obj_id_to_last_occluded = {}
self.max_obj_id = 0
self.obj_first_frame_idx = {}
self.unmatched_frame_inds = defaultdict(list)
self.overlap_pair_to_frame_inds = defaultdict(list)
self.trk_keep_alive = {}
self.removed_obj_ids = set()
self.suppressed_obj_ids = defaultdict(set)
self.output_buffer = []
# Reset multi-prompt state
self.prompts.clear()
self.prompt_input_ids.clear()
self.prompt_embeddings.clear()
self.prompt_attention_masks.clear()
self.obj_id_to_prompt_id.clear()
# Clear cache
self.cache.clear_all()
@dataclass
@auto_docstring(custom_intro="Base class for the Sam3Video model's output.")
class Sam3VideoSegmentationOutput(ModelOutput):
    r"""
    object_ids (`list[int]`, *optional*):
        List of object IDs being tracked in the current frame.
    obj_id_to_mask (`dict[int, torch.FloatTensor]`, *optional*):
        Dictionary mapping object IDs to their predicted low-resolution masks.
        Each mask has shape `(1, H_low, W_low)`.
    obj_id_to_score (`dict[int, float]`, *optional*):
        Dictionary mapping object IDs to their detection scores.
    obj_id_to_tracker_score (`dict[int, float]`, *optional*):
        Dictionary mapping object IDs to their tracker scores for the current frame.
    removed_obj_ids (`set[int]`, *optional*):
        Set of object IDs that have been removed (e.g., via hotstart heuristics).
    suppressed_obj_ids (`set[int]`, *optional*):
        Set of object IDs that have been suppressed in the current frame.
    frame_idx (`int`, *optional*):
        The frame index of the video.
    """

    # All fields default to None so a partially-populated output can be constructed;
    # field semantics are documented in the class docstring above.
    object_ids: list[int] | None = None
    obj_id_to_mask: dict[int, torch.FloatTensor] | None = None
    obj_id_to_score: dict[int, float] | None = None
    obj_id_to_tracker_score: dict[int, float] | None = None
    removed_obj_ids: set[int] | None = None
    suppressed_obj_ids: set[int] | None = None
    frame_idx: int | None = None
class Sam3VideoPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = Sam3VideoConfig
    base_model_prefix = "sam3_video"
    main_input_name = "pixel_values"
    # Input modalities accepted by this model family.
    input_modalities = ["video", "text"]
    # Attention-backend capabilities advertised to the `PreTrainedModel` machinery.
    _supports_sdpa = True
    _supports_flash_attn = True
    _supports_flex_attn = True
    _supports_attention_backend = True
@auto_docstring
class Sam3VideoModel(Sam3VideoPreTrainedModel):
    def __init__(self, config: Sam3VideoConfig):
        """
        Build the video model from `config`.

        Instantiates the detector and tracker sub-models and copies the
        detection / association / hotstart thresholds from the config onto the
        instance for direct access during inference.

        Args:
            config: Configuration holding the detector/tracker sub-configs and
                the tracking heuristics' thresholds.
        """
        super().__init__(config)
        self.config = config
        self.detector_model = AutoModel.from_config(config.detector_config)
        # The tracker's own vision encoder is dropped; tracker features are derived
        # from the detector's vision embeddings (see `get_vision_features_for_tracker`).
        self.tracker_model = AutoModel.from_config(config.tracker_config, remove_vision_encoder=True)
        self.low_res_mask_size = config.low_res_mask_size
        # Detection thresholds
        self.score_threshold_detection = config.score_threshold_detection
        self.det_nms_thresh = config.det_nms_thresh
        # Detection <-> track association thresholds
        self.assoc_iou_thresh = config.assoc_iou_thresh
        self.trk_assoc_iou_thresh = config.trk_assoc_iou_thresh
        self.new_det_thresh = config.new_det_thresh
        self.recondition_on_trk_masks = config.recondition_on_trk_masks
        # hotstart parameters
        self.hotstart_delay = config.hotstart_delay
        self.hotstart_unmatch_thresh = config.hotstart_unmatch_thresh
        self.hotstart_dup_thresh = config.hotstart_dup_thresh
        self.suppress_unmatched_only_within_hotstart = config.suppress_unmatched_only_within_hotstart
        self.init_trk_keep_alive = config.init_trk_keep_alive
        self.max_trk_keep_alive = config.max_trk_keep_alive
        self.min_trk_keep_alive = config.min_trk_keep_alive
        self.suppress_overlapping_based_on_recent_occlusion_threshold = (
            config.suppress_overlapping_based_on_recent_occlusion_threshold
        )
        self.decrease_trk_keep_alive_for_empty_masklets = config.decrease_trk_keep_alive_for_empty_masklets
        self.fill_hole_area = config.fill_hole_area
        self.eval()
        # the maximum object number
        self.max_num_objects = config.max_num_objects
        self.recondition_every_nth_frame = config.recondition_every_nth_frame
        self.high_conf_thresh = config.high_conf_thresh
        self.high_iou_thresh = config.high_iou_thresh
        self.tracker_neck = Sam3VisionNeck(config.detector_config.vision_config)
        self.post_init()
def get_vision_features_for_tracker(self, vision_embeds: torch.Tensor):
hidden_states = vision_embeds.last_hidden_state
batch_size = hidden_states.shape[0]
height, width = self.tracker_model.prompt_encoder.image_embedding_size
hidden_states_spatial = hidden_states.view(batch_size, height, width, -1).permute(0, 3, 1, 2)
fpn_hidden_states, fpn_position_encoding = self.tracker_neck(hidden_states_spatial)
# precompute projected level 0 and level 1 features in SAM decoder
# to avoid running it again on every SAM click
feature_maps = list(fpn_hidden_states[:-1])
feature_maps[0] = self.tracker_model.mask_decoder.conv_s0(feature_maps[0])
feature_maps[1] = self.tracker_model.mask_decoder.conv_s1(feature_maps[1])
# flatten NxCxHxW to HWxNxC
feature_maps = [feature_map.flatten(2).permute(2, 0, 1) for feature_map in feature_maps]
feature_maps_position_embeddings = [
feature_map_position_embedding.flatten(2).permute(2, 0, 1)
for feature_map_position_embedding in fpn_position_encoding[:-1]
]
return feature_maps, feature_maps_position_embeddings
    def run_detection(
        self,
        inference_session: Sam3VideoInferenceSession,
        vision_embeds: torch.Tensor,
    ):
        """
        Run detection for all prompts efficiently by reusing vision embeddings.

        Text embeddings are computed once per prompt and cached on the session.

        Args:
            inference_session: The inference session containing prompts and state
            vision_embeds: Pre-computed vision embeddings to reuse across prompts

        Returns:
            Dictionary mapping prompt_id to a dict with keys `"bbox"`, `"mask"` and
            `"scores"` holding the detections scoring above the detection threshold.

        Raises:
            ValueError: If the session has no prompts.
        """
        prompt_ids = list(inference_session.prompts.keys())
        if not prompt_ids:
            raise ValueError("No prompts available for detection. Please add prompts to the session first.")
        all_detections = {}
        for prompt_id in prompt_ids:
            # Get or compute text embeddings for this prompt (cached on the session)
            if prompt_id not in inference_session.prompt_embeddings:
                text_embeds = self.detector_model.get_text_features(
                    input_ids=inference_session.prompt_input_ids[prompt_id],
                    attention_mask=inference_session.prompt_attention_masks[prompt_id],
                    return_dict=True,
                ).pooler_output
                inference_session.prompt_embeddings[prompt_id] = text_embeds
            else:
                text_embeds = inference_session.prompt_embeddings[prompt_id]
            # Run detector with cached vision features (efficient!)
            detector_outputs = self.detector_model(
                vision_embeds=vision_embeds,
                text_embeds=text_embeds,
                attention_mask=inference_session.prompt_attention_masks[prompt_id],
            )
            pred_logits = detector_outputs.pred_logits
            presence_logits = detector_outputs.presence_logits
            pred_probs = pred_logits.sigmoid()
            presence_scores = presence_logits.sigmoid()
            # Weight per-query probabilities by the global presence score.
            pred_probs = pred_probs * presence_scores
            run_nms = self.det_nms_thresh > 0.0
            if run_nms:
                keep = nms_masks(
                    pred_probs=pred_probs[0],
                    pred_masks=detector_outputs.pred_masks[0],
                    prob_threshold=self.score_threshold_detection,
                    iou_threshold=self.det_nms_thresh,
                )
                # Set suppressed detections' probabilities to 0
                pred_probs[0][~keep] = 0.0
            pred_boxes_xyxy = detector_outputs.pred_boxes
            pred_masks = detector_outputs.pred_masks
            # get the positive detection outputs above threshold
            pos_pred_idx = torch.where(pred_probs > self.score_threshold_detection)
            det_out = {
                "bbox": pred_boxes_xyxy[pos_pred_idx[0], pos_pred_idx[1]],
                "mask": pred_masks[pos_pred_idx[0], pos_pred_idx[1]],
                "scores": pred_probs[pos_pred_idx[0], pos_pred_idx[1]],
            }
            all_detections[prompt_id] = det_out
        return all_detections
def run_tracker_propagation(
self,
inference_session: Sam3VideoInferenceSession,
frame_idx: int,
reverse: bool,
):
low_res_masks_list = []
obj_scores_list = []
if len(inference_session.obj_ids) > 0:
# propagate one frame
out = self.tracker_model(
inference_session=inference_session,
frame_idx=frame_idx,
reverse=reverse,
run_mem_encoder=False,
)
out_low_res_masks = out.pred_masks
out_obj_scores = out.object_score_logits
# only 1 frames should be propagated
low_res_masks_list.append(out_low_res_masks.squeeze(1))
obj_scores_list.append(out_obj_scores.squeeze(1))
# concatenate the output masklets from all local inference states
H_mask = W_mask = self.low_res_mask_size
if len(low_res_masks_list) > 0:
low_res_masks = torch.cat(low_res_masks_list, dim=0)
obj_scores = torch.cat(obj_scores_list, dim=0)
# Apply hole filling to the masks
low_res_masks = fill_holes_in_mask_scores(
low_res_masks.unsqueeze(1),
max_area=self.fill_hole_area,
fill_holes=True,
remove_sprinkles=True,
)
low_res_masks = low_res_masks.squeeze(1)
else:
low_res_masks = torch.zeros(0, H_mask, W_mask, device=self.device)
obj_scores = torch.zeros(0, device=self.device)
return low_res_masks, obj_scores
    def _associate_det_trk(
        self,
        det_masks: Tensor,
        det_scores: Tensor,
        trk_masks: Tensor,
        trk_obj_ids: list[int],
        det_prompt_ids: torch.Tensor,
        trk_prompt_ids: torch.Tensor,
    ):
        """
        Match detections on the current frame with the existing masklets.

        Args:
            - det_masks: (N, H, W) tensor of predicted masks
            - det_scores: (N,) tensor of detection scores
            - trk_masks: (M, H, W) tensor of track masks
            - trk_obj_ids: (M,) list of object IDs corresponding to trk_masks
            - det_prompt_ids: (N,) tensor of prompt IDs for each detection. Prevents cross-prompt
              associations by zeroing IoUs between detections and tracks from different prompts.
            - trk_prompt_ids: (M,) tensor of prompt IDs for each tracked object. Prevents cross-prompt
              associations by zeroing IoUs between detections and tracks from different prompts.

        Returns:
            - new_det_out_inds: list of new object indices among the FA detection outputs
            - unmatched_trk_obj_ids: list of existing masklet object IDs that are not matched
              to any detections on this frame (for unmatched, we only count masklets with >0 area)
            - det_to_matched_trk_obj_ids: dict[int, list[int]]: mapping from FA detection indices
              to the list of matched tracklet object IDs
            - trk_id_to_max_iou_high_conf_det: dict[int, int]: mapping from tracklet object ID to the
              single highest-IoU, high-confidence detection index (at most one detection per track)
            - empty_trk_obj_ids: list of existing masklet object IDs with zero area in SAM2 prediction
        """
        iou_threshold = self.assoc_iou_thresh
        iou_threshold_trk = self.trk_assoc_iou_thresh
        new_det_thresh = self.new_det_thresh
        trk_obj_ids_tensor = (
            torch.tensor(trk_obj_ids, dtype=torch.long, device=det_masks.device)
            if trk_obj_ids
            else torch.empty(0, dtype=torch.long, device=det_masks.device)
        )
        if trk_masks.size(0) == 0:
            # all detections are new
            new_det_out_inds = list(range(det_masks.size(0)))
            unmatched_trk_obj_ids = []
            empty_trk_obj_ids = []
            det_to_matched_trk_obj_ids = {}
            trk_id_to_max_iou_high_conf_det = {}
            return (
                new_det_out_inds,
                unmatched_trk_obj_ids,
                det_to_matched_trk_obj_ids,
                trk_id_to_max_iou_high_conf_det,
                empty_trk_obj_ids,
            )
        elif det_masks.size(0) == 0:
            # all previous tracklets are unmatched if they have a non-zero area
            new_det_out_inds = []
            trk_is_nonempty = (trk_masks > 0).any(dim=(1, 2))  # (M,) tensor
            # Use tensor boolean indexing - elegant and avoids intermediate conversions
            unmatched_trk_obj_ids = trk_obj_ids_tensor[trk_is_nonempty].tolist()
            empty_trk_obj_ids = trk_obj_ids_tensor[~trk_is_nonempty].tolist()
            det_to_matched_trk_obj_ids = {}
            trk_id_to_max_iou_high_conf_det = {}
            return (
                new_det_out_inds,
                unmatched_trk_obj_ids,
                det_to_matched_trk_obj_ids,
                trk_id_to_max_iou_high_conf_det,
                empty_trk_obj_ids,
            )
        det_masks_binary = det_masks > 0
        trk_masks_binary = trk_masks > 0
        ious = mask_iou(det_masks_binary, trk_masks_binary)  # (N, M) tensor
        # Prevent cross-prompt associations by zeroing IoUs between different prompt groups.
        prompt_match = det_prompt_ids.unsqueeze(1) == trk_prompt_ids.unsqueeze(0)
        ious = torch.where(prompt_match, ious, torch.zeros_like(ious))
        # trk_is_matched: for each track, True if matched to any detection above threshold
        trk_is_matched = (ious >= iou_threshold_trk).any(dim=0)  # (M,)
        # Non-empty tracks not matched by Hungarian assignment above threshold are unmatched
        trk_is_nonempty = trk_masks_binary.any(dim=(1, 2))  # (M,)
        trk_is_unmatched = trk_is_nonempty & ~trk_is_matched  # (M,)
        # Use tensor boolean indexing directly - no intermediate conversions
        unmatched_trk_obj_ids = trk_obj_ids_tensor[trk_is_unmatched].tolist()
        empty_trk_obj_ids = trk_obj_ids_tensor[~trk_is_nonempty].tolist()
        # For detections: allow many tracks to match to the same detection (many-to-one)
        # So, a detection is 'new' if it does not match any track above threshold
        det_matches_any_trk = (ious >= iou_threshold).any(dim=1)  # (N,)
        is_new_det = (det_scores >= new_det_thresh) & ~det_matches_any_trk  # (N,)
        new_det_out_inds = torch.where(is_new_det)[0].tolist()
        # Build detection-to-track mappings using tensor operations
        det_to_matched_trk_obj_ids = {}
        trk_id_to_max_iou_high_conf_det = {}  # trk id --> exactly one detection idx
        det_to_max_iou_trk_idx = ious.argmax(dim=1)  # (N,)
        det_is_high_conf = (det_scores >= self.high_conf_thresh) & ~is_new_det  # (N,)
        det_max_iou = ious.max(dim=1)[0]  # (N,)
        det_is_high_iou = det_max_iou >= self.high_iou_thresh  # (N,)
        det_is_high_conf_and_iou = det_is_high_conf & det_is_high_iou  # (N,)
        high_conf_and_iou_mask = det_is_high_conf_and_iou  # Keep as tensor
        for det_idx in range(det_masks.size(0)):
            # Find which tracks match this detection using tensor boolean indexing
            matched_trk_mask = ious[det_idx] >= iou_threshold  # (M,)
            det_to_matched_trk_obj_ids[det_idx] = trk_obj_ids_tensor[matched_trk_mask].tolist()
            if high_conf_and_iou_mask[det_idx].item():
                trk_idx = det_to_max_iou_trk_idx[det_idx].item()
                trk_obj_id = trk_obj_ids_tensor[trk_idx].item()
                trk_id_to_max_iou_high_conf_det[trk_obj_id] = det_idx
        return (
            new_det_out_inds,
            unmatched_trk_obj_ids,
            det_to_matched_trk_obj_ids,
            trk_id_to_max_iou_high_conf_det,
            empty_trk_obj_ids,
        )
    def _process_hotstart(
        self,
        inference_session: Sam3VideoInferenceSession,
        frame_idx: int,
        reverse: bool,
        det_to_matched_trk_obj_ids: dict[int, list[int]],
        new_det_obj_ids: list[int],
        empty_trk_obj_ids: list[int],
        unmatched_trk_obj_ids: list[int],
        extra_metadata: dict[str, Any],
        streaming: bool = False,
    ):
        """
        Handle hotstart heuristics to remove unmatched or duplicated objects.

        In streaming mode, hotstart removal logic is disabled since we don't have
        future frames to make informed decisions about object removal.

        Returns:
            Tuple of (obj_ids_newly_removed, extra_metadata): the set of object IDs
            removed on this frame, and the (mutated in place) bookkeeping dict that
            was passed in. Note: `inference_session` is not read by this method.
        """
        # obj_id --> first frame index where the object was detected
        obj_first_frame_idx = extra_metadata["obj_first_frame_idx"]
        # obj_id --> [mismatched frame indices]
        unmatched_frame_inds = extra_metadata["unmatched_frame_inds"]
        trk_keep_alive = extra_metadata["trk_keep_alive"]
        # (first_appear_obj_id, obj_id) --> [overlap frame indices]
        overlap_pair_to_frame_inds = extra_metadata["overlap_pair_to_frame_inds"]
        # removed_obj_ids: object IDs that are suppressed via hot-start
        removed_obj_ids = extra_metadata["removed_obj_ids"]
        suppressed_obj_ids = extra_metadata["suppressed_obj_ids"][frame_idx]
        obj_ids_newly_removed = set()  # object IDs to be newly removed on this frame
        # Objects first appearing beyond this frame boundary are still "within hotstart".
        hotstart_diff = frame_idx - self.hotstart_delay if not reverse else frame_idx + self.hotstart_delay
        # Step 1: log the frame index where each object ID first appears
        for obj_id in new_det_obj_ids:
            if obj_id not in obj_first_frame_idx:
                obj_first_frame_idx[obj_id] = frame_idx
                trk_keep_alive[int(obj_id)] = self.init_trk_keep_alive
        matched_trks = set()
        # We use the det-->tracks list to check for matched objects. Otherwise, we need to compute areas to decide whether they're occluded
        for matched_trks_per_det in det_to_matched_trk_obj_ids.values():
            matched_trks.update({int(obj_id) for obj_id in matched_trks_per_det})
        for obj_id in matched_trks:
            # NOTE: To minimize number of configurable params, we use the hotstart_unmatch_thresh to set the max value of trk_keep_alive
            trk_keep_alive[int(obj_id)] = min(self.max_trk_keep_alive, trk_keep_alive[int(obj_id)] + 1)
        for obj_id in unmatched_trk_obj_ids:
            unmatched_frame_inds[obj_id].append(frame_idx)
            # NOTE: To minimize number of configurable params, we use the hotstart_unmatch_thresh to set the min value of trk_keep_alive
            # The max keep alive is 2x the min, means the model prefers to keep the prediction rather than suppress it if it was matched long enough.
            trk_keep_alive[int(obj_id)] = max(self.min_trk_keep_alive, trk_keep_alive[int(obj_id)] - 1)
        if self.decrease_trk_keep_alive_for_empty_masklets:
            for obj_id in empty_trk_obj_ids:
                # NOTE: To minimize number of configurable params, we use the hotstart_unmatch_thresh to set the min value of trk_keep_alive
                trk_keep_alive[int(obj_id)] = max(self.min_trk_keep_alive, trk_keep_alive[int(obj_id)] - 1)
        # Step 2: removed tracks that has not matched with detections for `hotstart_unmatch_thresh` frames with hotstart period
        # a) add unmatched frame indices for each existing object ID
        # note that `unmatched_trk_obj_ids` contains those frames where the SAM2 output mask
        # doesn't match any FA detection; it excludes those frames where SAM2 gives an empty mask
        # b) remove a masklet if it first appears after `hotstart_diff` and is unmatched for more
        # than `self.hotstart_unmatch_thresh` frames
        # NOTE: In streaming mode, we skip hotstart removal logic since we don't have future frames
        if not streaming:
            for obj_id, frame_indices in unmatched_frame_inds.items():
                if obj_id in removed_obj_ids or obj_id in obj_ids_newly_removed:
                    continue  # skip if the object is already removed
                if len(frame_indices) >= self.hotstart_unmatch_thresh:
                    is_within_hotstart = (obj_first_frame_idx[obj_id] > hotstart_diff and not reverse) or (
                        obj_first_frame_idx[obj_id] < hotstart_diff and reverse
                    )
                    if is_within_hotstart:
                        obj_ids_newly_removed.add(obj_id)
                        logger.info(
                            f"Removing object {obj_id} at frame {frame_idx} "
                            f"since it is unmatched for frames: {frame_indices}"
                        )
                if (
                    trk_keep_alive[obj_id] <= 0  # Object has not been matched for too long
                    and not self.suppress_unmatched_only_within_hotstart
                    and obj_id not in removed_obj_ids
                    and obj_id not in obj_ids_newly_removed
                ):
                    logger.debug(f"Suppressing object {obj_id} at frame {frame_idx}, due to being unmatched")
                    suppressed_obj_ids.add(obj_id)
        # Step 3: removed tracks that overlaps with another track for `hotstart_dup_thresh` frames
        # a) find overlaps tracks -- we consider overlap if they match to the same detection
        # NOTE: In streaming mode, we still track overlaps for metadata but skip removal logic
        for matched_trk_obj_ids in det_to_matched_trk_obj_ids.values():
            if len(matched_trk_obj_ids) < 2:
                continue  # only count detections that are matched to multiple (>=2) masklets
            # if there are multiple matched track ids, we need to find the one that appeared first;
            # these later appearing ids may be removed since they may be considered as duplicates
            first_appear_obj_id = (
                min(matched_trk_obj_ids, key=lambda x: obj_first_frame_idx[x])
                if not reverse
                else max(matched_trk_obj_ids, key=lambda x: obj_first_frame_idx[x])
            )
            for obj_id in matched_trk_obj_ids:
                if obj_id != first_appear_obj_id:
                    key = (first_appear_obj_id, obj_id)
                    overlap_pair_to_frame_inds[key].append(frame_idx)
        # b) remove a masklet if it first appears after `hotstart_diff` and it overlaps with another
        # masklet (that appears earlier) for more than `self.hotstart_dup_thresh` frames
        # NOTE: In streaming mode, we skip hotstart removal logic since we don't have future frames
        if not streaming:
            for (first_obj_id, obj_id), frame_indices in overlap_pair_to_frame_inds.items():
                if obj_id in removed_obj_ids or obj_id in obj_ids_newly_removed:
                    continue  # skip if the object is already removed
                if (obj_first_frame_idx[obj_id] > hotstart_diff and not reverse) or (
                    obj_first_frame_idx[obj_id] < hotstart_diff and reverse
                ):
                    if len(frame_indices) >= self.hotstart_dup_thresh:
                        obj_ids_newly_removed.add(obj_id)
                        logger.info(
                            f"Removing object {obj_id} at frame {frame_idx} "
                            f"since it overlaps with another track {first_obj_id} at frames: {frame_indices}"
                        )
        removed_obj_ids.update(obj_ids_newly_removed)
        return obj_ids_newly_removed, extra_metadata
    def run_memory_encoder(
        self,
        inference_session: Sam3VideoInferenceSession,
        frame_idx: int,
        high_res_masks: torch.Tensor,
        object_score_logits: torch.Tensor,
    ):
        """
        Run the memory encoder on `high_res_masks`. This is usually after applying
        non-overlapping constraints to object scores. Since their scores changed, their
        memory also need to be computed again with the memory encoder.

        Args:
            inference_session: Session whose cached vision features are reused.
            frame_idx: Frame whose cached features are encoded.
            high_res_masks: Masks to encode into the tracker memory.
            object_score_logits: Per-object score logits passed to the encoder.

        Returns:
            Tuple of (maskmem_features, maskmem_pos_enc) from the tracker's memory encoder.
        """
        # Retrieve correct image features
        cached_features = inference_session.cache.get_vision_features(frame_idx)
        current_vision_feats = cached_features["vision_feats"]
        maskmem_features, maskmem_pos_enc = self.tracker_model._encode_new_memory(
            current_vision_feats=current_vision_feats[-1],
            pred_masks_high_res=high_res_masks,
            object_score_logits=object_score_logits,
            is_mask_from_pts=False,
        )
        return maskmem_features, maskmem_pos_enc
    def _prepare_recondition_masks(
        self,
        inference_session: Sam3VideoInferenceSession,
        frame_idx: int,
        det_out: dict[str, Tensor],
        trk_masks: Tensor,
        trk_id_to_max_iou_high_conf_det: dict[int, int],
        tracker_obj_scores_global: Tensor,
    ) -> tuple[dict[int, Tensor], set[int]]:
        """
        Prepare high-resolution masks for reconditioned objects.

        Returns a tuple `(reconditioned_masks, reconditioned_obj_ids)`: a dict of
        obj_idx -> high_res_mask for objects that should be reconditioned, and the
        set of object IDs selected for reconditioning.

        When recondition_on_trk_masks=True, uses detector as validation signal to strengthen tracker memory.
        When False, uses detector to correct tracker drift by replacing with detection masks.
        """
        reconditioned_masks = {}
        reconditioned_obj_ids = set()
        for trk_obj_id, det_idx in trk_id_to_max_iou_high_conf_det.items():
            obj_idx = inference_session.obj_id_to_idx(trk_obj_id)
            obj_score = tracker_obj_scores_global[obj_idx]
            # Only recondition objects the tracker itself scores highly.
            if obj_score <= self.high_conf_thresh:
                continue
            if self.recondition_on_trk_masks:
                # Validation mode: detector confirms tracker quality, strengthen memory with tracked mask
                new_mask = trk_masks[obj_idx : obj_idx + 1].unsqueeze(1)
                reconditioned_masks[obj_idx] = new_mask
                reconditioned_obj_ids.add(trk_obj_id)
            else:
                # Correction mode: detector corrects drift, replace tracker mask with detection mask
                new_mask = det_out["mask"][det_idx : det_idx + 1].unsqueeze(1)
                reconditioned_masks[obj_idx] = new_mask >= 0.5
                reconditioned_obj_ids.add(trk_obj_id)
        return reconditioned_masks, reconditioned_obj_ids
    def _get_objects_to_suppress_based_on_most_recently_occluded(
        self,
        binary_low_res_masks: Tensor,
        last_occluded: Tensor,
        obj_ids: list[int],
        reverse: bool = False,
    ):
        """
        Decide which of several overlapping objects to suppress, keeping the one
        that was occluded least recently.

        Args:
            binary_low_res_masks: (N, H, W) boolean masks for the candidate objects.
            last_occluded: (N,) tensor holding, per object, the frame index of its most
                recent occlusion (-1 when never occluded).
            obj_ids: Object IDs corresponding to the masks (only the count is used,
                to short-circuit the trivial <=1-object case).
            reverse: If True, propagation runs backward in time, so the "more recent"
                comparison is flipped.

        Returns:
            (N,) boolean tensor; True marks objects to suppress.
        """
        # Suppress overlapping masks for objects that were most recently occluded
        to_suppress = torch.zeros(
            binary_low_res_masks.size(0),
            device=binary_low_res_masks.device,
            dtype=torch.bool,
        )
        if len(obj_ids) <= 1:
            return to_suppress
        iou = mask_iou(binary_low_res_masks, binary_low_res_masks)  # [N,N]
        # Create masks for upper triangular matrix (i < j) and IoU threshold
        mask_iou_thresh = iou >= self.suppress_overlapping_based_on_recent_occlusion_threshold
        overlapping_pairs = torch.triu(mask_iou_thresh, diagonal=1)  # [N,N]
        last_occ_expanded_i = last_occluded.unsqueeze(1)  # (N, 1)
        last_occ_expanded_j = last_occluded.unsqueeze(0)  # (1, N)
        # Suppress most recently occluded
        cmp_op = torch.gt if not reverse else torch.lt
        suppress_i_mask = (
            overlapping_pairs
            & cmp_op(last_occ_expanded_i, last_occ_expanded_j)  # (last_occ_expanded_i > last_occ_expanded_j)
            & (last_occ_expanded_j > -1)  # j can suppress i only if i was previously occluded
        )
        suppress_j_mask = (
            overlapping_pairs
            & cmp_op(last_occ_expanded_j, last_occ_expanded_i)
            & (last_occ_expanded_i > -1)  # i can suppress j only if j was previously occluded
        )
        # Apply suppression
        to_suppress = suppress_i_mask.any(dim=1) | suppress_j_mask.any(dim=0)
        return to_suppress
    def _suppress_overlapping_based_on_recent_occlusion(
        self,
        inference_session: Sam3VideoInferenceSession,
        frame_idx: int,
        tracker_low_res_masks_global: Tensor,
        tracker_metadata_new: dict[str, Any],
        obj_ids_newly_removed: set[int],
        reverse: bool = False,
    ):
        """
        Suppress overlapping masks based on the most recent occlusion information. If an object is removed by hotstart, we always suppress it if it overlaps with any other object.

        Args:
            inference_session (Sam3VideoInferenceSession): The current inference session.
            frame_idx (int): The current frame index.
            tracker_low_res_masks_global (Tensor): The low-resolution masks for the current frame.
                Modified in place for suppressed objects.
            tracker_metadata_new (dict[str, Any]): The metadata for the current frame; its
                `"obj_id_to_last_occluded"` entry is updated in place.
            obj_ids_newly_removed (set[int]): The object IDs that have been removed.
            reverse (bool): Whether propagation runs backward in time.

        Return:
            Tensor: The updated low-resolution masks with some objects suppressed.
        """
        obj_ids_global = inference_session.obj_ids
        binary_tracker_low_res_masks_global = tracker_low_res_masks_global > 0
        batch_size = tracker_low_res_masks_global.size(0)
        if batch_size > 0:
            NEVER_OCCLUDED = -1
            ALWAYS_OCCLUDED = 100000  # This value should be larger than any possible frame index, indicates that the object was removed by hotstart logic
            # Per-object index of the most recent occlusion frame (NEVER_OCCLUDED when unseen,
            # ALWAYS_OCCLUDED when removed by hotstart so the object always loses ties).
            last_occluded_prev = torch.cat(
                [
                    inference_session.obj_id_to_last_occluded.get(
                        obj_id,
                        torch.full(
                            (1,),
                            fill_value=(NEVER_OCCLUDED if obj_id not in obj_ids_newly_removed else ALWAYS_OCCLUDED),
                            device=binary_tracker_low_res_masks_global.device,
                            dtype=torch.long,
                        ),
                    )
                    for obj_id in obj_ids_global
                ],
                dim=0,
            )
            prompt_ids_global = torch.tensor(
                [inference_session.obj_id_to_prompt_id[obj_id] for obj_id in obj_ids_global],
                device=binary_tracker_low_res_masks_global.device,
                dtype=torch.long,
            )
            to_suppress = torch.zeros(
                batch_size,
                device=binary_tracker_low_res_masks_global.device,
                dtype=torch.bool,
            )
            # Only suppress overlaps within the same prompt group.
            unique_prompts = prompt_ids_global.unique(sorted=True)
            for prompt_id in unique_prompts:
                prompt_mask = prompt_ids_global == prompt_id
                prompt_indices = torch.nonzero(prompt_mask, as_tuple=True)[0]
                if prompt_indices.numel() <= 1:
                    continue
                prompt_masks = binary_tracker_low_res_masks_global[prompt_indices]
                prompt_last_occ = last_occluded_prev[prompt_indices]
                prompt_obj_ids = [obj_ids_global[idx] for idx in prompt_indices.tolist()]
                prompt_suppress = self._get_objects_to_suppress_based_on_most_recently_occluded(
                    prompt_masks,
                    prompt_last_occ,
                    prompt_obj_ids,
                    reverse,
                )
                to_suppress[prompt_indices] = prompt_suppress
            # Update metadata with occlusion information
            is_obj_occluded = ~(binary_tracker_low_res_masks_global.any(dim=(-1, -2)))
            is_obj_occluded_or_suppressed = is_obj_occluded | to_suppress
            last_occluded_new = last_occluded_prev.clone()
            last_occluded_new[is_obj_occluded_or_suppressed] = frame_idx
            # Slice out the last occluded frame for each object
            tracker_metadata_new["obj_id_to_last_occluded"] = {
                obj_id: last_occluded_new[obj_idx : obj_idx + 1] for obj_idx, obj_id in enumerate(obj_ids_global)
            }
            # Zero out suppressed masks before memory encoding
            NO_OBJ_LOGIT = -10
            tracker_low_res_masks_global[to_suppress] = NO_OBJ_LOGIT
        return tracker_low_res_masks_global
def _apply_non_overlapping_constraints(self, pred_masks):
"""
Apply non-overlapping constraints to the object scores in pred_masks. Here we
keep only the highest scoring object at each spatial location in pred_masks.
"""
batch_size = pred_masks.size(0)
if batch_size == 1:
return pred_masks
device = pred_masks.device
# "max_obj_inds": object index of the object with the highest score at each location
max_obj_inds = torch.argmax(pred_masks, dim=0, keepdim=True)
# "batch_obj_inds": object index of each object slice (along dim 0) in `pred_masks`
batch_obj_inds = torch.arange(batch_size, device=device)[:, None, None, None]
keep = max_obj_inds == batch_obj_inds
# suppress overlapping regions' scores below -10.0 so that the foreground regions
# don't overlap (here sigmoid(-10.0)=4.5398e-05)
pred_masks = torch.where(keep, pred_masks, torch.clamp(pred_masks, max=-10.0))
return pred_masks
def _suppress_shrinked_masks(self, pred_masks, new_pred_masks, shrink_threshold=0.3):
area_before = (pred_masks > 0).sum(dim=(-1, -2))
area_after = (new_pred_masks > 0).sum(dim=(-1, -2))
area_before = torch.clamp(area_before, min=1.0)
area_ratio = area_after / area_before
keep = area_ratio >= shrink_threshold
keep_mask = keep[..., None, None].expand_as(pred_masks)
pred_masks_after = torch.where(keep_mask, pred_masks, torch.clamp(pred_masks, max=-10.0))
return pred_masks_after
def _suppress_object_pw_area_shrinkage(
self,
pred_masks,
prompt_ids: list[int] | None = None,
):
"""
This function suppresses masks that shrink in area after applying pixelwise non-overlapping constraints.
When `prompt_ids` are provided, constraints are enforced independently per prompt group.
"""
if prompt_ids is None:
return self._suppress_object_pw_area_shrinkage_impl(pred_masks)
if len(prompt_ids) != pred_masks.size(0):
raise ValueError("prompt_ids must have the same length as pred_masks")
prompt_ids_tensor = torch.tensor(prompt_ids, device=pred_masks.device, dtype=torch.long)
pred_masks_grouped = pred_masks.clone()
for prompt_id in prompt_ids_tensor.unique(sorted=True):
indices = torch.nonzero(prompt_ids_tensor == prompt_id, as_tuple=True)[0]
if indices.numel() == 0:
continue
pred_masks_grouped[indices] = self._suppress_object_pw_area_shrinkage_impl(pred_masks_grouped[indices])
return pred_masks_grouped
def _suppress_object_pw_area_shrinkage_impl(self, pred_masks):
if pred_masks.size(0) <= 1:
return pred_masks
pixel_level_non_overlapping_masks = self._apply_non_overlapping_constraints(pred_masks)
pred_masks = self._suppress_shrinked_masks(pred_masks, pixel_level_non_overlapping_masks)
return pred_masks
    def _tracker_update_memories(
        self,
        inference_session: Sam3VideoInferenceSession,
        frame_idx: int,
        low_res_masks: Tensor,
        reconditioned_masks: dict[int, Tensor] | None = None,
    ):
        """
        Run Sam3Tracker memory encoder, enforcing non-overlapping constraints globally.
        Now with batched memory encoding for better performance.

        Args:
            inference_session: The inference session state
            frame_idx: Current frame index
            low_res_masks: Low-resolution tracker masks for all objects
            reconditioned_masks: Optional dict of obj_idx -> high_res_mask for objects that
                should use detection masks instead of tracker masks
        """
        if len(inference_session.obj_ids) == 0:
            return
        if reconditioned_masks is None:
            reconditioned_masks = {}
        # Add a channel dimension: (num_objects, H, W) -> (num_objects, 1, H, W)
        high_res_masks = low_res_masks.unsqueeze(1)
        # Override with detection masks for reconditioned objects
        for obj_idx, recond_mask in reconditioned_masks.items():
            high_res_masks[obj_idx] = recond_mask.float()
            # Mark as conditioning frame for reconditioned objects
            output_dict = inference_session.output_dict_per_obj[obj_idx]
            if frame_idx in output_dict["non_cond_frame_outputs"]:
                current_out = output_dict["non_cond_frame_outputs"].pop(frame_idx)
                output_dict["cond_frame_outputs"][frame_idx] = current_out
        # Apply non-overlapping constraints before memory encoding.
        # Constraints are enforced independently per prompt group.
        # Every object ID has a prompt_id assigned when it's created.
        prompt_ids_for_objects = [
            inference_session.obj_id_to_prompt_id[obj_id] for obj_id in inference_session.obj_ids
        ]
        high_res_masks = self._suppress_object_pw_area_shrinkage(high_res_masks, prompt_ids_for_objects)
        # Use mask areas as a proxy for object scores: +10 if any foreground pixel, else -10
        object_score_logits = torch.where((high_res_masks > 0).any(dim=(-1, -2)), 10.0, -10.0)
        # Run memory encoder in batch for all objects at once
        num_objects = len(inference_session.obj_ids)
        object_score_logits_batched = object_score_logits.unsqueeze(-1)  # Shape: (num_objects, 1)
        # Encode memories for all objects in one batch call
        maskmem_features_batched, maskmem_pos_enc_batched = self.run_memory_encoder(
            inference_session,
            frame_idx,
            high_res_masks,  # Shape: (num_objects, 1, H, W)
            object_score_logits_batched,  # Shape: (num_objects, 1)
        )
        # Split and store encoded memories per object
        for obj_idx in range(num_objects):
            output_dict = inference_session.output_dict_per_obj[obj_idx]
            # Extract per-object memory from batched result (objects laid out along dim 1)
            maskmem_features = maskmem_features_batched[:, obj_idx : obj_idx + 1]
            maskmem_pos_enc = maskmem_pos_enc_batched[:, obj_idx : obj_idx + 1]
            for storage_key in ["cond_frame_outputs", "non_cond_frame_outputs"]:
                if frame_idx not in output_dict[storage_key]:
                    continue
                current_out = output_dict[storage_key][frame_idx]
                current_out["maskmem_features"] = maskmem_features
                current_out["maskmem_pos_enc"] = maskmem_pos_enc
    def run_tracker_update_planning_phase(
        self,
        inference_session: Sam3VideoInferenceSession,
        frame_idx: int,
        reverse: bool,
        det_out: dict[str, Tensor],
        tracker_low_res_masks_global: Tensor,
        tracker_obj_scores_global: Tensor,
        det_idx_to_prompt_id: dict[int, int],
        streaming: bool = False,
    ):
        """
        Plan the per-frame masklet update and run this frame's memory encoding.

        Matches the merged detections against the tracker-propagated masks, decides
        which objects to add / remove / recondition (hotstart heuristics included),
        then encodes the frame into SAM2 memory. Metadata is computed on deep copies
        and NOT written back to the session here; the caller applies it.

        Returns:
            tracker_update_plan (dict): planned additions/removals/reconditioning,
                consumed by `run_tracker_update_execution_phase` and `build_outputs`.
            tracker_metadata_new (dict): updated copies of the per-object metadata
                ("obj_ids", "obj_id_to_score", "max_obj_id", ...).
        """
        # initialize new metadata from previous metadata (its values will be updated later)
        tracker_metadata_new = {
            "obj_ids": deepcopy(inference_session.obj_ids),
            "obj_id_to_score": deepcopy(inference_session.obj_id_to_score),
            "obj_id_to_tracker_score_frame_wise": deepcopy(inference_session.obj_id_to_tracker_score_frame_wise),
            "obj_id_to_last_occluded": {},  # will be filled later
            "max_obj_id": deepcopy(inference_session.max_obj_id),
        }
        # Initialize reconditioned_obj_ids early to avoid UnboundLocalError
        reconditioned_obj_ids = set()
        # Step 1: make the update plan and resolve heuristics
        det_mask_preds: Tensor = det_out["mask"]  # low-res mask logits
        det_scores: Tensor = det_out["scores"].float()  # Keep as tensor!
        # det_idx_to_prompt_id maps every detection index to its prompt_id (created by _merge_detections_from_prompts).
        det_prompt_ids = (
            torch.tensor(
                [det_idx_to_prompt_id[idx] for idx in range(det_mask_preds.size(0))],
                device=det_mask_preds.device,
                dtype=torch.long,
            )
            if det_mask_preds.size(0) > 0
            else torch.empty(0, device=det_mask_preds.device, dtype=torch.long)
        )
        # Get prompt IDs for tracked objects.
        trk_prompt_ids = (
            torch.tensor(
                [inference_session.obj_id_to_prompt_id[obj_id] for obj_id in inference_session.obj_ids],
                device=tracker_low_res_masks_global.device
                if tracker_low_res_masks_global.numel() > 0
                else det_mask_preds.device,
                dtype=torch.long,
            )
            if tracker_low_res_masks_global.numel() > 0
            else torch.empty(0, device=det_mask_preds.device, dtype=torch.long)
        )
        # a) match FA and SAM2 masks and find new objects
        (
            new_det_out_inds,
            unmatched_trk_obj_ids,
            det_to_matched_trk_obj_ids,
            trk_id_to_max_iou_high_conf_det,
            empty_trk_obj_ids,
        ) = self._associate_det_trk(
            det_masks=det_mask_preds,
            det_scores=det_scores,
            trk_masks=tracker_low_res_masks_global,
            trk_obj_ids=inference_session.obj_ids,
            det_prompt_ids=det_prompt_ids,
            trk_prompt_ids=trk_prompt_ids,
        )
        # check whether we've hit the maximum number of objects we can track (and if so, drop some detections)
        prev_obj_num = len(inference_session.obj_ids)
        new_det_num = len(new_det_out_inds)
        num_obj_dropped_due_to_limit = 0
        if prev_obj_num + new_det_num > self.max_num_objects:
            logger.warning(f"hitting {self.max_num_objects=} with {new_det_num=} and {prev_obj_num=}")
            new_det_num_to_keep = self.max_num_objects - prev_obj_num
            num_obj_dropped_due_to_limit = new_det_num - new_det_num_to_keep
            # Keep top scoring detections
            new_det_inds_tensor = torch.tensor(new_det_out_inds, dtype=torch.long, device=det_scores.device)
            scores_for_new_dets = det_scores[new_det_inds_tensor]
            _, top_inds = torch.topk(scores_for_new_dets, k=new_det_num_to_keep, largest=True)
            new_det_out_inds = [new_det_out_inds[i] for i in top_inds]
            new_det_num = len(new_det_out_inds)
        # assign object IDs to new detections (IDs grow monotonically from max_obj_id)
        new_det_start_obj_id = inference_session.max_obj_id + 1
        new_det_obj_ids = list(range(new_det_start_obj_id, new_det_start_obj_id + new_det_num))
        # Assign prompt IDs to new objects based on which prompt detected them.
        for obj_id, det_idx in zip(new_det_obj_ids, new_det_out_inds):
            prompt_id = det_idx_to_prompt_id[det_idx]
            inference_session.obj_id_to_prompt_id[obj_id] = prompt_id
        # b) handle hotstart heuristics to remove objects
        extra_metadata_new = deepcopy(
            {
                "obj_first_frame_idx": inference_session.obj_first_frame_idx,
                "unmatched_frame_inds": inference_session.unmatched_frame_inds,
                "trk_keep_alive": inference_session.trk_keep_alive,
                "overlap_pair_to_frame_inds": inference_session.overlap_pair_to_frame_inds,
                "removed_obj_ids": inference_session.removed_obj_ids,
                "suppressed_obj_ids": inference_session.suppressed_obj_ids,
            }
        )
        obj_ids_newly_removed, extra_metadata_new = self._process_hotstart(
            inference_session=inference_session,
            frame_idx=frame_idx,
            reverse=reverse,
            det_to_matched_trk_obj_ids=det_to_matched_trk_obj_ids,
            new_det_obj_ids=new_det_obj_ids,
            empty_trk_obj_ids=empty_trk_obj_ids,
            unmatched_trk_obj_ids=unmatched_trk_obj_ids,
            extra_metadata=extra_metadata_new,
            streaming=streaming,
        )
        tracker_metadata_new["extra_metadata"] = extra_metadata_new
        # Step 3 (optional): prepare reconditioned masks based on high-confidence detections
        reconditioned_masks = {}
        # NOTE(review): redundant with the identical initialization near the top of the method -- harmless.
        reconditioned_obj_ids = set()
        should_recondition_periodic = (
            self.recondition_every_nth_frame > 0
            and frame_idx % self.recondition_every_nth_frame == 0
            and len(trk_id_to_max_iou_high_conf_det) > 0
        )
        if should_recondition_periodic:
            reconditioned_masks, reconditioned_obj_ids = self._prepare_recondition_masks(
                inference_session=inference_session,
                frame_idx=frame_idx,
                det_out=det_out,
                trk_masks=tracker_low_res_masks_global,
                trk_id_to_max_iou_high_conf_det=trk_id_to_max_iou_high_conf_det,
                tracker_obj_scores_global=tracker_obj_scores_global,
            )
        tracker_update_plan = {
            "new_det_out_inds": new_det_out_inds,  # List[int]
            "new_det_obj_ids": new_det_obj_ids,  # List[int]
            "unmatched_trk_obj_ids": unmatched_trk_obj_ids,  # List[int]
            "det_to_matched_trk_obj_ids": det_to_matched_trk_obj_ids,  # dict
            "obj_ids_newly_removed": obj_ids_newly_removed,  # set
            "num_obj_dropped_due_to_limit": num_obj_dropped_due_to_limit,  # int
            "trk_id_to_max_iou_high_conf_det": trk_id_to_max_iou_high_conf_det,  # dict
            "reconditioned_obj_ids": reconditioned_obj_ids,  # set
        }
        # Step 4: Run SAM2 memory encoder on the current frame's prediction masks
        # This uses tracker masks for most objects, but detection masks for reconditioned objects
        batch_size = tracker_low_res_masks_global.size(0)
        if batch_size > 0:
            if self.suppress_overlapping_based_on_recent_occlusion_threshold > 0.0:
                # NOTE: tracker_low_res_masks_global is updated in-place then returned
                tracker_low_res_masks_global = self._suppress_overlapping_based_on_recent_occlusion(
                    inference_session=inference_session,
                    frame_idx=frame_idx,
                    tracker_low_res_masks_global=tracker_low_res_masks_global,
                    tracker_metadata_new=tracker_metadata_new,
                    obj_ids_newly_removed=obj_ids_newly_removed,
                    reverse=reverse,
                )
            # Unified memory encoding: uses detection masks for reconditioned objects
            self._tracker_update_memories(
                inference_session=inference_session,
                frame_idx=frame_idx,
                low_res_masks=tracker_low_res_masks_global,
                reconditioned_masks=reconditioned_masks,
            )
        # Step 5: update the SAM2 metadata based on the update plan
        updated_obj_ids = tracker_metadata_new["obj_ids"]
        if len(new_det_obj_ids) > 0:
            updated_obj_ids = updated_obj_ids + new_det_obj_ids
        if len(obj_ids_newly_removed) > 0:
            updated_obj_ids = [obj_id for obj_id in updated_obj_ids if obj_id not in obj_ids_newly_removed]
        tracker_metadata_new["obj_ids"] = updated_obj_ids
        # update object scores and the maximum object ID assigned so far
        if len(new_det_obj_ids) > 0:
            # Index tensor with list of indices and convert to list
            new_det_scores = det_scores[
                torch.tensor(new_det_out_inds, dtype=torch.long, device=det_scores.device)
            ].tolist()
            tracker_metadata_new["obj_id_to_score"].update(zip(new_det_obj_ids, new_det_scores))
            # tracker scores are not available for new objects, use det score instead.
            tracker_metadata_new["obj_id_to_tracker_score_frame_wise"][frame_idx].update(
                zip(new_det_obj_ids, new_det_scores)
            )
            tracker_metadata_new["max_obj_id"] = max(
                tracker_metadata_new["max_obj_id"],
                max(new_det_obj_ids),
            )
        # for removed objects, we set their scores to a very low value (-1e4) but still
        # keep them in "obj_id_to_score" (it's easier to handle outputs this way)
        for obj_id in obj_ids_newly_removed:
            tracker_metadata_new["obj_id_to_score"][obj_id] = -1e4
            tracker_metadata_new["obj_id_to_tracker_score_frame_wise"][frame_idx][obj_id] = -1e4
            tracker_metadata_new["obj_id_to_last_occluded"].pop(obj_id, None)
        return tracker_update_plan, tracker_metadata_new
def _tracker_add_new_objects(
self,
inference_session: Sam3VideoInferenceSession,
frame_idx: int,
new_obj_ids: list[int],
new_obj_masks: Tensor,
reverse: bool = False,
):
"""Add a new object to SAM2 inference states."""
new_obj_masks = new_obj_masks >= 0.5
for obj_id, mask in zip(new_obj_ids, new_obj_masks):
obj_idx = inference_session.obj_id_to_idx(obj_id)
inference_session.add_mask_inputs(obj_idx, frame_idx, mask.unsqueeze(0).unsqueeze(0))
inference_session.obj_with_new_inputs = list(new_obj_ids)
self.tracker_model(
inference_session=inference_session,
frame_idx=frame_idx,
reverse=reverse,
run_mem_encoder=True,
)
def run_tracker_update_execution_phase(
self,
inference_session: Sam3VideoInferenceSession,
frame_idx: int,
det_out: dict[str, Tensor],
tracker_update_plan: dict,
reverse: bool = False,
):
# initialize tracking scores with detection scores
new_det_out_inds: list[int] = tracker_update_plan["new_det_out_inds"]
new_det_obj_ids: list[int] = tracker_update_plan["new_det_obj_ids"]
obj_ids_newly_removed: set[int] = tracker_update_plan["obj_ids_newly_removed"]
# Step 1: add new objects from FA detection to SAM2 inference states
if len(new_det_out_inds) > 0:
new_det_out_inds_t = torch.tensor(new_det_out_inds, dtype=torch.long)
new_det_masks: Tensor = det_out["mask"][new_det_out_inds_t]
# initialize SAM2 with new object masks
self._tracker_add_new_objects(
inference_session=inference_session,
frame_idx=frame_idx,
new_obj_ids=new_det_obj_ids,
new_obj_masks=new_det_masks,
reverse=reverse,
)
# Step 2: remove from SAM2 inference states those objects removed by heuristics
for obj_id in obj_ids_newly_removed:
inference_session.remove_object(obj_id, strict=False) # implement remove_object in inference_session?
def build_outputs(
self,
inference_session: Sam3VideoInferenceSession,
det_out: dict[str, Tensor],
tracker_low_res_masks_global: Tensor,
tracker_update_plan: dict,
reconditioned_obj_ids: set | None = None,
):
"""
Build output dictionary with low-resolution masks.
Interpolation to video resolution is handled by the processor.
Returns:
obj_id_to_mask: dict mapping obj_id to low-res mask tensor (1, H_low, W_low)
"""
new_det_out_inds: list[int] = tracker_update_plan["new_det_out_inds"]
new_det_obj_ids: list[int] = tracker_update_plan["new_det_obj_ids"]
obj_id_to_mask = {} # obj_id --> low-res mask tensor
# Part 1: masks from tracker propagation (existing objects)
existing_masklet_obj_ids = inference_session.obj_ids
for obj_id, mask in zip(existing_masklet_obj_ids, tracker_low_res_masks_global):
obj_id_to_mask[int(obj_id)] = mask.unsqueeze(0) # (1, H_low, W_low)
# Part 2: masks from new detections
if len(new_det_out_inds) > 0:
new_det_out_inds_t = torch.tensor(new_det_out_inds, dtype=torch.long, device=det_out["mask"].device)
new_det_low_res_masks = det_out["mask"][new_det_out_inds_t]
# Apply hole filling to new detection masks
new_det_low_res_masks = fill_holes_in_mask_scores(
new_det_low_res_masks.unsqueeze(1),
max_area=self.fill_hole_area,
fill_holes=True,
remove_sprinkles=True,
).squeeze(1)
for obj_id, mask in zip(new_det_obj_ids, new_det_low_res_masks):
obj_id_to_mask[int(obj_id)] = mask.unsqueeze(0) # (1, H_low, W_low)
# Part 3: Override masks for reconditioned objects using detection masks
if reconditioned_obj_ids is not None and len(reconditioned_obj_ids) > 0:
trk_id_to_max_iou_high_conf_det = tracker_update_plan.get("trk_id_to_max_iou_high_conf_det", {})
for obj_id in reconditioned_obj_ids:
det_idx = trk_id_to_max_iou_high_conf_det.get(obj_id)
if det_idx is not None:
det_mask = det_out["mask"][det_idx].unsqueeze(0) # (1, H_low, W_low)
obj_id_to_mask[int(obj_id)] = det_mask
return obj_id_to_mask
def _merge_detections_from_prompts(
self,
all_detections: dict[int, dict[str, Tensor]],
inference_session: Sam3VideoInferenceSession,
) -> tuple[dict[str, Tensor], dict[int, int]]:
"""
Merge detections from multiple prompts into a single detection output.
Assigns unique object IDs and tracks which prompt detected each object.
Args:
all_detections: Dictionary mapping prompt_id to detection outputs
inference_session: Session to track obj_id -> prompt_id mapping
Returns:
Tuple of (merged_det_out, det_idx_to_prompt_id) where det_idx_to_prompt_id
maps detection index in the merged output to the prompt that produced it.
"""
merged_bboxes, merged_masks, merged_scores = [], [], []
det_idx_to_prompt_id = {}
det_idx = 0
for prompt_id, det_out in all_detections.items():
num_dets = len(det_out["bbox"])
if num_dets > 0:
merged_bboxes.append(det_out["bbox"])
merged_masks.append(det_out["mask"])
merged_scores.append(det_out["scores"])
for i in range(num_dets):
det_idx_to_prompt_id[det_idx + i] = prompt_id
det_idx += num_dets
if merged_bboxes:
merged_det_out = {
"bbox": torch.cat(merged_bboxes),
"mask": torch.cat(merged_masks),
"scores": torch.cat(merged_scores),
}
else:
device = inference_session.inference_device
merged_det_out = {
"bbox": torch.zeros(0, 4, device=device),
"mask": torch.zeros(0, self.low_res_mask_size, self.low_res_mask_size, device=device),
"scores": torch.zeros(0, device=device),
}
return merged_det_out, det_idx_to_prompt_id
def _det_track_one_frame(
self,
inference_session: Sam3VideoInferenceSession,
frame_idx: int,
reverse: bool,
streaming: bool = False,
):
"""
This function handles one-step inference for the DenseTracking model.
- `inference_session` contains all the information needed for inference, including the input video frames, text prompts, and any other relevant metadata
- The function processes detection and tracking for a single frame
- `streaming` indicates whether this is streaming inference mode (frames provided one at a time)
"""
pixel_values = inference_session.get_frame(frame_idx).unsqueeze(0)
vision_embeds = self.detector_model.get_vision_features(pixel_values=pixel_values)
# Step 1: run detection for all prompts (efficiently reusing vision embeddings)
# Returns dict mapping prompt_id to detection outputs
all_detections = self.run_detection(
inference_session=inference_session,
vision_embeds=vision_embeds,
)
# Merge detections from all prompts into single output for tracking
det_out, det_idx_to_prompt_id = self._merge_detections_from_prompts(all_detections, inference_session)
# share the vision encoder outputs from the detector to the tracker
vision_feats, vision_pos_embeds = self.get_vision_features_for_tracker(
vision_embeds=vision_embeds,
)
inference_session.cache.cache_vision_features(
frame_idx, {"vision_feats": vision_feats, "vision_pos_embeds": vision_pos_embeds}
)
# Step 2: propagate SAM2 states to get the SAM2 prediction masks.
# The returned `tracker_low_res_masks_global` contains the masklet predictions.
# Note that this step only runs the SAM2 propagation step, but doesn't encode new memory for the predicted masks;
# we defer memory encoding to `run_tracker_update_execution_phase` after resolving all heuristics.
tracker_low_res_masks_global, tracker_obj_scores_global = self.run_tracker_propagation(
inference_session=inference_session, frame_idx=frame_idx, reverse=reverse
)
# Step 3: based on detection outputs and the propagated SAM2 prediction masks, we make plans
# for SAM2 masklet updates (i.e. which objects to add and remove, etc).
# We also run SAM2 memory encoder in this step to resolve non-overlapping constraints.
# **This step should involve all the heuristics needed for any updates.**
# This step also generates the new masklet metadata `tracker_metadata_new` (based on its previous version).
tracker_update_plan, tracker_metadata_new = self.run_tracker_update_planning_phase(
inference_session=inference_session,
frame_idx=frame_idx,
reverse=reverse,
det_out=det_out,
tracker_low_res_masks_global=tracker_low_res_masks_global,
tracker_obj_scores_global=tracker_obj_scores_global,
det_idx_to_prompt_id=det_idx_to_prompt_id,
streaming=streaming,
)
# Step 4: based on `tracker_update_plan`, execute the update w.r.t. the tracker states
self.run_tracker_update_execution_phase(
inference_session=inference_session,
frame_idx=frame_idx,
reverse=reverse,
det_out=det_out,
tracker_update_plan=tracker_update_plan,
)
# Step 5: finally, build the outputs for this frame
reconditioned_obj_ids = tracker_update_plan["reconditioned_obj_ids"]
obj_id_to_mask = self.build_outputs(
inference_session=inference_session,
det_out=det_out,
tracker_low_res_masks_global=tracker_low_res_masks_global,
tracker_update_plan=tracker_update_plan,
reconditioned_obj_ids=reconditioned_obj_ids,
)
obj_id_to_score = tracker_metadata_new["obj_id_to_score"]
# add tracker scores to metadata, it should be fired for frames except the first frame
if tracker_obj_scores_global.shape[0] > 0:
# Convert tracker_obj_scores_global to sigmoid scores before updating
tracker_obj_scores_global = tracker_obj_scores_global.sigmoid().tolist()
tracker_obj_ids = inference_session.obj_ids
tracker_metadata_new["obj_id_to_tracker_score_frame_wise"][frame_idx].update(
dict(zip(tracker_obj_ids, tracker_obj_scores_global))
)
return (
obj_id_to_mask, # a dict: obj_id --> output mask
obj_id_to_score, # a dict: obj_id --> output score (prob)
tracker_metadata_new,
tracker_obj_scores_global, # a dict: obj_id --> tracker frame-level scores
)
@torch.inference_mode()
@auto_docstring(custom_intro="Propagate the objects through a streamed video frame.")
def forward(
self,
inference_session: Sam3VideoInferenceSession,
frame_idx: int | None = None,
frame: torch.Tensor | None = None,
reverse: bool = False,
**kwargs,
):
r"""
inference_session (`Sam3VideoInferenceSession`):
The video inference session object.
frame_idx (`int`, *optional*):
The index of the frame on which to run inference. No need to provide when inferring
on a new streamed frame.
frame (`torch.Tensor`, *optional*):
The frame to process. Provide when streaming.
reverse (`bool`, *optional*, defaults to `False`):
Whether to propagate in reverse.
"""
if frame is not None:
frame_idx = inference_session.add_new_frame(frame, frame_idx)
if frame_idx is None:
raise ValueError("frame_idx must be provided when frame is not provided for streaming.")
(
obj_id_to_mask,
obj_id_to_score,
tracker_metadata_new,
_,
) = self._det_track_one_frame(
inference_session=inference_session,
frame_idx=frame_idx,
reverse=reverse,
streaming=frame is not None,
)
# use a dummy string in "previous_stages_out" to indicate this frame has outputs
# inference_session.previous_stages_out[frame_idx] = "_THIS_FRAME_HAS_OUTPUTS_"
extra_metadata = tracker_metadata_new["extra_metadata"]
removed_obj_ids = extra_metadata["removed_obj_ids"]
# Update inference session state
inference_session.obj_id_to_score = obj_id_to_score
inference_session.obj_id_to_tracker_score_frame_wise = tracker_metadata_new[
"obj_id_to_tracker_score_frame_wise"
]
inference_session.obj_id_to_last_occluded = tracker_metadata_new["obj_id_to_last_occluded"]
inference_session.max_obj_id = tracker_metadata_new["max_obj_id"]
inference_session.obj_ids = list(tracker_metadata_new["obj_ids"])
inference_session.obj_first_frame_idx = extra_metadata["obj_first_frame_idx"]
inference_session.unmatched_frame_inds = extra_metadata["unmatched_frame_inds"]
inference_session.trk_keep_alive = extra_metadata["trk_keep_alive"]
inference_session.overlap_pair_to_frame_inds = extra_metadata["overlap_pair_to_frame_inds"]
inference_session.removed_obj_ids = removed_obj_ids
inference_session.suppressed_obj_ids[frame_idx] = extra_metadata["suppressed_obj_ids"][frame_idx]
return Sam3VideoSegmentationOutput(
object_ids=list(tracker_metadata_new["obj_ids"]),
obj_id_to_mask=obj_id_to_mask,
obj_id_to_score=obj_id_to_score,
obj_id_to_tracker_score=tracker_metadata_new["obj_id_to_tracker_score_frame_wise"][frame_idx],
removed_obj_ids=removed_obj_ids,
suppressed_obj_ids=extra_metadata["suppressed_obj_ids"][frame_idx],
frame_idx=frame_idx,
)
def _get_processing_order(
self,
inference_session: Sam3VideoInferenceSession,
start_frame_idx: int,
max_frame_num_to_track: int | None = None,
reverse: bool = False,
):
num_frames = inference_session.num_frames
if max_frame_num_to_track is None:
# default: track all the frames in the video
max_frame_num_to_track = num_frames
if reverse:
end_frame_idx = start_frame_idx - max_frame_num_to_track
end_frame_idx = max(end_frame_idx, 0)
processing_order = range(start_frame_idx - 1, end_frame_idx - 1, -1)
else:
end_frame_idx = start_frame_idx + max_frame_num_to_track
end_frame_idx = min(end_frame_idx, num_frames - 1)
processing_order = range(start_frame_idx, end_frame_idx + 1)
return processing_order, end_frame_idx
    @torch.inference_mode()
    @auto_docstring(
        custom_intro="""
        Propagate the prompts to get grounding results for the entire video. Used when initializing an inference session with a whole video.
        Yields Sam3VideoSegmentationOutput for each frame.
        """
    )
    def propagate_in_video_iterator(
        self,
        inference_session: Sam3VideoInferenceSession,
        start_frame_idx: int = 0,
        max_frame_num_to_track: int | None = None,
        reverse: bool = False,
        show_progress_bar: bool = False,
    ) -> Iterator[Sam3VideoSegmentationOutput]:
        r"""
        inference_session (`Sam3VideoInferenceSession`):
            The video inference session object.
        start_frame_idx (`int`, *optional*, defaults to `0`):
            The starting frame index for propagation.
        max_frame_num_to_track (`int`, *optional*):
            The maximum number of frames to track. If not provided, all frames in the video will be tracked.
        reverse (`bool`, *optional*, defaults to `False`):
            Whether to propagate in reverse.
        show_progress_bar (`bool`, *optional*, defaults to `False`):
            Whether to show a progress bar during propagation.
        """
        processing_order, end_frame_idx = self._get_processing_order(
            inference_session,
            start_frame_idx,
            max_frame_num_to_track,
            reverse=reverse,
        )
        # With a positive `hotstart_delay`, outputs are held back in a sliding buffer of up
        # to that many frames before being yielded, so objects that the hotstart heuristics
        # remove shortly after they appear are recorded before the caller sees those frames.
        hotstart_buffer = []
        for frame_idx in tqdm(processing_order, desc="propagate in video", disable=not show_progress_bar):
            out = self(inference_session=inference_session, frame_idx=frame_idx, reverse=reverse)
            if self.hotstart_delay > 0:
                # accumulate the outputs for the first `hotstart_delay` frames
                hotstart_buffer.append(out)
                # update the object IDs removed by hotstart so that we don't output them
                inference_session.hotstart_removed_obj_ids.update(out.removed_obj_ids)
                if frame_idx == end_frame_idx:
                    # we reached the end of propagation -- yield all frames in the buffer
                    yield_list = hotstart_buffer
                    hotstart_buffer = []
                elif len(hotstart_buffer) >= self.hotstart_delay:
                    # we have enough frames -- yield and remove the first (oldest) frame from the buffer
                    yield_list = hotstart_buffer[:1]
                    hotstart_buffer = hotstart_buffer[1:]
                else:
                    # not enough frames yet -- skip yielding
                    yield_list = []
            else:
                # no hotstart buffering -- output the current frame immediately
                yield_list = [out]
            yield from yield_list
@torch.jit.script
def fast_diag_box_iou(boxes1, boxes2):
    """Row-wise ("diagonal") IoU of two equally sized batches of boxes.

    Boxes are laid out as (x_min, y_min, x_max, y_max); entry i of the result is
    the IoU of boxes1[i] with boxes2[i] -- not a full pairwise matrix.
    """
    mins1 = boxes1[:, :2]
    maxs1 = boxes1[:, 2:]
    mins2 = boxes2[:, :2]
    maxs2 = boxes2[:, 2:]
    areas1 = (maxs1 - mins1).prod(-1)
    areas2 = (maxs2 - mins2).prod(-1)
    # intersection rectangle: max of the mins to min of the maxs, clamped at empty
    overlap_lo = torch.max(mins1, mins2)
    overlap_hi = torch.min(maxs1, maxs2)
    intersection = (overlap_hi - overlap_lo).clamp(min=0).prod(-1)
    union = areas1 + areas2 - intersection
    return intersection / union
def mask_iou(pred_masks: torch.Tensor, gt_masks: torch.Tensor) -> torch.Tensor:
    """
    Pairwise IoU between two stacks of binary masks.

    Args:
        pred_masks: (N, H, W) bool Tensor of binary predicted segmentation masks
        gt_masks: (M, H, W) bool Tensor of binary ground truth segmentation masks

    Returns:
        (N, M) float Tensor where entry (i, j) is the IoU of pred_masks[i] and gt_masks[j]
    """
    num_pred, height, width = pred_masks.shape
    num_gt = gt_masks.shape[0]
    # Flatten and broadcast (N, 1, H*W) against (1, M, H*W) to cover all pairs at once.
    preds_flat = pred_masks.view(num_pred, 1, height * width)
    gts_flat = gt_masks.view(1, num_gt, height * width)
    intersections = (preds_flat & gts_flat).sum(dim=2).float()
    unions = (preds_flat | gts_flat).sum(dim=2).float()
    # clamp avoids 0/0 when both masks are empty (such pairs get IoU 0)
    return intersections / unions.clamp(min=1)
def nms_masks(
    pred_probs: torch.Tensor,
    pred_masks: torch.Tensor,
    prob_threshold: float,
    iou_threshold: float,
) -> torch.Tensor:
    """
    Score-threshold detections, then run mask-IoU NMS on the survivors.

    Args:
        pred_probs: (num_det,) float Tensor of detection scores (probabilities)
        pred_masks: (num_det, H_mask, W_mask) float Tensor of mask scores (binarized at > 0)
        prob_threshold: detections at or below this score are discarded before NMS
        iou_threshold: mask IoU threshold used for NMS

    Returns:
        (num_det,) bool Tensor marking detections kept after score thresholding + NMS
    """
    above_threshold = pred_probs > prob_threshold  # (num_det,)
    surviving_probs = pred_probs[above_threshold]  # (num_valid,)
    surviving_masks = pred_masks[above_threshold] > 0  # (num_valid, H_mask, W_mask)
    if surviving_probs.numel() == 0:
        # nothing passed the score filter; the all-False mask doubles as the answer
        return above_threshold
    pairwise_ious = mask_iou(surviving_masks, surviving_masks)  # (num_valid, num_valid)
    # NMS itself requires the optional kernels library; without it, keep every survivor.
    _load_cv_utils_kernel_once()
    if not cv_utils_kernel:
        return above_threshold
    try:
        kept_inds = cv_utils_kernel.generic_nms(pairwise_ious, surviving_probs, iou_threshold, use_iou_matrix=True)
    except Exception as e:
        logger.warning_once(f"Failed to run NMS using kernels library: {e}. NMS post-processing will be skipped.")
        return above_threshold
    # survivor_rank maps each detection to its index among the survivors (-1 if filtered out)
    survivor_rank = torch.where(above_threshold, above_threshold.cumsum(dim=0) - 1, -1)  # (num_det,)
    return torch.isin(survivor_rank, kept_inds)  # (num_det,)
def fill_holes_in_mask_scores(mask, max_area, fill_holes=True, remove_sprinkles=True):
    """
    Clean up mask score maps by flipping tiny connected components.

    Background components with area <= `max_area` become weak foreground (score 0.1)
    when `fill_holes` is set. Foreground components no larger than both `max_area`
    and half of the mask's total foreground area become weak background (score -0.1)
    when `remove_sprinkles` is set -- the half-area cap keeps genuinely tiny objects
    alive while sprinkles are removed.

    Connected components come from `_get_connected_components_with_padding`, which
    relies on the "cc_torch" package for speed. You can install it via
    (`TORCH_CUDA_ARCH_LIST=8.0` is for A100 GPUs):
    ```
    pip uninstall -y cc_torch; TORCH_CUDA_ARCH_LIST=8.0 9.0 pip install git+https://github.com/ronghanghu/cc_torch
    ```
    Otherwise it falls back to a slightly slower triton implementation, or skimage
    when the tensor is on CPU.
    """
    # A non-positive area budget disables all filtering.
    if max_area <= 0:
        return mask
    if fill_holes:
        # Small background islands turn into weak foreground (score 0.1).
        background = mask <= 0
        _, background_areas = _get_connected_components_with_padding(background)
        tiny_background = background & (background_areas <= max_area)
        mask = torch.where(tiny_background, 0.1, mask)
    if remove_sprinkles:
        # Small foreground islands turn into weak background (score -0.1); "small"
        # additionally means under half the per-mask foreground area so that
        # small-but-real objects survive.
        foreground = mask > 0
        area_cap = torch.sum(foreground, dim=(2, 3), keepdim=True, dtype=torch.int32)
        area_cap.floor_divide_(2).clamp_(max=max_area)
        _, foreground_areas = _get_connected_components_with_padding(foreground)
        tiny_foreground = foreground & (foreground_areas <= area_cap)
        mask = torch.where(tiny_foreground, -0.1, mask)
    return mask
def _get_connected_components_with_padding(mask):
    """
    Label connected components of a (B, 1, H, W) mask, padding H/W to even sizes.

    Returns `(labels, counts)` tensors shaped like `mask`. When the kernels
    library is unavailable or fails, dummy tensors are returned whose counts
    exceed any possible component area, so downstream filtering becomes a no-op.
    """
    mask = mask.to(torch.uint8)
    _, _, height, width = mask.shape
    # Try to use kernels for connected components, fallback if unavailable
    _load_cv_utils_kernel_once()
    if not cv_utils_kernel:
        # Fallback: counts larger than H*W can never satisfy an area threshold.
        labels = torch.zeros_like(mask, dtype=torch.int32)
        counts = torch.full_like(mask, fill_value=mask.shape[2] * mask.shape[3] + 1, dtype=torch.int32)
        return labels, counts
    # cc_torch requires even height and width, so pad by one pixel where needed.
    extra_row = height % 2
    extra_col = width % 2
    try:
        if extra_row == 0 and extra_col == 0:
            labels, counts = cv_utils_kernel.cc_2d(mask.contiguous(), get_counts=True)
        else:
            # F.pad order is (left, right, top, bottom); pad only right/bottom,
            # then crop the results back to the original size.
            padded = F.pad(mask, (0, extra_col, 0, extra_row), mode="constant", value=0)
            labels, counts = cv_utils_kernel.cc_2d(padded.contiguous(), get_counts=True)
            labels = labels[:, :, :height, :width]
            counts = counts[:, :, :height, :width]
    except Exception as e:
        logger.warning_once(
            f"Failed to compute connected components using kernels library: {e}. "
            "Hole filling and sprinkle removal will be skipped."
        )
        # Fallback: same no-op dummy outputs as above.
        labels = torch.zeros_like(mask, dtype=torch.int32)
        counts = torch.full_like(mask, fill_value=height * width + 1, dtype=torch.int32)
        return labels, counts
    return labels, counts
# Names re-exported as the public API of this module.
__all__ = [
    "Sam3VideoModel",
    "Sam3VideoPreTrainedModel",
    "Sam3VideoInferenceSession",
    "Sam3VideoSegmentationOutput",
]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/sam3_video/modeling_sam3_video.py",
"license": "Apache License 2.0",
"lines": 1765,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/sam3_video/processing_sam3_video.py | # Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Union
import torch
from torchvision.ops import masks_to_boxes
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, auto_docstring
from ...utils.import_utils import requires
from ...video_utils import VideoInput
from .modeling_sam3_video import Sam3VideoInferenceSession
@requires(backends=("torch",))
@auto_docstring
class Sam3VideoProcessor(ProcessorMixin):
def __init__(
self,
image_processor,
video_processor,
tokenizer,
target_size: int | None = None,
**kwargs,
):
r"""
target_size (`int`, *optional*):
The target size (target_size, target_size) to which the image will be resized.
"""
super().__init__(image_processor, video_processor, tokenizer, **kwargs)
self.target_size = target_size if target_size is not None else self.image_processor.size["height"]
@auto_docstring
def __call__(
self,
images: ImageInput | None = None,
segmentation_maps: ImageInput | None = None,
original_sizes: list[list[float]] | torch.Tensor | None = None,
return_tensors: str | TensorType | None = None,
**kwargs,
) -> BatchEncoding:
r"""
images (`ImageInput`, *optional*):
The image(s) to process.
segmentation_maps (`ImageInput`, *optional*):
The segmentation maps to process (optional, for image processor).
original_sizes (`list[list[float]]`, `torch.Tensor`, *optional*):
The original sizes of the images. Only used when images is not provided.
Returns:
A [`BatchEncoding`] with the following fields:
- `pixel_values` (`torch.Tensor`): The processed image(s).
- `original_sizes` (`list[list[float]]`): The original sizes of the images.
- `labels` (`torch.Tensor`, *optional*): The processed segmentation maps (if provided).
"""
if images is not None:
encoding_image_processor = self.image_processor(
images,
segmentation_maps=segmentation_maps,
return_tensors=return_tensors,
**kwargs,
)
elif original_sizes is not None:
if isinstance(original_sizes, torch.Tensor):
original_sizes = original_sizes.cpu().tolist()
encoding_image_processor = BatchEncoding({"original_sizes": original_sizes}, tensor_type=return_tensors)
else:
raise ValueError("Either images or original_sizes must be provided")
original_sizes = encoding_image_processor["original_sizes"]
# Check original_sizes is of length 1 or len(images)
if images is not None and len(original_sizes) != 1 and len(original_sizes) != len(images):
raise ValueError(
"original_sizes must be of length 1 or len(images). If you are passing a single image, you must pass a single original_size."
)
return encoding_image_processor
def add_text_prompt(self, inference_session: Sam3VideoInferenceSession, text: str | list[str]):
"""
Add text prompt(s) to the inference session.
Args:
inference_session (`Sam3VideoInferenceSession`): The inference session.
text (`str` or `list[str]`): The text prompt(s) to add.
Returns:
`Sam3VideoInferenceSession`: The inference session with the added text prompt(s).
"""
if isinstance(text, str):
text = [text]
prompt_ids = []
for prompt_text in text:
# Add prompt and get its ID (reuses existing if duplicate)
prompt_id = inference_session.add_prompt(prompt_text)
# Only encode if this is a new prompt (not already in prompt_input_ids)
if prompt_id not in inference_session.prompt_input_ids:
encoded_text = self.tokenizer(
prompt_text, return_tensors="pt", padding="max_length", max_length=32
).to(inference_session.inference_device)
inference_session.prompt_input_ids[prompt_id] = encoded_text.input_ids
inference_session.prompt_attention_masks[prompt_id] = encoded_text.attention_mask
prompt_ids.append(prompt_id)
return inference_session
def init_video_session(
self,
video: VideoInput | None = None,
inference_device: Union[str, "torch.device"] = "cpu",
inference_state_device: Union[str, "torch.device"] | None = None,
processing_device: Union[str, "torch.device"] | None = None,
video_storage_device: Union[str, "torch.device"] | None = None,
max_vision_features_cache_size: int = 1,
dtype: torch.dtype = torch.float32,
):
"""
Initializes a video session for inference.
If a video is provided (async inference), the video will be processed and stored on the `video_storage_device`.
Args:
video (`VideoInput`, *optional*):
The video to process. No need to provide when streaming.
inference_device (`str` or `torch.device`, *optional*, defaults to "cpu"):
The device to use for inference.
inference_state_device (`str` or `torch.device`, *optional*):
The device to store the inference state on.
processing_device (`str` or `torch.device`, *optional*):
The device to use for video processing.
video_storage_device (`str` or `torch.device`, *optional*):
The device to store the processed video frames on.
max_vision_features_cache_size (`int`, *optional*, defaults to 1):
The maximum number of vision features to cache.
dtype (`torch.dtype`, *optional*, defaults to `torch.float32`):
The torch dtype to use for the whole session.
"""
video_storage_device = video_storage_device if video_storage_device is not None else inference_device
inference_state_device = inference_state_device if inference_state_device is not None else inference_device
processing_device = processing_device if processing_device is not None else inference_device
pixel_values_video = None
video_height = None
video_width = None
if video is not None:
processed_video = self.video_processor(videos=video, device=processing_device, return_tensors="pt")
pixel_values_video = processed_video.pixel_values_videos[0]
video_height = processed_video.original_sizes[0][0]
video_width = processed_video.original_sizes[0][1]
inference_session = Sam3VideoInferenceSession(
video=pixel_values_video,
video_height=video_height,
video_width=video_width,
inference_device=inference_device,
video_storage_device=video_storage_device,
inference_state_device=inference_state_device,
dtype=dtype,
max_vision_features_cache_size=max_vision_features_cache_size,
)
return inference_session
def _apply_non_overlapping_constraints(self, pred_masks):
"""
Apply non-overlapping constraints to the object scores in pred_masks. Here we
keep only the highest scoring object at each spatial location in pred_masks.
"""
batch_size = pred_masks.size(0)
if batch_size == 1:
return pred_masks
device = pred_masks.device
# "max_obj_inds": object index of the object with the highest score at each location
max_obj_inds = torch.argmax(pred_masks, dim=0, keepdim=True)
# "batch_obj_inds": object index of each object slice (along dim 0) in `pred_masks`
batch_obj_inds = torch.arange(batch_size, device=device)[:, None, None, None]
keep = max_obj_inds == batch_obj_inds
# suppress overlapping regions' scores below -10.0 so that the foreground regions
# don't overlap (here sigmoid(-10.0)=4.5398e-05)
pred_masks = torch.where(keep, pred_masks, torch.clamp(pred_masks, max=-10.0))
return pred_masks
def _apply_object_wise_non_overlapping_constraints(
self,
pred_masks,
obj_scores,
background_value=-10.0,
prompt_ids=None,
):
"""
Applies non-overlapping constraints object wise (i.e. only one object can claim the overlapping region).
Constraints are enforced independently for each prompt group when `prompt_ids` are provided.
"""
if prompt_ids is None:
return self._apply_object_wise_non_overlapping_constraints_impl(pred_masks, obj_scores, background_value)
if len(prompt_ids) != pred_masks.size(0):
raise ValueError("prompt_ids must have the same length as pred_masks")
pred_masks_grouped = pred_masks.clone()
prompt_ids_tensor = torch.tensor(prompt_ids, device=pred_masks.device, dtype=torch.long)
for prompt_id in prompt_ids_tensor.unique(sorted=True):
indices = torch.nonzero(prompt_ids_tensor == prompt_id, as_tuple=True)[0]
if indices.numel() == 0:
continue
prompt_masks = self._apply_object_wise_non_overlapping_constraints_impl(
pred_masks_grouped[indices],
obj_scores[indices],
background_value,
)
pred_masks_grouped[indices] = prompt_masks.to(pred_masks_grouped.dtype)
return pred_masks_grouped
def _apply_object_wise_non_overlapping_constraints_impl(self, pred_masks, obj_scores, background_value=-10.0):
pred_masks_single_score = torch.where(pred_masks > 0, obj_scores[..., None, None], background_value)
pixel_level_non_overlapping_masks = self._apply_non_overlapping_constraints(pred_masks_single_score)
pred_masks = torch.where(
pixel_level_non_overlapping_masks > 0,
pred_masks,
torch.clamp(pred_masks, max=background_value),
)
return pred_masks.to(pred_masks_single_score.dtype)
    def postprocess_outputs(
        self,
        inference_session,
        model_outputs,
        original_sizes: list[list[float]] | torch.Tensor | None = None,
    ):
        """
        Post-process model outputs to get final masks, boxes, and scores.
        Args:
            inference_session (`Sam3VideoInferenceSession`):
                The inference session object.
            model_outputs (`Sam3VideoSegmentationOutput`):
                The raw model output from `Sam3VideoModel.forward()`.
            original_sizes (`list[list[float]]` or `torch.Tensor`, *optional*):
                Optional original frame sizes [height, width]. Required for streaming inference
                when video_height/video_width are not set in the session.
        Returns:
            `dict`: A dictionary containing the following keys:
            - **object_ids** (`torch.Tensor` of shape `(num_objects,)`): Object IDs for each detected object.
            - **scores** (`torch.Tensor` of shape `(num_objects,)`): Detection scores for each object.
            - **boxes** (`torch.Tensor` of shape `(num_objects, 4)`): Bounding boxes in XYXY format
                (top_left_x, top_left_y, bottom_right_x, bottom_right_y).
            - **masks** (`torch.Tensor` of shape `(num_objects, height, width)`): Binary segmentation masks
                for each object at the original video resolution.
            - **prompt_to_obj_ids** (`dict[str, list[int]]`): Mapping from prompt text to list of
                object IDs detected by that prompt.
        """
        obj_id_to_mask = model_outputs["obj_id_to_mask"]  # low res masks (1, H_low, W_low)
        # Sort IDs so every per-object tensor below is in deterministic order.
        curr_obj_ids = sorted(obj_id_to_mask.keys())
        # Get video dimensions - use original_sizes for streaming inference if session doesn't have them
        if inference_session.video_height is not None and inference_session.video_width is not None:
            H_video, W_video = inference_session.video_height, inference_session.video_width
        elif original_sizes is not None:
            if isinstance(original_sizes, torch.Tensor):
                original_sizes = original_sizes.cpu().tolist()
            # original_sizes is a list of [height, width] pairs, take the first one
            if isinstance(original_sizes[0], list):
                H_video, W_video = int(original_sizes[0][0]), int(original_sizes[0][1])
            else:
                # Flat [height, width] form.
                H_video, W_video = int(original_sizes[0]), int(original_sizes[1])
        else:
            raise ValueError(
                "Either inference_session.video_height/video_width must be set, "
                "or original_sizes must be provided for streaming inference."
            )
        if len(curr_obj_ids) == 0:
            # No tracked objects on this frame: return empty, correctly-shaped tensors.
            out_obj_ids = torch.zeros(0, dtype=torch.int64)
            out_probs = torch.zeros(0, dtype=torch.float32)
            out_binary_masks = torch.zeros(0, H_video, W_video, dtype=torch.bool)
            out_boxes_xyxy = torch.zeros(0, 4, dtype=torch.float32)
        else:
            out_obj_ids = torch.tensor(curr_obj_ids, dtype=torch.int64)
            out_probs = torch.tensor([model_outputs["obj_id_to_score"][obj_id] for obj_id in curr_obj_ids])
            # Tracker scores default to 0.0 for objects the tracker has not scored.
            out_tracker_probs = torch.tensor(
                [model_outputs["obj_id_to_tracker_score"].get(obj_id, 0.0) for obj_id in curr_obj_ids]
            )
            # Interpolate low-res masks to video resolution
            low_res_masks = torch.cat([obj_id_to_mask[obj_id] for obj_id in curr_obj_ids], dim=0)  # (N, H_low, W_low)
            # Add channel dimension for interpolation: (N, H, W) -> (N, 1, H, W)
            out_binary_masks = torch.nn.functional.interpolate(
                low_res_masks.unsqueeze(1),
                size=(H_video, W_video),
                mode="bilinear",
                align_corners=False,
            ).squeeze(1)  # (N, H_video, W_video)
            # Threshold logits at zero to obtain binary masks.
            out_binary_masks = out_binary_masks > 0
            assert out_binary_masks.dtype == torch.bool
            keep = out_binary_masks.any(dim=(1, 2)).cpu()  # remove masks with 0 areas
            # hide outputs for those object IDs in `obj_ids_to_hide`
            obj_ids_to_hide = []
            if model_outputs["suppressed_obj_ids"] is not None:
                obj_ids_to_hide.extend(list(model_outputs["suppressed_obj_ids"]))
            if len(inference_session.hotstart_removed_obj_ids) > 0:
                obj_ids_to_hide.extend(list(inference_session.hotstart_removed_obj_ids))
            if len(obj_ids_to_hide) > 0:
                obj_ids_to_hide_t = torch.tensor(obj_ids_to_hide, dtype=torch.int64)
                keep &= ~torch.isin(out_obj_ids, obj_ids_to_hide_t)
            # slice those valid entries from the original outputs
            keep_idx = torch.nonzero(keep, as_tuple=True)[0]
            # Masks may live on a different device than the CPU-side bookkeeping tensors.
            keep_idx_gpu = keep_idx.to(device=out_binary_masks.device, non_blocking=True)
            out_obj_ids = torch.index_select(out_obj_ids, 0, keep_idx)
            out_probs = torch.index_select(out_probs, 0, keep_idx)
            out_tracker_probs = torch.index_select(out_tracker_probs, 0, keep_idx)
            out_binary_masks = torch.index_select(out_binary_masks, 0, keep_idx_gpu)
            out_boxes_xyxy = masks_to_boxes(out_binary_masks)
            # Apply non-overlapping constraints on the existing masklets.
            # Constraints are enforced independently per prompt group.
            if out_binary_masks.shape[0] > 1:
                assert len(out_binary_masks) == len(out_tracker_probs)
                prompt_ids_filtered = [
                    inference_session.obj_id_to_prompt_id[int(obj_id)] for obj_id in out_obj_ids.tolist()
                ]
                # background_value=0 because the masks here are already binary (0/1), not logits.
                out_binary_masks = (
                    self._apply_object_wise_non_overlapping_constraints(
                        out_binary_masks.unsqueeze(1),
                        out_tracker_probs.unsqueeze(1).to(out_binary_masks.device),
                        background_value=0,
                        prompt_ids=prompt_ids_filtered,
                    ).squeeze(1)
                ) > 0
        # Build prompt_to_obj_ids mapping: group object IDs by their associated prompt text.
        prompt_to_obj_ids = {}
        for obj_id in out_obj_ids.tolist():
            prompt_id = inference_session.obj_id_to_prompt_id[obj_id]
            prompt_text = inference_session.prompts[prompt_id]
            prompt_to_obj_ids.setdefault(prompt_text, []).append(obj_id)
        outputs = {
            "object_ids": out_obj_ids,
            "scores": out_probs,
            "boxes": out_boxes_xyxy,
            "masks": out_binary_masks,
            "prompt_to_obj_ids": prompt_to_obj_ids,
        }
        return outputs
# Explicit public API of this module.
__all__ = ["Sam3VideoProcessor"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/sam3_video/processing_sam3_video.py",
"license": "Apache License 2.0",
"lines": 327,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:tests/models/sam3/test_modeling_sam3.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch SAM3 model."""
import gc
import tempfile
import unittest
import requests
from transformers.testing_utils import (
backend_empty_cache,
require_deterministic_for_xpu,
require_torch,
slow,
torch_device,
)
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers.models.sam3.configuration_sam3 import (
Sam3Config,
Sam3DETRDecoderConfig,
Sam3DETREncoderConfig,
Sam3GeometryEncoderConfig,
Sam3MaskDecoderConfig,
Sam3VisionConfig,
Sam3ViTConfig,
)
from transformers.models.sam3.modeling_sam3 import Sam3Model, Sam3VisionModel
from transformers.models.sam3.processing_sam3 import Sam3Processor
if is_vision_available():
from PIL import Image
class Sam3VisionModelTester:
    """Builds a miniature Sam3 vision config and random pixel inputs for unit tests."""

    def __init__(
        self,
        parent,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=64,
        num_channels=3,
        image_size=224,
        patch_size=14,
        window_size=8,
        global_attn_indexes=None,
        fpn_hidden_size=32,
        scale_factors=None,
        batch_size=2,
        is_training=True,
    ):
        self.parent = parent
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.window_size = window_size
        # Mutable defaults are resolved here instead of in the signature.
        self.global_attn_indexes = [0, 1] if global_attn_indexes is None else global_attn_indexes
        self.fpn_hidden_size = fpn_hidden_size
        self.scale_factors = [4.0, 2.0, 1.0, 0.5] if scale_factors is None else scale_factors
        self.batch_size = batch_size
        self.is_training = is_training

    def get_config(self):
        """Assemble a Sam3VisionConfig from the tester's hyper-parameters."""
        backbone = Sam3ViTConfig(
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            num_channels=self.num_channels,
            image_size=self.image_size,
            patch_size=self.patch_size,
            window_size=self.window_size,
            global_attn_indexes=self.global_attn_indexes,
        )
        return Sam3VisionConfig(
            backbone_config=backbone,
            fpn_hidden_size=self.fpn_hidden_size,
            scale_factors=self.scale_factors,
        )

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values) with a random image batch."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        return self.get_config(), pixel_values

    def create_and_check_model(self, config, pixel_values):
        """Run a forward pass and check FPN and backbone output shapes."""
        model = Sam3VisionModel(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        # The FPN emits one feature map (and one positional encoding) per scale factor.
        num_scales = len(self.scale_factors)
        self.parent.assertEqual(len(result.fpn_hidden_states), num_scales)
        self.parent.assertEqual(len(result.fpn_position_encoding), num_scales)
        # The backbone sequence length equals the number of patches.
        num_patches = (self.image_size // self.patch_size) ** 2
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values = self.prepare_config_and_inputs()
        return config, {"pixel_values": pixel_values}
@require_torch
class Sam3VisionModelTest(ModelTesterMixin, unittest.TestCase):
    """
    Tests for SAM3 Vision Model (ViT backbone + FPN neck).
    """
    all_model_classes = (Sam3VisionModel,) if is_torch_available() else ()
    # The vision tower exposes no resizable token embeddings, so skip those common tests.
    test_resize_embeddings = False
    def setUp(self):
        self.model_tester = Sam3VisionModelTester(self)
        self.config_tester = ConfigTester(self, config_class=Sam3VisionConfig, has_text_modality=False)
    def test_config(self):
        # Run the serialization round-trip and init checks individually.
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    @unittest.skip(reason="SAM3's vision encoder does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    def test_model_get_set_embeddings(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        # Force eager attention to support output attentions
        config._attn_implementation = "eager"
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class._from_config(config, attn_implementation="eager")
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            # One attention tensor per backbone layer is expected.
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            # Check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            config.backbone_config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            # For windowed attention, check the attention shape
            # Attention shape: (batch_size, num_heads, seq_len, seq_len) for global attention
            # or windowed shape for local attention
            self.assertIsNotNone(attentions[0])
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            # SAM3VisionModel doesn't return hidden_states in the same way as SAM2
            # It returns last_hidden_state, fpn_hidden_states, and fpn_position_encoding
            self.assertIsNotNone(outputs.last_hidden_state)
            self.assertIsNotNone(outputs.fpn_hidden_states)
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_batching_equivalence(self, atol=5e-4, rtol=5e-4):
        # NOTE(review): tolerances are loosened vs. the mixin defaults — presumably
        # due to floating-point accumulation in the FPN; confirm if tightened later.
        super().test_batching_equivalence(atol=atol, rtol=rtol)
    @unittest.skip(reason="SAM3 model can't be compiled dynamic yet")
    def test_sdpa_can_compile_dynamic(self):
        pass
    @unittest.skip(reason="SAM3VisionModel has FPN channel mismatch with flex attention")
    def test_flex_attention_with_grads(self):
        pass
class Sam3ModelTester:
    """Builds a miniature Sam3Config (vision + text + geometry + DETR + mask decoder) and random inputs."""
    def __init__(
        self,
        parent,
        num_channels=3,
        image_size=224,  # Keep reasonable size: 224 = 16 * 14
        hidden_size=32,
        patch_size=14,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=64,
        window_size=8,  # 224/14 = 16 patches, 16/2 = 8 per window
        global_attn_indexes=None,
        fpn_hidden_size=32,
        scale_factors=None,
        geometry_encoder_hidden_size=32,
        geometry_encoder_num_layers=1,  # Reduced from 2 to 1
        detr_encoder_hidden_size=32,
        detr_encoder_num_layers=1,  # Reduced from 2 to 1
        detr_decoder_hidden_size=32,
        detr_decoder_num_layers=1,  # Reduced from 2 to 1
        detr_decoder_num_queries=5,  # Reduced from 10 to 5
        mask_decoder_hidden_size=32,
        batch_size=2,
        is_training=True,
    ):
        # Mutable defaults are resolved here rather than in the signature.
        if global_attn_indexes is None:
            global_attn_indexes = [0, 1]
        if scale_factors is None:
            scale_factors = [2.0, 1.0]  # Just 2 scales to reduce params
        self.parent = parent
        self.num_channels = num_channels
        self.image_size = image_size
        self.hidden_size = hidden_size
        self.patch_size = patch_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.global_attn_indexes = global_attn_indexes
        self.fpn_hidden_size = fpn_hidden_size
        self.scale_factors = scale_factors
        self.batch_size = batch_size
        self.is_training = is_training
        # Geometry encoder
        self.geometry_encoder_hidden_size = geometry_encoder_hidden_size
        self.geometry_encoder_num_layers = geometry_encoder_num_layers
        # DETR encoder/decoder
        self.detr_encoder_hidden_size = detr_encoder_hidden_size
        self.detr_encoder_num_layers = detr_encoder_num_layers
        self.detr_decoder_hidden_size = detr_decoder_hidden_size
        self.detr_decoder_num_layers = detr_decoder_num_layers
        self.detr_decoder_num_queries = detr_decoder_num_queries
        # Mask decoder
        self.mask_decoder_hidden_size = mask_decoder_hidden_size
    def prepare_config_and_inputs(self):
        # Random image batch plus a random token sequence as the text prompt.
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        # Simple text input (will be processed by text encoder)
        input_ids = torch.randint(0, 1000, (self.batch_size, 16), device=torch_device)
        attention_mask = torch.ones_like(input_ids)
        config = self.get_config()
        return config, pixel_values, input_ids, attention_mask
    def get_config(self):
        # Assemble every sub-config with deliberately tiny sizes to keep tests fast.
        backbone_config = Sam3ViTConfig(
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            num_channels=self.num_channels,
            image_size=self.image_size,
            patch_size=self.patch_size,
            window_size=self.window_size,
            global_attn_indexes=self.global_attn_indexes,
        )
        vision_config = Sam3VisionConfig(
            backbone_config=backbone_config,
            fpn_hidden_size=self.fpn_hidden_size,
            scale_factors=self.scale_factors,
        )
        # Small text config for testing (instead of default full CLIP model)
        text_config = {
            "vocab_size": 1000,  # Keep at 1000 for stability
            "hidden_size": 32,
            "intermediate_size": 64,
            "projection_dim": 32,
            "num_hidden_layers": self.num_hidden_layers,
            "num_attention_heads": 4,
            "max_position_embeddings": 32,  # Keep at 32 for stability
            "hidden_act": "gelu",
        }
        geometry_encoder_config = Sam3GeometryEncoderConfig(
            hidden_size=self.geometry_encoder_hidden_size,
            num_layers=self.geometry_encoder_num_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            mask_fuser_hidden_size=self.geometry_encoder_hidden_size,  # Match hidden_size to reduce params
            mask_fuser_num_layers=1,  # Reduce from default 2 to 1
        )
        detr_encoder_config = Sam3DETREncoderConfig(
            hidden_size=self.detr_encoder_hidden_size,
            num_layers=self.detr_encoder_num_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
        )
        detr_decoder_config = Sam3DETRDecoderConfig(
            hidden_size=self.detr_decoder_hidden_size,
            num_layers=self.detr_decoder_num_layers,
            num_queries=self.detr_decoder_num_queries,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
        )
        mask_decoder_config = Sam3MaskDecoderConfig(
            hidden_size=self.mask_decoder_hidden_size,
            num_upsampling_stages=2,  # Reduced from 3 to 2
        )
        return Sam3Config(
            vision_config=vision_config,
            text_config=text_config,
            geometry_encoder_config=geometry_encoder_config,
            detr_encoder_config=detr_encoder_config,
            detr_decoder_config=detr_decoder_config,
            mask_decoder_config=mask_decoder_config,
        )
    def create_and_check_model(self, config, pixel_values, input_ids, attention_mask):
        # Forward pass and shape checks on the detection heads' outputs.
        model = Sam3Model(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values=pixel_values, input_ids=input_ids, attention_mask=attention_mask)
        # Check output shapes
        self.parent.assertIsNotNone(result.pred_masks)
        self.parent.assertIsNotNone(result.pred_boxes)
        self.parent.assertIsNotNone(result.pred_logits)
        # Masks should be [batch_size, num_queries, H, W]
        self.parent.assertEqual(result.pred_masks.shape[0], self.batch_size)
        self.parent.assertEqual(result.pred_masks.shape[1], self.detr_decoder_num_queries)
        # Boxes should be [batch_size, num_queries, 4]
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.detr_decoder_num_queries, 4))
        # Logits should be [batch_size, num_queries]
        self.parent.assertEqual(result.pred_logits.shape, (self.batch_size, self.detr_decoder_num_queries))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, input_ids, attention_mask = config_and_inputs
        inputs_dict = {
            "pixel_values": pixel_values,
            "input_ids": input_ids,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict
@require_torch
class Sam3ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Tests for SAM3 full model.
    """
    all_model_classes = (Sam3Model,) if is_torch_available() else ()
    pipeline_model_mapping = {"mask-generation": Sam3Model} if is_torch_available() else {}
    # The full model exposes no resizable token embeddings to the common tests.
    test_resize_embeddings = False
    # NOTE(review): flag consumed by the common test mixins — presumably marks
    # models composed of several sub-models (vision/text/DETR/mask decoder); confirm.
    _is_composite = True
    def setUp(self):
        self.model_tester = Sam3ModelTester(self)
        common_properties = ["initializer_range"]
        self.config_tester = ConfigTester(
            self, config_class=Sam3Config, has_text_modality=False, common_properties=common_properties
        )
    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="SAM3 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    def test_model_get_set_embeddings(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            # Vision encoder has input embeddings
            self.assertIsInstance(model.vision_encoder.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_batching_equivalence(self, atol=5e-4, rtol=5e-4):
        # NOTE(review): tolerances loosened vs. the mixin defaults; confirm why if tightened later.
        super().test_batching_equivalence(atol=atol, rtol=rtol)
    # Override as SAM3Model has component-specific attention outputs
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
            # First pass: request attentions via the forward kwargs.
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class._from_config(config, attn_implementation="eager")
            config = model.config
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            # Check that we have the component-specific attention outputs
            # Note: Some may be empty tuples if attentions aren't collected for that component
            self.assertIsNotNone(outputs.vision_attentions)
            self.assertIsNotNone(outputs.detr_encoder_attentions)
            self.assertIsNotNone(outputs.detr_decoder_attentions)
            self.assertIsNotNone(outputs.mask_decoder_attentions)
            # Check vision attentions (from ViT backbone) - should be properly collected
            if outputs.vision_attentions:
                vision_attentions = outputs.vision_attentions
                self.assertEqual(len(vision_attentions), self.model_tester.num_hidden_layers)
            # Check that at least vision attentions are present (others may require different collection mechanism)
            self.assertTrue(
                len(outputs.vision_attentions) > 0,
                "At least vision attentions should be collected when output_attentions=True",
            )
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            for k in config.sub_configs:
                if (subconfig := getattr(config, k)) is not None:
                    subconfig.output_attentions = True
                    # Sam3 has a vision subconfig with itself a sub config....
                    for k in subconfig.sub_configs:
                        if (subsubconfig := getattr(subconfig, k)) is not None:
                            subsubconfig.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            # Verify again with config-based setting
            self.assertIsNotNone(outputs.vision_attentions)
            self.assertIsNotNone(outputs.detr_encoder_attentions)
            self.assertIsNotNone(outputs.detr_decoder_attentions)
            self.assertIsNotNone(outputs.mask_decoder_attentions)
    # Override as SAM3Model has component-specific attention/hidden state outputs
    def test_retain_grad_hidden_states_attentions(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        # Enable hidden-state and attention collection on every sub-config.
        for k in config.sub_configs:
            if getattr(config, k) is not None:
                getattr(config, k).output_hidden_states = True
                getattr(config, k).output_attentions = True
        config.output_hidden_states = True
        config.output_attentions = True
        config._attn_implementation = "eager"
        # Use first model class
        model_class = self.all_model_classes[0]
        model = model_class._from_config(config, attn_implementation="eager")
        model.to(torch_device)
        inputs = self._prepare_for_class(inputs_dict, model_class)
        outputs = model(**inputs)
        output = outputs[0]
        # SAM3 has component-specific hidden states and attentions
        # retain_grad() on each intermediate so its .grad survives backward.
        # Check vision hidden states and attentions
        if outputs.vision_hidden_states is not None and len(outputs.vision_hidden_states) > 0:
            vision_hidden_states = outputs.vision_hidden_states[0]
            vision_hidden_states.retain_grad()
        if outputs.vision_attentions is not None and len(outputs.vision_attentions) > 0:
            vision_attentions = outputs.vision_attentions[0]
            vision_attentions.retain_grad()
        # Check DETR encoder hidden states and attentions
        if outputs.encoder_hidden_states is not None and len(outputs.encoder_hidden_states) > 0:
            encoder_hidden_states = outputs.encoder_hidden_states[0]
            encoder_hidden_states.retain_grad()
        if outputs.detr_encoder_attentions is not None and len(outputs.detr_encoder_attentions) > 0:
            detr_encoder_attentions = outputs.detr_encoder_attentions[0]
            detr_encoder_attentions.retain_grad()
        # Check DETR decoder hidden states and attentions
        if outputs.decoder_hidden_states is not None and len(outputs.decoder_hidden_states) > 0:
            decoder_hidden_states = outputs.decoder_hidden_states[0]
            decoder_hidden_states.retain_grad()
        if outputs.detr_decoder_attentions is not None and len(outputs.detr_decoder_attentions) > 0:
            detr_decoder_attentions = outputs.detr_decoder_attentions[0]
            detr_decoder_attentions.retain_grad()
        # Check mask decoder attentions
        if outputs.mask_decoder_attentions is not None and len(outputs.mask_decoder_attentions) > 0:
            mask_decoder_attentions = outputs.mask_decoder_attentions[0]
            mask_decoder_attentions.retain_grad()
        output.flatten()[0].backward(retain_graph=True)
        # Check gradients are not None
        if outputs.vision_hidden_states is not None and len(outputs.vision_hidden_states) > 0:
            self.assertIsNotNone(vision_hidden_states.grad)
        if outputs.vision_attentions is not None and len(outputs.vision_attentions) > 0:
            self.assertIsNotNone(vision_attentions.grad)
        if outputs.encoder_hidden_states is not None and len(outputs.encoder_hidden_states) > 0:
            self.assertIsNotNone(encoder_hidden_states.grad)
        if outputs.detr_encoder_attentions is not None and len(outputs.detr_encoder_attentions) > 0:
            self.assertIsNotNone(detr_encoder_attentions.grad)
        if outputs.decoder_hidden_states is not None and len(outputs.decoder_hidden_states) > 0:
            self.assertIsNotNone(decoder_hidden_states.grad)
        if outputs.detr_decoder_attentions is not None and len(outputs.detr_decoder_attentions) > 0:
            self.assertIsNotNone(detr_decoder_attentions.grad)
        if outputs.mask_decoder_attentions is not None and len(outputs.mask_decoder_attentions) > 0:
            self.assertIsNotNone(mask_decoder_attentions.grad)
def test_hidden_states_output(self):
    """Test that SAM3 properly outputs component-specific hidden states."""
    config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
    for model_class in self.all_model_classes:
        # Request hidden states on the top-level config and on every sub-config.
        config.output_hidden_states = True
        for sub_name in config.sub_configs:
            sub_config = getattr(config, sub_name)
            if sub_config is not None:
                sub_config.output_hidden_states = True
        model = model_class(config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
        # SAM3 exposes per-component hidden states rather than a single tuple.
        # Vision encoder: one entry per layer plus the embeddings output.
        if outputs.vision_hidden_states is not None:
            self.assertIsInstance(outputs.vision_hidden_states, (list, tuple))
            self.assertEqual(
                len(outputs.vision_hidden_states),
                self.model_tester.num_hidden_layers + 1,  # +1 for embeddings
            )
        # DETR encoder hidden states (stored as encoder_hidden_states).
        if outputs.encoder_hidden_states is not None:
            self.assertIsInstance(outputs.encoder_hidden_states, (list, tuple))
        # DETR decoder hidden states (stored as decoder_hidden_states).
        if outputs.decoder_hidden_states is not None:
            self.assertIsInstance(outputs.decoder_hidden_states, (list, tuple))
@unittest.skip(reason="SAM3VisionModel has FPN channel mismatch with flex attention")
def test_flex_attention_with_grads(self):
    """Disabled inherited common test; see the skip reason on the decorator."""
    pass
@unittest.skip(
    reason="Sam3Model creates attention masks from features (with gradients), "
    "which is incompatible with flash attention's expectation of binary masks"
)
def test_flash_attn_2_inference_equivalence(self):
    """Disabled inherited common test; see the skip reason on the decorator."""
    pass
@unittest.skip(
    reason="Sam3Model creates attention masks from features (with gradients), "
    "which is incompatible with flash attention's expectation of binary masks"
)
def test_flash_attn_2_inference_equivalence_right_padding(self):
    """Disabled inherited common test; see the skip reason on the decorator."""
    pass
@unittest.skip(
    reason="Sam3Model creates attention masks from features (with gradients), "
    "which is incompatible with flash attention's expectation of binary masks"
)
def test_flash_attn_3_inference_equivalence(self):
    """Disabled inherited common test; see the skip reason on the decorator."""
    pass
@unittest.skip(
    reason="Sam3Model creates attention masks from features (with gradients), "
    "which is incompatible with flash attention's expectation of binary masks"
)
def test_flash_attn_3_inference_equivalence_right_padding(self):
    """Disabled inherited common test; see the skip reason on the decorator."""
    pass
@unittest.skip(
    reason="Sam3Model creates attention masks from features (with gradients), "
    "which is incompatible with flash attention's expectation of binary masks"
)
def test_flash_attn_kernels_inference_equivalence(self):
    """Disabled inherited common test; see the skip reason on the decorator."""
    pass
@unittest.skip(
    reason="Sam3Model creates attention masks from features (with gradients), "
    "which is incompatible with flash attention's expectation of binary masks"
)
def test_flash_attn_kernels_mps_inference_equivalence(self):
    """Disabled inherited common test; see the skip reason on the decorator."""
    pass
def test_sdpa_can_dispatch_composite_models(self):
    """
    Tests if composite models dispatch correctly on SDPA/eager when requested.

    SAM3 has multiple sub-models: vision_encoder, text_encoder, geometry_encoder,
    detr_encoder, detr_decoder, mask_decoder. The model is saved and reloaded
    once per requested attention implementation, then every sub-model's config
    is checked for the expected `_attn_implementation`.
    """
    if not self.has_attentions:
        self.skipTest(reason="Model architecture does not support attentions")
    if not self._is_composite:
        self.skipTest(f"{self.all_model_classes[0].__name__} does not support SDPA")
    for model_class in self.all_model_classes:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        model = model_class(config)
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model_sdpa = model_class.from_pretrained(tmpdirname, attn_implementation="sdpa")
            model_sdpa = model_sdpa.eval().to(torch_device)
            # vision_encoder is mandatory; the other sub-models may be absent,
            # hence the three-argument getattr with a None default.
            text_encoder_sdpa = getattr(model_sdpa, "text_encoder", None)
            detr_encoder_sdpa = getattr(model_sdpa, "detr_encoder", None)
            detr_decoder_sdpa = getattr(model_sdpa, "detr_decoder", None)
            mask_decoder_sdpa = getattr(model_sdpa, "mask_decoder", None)
            # Check that sub-models dispatch to SDPA if they support it.
            # assertEqual (not assertTrue on `==`) so failures show both values.
            self.assertEqual(model_sdpa.vision_encoder.config._attn_implementation, "sdpa")
            if text_encoder_sdpa is not None and hasattr(text_encoder_sdpa, "_supports_sdpa"):
                # Text encoder from CLIP should support SDPA
                self.assertEqual(text_encoder_sdpa.config._attn_implementation, "sdpa")
            if detr_encoder_sdpa is not None:
                self.assertEqual(detr_encoder_sdpa.config._attn_implementation, "sdpa")
            if detr_decoder_sdpa is not None:
                self.assertEqual(detr_decoder_sdpa.config._attn_implementation, "sdpa")
            if mask_decoder_sdpa is not None:
                self.assertEqual(mask_decoder_sdpa.config._attn_implementation, "sdpa")
            # Now test with eager
            model_eager = model_class.from_pretrained(tmpdirname, attn_implementation="eager")
            model_eager = model_eager.eval().to(torch_device)
            self.assertEqual(model_eager.vision_encoder.config._attn_implementation, "eager")
            if hasattr(model_eager, "text_encoder"):
                self.assertEqual(model_eager.text_encoder.config._attn_implementation, "eager")
            if hasattr(model_eager, "detr_encoder"):
                self.assertEqual(model_eager.detr_encoder.config._attn_implementation, "eager")
            if hasattr(model_eager, "detr_decoder"):
                self.assertEqual(model_eager.detr_decoder.config._attn_implementation, "eager")
            if hasattr(model_eager, "mask_decoder"):
                self.assertEqual(model_eager.mask_decoder.config._attn_implementation, "eager")
            # Verify no SDPA layers leaked into the eager model.
            for _name, submodule in model_eager.named_modules():
                class_name = submodule.__class__.__name__
                if (
                    class_name.endswith("Attention")
                    and getattr(submodule, "config", None)
                    and submodule.config._attn_implementation == "sdpa"
                ):
                    raise ValueError("The eager model should not have SDPA attention layers")
def test_forward_with_text_embeds(self):
    """Test that text_embeds parameter works correctly."""
    config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
    for model_class in self.all_model_classes:
        model = model_class(config)
        model.to(torch_device)
        model.eval()
        # Pre-compute the pooled text embeddings once.
        with torch.no_grad():
            pooled_text = model.get_text_features(
                input_ids=inputs_dict["input_ids"],
                attention_mask=inputs_dict["attention_mask"],
                return_dict=True,
            ).pooler_output
        # Run once with the precomputed embeddings (no input_ids) ...
        embed_inputs = {
            "pixel_values": inputs_dict["pixel_values"],
            "text_embeds": pooled_text,
        }
        with torch.no_grad():
            embed_outputs = model(**embed_inputs)
        # ... and once with raw input_ids.
        with torch.no_grad():
            ids_outputs = model(**inputs_dict)
        # Both paths must produce (nearly) identical predictions.
        self.assertTrue(torch.allclose(embed_outputs.pred_logits, ids_outputs.pred_logits, atol=1e-5))
        self.assertTrue(torch.allclose(embed_outputs.pred_boxes, ids_outputs.pred_boxes, atol=1e-5))
def test_forward_with_both_input_ids_and_text_embeds_raises_error(self):
    """Test that passing both input_ids and text_embeds raises an error."""
    config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
    for model_class in self.all_model_classes:
        model = model_class(config)
        model.to(torch_device)
        model.eval()
        # Compute text embeddings up front.
        with torch.no_grad():
            embeds = model.get_text_features(
                input_ids=inputs_dict["input_ids"], attention_mask=inputs_dict["attention_mask"]
            )
        # Supplying raw ids together with precomputed embeddings is ambiguous
        # and must be rejected.
        with self.assertRaises(ValueError):
            model(
                pixel_values=inputs_dict["pixel_values"],
                input_ids=inputs_dict["input_ids"],
                text_embeds=embeds,
            )
def test_forward_with_vision_embeds(self):
    """Test that vision_embeds parameter works correctly."""
    config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
    for model_class in self.all_model_classes:
        model = model_class(config)
        model.to(torch_device)
        model.eval()
        # Pre-compute the vision embeddings once.
        with torch.no_grad():
            precomputed = model.get_vision_features(pixel_values=inputs_dict["pixel_values"])
        # Run once with the precomputed embeddings (no pixel_values) ...
        embed_inputs = {
            "vision_embeds": precomputed,
            "input_ids": inputs_dict["input_ids"],
            "attention_mask": inputs_dict["attention_mask"],
        }
        with torch.no_grad():
            embed_outputs = model(**embed_inputs)
        # ... and once with the raw pixel values.
        with torch.no_grad():
            pixel_outputs = model(**inputs_dict)
        # Both paths must produce (nearly) identical predictions.
        self.assertTrue(
            torch.allclose(embed_outputs.pred_logits, pixel_outputs.pred_logits, atol=1e-5)
        )
        self.assertTrue(torch.allclose(embed_outputs.pred_boxes, pixel_outputs.pred_boxes, atol=1e-5))
def test_forward_with_both_pixel_values_and_vision_embeds_raises_error(self):
    """Test that passing both pixel_values and vision_embeds raises an error."""
    config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
    for model_class in self.all_model_classes:
        model = model_class(config)
        model.to(torch_device)
        model.eval()
        # Compute vision embeddings up front.
        with torch.no_grad():
            embeds = model.get_vision_features(pixel_values=inputs_dict["pixel_values"])
        # Supplying raw pixels together with precomputed embeddings is
        # ambiguous and must be rejected.
        with self.assertRaises(ValueError):
            model(
                pixel_values=inputs_dict["pixel_values"],
                vision_embeds=embeds,
                input_ids=inputs_dict["input_ids"],
                attention_mask=inputs_dict["attention_mask"],
            )
def test_custom_image_size(self):
    """Test that custom image size can be set and propagates correctly through nested configs."""
    config = self.model_tester.get_config()
    config.image_size = 560
    # The size must propagate to the vision config and its backbone config.
    for cfg in (config, config.vision_config, config.vision_config.backbone_config):
        self.assertEqual(cfg.image_size, 560)
    # A forward pass at the custom resolution should still succeed.
    model = Sam3Model(config=config).to(torch_device).eval()
    batch_size = self.model_tester.batch_size
    pixel_values = floats_tensor([batch_size, self.model_tester.num_channels, 560, 560]).to(
        torch_device
    )
    input_ids = torch.randint(0, 1000, (batch_size, 16), device=torch_device)
    with torch.no_grad():
        outputs = model(pixel_values=pixel_values, input_ids=input_ids, attention_mask=torch.ones_like(input_ids))
    for prediction in (outputs.pred_masks, outputs.pred_boxes, outputs.pred_logits):
        self.assertIsNotNone(prediction)
@unittest.skip(reason="SAM3 model can't be compiled dynamic yet")
def test_sdpa_can_compile_dynamic(self):
    """Disabled inherited common test; see the skip reason on the decorator."""
    pass
@unittest.skip(
    reason="SAM3 uses CLIP text encoder which has two attention masks: `causal_attention_mask` and `attention_mask`."
)
def test_sdpa_can_dispatch_on_flash(self):
    """Disabled inherited common test; see the skip reason on the decorator."""
    pass
def test_model_outputs_equivalence(self):
    """
    Test that tuple and dict outputs are equivalent.

    SAM3 returns complex outputs with component-specific fields, so we need to ensure proper conversion.
    """
    config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

    def set_nan_tensor_to_zero(t):
        # NaN != NaN, so this mask selects exactly the NaN entries (in place).
        t[t != t] = 0
        return t

    def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs=None):
        # `None` sentinel instead of a mutable `{}` default argument.
        additional_kwargs = additional_kwargs or {}
        with torch.no_grad():
            tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs)
            dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

        def recursive_check(tuple_object, dict_object):
            # Walk nested containers in lockstep and compare leaf tensors.
            if isinstance(tuple_object, (list, tuple)):
                for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                    recursive_check(tuple_iterable_value, dict_iterable_value)
            elif isinstance(tuple_object, dict):
                for tuple_iterable_value, dict_iterable_value in zip(
                    tuple_object.values(), dict_object.values()
                ):
                    recursive_check(tuple_iterable_value, dict_iterable_value)
            elif tuple_object is None:
                return
            # model might return non-tensors objects (e.g. Cache class)
            elif isinstance(tuple_object, torch.Tensor):
                self.assertTrue(
                    torch.allclose(
                        set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5
                    ),
                    msg=(
                        "Tuple and dict output are not equal. Difference:"
                        f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:"
                        # `.any()` on the inf checks too, so failures print a scalar
                        # flag rather than dumping the whole boolean tensor.
                        f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object).any()}. Dict has"
                        f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object).any()}."
                    ),
                )

        recursive_check(tuple_output, dict_output)

    for model_class in self.all_model_classes:
        model = model_class(config)
        model.to(torch_device)
        model.eval()
        tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
        dict_inputs = self._prepare_for_class(inputs_dict, model_class)
        check_equivalence(model, tuple_inputs, dict_inputs)
        # Test with output_hidden_states
        tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
        dict_inputs = self._prepare_for_class(inputs_dict, model_class)
        check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
        # Test with output_attentions if supported
        if self.has_attentions:
            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_attentions": True})
            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(
                model, tuple_inputs, dict_inputs, {"output_hidden_states": True, "output_attentions": True}
            )
def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
    """Override to ensure input_ids and attention_mask are always present for Sam3Model."""
    inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
    # Sam3Model always requires input_ids and attention_mask for text encoding
    if model_class == Sam3Model:
        if "input_ids" not in inputs_dict or inputs_dict.get("input_ids") is None:
            # Create dummy input_ids if not present
            # Get batch_size from pixel_values or vision_embeds
            if "pixel_values" in inputs_dict and inputs_dict.get("pixel_values") is not None:
                batch_size = inputs_dict["pixel_values"].shape[0]
            elif "vision_embeds" in inputs_dict and inputs_dict.get("vision_embeds") is not None:
                # vision_embeds is a model output object; prefer FPN features,
                # fall back to the last hidden state for the batch dimension.
                vision_embeds = inputs_dict["vision_embeds"]
                if vision_embeds.fpn_hidden_states is not None and len(vision_embeds.fpn_hidden_states) > 0:
                    batch_size = vision_embeds.fpn_hidden_states[0].shape[0]
                elif vision_embeds.last_hidden_state is not None:
                    batch_size = vision_embeds.last_hidden_state.shape[0]
                else:
                    # No tensor to infer from — fall back to a small default.
                    batch_size = 2
            else:
                batch_size = 2
            config = self.model_tester.get_config()
            # text_config might be a dict or a config object
            if isinstance(config.text_config, dict):
                vocab_size = config.text_config.get("vocab_size", 1000)
            else:
                vocab_size = getattr(config.text_config, "vocab_size", 1000)
            # 16-token dummy prompt sampled uniformly from the vocabulary.
            inputs_dict["input_ids"] = torch.randint(0, vocab_size, (batch_size, 16), device=torch_device)
        if "attention_mask" not in inputs_dict or inputs_dict.get("attention_mask") is None:
            # All-ones mask: attend to every dummy token.
            inputs_dict["attention_mask"] = torch.ones_like(inputs_dict["input_ids"])
    return inputs_dict
def prepare_coco_cat_image():
    """Download and return the COCO cat-and-laptop test image as an RGB PIL image.

    Image 000000077595 from COCO val2017 (used by the batched inference notebook).
    """
    from io import BytesIO  # local import: only this download helper needs it

    img_url = "http://images.cocodataset.org/val2017/000000077595.jpg"
    # Download fully before decoding: `response.raw` bypasses requests'
    # transparent content-decoding and can hand PIL a compressed stream.
    # Also fail fast on HTTP errors and never hang forever.
    response = requests.get(img_url, timeout=60)
    response.raise_for_status()
    return Image.open(BytesIO(response.content)).convert("RGB")
def prepare_coco_kitchen_image():
    """Download and return the COCO kitchen-scene test image as an RGB PIL image.

    Image 000000136466 from COCO val2017 (used by the batched inference notebook).
    """
    from io import BytesIO  # local import: only this download helper needs it

    img_url = "http://images.cocodataset.org/val2017/000000136466.jpg"
    # Download fully before decoding: `response.raw` bypasses requests'
    # transparent content-decoding and can hand PIL a compressed stream.
    # Also fail fast on HTTP errors and never hang forever.
    response = requests.get(img_url, timeout=60)
    response.raise_for_status()
    return Image.open(BytesIO(response.content)).convert("RGB")
@slow
class Sam3ModelIntegrationTest(unittest.TestCase):
"""Integration tests for SAM3 model with real pretrained weights."""
def setUp(self):
    super().setUp()
    checkpoint = "facebook/sam3"
    # Load weights in float32 for numerically stable golden-value comparisons.
    self.model = Sam3Model.from_pretrained(checkpoint).to(torch.float32)
    self.processor = Sam3Processor.from_pretrained(checkpoint)
    # Move to the test device and switch to inference mode.
    self.model.to(torch_device)
    self.model.eval()
def tearDown(self):
    super().tearDown()
    # Drop Python-side references first, then release cached accelerator
    # memory so subsequent tests start from a clean slate.
    gc.collect()
    backend_empty_cache(torch_device)
def test_inference_text_prompt_only(self):
    """Test inference with text prompt only (from multiway_prompting notebook)."""
    # Example from notebook: "short hair" text prompt
    raw_image = prepare_coco_cat_image()
    text = "ear"
    inputs = self.processor(images=raw_image, text=text, return_tensors="pt").to(torch_device)
    with torch.no_grad():
        outputs = self.model(**inputs)
    # Check exact output shapes: (batch, queries, ...) per the assertions below
    self.assertEqual(outputs.pred_masks.shape, (1, 200, 288, 288))
    self.assertEqual(outputs.pred_boxes.shape, (1, 200, 4))
    self.assertEqual(outputs.pred_logits.shape, (1, 200))
    # Check that predictions have reasonable scores (after sigmoid)
    scores = torch.sigmoid(outputs.pred_logits)
    self.assertTrue((scores >= 0).all() and (scores <= 1).all())
    # Check exact values: compare the three highest-scoring queries against
    # golden values captured from the reference implementation.
    sorted_indices = torch.argsort(scores.squeeze(), descending=True)
    top_scores = scores.squeeze()[sorted_indices[:3]]
    top_logits = outputs.pred_logits.squeeze()[sorted_indices[:3]]
    top_idx = sorted_indices[0].item()
    torch.testing.assert_close(
        top_scores, torch.tensor([0.9381, 0.9214, 0.0910]).to(torch_device), atol=1e-4, rtol=1e-4
    )
    torch.testing.assert_close(
        top_logits, torch.tensor([2.7182, 2.4618, -2.3020]).to(torch_device), atol=1e-4, rtol=1e-4
    )
    # Box is in normalized coordinates; mask check samples the 3x3 top-left corner.
    torch.testing.assert_close(
        outputs.pred_boxes[0, top_idx],
        torch.tensor([0.4704, 0.2014, 0.5615, 0.3770]).to(torch_device),
        atol=1e-4,
        rtol=1e-4,
    )
    torch.testing.assert_close(
        outputs.pred_masks[0, top_idx, :3, :3],
        torch.tensor(
            [[-2.1815, -6.2767, -7.0687], [-5.7988, -10.2704, -10.9379], [-8.5194, -10.7892, -9.9152]]
        ).to(torch_device),
        atol=1e-4,
        rtol=1e-4,
    )
    # Test post-processing back to original image coordinates
    results = self.processor.post_process_instance_segmentation(
        outputs, threshold=0.5, mask_threshold=0.5, target_sizes=inputs.get("original_sizes").tolist()
    )
    self.assertEqual(len(results), 1)
    result = results[0]
    # Check that we have detections
    self.assertGreater(len(result["masks"]), 0)
    self.assertGreater(len(result["boxes"]), 0)
    self.assertGreater(len(result["scores"]), 0)
    # Check exact values for top detection (boxes are in pixel coordinates here)
    top_pp_score = result["scores"][0]
    top_pp_box = result["boxes"][0]
    torch.testing.assert_close(top_pp_score, torch.tensor(0.9210).to(torch_device), atol=1e-4, rtol=1e-4)
    torch.testing.assert_close(
        top_pp_box, torch.tensor([402.1755, 90.1420, 459.6165, 156.3702]).to(torch_device), atol=1e-4, rtol=1e-4
    )
def test_inference_single_box_prompt(self):
    """Test inference with a single bounding box prompt (from batched_inference notebook)."""
    raw_image = prepare_coco_cat_image()
    # Example from notebook: laptop region in image 1
    # Box in xyxy format: [100, 150, 500, 450]
    box_xyxy = [100, 150, 500, 450]
    input_boxes = [[box_xyxy]]
    inputs = self.processor(
        images=raw_image,
        input_boxes=input_boxes,
        input_boxes_labels=[[1]],  # Positive box
        return_tensors="pt",
    ).to(torch_device)
    with torch.no_grad():
        outputs = self.model(**inputs)
    # Check exact output shapes
    self.assertEqual(outputs.pred_masks.shape, (1, 200, 288, 288))
    self.assertEqual(outputs.pred_boxes.shape, (1, 200, 4))
    self.assertEqual(outputs.pred_logits.shape, (1, 200))
    # Check exact values for the three highest-scoring queries
    scores = torch.sigmoid(outputs.pred_logits)
    sorted_indices = torch.argsort(scores.squeeze(), descending=True)
    top_scores = scores.squeeze()[sorted_indices[:3]]
    top_logits = outputs.pred_logits.squeeze()[sorted_indices[:3]]
    top_idx = sorted_indices[0].item()
    torch.testing.assert_close(
        top_scores, torch.tensor([0.9308, 0.1617, 0.1336]).to(torch_device), atol=1e-4, rtol=1e-4
    )
    torch.testing.assert_close(
        top_logits, torch.tensor([2.5988, -1.6460, -1.8699]).to(torch_device), atol=1e-4, rtol=1e-4
    )
    # Normalized box of the top query, then a 3x3 sample of its mask logits
    torch.testing.assert_close(
        outputs.pred_boxes[0, top_idx],
        torch.tensor([0.1631, 0.4140, 0.7510, 0.9931]).to(torch_device),
        atol=1e-4,
        rtol=1e-4,
    )
    torch.testing.assert_close(
        outputs.pred_masks[0, top_idx, :3, :3],
        torch.tensor([[-1.8726, -3.5063, -3.7716], [-3.1987, -5.3820, -5.6782], [-3.8850, -5.4164, -5.8604]]).to(
            torch_device
        ),
        atol=1e-4,
        rtol=1e-4,
    )
    # Test post-processing back to original image coordinates
    results = self.processor.post_process_instance_segmentation(
        outputs, threshold=0.5, mask_threshold=0.5, target_sizes=inputs.get("original_sizes").tolist()
    )
    self.assertEqual(len(results), 1)
    result = results[0]
    # Check that we have detections
    self.assertGreater(len(result["masks"]), 0)
    # Check exact values for top detection (pixel-space box)
    top_pp_score = result["scores"][0]
    top_pp_box = result["boxes"][0]
    torch.testing.assert_close(top_pp_score, torch.tensor(0.9307).to(torch_device), atol=1e-4, rtol=1e-4)
    torch.testing.assert_close(
        top_pp_box, torch.tensor([104.3945, 175.9433, 480.6293, 422.0826]).to(torch_device), atol=1e-4, rtol=1e-4
    )
def test_inference_multi_box_prompt(self):
    """Test inference with multiple box prompts with positive and negative labels (from batched_inference notebook)."""
    raw_image = prepare_coco_kitchen_image()
    # Example from notebook: multiple positive boxes (dial + button)
    # Dial box (xyxy): [59, 144, 76, 163]
    # Button box (xyxy): [87, 148, 104, 159]
    box1_xyxy = [59, 144, 76, 163]
    box2_xyxy = [87, 148, 104, 159]
    input_boxes = [[box1_xyxy, box2_xyxy]]
    input_boxes_labels = [[1, 1]]  # Both positive
    inputs = self.processor(
        images=raw_image, input_boxes=input_boxes, input_boxes_labels=input_boxes_labels, return_tensors="pt"
    ).to(torch_device)
    with torch.no_grad():
        outputs = self.model(**inputs)
    # Check exact output shapes
    self.assertEqual(outputs.pred_masks.shape, (1, 200, 288, 288))
    self.assertEqual(outputs.pred_boxes.shape, (1, 200, 4))
    self.assertEqual(outputs.pred_logits.shape, (1, 200))
    # Check exact values for the three highest-scoring queries
    scores = torch.sigmoid(outputs.pred_logits)
    sorted_indices = torch.argsort(scores.squeeze(), descending=True)
    top_scores = scores.squeeze()[sorted_indices[:3]]
    top_logits = outputs.pred_logits.squeeze()[sorted_indices[:3]]
    top_idx = sorted_indices[0].item()
    torch.testing.assert_close(
        top_scores, torch.tensor([0.9611, 0.9379, 0.8348]).to(torch_device), atol=1e-4, rtol=1e-4
    )
    torch.testing.assert_close(
        top_logits, torch.tensor([3.2071, 2.7154, 1.6198]).to(torch_device), atol=1e-4, rtol=1e-4
    )
    # Normalized box of the top query, then a 3x3 sample of its mask logits
    torch.testing.assert_close(
        outputs.pred_boxes[0, top_idx],
        torch.tensor([0.1757, 0.2888, 0.2296, 0.3259]).to(torch_device),
        atol=1e-4,
        rtol=1e-4,
    )
    torch.testing.assert_close(
        outputs.pred_masks[0, top_idx, :3, :3],
        torch.tensor(
            [[-8.6138, -14.5615, -17.9965], [-13.6695, -20.4994, -25.6705], [-14.9681, -23.0616, -17.0045]]
        ).to(torch_device),
        atol=1e-4,
        rtol=1e-4,
    )
    # Test post-processing back to original image coordinates
    results = self.processor.post_process_instance_segmentation(
        outputs, threshold=0.5, mask_threshold=0.5, target_sizes=inputs.get("original_sizes").tolist()
    )
    self.assertEqual(len(results), 1)
    result = results[0]
    # Check that we have detections
    self.assertGreater(len(result["masks"]), 0)
    # Check exact values for top detection (pixel-space box)
    top_pp_score = result["scores"][0]
    top_pp_box = result["boxes"][0]
    torch.testing.assert_close(top_pp_score, torch.tensor(0.9379).to(torch_device), atol=1e-4, rtol=1e-4)
    torch.testing.assert_close(
        top_pp_box, torch.tensor([86.8687, 147.5269, 104.4475, 159.6138]).to(torch_device), atol=1e-4, rtol=1e-4
    )
def test_inference_combined_prompts(self):
    """Test inference with combined text and geometry prompts (text + negative box from batched_inference notebook)."""
    image = prepare_coco_kitchen_image()
    # Notebook example: the text prompt "handle" plus a negative box that
    # excludes the oven handle area (xyxy): [40, 183, 318, 204].
    oven_handle_box = [40, 183, 318, 204]
    inputs = self.processor(
        images=image,
        text="handle",
        input_boxes=[[oven_handle_box]],
        input_boxes_labels=[[0]],  # 0 = negative
        return_tensors="pt",
    ).to(torch_device)
    with torch.no_grad():
        outputs = self.model(**inputs)
    # Expected shapes: 200 candidate queries, 288x288 mask logits per query.
    self.assertEqual(outputs.pred_masks.shape, (1, 200, 288, 288))
    self.assertEqual(outputs.pred_boxes.shape, (1, 200, 4))
    self.assertEqual(outputs.pred_logits.shape, (1, 200))
def test_inference_batched_images(self):
    """Test batched inference with multiple images (from batched_inference notebook)."""
    # Example from notebook: batch of 2 images with different text prompts
    raw_image1 = prepare_coco_cat_image()
    raw_image2 = prepare_coco_kitchen_image()
    # Batch of 2 images with different text prompts: "ear" for cat, "dial" for kitchen
    inputs = self.processor(images=[raw_image1, raw_image2], text=["ear", "dial"], return_tensors="pt").to(
        torch_device
    )
    with torch.no_grad():
        outputs = self.model(**inputs)
    # Check exact output shapes (leading dim is now the batch of 2 images)
    self.assertEqual(outputs.pred_masks.shape, (2, 200, 288, 288))
    self.assertEqual(outputs.pred_boxes.shape, (2, 200, 4))
    self.assertEqual(outputs.pred_logits.shape, (2, 200))
    # Check scores are reasonable
    scores = torch.sigmoid(outputs.pred_logits)
    self.assertTrue((scores >= 0).all() and (scores <= 1).all())
    # Check exact values, per image: rank queries and compare the top three
    sorted_indices_0 = torch.argsort(scores[0], descending=True)
    sorted_indices_1 = torch.argsort(scores[1], descending=True)
    top_scores_0 = scores[0][sorted_indices_0[:3]]
    top_scores_1 = scores[1][sorted_indices_1[:3]]
    top_logits_0 = outputs.pred_logits[0][sorted_indices_0[:3]]
    top_logits_1 = outputs.pred_logits[1][sorted_indices_1[:3]]
    top_idx_0 = sorted_indices_0[0].item()
    top_idx_1 = sorted_indices_1[0].item()
    # Image 0 golden values match the single-image "ear" test, confirming
    # that batching does not change per-image results.
    torch.testing.assert_close(
        top_scores_0, torch.tensor([0.9381, 0.9214, 0.0910]).to(torch_device), atol=1e-4, rtol=1e-4
    )
    torch.testing.assert_close(
        top_scores_1, torch.tensor([0.8863, 0.8849, 0.8841]).to(torch_device), atol=1e-4, rtol=1e-4
    )
    torch.testing.assert_close(
        top_logits_0, torch.tensor([2.7182, 2.4618, -2.3020]).to(torch_device), atol=1e-4, rtol=1e-4
    )
    torch.testing.assert_close(
        top_logits_1, torch.tensor([2.0534, 2.0395, 2.0320]).to(torch_device), atol=1e-4, rtol=1e-4
    )
    torch.testing.assert_close(
        outputs.pred_boxes[0, top_idx_0],
        torch.tensor([0.4704, 0.2014, 0.5615, 0.3770]).to(torch_device),
        atol=1e-4,
        rtol=1e-4,
    )
    torch.testing.assert_close(
        outputs.pred_boxes[1, top_idx_1],
        torch.tensor([0.6162, 0.2769, 0.6838, 0.3238]).to(torch_device),
        atol=1e-4,
        rtol=1e-4,
    )
    torch.testing.assert_close(
        outputs.pred_masks[0, top_idx_0, :3, :3],
        torch.tensor(
            [[-2.1815, -6.2767, -7.0687], [-5.7988, -10.2704, -10.9379], [-8.5194, -10.7892, -9.9152]]
        ).to(torch_device),
        atol=1e-4,
        rtol=1e-4,
    )
    torch.testing.assert_close(
        outputs.pred_masks[1, top_idx_1, :3, :3],
        torch.tensor(
            [[-7.4371, -13.5898, -13.6496], [-11.8669, -20.6416, -23.0941], [-12.8623, -20.3439, -16.6497]]
        ).to(torch_device),
        atol=1e-4,
        rtol=1e-4,
    )
    # Test post-processing: one result dict per image in the batch
    results = self.processor.post_process_instance_segmentation(
        outputs, threshold=0.3, mask_threshold=0.5, target_sizes=inputs.get("original_sizes").tolist()
    )
    self.assertEqual(len(results), 2)
    # Check that both have detections
    self.assertGreater(len(results[0]["masks"]), 0)
    self.assertGreater(len(results[1]["masks"]), 0)
    # Check exact values for top detection in each image (pixel-space boxes)
    top_pp_score_0 = results[0]["scores"][0]
    top_pp_box_0 = results[0]["boxes"][0]
    top_pp_score_1 = results[1]["scores"][0]
    top_pp_box_1 = results[1]["boxes"][0]
    torch.testing.assert_close(top_pp_score_0, torch.tensor(0.9210).to(torch_device), atol=1e-4, rtol=1e-4)
    torch.testing.assert_close(
        top_pp_box_0, torch.tensor([402.1755, 90.1421, 459.6165, 156.3701]).to(torch_device), atol=1e-4, rtol=1e-4
    )
    torch.testing.assert_close(top_pp_score_1, torch.tensor(0.6641).to(torch_device), atol=1e-4, rtol=1e-4)
    torch.testing.assert_close(
        top_pp_box_1, torch.tensor([110.6279, 271.1848, 137.3600, 301.3683]).to(torch_device), atol=1e-4, rtol=1e-4
    )
def test_inference_batched_mixed_prompts(self):
    """Test batched inference with mixed prompt types (from batched_inference notebook)."""
    # Example from notebook: Image 1 with text "laptop", Image 2 with visual prompt (dial)
    raw_image1 = prepare_coco_cat_image()
    raw_image2 = prepare_coco_kitchen_image()
    # Box for dial in image 2 (xyxy): [59, 144, 76, 163]
    box2_xyxy = [59, 144, 76, 163]
    # Per-image prompt lists use None for "no prompt of this kind".
    inputs = self.processor(
        images=[raw_image1, raw_image2],
        text=["laptop", None],  # Only first image has text
        input_boxes=[None, [box2_xyxy]],  # Only second image has box
        input_boxes_labels=[None, [1]],
        return_tensors="pt",
    ).to(torch_device)
    with torch.no_grad():
        outputs = self.model(**inputs)
    # Check exact output shapes
    self.assertEqual(outputs.pred_masks.shape, (2, 200, 288, 288))
    self.assertEqual(outputs.pred_boxes.shape, (2, 200, 4))
    self.assertEqual(outputs.pred_logits.shape, (2, 200))
    # Check exact values, per image: rank queries and compare the top three
    scores = torch.sigmoid(outputs.pred_logits)
    sorted_indices_0 = torch.argsort(scores[0], descending=True)
    sorted_indices_1 = torch.argsort(scores[1], descending=True)
    top_scores_0 = scores[0][sorted_indices_0[:3]]
    top_scores_1 = scores[1][sorted_indices_1[:3]]
    top_logits_0 = outputs.pred_logits[0][sorted_indices_0[:3]]
    top_logits_1 = outputs.pred_logits[1][sorted_indices_1[:3]]
    top_idx_0 = sorted_indices_0[0].item()
    top_idx_1 = sorted_indices_1[0].item()
    torch.testing.assert_close(
        top_scores_0, torch.tensor([0.9756, 0.1352, 0.0701]).to(torch_device), atol=1e-4, rtol=1e-4
    )
    torch.testing.assert_close(
        top_scores_1, torch.tensor([0.9683, 0.8310, 0.8222]).to(torch_device), atol=1e-4, rtol=1e-4
    )
    torch.testing.assert_close(
        top_logits_0, torch.tensor([3.6865, -1.8555, -2.5854]).to(torch_device), atol=1e-4, rtol=1e-4
    )
    torch.testing.assert_close(
        top_logits_1, torch.tensor([3.4183, 1.5929, 1.5315]).to(torch_device), atol=1e-4, rtol=1e-4
    )
    # Normalized boxes (slightly out-of-range values are expected for
    # detections touching the image border), then 3x3 mask-logit samples.
    torch.testing.assert_close(
        outputs.pred_boxes[0, top_idx_0],
        torch.tensor([-0.0013, 0.0016, 0.4521, 0.9964]).to(torch_device),
        atol=1e-4,
        rtol=1e-4,
    )
    torch.testing.assert_close(
        outputs.pred_boxes[1, top_idx_1],
        torch.tensor([0.1774, 0.2876, 0.2296, 0.3261]).to(torch_device),
        atol=1e-4,
        rtol=1e-4,
    )
    torch.testing.assert_close(
        outputs.pred_masks[0, top_idx_0, :3, :3],
        torch.tensor([[0.0520, 0.3121, 0.4103], [0.6820, 1.0069, 1.0949], [0.8418, 1.0318, 1.0365]]).to(
            torch_device
        ),
        atol=1e-4,
        rtol=1e-4,
    )
    torch.testing.assert_close(
        outputs.pred_masks[1, top_idx_1, :3, :3],
        torch.tensor(
            [[-8.7447, -14.3499, -17.5662], [-13.6804, -20.3728, -25.5098], [-15.2996, -22.9116, -17.6658]]
        ).to(torch_device),
        atol=1e-4,
        rtol=1e-4,
    )
    # Test post-processing: one result dict per image in the batch
    results = self.processor.post_process_instance_segmentation(
        outputs, threshold=0.3, mask_threshold=0.5, target_sizes=inputs.get("original_sizes").tolist()
    )
    self.assertEqual(len(results), 2)
    # Check that both have detections
    self.assertGreater(len(results[0]["masks"]), 0)
    self.assertGreater(len(results[1]["masks"]), 0)
    # Check exact values for top detection in each image (pixel-space boxes)
    top_pp_score_0 = results[0]["scores"][0]
    top_pp_box_0 = results[0]["boxes"][0]
    top_pp_score_1 = results[1]["scores"][0]
    top_pp_box_1 = results[1]["boxes"][0]
    torch.testing.assert_close(top_pp_score_0, torch.tensor(0.9655).to(torch_device), atol=1e-4, rtol=1e-4)
    torch.testing.assert_close(
        top_pp_box_0, torch.tensor([-0.8481, 0.6668, 289.3758, 423.4723]).to(torch_device), atol=1e-4, rtol=1e-4
    )
    torch.testing.assert_close(top_pp_score_1, torch.tensor(0.8222).to(torch_device), atol=1e-4, rtol=1e-4)
    torch.testing.assert_close(
        top_pp_box_1, torch.tensor([168.9376, 137.3257, 191.7281, 161.3243]).to(torch_device), atol=1e-4, rtol=1e-4
    )
# TODO add exact values
def test_semantic_segmentation_output(self):
    """Test that semantic segmentation output is produced."""
    image = prepare_coco_cat_image()
    model_inputs = self.processor(images=image, text="ear", return_tensors="pt").to(torch_device)
    with torch.no_grad():
        outputs = self.model(**model_inputs)
    semantic_seg = outputs.semantic_seg
    # One single-channel map per image at the mask decoder's output resolution.
    self.assertEqual(semantic_seg.shape, (1, 1, 288, 288))
    # Its spatial dimensions must line up with the per-instance masks.
    self.assertEqual(semantic_seg.shape[-2:], outputs.pred_masks.shape[-2:])
@require_deterministic_for_xpu
def test_efficient_multi_prompt_single_image(self):
    """Test efficient inference with multiple prompts on a single image using get_vision_features."""
    raw_image = prepare_coco_cat_image()
    # Pre-compute vision embeddings once; they are reused for every prompt below
    img_inputs = self.processor(images=raw_image, return_tensors="pt").to(torch_device)
    with torch.no_grad():
        vision_embeds = self.model.get_vision_features(pixel_values=img_inputs.pixel_values)
    # Run multiple text prompts efficiently (only the text branch re-runs)
    text_prompts = ["ear", "eye"]
    all_results = []
    for prompt in text_prompts:
        text_inputs = self.processor(text=prompt, return_tensors="pt").to(torch_device)
        with torch.no_grad():
            outputs = self.model(vision_embeds=vision_embeds, **text_inputs)
        results = self.processor.post_process_instance_segmentation(
            outputs,
            threshold=0.5,
            mask_threshold=0.5,
            target_sizes=img_inputs.get("original_sizes").tolist(),
        )[0]
        all_results.append(results)
    # Check that we get results for both prompts
    self.assertEqual(len(all_results), 2)
    # Verify outputs are equivalent to running with pixel_values directly
    text_inputs = self.processor(text="ear", return_tensors="pt").to(torch_device)
    with torch.no_grad():
        outputs_with_embeds = self.model(vision_embeds=vision_embeds, **text_inputs)
    inputs_direct = self.processor(images=raw_image, text="ear", return_tensors="pt").to(torch_device)
    with torch.no_grad():
        outputs_direct = self.model(**inputs_direct)
    # Outputs should be identical (tight tolerance: same weights, same input)
    torch.testing.assert_close(outputs_with_embeds.pred_logits, outputs_direct.pred_logits, atol=1e-5, rtol=1e-5)
    torch.testing.assert_close(outputs_with_embeds.pred_boxes, outputs_direct.pred_boxes, atol=1e-5, rtol=1e-5)
    torch.testing.assert_close(outputs_with_embeds.pred_masks, outputs_direct.pred_masks, atol=1e-5, rtol=1e-5)
@require_deterministic_for_xpu
def test_efficient_single_prompt_multi_images(self):
    """Test efficient inference with the same prompt on multiple images using get_text_features."""
    cat_image = prepare_coco_cat_image()
    kitchen_image = prepare_coco_kitchen_image()
    # Encode the text prompt once; the cached embeddings are reused for every image.
    phrase = "handle"
    phrase_inputs = self.processor(text=phrase, return_tensors="pt").to(torch_device)
    with torch.no_grad():
        cached_text = self.model.get_text_features(**phrase_inputs)
    # attention_mask has to accompany text_embeds so padding is masked correctly.
    per_image_results = []
    for image in (cat_image, kitchen_image):
        image_inputs = self.processor(images=image, return_tensors="pt").to(torch_device)
        with torch.no_grad():
            image_outputs = self.model(
                text_embeds=cached_text,
                attention_mask=phrase_inputs.attention_mask,
                **image_inputs,
            )
        processed = self.processor.post_process_instance_segmentation(
            image_outputs,
            threshold=0.5,
            mask_threshold=0.5,
            target_sizes=image_inputs.get("original_sizes").tolist(),
        )[0]
        per_image_results.append(processed)
    # One result set per image.
    self.assertEqual(len(per_image_results), 2)
    # Cached-text inference must be numerically identical to feeding input_ids directly.
    image_inputs = self.processor(images=kitchen_image, return_tensors="pt").to(torch_device)
    with torch.no_grad():
        outputs_with_embeds = self.model(
            text_embeds=cached_text,
            attention_mask=phrase_inputs.attention_mask,
            **image_inputs,
        )
    direct_inputs = self.processor(images=kitchen_image, text=phrase, return_tensors="pt").to(torch_device)
    with torch.no_grad():
        outputs_direct = self.model(**direct_inputs)
    for field in ("pred_logits", "pred_boxes", "pred_masks"):
        torch.testing.assert_close(
            getattr(outputs_with_embeds, field), getattr(outputs_direct, field), atol=1e-5, rtol=1e-5
        )
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/sam3/test_modeling_sam3.py",
"license": "Apache License 2.0",
"lines": 1314,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:tests/models/sam3_tracker/test_modeling_sam3_tracker.py | # Copyright 2025 the HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch SAM2 model."""
import gc
import tempfile
import unittest
import requests
from transformers import (
Sam3TrackerConfig,
Sam3TrackerMaskDecoderConfig,
Sam3TrackerPromptEncoderConfig,
pipeline,
)
from transformers.testing_utils import (
backend_empty_cache,
require_torch,
slow,
torch_device,
)
from transformers.utils import is_torch_available, is_vision_available
from transformers.video_utils import load_video
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import Sam3TrackerModel, Sam3TrackerProcessor, Sam3VisionConfig, Sam3ViTConfig
if is_vision_available():
from PIL import Image
class Sam3TrackerPromptEncoderTester:
    """Builds a tiny Sam3TrackerPromptEncoderConfig plus dummy point prompts for unit tests."""

    def __init__(
        self,
        hidden_size=32,
        input_image_size=128,
        patch_size=16,
        mask_input_channels=8,
        num_point_embeddings=4,
        hidden_act="gelu",
        is_training=True,
        batch_size=2,
    ):
        self.hidden_size = hidden_size
        self.input_image_size = input_image_size
        self.patch_size = patch_size
        self.mask_input_channels = mask_input_channels
        self.num_point_embeddings = num_point_embeddings
        self.hidden_act = hidden_act
        self.is_training = is_training
        # Fix: prepare_config_and_inputs reads self.batch_size, which was never assigned and
        # raised AttributeError when called; expose it as a backward-compatible trailing parameter.
        self.batch_size = batch_size

    def get_config(self):
        """Return a Sam3TrackerPromptEncoderConfig built from this tester's hyperparameters."""
        return Sam3TrackerPromptEncoderConfig(
            image_size=self.input_image_size,
            patch_size=self.patch_size,
            mask_input_channels=self.mask_input_channels,
            hidden_size=self.hidden_size,
            num_point_embeddings=self.num_point_embeddings,
            hidden_act=self.hidden_act,
        )

    def prepare_config_and_inputs(self):
        """Return (config, dummy point prompts of shape (batch_size, 3, 2))."""
        dummy_points = floats_tensor([self.batch_size, 3, 2])
        config = self.get_config()
        return config, dummy_points
class Sam3TrackerMaskDecoderTester:
    """Builds a tiny Sam3TrackerMaskDecoderConfig plus dummy decoder inputs for unit tests."""

    def __init__(
        self,
        hidden_size=32,
        hidden_act="relu",
        mlp_dim=64,
        num_hidden_layers=2,
        num_attention_heads=4,
        attention_downsample_rate=2,
        num_multimask_outputs=3,
        iou_head_depth=3,
        iou_head_hidden_dim=32,
        is_training=True,
        batch_size=2,
    ):
        self.hidden_size = hidden_size
        self.hidden_act = hidden_act
        self.mlp_dim = mlp_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.attention_downsample_rate = attention_downsample_rate
        self.num_multimask_outputs = num_multimask_outputs
        self.iou_head_depth = iou_head_depth
        self.iou_head_hidden_dim = iou_head_hidden_dim
        self.is_training = is_training
        # Fix: prepare_config_and_inputs reads self.batch_size, which was never assigned and
        # raised AttributeError when called; expose it as a backward-compatible trailing parameter.
        self.batch_size = batch_size

    def get_config(self):
        """Return a Sam3TrackerMaskDecoderConfig built from this tester's hyperparameters."""
        return Sam3TrackerMaskDecoderConfig(
            hidden_size=self.hidden_size,
            hidden_act=self.hidden_act,
            mlp_dim=self.mlp_dim,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            attention_downsample_rate=self.attention_downsample_rate,
            num_multimask_outputs=self.num_multimask_outputs,
            iou_head_depth=self.iou_head_depth,
            iou_head_hidden_dim=self.iou_head_hidden_dim,
        )

    def prepare_config_and_inputs(self):
        """Return (config, dict with a dummy (batch_size, hidden_size) image embedding)."""
        config = self.get_config()
        dummy_inputs = {
            "image_embedding": floats_tensor([self.batch_size, self.hidden_size]),
        }
        return config, dummy_inputs
class Sam3TrackerModelTester:
    """Builds a tiny Sam3TrackerConfig and random pixel inputs for the unit-test suite."""

    def __init__(
        self,
        parent,
        num_channels=3,
        image_size=224,  # Keep reasonable size: 224 = 16 * 14
        hidden_size=32,
        patch_size=14,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=64,
        window_size=8,  # 224/14 = 16 patches, 16/2 = 8 per window
        global_attn_indexes=None,
        fpn_hidden_size=32,
        scale_factors=None,
        backbone_feature_sizes=None,
        memory_encoder_hidden_size=32,
        batch_size=2,
        is_training=True,
    ):
        # Fix: backbone_feature_sizes previously used a mutable list literal as its default,
        # which is shared across all instances; use the same None-sentinel pattern as the
        # other list parameters so in-place mutation can never leak between testers.
        if global_attn_indexes is None:
            global_attn_indexes = [0, 1]
        if scale_factors is None:
            scale_factors = [2.0, 1.0, 0.5]  # 3 scales to match backbone_feature_sizes
        if backbone_feature_sizes is None:
            backbone_feature_sizes = [[32, 32], [16, 16], [8, 8]]
        self.parent = parent
        self.num_channels = num_channels
        self.image_size = image_size
        self.hidden_size = hidden_size
        self.patch_size = patch_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.global_attn_indexes = global_attn_indexes
        self.fpn_hidden_size = fpn_hidden_size
        self.scale_factors = scale_factors
        self.backbone_feature_sizes = backbone_feature_sizes
        self.batch_size = batch_size
        self.is_training = is_training
        self.memory_encoder_hidden_size = memory_encoder_hidden_size
        self.prompt_encoder_tester = Sam3TrackerPromptEncoderTester()
        self.mask_decoder_tester = Sam3TrackerMaskDecoderTester()

    def prepare_config_and_inputs(self):
        """Return (config, random pixel_values of shape (batch, channels, H, W))."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        """Assemble the full Sam3TrackerConfig from the backbone/vision/prompt/decoder sub-configs."""
        backbone_config = Sam3ViTConfig(
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            num_channels=self.num_channels,
            image_size=self.image_size,
            patch_size=self.patch_size,
            window_size=self.window_size,
            global_attn_indexes=self.global_attn_indexes,
        )
        vision_config = Sam3VisionConfig(
            backbone_config=backbone_config,
            fpn_hidden_size=self.fpn_hidden_size,
            scale_factors=self.scale_factors,
            backbone_feature_sizes=self.backbone_feature_sizes,
        )
        prompt_encoder_config = self.prompt_encoder_tester.get_config()
        mask_decoder_config = self.mask_decoder_tester.get_config()
        return Sam3TrackerConfig(
            vision_config=vision_config,
            prompt_encoder_config=prompt_encoder_config,
            mask_decoder_config=mask_decoder_config,
            memory_attention_hidden_size=self.hidden_size,
            memory_encoder_hidden_size=self.memory_encoder_hidden_size,
            image_size=self.image_size,
            mask_downsampler_embed_dim=32,
            memory_fuser_embed_dim=32,
            memory_attention_num_layers=1,
            memory_attention_feed_forward_hidden_size=32,
        )

    def create_and_check_model(self, config, pixel_values):
        """Run a forward pass and check the iou_scores / pred_masks leading dimensions."""
        model = Sam3TrackerModel(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        self.parent.assertEqual(result.iou_scores.shape, (self.batch_size, 1, 3))
        self.parent.assertEqual(result.pred_masks.shape[:3], (self.batch_size, 1, 3))

    def prepare_config_and_inputs_for_common(self):
        """Adapter for ModelTesterMixin: return (config, inputs_dict)."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class Sam3TrackerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as SAM's vision encoder does not use input_ids, inputs_embeds,
    attention_mask and seq_length.
    """

    all_model_classes = (Sam3TrackerModel,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": Sam3TrackerModel, "mask-generation": Sam3TrackerModel} if is_torch_available() else {}
    )
    test_resize_embeddings = False
    # Marks the model as composite (multiple sub-configs) for the SDPA dispatch test below.
    _is_composite = True

    def setUp(self):
        # Model tester supplies tiny configs/inputs; config tester runs the common config checks.
        self.model_tester = Sam3TrackerModelTester(self)
        common_properties = ["initializer_range"]
        self.config_tester = ConfigTester(
            self, config_class=Sam3TrackerConfig, has_text_modality=False, common_properties=common_properties
        )

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="SAM's vision encoder does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_get_set_embeddings(self):
        """Input embeddings must be an nn.Module; output embeddings are absent or a Linear head."""
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    # Overriding as Sam3TrackerModel returns vision_attentions
    def test_attention_outputs(self):
        """Check that vision_attentions has one entry per hidden layer, via kwarg and via config."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            # Force eager attention so attention weights are actually materialized.
            model = model_class._from_config(config, attn_implementation="eager")
            # NOTE: config is rebound to the model's own (possibly normalized) config here.
            config = model.config
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.vision_attentions
            expected_num_attentions = self.model_tester.num_hidden_layers
            self.assertEqual(len(attentions), expected_num_attentions)
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.mask_decoder_config.output_attentions = True
            config.vision_config.output_attentions = True
            config.vision_config.backbone_config.output_attentions = True
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.vision_attentions
            self.assertEqual(len(attentions), expected_num_attentions)
            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.vision_attentions
            self.assertEqual(len(attentions), expected_num_attentions)

    # Override as Sam3TrackerModel has different sub-modules
    def test_sdpa_can_dispatch_composite_models(self):
        """
        Tests if composite models dispatch correctly on SDPA/eager when requested so when loading the model.
        This tests only by looking at layer names, as usually SDPA layers are called "SDPAAttention".
        In contrast to the above test, this one checks if the "config._attn_implementation" is a dict after the model
        is loaded, because we manually replicate requested attn implementation on each sub-config when loading.
        See https://github.com/huggingface/transformers/pull/32238 for more info
        The test tries to cover most general cases of composite models, VLMs with vision and text configs. Any model
        that has a different set of sub-configs has to overwrite this test.
        """
        if not self.has_attentions:
            self.skipTest(reason="Model architecture does not support attentions")
        if not self._is_composite:
            self.skipTest(f"{self.all_model_classes[0].__name__} does not support SDPA")
        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            model = model_class(config)
            with tempfile.TemporaryDirectory() as tmpdirname:
                # Round-trip through save/load so the requested attn implementation is
                # propagated onto every sub-config at load time.
                model.save_pretrained(tmpdirname)
                model_sdpa = model_class.from_pretrained(tmpdirname, attn_implementation="sdpa")
                model_sdpa = model_sdpa.eval().to(torch_device)
                vision_encoder_sdpa = getattr(model_sdpa, "vision_encoder")
                mask_decoder_sdpa = getattr(model_sdpa, "mask_decoder")
                # `None` as it is the requested one which will be assigned to each sub-config
                # Sub-model will dispatch to SDPA if it can (checked below that `SDPA` layers are present)
                self.assertTrue(mask_decoder_sdpa.config._attn_implementation == "sdpa")
                self.assertTrue(vision_encoder_sdpa.config._attn_implementation == "sdpa")
                model_eager = model_class.from_pretrained(tmpdirname, attn_implementation="eager")
                model_eager = model_eager.eval().to(torch_device)
                self.assertTrue(getattr(model_eager, "mask_decoder").config._attn_implementation == "eager")
                self.assertTrue(getattr(model_eager, "vision_encoder").config._attn_implementation == "eager")
                # The eager-loaded model must not contain any submodule still flagged as SDPA.
                for name, submodule in model_eager.named_modules():
                    class_name = submodule.__class__.__name__
                    if (
                        class_name.endswith("Attention")
                        and getattr(submodule, "config", None)
                        and submodule.config._attn_implementation == "sdpa"
                    ):
                        raise ValueError("The eager model should not have SDPA attention layers")

    # Override as Sam3TrackerModel doesn't have hidden states
    def flash_attn_inference_equivalence(
        self, attn_implementation: str, padding_side: str, atol: float = 4e-2, rtol: float = 4e-2
    ):
        r"""
        Tests the equivalence between the eager and flash attention implementations.
        This test is only for inference and runs with `dtype=torch.bfloat16`.
        """
        if not self.has_attentions:
            self.skipTest(reason="Model architecture does not support attentions")
        # TODO take a look at this
        # head size needs to be a multiple of 8 but needs more adjustments than our current `_prepare_config_headdim`
        if attn_implementation != "flash_attention_2":
            self.skipTest(
                reason="Model fails for every other FA implementation than FA2 due to dim incompatibilities."
            )
        for model_class in self.all_model_classes:
            if not getattr(model_class, "_supports_flash_attn"):
                self.skipTest(f"{model_class.__name__} does not support Flash Attention")
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            model = model_class(config)
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                # Load the same weights twice: once with FA, once with the default implementation.
                model_fa = model_class.from_pretrained(
                    tmpdirname, dtype=torch.bfloat16, attn_implementation=attn_implementation
                )
                model_fa.to(torch_device)
                model = model_class.from_pretrained(tmpdirname, dtype=torch.bfloat16)
                model.to(torch_device)
                dummy_input = inputs_dict[model.main_input_name][:1]
                if dummy_input.dtype in [torch.float32, torch.float16]:
                    dummy_input = dummy_input.to(torch.bfloat16)
                dummy_attention_mask = inputs_dict.get("attention_mask", None)
                if dummy_attention_mask is not None:
                    dummy_attention_mask = dummy_attention_mask[:1]
                    # Simulate padding on the requested side so FA's unpadding path is exercised.
                    if padding_side == "left":
                        dummy_attention_mask[:, 1:] = 1
                        dummy_attention_mask[:, :1] = 0
                    else:
                        dummy_attention_mask[:, :-1] = 1
                        dummy_attention_mask[:, -1:] = 0
                if model.config.is_encoder_decoder:
                    decoder_input_ids = inputs_dict.get("decoder_input_ids", dummy_input)[:1]
                    outputs = model(dummy_input, decoder_input_ids=decoder_input_ids, output_hidden_states=True)
                    outputs_fa = model_fa(dummy_input, decoder_input_ids=decoder_input_ids, output_hidden_states=True)
                else:
                    outputs = model(dummy_input, output_hidden_states=True)
                    outputs_fa = model_fa(dummy_input, output_hidden_states=True)
                # Compare the last vision hidden state instead of hidden_states (not returned here).
                logits = outputs.vision_hidden_states[-1]
                logits_fa = outputs_fa.vision_hidden_states[-1]
                assert torch.allclose(logits_fa, logits, atol=atol, rtol=rtol)
                if model.config.is_encoder_decoder:
                    other_inputs = {
                        "decoder_input_ids": decoder_input_ids,
                        "decoder_attention_mask": dummy_attention_mask,
                        "output_hidden_states": True,
                    }
                    if dummy_attention_mask is not None:
                        other_inputs["attention_mask"] = dummy_attention_mask
                    outputs = model(dummy_input, **other_inputs)
                    outputs_fa = model_fa(dummy_input, **other_inputs)
                else:
                    other_inputs = {
                        "output_hidden_states": True,
                    }
                    if dummy_attention_mask is not None:
                        other_inputs["attention_mask"] = dummy_attention_mask
                    outputs = model(dummy_input, **other_inputs)
                    outputs_fa = model_fa(dummy_input, **other_inputs)
                logits = outputs.vision_hidden_states[-1]
                logits_fa = outputs_fa.vision_hidden_states[-1]
                # Only compare the non-padded positions.
                if padding_side == "left":
                    assert torch.allclose(logits_fa[1:], logits[1:], atol=atol, rtol=rtol)
                    # check with inference + dropout
                    model.train()
                    _ = model_fa(dummy_input, **other_inputs)
                else:
                    assert torch.allclose(logits_fa[:-1], logits[:-1], atol=atol, rtol=rtol)

    # Override as difference slightly higher than the threshold
    def test_batching_equivalence(self, atol=5e-4, rtol=5e-4):
        super().test_batching_equivalence(atol=atol, rtol=rtol)

    @unittest.skip(reason="Hidden_states is tested in sub modules tests")
    def test_hidden_states_output(self):
        pass

    @unittest.skip(reason="Tested on the vision only counterpart; only works if vision related input is given")
    def test_retain_grad_hidden_states_attentions(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        # NOTE(review): this loads a SAM2 checkpoint into the Sam3Tracker class — confirm intended.
        model_name = "facebook/sam2.1-hiera-tiny"
        model = Sam3TrackerModel.from_pretrained(model_name)
        self.assertIsNotNone(model)

    def test_sdpa_can_compile_dynamic(self):
        self.skipTest(reason="SAM2 model can't be compiled dynamic yet")
def prepare_image():
    """Download the SAM2 truck fixture image and return it as an RGB PIL image."""
    img_url = "https://huggingface.co/datasets/hf-internal-testing/sam2-fixtures/resolve/main/truck.jpg"
    response = requests.get(img_url, stream=True)
    return Image.open(response.raw).convert("RGB")
def prepare_groceries_image():
    """Download the SAM2 groceries fixture image and return it as an RGB PIL image."""
    img_url = "https://huggingface.co/datasets/hf-internal-testing/sam2-fixtures/resolve/main/groceries.jpg"
    response = requests.get(img_url, stream=True)
    return Image.open(response.raw).convert("RGB")
def prepare_dog_img():
    """Download the documentation dog fixture image and return it as an RGB PIL image."""
    img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/dog-sam.png"
    response = requests.get(img_url, stream=True)
    return Image.open(response.raw).convert("RGB")
def prepare_video():
    """Download the SAM2 bedroom fixture video and return its decoded frames."""
    video_url = "https://huggingface.co/datasets/hf-internal-testing/sam2-fixtures/resolve/main/bedroom.mp4"
    frames, _ = load_video(video_url)
    return frames
@slow
class Sam3TrackerModelIntegrationTest(unittest.TestCase):
    """Integration tests against the real facebook/sam3 checkpoint (requires network + weights)."""

    def setUp(self):
        super().setUp()
        checkpoint_path = "facebook/sam3"
        # Force float32 so the pinned tensor values below are reproducible across devices.
        self.model = Sam3TrackerModel.from_pretrained(checkpoint_path).to(torch.float32)
        self.processor = Sam3TrackerProcessor.from_pretrained(checkpoint_path)
        self.model.to(torch_device)
        self.model.eval()

    def tearDown(self):
        super().tearDown()
        # Release accelerator memory between tests.
        gc.collect()
        backend_empty_cache(torch_device)

    def test_inference_mask_generation_one_point_multimask(self):
        """Single positive point, multimask output: checks shapes and pinned score/logit values."""
        raw_image = prepare_image()
        input_points = [[[[500, 375]]]]
        input_labels = [[[1]]]
        inputs = self.processor(
            images=raw_image, input_points=input_points, input_labels=input_labels, return_tensors="pt"
        ).to(torch_device)
        with torch.no_grad():
            outputs = self.model(**inputs)
        self.assertEqual(outputs.iou_scores.shape, (1, 1, 3))
        self.assertEqual(outputs.pred_masks.shape, (1, 1, 3, 288, 288))
        # Sort the three candidate masks by IoU score before comparing to pinned values.
        sorted_indices = torch.argsort(outputs.iou_scores.squeeze(), descending=True)
        scores = outputs.iou_scores.squeeze()[sorted_indices]
        masks_logits = outputs.pred_masks.squeeze()[sorted_indices][0, :3, :3]
        torch.testing.assert_close(
            scores,
            torch.tensor([0.9106, 0.5326, 0.0379]).to(torch_device),
            atol=1e-4,
            rtol=1e-4,
        )
        torch.testing.assert_close(
            masks_logits,
            torch.tensor(
                [
                    [-18.9093, -31.1757, -23.6851],
                    [-20.3388, -31.0213, -29.8815],
                    [-20.7554, -29.4530, -30.1776],
                ]
            ).to(torch_device),
            atol=1e-4,
            rtol=1e-4,
        )

    def test_inference_mask_generation_one_point_no_multimask(self):
        """Single positive point with multimask_output=False: exactly one mask is returned."""
        raw_image = prepare_image()
        input_points = [[[[500, 375]]]]
        input_labels = [[[1]]]
        inputs = self.processor(
            images=raw_image, input_points=input_points, input_labels=input_labels, return_tensors="pt"
        ).to(torch_device)
        with torch.no_grad():
            outputs = self.model(**inputs, multimask_output=False)
        self.assertEqual(outputs.iou_scores.shape, (1, 1, 1))
        self.assertEqual(outputs.pred_masks.shape, (1, 1, 1, 288, 288))
        scores = outputs.iou_scores.squeeze((0, 1))
        masks_logits = outputs.pred_masks.squeeze((0, 1))[0, :3, :3]
        torch.testing.assert_close(scores, torch.tensor([0.9474]).to(torch_device), atol=1e-4, rtol=1e-4)
        torch.testing.assert_close(
            masks_logits,
            torch.tensor(
                [
                    [-8.1500, -12.3282, -9.6828],
                    [-9.0512, -11.6470, -11.6363],
                    [-9.2391, -11.9863, -12.4858],
                ]
            ).to(torch_device),
            atol=1e-4,
            rtol=1e-4,
        )

    def test_inference_mask_generation_batched_images_multi_points(self):
        """Batch of two images; the second image uses one positive and one negative point."""
        raw_image1 = prepare_image()
        raw_image2 = prepare_dog_img()
        input_points = [[[[500, 375]]], [[[770, 200], [730, 120]]]]
        input_labels = [[[1]], [[1, 0]]]
        inputs = self.processor(
            images=[raw_image1, raw_image2], input_points=input_points, input_labels=input_labels, return_tensors="pt"
        ).to(torch_device)
        with torch.no_grad():
            outputs = self.model(**inputs)
        self.assertEqual(outputs.iou_scores.shape, (2, 1, 3))
        self.assertEqual(outputs.pred_masks.shape, (2, 1, 3, 288, 288))
        # Sort each image's candidate masks by IoU score independently.
        sorted_indices = torch.argsort(outputs.iou_scores[0].squeeze(), descending=True)
        scores1 = outputs.iou_scores[0].squeeze()[sorted_indices]
        masks_logits1 = outputs.pred_masks[0].squeeze()[sorted_indices][0, :3, :3]
        sorted_indices = torch.argsort(outputs.iou_scores[1].squeeze(), descending=True)
        scores2 = outputs.iou_scores[1].squeeze()[sorted_indices]
        masks_logits2 = outputs.pred_masks[1].squeeze()[sorted_indices][0, :3, :3]
        torch.testing.assert_close(
            scores1,
            torch.tensor([0.8837, 0.5837, 0.0372]).to(torch_device),
            atol=1e-4,
            rtol=1e-4,
        )
        torch.testing.assert_close(
            masks_logits1,
            torch.tensor(
                [
                    [-19.4976, -32.4384, -24.2687],
                    [-20.9939, -32.2782, -31.2067],
                    [-21.2991, -30.3071, -31.1489],
                ]
            ).to(torch_device),
            atol=1e-4,
            rtol=1e-4,
        )
        torch.testing.assert_close(
            scores2,
            torch.tensor([0.7675, 0.7505, 0.5348]).to(torch_device),
            atol=1e-4,
            rtol=1e-4,
        )
        torch.testing.assert_close(
            masks_logits2,
            torch.tensor(
                [
                    [-10.3051, -9.9056, -10.5699],
                    [-8.8009, -11.1684, -10.7158],
                    [-9.6653, -10.9755, -10.3231],
                ]
            ).to(torch_device),
            atol=1e-4,
            rtol=1e-4,
        )

    def test_inference_mask_generation_batched_images_batched_points_multi_points(self):
        """Batch of two images with multiple prompt groups per image, single-mask output."""
        raw_image1 = prepare_image()
        raw_image2 = prepare_groceries_image()
        input_points = [[[[500, 375]], [[650, 750]]], [[[400, 300]], [[630, 300], [550, 300]]]]
        input_labels = [[[1], [1]], [[1], [1, 1]]]
        inputs = self.processor(
            images=[raw_image1, raw_image2], input_points=input_points, input_labels=input_labels, return_tensors="pt"
        ).to(torch_device)
        with torch.no_grad():
            outputs = self.model(**inputs, multimask_output=False)
        self.assertEqual(outputs.iou_scores.shape, (2, 2, 1))
        self.assertEqual(outputs.pred_masks.shape, (2, 2, 1, 288, 288))
        torch.testing.assert_close(
            outputs.iou_scores,
            torch.tensor([[[0.9370], [0.9425]], [[0.9734], [0.9262]]]).to(torch_device),
            atol=1e-4,
            rtol=1e-4,
        )
        torch.testing.assert_close(
            outputs.pred_masks[:, :, :, :2, :2],
            torch.tensor(
                [
                    [
                        [[[-7.6936, -11.7077], [-8.6289, -11.0604]]],
                        [[[-6.2675, -9.9616], [-6.5427, -9.0548]]],
                    ],
                    [
                        [[[-10.3143, -13.0117], [-10.2967, -12.3099]]],
                        [[[-9.1198, -10.1437], [-8.2902, -10.6460]]],
                    ],
                ]
            ).to(torch_device),
            atol=1e-4,
            rtol=1e-4,
        )

    def test_inference_batched_images_batched_boxes(self):
        """Batch of two images with four box prompts each, single-mask output."""
        raw_image1 = prepare_image()
        raw_image2 = prepare_groceries_image()
        input_boxes = [
            [[75, 275, 1725, 850], [425, 600, 700, 875], [1375, 550, 1650, 800], [1240, 675, 1400, 750]],
            [[450, 170, 520, 350], [350, 190, 450, 350], [500, 170, 580, 350], [580, 170, 640, 350]],
        ]
        inputs = self.processor(images=[raw_image1, raw_image2], input_boxes=input_boxes, return_tensors="pt").to(
            torch_device
        )
        with torch.no_grad():
            outputs = self.model(**inputs, multimask_output=False)
        self.assertEqual(outputs.iou_scores.shape, (2, 4, 1))
        self.assertEqual(outputs.pred_masks.shape, (2, 4, 1, 288, 288))
        torch.testing.assert_close(
            outputs.iou_scores,
            torch.tensor(
                [
                    [[0.9862], [0.9666], [0.9588], [0.9331]],
                    [[0.9757], [0.9838], [0.9785], [0.9755]],
                ]
            ).to(torch_device),
            atol=1e-4,
            rtol=1e-4,
        )
        torch.testing.assert_close(
            outputs.pred_masks[:, :, :, :2, :2],
            torch.tensor(
                [
                    [
                        [[[-12.5972, -19.5327], [-12.4126, -18.3935]]],
                        [[[-20.2715, -31.6163], [-22.3341, -27.6888]]],
                        [[[-20.9112, -31.4296], [-22.9174, -26.5892]]],
                        [[[-23.6995, -37.8614], [-26.3752, -31.1497]]],
                    ],
                    [
                        [[[-21.7436, -29.5702], [-24.3507, -25.5635]]],
                        [[[-28.0691, -38.6044], [-31.3014, -33.8172]]],
                        [[[-25.3085, -33.9384], [-27.7918, -30.1258]]],
                        [[[-26.7339, -36.4405], [-28.8027, -31.8549]]],
                    ],
                ]
            ).to(torch_device),
            atol=1e-4,
            rtol=1e-4,
        )

    def test_inference_mask_generation_from_existing_points_and_mask(self):
        """Refinement flow: reuse a previous mask and image embeddings with new point prompts."""
        raw_image = prepare_image()
        input_points = [[[[500, 375]]]]
        input_labels = [[[1]]]
        original_inputs = self.processor(
            images=raw_image, input_points=input_points, input_labels=input_labels, return_tensors="pt"
        ).to(torch_device)
        with torch.no_grad():
            outputs = self.model(**original_inputs)
        # best mask to use as input for new points
        mask_input = outputs.pred_masks[:, :, torch.argmax(outputs.iou_scores)]
        new_input_points = [[[[500, 375], [1125, 625]]]]
        new_input_labels = [[[1, 1]]]
        # No images here: only prompts + original_sizes, since embeddings are reused below.
        inputs = self.processor(
            input_points=new_input_points,
            input_labels=new_input_labels,
            original_sizes=original_inputs["original_sizes"],
            return_tensors="pt",
        ).to(torch_device)
        with torch.no_grad():
            outputs = self.model(
                **inputs,
                input_masks=mask_input,
                image_embeddings=outputs.image_embeddings,
                multimask_output=False,
            )
        self.assertEqual(outputs.iou_scores.shape, (1, 1, 1))
        self.assertEqual(outputs.pred_masks.shape, (1, 1, 1, 288, 288))
        torch.testing.assert_close(
            outputs.iou_scores, torch.tensor([[[0.9809]]]).to(torch_device), atol=1e-4, rtol=1e-4
        )
        torch.testing.assert_close(
            outputs.pred_masks[:, :, 0, :3, :3],
            torch.tensor(
                [
                    [
                        [
                            [-5.3111, -7.4920, -5.5444],
                            [-4.7685, -6.3513, -6.2969],
                            [-4.8471, -5.1722, -6.5492],
                        ]
                    ]
                ]
            ).to(torch_device),
            atol=1e-4,
            rtol=1e-4,
        )
        # with negative point
        new_input_points = [[[[500, 375], [1125, 625]]]]
        new_input_labels = [[[1, 0]]]
        inputs = self.processor(
            input_points=new_input_points,
            input_labels=new_input_labels,
            original_sizes=original_inputs["original_sizes"],
            return_tensors="pt",
        ).to(torch_device)
        with torch.no_grad():
            outputs = self.model(
                **inputs,
                input_masks=mask_input,
                image_embeddings=outputs.image_embeddings,
                multimask_output=False,
            )
        self.assertEqual(outputs.iou_scores.shape, (1, 1, 1))
        self.assertEqual(outputs.pred_masks.shape, (1, 1, 1, 288, 288))
        torch.testing.assert_close(
            outputs.iou_scores, torch.tensor([[[0.9625]]]).to(torch_device), atol=1e-4, rtol=1e-4
        )
        torch.testing.assert_close(
            outputs.pred_masks[:, :, 0, :3, :3],
            torch.tensor(
                [
                    [
                        [
                            [-13.4726, -19.9250, -16.3620],
                            [-13.5886, -18.7266, -17.6766],
                            [-14.6962, -19.3814, -19.9888],
                        ]
                    ]
                ]
            ).to(torch_device),
            atol=1e-4,
            rtol=1e-4,
        )

    def test_dummy_pipeline_generation(self):
        """Smoke test: the mask-generation pipeline runs end-to-end on the checkpoint."""
        generator = pipeline("mask-generation", model="facebook/sam3", device=torch_device)
        raw_image = prepare_image()
        _ = generator(raw_image, points_per_batch=64)
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/sam3_tracker/test_modeling_sam3_tracker.py",
"license": "Apache License 2.0",
"lines": 722,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:tests/models/sam3_tracker_video/test_modeling_sam3_tracker_video.py | # Copyright 2025 the HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch SAM2 model."""
import gc
import unittest
import requests
from transformers.testing_utils import (
backend_empty_cache,
is_torch_bf16_available_on_device,
is_torch_fp16_available_on_device,
slow,
torch_device,
)
from transformers.utils import is_torch_available, is_vision_available
from transformers.video_utils import load_video
if is_torch_available():
import torch
from transformers import Sam3TrackerVideoModel, Sam3TrackerVideoProcessor
if is_vision_available():
from PIL import Image
def prepare_image():
    """Download the SAM2 truck fixture image and return it as an RGB PIL image."""
    img_url = "https://huggingface.co/datasets/hf-internal-testing/sam2-fixtures/resolve/main/truck.jpg"
    response = requests.get(img_url, stream=True)
    return Image.open(response.raw).convert("RGB")
def prepare_groceries_image():
    """Download the SAM2 groceries fixture image and return it as an RGB PIL image."""
    img_url = "https://huggingface.co/datasets/hf-internal-testing/sam2-fixtures/resolve/main/groceries.jpg"
    response = requests.get(img_url, stream=True)
    return Image.open(response.raw).convert("RGB")
def prepare_dog_img():
img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/dog-sam.png"
raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
return raw_image
def prepare_video():
video_url = "https://huggingface.co/datasets/hf-internal-testing/sam2-fixtures/resolve/main/bedroom.mp4"
raw_video, _ = load_video(video_url)
return raw_video
@slow
class Sam3TrackerVideoModelIntegrationTest(unittest.TestCase):
def setUp(self):
super().setUp()
self.video_model = Sam3TrackerVideoModel.from_pretrained("facebook/sam3").to(torch.float32)
self.processor = Sam3TrackerVideoProcessor.from_pretrained("facebook/sam3")
self.video_model.to(torch_device)
self.video_model.eval()
def tearDown(self):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
backend_empty_cache(torch_device)
def test_inference_mask_generation_video_one_point(self):
raw_video = prepare_video()
inference_session = self.processor.init_video_session(video=raw_video, inference_device=torch_device)
ann_frame_idx = 0 # the frame index we interact with
ann_obj_id = 1 # give a unique id to each object we interact with (it can be any integers)
self.processor.add_inputs_to_inference_session(
inference_session=inference_session,
frame_idx=ann_frame_idx,
obj_ids=ann_obj_id,
input_points=[[[[210, 350]]]],
input_labels=[[[1]]],
)
outputs = self.video_model(inference_session=inference_session, frame_idx=ann_frame_idx)
low_res_masks = outputs.pred_masks
self.assertEqual(low_res_masks.shape, (1, 1, 288, 288))
video_res_masks = self.processor.post_process_masks([low_res_masks], [raw_video.shape[-3:-1]], binarize=False)[
0
]
self.assertEqual(video_res_masks.shape, (1, 1, raw_video.shape[-3], raw_video.shape[-2]))
torch.testing.assert_close(
video_res_masks[0, 0, :3, :3],
torch.tensor(
[[-13.5762, -13.5762, -13.7167], [-13.0870, -13.0870, -13.5405], [-12.2173, -12.2173, -13.2273]]
).to(torch_device),
atol=1e-4,
rtol=1e-4,
)
# test propagate in video frames
frames = []
for sam2_video_output in self.video_model.propagate_in_video_iterator(
inference_session=inference_session,
max_frame_num_to_track=2,
):
video_res_masks = self.processor.post_process_masks(
[sam2_video_output.pred_masks], [raw_video.shape[-3:-1]], binarize=False
)[0]
frames.append(video_res_masks)
frames = torch.stack(frames, dim=0)
self.assertEqual(frames.shape, (3, 1, 1, raw_video.shape[-3], raw_video.shape[-2]))
torch.testing.assert_close(
frames[:3, :, :, :2, :2],
torch.tensor(
[
[[[[-13.5762, -13.5762], [-13.0870, -13.0870]]]],
[[[[-19.1203, -19.1203], [-19.5488, -19.5488]]]],
[[[[-19.9951, -19.9951], [-20.5353, -20.5353]]]],
],
).to(torch_device),
atol=1e-4,
rtol=1e-4,
)
def test_inference_mask_generation_video_one_point_propagate_in_video_directly(self):
raw_video = prepare_video()
inference_session = self.processor.init_video_session(video=raw_video, inference_device=torch_device)
ann_frame_idx = 0 # the frame index we interact with
ann_obj_id = 1 # give a unique id to each object we interact with (it can be any integers)
self.processor.add_inputs_to_inference_session(
inference_session=inference_session,
frame_idx=ann_frame_idx,
obj_ids=ann_obj_id,
input_points=[[[[210, 350]]]],
input_labels=[[[1]]],
)
# test propagate in video frames
frames = []
for sam2_video_output in self.video_model.propagate_in_video_iterator(
inference_session=inference_session,
start_frame_idx=ann_frame_idx,
max_frame_num_to_track=2,
):
video_res_masks = self.processor.post_process_masks(
[sam2_video_output.pred_masks], [raw_video.shape[-3:-1]], binarize=False
)[0]
frames.append(video_res_masks)
frames = torch.stack(frames, dim=0)
self.assertEqual(frames.shape, (3, 1, 1, raw_video.shape[-3], raw_video.shape[-2]))
torch.testing.assert_close(
frames[:3, :, :, :2, :2],
torch.tensor(
[
[[[[-13.5762, -13.5762], [-13.0870, -13.0870]]]],
[[[[-19.1203, -19.1203], [-19.5488, -19.5488]]]],
[[[[-19.9951, -19.9951], [-20.5353, -20.5353]]]],
]
).to(torch_device),
atol=1e-4,
rtol=1e-4,
)
def test_inference_mask_generation_video_multi_points(self):
raw_video = prepare_video()
inference_session = self.processor.init_video_session(video=raw_video, inference_device=torch_device)
ann_frame_idx = 0 # the frame index we interact with
ann_obj_id = 1 # give a unique id to each object we interact with (it can be any integers)
self.processor.add_inputs_to_inference_session(
inference_session=inference_session,
frame_idx=ann_frame_idx,
obj_ids=ann_obj_id,
input_points=[[[[210, 350], [250, 220]]]],
input_labels=[[[1, 1]]],
)
outputs = self.video_model(inference_session=inference_session, frame_idx=ann_frame_idx)
low_res_masks = outputs.pred_masks
video_res_masks = self.processor.post_process_masks(
[outputs.pred_masks], [raw_video.shape[-3:-1]], binarize=False
)[0]
self.assertEqual(low_res_masks.shape, (1, 1, 288, 288))
self.assertEqual(video_res_masks.shape, (1, 1, raw_video.shape[-3], raw_video.shape[-2]))
torch.testing.assert_close(
video_res_masks[0, 0, :3, :3],
torch.tensor(
[[-11.9889, -11.9889, -12.2238], [-11.6383, -11.6383, -12.0873], [-11.0150, -11.0150, -11.8446]]
).to(torch_device),
atol=1e-4,
rtol=1e-4,
)
# test propagate in video frames
frames = []
for sam2_video_output in self.video_model.propagate_in_video_iterator(
inference_session=inference_session,
start_frame_idx=ann_frame_idx,
max_frame_num_to_track=2,
):
video_res_masks = self.processor.post_process_masks(
[sam2_video_output.pred_masks], [raw_video.shape[-3:-1]], binarize=False
)[0]
frames.append(video_res_masks)
frames = torch.stack(frames, dim=0)
self.assertEqual(frames.shape, (3, 1, 1, raw_video.shape[-3], raw_video.shape[-2]))
# higher tolerance due to errors propagating from frame to frame
torch.testing.assert_close(
frames[:3, :, :, :2, :2],
torch.tensor(
[
[[[[-11.9889, -11.9889], [-11.6383, -11.6383]]]],
[[[[-20.4502, -20.4502], [-20.6929, -20.6929]]]],
[[[[-22.0344, -22.0344], [-22.4522, -22.4522]]]],
]
).to(torch_device),
atol=1e-2,
rtol=1e-2,
)
def test_inference_mask_generation_video_one_bb(self):
raw_video = prepare_video()
inference_session = self.processor.init_video_session(video=raw_video, inference_device=torch_device)
ann_frame_idx = 0 # the frame index we interact with
ann_obj_id = 1 # give a unique id to each object we interact with (it can be any integers)
self.processor.add_inputs_to_inference_session(
inference_session=inference_session,
frame_idx=ann_frame_idx,
obj_ids=ann_obj_id,
input_boxes=[[[300, 0, 500, 400]]],
)
outputs = self.video_model(inference_session=inference_session, frame_idx=ann_frame_idx)
low_res_masks = outputs.pred_masks
video_res_masks = self.processor.post_process_masks(
[outputs.pred_masks], [raw_video.shape[-3:-1]], binarize=False
)[0]
self.assertEqual(low_res_masks.shape, (1, 1, 288, 288))
self.assertEqual(video_res_masks.shape, (1, 1, raw_video.shape[-3], raw_video.shape[-2]))
torch.testing.assert_close(
video_res_masks[0, 0, :3, :3],
torch.tensor(
[[-17.2589, -17.2589, -17.5130], [-17.2777, -17.2777, -17.9154], [-17.3111, -17.3111, -18.6309]]
).to(torch_device),
atol=1e-4,
rtol=1e-4,
)
# test propagate in video frames
frames = []
for sam2_video_output in self.video_model.propagate_in_video_iterator(
inference_session=inference_session,
start_frame_idx=ann_frame_idx,
max_frame_num_to_track=2,
):
video_res_masks = self.processor.post_process_masks(
[sam2_video_output.pred_masks], [raw_video.shape[-3:-1]], binarize=False
)[0]
frames.append(video_res_masks)
frames = torch.stack(frames, dim=0)
self.assertEqual(frames.shape, (3, 1, 1, raw_video.shape[-3], raw_video.shape[-2]))
# higher tolerance due to errors propagating from frame to frame
torch.testing.assert_close(
frames[:3, :, :, :2, :2],
torch.tensor(
[
[[[[-17.2589, -17.2589], [-17.2777, -17.2777]]]],
[[[[-17.8107, -17.8107], [-18.1581, -18.1581]]]],
[[[[-17.9432, -17.9432], [-18.4637, -18.4637]]]],
]
).to(torch_device),
atol=1e-2,
rtol=1e-2,
)
def test_inference_mask_generation_video_one_point_one_bb(self):
raw_video = prepare_video()
inference_session = self.processor.init_video_session(video=raw_video, inference_device=torch_device)
ann_frame_idx = 0 # the frame index we interact with
ann_obj_id = 1 # give a unique id to each object we interact with (it can be any integers)
self.processor.add_inputs_to_inference_session(
inference_session=inference_session,
frame_idx=ann_frame_idx,
obj_ids=ann_obj_id,
input_boxes=[[[300, 0, 500, 400]]],
input_points=[[[[460, 60]]]],
input_labels=[[[1]]],
)
outputs = self.video_model(inference_session=inference_session, frame_idx=ann_frame_idx)
low_res_masks = outputs.pred_masks
video_res_masks = self.processor.post_process_masks(
[outputs.pred_masks], [raw_video.shape[-3:-1]], binarize=False
)[0]
self.assertEqual(low_res_masks.shape, (1, 1, 288, 288))
self.assertEqual(video_res_masks.shape, (1, 1, raw_video.shape[-3], raw_video.shape[-2]))
torch.testing.assert_close(
video_res_masks[0, 0, :3, :3],
torch.tensor(
[[-14.0206, -14.0206, -14.1225], [-14.0568, -14.0568, -14.4570], [-14.1212, -14.1212, -15.0516]]
).to(torch_device),
atol=1e-4,
rtol=1e-4,
)
# test propagate in video frames
frames = []
for sam2_video_output in self.video_model.propagate_in_video_iterator(
inference_session=inference_session,
start_frame_idx=ann_frame_idx,
max_frame_num_to_track=2,
):
video_res_masks = self.processor.post_process_masks(
[sam2_video_output.pred_masks], [raw_video.shape[-3:-1]], binarize=False
)[0]
frames.append(video_res_masks)
frames = torch.stack(frames, dim=0)
self.assertEqual(frames.shape, (3, 1, 1, raw_video.shape[-3], raw_video.shape[-2]))
# higher tolerance due to errors propagating from frame to frame
torch.testing.assert_close(
frames[:3, :, :, :2, :2],
torch.tensor(
[
[[[[-14.0206, -14.0206], [-14.0568, -14.0568]]]],
[[[[-16.8155, -16.8155], [-17.2954, -17.2954]]]],
[[[[-16.2909, -16.2909], [-16.8887, -16.8887]]]],
]
).to(torch_device),
atol=1e-2,
rtol=1e-2,
)
def test_inference_mask_generation_video_multi_objects_multi_points(self):
raw_video = prepare_video()
inference_session = self.processor.init_video_session(video=raw_video, inference_device=torch_device)
ann_frame_idx = 0 # the frame index we interact with
ann_obj_ids = [2, 3] # give a unique id to each object we interact with (it can be any integers)
self.processor.add_inputs_to_inference_session(
inference_session=inference_session,
frame_idx=ann_frame_idx,
obj_ids=ann_obj_ids,
input_points=[[[[200, 300], [230, 250], [275, 175]], [[400, 150]]]],
input_labels=[[[1, 1, 0], [1]]],
)
outputs = self.video_model(inference_session=inference_session, frame_idx=ann_frame_idx)
low_res_masks = outputs.pred_masks
video_res_masks = self.processor.post_process_masks(
[outputs.pred_masks], [raw_video.shape[-3:-1]], binarize=False
)[0]
self.assertEqual(low_res_masks.shape, (2, 1, 288, 288))
self.assertEqual(video_res_masks.shape, (2, 1, raw_video.shape[-3], raw_video.shape[-2]))
torch.testing.assert_close(
video_res_masks[:, 0, :2, :2], # first object
torch.tensor(
[[[-12.8567, -12.8567], [-13.0618, -13.0618]], [[-12.1054, -12.1054], [-11.6056, -11.6056]]]
).to(torch_device),
atol=1e-4,
rtol=1e-4,
)
# test propagate in video frames
frames = []
for sam2_video_output in self.video_model.propagate_in_video_iterator(
inference_session=inference_session,
start_frame_idx=ann_frame_idx,
max_frame_num_to_track=2,
):
video_res_masks = self.processor.post_process_masks(
[sam2_video_output.pred_masks], [raw_video.shape[-3:-1]], binarize=False
)[0]
frames.append(video_res_masks)
frames = torch.stack(frames, dim=0)
self.assertEqual(frames.shape, (3, 2, 1, raw_video.shape[-3], raw_video.shape[-2]))
torch.testing.assert_close(
frames[:3, :, :, :2, :2],
torch.tensor(
[
[[[[-12.8567, -12.8567], [-13.0618, -13.0618]]], [[[-12.1054, -12.1054], [-11.6056, -11.6056]]]],
[[[[-22.5194, -22.5194], [-22.7973, -22.7973]]], [[[-20.6199, -20.6199], [-21.0607, -21.0607]]]],
[[[[-25.0871, -25.0871], [-25.6355, -25.6355]]], [[[-19.9508, -19.9508], [-20.4212, -20.4212]]]],
]
).to(torch_device),
atol=1e-4,
rtol=1e-4,
)
def test_inference_mask_generation_video_batched_bb(self):
raw_video = prepare_video()
inference_session = self.processor.init_video_session(video=raw_video, inference_device=torch_device)
ann_frame_idx = 0 # the frame index we interact with
ann_obj_ids = [2, 3] # give a unique id to each object we interact with (it can be any integers)
self.processor.add_inputs_to_inference_session(
inference_session=inference_session,
frame_idx=ann_frame_idx,
obj_ids=ann_obj_ids,
input_boxes=[[[300, 0, 500, 400], [400, 0, 600, 400]]],
)
frames = []
for sam2_video_output in self.video_model.propagate_in_video_iterator(
inference_session=inference_session,
start_frame_idx=ann_frame_idx,
max_frame_num_to_track=2,
):
video_res_masks = self.processor.post_process_masks(
[sam2_video_output.pred_masks], [raw_video.shape[-3:-1]], binarize=False
)[0]
frames.append(video_res_masks)
frames = torch.stack(frames, dim=0)
self.assertEqual(frames.shape, (3, 2, 1, raw_video.shape[-3], raw_video.shape[-2]))
torch.testing.assert_close(
frames[:3, :, :, :2, :2],
torch.tensor(
[
[[[[-17.2589, -17.2589], [-17.2777, -17.2777]]], [[[-8.5523, -8.5523], [-8.5103, -8.5103]]]],
[[[[-17.8107, -17.8107], [-18.1581, -18.1581]]], [[[-9.1150, -9.1150], [-9.2327, -9.2327]]]],
[[[[-17.9432, -17.9432], [-18.4637, -18.4637]]], [[[-10.9026, -10.9026], [-11.0184, -11.0184]]]],
]
).to(torch_device),
atol=1e-4,
rtol=1e-3,
)
def test_inference_propagate_video_from_mask_input(self):
raw_video = prepare_video()
inference_session = self.processor.init_video_session(video=raw_video, inference_device=torch_device)
ann_frame_idx = 0 # the frame index we interact with
ann_obj_id = 1 # give a unique id to each object we interact with (it can be any integers)
# get input_mask
self.processor.add_inputs_to_inference_session(
inference_session=inference_session,
frame_idx=ann_frame_idx,
obj_ids=ann_obj_id,
input_points=[[[[210, 350], [250, 220]]]],
input_labels=[[[1, 1]]],
)
sam2_video_output = self.video_model(inference_session=inference_session, frame_idx=ann_frame_idx)
# set mask as input
self.processor.add_inputs_to_inference_session(
inference_session=inference_session,
frame_idx=ann_frame_idx,
obj_ids=ann_obj_id,
input_masks=self.processor.post_process_masks(
[sam2_video_output.pred_masks], [raw_video.shape[-3:-1]], binarize=False
)[0],
)
sam2_video_output = self.video_model(inference_session=inference_session, frame_idx=ann_frame_idx)
low_res_masks = sam2_video_output.pred_masks
self.assertEqual(low_res_masks.shape, (1, 1, 288, 288))
video_res_masks = self.processor.post_process_masks(
[sam2_video_output.pred_masks], [raw_video.shape[-3:-1]], binarize=False
)[0]
self.assertEqual(video_res_masks.shape, (1, 1, raw_video.shape[-3], raw_video.shape[-2]))
torch.testing.assert_close(
video_res_masks[0, 0, :3, :3],
torch.tensor(
[[-10.0000, -10.0000, -10.0000], [-10.0000, -10.0000, -10.0000], [-10.0000, -10.0000, -10.0000]]
).to(torch_device),
atol=1e-4,
rtol=1e-4,
)
# test propagate in video frames
frames = []
for sam2_video_output in self.video_model.propagate_in_video_iterator(
inference_session=inference_session,
start_frame_idx=ann_frame_idx,
max_frame_num_to_track=2,
):
video_res_masks = self.processor.post_process_masks(
[sam2_video_output.pred_masks], [raw_video.shape[-3:-1]], binarize=False
)[0]
frames.append(video_res_masks)
frames = torch.stack(frames, dim=0)
self.assertEqual(frames.shape, (3, 1, 1, raw_video.shape[-3], raw_video.shape[-2]))
torch.testing.assert_close(
frames[:3, :, :, :2, :2],
torch.tensor(
[
[[[[-10.0000, -10.0000], [-10.0000, -10.0000]]]],
[[[[-21.3700, -21.3700], [-21.7191, -21.7191]]]],
[[[[-22.2242, -22.2242], [-22.7148, -22.7148]]]],
],
).to(torch_device),
atol=1e-4,
rtol=1e-4,
)
def test_inference_propagate_on_streamed_video(self):
raw_video = prepare_video()
inference_session = self.processor.init_video_session(inference_device=torch_device)
video_res_masks = []
max_frame_num_to_track = 3
for frame_idx, frame in enumerate(raw_video):
if frame_idx >= max_frame_num_to_track:
break
inputs = self.processor(images=frame, device=torch_device, return_tensors="pt")
if frame_idx == 0:
self.processor.add_inputs_to_inference_session(
inference_session,
frame_idx=0,
obj_ids=1,
input_points=[[[[210, 350], [250, 220]]]],
input_labels=[[[1, 1]]],
original_size=inputs.original_sizes[0],
)
sam2_video_output = self.video_model(inference_session=inference_session, frame=inputs.pixel_values[0])
video_res_masks.append(
self.processor.post_process_masks(
[sam2_video_output.pred_masks], inputs.original_sizes, binarize=False
)[0]
)
video_res_masks = torch.stack(video_res_masks, dim=0)
self.assertEqual(
video_res_masks.shape, (max_frame_num_to_track, 1, 1, raw_video.shape[-3], raw_video.shape[-2])
)
# higher tolerance due to errors propagating from frame to frame
torch.testing.assert_close(
video_res_masks[:3, :, :, :2, :2],
torch.tensor(
[
[[[[-11.9889, -11.9889], [-11.6383, -11.6383]]]],
[[[[-20.4502, -20.4502], [-20.6929, -20.6929]]]],
[[[[-22.0344, -22.0344], [-22.4522, -22.4522]]]],
]
).to(torch_device),
atol=1e-2,
rtol=1e-2,
)
def test_inference_with_different_dtypes(self):
"""Test that inference works correctly for float32, bfloat16, and float16 dtypes."""
raw_video = prepare_video()
dtypes_to_test = [
(torch.float32, None), # float32 is always available
(torch.bfloat16, is_torch_bf16_available_on_device),
(torch.float16, is_torch_fp16_available_on_device),
]
for dtype, availability_check in dtypes_to_test:
with self.subTest(dtype=dtype):
# Skip if dtype is not available on device
if availability_check is not None and not availability_check(torch_device):
self.skipTest(f"{dtype} not supported on {torch_device}")
# Load model with specific dtype
video_model = Sam3TrackerVideoModel.from_pretrained("facebook/sam3", torch_dtype=dtype).to(
torch_device
)
video_model.eval()
# Initialize inference session
inference_session = self.processor.init_video_session(
video=raw_video, inference_device=torch_device, dtype=dtype
)
ann_frame_idx = 0
ann_obj_id = 1
# Add inputs
self.processor.add_inputs_to_inference_session(
inference_session=inference_session,
frame_idx=ann_frame_idx,
obj_ids=ann_obj_id,
input_points=[[[[210, 350]]]],
input_labels=[[[1]]],
)
# Run inference on first frame
outputs = video_model(inference_session=inference_session, frame_idx=ann_frame_idx)
low_res_masks = outputs.pred_masks
# Verify output shape and dtype
self.assertEqual(low_res_masks.shape, (1, 1, 288, 288))
self.assertEqual(low_res_masks.dtype, dtype)
# Post-process masks
video_res_masks = self.processor.post_process_masks(
[low_res_masks], [raw_video.shape[-3:-1]], binarize=False
)[0]
self.assertEqual(video_res_masks.shape, (1, 1, raw_video.shape[-3], raw_video.shape[-2]))
# Test propagation across multiple frames to test memory handling
frames = []
max_frame_num_to_track = 2
for sam2_video_output in video_model.propagate_in_video_iterator(
inference_session=inference_session,
start_frame_idx=ann_frame_idx,
max_frame_num_to_track=max_frame_num_to_track,
):
video_res_masks = self.processor.post_process_masks(
[sam2_video_output.pred_masks], [raw_video.shape[-3:-1]], binarize=False
)[0]
frames.append(video_res_masks)
# Verify dtype is maintained during propagation
self.assertEqual(sam2_video_output.pred_masks.dtype, dtype)
frames = torch.stack(frames, dim=0)
# Verify we got the expected number of frames (initial frame + max_frame_num_to_track)
self.assertEqual(
frames.shape, (max_frame_num_to_track + 1, 1, 1, raw_video.shape[-3], raw_video.shape[-2])
)
# Verify dtype is maintained in stacked frames
self.assertEqual(frames.dtype, dtype)
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/sam3_tracker_video/test_modeling_sam3_tracker_video.py",
"license": "Apache License 2.0",
"lines": 562,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:tests/models/sam3_video/test_modeling_sam3_video.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch SAM3 Video model."""
import gc
import unittest
from transformers.testing_utils import (
backend_empty_cache,
is_torch_bf16_available_on_device,
is_torch_fp16_available_on_device,
slow,
torch_device,
)
from transformers.utils import is_torch_available
from transformers.video_utils import load_video
if is_torch_available():
import torch
from transformers import Sam3VideoModel, Sam3VideoProcessor
def prepare_video():
video_url = "https://huggingface.co/datasets/hf-internal-testing/sam2-fixtures/resolve/main/bedroom.mp4"
raw_video, _ = load_video(video_url)
return raw_video
@slow
class Sam3VideoModelIntegrationTest(unittest.TestCase):
def setUp(self):
super().setUp()
checkpoint_path = "facebook/sam3"
self.video_model = Sam3VideoModel.from_pretrained(checkpoint_path).to(torch.float32)
self.processor = Sam3VideoProcessor.from_pretrained(checkpoint_path)
self.video_model.to(torch_device)
self.video_model.eval()
def tearDown(self):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
backend_empty_cache(torch_device)
def test_inference_video_propagate_with_text_prompt(self):
raw_video = prepare_video()
inference_session = self.processor.init_video_session(
video=raw_video,
inference_device=torch_device,
processing_device="cpu",
video_storage_device="cpu",
)
# Add text prompt
text = "person"
inference_session = self.processor.add_text_prompt(
inference_session=inference_session,
text=text,
)
# Propagate through video frames
outputs_per_frame = {}
model_outputs_per_frame = {}
for model_outputs in self.video_model.propagate_in_video_iterator(
inference_session=inference_session,
max_frame_num_to_track=3,
):
processed_outputs = self.processor.postprocess_outputs(inference_session, model_outputs)
outputs_per_frame[model_outputs.frame_idx] = processed_outputs
model_outputs_per_frame[model_outputs.frame_idx] = model_outputs
# Check we processed the expected number of frames
self.assertGreaterEqual(len(outputs_per_frame), 1)
self.assertLessEqual(len(outputs_per_frame), 4) # frame 0 + up to 3 more
# Check output structure for each frame
for processed_outputs in outputs_per_frame.values():
self.assertIn("object_ids", processed_outputs)
self.assertIn("scores", processed_outputs)
self.assertIn("boxes", processed_outputs)
self.assertIn("masks", processed_outputs)
num_objects = len(processed_outputs["object_ids"])
if num_objects > 0:
self.assertEqual(processed_outputs["scores"].shape, (num_objects,))
self.assertEqual(processed_outputs["boxes"].shape, (num_objects, 4))
self.assertEqual(
processed_outputs["masks"].shape, (num_objects, raw_video.shape[-3], raw_video.shape[-2])
)
# Check boxes are in XYXY format (absolute coordinates)
boxes = processed_outputs["boxes"]
self.assertTrue(torch.all(boxes[:, 2] >= boxes[:, 0])) # x2 >= x1
self.assertTrue(torch.all(boxes[:, 3] >= boxes[:, 1])) # y2 >= y1
# Check numeric values for first frame
if len(outputs_per_frame) > 0:
first_frame_idx = min(outputs_per_frame.keys())
first_outputs = outputs_per_frame[first_frame_idx]
num_objects = len(first_outputs["object_ids"])
if num_objects > 0:
# Move outputs to CPU for comparison (postprocess_outputs may return CPU tensors)
object_ids = (
first_outputs["object_ids"].cpu()
if isinstance(first_outputs["object_ids"], torch.Tensor)
else torch.tensor(first_outputs["object_ids"])
)
scores = (
first_outputs["scores"].cpu()
if isinstance(first_outputs["scores"], torch.Tensor)
else torch.tensor(first_outputs["scores"])
)
boxes = (
first_outputs["boxes"].cpu()
if isinstance(first_outputs["boxes"], torch.Tensor)
else torch.tensor(first_outputs["boxes"])
)
masks = (
first_outputs["masks"].cpu()
if isinstance(first_outputs["masks"], torch.Tensor)
else torch.tensor(first_outputs["masks"])
)
torch.testing.assert_close(
object_ids,
torch.tensor([0, 1], dtype=torch.int64),
)
torch.testing.assert_close(
scores,
torch.tensor([0.968647837638855, 0.9736108779907227], dtype=torch.float32),
atol=1e-4,
rtol=1e-4,
)
torch.testing.assert_close(
boxes[0],
torch.tensor([146.0, 135.0, 291.0, 404.0], dtype=torch.float32),
atol=1e-4,
rtol=1e-4,
)
torch.testing.assert_close(
masks[0, :3, :3].float(),
torch.tensor([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], dtype=torch.float32),
atol=1e-4,
rtol=1e-4,
)
# Check raw model_outputs mask values for first frame
if len(model_outputs_per_frame) > 0:
first_frame_idx = min(model_outputs_per_frame.keys())
first_model_outputs = model_outputs_per_frame[first_frame_idx]
num_objects = len(first_model_outputs.object_ids)
if num_objects > 0:
# Check raw mask from model_outputs (low-resolution, before post-processing)
first_obj_id = first_model_outputs.object_ids[0]
raw_mask = first_model_outputs.obj_id_to_mask[first_obj_id].cpu()
torch.testing.assert_close(
raw_mask[:1, :3, :3].float(),
torch.tensor(
[
[
[-2.952317476272583, -5.94632625579834, -7.991223335266113],
[-6.916913986206055, -10.058566093444824, -11.114638328552246],
[-8.195585250854492, -9.787644386291504, -10.39273452758789],
]
],
dtype=torch.float32,
),
atol=5e-3, # Higher tolerance for raw logits
rtol=5e-3,
)
# Check numeric values for last frame (to verify propagation consistency)
if len(outputs_per_frame) > 1:
last_frame_idx = max(outputs_per_frame.keys())
last_outputs = outputs_per_frame[last_frame_idx]
num_objects = len(last_outputs["object_ids"])
if num_objects > 0:
# Move outputs to CPU for comparison
object_ids = (
last_outputs["object_ids"].cpu()
if isinstance(last_outputs["object_ids"], torch.Tensor)
else torch.tensor(last_outputs["object_ids"])
)
scores = (
last_outputs["scores"].cpu()
if isinstance(last_outputs["scores"], torch.Tensor)
else torch.tensor(last_outputs["scores"])
)
boxes = (
last_outputs["boxes"].cpu()
if isinstance(last_outputs["boxes"], torch.Tensor)
else torch.tensor(last_outputs["boxes"])
)
masks = (
last_outputs["masks"].cpu()
if isinstance(last_outputs["masks"], torch.Tensor)
else torch.tensor(last_outputs["masks"])
)
torch.testing.assert_close(
object_ids,
torch.tensor([0, 1], dtype=torch.int64),
)
torch.testing.assert_close(
scores,
torch.tensor([0.968647837638855, 0.9736108779907227], dtype=torch.float32),
atol=1e-4,
rtol=1e-4,
)
torch.testing.assert_close(
boxes[0],
torch.tensor([157.0, 116.0, 295.0, 382.0], dtype=torch.float32),
atol=1e-4,
rtol=1e-4,
)
torch.testing.assert_close(
masks[0, :3, :3].float(),
torch.tensor([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], dtype=torch.float32),
atol=1e-4,
rtol=1e-4,
)
# Check raw model_outputs mask values for last frame
if len(model_outputs_per_frame) > 1:
last_frame_idx = max(model_outputs_per_frame.keys())
last_model_outputs = model_outputs_per_frame[last_frame_idx]
num_objects = len(last_model_outputs.object_ids)
if num_objects > 0:
# Check raw mask from model_outputs (low-resolution, before post-processing)
first_obj_id = last_model_outputs.object_ids[0]
raw_mask = last_model_outputs.obj_id_to_mask[first_obj_id].cpu()
torch.testing.assert_close(
raw_mask[:1, :3, :3].float(),
torch.tensor(
[
[
[-23.023313522338867, -27.02887535095215, -22.29985237121582],
[-24.373233795166016, -31.428438186645508, -24.268810272216797],
[-24.550016403198242, -32.607383728027344, -26.500947952270508],
]
],
dtype=torch.float32,
),
atol=5e-3, # Higher tolerance for raw logits
rtol=5e-3,
)
def test_inference_video_streaming_with_text_prompt(self):
raw_video = prepare_video()
# Initialize session for streaming (no video provided)
inference_session = self.processor.init_video_session(
inference_device=torch_device,
processing_device="cpu",
video_storage_device="cpu",
)
# Add text prompt
text = "person"
inference_session = self.processor.add_text_prompt(
inference_session=inference_session,
text=text,
)
# Process frames one by one (streaming mode)
outputs_per_frame = {}
model_outputs_per_frame = {}
max_frame_num_to_track = 3
for frame_idx, frame in enumerate(raw_video):
if frame_idx >= max_frame_num_to_track:
break
# Process frame using processor
inputs = self.processor(images=frame, device=torch_device, return_tensors="pt")
# Process frame using streaming inference
model_outputs = self.video_model(
inference_session=inference_session,
frame=inputs.pixel_values[0], # Provide processed frame - this enables streaming mode
reverse=False,
)
# Post-process outputs with original_sizes for proper resolution handling
processed_outputs = self.processor.postprocess_outputs(
inference_session,
model_outputs,
original_sizes=inputs.original_sizes, # Required for streaming inference
)
outputs_per_frame[frame_idx] = processed_outputs
model_outputs_per_frame[frame_idx] = model_outputs
# Check we processed the expected number of frames
self.assertEqual(len(outputs_per_frame), max_frame_num_to_track)
# Check output structure for each frame
for frame_idx, processed_outputs in outputs_per_frame.items():
self.assertIn("object_ids", processed_outputs)
self.assertIn("scores", processed_outputs)
self.assertIn("boxes", processed_outputs)
self.assertIn("masks", processed_outputs)
num_objects = len(processed_outputs["object_ids"])
if num_objects > 0:
self.assertEqual(processed_outputs["scores"].shape, (num_objects,))
self.assertEqual(processed_outputs["boxes"].shape, (num_objects, 4))
# For streaming, masks should be at original frame resolution
H_orig, W_orig = raw_video[frame_idx].shape[0], raw_video[frame_idx].shape[1]
self.assertEqual(processed_outputs["masks"].shape, (num_objects, H_orig, W_orig))
# Check boxes are in XYXY format (absolute coordinates)
boxes = processed_outputs["boxes"]
self.assertTrue(torch.all(boxes[:, 2] >= boxes[:, 0])) # x2 >= x1
self.assertTrue(torch.all(boxes[:, 3] >= boxes[:, 1])) # y2 >= y1
# Check numeric values for first frame
if len(outputs_per_frame) > 0:
first_frame_idx = min(outputs_per_frame.keys())
first_outputs = outputs_per_frame[first_frame_idx]
num_objects = len(first_outputs["object_ids"])
if num_objects > 0:
# Move outputs to CPU for comparison (postprocess_outputs may return CPU tensors)
object_ids = (
first_outputs["object_ids"].cpu()
if isinstance(first_outputs["object_ids"], torch.Tensor)
else torch.tensor(first_outputs["object_ids"])
)
scores = (
first_outputs["scores"].cpu()
if isinstance(first_outputs["scores"], torch.Tensor)
else torch.tensor(first_outputs["scores"])
)
boxes = (
first_outputs["boxes"].cpu()
if isinstance(first_outputs["boxes"], torch.Tensor)
else torch.tensor(first_outputs["boxes"])
)
masks = (
first_outputs["masks"].cpu()
if isinstance(first_outputs["masks"], torch.Tensor)
else torch.tensor(first_outputs["masks"])
)
torch.testing.assert_close(
object_ids,
torch.tensor([0, 1], dtype=torch.int64),
)
torch.testing.assert_close(
scores,
torch.tensor([0.9683944582939148, 0.9740181565284729], dtype=torch.float32),
atol=1e-4,
rtol=1e-4,
)
torch.testing.assert_close(
boxes[0],
torch.tensor([146.0, 135.0, 291.0, 404.0], dtype=torch.float32),
atol=1e-4,
rtol=1e-4,
)
torch.testing.assert_close(
masks[0, :3, :3].float(),
torch.tensor([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], dtype=torch.float32),
atol=1e-4,
rtol=1e-4,
)
# Check raw model_outputs mask values for first frame
if len(model_outputs_per_frame) > 0:
first_frame_idx = min(model_outputs_per_frame.keys())
first_model_outputs = model_outputs_per_frame[first_frame_idx]
num_objects = len(first_model_outputs.object_ids)
if num_objects > 0:
# Check raw mask from model_outputs (low-resolution, before post-processing)
first_obj_id = first_model_outputs.object_ids[0]
raw_mask = first_model_outputs.obj_id_to_mask[first_obj_id].cpu()
torch.testing.assert_close(
raw_mask[:1, :3, :3].float(),
torch.tensor(
[
[
[-2.987567901611328, -5.944897651672363, -7.973854064941406],
[-7.017378330230713, -10.088018417358398, -11.089308738708496],
[-8.274458885192871, -9.851463317871094, -10.428947448730469],
]
],
dtype=torch.float32,
),
atol=5e-3, # Higher tolerance for raw logits
rtol=5e-3,
)
# Check numeric values for last frame (to verify propagation consistency)
if len(outputs_per_frame) > 1:
last_frame_idx = max(outputs_per_frame.keys())
last_outputs = outputs_per_frame[last_frame_idx]
num_objects = len(last_outputs["object_ids"])
if num_objects > 0:
# Move outputs to CPU for comparison
object_ids = (
last_outputs["object_ids"].cpu()
if isinstance(last_outputs["object_ids"], torch.Tensor)
else torch.tensor(last_outputs["object_ids"])
)
scores = (
last_outputs["scores"].cpu()
if isinstance(last_outputs["scores"], torch.Tensor)
else torch.tensor(last_outputs["scores"])
)
boxes = (
last_outputs["boxes"].cpu()
if isinstance(last_outputs["boxes"], torch.Tensor)
else torch.tensor(last_outputs["boxes"])
)
masks = (
last_outputs["masks"].cpu()
if isinstance(last_outputs["masks"], torch.Tensor)
else torch.tensor(last_outputs["masks"])
)
torch.testing.assert_close(
object_ids,
torch.tensor([0, 1], dtype=torch.int64),
)
torch.testing.assert_close(
scores,
torch.tensor([0.9683944582939148, 0.9740181565284729], dtype=torch.float32),
atol=1e-4,
rtol=1e-4,
)
torch.testing.assert_close(
boxes[0],
torch.tensor([154.0, 117.0, 294.0, 395.0], dtype=torch.float32),
atol=1e-4,
rtol=1e-4,
)
torch.testing.assert_close(
masks[0, :3, :3].float(),
torch.tensor([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], dtype=torch.float32),
atol=1e-4,
rtol=1e-4,
)
# Check raw model_outputs mask values for last frame
if len(model_outputs_per_frame) > 1:
last_frame_idx = max(model_outputs_per_frame.keys())
last_model_outputs = model_outputs_per_frame[last_frame_idx]
num_objects = len(last_model_outputs.object_ids)
if num_objects > 0:
# Check raw mask from model_outputs (low-resolution, before post-processing)
first_obj_id = last_model_outputs.object_ids[0]
raw_mask = last_model_outputs.obj_id_to_mask[first_obj_id].cpu()
torch.testing.assert_close(
raw_mask[:1, :3, :3].float(),
torch.tensor(
[
[
[-23.935535430908203, -27.967025756835938, -23.519914627075195],
[-25.742399215698242, -32.65046310424805, -24.71213150024414],
[-25.263212203979492, -33.807132720947266, -27.463823318481445],
]
],
dtype=torch.float32,
),
atol=5e-3, # Higher tolerance for raw logits
rtol=5e-3,
)
def test_inference_video_multi_prompt(self):
    """Test multi-prompt tracking - detecting multiple object categories in one pass."""
    video = prepare_video()
    session = self.processor.init_video_session(
        video=video,
        inference_device=torch_device,
        processing_device="cpu",
        video_storage_device="cpu",
    )
    # Register several text prompts at once.
    prompts = ["person", "bed"]
    self.processor.add_text_prompt(
        inference_session=session,
        text=prompts,
    )
    # Propagate through the video and post-process every yielded frame.
    frame_outputs = {
        raw.frame_idx: self.processor.postprocess_outputs(session, raw)
        for raw in self.video_model.propagate_in_video_iterator(
            inference_session=session,
            max_frame_num_to_track=3,
        )
    }
    # Expected number of processed frames: at least one, at most 4 (start frame + 3 tracked).
    self.assertGreaterEqual(len(frame_outputs), 1)
    self.assertLessEqual(len(frame_outputs), 4)
    # Every frame must expose the full multi-prompt output structure.
    for per_frame in frame_outputs.values():
        for required_key in ("object_ids", "scores", "boxes", "masks", "prompt_to_obj_ids"):
            self.assertIn(required_key, per_frame)
        # prompt_to_obj_ids maps each prompt string to the object ids it produced.
        mapping = per_frame["prompt_to_obj_ids"]
        self.assertIsInstance(mapping, dict)
        for prompt_text, ids_for_prompt in mapping.items():
            self.assertIsInstance(prompt_text, str)
            self.assertIsInstance(ids_for_prompt, list)
            # Every id attributed to a prompt must also appear in the global id list.
            for single_id in ids_for_prompt:
                self.assertIn(single_id, per_frame["object_ids"].tolist())
    # On the first processed frame, at least one prompt must have detections,
    # and only the prompts we registered may appear.
    earliest = frame_outputs[min(frame_outputs.keys())]
    mapping = earliest["prompt_to_obj_ids"]
    self.assertGreater(len(mapping), 0)
    for prompt_text in mapping.keys():
        self.assertIn(prompt_text, prompts)
def test_custom_image_size(self):
    """Test that custom image size can be set and propagates correctly to detector and tracker configs."""
    from transformers import Sam3VideoConfig

    config = Sam3VideoConfig.from_pretrained("facebook/sam3")
    config.image_size = 560
    # Setting image_size on the top-level config must fan out to every nested sub-config.
    for cfg in (
        config,
        config.detector_config,
        config.tracker_config,
        config.detector_config.vision_config,
        config.detector_config.vision_config.backbone_config,
    ):
        self.assertEqual(cfg.image_size, 560)
    # The value must also survive model instantiation from the modified config.
    model = Sam3VideoModel.from_pretrained("facebook/sam3", config=config).to(torch_device).eval()
    self.assertEqual(model.config.image_size, 560)
def test_inference_with_different_dtypes(self):
    """Test that inference works correctly for float32, bfloat16, and float16 dtypes."""
    raw_video = prepare_video()
    # Pairs of (dtype, availability predicate); a None predicate means "always runnable".
    dtypes_to_test = [
        (torch.float32, None),  # float32 is always available
        (torch.bfloat16, is_torch_bf16_available_on_device),
        (torch.float16, is_torch_fp16_available_on_device),
    ]
    for dtype, availability_check in dtypes_to_test:
        with self.subTest(dtype=dtype):
            # Skip if dtype is not available on device
            if availability_check is not None and not availability_check(torch_device):
                self.skipTest(f"{dtype} not supported on {torch_device}")
            # Load model with specific dtype
            video_model = Sam3VideoModel.from_pretrained("facebook/sam3", torch_dtype=dtype).to(torch_device)
            video_model.eval()
            # Initialize inference session
            inference_session = self.processor.init_video_session(
                video=raw_video,
                inference_device=torch_device,
                processing_device="cpu",
                video_storage_device="cpu",
                dtype=dtype,
            )
            # Add text prompt
            text = "person"
            inference_session = self.processor.add_text_prompt(
                inference_session=inference_session,
                text=text,
            )
            # Run inference on first frame
            outputs_per_frame = {}
            model_outputs_per_frame = {}
            max_frame_num_to_track = 2
            for model_outputs in video_model.propagate_in_video_iterator(
                inference_session=inference_session,
                max_frame_num_to_track=max_frame_num_to_track,
            ):
                processed_outputs = self.processor.postprocess_outputs(inference_session, model_outputs)
                outputs_per_frame[model_outputs.frame_idx] = processed_outputs
                model_outputs_per_frame[model_outputs.frame_idx] = model_outputs
                # Verify dtype is maintained in model outputs: the raw (pre-postprocessing)
                # mask of the first tracked object must carry the requested dtype.
                if len(model_outputs.object_ids) > 0:
                    first_obj_id = model_outputs.object_ids[0]
                    raw_mask = model_outputs.obj_id_to_mask[first_obj_id]
                    self.assertEqual(raw_mask.dtype, dtype)
            # Verify we processed frames (start frame plus up to max_frame_num_to_track tracked frames)
            self.assertGreaterEqual(len(outputs_per_frame), 1)
            self.assertLessEqual(len(outputs_per_frame), max_frame_num_to_track + 1)
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/sam3_video/test_modeling_sam3_video.py",
"license": "Apache License 2.0",
"lines": 549,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:src/transformers/models/glm46v/modular_glm46v.py | # Copyright 2025 the HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from ...configuration_utils import PreTrainedConfig
from ...video_utils import VideoMetadata
from ..auto import CONFIG_MAPPING, AutoConfig, AutoModel
from ..glm4v.image_processing_glm4v import Glm4vImageProcessor
from ..glm4v.image_processing_glm4v_fast import Glm4vImageProcessorFast
from ..glm4v.modeling_glm4v import Glm4vForConditionalGeneration, Glm4vModel, Glm4vPreTrainedModel
from ..glm4v.processing_glm4v import Glm4vProcessor
from ..glm4v.video_processing_glm4v import Glm4vVideoProcessor
class Glm46VConfig(PreTrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`Glm4vModel`]. It is used to instantiate a
    GLM-4.6V model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of
    GLM-4.1V-9B-Thinking [zai-org/GLM-4.1V-9B-Thinking](https://huggingface.co/zai-org/GLM-4.1V-9B-Thinking).
    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.
    Args:
        text_config (`Union[PreTrainedConfig, dict]`, *optional*, defaults to `Glm4vTextConfig`):
            The config object or dictionary of the text backbone.
        vision_config (`Union[PreTrainedConfig, dict]`, *optional*, defaults to `Glm4vVisionConfig`):
            The config object or dictionary of the vision backbone.
        image_token_id (`int`, *optional*, defaults to 151343):
            The image token index to encode the image prompt.
        video_token_id (`int`, *optional*, defaults to 151344):
            The video token index to encode the image prompt.
        image_start_token_id (`int`, *optional*, defaults to 151339):
            The image start token index to encode the start of image.
        image_end_token_id (`int`, *optional*, defaults to 151340):
            The image end token index to encode the end of image.
        video_start_token_id (`int`, *optional*, defaults to 151361):
            The video start token index to encode the start of video.
        video_end_token_id (`int`, *optional*, defaults to 151362):
            The video end token index to encode the end of video.
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether to tie weight embeddings
    ```python
    >>> from transformers import Glm46VForConditionalGeneration, Glm46VConfig
    >>> # Initializing a GLM-4.6V style configuration
    >>> configuration = Glm46VConfig()
    >>> # Initializing a model from the GLM-4.6V style configuration
    >>> model = Glm4vForConditionalGeneration(configuration)
    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "glm46v"
    sub_configs = {"text_config": AutoConfig, "vision_config": AutoConfig}
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        image_token_id=151343,
        video_token_id=151344,
        image_start_token_id=151339,
        image_end_token_id=151340,
        video_start_token_id=151361,
        video_end_token_id=151362,
        tie_word_embeddings=False,
        **kwargs,
    ):
        # Sub-configs may arrive as dicts (e.g. from a serialized config), as None
        # (use GLM-4V defaults), or as already-instantiated config objects.
        if isinstance(vision_config, dict):
            vision_config["model_type"] = vision_config.get("model_type", "glm4v_vision")
            self.vision_config = CONFIG_MAPPING[vision_config["model_type"]](**vision_config)
        elif vision_config is None:
            self.vision_config = CONFIG_MAPPING["glm4v_vision"]()
        else:
            # Fix: previously an already-instantiated config object was silently
            # dropped, leaving `self.vision_config` unset and causing a later
            # AttributeError on access.
            self.vision_config = vision_config
        if isinstance(text_config, dict):
            text_config["model_type"] = text_config.get("model_type", "glm4v_text")
            self.text_config = CONFIG_MAPPING[text_config["model_type"]](**text_config)
        elif text_config is None:
            self.text_config = CONFIG_MAPPING["glm4v_text"]()
        else:
            # Same fix as above for the text sub-config.
            self.text_config = text_config
        self.image_token_id = image_token_id
        self.video_token_id = video_token_id
        self.video_start_token_id = video_start_token_id
        self.video_end_token_id = video_end_token_id
        self.image_start_token_id = image_start_token_id
        self.image_end_token_id = image_end_token_id
        self.tie_word_embeddings = tie_word_embeddings
        super().__init__(**kwargs)
class Glm46VPreTrainedModel(Glm4vPreTrainedModel):
    """Base pretrained-model class for GLM-4.6V; resets the Glm4v class-level hints."""

    # Cleared so this model does not inherit Glm4v's recordable-output spec.
    _can_record_outputs = None
    # Cleared so device-map splitting falls back to defaults for this architecture.
    _no_split_modules = None

    def _init_weights(self, module):
        # Deliberately unsupported here (modular-conversion pattern): weight init
        # is expected to be handled elsewhere, so direct calls raise.
        raise AttributeError("Not needed")
class Glm46VModel(Glm4vModel):
    """GLM-4.6V multimodal backbone: vision tower + language model built via AutoModel."""

    # Cleared so device-map splitting falls back to defaults for this architecture.
    _no_split_modules = None

    def __init__(self, config):
        super().__init__(config)
        # Rebuild both towers from the sub-configs through AutoModel, replacing the
        # Glm4v defaults so checkpoint-specific architectures are honored.
        self.visual = AutoModel.from_config(config.vision_config)
        self.language_model = AutoModel.from_config(config.text_config)
class Glm46VForConditionalGeneration(Glm4vForConditionalGeneration):
    """GLM-4.6V conditional-generation head; behavior is inherited unchanged from Glm4v."""

    pass
class Glm46VProcessor(Glm4vProcessor):
    """GLM-4.6V processor; only the per-frame placeholder format differs from Glm4v."""

    def replace_frame_token_id(self, timestamp_sec):
        """Return the frame placeholder: an image-token span followed by its timestamp in seconds."""
        stamp = format(timestamp_sec, ".1f")
        return "<|begin_of_image|>" + self.image_token + "<|end_of_image|>" + stamp + " seconds"
class Glm46VImageProcessor(Glm4vImageProcessor):
    """Slow image processor for GLM-4.6V; identical to the Glm4v implementation."""

    pass
class Glm46VImageProcessorFast(Glm4vImageProcessorFast):
    """Fast (torchvision-backed) image processor for GLM-4.6V; identical to Glm4v."""

    pass
class Glm46VVideoProcessor(Glm4vVideoProcessor):
    """Video processor for GLM-4.6V with duration-dependent dynamic frame sampling."""

    def sample_frames(
        self,
        metadata: VideoMetadata,
        fps: int | float | None = None,  # NOTE(review): unused here; presumably kept for interface compatibility with the base class — confirm
        **kwargs,
    ):
        """Select frame indices for a video based on its duration.

        Shorter videos are sampled at a higher effective fps than longer ones,
        the result is capped at a maximum frame count, deduplicated, and padded
        to an even length (frames are later consumed in temporal pairs via
        ``temporal_patch_size``).
        """
        # Real (decoded) fps is required to convert frame indices to timestamps.
        if metadata is None or getattr(metadata, "fps", None) is None:
            raise ValueError(
                "Asked to sample frames per second but no video metadata was provided which is required when sampling in Glm46V. "
                "Please pass in `VideoMetadata` object or set `do_sample_frames=False`"
            )
        total_frames = metadata.total_num_frames
        max_frame_idx = total_frames - 1
        # Fall back to an estimate from frame count and fps when duration is absent.
        duration = metadata.duration or round(max_frame_idx / metadata.fps) + 1
        # Sampling rate tiers keyed by duration upper bound (seconds): <=30s -> 3 fps,
        # <=300s -> 1 fps, beyond -> 0.5 fps; durations are clamped to 2400s.
        DYNAMIC_FPS_THRES = {30: 3, 300: 1, 2400: 0.5}
        MAX_FRAME_COUNT_DYNAMIC = 640
        MAX_DURATION = 2400
        effective_duration = min(duration, MAX_DURATION)
        if effective_duration <= 30:
            target_fps = DYNAMIC_FPS_THRES[30]
        elif effective_duration <= 300:
            target_fps = DYNAMIC_FPS_THRES[300]
        else:
            target_fps = DYNAMIC_FPS_THRES[2400]
        # Target number of frames to extract, scaled by the temporal patch size and capped.
        extract_t = int(effective_duration * target_fps * self.temporal_patch_size)
        extract_t = min(extract_t, MAX_FRAME_COUNT_DYNAMIC)
        duration_per_frame = 1 / metadata.fps
        # Timestamp (seconds) of each source frame.
        timestamps = [i * duration_per_frame for i in range(total_frames)]
        max_second = int(duration)
        if total_frames < extract_t:
            # Fewer source frames than requested: spread indices uniformly (with repeats).
            frame_indices = np.linspace(0, total_frames - 1, extract_t, dtype=int).tolist()
        else:
            # Walk the timeline picking the first frame at/after each sampling instant.
            frame_indices = []
            current_second = 0
            inv_fps = 1 / (self.temporal_patch_size * target_fps)
            for frame_index in range(total_frames):
                if timestamps[frame_index] >= current_second:
                    current_second += inv_fps
                    frame_indices.append(frame_index)
                    if current_second >= max_second:
                        break
            # Re-spread if the walk under- or over-shot the target count.
            if len(frame_indices) < extract_t:
                if len(frame_indices) == 0:
                    start, end = 0, max(total_frames - 1, 0)
                else:
                    start, end = frame_indices[0], frame_indices[-1]
                frame_indices = np.linspace(start, end, extract_t, dtype=int).tolist()
            elif len(frame_indices) > extract_t:
                frame_indices = np.linspace(0, total_frames - 1, extract_t, dtype=int).tolist()
        # Drop duplicate indices while preserving order.
        seen, uniq = set(), []
        for idx in frame_indices:
            if idx not in seen:
                seen.add(idx)
                uniq.append(idx)
        # Pad to an even count by repeating the last frame (temporal pairs of frames).
        if len(uniq) & 1:
            uniq.append(uniq[-1])
        return np.array(uniq)
# Public names exported by this modular definition file.
__all__ = [
    "Glm46VConfig",
    "Glm46VModel",
    "Glm46VPreTrainedModel",
    "Glm46VForConditionalGeneration",
    "Glm46VProcessor",
    "Glm46VImageProcessor",
    "Glm46VImageProcessorFast",
    "Glm46VVideoProcessor",
]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/glm46v/modular_glm46v.py",
"license": "Apache License 2.0",
"lines": 180,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:tests/models/glm46v/test_modeling_glm46v.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch GLM-4.6V model."""
import copy
import unittest
from transformers import (
AutoProcessor,
Glm46VConfig,
Glm46VForConditionalGeneration,
Glm46VModel,
is_torch_available,
)
from transformers.testing_utils import (
Expectations,
cleanup,
require_deterministic_for_xpu,
require_flash_attn,
require_torch,
require_torch_accelerator,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import (
ModelTesterMixin,
floats_tensor,
ids_tensor,
)
if is_torch_available():
import torch
class Glm46VVisionText2TextModelTester:
    """Builds a tiny Glm46V config and matching dummy multimodal inputs for the common model tests."""

    def __init__(
        self,
        parent,
        batch_size=3,
        seq_length=7,
        num_channels=3,
        ignore_index=-100,
        image_size=112,
        video_start_token_id=3,
        video_end_token_id=4,
        image_start_token_id=5,
        image_end_token_id=6,
        image_token_id=7,
        video_token_id=8,
        is_training=True,
        text_config={
            "vocab_size": 99,
            "hidden_size": 16,
            "intermediate_size": 22,
            "num_hidden_layers": 2,
            "num_attention_heads": 2,
            "num_key_value_heads": 1,
            "output_channels": 64,
            "hidden_act": "silu",
            "max_position_embeddings": 512,
            "rope_parameters": {"type": "default", "mrope_section": [2, 1, 1]},
            "rope_theta": 10000,
            "tie_word_embeddings": True,
            "bos_token_id": 0,
            "eos_token_id": 0,
            "pad_token_id": 0,
        },
        vision_config={
            "depth": 2,
            "hidden_act": "silu",
            "hidden_size": 48,
            "out_hidden_size": 16,
            "intermediate_size": 22,
            "patch_size": 14,
            "spatial_merge_size": 1,
            "temporal_patch_size": 2,
        },
    ):
        self.parent = parent
        self.ignore_index = ignore_index
        self.bos_token_id = text_config["bos_token_id"]
        self.eos_token_id = text_config["eos_token_id"]
        self.pad_token_id = text_config["pad_token_id"]
        self.video_start_token_id = video_start_token_id
        self.video_end_token_id = video_end_token_id
        self.image_start_token_id = image_start_token_id
        self.image_end_token_id = image_end_token_id
        self.image_token_id = image_token_id
        self.video_token_id = video_token_id
        self.text_config = text_config
        self.vision_config = vision_config
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.is_training = is_training
        self.hidden_size = text_config["hidden_size"]
        self.num_hidden_layers = text_config["num_hidden_layers"]
        self.num_attention_heads = text_config["num_attention_heads"]
        self.vocab_size = text_config["vocab_size"]
        # Number of placeholder image tokens inserted into every sequence;
        # the effective text length is seq_length plus this image span.
        self.num_image_tokens = 64
        self.seq_length = seq_length + self.num_image_tokens

    def get_config(self):
        # Assemble the composite config from the tiny text/vision dicts above.
        return Glm46VConfig(
            text_config=self.text_config,
            vision_config=self.vision_config,
            image_token_id=self.image_token_id,
            video_token_id=self.video_token_id,
            video_start_token_id=self.video_start_token_id,
            video_end_token_id=self.video_end_token_id,
            image_start_token_id=self.image_start_token_id,
            image_end_token_id=self.image_end_token_id,
        )

    def prepare_config_and_inputs(self):
        config = self.get_config()
        patch_size = config.vision_config.patch_size
        temporal_patch_size = config.vision_config.temporal_patch_size
        # Pixel values are flattened patches: (num_patches_total, channels * patch_area * temporal),
        # matching the GLM4V vision tower's expected packed layout.
        pixel_values = floats_tensor(
            [
                self.batch_size * (self.image_size**2) // (patch_size**2),
                self.num_channels * (patch_size**2) * temporal_patch_size,
            ]
        )
        return config, pixel_values

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
        # Scrub any randomly generated special ids so the only multimodal markers
        # are the ones we deliberately place below.
        input_ids[input_ids == self.video_token_id] = self.pad_token_id
        input_ids[input_ids == self.image_token_id] = self.pad_token_id
        input_ids[input_ids == self.video_start_token_id] = self.pad_token_id
        input_ids[input_ids == self.image_start_token_id] = self.pad_token_id
        input_ids[input_ids == self.video_end_token_id] = self.pad_token_id
        input_ids[input_ids == self.image_end_token_id] = self.pad_token_id
        # Layout: [image_start][num_image_tokens x image_token][image_end] then text.
        input_ids[:, 0] = self.image_start_token_id
        input_ids[:, 1 : 1 + self.num_image_tokens] = self.image_token_id
        input_ids[:, 1 + self.num_image_tokens] = self.image_end_token_id
        patch_size = config.vision_config.patch_size
        patches_per_side = self.image_size // patch_size
        # Token-type ids: 1 over the image span, 0 elsewhere.
        mm_token_type_ids = torch.zeros_like(input_ids)
        mm_token_type_ids[:, 1 : 1 + self.num_image_tokens] = 1
        inputs_dict = {
            "pixel_values": pixel_values,
            # One (t, h, w) grid entry per image: a single temporal step of
            # patches_per_side x patches_per_side patches.
            "image_grid_thw": torch.tensor(
                [[1, patches_per_side, patches_per_side]] * self.batch_size, device=torch_device
            ),
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "mm_token_type_ids": mm_token_type_ids,
        }
        return config, inputs_dict
@require_torch
class Glm46VModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
    """Common-suite tests (ModelTesterMixin/GenerationTesterMixin) for the tiny Glm46V model."""

    all_model_classes = (Glm46VModel, Glm46VForConditionalGeneration) if is_torch_available() else ()
    model_split_percents = [0.7, 0.9]  # model too big to split at 0.5
    _is_composite = True

    def setUp(self):
        self.model_tester = Glm46VVisionText2TextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=Glm46VConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    # GLM4V has images shaped as (bs*patch_len, dim) so we can't slice to batches in generate
    def prepare_config_and_inputs_for_generate(self, batch_size=2):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        # We don't want a few model inputs in our model input dictionary for generation tests
        input_keys_to_ignore = [
            # we don't want to mask attention heads
            # we don't want encoder-decoder models to start from filled decoder ids
            "decoder_input_ids",
            "decoder_attention_mask",
            # we'll set cache use in each test differently
            "use_cache",
            # Ignore labels if it is in the input dict
            "labels",
            # model-specific exceptions should overload/overwrite this function
        ]
        # The diff from the general `prepare_config_and_inputs_for_generate` lies here:
        # pixel_values are packed per-patch, so the batch slice is computed in patches.
        patch_size = config.vision_config.patch_size
        filtered_image_length = batch_size * (self.model_tester.image_size**2) // (patch_size**2)
        filtered_inputs_dict = {
            k: v[:batch_size, ...] if isinstance(v, torch.Tensor) else v
            for k, v in inputs_dict.items()
            if k not in input_keys_to_ignore
        }
        filtered_inputs_dict["pixel_values"] = inputs_dict["pixel_values"][:filtered_image_length]
        # It is important set `eos_token_id` to `None` to avoid early stopping (would break for length-based checks)
        text_gen_config = config.get_text_config(decoder=True)
        if text_gen_config.eos_token_id is not None and text_gen_config.pad_token_id is None:
            text_gen_config.pad_token_id = (
                text_gen_config.eos_token_id
                if isinstance(text_gen_config.eos_token_id, int)
                else text_gen_config.eos_token_id[0]
            )
        text_gen_config.eos_token_id = None
        text_gen_config.forced_eos_token_id = None
        return config, filtered_inputs_dict

    @unittest.skip(reason="No available kernels - not supported")
    def test_sdpa_can_dispatch_on_flash(self):
        pass

    @unittest.skip(reason="Size mismatch")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Error with compilation")
    def test_generate_from_inputs_embeds_with_static_cache(self):
        pass

    def test_inputs_embeds(self):
        # Forward pass must work from inputs_embeds alone (vision inputs removed).
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))
            input_ids = inputs["input_ids"]
            del inputs["input_ids"]
            del inputs["pixel_values"]
            del inputs["image_grid_thw"]
            wte = model.get_input_embeddings()
            inputs["inputs_embeds"] = wte(input_ids)
            with torch.no_grad():
                model(**inputs)[0]

    def test_inputs_embeds_matches_input_ids(self):
        # Passing embeddings computed from input_ids must give the same output as the ids.
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            inputs = self._prepare_for_class(inputs_dict, model_class)
            input_ids = inputs["input_ids"]
            del inputs["input_ids"]
            del inputs["pixel_values"]
            del inputs["image_grid_thw"]
            inputs_embeds = model.get_input_embeddings()(input_ids)
            with torch.no_grad():
                out_ids = model(input_ids=input_ids, **inputs)[0]
                out_embeds = model(inputs_embeds=inputs_embeds, **inputs)[0]
            torch.testing.assert_close(out_embeds, out_ids)
@require_torch
class Glm46VIntegrationTest(unittest.TestCase):
    """Slow integration tests against the THUDM/GLM-4.1V-9B-Thinking hub checkpoint.

    Expected decoded texts are pinned to this checkpoint with `torch.manual_seed(42)`
    (the hub generation config has `do_sample=True`), so outputs are device/seed sensitive.
    """

    def setUp(self):
        cleanup(torch_device, gc_collect=True)
        self.processor = AutoProcessor.from_pretrained("THUDM/GLM-4.1V-9B-Thinking")
        # Single-image chat message (intentionally mislabels a cat as a dog to probe the model).
        self.message = [
            {
                "role": "user",
                "content": [
                    {
                        "type": "image",
                        "url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg",
                    },
                    {"type": "text", "text": "What kind of dog is this?"},
                ],
            }
        ]
        # Second message with a different image (different resolution) for batched tests.
        self.message2 = [
            {
                "role": "user",
                "content": [
                    {
                        "type": "image",
                        "url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/coco_sample.png",
                    },
                    {"type": "text", "text": "What kind of dog is this?"},
                ],
            }
        ]

    def tearDown(self):
        cleanup(torch_device, gc_collect=True)

    @slow
    def test_small_model_integration_test(self):
        model = Glm46VForConditionalGeneration.from_pretrained(
            "THUDM/GLM-4.1V-9B-Thinking", dtype="auto", device_map="auto"
        )
        inputs = self.processor.apply_chat_template(
            self.message, tokenize=True, add_generation_prompt=True, return_dict=True, return_tensors="pt"
        )
        # Prefix of the tokenized prompt: specials + image-start + image placeholder tokens.
        expected_input_ids = [151331, 151333, 151336, 198, 151339, 151343, 151343, 151343, 151343, 151343, 151343, 151343, 151343, 151343, 151343, 151343, 151343]  # fmt: skip
        assert expected_input_ids == inputs.input_ids[0].tolist()[:17]
        # Spot-check a slice of the preprocessed pixel values.
        expected_pixel_slice = torch.tensor(
            [
                [-0.0988, -0.0842, -0.0842],
                [-0.5660, -0.5514, -0.4200],
                [-0.0259, -0.0259, -0.0259],
                [-0.1280, -0.0988, -0.2010],
                [-0.4638, -0.5806, -0.6974],
                [-1.2083, -1.2229, -1.2083],
            ],
            dtype=torch.float32,
            device="cpu",
        )
        assert torch.allclose(expected_pixel_slice, inputs.pixel_values[:6, :3], atol=3e-3)
        # verify generation
        inputs = inputs.to(torch_device)
        # This model on the hub has `do_sample=True`.
        torch.manual_seed(42)
        output = model.generate(**inputs, max_new_tokens=30)
        EXPECTED_DECODED_TEXT = "\nWhat kind of dog is this?\n<think>Got it, let's look at the image. The animal in the picture doesn't look like a dog; it's actually a cat. Specifically"
        self.assertEqual(
            self.processor.decode(output[0], skip_special_tokens=True),
            EXPECTED_DECODED_TEXT,
        )

    @slow
    def test_small_model_integration_test_batch(self):
        model = Glm46VForConditionalGeneration.from_pretrained(
            "THUDM/GLM-4.1V-9B-Thinking", dtype="auto", device_map="auto"
        )
        # Batch of two identical messages.
        batch_messages = [self.message] * 2
        inputs = self.processor.apply_chat_template(
            batch_messages, tokenize=True, add_generation_prompt=True, return_dict=True, return_tensors="pt"
        ).to(torch_device)
        # This model on the hub has `do_sample=True`.
        torch.manual_seed(42)
        # it should not matter whether two images are the same size or not
        output = model.generate(**inputs, max_new_tokens=30)
        EXPECTED_DECODED_TEXT = [
            "\nWhat kind of dog is this?\n<think>Got it, let's look at the image. The animal in the picture doesn't look like a dog; it's actually a cat. Specifically",
            "\nWhat kind of dog is this?\n<think>Got it, let's look at the image. The animal in the picture has a stocky body, thick fur, and a face that's"
        ]  # fmt: skip
        self.assertEqual(
            self.processor.batch_decode(output, skip_special_tokens=True),
            EXPECTED_DECODED_TEXT,
        )

    @slow
    def test_small_model_integration_test_with_video(self):
        # Smaller max_image_size keeps the video frames (and memory use) manageable.
        processor = AutoProcessor.from_pretrained("THUDM/GLM-4.1V-9B-Thinking", max_image_size={"longest_edge": 50176})
        model = Glm46VForConditionalGeneration.from_pretrained(
            "THUDM/GLM-4.1V-9B-Thinking", dtype=torch.float16, device_map="auto"
        )
        questions = ["Describe this video."]
        video_urls = ["https://huggingface.co/datasets/hf-internal-testing/fixtures_videos/resolve/main/tennis.mp4"]
        messages = [
            [
                {
                    "role": "user",
                    "content": [
                        {
                            "type": "video",
                            "video": video_url,
                        },
                        {"type": "text", "text": question},
                    ],
                }
            ]
            for question, video_url in zip(questions, video_urls)
        ]
        inputs = processor.apply_chat_template(
            messages, tokenize=True, add_generation_prompt=True, return_dict=True, return_tensors="pt", padding=True
        ).to(torch_device)
        # This model on the hub has `do_sample=True`.
        torch.manual_seed(42)
        output = model.generate(**inputs, max_new_tokens=30)
        EXPECTED_DECODED_TEXT = ["\n012345Describe this video.\n<think>Got it, let's analyze the video. First, the scene is an indoor tennis court. There are two players: one in a white shirt"]  # fmt: skip
        self.assertEqual(
            processor.batch_decode(output, skip_special_tokens=True),
            EXPECTED_DECODED_TEXT,
        )

    @slow
    @require_deterministic_for_xpu
    def test_small_model_integration_test_expand(self):
        model = Glm46VForConditionalGeneration.from_pretrained(
            "THUDM/GLM-4.1V-9B-Thinking", dtype="auto", device_map="auto"
        )
        inputs = self.processor.apply_chat_template(
            self.message, tokenize=True, add_generation_prompt=True, return_dict=True, return_tensors="pt"
        ).to(torch_device)
        # This model on the hub has `do_sample=True`.
        torch.manual_seed(42)
        # Beam search with two returned sequences exercises input expansion.
        output = model.generate(**inputs, max_new_tokens=30, do_sample=False, num_beams=2, num_return_sequences=2)
        # fmt: off
        EXPECTED_DECODED_TEXTS = Expectations(
            {
                (None, None): ["\nWhat kind of dog is this?\n<think>Got it, let's look at the image. The animal in the picture doesn't look like a dog; it's actually a cat. Specifically",
                               "\nWhat kind of dog is this?\n<think>Got it, let's look at the image. The animal in the picture doesn't look like a dog; it's actually a cat, specifically"
                ],
                ("xpu", None): ["\nWhat kind of dog is this?\n<think>Got it, let's look at the image. The animal in the picture is not a dog; it's a cat. Specifically, it looks",
                                "\nWhat kind of dog is this?\n<think>Got it, let's look at the image. The animal in the picture is not a dog; it's a cat, specifically a Pallas"
                ],
            }
        )
        # fmt: on
        EXPECTED_DECODED_TEXT = EXPECTED_DECODED_TEXTS.get_expectation()
        decoded_text = self.processor.batch_decode(output, skip_special_tokens=True)
        self.assertEqual(decoded_text, EXPECTED_DECODED_TEXT)

    @slow
    def test_small_model_integration_test_batch_wo_image(self):
        # Mixed batch: one multimodal message, one text-only message.
        model = Glm46VForConditionalGeneration.from_pretrained(
            "THUDM/GLM-4.1V-9B-Thinking", dtype="auto", device_map="auto"
        )
        message_wo_image = [
            {"role": "user", "content": [{"type": "text", "text": "Who are you?"}]},
        ]
        batched_messages = [self.message, message_wo_image]
        inputs = self.processor.apply_chat_template(
            batched_messages,
            tokenize=True,
            add_generation_prompt=True,
            return_dict=True,
            return_tensors="pt",
            padding=True,
        ).to(torch_device)
        # This model on the hub has `do_sample=True`.
        torch.manual_seed(42)
        # it should not matter whether two images are the same size or not
        output = model.generate(**inputs, max_new_tokens=30)
        EXPECTED_DECODED_TEXT = [
            "\nWhat kind of dog is this?\n<think>Got it, let's look at the image. The animal in the picture doesn't look like a dog; it's actually a cat. Specifically",
            "\nWho are you?\n<think>Got it, let's look at the user's question: \"Who are you?\" This is a common question when someone is just starting a conversation"
        ]  # fmt: skip
        self.assertEqual(
            self.processor.batch_decode(output, skip_special_tokens=True),
            EXPECTED_DECODED_TEXT,
        )

    @slow
    def test_small_model_integration_test_batch_different_resolutions(self):
        # Batch with two different images (different resolutions).
        model = Glm46VForConditionalGeneration.from_pretrained(
            "THUDM/GLM-4.1V-9B-Thinking", dtype="auto", device_map="auto"
        )
        batched_messages = [self.message, self.message2]
        inputs = self.processor.apply_chat_template(
            batched_messages,
            tokenize=True,
            add_generation_prompt=True,
            return_dict=True,
            return_tensors="pt",
            padding=True,
        ).to(torch_device)
        # This model on the hub has `do_sample=True`.
        torch.manual_seed(42)
        # it should not matter whether two images are the same size or not
        output = model.generate(**inputs, max_new_tokens=30)
        EXPECTED_DECODED_TEXT = [
            "\nWhat kind of dog is this?\n<think>Got it, let's look at the image. The animal in the picture doesn't look like a dog; it's actually a cat. Specifically",
            "\nWhat kind of dog is this?\n<think>Got it, let's look at the image. Wait, the animals here are cats, not dogs. The question is about a dog, but",
        ]  # fmt: skip
        self.assertEqual(
            self.processor.batch_decode(output, skip_special_tokens=True),
            EXPECTED_DECODED_TEXT,
        )

    @slow
    @require_flash_attn
    @require_torch_accelerator
    def test_small_model_integration_test_batch_flashatt2(self):
        # Same different-resolution batch, but with flash-attention-2 in bfloat16.
        model = Glm46VForConditionalGeneration.from_pretrained(
            "THUDM/GLM-4.1V-9B-Thinking",
            dtype=torch.bfloat16,
            attn_implementation="flash_attention_2",
            device_map="auto",
        )
        batched_messages = [self.message, self.message2]
        inputs = self.processor.apply_chat_template(
            batched_messages,
            tokenize=True,
            add_generation_prompt=True,
            return_dict=True,
            return_tensors="pt",
            padding=True,
        ).to(torch_device)
        # This model on the hub has `do_sample=True`.
        torch.manual_seed(42)
        # it should not matter whether two images are the same size or not
        output = model.generate(**inputs, max_new_tokens=30)
        EXPECTED_DECODED_TEXT = [
            "\nWhat kind of dog is this?\n<think>Got it, let's look at the image. The animal in the picture doesn't look like a dog. Wait, it's a cat,",
            "\nWhat kind of dog is this?\n<think>Got it, let's look at the image. Wait, the animals here are cats, not dogs. The question is about a dog, but"
        ]  # fmt: skip
        self.assertEqual(
            self.processor.batch_decode(output, skip_special_tokens=True),
            EXPECTED_DECODED_TEXT,
        )

    @slow
    @require_flash_attn
    @require_torch_accelerator
    def test_small_model_integration_test_batch_wo_image_flashatt2(self):
        # Mixed multimodal/text-only batch under flash-attention-2 in bfloat16.
        model = Glm46VForConditionalGeneration.from_pretrained(
            "THUDM/GLM-4.1V-9B-Thinking",
            dtype=torch.bfloat16,
            attn_implementation="flash_attention_2",
            device_map="auto",
        )
        message_wo_image = [
            {"role": "user", "content": [{"type": "text", "text": "Who are you?"}]},
        ]
        batched_messages = [self.message, message_wo_image]
        inputs = self.processor.apply_chat_template(
            batched_messages,
            tokenize=True,
            add_generation_prompt=True,
            return_dict=True,
            return_tensors="pt",
            padding=True,
        ).to(torch_device)
        # This model on the hub has `do_sample=True`.
        torch.manual_seed(42)
        # it should not matter whether two images are the same size or not
        output = model.generate(**inputs, max_new_tokens=30)
        EXPECTED_DECODED_TEXT = [
            "\nWhat kind of dog is this?\n<think>Got it, let's look at the image. The animal in the picture doesn't look like a dog; it's actually a cat. Specifically",
            "\nWho are you?\n<think>Got it, let's look at the user's question: \"Who are you?\" This is a common question when someone is just starting a conversation"
        ]  # fmt: skip
        self.assertEqual(
            self.processor.batch_decode(output, skip_special_tokens=True),
            EXPECTED_DECODED_TEXT,
        )
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/glm46v/test_modeling_glm46v.py",
"license": "Apache License 2.0",
"lines": 510,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:tests/models/glm46v/test_processor_glm46v.py | # Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import unittest
import numpy as np
from transformers.testing_utils import require_av, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_processing_common import ProcessorTesterMixin, url_to_local_path
if is_vision_available():
from transformers import Glm46VProcessor
if is_torch_available():
import torch
@require_vision
@require_torch
class Glm46VProcessorTest(ProcessorTesterMixin, unittest.TestCase):
    """Processor tests for GLM-4.6V, specializing the shared `ProcessorTesterMixin` suite.

    Overrides the mixin's chat-template driver to pass GLM-specific video kwargs
    (`fps`, `do_sample_frames`) and shrinks the image size/patch so processing stays fast.
    """

    processor_class = Glm46VProcessor
    model_id = "THUDM/GLM-4.1V-9B-Thinking"

    @classmethod
    def _setup_test_attributes(cls, processor):
        # Cache the image placeholder token so tests can reference it without a processor handle.
        cls.image_token = processor.image_token

    @classmethod
    def _setup_from_pretrained(cls, model_id, **kwargs):
        # Small patch/size keeps image token counts tiny; frame sampling is off by default
        # so video inputs are consumed as given unless a test opts in.
        return super()._setup_from_pretrained(
            model_id,
            do_sample_frames=False,
            patch_size=4,
            size={"shortest_edge": 12 * 12, "longest_edge": 18 * 18},
            **kwargs,
        )

    @require_torch
    @require_av
    def _test_apply_chat_template(
        self,
        modality: str,
        batch_size: int,
        return_tensors: str,
        input_name: str,
        processor_name: str,
        input_data: list[str],
    ):
        """Shared driver: checks templating, tokenization parity, kwargs forwarding, and
        `return_dict=True` outputs for text-only and text+modality batches."""
        processor = self.get_processor()
        if processor.chat_template is None:
            self.skipTest("Processor has no chat template")

        if processor_name not in self.processor_class.get_attributes():
            self.skipTest(f"{processor_name} attribute not present in {self.processor_class}")

        batch_messages = [
            [
                {
                    "role": "user",
                    "content": [{"type": "text", "text": "Describe this."}],
                },
            ]
        ] * batch_size

        # Test that jinja can be applied
        formatted_prompt = processor.apply_chat_template(batch_messages, add_generation_prompt=True, tokenize=False)
        self.assertEqual(len(formatted_prompt), batch_size)

        # Test that tokenizing with template and directly with `self.tokenizer` gives same output
        formatted_prompt_tokenized = processor.apply_chat_template(
            batch_messages, add_generation_prompt=True, tokenize=True, return_tensors=return_tensors
        )
        # Avoid double-adding special tokens when the template already emits a BOS.
        add_special_tokens = True
        if processor.tokenizer.bos_token is not None and formatted_prompt[0].startswith(processor.tokenizer.bos_token):
            add_special_tokens = False
        tok_output = processor.tokenizer(
            formatted_prompt, return_tensors=return_tensors, add_special_tokens=add_special_tokens
        )
        expected_output = tok_output.input_ids
        self.assertListEqual(expected_output.tolist(), formatted_prompt_tokenized.tolist())

        # Test that kwargs passed to processor's `__call__` are actually used
        tokenized_prompt_100 = processor.apply_chat_template(
            batch_messages,
            add_generation_prompt=True,
            tokenize=True,
            padding="max_length",
            truncation=True,
            return_tensors=return_tensors,
            max_length=100,
        )
        self.assertEqual(len(tokenized_prompt_100[0]), 100)

        # Test that `return_dict=True` returns text related inputs in the dict
        out_dict_text = processor.apply_chat_template(
            batch_messages,
            add_generation_prompt=True,
            tokenize=True,
            return_dict=True,
            return_tensors=return_tensors,
        )
        self.assertTrue(all(key in out_dict_text for key in ["input_ids", "attention_mask"]))
        self.assertEqual(len(out_dict_text["input_ids"]), batch_size)
        self.assertEqual(len(out_dict_text["attention_mask"]), batch_size)

        # Test that with modality URLs and `return_dict=True`, we get modality inputs in the dict
        for idx, url in enumerate(input_data[:batch_size]):
            batch_messages[idx][0]["content"] = [batch_messages[idx][0]["content"][0], {"type": modality, "url": url}]

        out_dict = processor.apply_chat_template(
            batch_messages,
            add_generation_prompt=True,
            tokenize=True,
            return_dict=True,
            return_tensors=return_tensors,
            fps=2
            if isinstance(input_data[0], str)
            else None,  # by default no more than 2 frames per second, otherwise too slow
            do_sample_frames=bool(isinstance(input_data[0], str)),  # don't sample frames if decoded video is used
        )
        input_name = getattr(self, input_name)
        self.assertTrue(input_name in out_dict)
        self.assertEqual(len(out_dict["input_ids"]), batch_size)
        self.assertEqual(len(out_dict["attention_mask"]), batch_size)

        if modality == "video":
            # qwen pixels don't scale with bs same way as other models, calculate expected video token count based on video_grid_thw
            expected_video_token_count = 0
            for thw in out_dict["video_grid_thw"]:
                expected_video_token_count += thw[0] * thw[1] * thw[2]
            mm_len = expected_video_token_count
        else:
            # NOTE(review): assumes each image expands to 4 patches under the tiny
            # patch/size configured in `_setup_from_pretrained` — verify if that setup changes.
            mm_len = batch_size * 4
        self.assertEqual(len(out_dict[input_name]), mm_len)

        return_tensor_to_type = {"pt": torch.Tensor, "np": np.ndarray, None: list}
        for k in out_dict:
            self.assertIsInstance(out_dict[k], return_tensor_to_type[return_tensors])

    @require_av
    def test_apply_chat_template_video_frame_sampling(self):
        """Checks frame-sampling behavior: `fps` sampling, whole-video loading, frame lists,
        and the error raised when asked to re-sample a list of pre-sampled frames."""
        processor = self.get_processor()
        if processor.chat_template is None:
            self.skipTest("Processor has no chat template")

        signature = inspect.signature(processor.__call__)
        if "videos" not in {*signature.parameters.keys()} or (
            signature.parameters.get("videos") is not None
            and signature.parameters["videos"].annotation == inspect._empty
        ):
            self.skipTest("Processor doesn't accept videos at input")

        messages = [
            [
                {
                    "role": "user",
                    "content": [
                        {"type": "video"},
                        {"type": "text", "text": "What is shown in this video?"},
                    ],
                },
            ]
        ]

        formatted_prompt = processor.apply_chat_template(messages, add_generation_prompt=True, tokenize=False)
        self.assertEqual(len(formatted_prompt), 1)

        formatted_prompt_tokenized = processor.apply_chat_template(messages, add_generation_prompt=True, tokenize=True)
        expected_output = processor.tokenizer(formatted_prompt, return_tensors=None).input_ids
        self.assertListEqual(expected_output, formatted_prompt_tokenized)

        out_dict = processor.apply_chat_template(messages, add_generation_prompt=True, tokenize=True, return_dict=True)
        self.assertListEqual(list(out_dict.keys()), ["input_ids", "attention_mask", "mm_token_type_ids"])

        # Add video URL for return dict and load with `num_frames` arg
        messages[0][0]["content"][0] = {
            "type": "video",
            "url": url_to_local_path(
                "https://huggingface.co/datasets/raushan-testing-hf/videos-test/resolve/main/tiny_video.mp4"
            ),
        }

        # Load with `video_fps` arg
        video_fps = 10
        out_dict_with_video = processor.apply_chat_template(
            messages,
            add_generation_prompt=True,
            tokenize=True,
            return_dict=True,
            fps=video_fps,
        )
        self.assertTrue(self.videos_input_name in out_dict_with_video)
        self.assertEqual(len(out_dict_with_video[self.videos_input_name]), 8)

        # Load the whole video
        out_dict_with_video = processor.apply_chat_template(
            messages,
            add_generation_prompt=True,
            tokenize=True,
            return_dict=True,
            do_sample_frames=False,
        )
        self.assertTrue(self.videos_input_name in out_dict_with_video)
        self.assertEqual(len(out_dict_with_video[self.videos_input_name]), 24)

        # Load video as a list of frames (i.e. images). NOTE: each frame should have same size
        # because we assume they come from one video
        messages[0][0]["content"][0] = {
            "type": "video",
            "url": [
                url_to_local_path(
                    "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
                ),
                url_to_local_path(
                    "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
                ),
            ],
        }
        out_dict_with_video = processor.apply_chat_template(
            messages,
            add_generation_prompt=True,
            tokenize=True,
            return_dict=True,
            do_sample_frames=False,
        )
        self.assertTrue(self.videos_input_name in out_dict_with_video)
        self.assertEqual(len(out_dict_with_video[self.videos_input_name]), 4)

        # When the inputs are frame URLs/paths we expect that those are already
        # sampled and will raise an error is asked to sample again.
        with self.assertRaisesRegex(
            ValueError, "Sampling frames from a list of images is not supported! Set `do_sample_frames=False`"
        ):
            out_dict_with_video = processor.apply_chat_template(
                messages,
                add_generation_prompt=True,
                tokenize=True,
                return_dict=True,
                do_sample_frames=True,
            )

    def test_model_input_names(self):
        """Processor output keys must match `processor.model_input_names` exactly."""
        processor = self.get_processor()

        text = self.prepare_text_inputs(modalities=["image", "video"])
        image_input = self.prepare_image_inputs()
        video_inputs = self.prepare_video_inputs()
        inputs_dict = {"text": text, "images": image_input, "videos": video_inputs}
        inputs = processor(**inputs_dict, return_tensors="pt", do_sample_frames=False)

        self.assertSetEqual(set(inputs.keys()), set(processor.model_input_names))
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/glm46v/test_processor_glm46v.py",
"license": "Apache License 2.0",
"lines": 230,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:tests/models/glm46v/test_video_processing_glm46v.py | # Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from transformers.image_utils import IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_torchvision_available, is_vision_available
from ...test_video_processing_common import VideoProcessingTestMixin, prepare_video_inputs
if is_torch_available():
from PIL import Image
if is_vision_available():
if is_torchvision_available():
from transformers import Glm46VVideoProcessor
from transformers.models.glm46v.video_processing_glm46v import smart_resize
class Glm46VVideoProcessingTester:
    """Fixture builder for GLM-4.6V video-processing tests.

    Holds the processor configuration used by the tests, fabricates video inputs
    and per-video metadata, and computes the patchified output shape the processor
    is expected to produce.
    """

    def __init__(
        self,
        parent,
        batch_size=5,
        num_frames=8,
        num_channels=3,
        min_resolution=30,
        max_resolution=80,
        temporal_patch_size=2,
        patch_size=14,
        merge_size=2,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=IMAGENET_STANDARD_MEAN,
        image_std=IMAGENET_STANDARD_STD,
        do_convert_rgb=True,
    ):
        # Tiny default size bounds keep smart_resize outputs small and tests fast.
        size = size if size is not None else {"longest_edge": 20, "shortest_edge": 10}
        self.parent = parent
        self.batch_size = batch_size
        self.num_frames = num_frames
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb
        self.temporal_patch_size = temporal_patch_size
        self.patch_size = patch_size
        self.merge_size = merge_size

    def prepare_video_processor_dict(self):
        """Return the kwargs dict used to instantiate the video processor under test."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_convert_rgb": self.do_convert_rgb,
            "do_sample_frames": True,
        }

    def prepare_video_metadata(self, videos):
        """Build one metadata dict per video (fps fixed at 2; frame count inferred from the input)."""
        video_metadata = []
        for video in videos:
            if isinstance(video, list):
                num_frames = len(video)
            elif hasattr(video, "shape"):
                if len(video.shape) == 4:  # (T, H, W, C)
                    num_frames = video.shape[0]
                else:
                    num_frames = 1
            else:
                num_frames = self.num_frames
            metadata = {
                "fps": 2,
                "duration": num_frames / 2,
                "total_num_frames": num_frames,
            }
            video_metadata.append(metadata)
        return video_metadata

    def expected_output_video_shape(self, videos):
        """Return `[seq_len, hidden_dim]` expected from the processor for `videos`.

        Mirrors the patchification arithmetic: per-video spatial grid from
        `smart_resize`, temporal grid from `num_frames // temporal_patch_size`.
        """
        grid_t = self.num_frames // self.temporal_patch_size
        hidden_dim = self.num_channels * self.temporal_patch_size * self.patch_size * self.patch_size
        seq_len = 0
        for video in videos:
            # Normalize the input to something with a `.shape` so dims can be read off.
            if isinstance(video, list) and isinstance(video[0], Image.Image):
                video = np.stack([np.array(frame) for frame in video])
            elif hasattr(video, "shape"):
                pass
            else:
                video = np.array(video)
            if hasattr(video, "shape") and len(video.shape) >= 3:
                if len(video.shape) == 4:
                    t, height, width = video.shape[:3]
                elif len(video.shape) == 3:
                    height, width = video.shape[:2]
                    t = 1
                else:
                    t, height, width = self.num_frames, self.min_resolution, self.min_resolution
            else:
                # Fall back to the tester's defaults when dims can't be read from the input.
                t, height, width = self.num_frames, self.min_resolution, self.min_resolution
            resized_height, resized_width = smart_resize(
                t,
                height,
                width,
                factor=self.patch_size * self.merge_size,
                min_pixels=self.size["shortest_edge"],
                max_pixels=self.size["longest_edge"],
            )
            grid_h, grid_w = resized_height // self.patch_size, resized_width // self.patch_size
            seq_len += grid_t * grid_h * grid_w
        return [seq_len, hidden_dim]

    def prepare_video_inputs(self, equal_resolution=False, return_tensors="pil"):
        """Fabricate a batch of random test videos in the requested container type."""
        videos = prepare_video_inputs(
            batch_size=self.batch_size,
            num_frames=self.num_frames,
            num_channels=self.num_channels,
            min_resolution=self.min_resolution,
            max_resolution=self.max_resolution,
            equal_resolution=equal_resolution,
            return_tensors=return_tensors,
        )
        return videos
@require_torch
@require_vision
class Glm46VVideoProcessingTest(VideoProcessingTestMixin, unittest.TestCase):
    """Video-processor tests for GLM-4.6V: PIL/numpy/torch inputs, nested frame lists,
    dict-based construction, and metadata-driven frame sampling."""

    fast_video_processing_class = Glm46VVideoProcessor if is_torchvision_available() else None
    input_name = "pixel_values_videos"

    def setUp(self):
        super().setUp()
        self.video_processor_tester = Glm46VVideoProcessingTester(self)

    @property
    def video_processor_dict(self):
        # Kwargs used to build each processor instance under test.
        return self.video_processor_tester.prepare_video_processor_dict()

    def test_video_processor_from_dict_with_kwargs(self):
        """`from_dict` honors both the dict values and explicit keyword overrides."""
        video_processor = self.fast_video_processing_class.from_dict(self.video_processor_dict)
        self.assertEqual(video_processor.size, {"longest_edge": 20, "shortest_edge": 10})

        video_processor = self.fast_video_processing_class.from_dict(
            self.video_processor_dict, size={"longest_edge": 42, "shortest_edge": 42}
        )
        self.assertEqual(video_processor.size, {"longest_edge": 42, "shortest_edge": 42})

    def test_call_pil(self):
        """Single and batched PIL-frame videos produce the expected patch-sequence shape."""
        for video_processing_class in self.video_processor_list:
            video_processing = video_processing_class(**self.video_processor_dict)
            video_inputs = self.video_processor_tester.prepare_video_inputs(
                equal_resolution=False, return_tensors="pil"
            )
            for video in video_inputs:
                self.assertIsInstance(video[0], Image.Image)
            video_metadata = self.video_processor_tester.prepare_video_metadata(video_inputs)
            encoded_videos = video_processing(
                video_inputs[0], video_metadata=[video_metadata[0]], return_tensors="pt"
            )[self.input_name]
            expected_output_video_shape = self.video_processor_tester.expected_output_video_shape([video_inputs[0]])
            self.assertEqual(list(encoded_videos.shape), expected_output_video_shape)

            encoded_videos = video_processing(video_inputs, video_metadata=video_metadata, return_tensors="pt")[
                self.input_name
            ]
            expected_output_video_shape = self.video_processor_tester.expected_output_video_shape(video_inputs)
            self.assertEqual(list(encoded_videos.shape), expected_output_video_shape)

    def test_call_numpy(self):
        """Same as `test_call_pil` but with numpy-array videos."""
        for video_processing_class in self.video_processor_list:
            video_processing = video_processing_class(**self.video_processor_dict)
            video_inputs = self.video_processor_tester.prepare_video_inputs(
                equal_resolution=False, return_tensors="np"
            )
            video_metadata = self.video_processor_tester.prepare_video_metadata(video_inputs)
            encoded_videos = video_processing(
                video_inputs[0], video_metadata=[video_metadata[0]], return_tensors="pt"
            )[self.input_name]
            expected_output_video_shape = self.video_processor_tester.expected_output_video_shape([video_inputs[0]])
            self.assertEqual(list(encoded_videos.shape), expected_output_video_shape)

            encoded_videos = video_processing(video_inputs, video_metadata=video_metadata, return_tensors="pt")[
                self.input_name
            ]
            expected_output_video_shape = self.video_processor_tester.expected_output_video_shape(video_inputs)
            self.assertEqual(list(encoded_videos.shape), expected_output_video_shape)

    def test_call_pytorch(self):
        """Same as `test_call_pil` but with torch-tensor videos."""
        for video_processing_class in self.video_processor_list:
            video_processing = video_processing_class(**self.video_processor_dict)
            video_inputs = self.video_processor_tester.prepare_video_inputs(
                equal_resolution=False, return_tensors="pt"
            )
            video_metadata = self.video_processor_tester.prepare_video_metadata(video_inputs)
            encoded_videos = video_processing(
                video_inputs[0], video_metadata=[video_metadata[0]], return_tensors="pt"
            )[self.input_name]
            expected_output_video_shape = self.video_processor_tester.expected_output_video_shape([video_inputs[0]])
            self.assertEqual(list(encoded_videos.shape), expected_output_video_shape)

            encoded_videos = video_processing(video_inputs, video_metadata=video_metadata, return_tensors="pt")[
                self.input_name
            ]
            expected_output_video_shape = self.video_processor_tester.expected_output_video_shape(video_inputs)
            self.assertEqual(list(encoded_videos.shape), expected_output_video_shape)

    @unittest.skip("Skip for now, the test needs adjustment for GLM-4.1V")
    def test_call_numpy_4_channels(self):
        for video_processing_class in self.video_processor_list:
            # Test that can process videos which have an arbitrary number of channels
            # Initialize video_processing
            video_processor = video_processing_class(**self.video_processor_dict)

            # create random numpy tensors
            self.video_processor_tester.num_channels = 4
            video_inputs = self.video_processor_tester.prepare_video_inputs(
                equal_resolution=False, return_tensors="np"
            )

            # Test not batched input
            encoded_videos = video_processor(
                video_inputs[0],
                return_tensors="pt",
                input_data_format="channels_last",
                image_mean=(0.0, 0.0, 0.0, 0.0),
                image_std=(1.0, 1.0, 1.0, 1.0),
            )[self.input_name]
            expected_output_video_shape = self.video_processor_tester.expected_output_video_shape([video_inputs[0]])
            self.assertEqual(list(encoded_videos.shape), expected_output_video_shape)

            # Test batched
            encoded_videos = video_processor(
                video_inputs,
                return_tensors="pt",
                input_data_format="channels_last",
                image_mean=(0.0, 0.0, 0.0, 0.0),
                image_std=(1.0, 1.0, 1.0, 1.0),
            )[self.input_name]
            expected_output_video_shape = self.video_processor_tester.expected_output_video_shape(video_inputs)
            self.assertEqual(list(encoded_videos.shape), expected_output_video_shape)

    def test_nested_input(self):
        """Tests that the processor can work with nested list where each video is a list of arrays"""
        for video_processing_class in self.video_processor_list:
            video_processing = video_processing_class(**self.video_processor_dict)
            video_inputs = self.video_processor_tester.prepare_video_inputs(
                equal_resolution=False, return_tensors="np"
            )
            video_inputs_nested = [list(video) for video in video_inputs]
            video_metadata = self.video_processor_tester.prepare_video_metadata(video_inputs)

            # Test not batched input
            encoded_videos = video_processing(
                video_inputs_nested[0], video_metadata=[video_metadata[0]], return_tensors="pt"
            )[self.input_name]
            expected_output_video_shape = self.video_processor_tester.expected_output_video_shape([video_inputs[0]])
            self.assertEqual(list(encoded_videos.shape), expected_output_video_shape)

            # Test batched
            encoded_videos = video_processing(video_inputs_nested, video_metadata=video_metadata, return_tensors="pt")[
                self.input_name
            ]
            expected_output_video_shape = self.video_processor_tester.expected_output_video_shape(video_inputs)
            self.assertEqual(list(encoded_videos.shape), expected_output_video_shape)

    def test_call_sample_frames(self):
        """Frame sampling requires metadata: with it, both single and batched calls succeed;
        without it, the processor raises `ValueError`."""
        for video_processing_class in self.video_processor_list:
            video_processor_dict = self.video_processor_dict.copy()
            video_processing = video_processing_class(**video_processor_dict)

            # Temporarily bump resolution/frames so sampling has something to work with;
            # restored at the end of the loop body.
            prev_num_frames = self.video_processor_tester.num_frames
            self.video_processor_tester.num_frames = 8
            prev_min_resolution = getattr(self.video_processor_tester, "min_resolution", None)
            prev_max_resolution = getattr(self.video_processor_tester, "max_resolution", None)
            self.video_processor_tester.min_resolution = 56
            self.video_processor_tester.max_resolution = 112

            video_inputs = self.video_processor_tester.prepare_video_inputs(
                equal_resolution=False,
                return_tensors="torch",
            )

            metadata = [[{"total_num_frames": 8, "fps": 4}]]
            batched_metadata = metadata * len(video_inputs)

            encoded_videos = video_processing(video_inputs[0], return_tensors="pt", video_metadata=metadata)[
                self.input_name
            ]
            encoded_videos_batched = video_processing(
                video_inputs, return_tensors="pt", video_metadata=batched_metadata
            )[self.input_name]
            self.assertIsNotNone(encoded_videos)
            self.assertIsNotNone(encoded_videos_batched)
            self.assertEqual(len(encoded_videos.shape), 2)
            self.assertEqual(len(encoded_videos_batched.shape), 2)

            # Sampling without metadata must fail loudly.
            with self.assertRaises(ValueError):
                video_processing(video_inputs[0], return_tensors="pt")[self.input_name]

            self.video_processor_tester.num_frames = prev_num_frames
            if prev_min_resolution is not None:
                self.video_processor_tester.min_resolution = prev_min_resolution
            if prev_max_resolution is not None:
                self.video_processor_tester.max_resolution = prev_max_resolution
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/glm46v/test_video_processing_glm46v.py",
"license": "Apache License 2.0",
"lines": 288,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:src/transformers/initialization.py | # Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import sys
from collections import defaultdict
from contextlib import contextmanager
import torch
# Record all the torch primitives in advance, so that we can use them without them being modified when we patch torch
# in context managers.
# Keys mirror the names in `torch.nn.init`; values are the pristine callables captured at
# import time, so the guarded wrappers below always reach the real implementations even while
# `guard_torch_init_functions` / `no_init_weights` have monkey-patched the torch modules.
TORCH_INIT_FUNCTIONS = {
    "uniform_": torch.nn.init.uniform_,
    "normal_": torch.nn.init.normal_,
    "constant_": torch.nn.init.constant_,
    "ones_": torch.nn.init.ones_,
    "zeros_": torch.nn.init.zeros_,
    "eye_": torch.nn.init.eye_,
    "dirac_": torch.nn.init.dirac_,
    "xavier_uniform_": torch.nn.init.xavier_uniform_,
    "xavier_normal_": torch.nn.init.xavier_normal_,
    "kaiming_uniform_": torch.nn.init.kaiming_uniform_,
    "kaiming_normal_": torch.nn.init.kaiming_normal_,
    "trunc_normal_": torch.nn.init.trunc_normal_,
    "orthogonal_": torch.nn.init.orthogonal_,
    "sparse_": torch.nn.init.sparse_,
}
def uniform_(
    tensor: torch.Tensor, a: float = 0.0, b: float = 1.0, generator: torch.Generator | None = None
) -> torch.Tensor:
    """Guarded `torch.nn.init.uniform_`: no-op for tensors flagged `_is_hf_initialized`."""
    if getattr(tensor, "_is_hf_initialized", False):
        return tensor
    return TORCH_INIT_FUNCTIONS["uniform_"](tensor, a=a, b=b, generator=generator)
def normal_(
    tensor: torch.Tensor, mean: float = 0.0, std: float = 1.0, generator: torch.Generator | None = None
) -> torch.Tensor:
    """Guarded `torch.nn.init.normal_`: no-op for tensors flagged `_is_hf_initialized`."""
    if getattr(tensor, "_is_hf_initialized", False):
        return tensor
    return TORCH_INIT_FUNCTIONS["normal_"](tensor, mean=mean, std=std, generator=generator)
def constant_(tensor: torch.Tensor, val: float) -> torch.Tensor:
    """Guarded `torch.nn.init.constant_`: no-op for tensors flagged `_is_hf_initialized`."""
    if getattr(tensor, "_is_hf_initialized", False):
        return tensor
    return TORCH_INIT_FUNCTIONS["constant_"](tensor, val=val)
def ones_(tensor: torch.Tensor) -> torch.Tensor:
    """Guarded `torch.nn.init.ones_`: no-op for tensors flagged `_is_hf_initialized`."""
    if getattr(tensor, "_is_hf_initialized", False):
        return tensor
    return TORCH_INIT_FUNCTIONS["ones_"](tensor)
def zeros_(tensor: torch.Tensor) -> torch.Tensor:
    """Guarded `torch.nn.init.zeros_`: no-op for tensors flagged `_is_hf_initialized`."""
    if getattr(tensor, "_is_hf_initialized", False):
        return tensor
    return TORCH_INIT_FUNCTIONS["zeros_"](tensor)
def eye_(tensor: torch.Tensor) -> torch.Tensor:
    """Guarded `torch.nn.init.eye_`: no-op for tensors flagged `_is_hf_initialized`."""
    if getattr(tensor, "_is_hf_initialized", False):
        return tensor
    return TORCH_INIT_FUNCTIONS["eye_"](tensor)
def dirac_(tensor: torch.Tensor, groups: int = 1) -> torch.Tensor:
    """Guarded `torch.nn.init.dirac_`: no-op for tensors flagged `_is_hf_initialized`."""
    if getattr(tensor, "_is_hf_initialized", False):
        return tensor
    return TORCH_INIT_FUNCTIONS["dirac_"](tensor, groups=groups)
def xavier_uniform_(tensor: torch.Tensor, gain: float = 1.0, generator: torch.Generator | None = None) -> torch.Tensor:
    """Guarded `torch.nn.init.xavier_uniform_`: no-op for tensors flagged `_is_hf_initialized`."""
    if getattr(tensor, "_is_hf_initialized", False):
        return tensor
    return TORCH_INIT_FUNCTIONS["xavier_uniform_"](tensor, gain=gain, generator=generator)
def xavier_normal_(tensor: torch.Tensor, gain: float = 1.0, generator: torch.Generator | None = None) -> torch.Tensor:
    """Guarded `torch.nn.init.xavier_normal_`: no-op for tensors flagged `_is_hf_initialized`."""
    if getattr(tensor, "_is_hf_initialized", False):
        return tensor
    return TORCH_INIT_FUNCTIONS["xavier_normal_"](tensor, gain=gain, generator=generator)
def kaiming_uniform_(
    tensor: torch.Tensor,
    a: float = 0,
    mode: str = "fan_in",
    nonlinearity: str = "leaky_relu",
    generator: torch.Generator | None = None,
) -> torch.Tensor:
    """Guarded `torch.nn.init.kaiming_uniform_`: no-op for tensors flagged `_is_hf_initialized`."""
    if getattr(tensor, "_is_hf_initialized", False):
        return tensor
    return TORCH_INIT_FUNCTIONS["kaiming_uniform_"](
        tensor, a=a, mode=mode, nonlinearity=nonlinearity, generator=generator
    )
def kaiming_normal_(
    tensor: torch.Tensor,
    a: float = 0,
    mode: str = "fan_in",
    nonlinearity: str = "leaky_relu",
    generator: torch.Generator | None = None,
) -> torch.Tensor:
    """Guarded `torch.nn.init.kaiming_normal_`: no-op for tensors flagged `_is_hf_initialized`."""
    if getattr(tensor, "_is_hf_initialized", False):
        return tensor
    return TORCH_INIT_FUNCTIONS["kaiming_normal_"](
        tensor, a=a, mode=mode, nonlinearity=nonlinearity, generator=generator
    )
def trunc_normal_(
    tensor: torch.Tensor,
    mean: float = 0.0,
    std: float = 1.0,
    a: float = -2.0,
    b: float = 2.0,
    generator: torch.Generator | None = None,
) -> torch.Tensor:
    """Guarded `torch.nn.init.trunc_normal_`: no-op for tensors flagged `_is_hf_initialized`."""
    if getattr(tensor, "_is_hf_initialized", False):
        return tensor
    return TORCH_INIT_FUNCTIONS["trunc_normal_"](tensor, mean=mean, std=std, a=a, b=b, generator=generator)
def orthogonal_(
    tensor: torch.Tensor,
    gain: float = 1,
    generator: torch.Generator | None = None,
) -> torch.Tensor:
    """Guarded `torch.nn.init.orthogonal_`: no-op for tensors flagged `_is_hf_initialized`."""
    if getattr(tensor, "_is_hf_initialized", False):
        return tensor
    return TORCH_INIT_FUNCTIONS["orthogonal_"](tensor, gain=gain, generator=generator)
def sparse_(
    tensor: torch.Tensor, sparsity: float, std: float = 0.01, generator: torch.Generator | None = None
) -> torch.Tensor:
    """Guarded `torch.nn.init.sparse_`: no-op for tensors flagged `_is_hf_initialized`."""
    if getattr(tensor, "_is_hf_initialized", False):
        return tensor
    return TORCH_INIT_FUNCTIONS["sparse_"](tensor, sparsity=sparsity, std=std, generator=generator)
def copy_(tensor: torch.Tensor, other: torch.Tensor) -> torch.Tensor:
    """Guarded in-place copy of `other` into `tensor`; skipped when `tensor` is flagged `_is_hf_initialized`."""
    if getattr(tensor, "_is_hf_initialized", False):
        return tensor
    # no_grad so the copy is not recorded by autograd.
    with torch.no_grad():
        return tensor.copy_(other)
def _variance_scaling(tensor, mode="fan_in", distribution="normal"):
fan_in, fan_out = torch.nn.init._calculate_fan_in_and_fan_out(tensor)
if mode == "fan_in":
denom = fan_in
elif mode == "fan_out":
denom = fan_out
elif mode == "fan_avg":
denom = (fan_in + fan_out) / 2
variance = 1.0 / denom
if distribution == "truncated_normal":
trunc_normal_(tensor, std=math.sqrt(variance) / 0.87962566103423978)
elif distribution == "normal":
normal_(tensor, std=math.sqrt(variance))
elif distribution == "uniform":
bound = math.sqrt(3 * variance)
uniform_(tensor, -bound, bound)
else:
raise ValueError(f"invalid distribution {distribution}")
def lecun_normal_(tensor):
    """LeCun-normal init (fan_in, truncated normal); no-op for tensors flagged `_is_hf_initialized`."""
    if getattr(tensor, "_is_hf_initialized", False):
        return tensor
    _variance_scaling(tensor, mode="fan_in", distribution="truncated_normal")
    return tensor
def default_flax_embed_init_(tensor):
    """Flax's default embedding init (fan_in, normal); no-op for tensors flagged `_is_hf_initialized`."""
    if getattr(tensor, "_is_hf_initialized", False):
        return tensor
    _variance_scaling(tensor, mode="fan_in", distribution="normal")
    return tensor
# Here, we need to check several modules imported, and hot patch all of them, as sometimes torch does
# something like `from torch.nn.init import xavier_uniform_` in their internals (e.g in torch.nn.modules.activations,
# where MultiHeadAttention lives), so the function name is binded at import time and just doing
# `setattr(torch.nn.init, name, globals()[name])` is thus not enough
# The following list should be enough for all torch versions we work with
# Module paths (as `sys.modules` keys) whose module-level bindings of the init
# functions get swapped by the context managers below.
TORCH_MODULES_TO_PATCH = (
    "torch.nn.init",
    "torch.nn.modules.activation",
    "torch.nn.modules.transformer",
    "torch.nn.modules.linear",
    "torch.nn.modules.loss",
    "torch.nn.modules.batchnorm",
    "torch.nn.modules.conv",
    "torch.nn.modules.normalization",
    "torch.nn.modules.rnn",
    "torch.nn.modules.sparse",
)
@contextmanager
def guard_torch_init_functions():
    """
    Guard the `torch.nn.init` primitive functions to behave exactly like the functions in this file, i.e. be
    protected against the `_is_hf_initialized` flag to avoid re-init if the param was already loaded.

    Usually, all models are using the init from `transformers` which are already guarded, but just to make extra sure
    and for remote code, we also use this context manager.
    """
    # module -> {func_name: original callable}, so everything can be restored on exit.
    originals = defaultdict(dict)
    try:
        # Replace all torch funcs by the ones in this file
        for module_name in TORCH_MODULES_TO_PATCH:
            # Only patch modules that were actually imported in this process.
            if module_name in sys.modules:
                module = sys.modules[module_name]
                for func_name in TORCH_INIT_FUNCTIONS.keys():
                    if hasattr(module, func_name):
                        originals[module][func_name] = getattr(module, func_name)
                        # `globals()[func_name]` is the guarded wrapper defined in this file.
                        setattr(module, func_name, globals()[func_name])
        yield
    finally:
        # Set back the original functions on all modules
        for module, functions in originals.items():
            for func_name, func in functions.items():
                setattr(module, func_name, func)
@contextmanager
def no_init_weights():
    """
    Disable weight initialization both at the torch-level, and at the transformers-level (`init_weights`).

    This is used to speed-up initializing an empty model with deepspeed, as we do not initialize the model on meta
    device with deepspeed, but we still don't need to run expensive weight initializations as we are loading params
    afterwards.
    """
    # Imported lazily to avoid a circular import at module load time.
    from .modeling_utils import PreTrainedModel

    def empty_func(*args, **kwargs):
        pass

    # module -> {func_name: original callable}, so everything can be restored on exit.
    originals = defaultdict(dict)
    try:
        # Replace all torch funcs by empty ones
        for module_name in TORCH_MODULES_TO_PATCH:
            if module_name in sys.modules:
                module = sys.modules[module_name]
                for func_name in TORCH_INIT_FUNCTIONS.keys():
                    if hasattr(module, func_name):
                        originals[module][func_name] = getattr(module, func_name)
                        setattr(module, func_name, empty_func)
        # Also patch our own `init_weights`
        original_init_weights = PreTrainedModel.init_weights
        PreTrainedModel.init_weights = empty_func
        yield
    finally:
        # Set back the original torch functions on all modules
        for module, functions in originals.items():
            for func_name, func in functions.items():
                setattr(module, func_name, func)
        # Set back `init_weights`
        # NOTE(review): if the patching loop above raised before `original_init_weights` was
        # assigned, this line would NameError — presumably unreachable in practice; confirm.
        PreTrainedModel.init_weights = original_init_weights
@contextmanager
def no_tie_weights():
    """
    Disable weight tying during loading with `from_pretrained`. This is needed as we want to have access to ALL
    weights in the state_dict during `from_pretrained`, and otherwise tying them would remove them from it, as it's
    called in `post_init` when instantiating.
    """
    from .modeling_utils import PreTrainedModel
    def _noop(*args, **kwargs):
        pass
    try:
        # Swap `tie_weights` for a no-op for the duration of the context.
        saved_tie_weights = PreTrainedModel.tie_weights
        PreTrainedModel.tie_weights = _noop
        yield
    finally:
        # Restore the original implementation, whatever happened inside the context.
        PreTrainedModel.tie_weights = saved_tie_weights
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/initialization.py",
"license": "Apache License 2.0",
"lines": 245,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/conversion_mapping.py | # Copyright (C) 2025 the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from copy import deepcopy
from typing import TYPE_CHECKING
from .core_model_loading import (
Chunk,
Concatenate,
ErnieFuseAndSplitTextVisionExperts,
Force16BytesAlignment,
MergeModulelist,
Transpose,
WeightConverter,
WeightRenaming,
)
if TYPE_CHECKING:
from .modeling_utils import PreTrainedModel
from .quantizers import HfQuantizer
# Maps a `model_type` to the key of another entry in the conversion mapping built by
# `_build_checkpoint_conversion_mapping` below: each model listed here reuses a copy of
# that base entry's converters instead of defining its own.
_MODEL_TO_CONVERSION_PATTERN = {
    # Mixtral-style MoE
    "mixtral": "mixtral",
    "minimax": "mixtral",
    "minimax_m2": "mixtral",
    # Qwen2-style MoE
    "qwen2_moe": "qwen2_moe",
    "deepseek_v2": "qwen2_moe",
    "deepseek_v3": "qwen2_moe",
    "dots1": "qwen2_moe",
    "ernie4_5_moe": "qwen2_moe",
    "glm4_moe": "qwen2_moe",
    "glm4_moe_lite": "qwen2_moe",
    "glm_moe_dsa": "qwen2_moe",
    "glm4v_moe": "qwen2_moe",
    "longcat_flash": "qwen2_moe",
    "solar_open": "qwen2_moe",
    "qwen3_moe": "qwen2_moe",
    "qwen3_omni_moe": "qwen2_moe",
    "qwen3_omni_moe_thinker": "qwen2_moe",
    "qwen3_next": "qwen2_moe",
    "qwen3_5_moe": "qwen2_moe",
    "hunyuan_v1_moe": "qwen2_moe",
    "flex_olmo": "qwen2_moe",
    "olmoe": "qwen2_moe",
    "exaone_moe": "qwen2_moe",
    # RT-DETR-style detection models
    "rt_detr_v2": "rt_detr",
    "pp_doclayout_v2": "rt_detr",
    "pp_doclayout_v3": "rt_detr",
}
def _build_checkpoint_conversion_mapping():
    """
    Build the default `model_type -> list[WeightConverter | WeightRenaming]` mapping used to
    convert checkpoints on the fly at load time.

    Each entry describes how source checkpoint keys/tensors are renamed, merged, split,
    transposed or re-aligned to match the current modeling code. Entries for model types listed
    in `_MODEL_TO_CONVERSION_PATTERN` are filled in at the end by copying their base entry.
    """
    mapping = {
        "timesfm2_5": [
            WeightRenaming("ff0", "fc1"),
            WeightRenaming("ff1", "fc2"),
        ],
        "olmo_hybrid": [
            WeightRenaming("attention_layer_norm", "input_layernorm"),
            WeightRenaming("feedforward_layer_norm", "post_attention_layernorm"),
        ],
        "qwen3_5_text": [
            WeightRenaming(source_patterns=r"^model.language_model", target_patterns="model"),
        ],
        "t5gemma2": [
            WeightRenaming(r"(?<!vision_model\.)encoder.embed_tokens.", "encoder.text_model.embed_tokens."),
            WeightRenaming(r"(?<!vision_model\.)encoder.norm.", "encoder.text_model.norm."),
            WeightRenaming(r"(?<!vision_model\.)encoder.layers.", "encoder.text_model.layers."),
        ],
        "t5gemma2_encoder": [
            WeightRenaming("^embed_tokens.", "text_model.embed_tokens."),
            WeightRenaming("^norm.", "text_model.norm."),
            WeightRenaming("^layers.", "text_model.layers."),
        ],
        "gpt_oss": [
            # NOTE: These converters are only applied if the model is being loaded from pre-dequantized checkpoint.
            # If you are dequantizing the model on the fly, these converters will be ignored because the tensors
            # that match these patterns are only created after dequantization.
            # That's not an issue for now since the dequantization converters already ensure 16 bytes alignment
            # by enforcing contiguity.
            WeightConverter(
                source_patterns="mlp.experts.gate_up_proj$",
                target_patterns="mlp.experts.gate_up_proj",
                operations=[Force16BytesAlignment()],
            ),
            WeightConverter(
                source_patterns="mlp.experts.down_proj$",
                target_patterns="mlp.experts.down_proj",
                operations=[Force16BytesAlignment()],
            ),
        ],
        "mixtral": [
            WeightRenaming(".block_sparse_moe.", ".mlp."),
            WeightConverter(
                source_patterns=[
                    ".experts.*.w1.weight",
                    ".experts.*.w3.weight",
                ],  # you give me a list of 2 keys, I collect a list of a list of tensors
                target_patterns=".experts.gate_up_proj",  # target key gets the list of two tensors
                operations=[
                    MergeModulelist(
                        dim=0
                    ),  # each process has two lists of tensors, we cat each list. -> we end up with 2 tensors
                    Concatenate(dim=1),  # each process has 2 tensors, gate and up, we concat them into gate_up
                ],  # we want the loading to add this shard operation here. Though we can't shard after concats and merge, needs to be first
            ),
            WeightConverter(
                source_patterns=[
                    ".experts.*.w2.weight",
                ],
                target_patterns=".experts.down_proj",  # target key gets the list of two tensors
                operations=[
                    MergeModulelist(
                        dim=0
                    ),  # each process has two lists of tensors, we cat each list. -> we end up with 2 tensors
                ],  # we want the loading to add this shard operation here. Though we can't shard after concats and merge, needs to be first
            ),
        ],
        "qwen2_moe": [
            WeightConverter(
                source_patterns=[
                    "mlp.experts.*.gate_proj.weight",
                    "mlp.experts.*.up_proj.weight",
                ],
                target_patterns="mlp.experts.gate_up_proj",
                operations=[MergeModulelist(dim=0), Concatenate(dim=1)],
            ),
            WeightConverter(
                source_patterns="mlp.experts.*.down_proj.weight",
                target_patterns="mlp.experts.down_proj",
                operations=[MergeModulelist(dim=0)],
            ),
        ],
        "qwen3_vl_moe": [
            WeightConverter(
                source_patterns="mlp.experts.gate_up_proj",
                target_patterns="mlp.experts.gate_up_proj",
                operations=[Transpose(1, 2, check_dims=True), Force16BytesAlignment()],
            ),
            WeightConverter(
                source_patterns="mlp.experts.down_proj",
                target_patterns="mlp.experts.down_proj",
                operations=[Transpose(1, 2, check_dims=True), Force16BytesAlignment()],
            ),
        ],
        "phimoe": [
            WeightRenaming(".block_sparse_moe.", ".mlp."),
            WeightRenaming(".gate.weight", ".router.weight"),
            WeightConverter(
                source_patterns=[
                    ".experts.*.w1.weight",
                    ".experts.*.w3.weight",
                ],
                target_patterns=".experts.gate_up_proj",
                operations=[MergeModulelist(dim=0), Concatenate(dim=1)],
            ),
            WeightConverter(
                source_patterns=".experts.*.w2.weight",
                target_patterns=".experts.down_proj",
                operations=[MergeModulelist(dim=0)],
            ),
        ],
        "lfm2_moe": [
            WeightConverter(
                source_patterns=[
                    "feed_forward.experts.*.w1.weight",
                    "feed_forward.experts.*.w3.weight",
                ],
                target_patterns="feed_forward.experts.gate_up_proj",
                operations=[MergeModulelist(dim=0), Concatenate(dim=1)],
            ),
            WeightConverter(
                source_patterns="feed_forward.experts.*.w2.weight",
                target_patterns="feed_forward.experts.down_proj",
                operations=[MergeModulelist(dim=0)],
            ),
        ],
        "ernie4_5_vl_moe": [
            # vision
            WeightRenaming("vision_model", "vision_tower"),
            # resampler
            WeightRenaming("spatial_linear.0", "spatial_linear.fc1"),
            WeightRenaming("spatial_linear.2", "spatial_linear.fc2"),
            WeightRenaming("spatial_linear.3", "spatial_linear.ln"),
            WeightRenaming("temporal_linear.0", "temporal_linear.fc1"),
            WeightRenaming("temporal_linear.2", "temporal_linear.fc2"),
            WeightRenaming("temporal_linear.3", "temporal_linear.ln"),
            # language model
            WeightRenaming(r"(?<!language_model\.)embed_tokens", "language_model.embed_tokens"),
            WeightRenaming(r"(?<!language_model\.)layers", "language_model.layers"),
            WeightConverter(
                source_patterns="mlp.gate.weight_1",
                target_patterns="mlp.vision_moe.gate.weight",
                operations=[Transpose(dim0=0, dim1=1)],
            ),
            WeightConverter(
                source_patterns="mlp.gate.weight",
                target_patterns="mlp.text_moe.gate.weight",
                operations=[Transpose(dim0=0, dim1=1)],
            ),
            WeightConverter(
                source_patterns=["mlp.moe_statics.e_score_correction_bias"],
                target_patterns=[
                    "mlp.text_moe.gate.moe_statics.e_score_correction_bias",
                    "mlp.vision_moe.gate.moe_statics.e_score_correction_bias",
                ],
                operations=[Chunk(dim=0)],
            ),
            WeightConverter(
                source_patterns=["experts.*.down_proj.weight"],
                target_patterns=[
                    "text_moe.experts.down_proj",
                    "vision_moe.experts.down_proj",
                ],
                operations=[ErnieFuseAndSplitTextVisionExperts(stack_dim=0, concat_dim=1)],
            ),
            WeightConverter(
                source_patterns=[
                    "experts.*.gate_proj.weight",
                    "experts.*.up_proj.weight",
                ],
                target_patterns=[
                    "text_moe.experts.gate_up_proj",
                    "vision_moe.experts.gate_up_proj",
                ],
                operations=[ErnieFuseAndSplitTextVisionExperts(stack_dim=0, concat_dim=1)],
            ),
        ],
        "detr": [
            WeightRenaming("backbone.conv_encoder", "backbone"),
            WeightRenaming("out_proj", "o_proj"),
            WeightRenaming(r"layers.(\d+).fc1", r"layers.\1.mlp.fc1"),
            WeightRenaming(r"layers.(\d+).fc2", r"layers.\1.mlp.fc2"),
        ],
        "rt_detr": [
            WeightRenaming("out_proj", "o_proj"),
            WeightRenaming(r"layers.(\d+).fc1", r"layers.\1.mlp.fc1"),
            WeightRenaming(r"layers.(\d+).fc2", r"layers.\1.mlp.fc2"),
            WeightRenaming(r"encoder.encoder.(\d+).layers", r"encoder.aifi.\1.layers"),
        ],
        "conditional_detr": [
            WeightRenaming("backbone.conv_encoder", "backbone"),
            WeightRenaming("self_attn.out_proj", "self_attn.o_proj"),
            WeightRenaming("encoder_attn.out_proj", "encoder_attn.o_proj"),
            WeightRenaming(r"layers.(\d+).fc1", r"layers.\1.mlp.fc1"),
            WeightRenaming(r"layers.(\d+).fc2", r"layers.\1.mlp.fc2"),
            # Decoder self-attention projections moved into self_attn module
            WeightRenaming(r"decoder.layers.(\d+).sa_qcontent_proj", r"decoder.layers.\1.self_attn.q_content_proj"),
            WeightRenaming(r"decoder.layers.(\d+).sa_qpos_proj", r"decoder.layers.\1.self_attn.q_pos_proj"),
            WeightRenaming(r"decoder.layers.(\d+).sa_kcontent_proj", r"decoder.layers.\1.self_attn.k_content_proj"),
            WeightRenaming(r"decoder.layers.(\d+).sa_kpos_proj", r"decoder.layers.\1.self_attn.k_pos_proj"),
            WeightRenaming(r"decoder.layers.(\d+).sa_v_proj", r"decoder.layers.\1.self_attn.v_proj"),
            # Decoder cross-attention projections moved into encoder_attn module
            WeightRenaming(r"decoder.layers.(\d+).ca_qcontent_proj", r"decoder.layers.\1.encoder_attn.q_content_proj"),
            WeightRenaming(r"decoder.layers.(\d+).ca_qpos_proj", r"decoder.layers.\1.encoder_attn.q_pos_proj"),
            WeightRenaming(r"decoder.layers.(\d+).ca_kcontent_proj", r"decoder.layers.\1.encoder_attn.k_content_proj"),
            WeightRenaming(r"decoder.layers.(\d+).ca_kpos_proj", r"decoder.layers.\1.encoder_attn.k_pos_proj"),
            WeightRenaming(r"decoder.layers.(\d+).ca_v_proj", r"decoder.layers.\1.encoder_attn.v_proj"),
            WeightRenaming(
                r"decoder.layers.(\d+).ca_qpos_sine_proj", r"decoder.layers.\1.encoder_attn.q_pos_sine_proj"
            ),
        ],
        "deformable_detr": [
            WeightRenaming("backbone.conv_encoder", "backbone"),
            WeightRenaming("self_attn.out_proj", "self_attn.o_proj"),
            WeightRenaming(r"layers.(\d+).fc1", r"layers.\1.mlp.fc1"),
            WeightRenaming(r"layers.(\d+).fc2", r"layers.\1.mlp.fc2"),
        ],
        "d_fine": [
            WeightRenaming("out_proj", "o_proj"),
            WeightRenaming(r"layers.(\d+).fc1", r"layers.\1.mlp.layers.0"),
            WeightRenaming(r"layers.(\d+).fc2", r"layers.\1.mlp.layers.1"),
            WeightRenaming(r"encoder.encoder.(\d+).layers", r"encoder.aifi.\1.layers"),
        ],
        "jamba": [
            WeightConverter(
                source_patterns=[
                    "feed_forward.experts.*.gate_proj.weight",
                    "feed_forward.experts.*.up_proj.weight",
                ],
                target_patterns="feed_forward.experts.gate_up_proj",
                operations=[MergeModulelist(dim=0), Concatenate(dim=1)],
            ),
            WeightConverter(
                source_patterns="feed_forward.experts.*.down_proj.weight",
                target_patterns="feed_forward.experts.down_proj",
                operations=[MergeModulelist(dim=0)],
            ),
        ],
        "timm_wrapper": [
            # Simply add the prefix `timm_model`
            # TODO: Would be probably much cleaner with a `add_prefix` argument in WeightRenaming
            WeightRenaming(
                source_patterns=r"(.+)",
                target_patterns=r"timm_model.\1",
            )
        ],
        "legacy": [
            WeightRenaming(
                source_patterns="LayerNorm.gamma",
                target_patterns="LayerNorm.weight",
            ),
            WeightRenaming(
                source_patterns="LayerNorm.beta",
                target_patterns="LayerNorm.bias",
            ),
        ],
    }
    # Weight-norm parametrization renamings: applied to every model via the "legacy" entry.
    mapping["legacy"] += [
        WeightRenaming(
            source_patterns=".weight_g$",
            target_patterns=".parametrizations.weight.original0",
        ),
        WeightRenaming(
            source_patterns=".weight_v$",
            target_patterns=".parametrizations.weight.original1",
        ),
    ]
    # Derived entries: copy a base entry, then append model-specific renamings on top.
    mapping["ernie4_5_moe"] = mapping["qwen2_moe"].copy()
    mapping["ernie4_5_moe"] += [
        WeightRenaming("mlp.moe_statics.e_score_correction_bias", "mlp.gate.moe_statics.e_score_correction_bias")
    ]
    mapping["minimax_m2"] = mapping["mixtral"].copy()
    mapping["minimax_m2"] += [
        WeightRenaming(".block_sparse_moe.e_score_correction_bias", ".mlp.e_score_correction_bias"),
    ]
    mapping["exaone_moe"] = mapping["qwen2_moe"].copy()
    mapping["exaone_moe"] += [WeightRenaming("mlp.e_score_correction_bias", "mlp.gate.e_score_correction_bias")]
    mapping["qwen3_5_moe_text"] = mapping["qwen3_5_text"].copy()
    mapping["qwen3_5_moe_text"] += mapping["qwen2_moe"].copy()
    # Fill in every model type that simply reuses another model's conversion scheme.
    # NOTE: these are shallow copies — the converter objects themselves are shared, but
    # `get_checkpoint_conversion_mapping` deep-copies before handing entries to callers.
    for model_type, base_pattern in _MODEL_TO_CONVERSION_PATTERN.items():
        if model_type in mapping:
            continue
        mapping[model_type] = mapping[base_pattern].copy()
    return mapping
# Lazily-populated module-level cache of the full conversion mapping (built on first access).
_checkpoint_conversion_mapping_cache = None
def get_checkpoint_conversion_mapping(model_type):
    """Return a deep copy of the conversion entries registered for `model_type`, or `None` if absent."""
    global _checkpoint_conversion_mapping_cache
    # Build the mapping lazily on first access, then reuse the cached version.
    if _checkpoint_conversion_mapping_cache is None:
        _checkpoint_conversion_mapping_cache = _build_checkpoint_conversion_mapping()
    entry = _checkpoint_conversion_mapping_cache.get(model_type)
    # Deep copy so callers may freely mutate the returned converters without poisoning the cache.
    return deepcopy(entry)
def register_checkpoint_conversion_mapping(
    model_type: str, mapping: list[WeightConverter | WeightRenaming], overwrite: bool = False
) -> None:
    """Register (or, with `overwrite=True`, replace) the weight conversion entries for `model_type`."""
    global _checkpoint_conversion_mapping_cache
    # Make sure the default mapping exists before we touch it.
    if _checkpoint_conversion_mapping_cache is None:
        _checkpoint_conversion_mapping_cache = _build_checkpoint_conversion_mapping()
    already_present = model_type in _checkpoint_conversion_mapping_cache
    if already_present and not overwrite:
        raise ValueError(f"Model type {model_type} already exists in the checkpoint conversion mapping.")
    _checkpoint_conversion_mapping_cache[model_type] = mapping
# DO NOT MODIFY, KEPT FOR BC ONLY
# Lowercased class-name fragments matched against the model's MRO in `get_model_conversion_mapping`
# to detect models that rely on their class-level `_checkpoint_conversion_mapping` renamings.
VLMS = [
    "aria",
    "ayavision",
    "colpali",
    "emu3",
    "fuyu",
    "gotocr2",
    "gemma3",
    "internvl",
    "llava",  # all llava prefixed models fall under this check
    "mistral3",
    "mllama",
    "paligemma",
    "shieldgemma2",
    "qwen2vl",
    "qwen2_5_vl",
    "videollava",
    "vipllava",
    "sam3_video",
    "sam3",
    "sam3_tracker",
    "sam3_tracker_video",
    "paddleocrvl",
    # NOTE: Slightly different from `model_type` (to follow naming conventions in vllm/sglang)
    "ernie4_5_vlmoe",
    "ernie4_5_vl_moe",  # BC alias
    "detr",
]
def get_model_conversion_mapping(
    model: PreTrainedModel,
    key_mapping: dict[str, str] | None = None,
    hf_quantizer: HfQuantizer | None = None,
    add_legacy: bool = True,
) -> list[WeightConverter | WeightRenaming]:
    """
    For a given `model`, obtain the weight conversion mapping if any are registered either as a simple renaming
    `_checkpoint_conversion_mapping` class argument, or in the general WeightConverter mapping.
    """
    conversions: list[WeightConverter | WeightRenaming] = []
    if key_mapping is not None:
        # An explicit user-provided key mapping takes precedence.
        conversions = [WeightRenaming(source_patterns=src, target_patterns=tgt) for src, tgt in key_mapping.items()]
    else:
        # Detect VLM-style models by their class hierarchy and use the class-level renamings.
        mro_names = [klass.__name__.lower() for klass in model.__class__.__mro__[:-1]]
        if any(vlm_name in name for name in mro_names for vlm_name in VLMS):
            conversions = [
                WeightRenaming(source_patterns=src, target_patterns=tgt)
                for src, tgt in model._checkpoint_conversion_mapping.items()
            ]
    # TODO: should be checked recursively on submodels!!
    model_type = getattr(model.config, "model_type", None)
    if model_type is not None:
        registered = get_checkpoint_conversion_mapping(model_type)
        if registered is not None:
            conversions.extend(registered)
    if add_legacy:
        conversions.extend(get_checkpoint_conversion_mapping("legacy"))
    # Add the ones from the quantizer as well if provided
    if hf_quantizer is not None:
        # NOTE: Since get_weight_conversions() only serve to dequantize, we would normally want to apply them first.
        # However, for now it's not possible to cascade converters (i.e., applying model-specific conversions on top
        # of tensors created by the dequantization conversions)
        # This means that if a model has model-specific conversions and is being dequantized, the model-specific conversion
        # that relies on tensors created by dequantization conversions will not be applied.
        # GptOss example: with Mxfp4Config(dequantize=True), Force16BytesAlignment converters are ignored because the tensors
        # "mlp.experts.gate_up_proj$" and "mlp.experts.down_proj$" are only created after dequantization conversions are applied.
        conversions.extend(hf_quantizer.get_weight_conversions())
    return conversions
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/conversion_mapping.py",
"license": "Apache License 2.0",
"lines": 429,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/utils/loading_report.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import re
import shutil
import sys
from collections import OrderedDict, defaultdict
from dataclasses import dataclass
from typing import Any
# Matches a dot-delimited integer segment in a parameter key, e.g. the "0" in "layers.0.weight".
_DIGIT_RX = re.compile(r"(?<=\.)(\d+)(?=\.|$)")  # numbers between dots or at the end
def _pattern_of(key: str) -> str:
"""Replace every dot-delimited integer with '*' to get the structure."""
return _DIGIT_RX.sub("*", key)
def _fmt_indices(values: list[int], cutoff=10) -> str:
"""Format a list of ints as single number, {a, ..., b}, or first...last."""
if len(values) == 1:
return str(values[0])
values = sorted(values)
if len(values) > cutoff:
return f"{values[0]}...{values[-1]}"
return ", ".join(map(str, values))
def update_key_name(mapping: dict[str, Any]) -> dict[str, Any]:
    """
    Merge keys like 'layers.0.x', 'layers.1.x' into 'layers.{0, 1}.x'
    BUT only merge together keys that have the exact same value.
    Returns a new dict {merged_key: value}.

    If a non-dict iterable of keys is passed instead, the merged keys themselves are returned.
    """
    # NOTE(review): despite the docstring, keys sharing a pattern appear to be merged regardless
    # of value — each key's value is appended to the bucket and the LAST one wins below. Confirm
    # whether per-value merging was intended; for display purposes the current behavior is harmless.
    # (pattern, value) -> list[set[int]] (per-star index values)
    not_mapping = False
    if not isinstance(mapping, dict):
        # Accept a plain iterable of keys: map each key to itself, and remember to return keys only.
        mapping = {k: k for k in mapping}
        not_mapping = True
    # bucket[pattern] layout: one set of observed indices per '*' position, followed by the value(s).
    bucket: dict[str, list[set[int] | Any]] = defaultdict(list)
    for key, val in mapping.items():
        digs = _DIGIT_RX.findall(key)
        patt = _pattern_of(key)
        for i, d in enumerate(digs):
            if len(bucket[patt]) <= i:
                bucket[patt].append(set())
            bucket[patt][i].add(int(d))
        bucket[patt].append(val)
    out_items = {}
    for patt, values in bucket.items():
        # Split the bucket back into the per-star index sets and the (last) value.
        sets, val = values[:-1], values[-1]
        parts = patt.split("*")  # stars are between parts
        final = parts[0]
        for i in range(1, len(parts)):
            if i - 1 < len(sets) and sets[i - 1]:
                insert = _fmt_indices(sorted(sets[i - 1]))
                if len(sets[i - 1]) > 1:
                    # Multiple indices collapse into a "{a, b, ...}" group.
                    final += "{" + insert + "}"
                else:
                    final += insert
            else:
                # No recorded indices for this star: keep the wildcard as-is.
                final += "*"
            final += parts[i]
        out_items[final] = val
    out = OrderedDict(out_items)
    if not_mapping:
        return out.keys()
    return out
# Matches ANSI SGR (color/style) escape sequences, e.g. "\x1b[31m".
_ansi_re = re.compile(r"\x1b\[[0-9;]*m")
def _strip_ansi(s: str) -> str:
return _ansi_re.sub("", str(s))
def _pad(text, width):
t = str(text)
pad = max(0, width - len(_strip_ansi(t)))
return t + " " * pad
def _make_table(rows, headers):
    """Render `rows` under `headers` as a plain-text table, sizing columns by visible width."""
    # Transpose rows+headers into columns to size each column by its widest visible cell.
    # With no rows, the header row alone drives the widths.
    columns = list(zip(*([headers] + rows))) if rows else [headers]
    widths = [max(len(_strip_ansi(cell)) for cell in column) for column in columns]
    header_line = " | ".join(_pad(h, w) for h, w in zip(headers, widths))
    sep_line = "-+-".join("-" * w for w in widths)
    body_lines = [" | ".join(_pad(cell, w) for cell, w in zip(row, widths)) for row in rows]
    return "\n".join([header_line, sep_line] + body_lines)
# ANSI SGR escape codes used for terminal styling of the loading report.
# Each value must start with the ESC character ("\x1b"): without it the bracket sequences
# would be printed literally, and `_ansi_re` (which requires "\x1b") could never strip them
# when computing visible column widths.
PALETTE = {
    "reset": "\x1b[0m",
    "red": "\x1b[31m",
    "yellow": "\x1b[33m",
    "orange": "\x1b[38;5;208m",
    "purple": "\x1b[35m",
    "bold": "\x1b[1m",
    "italic": "\x1b[3m",
    "dim": "\x1b[2m",
}
def _color(s, color):
    """Return color-formatted input `s` if `sys.stdout` is interactive, e.g. connected to a terminal."""
    # Avoid emitting escape codes when output is piped or redirected.
    if not sys.stdout.isatty():
        return s
    return f"{PALETTE[color]}{s}{PALETTE['reset']}"
def _get_terminal_width(default=80):
try:
return shutil.get_terminal_size().columns
except Exception:
return default
@dataclass
class LoadStateDictInfo:
    """
    Mutable container for state-dict loading results and diagnostics. Each entry in this structure is mutable,
    and will usually be mutated in-place during the loading pipeline.
    Attributes:
        missing_keys (`set[str]`):
            Keys that are missing from the loaded checkpoints but expected in the model's architecture.
        unexpected_keys (`set[str]`):
            Keys that are found in the checkpoints, but not expected in the model's architecture.
        mismatched_keys (`set[tuple[str, tuple[int], tuple[int]]]`):
            Keys that are found in the checkpoints and are expected in the model's architecture, but with a different shape.
        error_msgs ( `list[str]`):
            Some potential error messages.
        conversion_errors (`dict[str, str]`):
            Errors happening during the on-the-fly weight conversion process.
    """
    missing_keys: set[str]
    unexpected_keys: set[str]
    mismatched_keys: set[tuple[str, tuple[int], tuple[int]]]
    error_msgs: list[str]
    conversion_errors: dict[str, str]
    def missing_and_mismatched(self):
        """Return all effective missing keys, including `missing` and `mismatched` keys."""
        return self.missing_keys | {k[0] for k in self.mismatched_keys}
    def to_dict(self):
        """Return the legacy 4-field dict view of this loading info."""
        # Does not include the `conversion_errors` to be coherent with legacy reporting in the tests
        return {
            "missing_keys": self.missing_keys,
            "unexpected_keys": self.unexpected_keys,
            "mismatched_keys": self.mismatched_keys,
            "error_msgs": self.error_msgs,
        }
    def create_loading_report(self) -> str | None:
        """Generate the minimal table of a loading report."""
        term_w = _get_terminal_width()
        rows = []
        tips = ""
        if self.unexpected_keys:
            tips += (
                f"\n- {_color('UNEXPECTED', 'orange') + PALETTE['italic']}\t:can be ignored when loading from different "
                "task/architecture; not ok if you expect identical arch."
            )
            # NOTE(review): these rows carry 4 cells while the others carry 3; `_make_table`'s
            # zip silently truncates to the shortest row — confirm the trailing "" is intended.
            for k in update_key_name(self.unexpected_keys):
                status = _color("UNEXPECTED", "orange")
                rows.append([k, status, "", ""])
        if self.missing_keys:
            tips += (
                f"\n- {_color('MISSING', 'red') + PALETTE['italic']}\t:those params were newly initialized because missing "
                "from the checkpoint. Consider training on your downstream task."
            )
            for k in update_key_name(self.missing_keys):
                status = _color("MISSING", "red")
                rows.append([k, status, ""])
        if self.mismatched_keys:
            tips += (
                f"\n- {_color('MISMATCH', 'yellow') + PALETTE['italic']}\t:ckpt weights were loaded, but they did not match "
                "the original empty weight shapes."
            )
            # Collapse the (key, ckpt_shape, model_shape) triples so keys can be merged by pattern.
            iterator = {a: (b, c) for a, b, c in self.mismatched_keys}
            for key, (shape_ckpt, shape_model) in update_key_name(iterator).items():
                status = _color("MISMATCH", "yellow")
                data = [
                    key,
                    status,
                    f"Reinit due to size mismatch - ckpt: {str(shape_ckpt)} vs model:{str(shape_model)}",
                ]
                rows.append(data)
        if self.conversion_errors:
            tips += f"\n- {_color('CONVERSION', 'purple') + PALETTE['italic']}\t:originate from the conversion scheme"
            for k, v in update_key_name(self.conversion_errors).items():
                status = _color("CONVERSION", "purple")
                _details = f"\n\n{v}\n\n"
                rows.append([k, status, _details])
        # If nothing is wrong, return None
        if len(rows) == 0:
            return None
        headers = ["Key", "Status"]
        # Wide terminals get a labeled details column; narrow ones get unlabeled filler columns.
        if term_w > 200:
            headers += ["Details"]
        else:
            headers += ["", ""]
        table = _make_table(rows, headers=headers)
        tips = f"\n\n{PALETTE['italic']}Notes:{tips}{PALETTE['reset']}"
        report = table + tips
        return report
def log_state_dict_report(
    model,
    pretrained_model_name_or_path: str,
    ignore_mismatched_sizes: bool,
    loading_info: LoadStateDictInfo,
    logger: logging.Logger | None = None,
):
    """
    Log a readable report about state_dict loading issues.
    This version is terminal-size aware: for very small terminals it falls back to a compact
    Key | Status view so output doesn't wrap badly.
    """
    logger = logger if logger is not None else logging.getLogger(__name__)
    # Hard errors abort immediately, before any report is rendered.
    if loading_info.error_msgs:
        error_msg = "\n\t".join(loading_info.error_msgs)
        if "size mismatch" in error_msg:
            error_msg += (
                "\n\tYou may consider adding `ignore_mismatched_sizes=True` to `from_pretrained(...)` if appropriate."
            )
        raise RuntimeError(f"Error(s) in loading state_dict for {model.__class__.__name__}:\n\t{error_msg}")
    # Render the report table; nothing to do when loading was clean.
    report = loading_info.create_loading_report()
    if report is None:
        return
    prelude = f"{PALETTE['bold']}{model.__class__.__name__} LOAD REPORT{PALETTE['reset']} from: {pretrained_model_name_or_path}\n"
    # Emit the report at warning level so it shows up with default logging config.
    logger.warning(prelude + report)
    # Conversion and mismatch problems are fatal, but only AFTER the report has been logged.
    if loading_info.conversion_errors:
        raise RuntimeError(
            "We encountered some issues during automatic conversion of the weights. For details look at the `CONVERSION` entries of "
            "the above report!"
        )
    if not ignore_mismatched_sizes and loading_info.mismatched_keys:
        raise RuntimeError(
            "You set `ignore_mismatched_sizes` to `False`, thus raising an error. For details look at the above report!"
        )
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/utils/loading_report.py",
"license": "Apache License 2.0",
"lines": 233,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/utils/pytest_helpers.py | import argparse
import json
import re
from collections import Counter
from pathlib import Path
def _base_test_name(nodeid: str) -> str:
# Strip parameters like [param=..] from the last component
name = nodeid.split("::")[-1]
return re.sub(r"\[.*\]$", "", name)
def _class_name(nodeid: str) -> str | None:
parts = nodeid.split("::")
# nodeid can be: file::Class::test or file::test
if len(parts) >= 3:
return parts[-2]
return None
def _file_path(nodeid: str) -> str:
return nodeid.split("::")[0]
def _modeling_key(file_path: str) -> str | None:
# Extract "xxx" from test_modeling_xxx.py
m = re.search(r"test_modeling_([A-Za-z0-9_]+)\.py$", file_path)
if m:
return m.group(1)
return None
def summarize(report_path: str):
    """Parse a pytest-json-report file and return aggregate failure counters keyed by view."""
    report_file = Path(report_path)
    if not report_file.exists():
        raise FileNotFoundError(f"Report file not found: {report_file.resolve()}")
    data = json.loads(report_file.read_text())
    tests = data.get("tests", [])
    # Overall counts
    outcomes = Counter(t.get("outcome", "unknown") for t in tests)
    # Filter failures (pytest-json-report uses "failed" and may have "error")
    failed = [t for t in tests if t.get("outcome") in ("failed", "error")]
    # 1) Failures per test file
    failures_per_file = Counter(_file_path(t.get("nodeid", "")) for t in failed)
    # 2) Failures per class (if any; otherwise "NO_CLASS")
    failures_per_class = Counter((_class_name(t.get("nodeid", "")) or "NO_CLASS") for t in failed)
    # 3) Failures per base test name (function), aggregating parametrized cases
    failures_per_testname = Counter(_base_test_name(t.get("nodeid", "")) for t in failed)
    # 4) Failures per test_modeling_xxx (derived from filename); skip files that aren't modeling tests
    modeling_keys = (_modeling_key(_file_path(t.get("nodeid", ""))) for t in failed)
    failures_per_modeling_key = Counter(key for key in modeling_keys if key)
    return {
        "outcomes": outcomes,
        "failures_per_file": failures_per_file,
        "failures_per_class": failures_per_class,
        "failures_per_testname": failures_per_testname,
        "failures_per_modeling_key": failures_per_modeling_key,
    }
def main():
    """CLI entry point: print failure summaries from a pytest JSON report."""
    parser = argparse.ArgumentParser(description="Summarize pytest JSON report failures")
    parser.add_argument(
        "--report", default="report.json", help="Path to pytest JSON report file (default: report.json)"
    )
    args = parser.parse_args()
    try:
        summary = summarize(args.report)
    except FileNotFoundError as e:
        # Missing report file is not fatal for a summary tool: report it and exit quietly.
        print(str(e))
        return
    outcomes = summary["outcomes"]
    print("=== Overall ===")
    print(f"Total tests: {sum(outcomes.values())}")
    for outcome in sorted(outcomes):
        print(f"{outcome:>10}: {outcomes[outcome]}")
    def _print_counter(title, counter: Counter, label=""):
        # Print one section, sorted by (count, key) so the biggest offenders come last.
        print(f"\n=== {title} ===")
        if not counter:
            print("None")
            return
        for key, cnt in sorted(counter.items(), key=lambda x: (x[1], x[0])):
            line = f"{cnt:4d} {label}{key}" if label else f"{cnt:4d} {key}"
            print(line)
    _print_counter("Failures per test class", summary["failures_per_class"], label="class ")
    _print_counter("Failures per test_modeling_xxx", summary["failures_per_modeling_key"], label="model ")
    _print_counter("Failures per test file", summary["failures_per_file"])
    _print_counter("Failures per test name (base)", summary["failures_per_testname"])
# Allow running this module directly as a CLI script.
if __name__ == "__main__":
    main()
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/utils/pytest_helpers.py",
"license": "Apache License 2.0",
"lines": 85,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
huggingface/transformers:tests/utils/test_core_model_loading.py | # Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from types import SimpleNamespace
import torch
import torch.nn as nn
from transformers import PretrainedConfig
from transformers.conversion_mapping import get_checkpoint_conversion_mapping, register_checkpoint_conversion_mapping
from transformers.core_model_loading import (
Chunk,
Concatenate,
ErnieFuseAndSplitTextVisionExperts,
MergeModulelist,
PermuteForRope,
WeightConverter,
WeightRenaming,
build_glob_alternation,
convert_and_load_state_dict_in_model,
rename_source_key,
revert_weight_conversion,
)
from transformers.modeling_utils import LoadStateDictConfig
from transformers.utils.import_utils import is_triton_available
from ..test_modeling_common import compare_state_dicts
class TestWeightGlobMatching(unittest.TestCase):
    """Tests glob-pattern matching of weight names via `build_glob_alternation`."""
    def setUp(self):
        self.weight_globs_digits = [
            "model.layers.*.mlp.gate_up_proj.weight",
            "model.layers.*.self_attn.q_proj.weight",
            "embed_tokens.weight",
        ]
        self.alt_digits, self.map_digits, _ = build_glob_alternation(self.weight_globs_digits)
        # NOTE(review): identical to `weight_globs_digits`; the digits/any split looks like a
        # leftover from an API where `*` compiled differently per list — confirm intent.
        self.weight_globs_any = [
            "model.layers.*.mlp.gate_up_proj.weight",
            "model.layers.*.self_attn.q_proj.weight",
            "embed_tokens.weight",
        ]
        self.alt_any, self.map_any, _ = build_glob_alternation(self.weight_globs_any)
    @staticmethod
    def _match_glob(key, alt, mapping):
        # Map a concrete weight key back to the originating glob; None when no branch matches.
        matched = alt.search(key)
        return mapping.get(matched.lastgroup) if matched else None
    def test_exact_match(self):
        # A glob without wildcards matches only the literal key.
        self.assertEqual(
            self._match_glob("embed_tokens.weight", self.alt_digits, self.map_digits), "embed_tokens.weight"
        )
    def test_digits_only_star_accepts_digits(self):
        # `*` matches numeric layer indices.
        self.assertEqual(
            self._match_glob("model.layers.0.mlp.gate_up_proj.weight", self.alt_digits, self.map_digits),
            "model.layers.*.mlp.gate_up_proj.weight",
        )
        self.assertEqual(
            self._match_glob("model.layers.12.self_attn.q_proj.weight", self.alt_digits, self.map_digits),
            "model.layers.*.self_attn.q_proj.weight",
        )
    def test_anychar_star_accepts_nondigits(self):
        # `*` also matches non-numeric segments such as "a" or "00x".
        self.assertEqual(
            self._match_glob("model.layers.a.mlp.gate_up_proj.weight", self.alt_any, self.map_any),
            "model.layers.*.mlp.gate_up_proj.weight",
        )
        self.assertEqual(
            self._match_glob("model.layers.00x.mlp.gate_up_proj.weight", self.alt_any, self.map_any),
            "model.layers.*.mlp.gate_up_proj.weight",
        )
    def test_no_match(self):
        # "up_proj" is not covered by any registered glob.
        self.assertIsNone(self._match_glob("model.layers.0.mlp.up_proj.weight", self.alt_digits, self.map_digits))
    def test_leftmost_alternative_wins_for_overlapping_patterns(self):
        # Overlapping patterns: both could match; ensure leftmost wins
        globs = [
            "model.layers.*.mlp.*.weight", # broader (first)
            "model.layers.0.mlp.gate_up_proj.weight", # more specific (second)
        ]
        alt, mapping, _ = build_glob_alternation(globs)
        # Both branches match; Python's regex picks the leftmost alternative → index 0
        self.assertEqual(
            self._match_glob("model.layers.0.mlp.gate_up_proj.weight", alt, mapping), "model.layers.*.mlp.*.weight"
        )
    def test_multiple_patterns_same_prefix(self):
        # Patterns sharing a common prefix must still resolve to their own glob.
        globs = [
            "model.layers.*.self_attn.q_proj.weight",
            "model.layers.*.self_attn.k_proj.weight",
            "model.layers.*.self_attn.v_proj.weight",
        ]
        alt, mapping, _ = build_glob_alternation(
            globs,
        )
        self.assertEqual(
            self._match_glob("model.layers.3.self_attn.q_proj.weight", alt, mapping),
            "model.layers.*.self_attn.q_proj.weight",
        )
        self.assertEqual(
            self._match_glob("model.layers.3.self_attn.k_proj.weight", alt, mapping),
            "model.layers.*.self_attn.k_proj.weight",
        )
        self.assertEqual(
            self._match_glob("model.layers.3.self_attn.v_proj.weight", alt, mapping),
            "model.layers.*.self_attn.v_proj.weight",
        )
    def test_anchor_full_match_only(self):
        # NOTE(review): despite the name, matching is `search`-based, so a key with a
        # trailing ".bar" suffix still matches — hence assertIsNotNone here.
        self.assertIsNotNone(
            self._match_glob("model.layers.0.mlp.gate_up_proj.weight.bar", self.alt_any, self.map_any)
        )
    def test_large_batch_performance_smoke(self):
        # Not a perf benchmark, but ensures building and matching a larger alternation is OK
        globs = [f"model.layers.*.mlp.block{i}.weight" for i in range(200)]
        alt, mapping, _ = build_glob_alternation(globs)
        key = "model.layers.123.mlp.block57.weight"
        self.assertEqual(self._match_glob(key, alt, mapping), "model.layers.*.mlp.block57.weight")
    def test_sub_key_rewrites_targets(self):
        renamings = [
            WeightRenaming("block_sparse_moe.experts.*.w1.weight", "mlp.experts.gate_up_proj"),
            WeightRenaming("block_sparse_moe.experts.*.w2.weight", "mlp.experts.down_proj"),
            WeightRenaming("model.language_model.*", "language_model"),
        ]
        # Only the matched portion of the key is rewritten; the "foo." prefix survives.
        self.assertEqual(
            rename_source_key("foo.block_sparse_moe.experts.3.w1.weight", renamings, [])[0],
            "foo.mlp.experts.gate_up_proj",
        )
        self.assertEqual(
            rename_source_key("foo.block_sparse_moe.experts.3.w2.weight", renamings, [])[0],
            "foo.mlp.experts.down_proj",
        )
        # The trailing `*` consumes the rest of the key, so the whole key collapses.
        self.assertEqual(rename_source_key("model.language_model.lm_head.weight", renamings, [])[0], "language_model")
    def test_sub_key_no_match_returns_original(self):
        renamings = [
            WeightRenaming("block_sparse_moe.experts.*.w1.weight", "*.mlp.experts.gate_up_proj"),
        ]
        key = "unrelated.key"
        renamed_key, _ = rename_source_key(key, renamings, [])
        # Keys matching no renaming pattern pass through untouched.
        self.assertEqual(renamed_key, key)
class DummyParamModule(nn.Module):
    """Leaf module holding a single zero-initialized ``weight`` parameter of the given shape."""

    def __init__(self, shape):
        super().__init__()
        zeros = torch.zeros(shape)
        self.weight = nn.Parameter(zeros)
class DummySelfAttn(nn.Module):
    """Attention stub exposing separate q/k/v projection weights, each of shape (1, 2)."""

    def __init__(self):
        super().__init__()
        # setattr registers each DummyParamModule as a submodule, same as direct assignment.
        for proj_name in ("q_proj", "k_proj", "v_proj"):
            setattr(self, proj_name, DummyParamModule((1, 2)))
class DummyExperts(nn.Module):
    """Fused-expert weight stub: stacked gate_up and down projections for two experts."""

    def __init__(self):
        super().__init__()
        expert_shapes = {"gate_up_proj": (2, 4, 2), "down_proj": (2, 2, 2)}
        for attr_name, shape in expert_shapes.items():
            setattr(self, attr_name, DummyParamModule(shape))
class DummyLayer(nn.Module):
    """One transformer-like layer: self-attention plus an experts block.

    With ``add_extra_moe`` a second experts block (``extra_experts``) is attached,
    mirroring models that keep separate expert groups.
    """

    def __init__(self, add_extra_moe=False):
        super().__init__()
        self.self_attn = DummySelfAttn()
        # Registration order (experts before extra_experts) is preserved on purpose.
        expert_names = ["experts", "extra_experts"] if add_extra_moe else ["experts"]
        for name in expert_names:
            setattr(self, name, DummyExperts())
class DummyTopModel(nn.Module):
    """Backbone stub exposing two identical ``DummyLayer`` instances under ``.layers``."""

    def __init__(self, add_extra_moe=False):
        super().__init__()
        self.layers = nn.ModuleList(DummyLayer(add_extra_moe) for _ in range(2))
class DummyMLP(nn.Module):
    """Stand-alone MLP stub with a single (2, 2) down-projection weight."""

    def __init__(self):
        super().__init__()
        weight_shape = (2, 2)
        self.down_proj = DummyParamModule(weight_shape)
class DummyRoot(nn.Module):
    """Top-level model stub: a ``model`` backbone plus a bare ``mlp`` head."""

    base_model_prefix = "model"
    config: PretrainedConfig

    def __init__(self, add_extra_moe=False):
        super().__init__()
        # Independent submodules; `add_extra_moe` only affects the backbone layers.
        self.mlp = DummyMLP()
        self.model = DummyTopModel(add_extra_moe)
class TestConvertAndLoadStateDict(unittest.TestCase):
    """End-to-end tests of `convert_and_load_state_dict_in_model`: MoE expert fusion,
    qkv chunking, renamings, FP8 quantization hooks, and reversibility of conversions."""
    def test_moe_and_qkv_conversion(self):
        model = DummyRoot()
        model.config = PretrainedConfig()
        raw_tensors = {
            "model.layers.0.experts.0.w1.weight": torch.tensor([[0.0, 1.0], [2.0, 3.0]]),
            "model.layers.0.experts.1.w1.weight": torch.tensor([[10.0, 11.0], [12.0, 13.0]]),
            "model.layers.0.experts.0.w3.weight": torch.tensor([[4.0, 5.0], [6.0, 7.0]]),
            "model.layers.0.experts.1.w3.weight": torch.tensor([[14.0, 15.0], [16.0, 17.0]]),
            "model.layers.0.experts.0.w2.weight": torch.tensor([[20.0, 21.0], [22.0, 23.0]]),
            "model.layers.0.experts.1.w2.weight": torch.tensor([[24.0, 25.0], [26.0, 27.0]]),
            "model.layers.1.experts.0.w1.weight": torch.tensor([[30.0, 31.0], [32.0, 33.0]]),
            "model.layers.1.experts.1.w1.weight": torch.tensor([[34.0, 35.0], [36.0, 37.0]]),
            "model.layers.1.experts.0.w3.weight": torch.tensor([[38.0, 39.0], [40.0, 41.0]]),
            "model.layers.1.experts.1.w3.weight": torch.tensor([[42.0, 43.0], [44.0, 45.0]]),
            "model.layers.1.experts.0.w2.weight": torch.tensor([[46.0, 47.0], [48.0, 49.0]]),
            "model.layers.1.experts.1.w2.weight": torch.tensor([[50.0, 51.0], [52.0, 53.0]]),
            "model.layers.0.self_attn.qkv_proj.weight": torch.tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]),
            "model.layers.1.self_attn.qkv_proj.weight": torch.tensor([[7.0, 8.0], [9.0, 10.0], [11.0, 12.0]]),
            "mlp.w2.weight": torch.tensor([[60.0, 61.0], [62.0, 63.0]]),
        }
        # Clone so `raw_tensors` stays pristine for the expected-value computations below.
        state_dict = {k: v.clone() for k, v in raw_tensors.items()}
        weight_mapping = [
            WeightConverter(
                ["experts.*.w1.weight", "experts.*.w3.weight"],
                "experts.gate_up_proj.weight",
                operations=[MergeModulelist(dim=0), Concatenate(dim=1)],
            ),
            WeightConverter(
                "experts.*.w2.weight",
                "experts.down_proj.weight",
                operations=[MergeModulelist(dim=0)],
            ),
            # Intentionally only targets layer 0, so layer 1's qkv stays unconverted.
            WeightConverter(
                "model.layers.0.self_attn.qkv_proj.weight",
                [
                    "model.layers.0.self_attn.q_proj.weight",
                    "model.layers.0.self_attn.k_proj.weight",
                    "model.layers.0.self_attn.v_proj.weight",
                ],
                operations=[Chunk(dim=0)],
            ),
            WeightRenaming("mlp.w2.weight", "mlp.down_proj.weight"),
        ]
        load_config = LoadStateDictConfig(
            weight_mapping=weight_mapping,
        )
        loading_info, _ = convert_and_load_state_dict_in_model(
            model,
            state_dict,
            load_config,
            tp_plan=None,
        )
        # Layer 1's q/k/v were never produced (converter covers layer 0 only) ...
        self.assertEqual(
            loading_info.missing_keys,
            {
                "model.layers.1.self_attn.k_proj.weight",
                "model.layers.1.self_attn.v_proj.weight",
                "model.layers.1.self_attn.q_proj.weight",
            },
        )
        # ... and its fused source key is consequently left over as unexpected.
        self.assertEqual(loading_info.unexpected_keys, {"model.layers.1.self_attn.qkv_proj.weight"})
        self.assertEqual(loading_info.mismatched_keys, set())
        self.assertEqual(loading_info.conversion_errors, {})
        model_state = model.state_dict()
        def cat_gate(layer_prefix: str) -> torch.Tensor:
            # Expected fused gate_up: stack experts along dim 0, concat w1/w3 along dim 1.
            w1 = [
                raw_tensors[f"{layer_prefix}.experts.0.w1.weight"],
                raw_tensors[f"{layer_prefix}.experts.1.w1.weight"],
            ]
            w3 = [
                raw_tensors[f"{layer_prefix}.experts.0.w3.weight"],
                raw_tensors[f"{layer_prefix}.experts.1.w3.weight"],
            ]
            return torch.cat([torch.stack(w1, dim=0), torch.stack(w3, dim=0)], dim=1)
        torch.testing.assert_close(
            model_state["model.layers.0.experts.gate_up_proj.weight"], cat_gate("model.layers.0")
        )
        torch.testing.assert_close(
            model_state["model.layers.1.experts.gate_up_proj.weight"], cat_gate("model.layers.1")
        )
        def stack_down(layer_prefix: str) -> torch.Tensor:
            # Expected down_proj: experts stacked along a new leading dim.
            return torch.stack(
                [
                    raw_tensors[f"{layer_prefix}.experts.0.w2.weight"],
                    raw_tensors[f"{layer_prefix}.experts.1.w2.weight"],
                ],
                dim=0,
            )
        torch.testing.assert_close(
            model_state["model.layers.0.experts.down_proj.weight"], stack_down("model.layers.0")
        )
        torch.testing.assert_close(
            model_state["model.layers.1.experts.down_proj.weight"], stack_down("model.layers.1")
        )
        for layer_idx in range(2):
            key = f"model.layers.{layer_idx}.self_attn.qkv_proj.weight"
            expected_q, expected_k, expected_v = torch.chunk(raw_tensors[key], chunks=3, dim=0)
            prefix = f"model.layers.{layer_idx}.self_attn"
            if layer_idx == 1:
                # These were missing and thus not loaded
                continue
            torch.testing.assert_close(model_state[f"{prefix}.q_proj.weight"], expected_q)
            torch.testing.assert_close(model_state[f"{prefix}.k_proj.weight"], expected_k)
            torch.testing.assert_close(model_state[f"{prefix}.v_proj.weight"], expected_v)
        torch.testing.assert_close(model_state["mlp.down_proj.weight"], raw_tensors["mlp.w2.weight"])
    def test_moe_and_qkv_conversion_reversed(self):
        """Same as above but with a pattern-based qkv converter (covers both layers),
        then checks `revert_weight_conversion` round-trips back to the source dict."""
        model = DummyRoot()
        model.config = PretrainedConfig()
        raw_tensors = {
            "model.layers.0.experts.0.w1.weight": torch.tensor([[0.0, 1.0], [2.0, 3.0]]),
            "model.layers.0.experts.1.w1.weight": torch.tensor([[10.0, 11.0], [12.0, 13.0]]),
            "model.layers.0.experts.0.w3.weight": torch.tensor([[4.0, 5.0], [6.0, 7.0]]),
            "model.layers.0.experts.1.w3.weight": torch.tensor([[14.0, 15.0], [16.0, 17.0]]),
            "model.layers.0.experts.0.w2.weight": torch.tensor([[20.0, 21.0], [22.0, 23.0]]),
            "model.layers.0.experts.1.w2.weight": torch.tensor([[24.0, 25.0], [26.0, 27.0]]),
            "model.layers.1.experts.0.w1.weight": torch.tensor([[30.0, 31.0], [32.0, 33.0]]),
            "model.layers.1.experts.1.w1.weight": torch.tensor([[34.0, 35.0], [36.0, 37.0]]),
            "model.layers.1.experts.0.w3.weight": torch.tensor([[38.0, 39.0], [40.0, 41.0]]),
            "model.layers.1.experts.1.w3.weight": torch.tensor([[42.0, 43.0], [44.0, 45.0]]),
            "model.layers.1.experts.0.w2.weight": torch.tensor([[46.0, 47.0], [48.0, 49.0]]),
            "model.layers.1.experts.1.w2.weight": torch.tensor([[50.0, 51.0], [52.0, 53.0]]),
            "model.layers.0.self_attn.qkv_proj.weight": torch.tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]),
            "model.layers.1.self_attn.qkv_proj.weight": torch.tensor([[7.0, 8.0], [9.0, 10.0], [11.0, 12.0]]),
            "mlp.w2.weight": torch.tensor([[60.0, 61.0], [62.0, 63.0]]),
        }
        state_dict = {k: v.clone() for k, v in raw_tensors.items()}
        weight_mapping = [
            WeightConverter(
                ["experts.*.w1.weight", "experts.*.w3.weight"],
                "experts.gate_up_proj.weight",
                operations=[MergeModulelist(dim=0), Concatenate(dim=1)],
            ),
            WeightConverter(
                "experts.*.w2.weight",
                "experts.down_proj.weight",
                operations=[MergeModulelist(dim=0)],
            ),
            WeightConverter(
                "self_attn.qkv_proj.weight",
                [
                    "self_attn.q_proj.weight",
                    "self_attn.k_proj.weight",
                    "self_attn.v_proj.weight",
                ],
                operations=[Chunk(dim=0)],
            ),
            WeightRenaming("mlp.w2.weight", "mlp.down_proj.weight"),
        ]
        # Use the mapping to load
        load_config = LoadStateDictConfig(
            weight_mapping=weight_mapping,
        )
        loading_info, _ = convert_and_load_state_dict_in_model(
            model,
            state_dict,
            load_config,
            tp_plan=None,
        )
        # Every target key is produced here, so nothing is missing or left over.
        self.assertTrue(len(loading_info.missing_keys) == 0)
        self.assertTrue(len(loading_info.unexpected_keys) == 0)
        self.assertTrue(len(loading_info.mismatched_keys) == 0)
        self.assertTrue(len(loading_info.conversion_errors) == 0)
        # Try to revert the mapping
        reversed_state_dict = revert_weight_conversion(model, model.state_dict())
        # Make sure both saved state_dict are identical
        self.assertTrue(compare_state_dicts(reversed_state_dict, state_dict))
    def test_qkv_chunk_rope_permute_with_fp8_quantization(self):
        """Chunk + RoPE-permute a fused qkv, quantizing only q_proj via a stub FP8 quantizer."""
        if is_triton_available():
            from transformers.integrations.finegrained_fp8 import Fp8Dequantize
        else:
            self.skipTest("Fine-grained FP8 integration tests require Triton to be installed.")
        n_heads = 2
        head_dim = 4
        in_dim = 4
        out_dim = n_heads * head_dim
        block_size = (4, 4)
        class RopeProjector(nn.Module):
            # Projection stub; `with_scale` adds the per-block inverse-scale param FP8 expects.
            def __init__(self, *, with_scale: bool = False):
                super().__init__()
                self.weight = nn.Parameter(torch.zeros(out_dim, in_dim))
                if with_scale:
                    scale_shape = (out_dim // block_size[0], in_dim // block_size[1])
                    self.weight_scale_inv = nn.Parameter(torch.ones(scale_shape))
        class RopeSelfAttn(nn.Module):
            def __init__(self):
                super().__init__()
                # Only q_proj carries a scale — matches `param_needs_quantization` below.
                self.q_proj = RopeProjector(with_scale=True)
                self.k_proj = RopeProjector()
                self.v_proj = RopeProjector()
        class RopeLayer(nn.Module):
            def __init__(self):
                super().__init__()
                self.self_attn = RopeSelfAttn()
        class RopeModel(nn.Module):
            base_model_prefix = "model"
            def __init__(self):
                super().__init__()
                self.layers = nn.ModuleList([RopeLayer()])
        model = RopeModel()
        model.config = PretrainedConfig()
        model.config.num_attention_heads = n_heads
        raw_q = torch.tensor(
            [
                [1.0, -1.0, 1.0, -1.0],
                [0.5, -0.5, 0.5, -0.5],
                [-1.0, 1.0, -1.0, 1.0],
                [-0.5, 0.5, -0.5, 0.5],
                [1.0, 1.0, -1.0, -1.0],
                [0.5, 0.5, -0.5, -0.5],
                [-1.0, -1.0, 1.0, 1.0],
                [-0.5, -0.5, 0.5, 0.5],
            ],
            dtype=torch.float32,
        )
        raw_k = torch.arange(out_dim * in_dim, dtype=torch.float32).reshape(out_dim, in_dim)
        raw_v = torch.arange(out_dim * in_dim, dtype=torch.float32).reshape(out_dim, in_dim) + 100.0
        raw_qkv = torch.cat([raw_q, raw_k, raw_v], dim=0)
        state_dict = {"model.layers.0.self_attn.qkv_proj.weight": raw_qkv.clone()}
        # Minimal stand-in for the real quantizer: exposes just the attributes/methods
        # the loader consults, and flags only q_proj.weight for quantization.
        quantizer_cls = type(
            "FineGrainedFP8HfQuantizer",
            (),
            {
                "__init__": lambda self, bs=block_size: setattr(
                    self, "quantization_config", SimpleNamespace(weight_block_size=bs)
                ),
                "param_needs_quantization": lambda self, _model, param_name: param_name.endswith("q_proj.weight"),
                "pre_quantized": False,
            },
        )
        quantizer = quantizer_cls()
        weight_mapping = [
            WeightConverter(
                "model.layers.*.self_attn.qkv_proj.weight",
                [
                    "model.layers.*.self_attn.q_proj.weight",
                    "model.layers.*.self_attn.k_proj.weight",
                    "model.layers.*.self_attn.v_proj.weight",
                ],
                operations=[Chunk(dim=0), PermuteForRope()],
            )
        ]
        load_config = LoadStateDictConfig(weight_mapping=weight_mapping, hf_quantizer=quantizer)
        loading_info, _ = convert_and_load_state_dict_in_model(model, state_dict, load_config, tp_plan=None)
        self.assertEqual(loading_info.missing_keys, set())
        self.assertEqual(loading_info.unexpected_keys, set())
        self.assertEqual(loading_info.mismatched_keys, set())
        self.assertEqual(loading_info.conversion_errors, {})
        # Compute reference permutations with a standalone PermuteForRope op.
        permute_op = PermuteForRope()
        permute_op.config = model.config
        expected_q = permute_op._apply(raw_q)
        expected_k = permute_op._apply(raw_k)
        expected_v = permute_op._apply(raw_v)
        model_state = model.state_dict()
        # Sanity check: the permutation actually changed the tensor.
        self.assertFalse(torch.allclose(raw_k, expected_k))
        torch.testing.assert_close(model_state["model.layers.0.self_attn.k_proj.weight"], expected_k)
        torch.testing.assert_close(model_state["model.layers.0.self_attn.v_proj.weight"], expected_v)
        q_weight_key = "model.layers.0.self_attn.q_proj.weight"
        scale_key = "model.layers.0.self_attn.q_proj.weight_scale_inv"
        self.assertIn(scale_key, model_state)
        expected_dtype = torch.float8_e4m3fn if hasattr(torch, "float8_e4m3fn") else torch.int8
        self.assertEqual(model_state[q_weight_key].dtype, expected_dtype)
        self.assertEqual(model_state[q_weight_key].shape, torch.Size((out_dim, in_dim)))
        self.assertEqual(model_state[scale_key].dtype, torch.float32)
        self.assertEqual(
            model_state[scale_key].shape,
            torch.Size((out_dim // block_size[0], in_dim // block_size[1])),
        )
        # Dequantize and compare against the unquantized reference (FP8 is lossy → loose tolerances).
        dequant = Fp8Dequantize(block_size=block_size)
        dequantized_q = dequant.convert(
            [model_state[q_weight_key], model_state[scale_key]],
            context={"quantization_config": quantizer.quantization_config},
        )
        torch.testing.assert_close(dequantized_q, expected_q, rtol=1e-2, atol=1e-2)
    def test_ernie4_5_vl_moe_conversion(self):
        """Ernie-style split: experts 0-1 go to `experts`, experts 2-3 to `extra_experts`."""
        model = DummyRoot(add_extra_moe=True)
        model.config = PretrainedConfig()
        raw_tensors = {
            "model.layers.0.experts.0.w1.weight": torch.tensor([[0.0, 1.0], [2.0, 3.0]]),
            "model.layers.0.experts.1.w1.weight": torch.tensor([[10.0, 11.0], [12.0, 13.0]]),
            "model.layers.0.experts.2.w1.weight": torch.tensor([[11.0, 12.0], [13.0, 14.0]]),
            "model.layers.0.experts.3.w1.weight": torch.tensor([[12.0, 13.0], [14.0, 15.0]]),
            "model.layers.0.experts.0.w3.weight": torch.tensor([[4.0, 5.0], [6.0, 7.0]]),
            "model.layers.0.experts.1.w3.weight": torch.tensor([[14.0, 15.0], [16.0, 17.0]]),
            "model.layers.0.experts.2.w3.weight": torch.tensor([[15.0, 16.0], [17.0, 18.0]]),
            "model.layers.0.experts.3.w3.weight": torch.tensor([[16.0, 17.0], [18.0, 19.0]]),
            "model.layers.0.experts.0.w2.weight": torch.tensor([[20.0, 21.0], [22.0, 23.0]]),
            "model.layers.0.experts.1.w2.weight": torch.tensor([[24.0, 25.0], [26.0, 27.0]]),
            "model.layers.0.experts.2.w2.weight": torch.tensor([[25.0, 26.0], [27.0, 28.0]]),
            "model.layers.0.experts.3.w2.weight": torch.tensor([[26.0, 27.0], [28.0, 29.0]]),
            "model.layers.1.experts.0.w1.weight": torch.tensor([[30.0, 31.0], [32.0, 33.0]]),
            "model.layers.1.experts.1.w1.weight": torch.tensor([[34.0, 35.0], [36.0, 37.0]]),
            "model.layers.1.experts.2.w1.weight": torch.tensor([[35.0, 36.0], [37.0, 38.0]]),
            "model.layers.1.experts.3.w1.weight": torch.tensor([[36.0, 37.0], [38.0, 39.0]]),
            "model.layers.1.experts.0.w3.weight": torch.tensor([[38.0, 39.0], [40.0, 41.0]]),
            "model.layers.1.experts.1.w3.weight": torch.tensor([[42.0, 43.0], [44.0, 45.0]]),
            "model.layers.1.experts.2.w3.weight": torch.tensor([[43.0, 44.0], [45.0, 46.0]]),
            "model.layers.1.experts.3.w3.weight": torch.tensor([[44.0, 45.0], [46.0, 47.0]]),
            "model.layers.1.experts.0.w2.weight": torch.tensor([[46.0, 47.0], [48.0, 49.0]]),
            "model.layers.1.experts.1.w2.weight": torch.tensor([[50.0, 51.0], [52.0, 53.0]]),
            "model.layers.1.experts.2.w2.weight": torch.tensor([[51.0, 52.0], [53.0, 54.0]]),
            "model.layers.1.experts.3.w2.weight": torch.tensor([[52.0, 53.0], [54.0, 55.0]]),
            "model.layers.0.self_attn.qkv_proj.weight": torch.tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]),
            "model.layers.1.self_attn.qkv_proj.weight": torch.tensor([[7.0, 8.0], [9.0, 10.0], [11.0, 12.0]]),
            "mlp.w2.weight": torch.tensor([[60.0, 61.0], [62.0, 63.0]]),
        }
        state_dict = {k: v.clone() for k, v in raw_tensors.items()}
        weight_mapping = [
            # One source pattern fans out to two targets: the op fuses then splits
            # the expert list between text (`experts`) and vision (`extra_experts`).
            WeightConverter(
                ["experts.*.w1.weight", "experts.*.w3.weight"],
                ["experts.gate_up_proj.weight", "extra_experts.gate_up_proj.weight"],
                operations=[ErnieFuseAndSplitTextVisionExperts(stack_dim=0, concat_dim=1)],
            ),
            WeightConverter(
                "experts.*.w2.weight",
                ["experts.down_proj.weight", "extra_experts.down_proj.weight"],
                operations=[ErnieFuseAndSplitTextVisionExperts(stack_dim=0, concat_dim=1)],
            ),
            WeightConverter(
                "self_attn.qkv_proj.weight",
                [
                    "self_attn.q_proj.weight",
                    "self_attn.k_proj.weight",
                    "self_attn.v_proj.weight",
                ],
                operations=[Chunk(dim=0)],
            ),
            WeightRenaming("mlp.w2.weight", "mlp.down_proj.weight"),
        ]
        loading_info, _ = convert_and_load_state_dict_in_model(
            model, state_dict, LoadStateDictConfig(weight_mapping=weight_mapping), tp_plan=None
        )
        self.assertEqual(loading_info.missing_keys, set())
        self.assertEqual(loading_info.unexpected_keys, set())
        self.assertEqual(loading_info.mismatched_keys, set())
        self.assertEqual(loading_info.conversion_errors, {})
        model_state = model.state_dict()
        def cat_gate(layer_prefix: str) -> torch.Tensor:
            # Expected fused gate_up per group: experts {0,1} → moe_1, experts {2,3} → moe_2.
            moe_1_w1 = [
                raw_tensors[f"{layer_prefix}.experts.0.w1.weight"],
                raw_tensors[f"{layer_prefix}.experts.1.w1.weight"],
            ]
            moe_2_w1 = [
                raw_tensors[f"{layer_prefix}.experts.2.w1.weight"],
                raw_tensors[f"{layer_prefix}.experts.3.w1.weight"],
            ]
            moe_1_w3 = [
                raw_tensors[f"{layer_prefix}.experts.0.w3.weight"],
                raw_tensors[f"{layer_prefix}.experts.1.w3.weight"],
            ]
            moe_2_w3 = [
                raw_tensors[f"{layer_prefix}.experts.2.w3.weight"],
                raw_tensors[f"{layer_prefix}.experts.3.w3.weight"],
            ]
            moe_1 = torch.cat([torch.stack(moe_1_w1, dim=0), torch.stack(moe_1_w3, dim=0)], dim=1)
            moe_2 = torch.cat([torch.stack(moe_2_w1, dim=0), torch.stack(moe_2_w3, dim=0)], dim=1)
            return moe_1, moe_2
        moe_1, moe_2 = cat_gate("model.layers.0")
        torch.testing.assert_close(model_state["model.layers.0.experts.gate_up_proj.weight"], moe_1)
        torch.testing.assert_close(model_state["model.layers.0.extra_experts.gate_up_proj.weight"], moe_2)
        moe_1, moe_2 = cat_gate("model.layers.1")
        torch.testing.assert_close(model_state["model.layers.1.experts.gate_up_proj.weight"], moe_1)
        torch.testing.assert_close(model_state["model.layers.1.extra_experts.gate_up_proj.weight"], moe_2)
        def stack_down(layer_prefix: str) -> torch.Tensor:
            # Expected down_proj per group, stacked along the new leading dim.
            moe_1 = torch.stack(
                [
                    raw_tensors[f"{layer_prefix}.experts.0.w2.weight"],
                    raw_tensors[f"{layer_prefix}.experts.1.w2.weight"],
                ],
                dim=0,
            )
            moe_2 = torch.stack(
                [
                    raw_tensors[f"{layer_prefix}.experts.2.w2.weight"],
                    raw_tensors[f"{layer_prefix}.experts.3.w2.weight"],
                ],
                dim=0,
            )
            return moe_1, moe_2
        moe_1, moe_2 = stack_down("model.layers.0")
        torch.testing.assert_close(model_state["model.layers.0.experts.down_proj.weight"], moe_1)
        torch.testing.assert_close(model_state["model.layers.0.extra_experts.down_proj.weight"], moe_2)
        moe_1, moe_2 = stack_down("model.layers.1")
        torch.testing.assert_close(model_state["model.layers.1.experts.down_proj.weight"], moe_1)
        torch.testing.assert_close(model_state["model.layers.1.extra_experts.down_proj.weight"], moe_2)
    def test_ernie4_5_vl_moe_conversion_reversed(self):
        """Round-trip check for the Ernie fuse-and-split conversion via `revert_weight_conversion`."""
        model = DummyRoot(add_extra_moe=True)
        model.config = PretrainedConfig()
        raw_tensors = {
            "model.layers.0.experts.0.w1.weight": torch.tensor([[0.0, 1.0], [2.0, 3.0]]),
            "model.layers.0.experts.1.w1.weight": torch.tensor([[10.0, 11.0], [12.0, 13.0]]),
            "model.layers.0.experts.2.w1.weight": torch.tensor([[11.0, 12.0], [13.0, 14.0]]),
            "model.layers.0.experts.3.w1.weight": torch.tensor([[12.0, 13.0], [14.0, 15.0]]),
            "model.layers.0.experts.0.w3.weight": torch.tensor([[4.0, 5.0], [6.0, 7.0]]),
            "model.layers.0.experts.1.w3.weight": torch.tensor([[14.0, 15.0], [16.0, 17.0]]),
            "model.layers.0.experts.2.w3.weight": torch.tensor([[15.0, 16.0], [17.0, 18.0]]),
            "model.layers.0.experts.3.w3.weight": torch.tensor([[16.0, 17.0], [18.0, 19.0]]),
            "model.layers.0.experts.0.w2.weight": torch.tensor([[20.0, 21.0], [22.0, 23.0]]),
            "model.layers.0.experts.1.w2.weight": torch.tensor([[24.0, 25.0], [26.0, 27.0]]),
            "model.layers.0.experts.2.w2.weight": torch.tensor([[25.0, 26.0], [27.0, 28.0]]),
            "model.layers.0.experts.3.w2.weight": torch.tensor([[26.0, 27.0], [28.0, 29.0]]),
            "model.layers.1.experts.0.w1.weight": torch.tensor([[30.0, 31.0], [32.0, 33.0]]),
            "model.layers.1.experts.1.w1.weight": torch.tensor([[34.0, 35.0], [36.0, 37.0]]),
            "model.layers.1.experts.2.w1.weight": torch.tensor([[35.0, 36.0], [37.0, 38.0]]),
            "model.layers.1.experts.3.w1.weight": torch.tensor([[36.0, 37.0], [38.0, 39.0]]),
            "model.layers.1.experts.0.w3.weight": torch.tensor([[38.0, 39.0], [40.0, 41.0]]),
            "model.layers.1.experts.1.w3.weight": torch.tensor([[42.0, 43.0], [44.0, 45.0]]),
            "model.layers.1.experts.2.w3.weight": torch.tensor([[43.0, 44.0], [45.0, 46.0]]),
            "model.layers.1.experts.3.w3.weight": torch.tensor([[44.0, 45.0], [46.0, 47.0]]),
            "model.layers.1.experts.0.w2.weight": torch.tensor([[46.0, 47.0], [48.0, 49.0]]),
            "model.layers.1.experts.1.w2.weight": torch.tensor([[50.0, 51.0], [52.0, 53.0]]),
            "model.layers.1.experts.2.w2.weight": torch.tensor([[51.0, 52.0], [53.0, 54.0]]),
            "model.layers.1.experts.3.w2.weight": torch.tensor([[52.0, 53.0], [54.0, 55.0]]),
            "model.layers.0.self_attn.qkv_proj.weight": torch.tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]),
            "model.layers.1.self_attn.qkv_proj.weight": torch.tensor([[7.0, 8.0], [9.0, 10.0], [11.0, 12.0]]),
            "mlp.w2.weight": torch.tensor([[60.0, 61.0], [62.0, 63.0]]),
        }
        state_dict = {k: v.clone() for k, v in raw_tensors.items()}
        weight_mapping = [
            WeightConverter(
                ["experts.*.w1.weight", "experts.*.w3.weight"],
                ["experts.gate_up_proj.weight", "extra_experts.gate_up_proj.weight"],
                operations=[ErnieFuseAndSplitTextVisionExperts(stack_dim=0, concat_dim=1)],
            ),
            WeightConverter(
                "experts.*.w2.weight",
                ["experts.down_proj.weight", "extra_experts.down_proj.weight"],
                operations=[ErnieFuseAndSplitTextVisionExperts(stack_dim=0, concat_dim=1)],
            ),
            WeightConverter(
                "self_attn.qkv_proj.weight",
                [
                    "self_attn.q_proj.weight",
                    "self_attn.k_proj.weight",
                    "self_attn.v_proj.weight",
                ],
                operations=[Chunk(dim=0)],
            ),
            WeightRenaming("mlp.w2.weight", "mlp.down_proj.weight"),
        ]
        # Use the mapping to load
        loading_info, _ = convert_and_load_state_dict_in_model(
            model, state_dict, LoadStateDictConfig(weight_mapping=weight_mapping), tp_plan=None
        )
        self.assertTrue(len(loading_info.missing_keys) == 0)
        self.assertTrue(len(loading_info.unexpected_keys) == 0)
        self.assertTrue(len(loading_info.mismatched_keys) == 0)
        self.assertTrue(len(loading_info.conversion_errors) == 0)
        # Try to revert the mapping
        reversed_state_dict = revert_weight_conversion(model, model.state_dict())
        # Make sure both saved state_dict are identical
        self.assertTrue(compare_state_dicts(reversed_state_dict, state_dict))
class TestConversionMapping(unittest.TestCase):
    """Registration and overwrite semantics of checkpoint conversion mappings."""

    def test_register_checkpoint_conversion_mapping(self):
        single_mapping = [WeightRenaming(".block_sparse_moe.gate", ".mlp.gate")]
        register_checkpoint_conversion_mapping("foobar", single_mapping)
        self.assertEqual(len(get_checkpoint_conversion_mapping("foobar")), 1)

    def test_register_checkpoint_conversion_mapping_overwrites(self):
        initial = [WeightRenaming(".block_sparse_moe.gate", ".mlp.gate")]
        register_checkpoint_conversion_mapping("foobarbaz", initial)
        replacement = [
            WeightRenaming(".block_sparse_moe.foo", ".mlp.foo"),
            WeightRenaming(".block_sparse_moe.bar", ".mlp.bar"),
        ]
        # Re-registering the same name without overwrite must be rejected.
        with self.assertRaises(ValueError):
            register_checkpoint_conversion_mapping("foobarbaz", replacement)
        # With overwrite=True the earlier mapping is replaced wholesale.
        register_checkpoint_conversion_mapping("foobarbaz", replacement, overwrite=True)
        self.assertEqual(len(get_checkpoint_conversion_mapping("foobarbaz")), 2)
if __name__ == "__main__":
    # Allow running this test module directly with `python <file>`.
    unittest.main()
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/utils/test_core_model_loading.py",
"license": "Apache License 2.0",
"lines": 665,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:src/transformers/models/audioflamingo3/configuration_audioflamingo3.py | # Copyright 2025 NVIDIA CORPORATION and the HuggingFace Inc. team. All rights
# reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING, AutoConfig
logger = logging.get_logger(__name__)
class AudioFlamingo3EncoderConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of an [`AudioFlamingo3Encoder`]. It is used to instantiate an
    AudioFlamingo3 audio encoder according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the audio encoder of the AudioFlamingo3
    architecture.
    e.g. [nvidia/audio-flamingo-3-hf](https://huggingface.co/nvidia/audio-flamingo-3-hf)
    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.
    Args:
        num_mel_bins (`int`, *optional*, defaults to 128):
            Number of mel features used per input features. Should correspond to the value used in the
            `AudioFlamingo3Processor` class.
        num_hidden_layers (`int`, *optional*, defaults to 32):
            Number of encoder layers.
        num_attention_heads (`int`, *optional*, defaults to 20):
            Number of attention heads for each attention layer in the Transformer encoder.
        intermediate_size (`int`, *optional*, defaults to 5120):
            Dimensionality of the "intermediate" (often named feed-forward) layer in encoder.
        layerdrop (`float`, *optional*, defaults to 0.0):
            The LayerDrop probability for the encoder. See the [LayerDrop paper](https://huggingface.co/papers/1909.11556)
            for more details.
        activation_function (`str`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"silu"` and `"gelu_new"` are supported.
        hidden_size (`int`, *optional*, defaults to 1280):
            Dimensionality of the layers.
        dropout (`float`, *optional*, defaults to 0.0):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        activation_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for activations inside the fully connected layer.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        scale_embedding (`bool`, *optional*, defaults to `False`):
            Scale embeddings by dividing by sqrt(hidden_size).
        max_source_positions (`int`, *optional*, defaults to 1500):
            The maximum sequence length of log-mel filter-bank features that this model might ever be used with.
    Example:
    ```python
    >>> from transformers import AudioFlamingo3EncoderConfig, AudioFlamingo3Encoder
    >>> # Initializing an AudioFlamingo3EncoderConfig
    >>> configuration = AudioFlamingo3EncoderConfig()
    >>> # Initializing an AudioFlamingo3Encoder (with random weights)
    >>> model = AudioFlamingo3Encoder(configuration)
    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""
    model_type = "audioflamingo3_encoder"
    # Maps the Whisper-style attribute names used by shared modeling code onto
    # this config's canonical attribute names.
    attribute_map = {
        "d_model": "hidden_size",
        "encoder_layers": "num_hidden_layers",
        "encoder_attention_heads": "num_attention_heads",
        "encoder_ffn_dim": "intermediate_size",
        "encoder_layerdrop": "layerdrop",
    }
    def __init__(
        self,
        num_mel_bins=128,
        num_hidden_layers=32,
        num_attention_heads=20,
        intermediate_size=5120,
        layerdrop=0.0,
        activation_function="gelu",
        hidden_size=1280,
        dropout=0.0,
        attention_dropout=0.0,
        activation_dropout=0.0,
        initializer_range=0.02,
        scale_embedding=False,
        max_source_positions=1500,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_mel_bins = num_mel_bins
        self.hidden_size = hidden_size
        # Fix: `num_hidden_layers` was previously assigned twice; the redundant
        # second assignment has been removed (no behavior change).
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.initializer_range = initializer_range
        self.layerdrop = layerdrop
        self.scale_embedding = scale_embedding
        self.max_source_positions = max_source_positions
class AudioFlamingo3Config(PretrainedConfig):
r"""
This is the configuration class to store the configuration of an [`AudioFlamingo3ForConditionalGeneration`]. It is used to instantiate an
AudioFlamingo3 model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the AudioFlamingo3.
e.g. [nvidia/audio-flamingo-3-hf](https://huggingface.co/nvidia/audio-flamingo-3-hf)
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
audio_config (`Union[AudioFlamingo3EncoderConfig, dict]`, *optional*, defaults to `AudioFlamingo3EncoderConfig`):
The config object or dictionary of the audio backbone.
text_config (`Union[AutoConfig, dict]`, *optional*, defaults to `Qwen2Config`):
The config object or dictionary of the text backbone.
audio_token_id (`int`, *optional*, defaults to 151669):
The audio token index to encode the audio prompt.
projector_hidden_act (`str`, *optional*, defaults to `"gelu"`):
Activation function used in the projector.
projector_bias (`bool`, *optional*, defaults to `True`):
Whether to include bias terms in the projector.
Example:
```python
>>> from transformers import AudioFlamingo3ForConditionalGeneration, AudioFlamingo3Config, AudioFlamingo3EncoderConfig, Qwen2Config
>>> # Initializing an AudioFlamingo3Encoder config
>>> audio_config = AudioFlamingo3EncoderConfig()
>>> # Initializing a Qwen2 config
>>> text_config = Qwen2Config()
>>> # Initializing an AudioFlamingo3 configuration
>>> configuration = AudioFlamingo3Config(audio_config, text_config)
>>> # Initializing a model from the audioflamingo3 style configuration
>>> model = AudioFlamingo3ForConditionalGeneration(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "audioflamingo3"
sub_configs = {
"audio_config": AudioFlamingo3EncoderConfig,
"text_config": AutoConfig,
}
def __init__(
    self,
    audio_config=None,
    text_config=None,
    audio_token_id=151669,
    projector_hidden_act="gelu",
    projector_bias=True,
    **kwargs,
):
    """Build the composite config from its audio/text sub-configs.

    Dict sub-configs are materialized through ``CONFIG_MAPPING``; ``None``
    falls back to the default encoder (audio) / Qwen2 (text) configs.
    """
    self.audio_token_id = audio_token_id

    # Normalize the audio backbone config: dict -> concrete config class,
    # None -> default AudioFlamingo3 encoder config.
    if isinstance(audio_config, dict):
        model_type = audio_config.setdefault("model_type", "audioflamingo3_encoder")
        audio_config = CONFIG_MAPPING[model_type](**audio_config)
    elif audio_config is None:
        audio_config = CONFIG_MAPPING["audioflamingo3_encoder"]()
    self.audio_config = audio_config

    # Same normalization for the text backbone (defaults to Qwen2).
    if isinstance(text_config, dict):
        model_type = text_config.setdefault("model_type", "qwen2")
        text_config = CONFIG_MAPPING[model_type](**text_config)
    elif text_config is None:
        text_config = CONFIG_MAPPING["qwen2"]()
    self.text_config = text_config

    self.projector_hidden_act = projector_hidden_act
    self.projector_bias = projector_bias
    super().__init__(**kwargs)
# Public symbols re-exported by the package-level __init__.
__all__ = ["AudioFlamingo3Config", "AudioFlamingo3EncoderConfig"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/audioflamingo3/configuration_audioflamingo3.py",
"license": "Apache License 2.0",
"lines": 171,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/audioflamingo3/convert_audioflamingo3_to_hf.py | # Copyright 2025 NVIDIA CORPORATION and the HuggingFace Inc. team. All rights
# reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert AudioFlamingo3 checkpoints into a Hugging Face repository layout."""
from __future__ import annotations
import argparse
import json
import logging
from collections import defaultdict
from pathlib import Path
from typing import Any
import torch
from safetensors.torch import safe_open
from transformers import (
AudioFlamingo3Config,
AudioFlamingo3ForConditionalGeneration,
AudioFlamingo3Processor,
AutoTokenizer,
GenerationConfig,
Qwen2Config,
WhisperFeatureExtractor,
)
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO, format="%(levelname)s: %(message)s")
def _load_json(p: Path):
    """Parse and return the JSON document at *p*, failing fast if it is absent."""
    if not p.is_file():
        raise FileNotFoundError(f"Missing JSON: {p}")
    return json.loads(p.read_text(encoding="utf-8"))
def write_processor(src_root: Path, dst_root: Path):
    """Assemble and save the AudioFlamingo3 processor (tokenizer + feature extractor).

    The tokenizer is loaded from the original checkpoint's ``llm/`` directory,
    paired with a Whisper-style feature extractor, and the combined processor is
    written to ``dst_root``. Returns the processor so callers can reuse its
    tokenizer when building the model config.
    """
    llm_dir = src_root / "llm"
    # Chat template attached to the tokenizer itself: handles plain-string
    # message contents only.
    # fmt: off
    tokenizer_chat_template = (
        "{% if messages[0]['role'] != 'system' %}"
        "{{ '<|im_start|>system\\nYou are a helpful assistant.<|im_end|>\\n' }}"
        "{% endif %}"
        "{% for message in messages if message['content'] is not none %}"
        "{{ '<|im_start|>' + message['role'] + '\\n' + message['content'] + '<|im_end|>' + '\\n' }}"
        "{% endfor %}"
        "{% if add_generation_prompt %}"
        "{{ '<|im_start|>assistant\\n' }}"
        "{% endif %}"
    )
    # fmt: on
    # Processor-level template: additionally understands structured content
    # lists, concatenating "text" items and emitting a single '<sound>'
    # placeholder for any turn that contains audio.
    # fmt: off
    processor_chat_template = (
        "{% if messages[0]['role'] != 'system' %}"
        "<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n"
        "{% endif %}"
        "{% for m in messages if m['content'] is not none %}"
        "<|im_start|>{{ m['role'] }}\n"
        "{% if m['content'] is string %}"
        "{{ m['content'] }}"
        "{% else %}"
        "{% set audio = namespace(found=False) %}"
        "{% set text_buf = namespace(v='') %}"
        "{% for c in m['content'] %}"
        "{% if c.get('type') == 'audio' or 'audio' in c %}"
        "{% set audio.found = True %}"
        "{% elif c.get('type') == 'text' or 'text' in c %}"
        "{% set text_buf.v = text_buf.v + c['text'] %}"
        "{% endif %}"
        "{% endfor %}"
        "{% if audio.found %}{{ '<sound>' }}{% endif %}{{ text_buf.v }}"
        "{% endif %}"
        "<|im_end|>\n"
        "{% endfor %}"
        "{% if add_generation_prompt %}"
        "<|im_start|>assistant\n"
        "{% endif %}"
    )
    # fmt: on
    processor = AudioFlamingo3Processor(
        # feature_size=128 matches the 128-mel front-end used by the checkpoint.
        feature_extractor=WhisperFeatureExtractor(feature_size=128, return_attention_mask=True),
        tokenizer=AutoTokenizer.from_pretrained(str(llm_dir), chat_template=tokenizer_chat_template, use_fast=True),
        chat_template=processor_chat_template,
    )
    processor.save_pretrained(str(dst_root))
    logger.info("processor (tokenizer + preprocessor)")
    return processor
# Maps the original checkpoint's component directory names to the attribute
# prefixes used in the converted HF state dict.
PREFIX_MAP = {
    "llm": "language_model",
    "sound_tower": "audio_tower",
    "sound_mm_projector": "multi_modal_projector",
}
def _resolve_component_dir(dirpath: Path):
    """Locate the safetensors weights stored under *dirpath*.

    Returns ``("sharded", dirpath, {shard: [keys...]})`` when an index file is
    present, ``("file", path)`` for a single monolithic file, or ``None`` when
    nothing usable is found.
    """
    if not dirpath.is_dir():
        return None

    index_path = dirpath / "model.safetensors.index.json"
    mono_path = dirpath / "model.safetensors"

    if index_path.exists():
        # Group tensor keys by the shard that stores them; sort both the shard
        # names and the key lists for deterministic iteration later.
        weight_map = _load_json(index_path).get("weight_map") or {}
        grouped: dict[str, list[str]] = defaultdict(list)
        for tensor_key, shard_name in weight_map.items():
            grouped[shard_name].append(tensor_key)
        shard_map = {shard: sorted(keys) for shard, keys in sorted(grouped.items())}
        return ("sharded", dirpath, shard_map)

    if mono_path.exists():
        return ("file", mono_path)

    # Fall back to a lone *.safetensors file with a non-standard name.
    candidates = sorted(entry for entry in dirpath.iterdir() if entry.suffix == ".safetensors")
    if len(candidates) == 1:
        return ("file", candidates[0])
    return None
def merge_and_shard_weights(src_root: Path, dst_root: Path, processor: AudioFlamingo3Processor):
    """Merge per-component safetensors into one HF model and save it to *dst_root*.

    Reads weights from the ``llm/``, ``sound_tower/`` and ``sound_mm_projector/``
    subdirectories of *src_root*, remaps their key prefixes via ``PREFIX_MAP``,
    loads them into a freshly instantiated
    ``AudioFlamingo3ForConditionalGeneration`` (bfloat16), and saves the sharded
    checkpoint. Returns the loaded model.

    Raises:
        FileNotFoundError: if no tensors were found in any component directory.
        ValueError: if the state dict does not load cleanly into the model.
    """
    state: dict[str, Any] = {}
    for tag in PREFIX_MAP.keys():
        comp = _resolve_component_dir(src_root / tag)
        if not comp:
            continue
        out_prefix = PREFIX_MAP.get(tag, tag)
        if comp[0] == "file":
            # Monolithic safetensors file: copy every tensor under the new prefix.
            fp: Path = comp[1]
            with safe_open(str(fp), framework="pt", device="cpu") as f:
                for k in f.keys():
                    if k == "__metadata__":
                        continue
                    state[f"{out_prefix}.{k}"] = f.get_tensor(k)
        else:
            # Sharded checkpoint: read each shard only for the keys it owns.
            base: Path = comp[1]
            shard_map: dict[str, list[str]] = comp[2]
            for shard, keys in shard_map.items():
                sp = base / shard
                with safe_open(str(sp), framework="pt", device="cpu") as f:
                    for k in keys:
                        state[f"{out_prefix}.{k}"] = f.get_tensor(k)
    if not state:
        raise FileNotFoundError("No tensors found in llm/, sound_tower/, or sound_mm_projector/.")
    tok = processor.tokenizer
    # Text-backbone hyperparameters are hard-coded to the original checkpoint's
    # LLM; token ids and vocab size come from the just-built tokenizer.
    text_config = Qwen2Config(
        bos_token_id=tok.bos_token_id,
        eos_token_id=tok.eos_token_id,
        pad_token_id=tok.pad_token_id,
        vocab_size=len(tok),
        hidden_size=3584,
        intermediate_size=18944,
        model_max_length=8192,
        num_attention_heads=28,
        num_hidden_layers=28,
        num_key_value_heads=4,
        rope_theta=1000000.0,
        use_cache=False,
    )
    config = AudioFlamingo3Config(text_config=text_config, audio_token_id=tok.get_vocab()["<sound>"])
    model = AudioFlamingo3ForConditionalGeneration(config).to(dtype=torch.bfloat16)
    # Update state dict to new key names if necessary
    projector_key_mapping = {
        "multi_modal_projector.layers.0.weight": "multi_modal_projector.linear_1.weight",
        "multi_modal_projector.layers.0.bias": "multi_modal_projector.linear_1.bias",
        "multi_modal_projector.layers.2.weight": "multi_modal_projector.linear_2.weight",
        "multi_modal_projector.layers.2.bias": "multi_modal_projector.linear_2.bias",
    }
    for old_key, new_key in projector_key_mapping.items():
        if old_key in state:
            state[new_key] = state.pop(old_key)
    # Load weights into the instantiated model so we can push via `push_to_hub` later.
    load_res = model.load_state_dict(state, strict=True)
    # Enforce a clean load (belt-and-braces on top of strict=True; truncate the
    # reported key lists to keep the error message readable).
    if getattr(load_res, "missing_keys", None) and load_res.missing_keys:
        mk = load_res.missing_keys
        raise ValueError(f"Missing keys when loading: {mk[:10]}{' ...' if len(mk) > 10 else ''}")
    if getattr(load_res, "unexpected_keys", None) and load_res.unexpected_keys:
        uk = load_res.unexpected_keys
        raise ValueError(f"Unexpected keys when loading: {uk[:10]}{' ...' if len(uk) > 10 else ''}")
    generation_config = GenerationConfig(
        bos_token_id=tok.bos_token_id,
        eos_token_id=tok.eos_token_id,
        pad_token_id=tok.pad_token_id,
        max_new_tokens=2048,
    )
    model.generation_config = generation_config
    model.save_pretrained(save_directory=str(dst_root))
    logger.info("model.safetensors index and shards")
    return model
"""
Reproducible Usage
==================
1) Download the original AudioFlamingo-3 weights from NVIDIA (requires Git LFS):
```
git lfs install
git clone https://huggingface.co/nvidia/audio-flamingo-3
```
This will create a folder `audio-flamingo-3/` containing the original components:
`llm/`, `sound_tower/`, and `sound_mm_projector/`.
2) Convert to the Hugging Face Transformers format (locally):
```
python src/transformers/models/audioflamingo3/convert_audioflamingo3_to_hf.py \
--src_dir audio-flamingo-3 \
--dst_dir audio-flamingo-3-hf
```
3) Convert and push directly to the Hub (requires `huggingface-cli login` or `HF_TOKEN`):
```
python src/transformers/models/audioflamingo3/convert_audioflamingo3_to_hf.py \
--src_dir audio-flamingo-3 \
--dst_dir audio-flamingo-3-hf \
--push_to_hub <username-or-org>/audio-flamingo-3
```
This command uploads both the processor (tokenizer + feature extractor) and the converted
model (sharded safetensors + configs) to the specified Hub repository.
"""
def main() -> None:
    """CLI entry point: convert a local AudioFlamingo3 checkpoint and optionally push it."""
    parser = argparse.ArgumentParser(description="Convert AudioFlamingo3 to Hugging Face format.")
    parser.add_argument("--src_dir", required=True, help="Source model root directory")
    parser.add_argument("--dst_dir", required=True, help="Destination directory for converted model")
    parser.add_argument(
        "--push_to_hub",
        default=None,
        type=str,
        help=(
            "Optional repository ID to push the converted assets to the Hugging Face Hub, "
            "e.g. 'username/audio-flamingo-3'."
        ),
    )
    args = parser.parse_args()

    # Validate both endpoints up front so we fail before any conversion work.
    src_root = Path(args.src_dir).resolve()
    if not src_root.is_dir():
        raise FileNotFoundError(f"Source directory not found: {src_root}")
    dst_root = Path(args.dst_dir).resolve()
    if dst_root.exists():
        raise FileExistsError(f"Destination already exists: {dst_root}")

    # Two-step conversion: processor assets first, then the merged weights.
    processor = write_processor(src_root, dst_root)
    model = merge_and_shard_weights(src_root, dst_root, processor)

    # Optionally push converted assets using native push_to_hub only
    if args.push_to_hub:
        logger.info("Pushing processor to the Hub ...")
        processor.push_to_hub(args.push_to_hub)
        logger.info("Pushing model to the Hub ...")
        model.push_to_hub(args.push_to_hub)
# Allow running the converter directly as a script.
if __name__ == "__main__":
    main()
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/audioflamingo3/convert_audioflamingo3_to_hf.py",
"license": "Apache License 2.0",
"lines": 237,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/audioflamingo3/modular_audioflamingo3.py | # Copyright 2025 NVIDIA CORPORATION and the HuggingFace Inc. team. All rights
# reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from torch import nn
from ...activations import ACT2FN
from ...cache_utils import Cache
from ...masking_utils import create_bidirectional_mask
from ...modeling_outputs import BaseModelOutputWithPooling, CausalLMOutputWithPast
from ...processing_utils import Unpack
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, logging
from ...utils.generic import merge_with_config_defaults
from ...utils.output_capturing import capture_outputs
from ..qwen2_audio.modeling_qwen2_audio import (
Qwen2AudioEncoder,
Qwen2AudioPreTrainedModel,
)
from ..voxtral.modeling_voxtral import VoxtralForConditionalGeneration, VoxtralMultiModalProjector
from ..whisper.modeling_whisper import WhisperAttention, WhisperEncoderLayer
from .configuration_audioflamingo3 import AudioFlamingo3Config
logger = logging.get_logger(__name__)
class AudioFlamingo3Attention(WhisperAttention):
    # Unchanged Whisper attention; the subclass exists so the modular converter
    # emits an AudioFlamingo3-named class usable in `_can_record_outputs`.
    pass
class AudioFlamingo3EncoderLayer(WhisperEncoderLayer):
    # Unchanged Whisper encoder layer, renamed for this model family.
    pass
class AudioFlamingo3PreTrainedModel(Qwen2AudioPreTrainedModel):
    # Reuses Qwen2Audio's PreTrainedModel plumbing (init, loading hooks) as-is.
    pass
@auto_docstring(
    custom_intro="""
    The audio model from AudioFlamingo3 without any head or projection on top.
    """
)
class AudioFlamingo3Encoder(Qwen2AudioEncoder):
    """
    AudioFlamingo3 encoder: Whisper encoder, average pool (time/2), then LayerNorm.
    """

    # Which submodule outputs are recorded when callers request hidden states
    # or attentions via `capture_outputs`.
    _can_record_outputs = {
        "hidden_states": AudioFlamingo3EncoderLayer,
        "attentions": AudioFlamingo3Attention,
    }

    @merge_with_config_defaults
    @capture_outputs
    def forward(
        self,
        input_features: torch.Tensor,
        input_features_mask: torch.Tensor | None = None,
        **kwargs,
    ) -> tuple | BaseModelOutputWithPooling:
        r"""
        Args:
            input_features (`torch.FloatTensor` of shape `(batch_size, feature_size, sequence_length)`):
                Log-Mel features extracted from raw audio. Use the processor/feature extractor to compute and pad
                these features from waveform input.
            input_features_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding feature indices. Mask values selected in `[0, 1]`:
                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.
        """
        # NOTE(review): although `input_features_mask` is typed *optional*, it is
        # used unconditionally below (`.sum(-1)`); passing None would raise.
        # Confirm all callers always provide it.
        seq_len = (input_features.shape[-1] - 1) // 2 + 1  # After conv2 downsampling
        input_features_lengths = input_features_mask.sum(-1)
        input_features_lengths = (input_features_lengths - 1) // 2 + 1  # conv2 downsampling
        # Rebuild the padding mask at the post-conv resolution from the lengths.
        input_features_mask = torch.arange(seq_len, device=input_features.device) < input_features_lengths[:, None]
        # Conv front-end
        inputs_embeds = nn.functional.gelu(self.conv1(input_features))
        inputs_embeds = nn.functional.gelu(self.conv2(inputs_embeds))
        inputs_embeds = inputs_embeds.permute(0, 2, 1)
        # Add positions, dropout
        hidden_states = inputs_embeds + self.embed_positions.weight
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
        attention_mask = create_bidirectional_mask(
            config=self.config,
            inputs_embeds=hidden_states,
            attention_mask=input_features_mask,
        )
        # Transformer stack
        for layer in self.layers:
            # LayerDrop: during training, randomly skip a layer with prob `layerdrop`.
            drop = self.training and torch.rand([]) < self.layerdrop
            if not drop:
                hidden_states = layer(hidden_states, attention_mask)[0]
        # AvgPool (time/2) + LayerNorm
        hidden_states = hidden_states.permute(0, 2, 1)
        hidden_states = self.avg_pooler(hidden_states).permute(0, 2, 1)
        hidden_states = self.layer_norm(hidden_states)
        return BaseModelOutputWithPooling(
            last_hidden_state=hidden_states,
        )
class AudioFlamingo3MultiModalProjector(VoxtralMultiModalProjector):
    """
    Audio adaptor (small MLP) that projects AudioFlamingo3Encoder features
    to the LLM embedding space so they can replace `<sound>` tokens.
    """

    def __init__(self, config: AudioFlamingo3Config):
        super().__init__()
        audio_dim = config.audio_config.hidden_size
        text_dim = config.text_config.hidden_size
        use_bias = config.projector_bias
        # Two-layer MLP: audio hidden size -> text hidden size -> text hidden size.
        self.linear_1 = nn.Linear(audio_dim, text_dim, bias=use_bias)
        self.act = ACT2FN[config.projector_hidden_act]
        self.linear_2 = nn.Linear(text_dim, text_dim, bias=use_bias)
@auto_docstring(
    custom_intro="""
    The AudioFlamingo3 model which consists of a fine-tuned Whisper encoder, a multi-modal projector and a Qwen2 language model.
    """
)
class AudioFlamingo3ForConditionalGeneration(VoxtralForConditionalGeneration):
    # Voxtral's parallelism plans and fp32 module pinning do not apply here.
    _tp_plan = None
    _pp_plan = None
    _keep_in_fp32_modules_strict = None

    def __init__(self, config):
        super().__init__(config)

    @can_return_tuple
    @auto_docstring(
        custom_intro="This method is used to get the audio embeddings from input features (a log mel spectrogram), meaning inferring the audio encoder and the multi-modal projector."
    )
    def get_audio_features(
        self,
        input_features: torch.FloatTensor,
        input_features_mask: torch.Tensor,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple | BaseModelOutputWithPooling:
        r"""
        input_features (`torch.FloatTensor`):
            Float values of mel features extracted from the raw speech waveform. Raw speech waveform can be
            obtained by loading a `.flac` or `.wav` audio file into an array of type `list[float]` or a
            `numpy.ndarray`, *e.g.* via the soundfile library (`pip install soundfile`). To prepare the array into
            `input_features`, the [`AutoFeatureExtractor`] should be used for extracting the mel features, padding
            and conversion into a tensor of type `torch.FloatTensor`. See [`~WhisperFeatureExtractor.__call__`]
        input_features_mask (`torch.Tensor` of shape `(batch_size, feature_sequence_length)`):
            Mask to avoid performing attention on padded feature indices.
        """
        # Encode audio
        audio_output = self.audio_tower(
            input_features, input_features_mask=input_features_mask, return_dict=True, **kwargs
        )
        audio_embeds = self.multi_modal_projector(audio_output.last_hidden_state)
        # Mask according to avg pooling (which is after attention blocks)
        # NOTE(review): this single-stage `(l - 2) // 2 + 1` length formula differs
        # from the processor's `_get_audio_token_length` (conv downsample then avg
        # pool). Verify both produce the same per-sample token counts, otherwise
        # the masked_scatter in `forward` would receive a mismatched number of
        # embeddings.
        post_lengths = (input_features_mask.sum(-1) - 2) // 2 + 1
        valid_mask = torch.arange(audio_embeds.shape[1], device=post_lengths.device)[None, :] < post_lengths[:, None]
        # Flatten to only the valid (non-padded) audio embeddings across the batch.
        audio_embeds = audio_embeds[valid_mask.to(audio_embeds.device)]
        audio_output.pooler_output = audio_embeds
        return audio_output

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        input_features: torch.FloatTensor | None = None,
        input_features_mask: torch.Tensor | None = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: Cache | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        labels: torch.LongTensor | None = None,
        use_cache: bool | None = None,
        cache_position: torch.LongTensor | None = None,
        logits_to_keep: int | torch.Tensor = 0,
        **kwargs: Unpack[TransformersKwargs],
    ) -> CausalLMOutputWithPast:
        r"""
        input_features_mask (`torch.Tensor` of shape `(batch_size, feature_sequence_length)`):
            Mask to avoid performing attention on padding feature indices. Mask values selected in `[0, 1]`:
            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Example:

        ```python
        >>> from transformers import AudioFlamingo3ForConditionalGeneration, AutoProcessor

        >>> model_id = "nvidia/audio-flamingo-3-hf"
        >>> processor = AutoProcessor.from_pretrained(model_id)
        >>> model = AudioFlamingo3ForConditionalGeneration.from_pretrained(model_id, device_map="auto")

        >>> conversations = [
        >>>     [
        >>>         {
        >>>             "role": "user",
        >>>             "content": [
        >>>                 {"type": "text", "text": "Transcribe the input speech."},
        >>>                 {
        >>>                     "type": "audio",
        >>>                     "path": "https://huggingface.co/datasets/nvidia/AudioSkills/resolve/main/assets/t_837b89f2-26aa-4ee2-bdf6-f73f0dd59b26.wav",
        >>>                 },
        >>>             ],
        >>>         }
        >>>     ],
        >>>     [
        >>>         {
        >>>             "role": "user",
        >>>             "content": [
        >>>                 {
        >>>                     "type": "text",
        >>>                     "text": "This track feels really peaceful and introspective. What elements make it feel so calming and meditative?",
        >>>                 },
        >>>                 {"type": "audio", "path": "https://huggingface.co/datasets/nvidia/AudioSkills/resolve/main/assets/FPSbCAANfbJLVSwD.mp3"},
        >>>             ],
        >>>         }
        >>>     ],
        >>> ]

        >>> inputs = processor.apply_chat_template(
        >>>     conversations,
        >>>     tokenize=True,
        >>>     add_generation_prompt=True,
        >>>     return_dict=True,
        >>> ).to(model.device)

        >>> outputs = model.generate(**inputs, max_new_tokens=500)
        >>> decoded_outputs = processor.batch_decode(
        >>>     outputs[:, inputs["input_ids"].shape[1]:], skip_special_tokens=True
        >>> )
        >>> print(decoded_outputs)
        ["The spoken content of the audio is...", "The track's calming and meditative feel can be attributed to..."]
        ```"""
        if inputs_embeds is None:
            inputs_embeds = self.get_input_embeddings()(input_ids)
        if input_features is not None and input_ids is not None:
            audio_embeds = self.get_audio_features(input_features, input_features_mask, return_dict=True).pooler_output
            # replace text-audio token placeholders with audio embeddings
            audio_token_mask = (input_ids == self.config.audio_token_id).unsqueeze(-1)
            inputs_embeds = inputs_embeds.masked_scatter(
                audio_token_mask.to(inputs_embeds.device), audio_embeds.to(inputs_embeds.device)
            )
        outputs: CausalLMOutputWithPast = self.language_model(
            inputs_embeds=inputs_embeds,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            labels=labels,
            use_cache=use_cache,
            cache_position=cache_position,
            logits_to_keep=logits_to_keep,
            **kwargs,
        )
        return outputs

    def prepare_inputs_for_generation(self, *args, **kwargs):
        # Overwritten -- we should not pass input_features when we are in cached decoding stage
        input_features = kwargs.pop("input_features", None)
        input_features_mask = kwargs.pop("input_features_mask", None)
        cache_position = kwargs.get("cache_position")
        model_inputs = super().prepare_inputs_for_generation(*args, **kwargs)
        if cache_position is not None and model_inputs["cache_position"][0] == 0:
            # input_features should only be passed when we are not in cached decoding stage
            if input_features is not None:
                model_inputs["input_features"] = input_features
            if input_features_mask is not None:
                model_inputs["input_features_mask"] = input_features_mask
        return model_inputs
# Public symbols re-exported by the package-level __init__.
__all__ = ["AudioFlamingo3ForConditionalGeneration", "AudioFlamingo3PreTrainedModel", "AudioFlamingo3Encoder"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/audioflamingo3/modular_audioflamingo3.py",
"license": "Apache License 2.0",
"lines": 259,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/audioflamingo3/processing_audioflamingo3.py | # Copyright 2025 NVIDIA CORPORATION and the HuggingFace Inc. team. All rights
# reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import numpy as np
from ...audio_utils import AudioInput, make_list_of_audio
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessingKwargs, ProcessorMixin, Unpack
from ...tokenization_utils_base import TextInput
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class AudioFlamingo3ProcessorKwargs(ProcessingKwargs, total=False):
    # Defaults applied by `_merge_kwargs` when callers do not override them.
    _defaults = {
        "text_kwargs": {
            "padding": True,
        },
        "audio_kwargs": {
            # Whisper-style extraction: 16 kHz audio in 30 s windows, padded to
            # max length with an attention mask so real frame counts survive.
            "sampling_rate": 16000,
            "chunk_length": 30.0,
            "return_attention_mask": True,
            "padding": "max_length",
        },
        "common_kwargs": {
            "return_tensors": "pt",
            "padding_side": "left",
        },
    }
class AudioFlamingo3Processor(ProcessorMixin):
    r"""
    Constructs an AudioFlamingo3 processor which wraps an AudioFlamingo3 feature extractor and an AudioFlamingo3
    tokenizer into a single processor.

    [`AudioFlamingo3Processor`] offers all the functionalities of [`WhisperFeatureExtractor`] and
    [`Qwen2TokenizerFast`]. See the [`~AudioFlamingo3Processor.__call__`] for more information.

    Args:
        feature_extractor ([`WhisperFeatureExtractor`]):
            The feature extractor is a required input.
        tokenizer ([`Qwen2TokenizerFast`]):
            The tokenizer is a required input.
        chat_template (`Optional[str]`, *optional*):
            The Jinja template to use for formatting the conversation. If not provided, the tokenizer's default chat
            template will be used.
        audio_token (`Optional[str]`, *optional*, defaults to `"<sound>"`):
            Special token used to represent audio inputs in the chat template.
        default_transcription_prompt (`str`, *optional*, defaults to `"Transcribe the input speech."`):
            Default prompt to use for transcription tasks when applying transcription requests.
        max_audio_len (`int`, *optional*, defaults to 600):
            Maximum length of audio sequences in seconds. Audio longer than this will be truncated.
    """

    def __init__(
        self,
        feature_extractor,
        tokenizer,
        chat_template=None,
        audio_token="<sound>",
        default_transcription_prompt="Transcribe the input speech.",
        max_audio_len=600,
    ):
        # Resolve the placeholder token's id once so `__call__` can mask it out
        # of training labels without re-querying the tokenizer.
        self.audio_token = audio_token
        self.audio_token_id = tokenizer.convert_tokens_to_ids(audio_token)
        self.default_transcription_prompt = default_transcription_prompt
        self.max_audio_len = max_audio_len
        super().__init__(feature_extractor, tokenizer, chat_template=chat_template)

    def _get_audio_token_length(self, audio_lengths: "torch.Tensor") -> "torch.Tensor":
        """Map raw feature-frame lengths to the number of audio tokens the encoder emits."""
        conv_output_lengths = (audio_lengths - 1) // 2 + 1  # After conv2 downsampling
        audio_tokens_lengths = (conv_output_lengths - 2) // 2 + 1  # After avg pooling
        return audio_tokens_lengths

    def __call__(
        self,
        text: TextInput | list[TextInput],
        audio: AudioInput | None = None,
        output_labels: bool | None = False,
        **kwargs: Unpack[AudioFlamingo3ProcessorKwargs],
    ) -> BatchFeature:
        r"""
        Main method to prepare one or several text sequence(s) and audio waveform(s) for the model. This
        method expands `<sound>` placeholders in the text based on the post-pool frame counts of the
        audio windows, then tokenizes the provided strings as-is, and extracts log-mel features
        with [`WhisperFeatureExtractor`]. If `audio` is `None`, no audio processing is performed and
        the text is tokenized as-is (LM-only behavior).

        Args:
            text (`str` or `list[str]`):
                Input sequence or batch of sequences.
            audio (`np.ndarray` or `list[np.ndarray]`):
                Input audio or batch of audios as NumPy arrays. If provided, there must be as many `text` inputs as
                `audio` inputs.
            output_labels (bool, *optional*, default=False):
                Whether to return labels for training.

        Returns:
            [`BatchFeature`]: A dictionary with tokenized text (`input_ids`, `attention_mask`) and
            audio features (`input_features`, `input_features_mask`).
        """
        # Merge defaults with user kwargs
        call_kwargs = self._merge_kwargs(
            AudioFlamingo3ProcessorKwargs,
            tokenizer_init_kwargs=self.tokenizer.init_kwargs,
            **kwargs,
        )
        text_kwargs = call_kwargs["text_kwargs"]
        audio_kwargs = call_kwargs["audio_kwargs"]
        return_tensors = text_kwargs.get("return_tensors")
        if return_tensors != "pt":
            raise ValueError(f"{self.__class__.__name__} only supports `return_tensors='pt'`.")
        if isinstance(text, str):
            text = [text]
        elif not (isinstance(text, (list, tuple)) and all(isinstance(t, str) for t in text)):
            raise ValueError("Invalid input text. Please provide a string, or a list of strings")
        audio_inputs = {}
        if audio is not None:
            audio = make_list_of_audio(audio)
            if len(text) != len(audio):
                raise ValueError(f"Got {len(text)} text but {len(audio)} audios; they must match 1:1.")
            # Determine number of chunks per sample, and flatten
            # Each sample is split into fixed 30 s windows, capped at
            # `max_audio_len` seconds; windows are flattened so the feature
            # extractor sees one batch of equal-length chunks.
            window_size = int(audio_kwargs["sampling_rate"] * audio_kwargs["chunk_length"])
            max_windows = int(self.max_audio_len // audio_kwargs["chunk_length"])
            per_sample_windows: list[int] = []
            flat_chunks: list[np.ndarray] = []
            for audio_el in audio:
                n_samples = int(audio_el.shape[0])
                n_win = max(1, (n_samples + window_size - 1) // window_size)
                if n_win > max_windows:
                    logger.warning(
                        f"Audio duration ({n_samples / audio_kwargs['sampling_rate']:.1f}s) exceeds {self.max_audio_len}s; truncating to first {self.max_audio_len}s."
                    )
                    n_win = max_windows
                per_sample_windows.append(n_win)
                time_cap = min(n_samples, n_win * window_size)
                for i in range(n_win):
                    start = i * window_size
                    end = min((i + 1) * window_size, time_cap)
                    flat_chunks.append(audio_el[start:end])
            # Feature extraction
            audio_inputs = self.feature_extractor(flat_chunks, **audio_kwargs)
            padding_mask = audio_inputs.pop("attention_mask")
            audio_inputs["input_features_mask"] = padding_mask
            # Compute sequence lengths token counting
            # Sum real frames per window, then re-aggregate windows back to
            # their originating sample before converting to token counts.
            audio_lengths = torch.stack([s.sum() for s in torch.split(padding_mask.sum(-1), per_sample_windows)])
            audio_tokens_lengths = self._get_audio_token_length(audio_lengths)
            # expand audio tokens in text
            for i, audio_length in enumerate(audio_tokens_lengths):
                expanded = re.sub(re.escape(self.audio_token), self.audio_token * audio_length, text[i])
                text[i] = expanded
        # Tokenize
        text_inputs = self.tokenizer(text, **text_kwargs)
        data = {**text_inputs, **audio_inputs}
        if output_labels:
            # Training labels: copy input ids, but never compute loss on audio
            # placeholders or padding.
            labels = data["input_ids"].clone()
            labels[labels == self.audio_token_id] = -100
            labels[labels == self.tokenizer.pad_token_id] = -100
            data["labels"] = labels
        return BatchFeature(data=data, tensor_type=return_tensors)

    @property
    def model_input_names(self) -> list[str]:
        # Union of tokenizer and feature-extractor input names plus our extra
        # mask key, de-duplicated while preserving order.
        tok_names = self.tokenizer.model_input_names
        fea_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tok_names + fea_names + ["input_features_mask"]))

    def apply_transcription_request(
        self,
        audio: str | list[str] | AudioInput,
        prompt: str | list[str] | None = None,
        **kwargs: Unpack[AudioFlamingo3ProcessorKwargs],
    ) -> BatchFeature:
        """
        Prepare inputs for automatic speech recognition without manually writing the default transcription prompt.

        Args:
            audio (`str`, `list[str]`, `np.ndarray`, `torch.Tensor`, `list[np.ndarray]`, `list[torch.Tensor]`):
                Audio to transcribe. Strings are interpreted as local paths or URLs and will be loaded automatically by
                the chat template loader; NumPy arrays and PyTorch tensors are forwarded directly.
            prompt (`str` or `list[str]`, *optional*):
                Custom prompt(s) to include in the user turn. A list must be the same length as the batch. When `None`,
                each sample uses `"Transcribe the input speech."`.
            **kwargs:
                Additional keyword arguments forwarded to [`~AudioFlamingo3Processor.apply_chat_template`] (for example
                `text_kwargs`, `audio_kwargs`, ...).

        Returns:
            [`BatchFeature`]: Processor outputs ready to be passed to [`AudioFlamingo3ForConditionalGeneration.generate`].
        """
        # Normalize `audio` to a list of path strings and/or NumPy arrays.
        if isinstance(audio, str):
            audio_items: list[str | np.ndarray] = [audio]
        elif isinstance(audio, (list, tuple)) and audio and all(isinstance(el, str) for el in audio):
            audio_items = list(audio)
        else:
            audio_items = list(make_list_of_audio(audio))
            if is_torch_available():
                audio_items = [el.detach().cpu().numpy() if isinstance(el, torch.Tensor) else el for el in audio_items]
        batch_size = len(audio_items)
        if batch_size == 0:
            raise ValueError("`audio` must contain at least one sample.")
        # Normalize `prompt` to one string per audio sample.
        if prompt is None:
            prompts = [self.default_transcription_prompt] * batch_size
        elif isinstance(prompt, str):
            prompts = [prompt] * batch_size
        elif isinstance(prompt, (list, tuple)):
            if len(prompt) != batch_size:
                raise ValueError(
                    f"Received {len(prompt)} prompt(s) for {batch_size} audio sample(s); counts must match."
                )
            prompts = []
            for item in prompt:
                if item is None:
                    prompts.append(self.default_transcription_prompt)
                elif isinstance(item, str):
                    prompts.append(item)
                else:
                    raise TypeError("Each prompt must be a string or `None`.")
        else:
            raise TypeError("`prompt` must be a string, a sequence of strings, or `None`.")
        # Build one single-turn user conversation per sample; path strings go
        # under "path" so the chat template loader fetches them.
        conversations = [
            [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": prompt_text},
                        {"type": "audio", "path": audio_item}
                        if isinstance(audio_item, str)
                        else {"type": "audio", "audio": audio_item},
                    ],
                }
            ]
            for prompt_text, audio_item in zip(prompts, audio_items)
        ]
        return self.apply_chat_template(
            conversations,
            tokenize=True,
            add_generation_prompt=True,
            return_dict=True,
            **kwargs,
        )

    def batch_decode(self, *args, strip_prefix=False, **kwargs):
        """
        Forward arguments to [`~PreTrainedTokenizer.batch_decode`] and optionally remove the assistant framing the model
        was trained to produce.

        AF3 transcription requests respond with sentences such as `"The spoken content of the audio is \"...\"."`.
        Setting `strip_prefix=True` trims the fixed prefix for just the transcription text.
        """
        decoded = self.tokenizer.batch_decode(*args, **kwargs)
        if strip_prefix:
            decoded = [self._strip_assistant_prefix_and_quotes(text) for text in decoded]
        return decoded

    def _strip_assistant_prefix_and_quotes(self, text: str) -> str:
        """
        Remove the assistant prefix and surrounding quotes from a decoded transcription string.
        """
        stripped = text.strip()
        # Drop the first known assistant preamble, then a trailing period, then
        # one pair of matching surrounding quotes.
        for prefix in (
            "The spoken content of the audio is",
            "The transcription of the audio is",
        ):
            if stripped.startswith(prefix):
                stripped = stripped[len(prefix) :].strip()
                break
        if stripped.endswith("."):
            stripped = stripped[:-1].strip()
        if len(stripped) >= 2 and stripped[0] == stripped[-1] and stripped[0] in {"'", '"'}:
            stripped = stripped[1:-1].strip()
        return stripped
# Explicit public API of this module.
__all__ = ["AudioFlamingo3Processor"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/audioflamingo3/processing_audioflamingo3.py",
"license": "Apache License 2.0",
"lines": 269,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:tests/models/audioflamingo3/test_modeling_audioflamingo3.py | # Copyright 2025 NVIDIA CORPORATION and the HuggingFace Inc. team. All rights
# reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch AudioFlamingo3 model."""
import json
import tempfile
import unittest
from pathlib import Path
import pytest
from transformers import (
AudioFlamingo3Config,
AudioFlamingo3ForConditionalGeneration,
AutoProcessor,
is_torch_available,
)
from transformers.testing_utils import (
cleanup,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
if is_torch_available():
import torch
class AudioFlamingo3ModelTester:
    """
    Builds a tiny AudioFlamingo3 config and synthetic inputs that respect AF3's
    post-pool token accounting: num <sound> tokens per sample == post-pool frame count.
    """

    def __init__(
        self,
        parent,
        audio_token_id=0,
        seq_length=25,
        feat_seq_length=60,
        text_config=None,
        audio_config=None,
        is_training=True,
    ):
        self.parent = parent
        self.audio_token_id = audio_token_id
        self.seq_length = seq_length
        self.feat_seq_length = feat_seq_length
        self.is_training = is_training
        # Default to a small Qwen2-ish text backbone when none is supplied.
        if text_config is None:
            text_config = {
                "model_type": "qwen2",
                "intermediate_size": 36,
                "initializer_range": 0.02,
                "hidden_size": 32,
                "max_position_embeddings": 52,
                "num_hidden_layers": 2,
                "num_attention_heads": 4,
                "num_key_value_heads": 2,
                "use_labels": True,
                "use_mrope": False,
                "vocab_size": 99,
                "pad_token_id": 1,  # Ensure pad token != audio token
            }
        # Default to a small AF3 Whisper-style audio encoder when none is supplied.
        if audio_config is None:
            audio_config = {
                "model_type": "audioflamingo3_encoder",
                "hidden_size": 16,
                "num_attention_heads": 4,
                "intermediate_size": 16,
                "num_hidden_layers": 2,
                "num_mel_bins": 80,
                "max_source_positions": 30,
                "initializer_range": 0.02,
            }
        self.text_config = text_config
        self.audio_config = audio_config
        self.batch_size = 3
        # Mirror the text backbone dims on the tester; the common mixins read these.
        self.vocab_size = text_config["vocab_size"]
        self.hidden_size = text_config["hidden_size"]
        self.num_attention_heads = text_config["num_attention_heads"]
        self.num_hidden_layers = text_config["num_hidden_layers"]
        self.encoder_seq_length = seq_length

    def get_config(self):
        """Assemble the composite AF3 config from the tiny sub-configs."""
        return AudioFlamingo3Config(
            text_config=self.text_config,
            audio_config=self.audio_config,
            audio_token_id=self.audio_token_id,
        )

    def prepare_config_and_inputs(self):
        """Create random mel features plus an all-valid per-window mel mask."""
        # Shape: (#windows == batch_size, n_mels, T_mel)
        mel_features = floats_tensor(
            [self.batch_size, self.audio_config["num_mel_bins"], self.feat_seq_length]
        )
        config = self.get_config()
        # All ones => every mel frame of every window is valid.
        mel_mask = torch.ones([self.batch_size, self.feat_seq_length], dtype=torch.bool).to(torch_device)
        return config, mel_features, mel_mask

    def _post_pool_tokens_per_window(self, T_mel):
        # Mirror AF3 processor math:
        frames_after_first_stage = (T_mel - 1) // 2 + 1
        frames_after_second_stage = (frames_after_first_stage - 2) // 2 + 1
        return frames_after_second_stage

    def prepare_config_and_inputs_for_common(self):
        config, mel_features, mel_mask = self.prepare_config_and_inputs()
        # Every window shares the same T_mel here, so one count covers all samples.
        audio_token_count = self._post_pool_tokens_per_window(mel_features.shape[-1])
        # Draw ids from [2, vocab) so regular tokens never collide with the audio token.
        input_ids = ids_tensor([self.batch_size, self.seq_length], config.text_config.vocab_size - 2) + 2
        attention_mask = torch.ones_like(input_ids, dtype=torch.long, device=torch_device)
        attention_mask[:, :1] = 0  # left padding sentinel
        # Place exactly K <sound> tokens right after the padding position, per sample.
        input_ids[:, 1 : 1 + audio_token_count] = config.audio_token_id
        inputs_dict = {
            "input_features": mel_features,
            "input_features_mask": mel_mask,
            "input_ids": input_ids,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict
@require_torch
class AudioFlamingo3ForConditionalGenerationModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
    """
    Model tester for `AudioFlamingo3ForConditionalGeneration`.
    """

    all_model_classes = (AudioFlamingo3ForConditionalGeneration,) if is_torch_available() else ()
    # TODO: @eustlb, this is incorrect
    pipeline_model_mapping = (
        {
            "text-to-speech": AudioFlamingo3ForConditionalGeneration,
            "audio-text-to-text": AudioFlamingo3ForConditionalGeneration,
        }
        if is_torch_available()
        else {}
    )
    # Composite audio+text model; common tests branch on this flag.
    _is_composite = True

    def setUp(self):
        # Tiny-config/input builder shared with the common mixins, plus the config test driver.
        self.model_tester = AudioFlamingo3ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AudioFlamingo3Config, has_text_modality=False)

    @unittest.skip(
        reason="This test does not apply to AudioFlamingo3 since inputs_embeds corresponding to audio tokens are replaced when input features are provided."
    )
    def test_inputs_embeds_matches_input_ids(self):
        pass

    @unittest.skip(reason="Compile not yet supported for AudioFlamingo3 models")
    @pytest.mark.torch_compile_test
    def test_sdpa_can_compile_dynamic(self):
        pass

    @unittest.skip(reason="Compile not yet supported for AudioFlamingo3 models")
    def test_sdpa_can_dispatch_on_flash(self):
        pass

    @unittest.skip(reason="AudioFlamingo3 tests avoid right-padding equivalence; fusion is in-place.")
    def test_flash_attn_2_inference_equivalence_right_padding(self):
        pass

    @unittest.skip(reason="AudioFlamingo3 has no separate base model without a head.")
    def test_model_base_model_prefix(self):
        pass

    def test_sdpa_can_dispatch_composite_models(self):
        """Check that the requested attention implementation propagates to both submodules on load."""
        # AF3 is audio+text composite; verify SDPA toggles propagate to submodules.
        if not self.has_attentions:
            self.skipTest(reason="Model architecture does not support attentions")
        if not self._is_composite:
            self.skipTest(f"{self.all_model_classes[0].__name__} does not support SDPA")
        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            model = model_class(config)
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                # SDPA (default)
                model_sdpa = model_class.from_pretrained(tmpdirname)
                model_sdpa = model_sdpa.eval().to(torch_device)
                # A submodule that does not support SDPA silently falls back to eager.
                text_attn = "sdpa" if model.language_model._supports_sdpa else "eager"
                audio_attn = "sdpa" if model.audio_tower._supports_sdpa else "eager"
                self.assertTrue(model_sdpa.config._attn_implementation == "sdpa")
                self.assertTrue(model.language_model.config._attn_implementation == text_attn)
                self.assertTrue(model.audio_tower.config._attn_implementation == audio_attn)
                # Eager
                model_eager = model_class.from_pretrained(tmpdirname, attn_implementation="eager")
                model_eager = model_eager.eval().to(torch_device)
                self.assertTrue(model_eager.config._attn_implementation == "eager")
                self.assertTrue(model_eager.language_model.config._attn_implementation == "eager")
                self.assertTrue(model_eager.audio_tower.config._attn_implementation == "eager")
                # Requesting eager must not leave any SDPA attention modules behind.
                for _, submodule in model_eager.named_modules():
                    class_name = submodule.__class__.__name__
                    if "SdpaAttention" in class_name or "SdpaSelfAttention" in class_name:
                        raise ValueError("The eager model should not have SDPA attention layers")
@require_torch
class AudioFlamingo3ForConditionalGenerationIntegrationTest(unittest.TestCase):
    """
    Slow tests against the public checkpoint to validate processor-model alignment and in-place fusion.
    """

    @classmethod
    def setUpClass(cls):
        # Fix: this hook was previously a `@classmethod` named `setUp`, so unittest invoked it
        # before EVERY test and the (read-only) processor was rebuilt each time. `setUpClass`
        # runs once for the whole class, which is what the `cls.` assignments intended.
        cls.checkpoint = "nvidia/audio-flamingo-3-hf"
        cls.processor = AutoProcessor.from_pretrained(cls.checkpoint)

    def setUp(self):
        # Free accelerator memory before each test (previously bundled into the class hook).
        cleanup(torch_device, gc_collect=True)

    def tearDown(self):
        # Free accelerator memory after each test so later tests start clean.
        cleanup(torch_device, gc_collect=True)

    @slow
    def test_fixture_single_matches(self):
        """
        Single-sample generation must reproduce the committed fixture exactly.

        reproducer (creates JSON directly in repo): https://gist.github.com/ebezzam/c979f0f1a2b9223fa137faf1c02022d4#file-reproducer-py
        """
        # Expected token ids / transcriptions were generated once with the reproducer above.
        path = Path(__file__).parent.parent.parent / "fixtures/audioflamingo3/expected_results_single.json"
        with open(path, "r", encoding="utf-8") as f:
            raw = json.load(f)
        exp_ids = torch.tensor(raw["token_ids"])
        exp_txt = raw["transcriptions"]
        conversation = [
            {
                "role": "user",
                "content": [
                    {
                        "type": "text",
                        "text": "Transcribe the input speech.",
                    },
                    {
                        "type": "audio",
                        "path": "https://huggingface.co/datasets/nvidia/AudioSkills/resolve/main/assets/Why_do_we_ask_questions_converted.wav",
                    },
                ],
            }
        ]
        model = AudioFlamingo3ForConditionalGeneration.from_pretrained(
            self.checkpoint, device_map=torch_device, dtype=torch.bfloat16
        ).eval()
        batch = self.processor.apply_chat_template(
            conversation, tokenize=True, add_generation_prompt=True, return_dict=True
        ).to(model.device, dtype=model.dtype)
        seq = model.generate(**batch)
        # Compare only the newly generated suffix; some generate paths return just the new tokens.
        inp_len = batch["input_ids"].shape[1]
        gen_ids = seq[:, inp_len:] if seq.shape[1] >= inp_len else seq
        torch.testing.assert_close(gen_ids.cpu(), exp_ids)
        txt = self.processor.batch_decode(gen_ids, skip_special_tokens=True)
        self.assertListEqual(txt, exp_txt)

    @slow
    def test_fixture_batched_matches(self):
        """
        Batched generation (two different prompts/audios) must reproduce the committed fixture.

        reproducer (creates JSON directly in repo): https://gist.github.com/ebezzam/c979f0f1a2b9223fa137faf1c02022d4#file-reproducer-py
        """
        path = Path(__file__).parent.parent.parent / "fixtures/audioflamingo3/expected_results_batched.json"
        with open(path, "r", encoding="utf-8") as f:
            raw = json.load(f)
        exp_ids = torch.tensor(raw["token_ids"])
        exp_txt = raw["transcriptions"]
        conversations = [
            [
                {
                    "role": "user",
                    "content": [
                        {
                            "type": "text",
                            "text": "What is surprising about the relationship between the barking and the music?",
                        },
                        {
                            "type": "audio",
                            "path": "https://huggingface.co/datasets/nvidia/AudioSkills/resolve/main/assets/dogs_barking_in_sync_with_the_music.wav",
                        },
                    ],
                }
            ],
            [
                {
                    "role": "user",
                    "content": [
                        {
                            "type": "text",
                            "text": "Why is the philosopher's name mentioned in the lyrics? "
                            "(A) To express a sense of nostalgia "
                            "(B) To indicate that language cannot express clearly, satirizing the inversion of black and white in the world "
                            "(C) To add depth and complexity to the lyrics "
                            "(D) To showcase the wisdom and influence of the philosopher",
                        },
                        {
                            "type": "audio",
                            "path": "https://huggingface.co/datasets/nvidia/AudioSkills/resolve/main/assets/Ch6Ae9DT6Ko_00-04-03_00-04-31.wav",
                        },
                    ],
                }
            ],
        ]
        model = AudioFlamingo3ForConditionalGeneration.from_pretrained(
            self.checkpoint, device_map=torch_device, dtype=torch.bfloat16
        ).eval()
        batch = self.processor.apply_chat_template(
            conversations, tokenize=True, add_generation_prompt=True, return_dict=True
        ).to(model.device, dtype=model.dtype)
        seq = model.generate(**batch)
        # Compare only the newly generated suffix; some generate paths return just the new tokens.
        inp_len = batch["input_ids"].shape[1]
        gen_ids = seq[:, inp_len:] if seq.shape[1] >= inp_len else seq
        torch.testing.assert_close(gen_ids.cpu(), exp_ids)
        txt = self.processor.batch_decode(gen_ids, skip_special_tokens=True)
        self.assertListEqual(txt, exp_txt)
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/audioflamingo3/test_modeling_audioflamingo3.py",
"license": "Apache License 2.0",
"lines": 303,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:tests/models/audioflamingo3/test_processing_audioflamingo3.py | # Copyright 2025 NVIDIA CORPORATION and the HuggingFace Inc. team. All rights
# reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import shutil
import tempfile
import unittest
from parameterized import parameterized
from transformers import (
AudioFlamingo3Processor,
AutoProcessor,
AutoTokenizer,
WhisperFeatureExtractor,
)
from transformers.testing_utils import require_librosa, require_torch, require_torchaudio
from ...test_processing_common import MODALITY_INPUT_DATA, ProcessorTesterMixin
class AudioFlamingo3ProcessorTest(ProcessorTesterMixin, unittest.TestCase):
    """Save/load, tokenizer, and chat-template tests for `AudioFlamingo3Processor`."""

    processor_class = AudioFlamingo3Processor

    @classmethod
    @require_torch
    @require_torchaudio
    def setUpClass(cls):
        # Download the processor once and re-save it locally; tests reload from tmpdirname.
        cls.checkpoint = "nvidia/audio-flamingo-3-hf"
        cls.tmpdirname = tempfile.mkdtemp()
        processor = AudioFlamingo3Processor.from_pretrained(cls.checkpoint)
        processor.save_pretrained(cls.tmpdirname)

    @require_torch
    @require_torchaudio
    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    @require_torch
    @require_torchaudio
    def get_audio_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).audio_processor

    @require_torch
    @require_torchaudio
    def get_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs)

    @classmethod
    def tearDownClass(cls):
        shutil.rmtree(cls.tmpdirname, ignore_errors=True)

    @require_torch
    @require_torchaudio
    def test_can_load_various_tokenizers(self):
        # The processor's bundled tokenizer must match the standalone AutoTokenizer class.
        processor = AudioFlamingo3Processor.from_pretrained(self.checkpoint)
        tokenizer = AutoTokenizer.from_pretrained(self.checkpoint)
        self.assertEqual(processor.tokenizer.__class__, tokenizer.__class__)

    @require_torch
    @require_torchaudio
    def test_save_load_pretrained_default(self):
        # Rebuild a processor from its components, round-trip it through disk,
        # and check both components survive unchanged.
        tokenizer = AutoTokenizer.from_pretrained(self.checkpoint)
        processor = AudioFlamingo3Processor.from_pretrained(self.checkpoint)
        feature_extractor = processor.feature_extractor
        processor = AudioFlamingo3Processor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        with tempfile.TemporaryDirectory() as tmpdir:
            processor.save_pretrained(tmpdir)
            reloaded = AudioFlamingo3Processor.from_pretrained(tmpdir)
        self.assertEqual(reloaded.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertEqual(reloaded.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(reloaded.feature_extractor, WhisperFeatureExtractor)

    @require_torch
    @require_torchaudio
    def test_tokenizer_integration(self):
        # Slow and fast tokenizers must agree on the chat-formatted prompt, including <sound>.
        slow_tokenizer = AutoTokenizer.from_pretrained(self.checkpoint, use_fast=False)
        fast_tokenizer = AutoTokenizer.from_pretrained(self.checkpoint, from_slow=True, legacy=False)
        prompt = (
            "<|im_start|>system\nAnswer the questions.<|im_end|>"
            "<|im_start|>user\n<sound>What is it?<|im_end|>"
            "<|im_start|>assistant\n"
        )
        EXPECTED_OUTPUT = [
            "<|im_start|>",
            "system",
            "Ċ",
            "Answer",
            "Ġthe",
            "Ġquestions",
            ".",
            "<|im_end|>",
            "<|im_start|>",
            "user",
            "Ċ",
            "<sound>",
            "What",
            "Ġis",
            "Ġit",
            "?",
            "<|im_end|>",
            "<|im_start|>",
            "assistant",
            "Ċ",
        ]
        self.assertEqual(slow_tokenizer.tokenize(prompt), EXPECTED_OUTPUT)
        self.assertEqual(fast_tokenizer.tokenize(prompt), EXPECTED_OUTPUT)

    @require_torch
    @require_torchaudio
    def test_chat_template(self):
        # The chat template must render one <sound> placeholder before the user text.
        processor = AutoProcessor.from_pretrained(self.checkpoint)
        expected_prompt = (
            "<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n"
            "<|im_start|>user\n<sound>What is surprising about the relationship between the barking and the music?<|im_end|>\n"
            "<|im_start|>assistant\n"
        )
        conversations = [
            {
                "role": "user",
                "content": [
                    {
                        "type": "text",
                        "text": "What is surprising about the relationship between the barking and the music?",
                    },
                    {
                        "type": "audio",
                        "path": "https://huggingface.co/datasets/nvidia/AudioSkills/resolve/main/assets/dogs_barking_in_sync_with_the_music.wav",
                    },
                ],
            }
        ]
        formatted = processor.tokenizer.apply_chat_template(conversations, tokenize=False, add_generation_prompt=True)
        self.assertEqual(expected_prompt, formatted)

    @require_torch
    @require_torchaudio
    def test_apply_transcription_request_single(self):
        # The transcription helper must be equivalent to manually building the default chat request.
        processor = AutoProcessor.from_pretrained(self.checkpoint)
        audio_url = "https://huggingface.co/datasets/nvidia/AudioSkills/resolve/main/assets/t_837b89f2-26aa-4ee2-bdf6-f73f0dd59b26.wav"
        helper_outputs = processor.apply_transcription_request(audio=audio_url)
        conversation = [
            {
                "role": "user",
                "content": [
                    {"type": "text", "text": "Transcribe the input speech."},
                    {"type": "audio", "audio": audio_url},
                ],
            }
        ]
        manual_outputs = processor.apply_chat_template(
            conversation,
            tokenize=True,
            add_generation_prompt=True,
            return_dict=True,
        )
        for key in ("input_ids", "attention_mask", "input_features", "input_features_mask"):
            self.assertIn(key, helper_outputs)
            self.assertTrue(helper_outputs[key].equal(manual_outputs[key]))

    # Overwrite to remove skip numpy inputs (still need to keep as many cases as parent)
    @require_librosa
    @parameterized.expand([(1, "np"), (1, "pt"), (2, "np"), (2, "pt")])
    def test_apply_chat_template_audio(self, batch_size: int, return_tensors: str):
        if return_tensors == "np":
            self.skipTest("AudioFlamingo3 only supports PyTorch tensors")
        self._test_apply_chat_template(
            "audio", batch_size, return_tensors, "audio_input_name", "feature_extractor", MODALITY_INPUT_DATA["audio"]
        )
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/audioflamingo3/test_processing_audioflamingo3.py",
"license": "Apache License 2.0",
"lines": 163,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:tests/fsdp/test_context_parallel.py | # Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import sys
from pathlib import Path
from transformers import is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_accelerate,
require_torch_multi_accelerator,
run_first,
slow,
)
if is_torch_available():
import torch
from transformers import (
AutoModelForCausalLM,
AutoTokenizer,
DataCollatorForLanguageModeling,
HfArgumentParser,
Trainer,
TrainingArguments,
)
class TestContextParallel(TestCasePlus):
    """Test Trainer with Torch context parallelism enabled via accelerate's ParallelismConfig."""

    @require_torch_multi_accelerator
    @require_accelerate
    @slow
    @run_first
    def test_cp_equivalence(self):
        """Test that CP produces the same losses as without CP."""
        # Shared setup
        world_size = 2
        # This very file doubles as the training script re-launched below (see the __main__ block).
        script_path = __file__
        # Step 1: Run with CP enabled (cp_size=world_size)
        cp_yes_output_dir = Path(self.get_auto_remove_tmp_dir()).resolve()
        cp_yes_config_path = cp_yes_output_dir / "context_parallel_config.yaml"
        cp_yes_losses_path = cp_yes_output_dir / "cp_yes_losses.json"
        # Write config file inline (self-contained test)
        with open(cp_yes_config_path, "w") as f:
            f.write(
                f"""distributed_type: FSDP
fsdp_config:
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_state_dict_type: SHARDED_STATE_DICT
  fsdp_version: 2
mixed_precision: bf16
num_processes: {world_size}
parallelism_config:
  parallelism_config_dp_replicate_size: 1
  parallelism_config_dp_shard_size: 1
  parallelism_config_tp_size: 1
  parallelism_config_cp_size: {world_size}
  parallelism_config_cp_comm_strategy: alltoall
"""
            )
        # Whitespace inside the command template is irrelevant: .split() tokenizes it.
        cmd_cp_yes = f"""
        accelerate launch
        --config_file {cp_yes_config_path}
        {script_path}
        --output_dir {cp_yes_output_dir}
        --report_to none
        --max_steps 10
        --per_device_train_batch_size 1
        --gradient_accumulation_steps 1
        --logging_steps 1
        --remove_unused_columns False
        --seed 42
        --loss_output_file {cp_yes_losses_path}
        """.split()
        execute_subprocess_async(cmd_cp_yes, env=self.get_env())
        # Step 2: Run without CP (FSDP with num_processes=1, no parallelism_config)
        cp_no_output_dir = Path(self.get_auto_remove_tmp_dir()).resolve()
        cp_no_config_path = cp_no_output_dir / "context_parallel_config.yaml"
        cp_no_losses_path = cp_no_output_dir / "cp_no_losses.json"
        # Write config file inline (self-contained test)
        with open(cp_no_config_path, "w") as f:
            f.write(
                """distributed_type: FSDP
fsdp_config:
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_state_dict_type: SHARDED_STATE_DICT
  fsdp_transformer_layer_cls_to_wrap: LlamaDecoderLayer
  fsdp_version: 2
mixed_precision: bf16
num_processes: 1
"""
            )
        cmd_cp_no = f"""
        accelerate launch
        --config_file {cp_no_config_path}
        {script_path}
        --output_dir {cp_no_output_dir}
        --report_to none
        --max_steps 10
        --per_device_train_batch_size 1
        --gradient_accumulation_steps 1
        --logging_steps 1
        --remove_unused_columns False
        --seed 42
        --loss_output_file {cp_no_losses_path}
        """.split()
        execute_subprocess_async(cmd_cp_no, env=self.get_env())
        # Compare losses - should be very close since CP just splits sequence computation
        with open(cp_yes_losses_path) as f:
            cp_yes_losses = json.load(f)
        with open(cp_no_losses_path) as f:
            cp_no_losses = json.load(f)
        assert len(cp_yes_losses) == len(cp_no_losses), (
            f"Different number of losses: CP has {len(cp_yes_losses)}, no-CP has {len(cp_no_losses)}"
        )
        # CP should produce very similar results (small numerical differences expected)
        # The differences come from:
        # - Different gradient reduction patterns in distributed training
        # - BF16 mixed precision accumulated differences
        # - Sequence splitting and gathering in CP mode
        cp_yes_losses_tensor = torch.tensor(cp_yes_losses)
        cp_no_losses_tensor = torch.tensor(cp_no_losses)
        # Use torch.testing.assert_close with rtol=2% and atol=0.02
        # Testing shows actual differences are typically <1.5%
        torch.testing.assert_close(
            cp_yes_losses_tensor,
            cp_no_losses_tensor,
            rtol=2e-2,  # 2% relative tolerance
            atol=2e-2,  # 0.02 absolute tolerance
            msg=f"CP losses {cp_yes_losses} do not match non-CP losses {cp_no_losses}",
        )
if __name__ == "__main__":
    # Worker entry point: this same file is re-launched by `accelerate launch` in the test above.
    # Parse custom arguments (not TrainingArguments parameters)
    loss_output_file = None
    if "--loss_output_file" in sys.argv:
        idx = sys.argv.index("--loss_output_file")
        loss_output_file = sys.argv[idx + 1]
        # Remove the flag and its value BEFORE HfArgumentParser sees argv, or parsing would fail.
        sys.argv.pop(idx)
        sys.argv.pop(idx)
    parser = HfArgumentParser((TrainingArguments,))
    training_args = parser.parse_args_into_dataclasses()[0]
    # Use SmolLM (small Llama-based model that works with CP)
    model_name = "HuggingFaceTB/SmolLM-135M"
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    if tokenizer.pad_token is None:
        tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(
        model_name,
        attn_implementation="sdpa",  # CP requires SDPA
        dtype=torch.float32,
    )
    # Create simple dataset: just tokenize some text
    texts = [
        "The quick brown fox jumps over the lazy dog. " * 10,
        "Hello world, this is a test sentence for training. " * 10,
    ] * 4  # 8 samples total
    def tokenize_function(examples):
        # NOTE(review): `examples` is a single string here (one sample), not a batch dict.
        return tokenizer(examples, max_length=128, truncation=True, padding="max_length")
    train_dataset = [tokenize_function(text) for text in texts]
    # Use standard DataCollatorForLanguageModeling for causal LM
    # pad_to_multiple_of=4 ensures sequences are divisible by cp_size * 2 (for cp_size=2)
    # Trainer will automatically generate position_ids and shift_labels as needed
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer,
        mlm=False,  # Causal language modeling
        pad_to_multiple_of=4,
    )
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        data_collator=data_collator,
    )
    # Train for a few steps
    trainer.train()
    # Verify training completed
    assert trainer.state.global_step > 0, "Training should have completed at least one step"
    # Save losses to file if requested (for equivalence testing); only rank 0 writes.
    if loss_output_file and training_args.process_index == 0:
        losses = [log["loss"] for log in trainer.state.log_history if "loss" in log]
        with open(loss_output_file, "w") as f:
            json.dump(losses, f)
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/fsdp/test_context_parallel.py",
"license": "Apache License 2.0",
"lines": 190,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:src/transformers/models/fuyu/image_processing_fuyu_fast.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fast Image processor class for Fuyu."""
import math
from typing import Optional
import torch
from ...image_processing_utils import get_size_dict
from ...image_processing_utils_fast import (
BaseImageProcessorFast,
group_images_by_shape,
reorder_images,
)
from ...image_utils import (
ImageInput,
PILImageResampling,
SizeDict,
)
from ...utils import (
TensorType,
auto_docstring,
is_torchvision_available,
logging,
requires_backends,
)
from .image_processing_fuyu import FuyuBatchFeature, FuyuImagesKwargs, make_list_of_list_of_images
if is_torchvision_available():
import torchvision.transforms.v2.functional as tvF
logger = logging.get_logger(__name__)
@auto_docstring
class FuyuImageProcessorFast(BaseImageProcessorFast):
do_resize = True
size = {"height": 1080, "width": 1920}
patch_size = {"height": 30, "width": 30}
resample = PILImageResampling.BILINEAR
do_pad = True
padding_value = 1.0
padding_mode = "constant"
do_normalize = True
image_mean = 0.5
image_std = 0.5
do_rescale = True
rescale_factor = 1 / 255
model_input_names = [
"images",
"image_input_ids",
"image_patches",
"image_patch_indices_per_batch",
"image_patch_indices_per_subsequence",
]
valid_kwargs = FuyuImagesKwargs
def _prepare_images_structure(
self,
images: ImageInput,
expected_ndims: int = 3,
) -> ImageInput:
images = self.fetch_images(images)
return make_list_of_list_of_images(images)
def resize(
self,
image: torch.Tensor,
size: SizeDict,
interpolation: Optional["tvF.InterpolationMode"] = None,
antialias: bool = True,
**kwargs,
) -> torch.Tensor:
"""
Resize an image to fit within `(size["height"], size["width"])` while maintaining aspect ratio.
Only resizes if the image is larger than the target size.
Args:
image (`torch.Tensor`):
Image to resize.
size (`SizeDict`):
Dictionary in the format `{"height": int, "width": int}` specifying the max size of the output image.
interpolation (`InterpolationMode`, *optional*, defaults to `InterpolationMode.BILINEAR`):
`InterpolationMode` filter to use when resizing the image e.g. `InterpolationMode.BILINEAR`.
antialias (`bool`, *optional*, defaults to `True`):
Whether to apply antialiasing when resizing.
"""
interpolation = interpolation if interpolation is not None else tvF.InterpolationMode.BILINEAR
image_height, image_width = image.shape[-2:]
target_height, target_width = size.height, size.width
# Only resize if image is larger than target
if image_width <= target_width and image_height <= target_height:
return image
# Calculate optimal scale factor to fit within target size
height_scale_factor = target_height / image_height
width_scale_factor = target_width / image_width
optimal_scale_factor = min(height_scale_factor, width_scale_factor)
new_height = int(image_height * optimal_scale_factor)
new_width = int(image_width * optimal_scale_factor)
return super().resize(
image, SizeDict(height=new_height, width=new_width), interpolation=interpolation, antialias=antialias
)
    def _preprocess(
        self,
        images: list["torch.Tensor"],
        do_resize: bool,
        size: SizeDict,
        interpolation: Optional["tvF.InterpolationMode"],
        do_rescale: bool,
        rescale_factor: float,
        do_normalize: bool,
        image_mean: float | list[float] | None,
        image_std: float | list[float] | None,
        do_pad: bool | None,
        padding_value: float | None,
        padding_mode: str | None,
        disable_grouping: bool | None,
        return_tensors: str | TensorType | None,
        **kwargs,
    ) -> FuyuBatchFeature:
        """
        Resize, pad, rescale and normalize a nested (per-sample) list of images.

        Returns a `FuyuBatchFeature` holding the processed images plus each sample's
        unpadded (post-resize) height/width and its resize scale factor.
        """
        # Group images by size for batched resizing
        # NOTE(review): sizes are read from the first image of each sample — assumes one image per sample.
        original_image_sizes = [batch_image[0].shape[-2:] for batch_image in images if batch_image]
        grouped_images, grouped_images_index = group_images_by_shape(
            images, disable_grouping=disable_grouping, is_nested=True
        )
        resized_images_grouped = {}
        for shape, stacked_images in grouped_images.items():
            if do_resize:
                stacked_images = self.resize(image=stacked_images, size=size, interpolation=interpolation)
            resized_images_grouped[shape] = stacked_images
        resized_images = reorder_images(resized_images_grouped, grouped_images_index, is_nested=True)
        # Post-resize, pre-padding sizes: these are the "unpadded" dims reported downstream.
        image_sizes = [batch_image[0].shape[-2:] for batch_image in resized_images if batch_image]
        image_unpadded_heights = [[image_size[0]] for image_size in image_sizes]
        image_unpadded_widths = [[image_size[1]] for image_size in image_sizes]
        # Height-based ratio only; resize preserves aspect ratio, so one factor suffices per sample.
        image_scale_factors = [
            [resized_size[0] / original_size[0]]
            for original_size, resized_size in zip(original_image_sizes, image_sizes)
        ]
        if do_pad:
            resized_images = self.pad(
                resized_images,
                pad_size=size,
                fill_value=padding_value,
                padding_mode=padding_mode,
                disable_grouping=disable_grouping,
                is_nested=True,
            )
        # Group images by size for further processing
        # Needed in case do_resize is False, or resize returns images with different sizes
        grouped_images, grouped_images_index = group_images_by_shape(
            resized_images, disable_grouping=disable_grouping, is_nested=True
        )
        processed_images_grouped = {}
        for shape, stacked_images in grouped_images.items():
            # Fused rescale and normalize
            stacked_images = self.rescale_and_normalize(
                stacked_images, do_rescale, rescale_factor, do_normalize, image_mean, image_std
            )
            processed_images_grouped[shape] = stacked_images
        processed_images = reorder_images(processed_images_grouped, grouped_images_index, is_nested=True)
        return FuyuBatchFeature(
            data={
                "images": processed_images,
                "image_unpadded_heights": image_unpadded_heights,
                "image_unpadded_widths": image_unpadded_widths,
                "image_scale_factors": image_scale_factors,
            },
            tensor_type=return_tensors,
        )
def get_num_patches(self, image_height: int, image_width: int, patch_size: SizeDict | None = None) -> int:
"""
Calculate number of patches required to encode an image.
Args:
image_height (`int`):
Height of the image.
image_width (`int`):
Width of the image.
patch_size (`SizeDict`, *optional*):
Dictionary in the format `{"height": int, "width": int}` specifying the size of the patches.
"""
if patch_size is None:
patch_size = SizeDict(**self.patch_size)
patch_height, patch_width = patch_size.height, patch_size.width
if image_height % patch_height != 0:
raise ValueError(f"{image_height=} must be divisible by {patch_height}")
if image_width % patch_width != 0:
raise ValueError(f"{image_width=} must be divisible by {patch_width}")
num_patches_per_dim_h = image_height // patch_height
num_patches_per_dim_w = image_width // patch_width
num_patches = num_patches_per_dim_h * num_patches_per_dim_w
return num_patches
def patchify_image(self, image: torch.Tensor, patch_size: SizeDict | None = None) -> torch.Tensor:
"""
Convert an image into a tensor of patches using PyTorch's unfold operation.
Args:
image (`torch.Tensor`):
Image to convert. Shape: [batch, channels, height, width]
patch_size (`SizeDict`, *optional*):
Dictionary in the format `{"height": int, "width": int}` specifying the size of the patches.
"""
requires_backends(self, ["torch"])
if patch_size is None:
patch_size = SizeDict(**self.patch_size)
patch_height, patch_width = patch_size.height, patch_size.width
batch_size, channels, _, _ = image.shape
# Use unfold to extract patches
unfolded_along_height = image.unfold(2, patch_height, patch_height)
patches = unfolded_along_height.unfold(3, patch_width, patch_width)
patches = patches.contiguous()
# Reshape to [batch, num_patches, channels * patch_h * patch_w]
patches = patches.view(batch_size, channels, -1, patch_height, patch_width)
patches = patches.permute(0, 2, 3, 4, 1)
patches = patches.reshape(batch_size, -1, channels * patch_height * patch_width)
return patches
    def preprocess_with_tokenizer_info(
        self,
        image_input: torch.Tensor,
        image_present: torch.Tensor,
        image_unpadded_h: torch.Tensor,
        image_unpadded_w: torch.Tensor,
        image_placeholder_id: int,
        image_newline_id: int,
        variable_sized: bool,
        patch_size: dict[str, int] | None = None,
    ) -> FuyuBatchFeature:
        """
        Process images for model input. In particular, variable-sized images are handled here.

        Args:
            image_input (`torch.Tensor` of shape [batch_size, subsequence_size, num_channels, height, width]):
                Tensor of images padded to model input size.
            image_present (`torch.Tensor` of shape [batch_size, subsequence_size, num_images]):
                Tensor of 1s and 0s indicating whether an image is present.
            image_unpadded_h (`torch.Tensor` of shape [batch_size, subsequence_size]):
                Tensor of unpadded image heights.
            image_unpadded_w (`torch.Tensor` of shape [batch_size, subsequence_size]):
                Tensor of unpadded image widths.
            image_placeholder_id (int):
                The id of the image placeholder token. Comes from an associated tokenizer.
            image_newline_id (int):
                The id of the image newline token. Comes from an associated tokenizer.
            variable_sized (bool):
                Whether to process images as variable-sized.
            patch_size (`dict[str, int]`, *optional*):
                Size of the patches.

        Returns:
            `FuyuBatchFeature` with per-batch, per-subsequence lists of: cropped images,
            image token id streams (placeholders plus, when `variable_sized`, row-terminating
            newline ids), flattened patches, and patch-index streams (batch-global and
            per-subsequence), with -1 marking non-patch positions.
        """
        requires_backends(self, ["torch"])
        if patch_size is None:
            patch_size = SizeDict(**self.patch_size)
        else:
            patch_size = SizeDict(**patch_size)
        patch_height, patch_width = patch_size.height, patch_size.width

        # Only images that are present
        images: list[list[torch.Tensor]] = []
        batch_image_patches: list[list[torch.Tensor]] = []
        # Image input ids for every subsequence, including ones with no image present
        batch_image_input_ids: list[list[torch.Tensor]] = []
        for batch_index in range(image_input.shape[0]):
            image_input_ids = []
            image_patches = []
            for subseq_index in range(image_input.shape[1]):
                if image_present[batch_index, subseq_index]:
                    image = image_input[batch_index, subseq_index]
                    image_height, image_width = image.shape[1], image.shape[2]
                    if variable_sized:
                        # Calculate new dimensions based on unpadded size, rounded up to
                        # a whole number of patches.
                        # The min() is required here due to floating point issues
                        new_h = min(
                            image_height,
                            math.ceil(image_unpadded_h[batch_index, subseq_index] / patch_height) * patch_height,
                        )
                        new_w = min(
                            image_width,
                            math.ceil(image_unpadded_w[batch_index, subseq_index] / patch_width) * patch_width,
                        )
                        # Crop away the padding beyond the rounded-up unpadded extent.
                        image = image[:, :new_h, :new_w]
                        image_height, image_width = new_h, new_w
                    num_patches = self.get_num_patches(
                        image_height=image_height, image_width=image_width, patch_size=patch_size
                    )
                    # Create tensor of placeholder IDs, one per patch.
                    tensor_of_image_ids = torch.full(
                        [num_patches], image_placeholder_id, dtype=torch.int32, device=image_input.device
                    )
                    # Patchify the image
                    patches = self.patchify_image(image=image.unsqueeze(0), patch_size=patch_size).squeeze(0)
                    # NOTE(review): sanity check only — stripped when Python runs with -O.
                    assert num_patches == patches.shape[0]
                    if variable_sized:
                        # Terminate each patch-row with a newline ID so the token stream
                        # encodes the 2D layout of the image.
                        tensor_of_image_ids = tensor_of_image_ids.reshape(-1, image_width // patch_width)
                        newline_ids = torch.full(
                            [tensor_of_image_ids.shape[0], 1],
                            image_newline_id,
                            dtype=torch.int32,
                            device=image_input.device,
                        )
                        tensor_of_image_ids = torch.cat([tensor_of_image_ids, newline_ids], dim=1)
                        tensor_of_image_ids = tensor_of_image_ids.reshape(-1)
                    images.append([image])
                    image_input_ids.append(tensor_of_image_ids)
                    image_patches.append(patches)
                else:
                    # No image in this subsequence: keep an empty id stream as a placeholder.
                    image_input_ids.append(torch.tensor([], dtype=torch.int32, device=image_input.device))
            batch_image_input_ids.append(image_input_ids)
            batch_image_patches.append(image_patches)

        # Create image patch indices: for every placeholder token, its patch index
        # (running across the whole batch item, and restarting per subsequence).
        image_patch_indices_per_batch: list[list[torch.Tensor]] = []
        image_patch_indices_per_subsequence: list[list[torch.Tensor]] = []
        for sample_image_input_ids in batch_image_input_ids:
            index_offset = 0
            per_batch_indices = []
            per_subsequence_indices = []
            for subseq_image_input_ids in sample_image_input_ids:
                # Indices of image patches
                patches_mask = subseq_image_input_ids == image_placeholder_id
                num_patches = torch.count_nonzero(patches_mask)
                indices = torch.arange(num_patches, dtype=torch.int64, device=subseq_image_input_ids.device).type_as(
                    subseq_image_input_ids
                )
                # Place those indices in the image input ids token stream, with -1 representing non-index tokens
                indices_in_stream_per_batch = torch.full_like(subseq_image_input_ids, -1)
                indices_in_stream_per_subsequence = torch.full_like(subseq_image_input_ids, -1)
                patches_inds = torch.nonzero(patches_mask, as_tuple=True)[0]
                indices_in_stream_per_batch[patches_inds] = indices + index_offset
                indices_in_stream_per_subsequence[patches_inds] = indices
                per_batch_indices.append(indices_in_stream_per_batch)
                per_subsequence_indices.append(indices_in_stream_per_subsequence)
                # Offset accumulates across subsequences so batch-level indices are global.
                index_offset += num_patches
            image_patch_indices_per_batch.append(per_batch_indices)
            image_patch_indices_per_subsequence.append(per_subsequence_indices)
        return FuyuBatchFeature(
            data={
                "images": images,
                "image_input_ids": batch_image_input_ids,
                "image_patches": batch_image_patches,
                "image_patch_indices_per_batch": image_patch_indices_per_batch,
                "image_patch_indices_per_subsequence": image_patch_indices_per_subsequence,
            }
        )
def _further_process_kwargs(
self,
patch_size: dict[str, int] | None = None,
**kwargs,
) -> dict:
"""
Process Fuyu-specific kwargs before validation.
"""
kwargs = super()._further_process_kwargs(**kwargs)
if patch_size is not None:
patch_size = SizeDict(**get_size_dict(patch_size, param_name="patch_size"))
kwargs["patch_size"] = patch_size
return kwargs
# Public API of this module, consumed by the library's lazy import machinery.
__all__ = ["FuyuImageProcessorFast"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/fuyu/image_processing_fuyu_fast.py",
"license": "Apache License 2.0",
"lines": 354,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/glpn/image_processing_glpn_fast.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fast Image processor class for GLPN."""
from typing import Optional
import torch
import torchvision.transforms.v2.functional as tvF
from ...image_processing_utils import BatchFeature
from ...image_processing_utils_fast import BaseImageProcessorFast, group_images_by_shape, reorder_images
from ...image_utils import (
PILImageResampling,
SizeDict,
)
from ...utils import (
TensorType,
auto_docstring,
requires_backends,
)
from .image_processing_glpn import GLPNImageProcessorKwargs
@auto_docstring
class GLPNImageProcessorFast(BaseImageProcessorFast):
    # Defaults: resize each dimension down to a multiple of `size_divisor` and
    # rescale pixel values by 1/255; `_preprocess` defaults `do_normalize` to False.
    do_resize = True
    do_rescale = True
    rescale_factor = 1 / 255
    resample = PILImageResampling.BILINEAR
    size_divisor = 32
    valid_kwargs = GLPNImageProcessorKwargs

    def _validate_preprocess_kwargs(self, **kwargs):
        # GLPN resizes via `size_divisor` rather than a fixed `size`;
        # pop `do_resize` to not raise an error as `size` is not None
        kwargs.pop("do_resize", None)
        return super()._validate_preprocess_kwargs(**kwargs)

    def resize(
        self,
        image: "torch.Tensor",
        size_divisor: int,
        interpolation: Optional["tvF.InterpolationMode"] = None,
        antialias: bool = True,
        **kwargs,
    ) -> "torch.Tensor":
        """
        Resize an image so both dimensions become the closest lower multiple of `size_divisor`.

        Args:
            image (`torch.Tensor`):
                Image to resize.
            size_divisor (`int`):
                Height and width are each rounded down to a multiple of this value.
            interpolation (`InterpolationMode`, *optional*, defaults to `InterpolationMode.BILINEAR`):
                `InterpolationMode` filter to use when resizing the image e.g. `InterpolationMode.BICUBIC`.
            antialias (`bool`, *optional*, defaults to `True`):
                Whether to use antialiasing.

        Returns:
            `torch.Tensor`: The resized image.
        """
        height, width = image.shape[-2:]
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        return super().resize(
            image, SizeDict(height=new_h, width=new_w), interpolation=interpolation, antialias=antialias
        )

    def _preprocess(
        self,
        images: list["torch.Tensor"],
        do_resize: bool,
        size_divisor: int | None = None,
        interpolation: Optional["tvF.InterpolationMode"] = None,
        do_rescale: bool = True,
        rescale_factor: float | None = 1 / 255,
        do_normalize: bool = False,
        image_mean: float | list[float] | None = None,
        image_std: float | list[float] | None = None,
        disable_grouping: bool | None = None,
        return_tensors: str | TensorType | None = None,
        resample: PILImageResampling | None = None,
        **kwargs,
    ) -> BatchFeature:
        # Batch same-shaped images together so resize/rescale run once per shape
        # group instead of once per image.
        grouped_images, grouped_index = group_images_by_shape(images, disable_grouping=disable_grouping)
        processed_groups = {}
        for shape, stacked_images in grouped_images.items():
            if do_resize:
                stacked_images = self.resize(stacked_images, size_divisor=size_divisor, interpolation=interpolation)
            stacked_images = self.rescale_and_normalize(
                stacked_images, do_rescale, rescale_factor, do_normalize, image_mean, image_std
            )
            processed_groups[shape] = stacked_images
        # Restore the caller's original image ordering.
        processed_images = reorder_images(processed_groups, grouped_index)
        return BatchFeature(data={"pixel_values": processed_images}, tensor_type=return_tensors)

    def post_process_depth_estimation(self, outputs, target_sizes=None):
        """
        Convert raw model outputs to final depth predictions.
        Mirrors slow GLPN: PyTorch interpolate w/ bicubic, align_corners=False.
        """
        requires_backends(self, "torch")
        predicted_depth = outputs.predicted_depth
        results = []
        # No target sizes means depths are returned at the model's native resolution.
        target_sizes = target_sizes or [None] * predicted_depth.shape[0]
        for depth, target_size in zip(predicted_depth, target_sizes):
            if target_size is not None:
                # Add batch and channel dimensions for interpolation
                depth_4d = depth[None, None, ...]
                resized = torch.nn.functional.interpolate(
                    depth_4d, size=target_size, mode="bicubic", align_corners=False
                )
                depth = resized.squeeze(0).squeeze(0)
            results.append({"predicted_depth": depth})
        return results
# Public API of this module, consumed by the library's lazy import machinery.
__all__ = ["GLPNImageProcessorFast"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/glpn/image_processing_glpn_fast.py",
"license": "Apache License 2.0",
"lines": 117,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:tests/kernels/test_kernels.py | # Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Run the test: CUDA_VISIBLE_DEVICES=0 RUN_SLOW=1 pytest -sv tests/kernels/test_kernels.py
import copy
import os
import types
from unittest.mock import MagicMock, patch
from transformers import AutoModelForCausalLM, AutoTokenizer, KernelConfig
from transformers.integrations.hub_kernels import (
_HUB_KERNEL_MAPPING,
_KERNEL_MODULE_MAPPING,
is_kernel,
lazy_load_kernel,
load_and_register_attn_kernel,
)
from transformers.masking_utils import ALL_MASK_ATTENTION_FUNCTIONS
from transformers.modeling_utils import ALL_ATTENTION_FUNCTIONS
from transformers.testing_utils import (
TestCasePlus,
cleanup,
require_kernels,
require_torch_accelerator,
slow,
torch_device,
)
from transformers.utils.import_utils import is_kernels_available
# `kernels` is an optional dependency; only import it when installed.
# The tests below are gated by @require_kernels accordingly.
if is_kernels_available():
    import kernels as kernels_pkg
    from kernels import Device, Mode, kernelize
import transformers.integrations.hub_kernels as hub_kernels_pkg
@require_kernels
@slow
class TestHubKernels(TestCasePlus):
    """End-to-end tests for loading models with hub kernels enabled/disabled."""

    @classmethod
    def setUpClass(cls):
        # Load the same checkpoint twice — once kernelized, once not — so tests
        # can compare module `forward` implementations between the two.
        cls.model_id = "unsloth/Llama-3.2-1B-Instruct"
        cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_id)
        cls.model_kernelized = AutoModelForCausalLM.from_pretrained(
            cls.model_id, use_kernels=True, device_map=torch_device
        )
        cls.model_not_kernelized = AutoModelForCausalLM.from_pretrained(
            cls.model_id, use_kernels=False, device_map=torch_device
        )
        cls.input = "Hello"

    @classmethod
    def tearDownClass(cls):
        # Drop the large model/tokenizer references so memory is reclaimed early.
        for attr in [
            "model_kernelized",
            "model_not_kernelized",
            "tokenizer",
        ]:
            if hasattr(cls, attr):
                try:
                    delattr(cls, attr)
                except Exception as e:
                    print(f"Could not delete attribute {attr}: {e}")
        # Clear any temporary kernel module cache entries populated by tests
        try:
            keys_to_remove = [
                k for k, v in list(_KERNEL_MODULE_MAPPING.items()) if v is None or isinstance(v, types.ModuleType)
            ]
            for k in keys_to_remove:
                _KERNEL_MODULE_MAPPING.pop(k, None)
        except Exception as e:
            print(f"Could not clear kernel module cache: {e}")

    def tearDown(self):
        # Free accelerator memory/cache and trigger GC
        cleanup(torch_device, gc_collect=True)

    @require_torch_accelerator
    def test_forward(self):
        """Greedy generation with kernels enabled produces one of the known outputs."""
        tokenized_input = self.tokenizer(self.input, return_tensors="pt").input_ids.to(self.model_kernelized.device)
        output_ = self.model_kernelized.generate(tokenized_input, max_new_tokens=10, do_sample=False)
        output = self.tokenizer.decode(output_[0], skip_special_tokens=True)
        # Multiple acceptable completions: output can vary across hardware/kernels.
        self.EXPECTED_OUTPUT = set()
        self.EXPECTED_OUTPUT.add("Hello, I'm looking for a reliable and trustworthy online")
        self.EXPECTED_OUTPUT.add("Hello! I'm excited to be a part of this")
        self.assertTrue(output in self.EXPECTED_OUTPUT)

    def test_getter_use_kernels(self):
        self.assertTrue(self.model_kernelized.use_kernels)
        self.assertFalse(self.model_not_kernelized.use_kernels)

    def assert_kernelized_forward_is_different(self, kernelized_model, not_kernelized_model):
        """
        Iterate over modules and check if the forward method is different between
        the kernelized and not kernelized models. Break on first difference, else continue.
        Finally, assert that at least one forward is different.
        """
        found_difference = False
        for (name1, module1), (name2, module2) in zip(
            kernelized_model.named_modules(), not_kernelized_model.named_modules()
        ):
            # Only compare modules with the same name
            if name1 != name2:
                continue
            # Check if both modules have a 'forward' attribute
            if hasattr(module1, "forward") and hasattr(module2, "forward"):
                # Compare the code objects of the forward methods
                code1 = getattr(module1.forward, "__code__", None)
                code2 = getattr(module2.forward, "__code__", None)
                if code1 is not None and code2 is not None:
                    # Identity check: kernelized modules get a different function object.
                    if code1 is not code2:
                        found_difference = True
                        break
        self.assertTrue(
            found_difference,
            "No module's forward method was different between kernelized and not kernelized models.",
        )

    def assert_kernelized_forward_is_the_same(self, model_1, model_2):
        """
        Iterate over modules and check if the forward method is the same between
        the kernelized and not kernelized models. Break on first difference, else continue.
        Finally, assert that at least one forward is the same.
        """
        no_difference = True
        for (name1, module1), (name2, module2) in zip(model_1.named_modules(), model_2.named_modules()):
            # Only compare modules with the same name
            if name1 != name2:
                continue
            # Check if both modules have a 'forward' attribute
            if hasattr(module1, "forward") and hasattr(module2, "forward"):
                # Compare the code objects of the forward methods
                code1 = getattr(module1.forward, "__code__", None)
                code2 = getattr(module2.forward, "__code__", None)
                if code1 is not None and code2 is not None:
                    if code1 != code2:
                        no_difference = False
                        break
        self.assertTrue(
            no_difference,
            "All module's forward methods were the same between the two models",
        )

    def test_kernelize(self):
        """Manually kernelizing a plain model matches the model loaded with use_kernels=True."""
        model = copy.deepcopy(self.model_not_kernelized)
        kernelize(model, mode=Mode.INFERENCE, device=Device(type=model.device.type))  # type: ignore[arg-type]
        self.assert_kernelized_forward_is_different(model, self.model_not_kernelized)
        self.assert_kernelized_forward_is_the_same(model, self.model_kernelized)
        del model

    def test_setter_use_kernels(self):
        """Setting `use_kernels = True` at runtime kernelizes the model in place."""
        model = copy.deepcopy(self.model_not_kernelized)
        model.use_kernels = True
        self.assertTrue(model.use_kernels)
        self.assert_kernelized_forward_is_different(model, self.model_not_kernelized)
        self.assert_kernelized_forward_is_the_same(model, self.model_kernelized)
        del model

    def test_unkernelize(self):
        """Disabling kernels at runtime only warns — there is no unkernelize routine."""
        model = copy.deepcopy(self.model_kernelized)
        with self.assertLogs("transformers.modeling_utils", level="WARNING") as cm:
            model.use_kernels = False
        self.assertTrue(
            any(
                "Disabling kernels at runtime is a no-op as there is no 'unkernelize' routine; keeping current kernels active."
                in msg
                for msg in cm.output
            )
        )
        self.assertFalse(model.use_kernels)
        del model

    def test_kernels_mapping(self):
        """A custom kernel mapping for RMSNorm still produces the expected generation."""
        kernel_config = KernelConfig(kernel_mapping={"RMSNorm": "kernels-community/layer_norm:LlamaRMSNorm"})
        model = AutoModelForCausalLM.from_pretrained(
            "unsloth/Llama-3.2-1B-Instruct", use_kernels=True, device_map=torch_device, kernel_config=kernel_config
        )
        EXPECTED_OUTPUT = set()
        EXPECTED_OUTPUT.add("Hello, I'm looking for a reliable and trustworthy online")
        tokenized_input = self.tokenizer(self.input, return_tensors="pt").input_ids.to(model.device)
        output = model.generate(tokenized_input, max_new_tokens=10, do_sample=False)
        output = self.tokenizer.decode(output[0], skip_special_tokens=True)
        self.assertTrue(output in EXPECTED_OUTPUT)
        del model

    def test_faulty_kernel_mapping_layer_name(self):
        # An unknown layer name in the mapping must be rejected at load time.
        kernel_config = KernelConfig(kernel_mapping={"RMSNorm1": "kernels-community/layer_norm:LlamaRMSNorm"})
        with self.assertRaises(ValueError):
            _ = AutoModelForCausalLM.from_pretrained(
                "unsloth/Llama-3.2-1B-Instruct", use_kernels=True, device_map=torch_device, kernel_config=kernel_config
            )

    def test_faulty_kernel_mapping_type(self):
        # A non-string/non-dict mapping value must be rejected at load time.
        kernel_config = KernelConfig(kernel_mapping={"RMSNorm": 1})
        with self.assertRaises(ValueError):
            _ = AutoModelForCausalLM.from_pretrained(
                "unsloth/Llama-3.2-1B-Instruct", use_kernels=True, device_map=torch_device, kernel_config=kernel_config
            )
@require_kernels
class TestKernelsEnv(TestCasePlus):
    """Checks that the `USE_HUB_KERNELS` environment variable gates hub kernels."""

    def _kernels_enabled_under_env(self, value):
        # `_kernels_enabled` is computed at import time, so the module must be
        # reloaded while the patched environment is active.
        with patch.dict(os.environ, {"USE_HUB_KERNELS": value}):
            import importlib

            from transformers.integrations import hub_kernels

            importlib.reload(hub_kernels)
            return hub_kernels._kernels_enabled

    def test_disable_hub_kernels(self):
        self.assertFalse(self._kernels_enabled_under_env("OFF"))

    def test_enable_hub_kernels(self):
        self.assertTrue(self._kernels_enabled_under_env("ON"))
@require_kernels
class TestKernelUtilities(TestCasePlus):
    """Unit tests for the kernel-name regex and the `lazy_load_kernel` helper."""

    def test_is_kernel_regex(self):
        # `is_kernel` accepts "org/model" optionally followed by "@revision"
        # and/or ":function_name"; anything else is rejected.
        valid = [
            "org/model",
            "org/model@main",
            "org/model:my_func",
            "org/model@v1.2.3:my_func",
            "flash|org/model@rev:fn",
        ]
        invalid = [
            "org//model",
            "org/model:too:many",
            "org/model@rev:fn:extra",
            "/org/model",
            "org:model",
        ]
        for s in valid:
            self.assertTrue(is_kernel(s.split("|")[-1]))
        for s in invalid:
            self.assertFalse(is_kernel(s))

    def test_lazy_load_kernel_success_and_cache(self):
        sentinel = types.SimpleNamespace(name="sentinel")

        def fake_get_kernel(repo_id, revision=None, version=None):
            self.assertIn(repo_id, {"kernels-community/causal-conv1d"})
            return sentinel

        _KERNEL_MODULE_MAPPING.pop("causal-conv1d", None)
        try:
            # Patch the `get_kernel` reference that `lazy_load_kernel` actually
            # resolves (the one on the hub_kernels module). `patch.object` restores
            # that same attribute on exit; the previous implementation patched
            # `hub_kernels_pkg` but restored `kernels_pkg`, leaking the fake
            # across tests.
            with patch.object(hub_kernels_pkg, "get_kernel", fake_get_kernel):
                mod1 = lazy_load_kernel("causal-conv1d")
                self.assertIs(mod1, sentinel)
                mod2 = lazy_load_kernel("causal-conv1d")
                self.assertIs(mod2, sentinel)
        finally:
            # Ensure cache is cleared to avoid holding onto module references across tests
            _KERNEL_MODULE_MAPPING.pop("causal-conv1d", None)

    def test_lazy_load_kernel_unknown(self):
        # Unknown kernels resolve to None but are still memoized in the cache.
        name = "unknown-kernel-name"
        _KERNEL_MODULE_MAPPING.pop(name, None)
        mod = lazy_load_kernel(name)
        self.assertIsNone(mod)
        self.assertIn(name, _KERNEL_MODULE_MAPPING)
        # Cleanup cache entry to avoid growth across tests
        _KERNEL_MODULE_MAPPING.pop(name, None)

    def test_lazy_load_kernel_version(self):
        HUB = _HUB_KERNEL_MAPPING
        name = "causal-conv1d"
        version_spec = ">=0.0.4,<0.1.0"
        original_entry = HUB.get(name, None)
        # Use a real ModuleType so caching short-circuits on the second call
        sentinel_mod = types.ModuleType("sentinel_kernel_module")
        call_count = {"n": 0}

        def fake_get_kernel(repo_id, revision=None, version=None):
            call_count["n"] += 1
            self.assertEqual(repo_id, "kernels-community/causal-conv1d")
            self.assertIsNone(revision, "revision must not be set when version is provided")
            self.assertEqual(version, version_spec)
            return sentinel_mod

        try:
            # Inject dict-style mapping with repo_id and version
            HUB[name] = {"repo_id": "kernels-community/causal-conv1d", "version": version_spec}  # type: ignore[assignment]
            _KERNEL_MODULE_MAPPING.pop(name, None)
            # Patch (and automatically restore) the reference `lazy_load_kernel`
            # uses; previously the restore targeted `kernels_pkg` instead of the
            # patched `hub_kernels_pkg`, so the fake survived past this test.
            with patch.object(hub_kernels_pkg, "get_kernel", fake_get_kernel):
                mod1 = lazy_load_kernel(name)
                mod2 = lazy_load_kernel(name)
            # Assert
            self.assertIs(mod1, sentinel_mod)
            self.assertIs(mod2, sentinel_mod)
            self.assertEqual(call_count["n"], 1, "second call should hit the cache")
        finally:
            # Restore the hub mapping to avoid side effects on other tests
            if original_entry is None:
                HUB.pop(name, None)
            else:
                HUB[name] = original_entry
            _KERNEL_MODULE_MAPPING.pop(name, None)
@require_kernels
class TestAttentionKernelRegistration(TestCasePlus):
    """Tests for registering hub attention kernels into the global attention registries."""

    def _cleanup_attn_registration(self, attn_impl):
        # Remove the registered implementation from both global registries so state
        # cannot leak across tests. Factored out of the two tests below, and called
        # from a `finally` so cleanup also runs when an assertion fails (previously
        # a failing assertion skipped cleanup entirely).
        try:
            ALL_ATTENTION_FUNCTIONS.pop(attn_impl, None)
        except Exception as e:
            print(f"Could not clean up `ALL_ATTENTION_FUNCTIONS`: {e}")
        try:
            ALL_MASK_ATTENTION_FUNCTIONS.pop(attn_impl, None)
        except Exception as e:
            print(f"Could not clean up `ALL_MASK_ATTENTION_FUNCTIONS`: {e}")

    def test_load_and_register_flash_attn_like_kernel(self):
        # A kernel exposing `flash_attn_varlen_func` is treated as flash-attention-like.
        kernel_obj = types.SimpleNamespace(flash_attn_varlen_func=lambda *a, **k: None)
        attn_impl = "org/model"
        try:
            with (
                patch("transformers.integrations.hub_kernels.get_kernel", return_value=kernel_obj),
                patch("transformers.modeling_flash_attention_utils.lazy_import_flash_attention", return_value=None),
            ):
                load_and_register_attn_kernel(attn_impl)
                self.assertIn(attn_impl, ALL_ATTENTION_FUNCTIONS.valid_keys())
        finally:
            self._cleanup_attn_registration(attn_impl)

    def test_load_and_register_named_function_kernel(self):
        # "org/model:my_func" registers the named attribute as the attention function.
        def my_attention(*args, **kwargs):
            return None

        kernel_obj = types.SimpleNamespace(my_func=my_attention)
        attn_impl = "org/model:my_func"
        try:
            with patch("transformers.integrations.hub_kernels.get_kernel", return_value=kernel_obj):
                load_and_register_attn_kernel(attn_impl)
                self.assertIn(attn_impl, ALL_ATTENTION_FUNCTIONS.valid_keys())
        finally:
            self._cleanup_attn_registration(attn_impl)
@require_kernels
class TestUseKernelsLifecycle(TestCasePlus):
    """Lifecycle checks for toggling `use_kernels` on an already-loaded model."""

    @classmethod
    def setUpClass(cls):
        cls.model_id = "unsloth/Llama-3.2-1B-Instruct"
        cls.model = AutoModelForCausalLM.from_pretrained(cls.model_id, use_kernels=False, device_map=torch_device)

    @classmethod
    def tearDownClass(cls):
        # Delete large objects to drop references early
        if hasattr(cls, "model"):
            try:
                del cls.model
            except Exception as e:
                print(f"Could not delete model: {e}")

    def tearDown(self):
        # Free accelerator memory/cache and trigger GC
        cleanup(torch_device, gc_collect=True)

    def test_setting_use_kernels_twice_does_not_rekernelize(self):
        kernelize_calls = []

        def spy_kernelize(*args, **kwargs):
            kernelize_calls.append((args, kwargs))

        with patch.object(kernels_pkg, "kernelize", side_effect=spy_kernelize):
            self.model.use_kernels = True
            self.assertTrue(self.model.use_kernels)
            self.assertEqual(len(kernelize_calls), 1)
            # Re-enabling an already-enabled flag must not trigger another kernelize.
            self.model.use_kernels = True
            self.assertEqual(len(kernelize_calls), 1)

    def test_train_eval_calls_kernelize_with_correct_mode(self):
        seen_modes = []

        def spy_kernelize(model, device=None, mode=None):
            seen_modes.append(mode)

        with patch.object(kernels_pkg, "kernelize", side_effect=spy_kernelize):
            self.model.use_kernels = True
            self.model.train(True)
            self.assertIn(Mode.TRAINING, seen_modes)
            self.model.eval()
            self.assertIn(Mode.INFERENCE, seen_modes)
@require_kernels
class TestKernelMappingDeviceFiltering(TestCasePlus):
    """Test that kernel mappings correctly filter by current device."""

    @staticmethod
    def _mock_model_on_cuda():
        # Minimal mock exposing what `create_compatible_mapping` inspects:
        # the parameter device and a named module carrying `kernel_layer_name`.
        model = MagicMock()
        model.training = False
        param = MagicMock()
        param.device.type = "cuda"
        model.parameters.return_value = iter([param])
        layer = MagicMock()
        layer.kernel_layer_name = "RMSNorm"
        model.named_modules.return_value = [("layers.0", layer)]
        return model

    def test_multi_device_mapping_filters_correctly(self):
        """
        Test that when a kernel_mapping contains multiple devices (cuda, rocm),
        only the current device's kernel is registered.
        Regression test for issue where ROCm overwrote CUDA mapping.
        """
        kernel_config = KernelConfig(
            {
                "RMSNorm": {
                    "cuda": "kernels-community/layer_norm:LlamaRMSNorm",
                    "rocm": "kernels-community/layer_norm:LlamaRMSNorm",
                }
            }
        )
        # Trigger the mapping creation against a model living on CUDA
        kernel_config.create_compatible_mapping(self._mock_model_on_cuda())
        result_mapping = kernel_config.kernel_mapping
        self.assertIn("RMSNorm", result_mapping, "RMSNorm should be in mapping")
        backends = list(result_mapping["RMSNorm"].keys())
        # Assert only CUDA is present, not ROCm
        self.assertIn("cuda", backends, "CUDA backend should be registered")
        self.assertNotIn("rocm", backends, "ROCm backend should NOT be registered on CUDA device")

    def test_single_device_mapping_still_works(self):
        """
        Test that single-device mappings continue to work as expected.
        """
        kernel_config = KernelConfig({"RMSNorm": "kernels-community/layer_norm:LlamaRMSNorm"})
        kernel_config.create_compatible_mapping(self._mock_model_on_cuda())
        self.assertIn("RMSNorm", kernel_config.kernel_mapping, "RMSNorm should be in mapping")
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/kernels/test_kernels.py",
"license": "Apache License 2.0",
"lines": 415,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:src/transformers/utils/chat_parsing_utils.py | from __future__ import annotations
import json
import re
from transformers.utils import is_jmespath_available
# `jmespath` is an optional dependency; bind the name to None when it is not
# installed so callers can test for availability before using it.
if is_jmespath_available():
    import jmespath
else:
    jmespath = None
def _parse_re_match(node_match: re.Match) -> dict | str:
# If the regex has named groups, return a dict of those groups
if node_match.groupdict():
return {key: val for key, val in node_match.groupdict().items() if val is not None}
# Otherwise the regex must have exactly one unnamed group, and we return that
else:
groups = list(node_match.groups())
if len(groups) > 1:
raise ValueError(f"Regex has multiple unnamed groups!\nGroups: {groups}\n")
elif len(groups) == 0:
raise ValueError(f"Regex has no capture groups:\n\n{node_match.group(0)}")
return groups[0]
def recursive_parse(
node_content: str | list | dict,
node_schema: dict,
):
"""
This function takes content and a JSON schema which includes
regex extractors, and recursively parses the content. The output
should be a data structure matching the schema.
Args:
node_content: The content corresponding to this node. Usually a string, but can be something else
if the parent node has multiple capture groups or named groups. In that case,
we generally pass the capture groups straight through to the children of this node
and don't do any parsing at this level.
node_schema: The schema node controlling the parsing.
Returns:
The parsed data structure for the current node.
"""
# If the schema has a const, we just return that value and do absolutely nothing else
if "const" in node_schema:
return node_schema["const"]
# If the node content is None, we return None. EZ.
if node_content is None:
return None
# If not, we have to do a little parsing. First, set some vars and do basic validation
node_type = node_schema.get("type")
has_regex = "x-regex" in node_schema or "x-regex-iterator" in node_schema or "x-regex-key-value" in node_schema
if has_regex and not isinstance(node_content, str):
raise TypeError(
"Schema node got a non-string input, but has a regex for parsing.\n"
f"Input: {node_content}\n"
f"Schema: {node_schema}"
)
node_regex = node_schema.get("x-regex")
node_regex_iterator = node_schema.get("x-regex-iterator")
node_regex_to_dict = node_schema.get("x-regex-key-value")
if node_regex is not None:
node_match = re.search(node_regex, node_content, flags=re.DOTALL)
if not node_match:
return None
node_content = _parse_re_match(node_match)
if node_regex_iterator is not None:
if node_type != "array":
raise TypeError(f"Schema node with type {node_type} cannot use x-regex-iterator.\nSchema: {node_schema}")
# Note that this can be applied after a standard node-regex search
node_content = [
_parse_re_match(node_match)
for node_match in re.finditer(node_regex_iterator, node_content, flags=re.DOTALL)
]
if not node_content:
return None
if node_regex_to_dict is not None:
if node_type != "object":
raise TypeError(f"Schema node with type {node_type} cannot use x-regex-key-value.\nSchema: {node_schema}")
# Note that this can be applied after a standard node-regex search
output_content = {}
for node_match in re.finditer(node_regex_to_dict, node_content, flags=re.DOTALL):
match_groups = _parse_re_match(node_match)
if not isinstance(match_groups, dict) or "key" not in match_groups or "value" not in match_groups:
raise ValueError(
f"Regex for x-regex-key-value must have named groups 'key' and 'value'.\n"
f"Match groups: {match_groups}\n"
f"Schema: {node_schema}"
)
output_content[match_groups["key"]] = match_groups["value"]
node_content = output_content
if not node_content:
return None
# Next, if the node has a parser, apply it. We do this after regexes so that the regex can extract
# a substring to parse, if needed.
if "x-parser" in node_schema:
parser = node_schema["x-parser"]
if parser == "json":
if not isinstance(node_content, str):
raise TypeError(
f"Node has JSON parser but got non-string input: {node_content}\nSchema: {node_schema}"
)
parser_args = node_schema.get("x-parser-args", {})
transform = parser_args.get("transform")
allow_non_json = parser_args.get("allow_non_json", False)
try:
parsed_json = json.loads(node_content)
except json.JSONDecodeError as e:
if allow_non_json:
parsed_json = node_content
else:
raise ValueError(
f"Node has JSON parser but could not parse its contents as JSON. You can use the `allow_non_json` parser arg for nodes which may contain JSON or string content.\n\nContent: {node_content}\n\nError: {e}"
)
if transform is not None:
if jmespath is None:
raise ImportError(
"Chat response schema includes a jmespath transformation, but jmespath is not installed. You can install it with `pip install jmespath`."
)
parsed_json = jmespath.search(parser_args["transform"], parsed_json)
node_content = parsed_json
else:
raise ValueError(f"Unknown parser {parser} for schema node: {node_schema}")
# If there's a mapping, apply it now
if "x-mapping" in node_schema:
if not isinstance(node_content, str):
raise TypeError(
f"Schema node with type {node_type} cannot use x-mapping on non-string content.\n"
f"Content: {node_content}\n"
f"Schema: {node_schema}"
)
mapping = node_schema["x-mapping"]
if node_content in mapping:
node_content = mapping[node_content]
if "x-mapping-regex" in node_schema:
if not isinstance(node_content, str):
raise TypeError(
f"Schema node with type {node_type} cannot use x-mapping-regex on non-string content.\n"
f"Content: {node_content}\n"
f"Schema: {node_schema}"
)
mapping_regex = node_schema["x-mapping-regex"]
for pattern, replacement in mapping_regex.items():
node_content = re.sub(pattern, replacement, node_content, flags=re.DOTALL)
# Finally, handle parsed content based on schema type and recurse if required
if node_type == "object":
parsed_schema = {}
if isinstance(node_content, str):
# This means we don't have a regex at this level, so all of our child nodes need to parse the whole
# string themselves to extract their value.
if "properties" not in node_schema:
raise ValueError(
f"Object node received string content but has no regex or parser to handle it.\n"
f"Content: {node_content}\n"
f"Schema: {node_schema}"
)
for key, child_node in node_schema["properties"].items():
child_node_content = recursive_parse(node_content, node_schema["properties"][key])
if child_node_content is not None:
parsed_schema[key] = child_node_content
return parsed_schema
elif isinstance(node_content, dict):
for key, child_node in node_schema.get("properties", {}).items():
if "const" in child_node:
parsed_schema[key] = child_node["const"]
elif key in node_content:
parsed_schema[key] = recursive_parse(node_content[key], child_node)
elif "default" in child_node:
parsed_schema[key] = child_node["default"]
if "additionalProperties" in node_schema:
for key, value in node_content.items():
if key not in node_schema.get("properties", {}):
parsed_schema[key] = recursive_parse(value, node_schema["additionalProperties"])
return parsed_schema
else:
raise TypeError(f"Expected a dict or str for schema node with type object, got {node_content}")
elif node_type == "array":
if not node_content:
return []
parsed_schema = []
if "items" in node_schema:
if not isinstance(node_content, list):
raise TypeError(f"Expected a list or regex for schema node with type array, got {node_content}")
for item in node_content:
parsed_schema.append(recursive_parse(item, node_schema["items"]))
return parsed_schema
elif "prefixItems" in node_schema:
if not isinstance(node_content, list):
if len(node_schema["prefixItems"]) == 1:
# If there's only one prefix item, this is a single item array, we can just wrap the string
node_content = [node_content]
else:
raise TypeError(f"Expected a list or regex for schema node with type array, got {node_content}")
if len(node_content) != len(node_schema["prefixItems"]):
raise ValueError(
f"Array node has {len(node_content)} items, but schema only has "
f"{len(node_schema['prefixItems'])} prefixItems defined.\n"
f"Content: {node_content}\n"
f"Schema: {node_schema}"
)
for item, item_schema in zip(node_content, node_schema["prefixItems"]):
parsed_schema.append(recursive_parse(item, item_schema))
return parsed_schema
else:
raise ValueError(f"Array node has no items or prefixItems schema defined.\nSchema: {node_schema}")
elif node_type in ("string", "integer", "number", "boolean"):
if not isinstance(node_content, str):
raise TypeError(f"Expected a string for schema node with type {node_type}, got {node_content}")
if node_type == "integer":
return int(node_content)
elif node_type == "number":
return float(node_content)
elif node_type == "boolean":
if node_content.lower() in ("true", "1"):
return True
elif node_content.lower() in ("false", "0"):
return False
else:
raise ValueError(f"Invalid boolean value: {node_content}")
else:
# String type
return node_content
elif node_type is None:
return node_content # Don't touch it
else:
raise TypeError(f"Unsupported schema type {node_type} for node: {node_content}")
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/utils/chat_parsing_utils.py",
"license": "Apache License 2.0",
"lines": 220,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
huggingface/transformers:src/transformers/models/efficientloftr/modular_efficientloftr.py | import torch
from ...utils import TensorType
from ..superglue.image_processing_superglue_fast import SuperGlueImageProcessorFast
from .modeling_efficientloftr import EfficientLoFTRKeypointMatchingOutput
class EfficientLoFTRImageProcessorFast(SuperGlueImageProcessorFast):
    def post_process_keypoint_matching(
        self,
        outputs: "EfficientLoFTRKeypointMatchingOutput",
        target_sizes: TensorType | list[tuple],
        threshold: float = 0.0,
    ) -> list[dict[str, torch.Tensor]]:
        """
        Converts the raw output of [`EfficientLoFTRKeypointMatchingOutput`] into lists of keypoints, scores and descriptors
        with coordinates absolute to the original image sizes.

        Args:
            outputs ([`EfficientLoFTRKeypointMatchingOutput`]):
                Raw outputs of the model.
            target_sizes (`torch.Tensor` or `List[Tuple[Tuple[int, int]]]`, *optional*):
                Tensor of shape `(batch_size, 2, 2)` or list of tuples of tuples (`Tuple[int, int]`) containing the
                target size `(height, width)` of each image in the batch. This must be the original image size (before
                any processing).
            threshold (`float`, *optional*, defaults to 0.0):
                Threshold to filter out the matches with low scores.

        Returns:
            `List[Dict]`: A list of dictionaries, each dictionary containing the keypoints in the first and second image
            of the pair, the matching scores and the matching indices.
        """
        if outputs.matches.shape[0] != len(target_sizes):
            raise ValueError("Make sure that you pass in as many target sizes as the batch dimension of the mask")
        if not all(len(target_size) == 2 for target_size in target_sizes):
            raise ValueError("Each element of target_sizes must contain the size (h, w) of each image of the batch")

        if isinstance(target_sizes, list):
            pair_sizes = torch.tensor(target_sizes, device=outputs.matches.device)
        else:
            if target_sizes.shape[1] != 2 or target_sizes.shape[2] != 2:
                raise ValueError(
                    "Each element of target_sizes must contain the size (h, w) of each image of the batch"
                )
            pair_sizes = target_sizes

        # Scale normalized keypoints into pixel coordinates: (h, w) is flipped to (w, h)
        # to line up with the (x, y) keypoint layout, then cast to integer pixels.
        scale = pair_sizes.flip(-1).reshape(-1, 2, 1, 2)
        pixel_keypoints = (outputs.keypoints.clone() * scale).to(torch.int32)

        results = []
        for keypoints_pair, pair_matches, pair_scores in zip(pixel_keypoints, outputs.matches, outputs.matching_scores):
            # Keep only matches above the score threshold that point at a valid index
            keep = (pair_scores > threshold) & (pair_matches > -1)
            results.append(
                {
                    "keypoints0": keypoints_pair[0][keep[0]],
                    "keypoints1": keypoints_pair[1][keep[1]],
                    "matching_scores": pair_scores[0][keep[0]],
                }
            )
        return results
__all__ = ["EfficientLoFTRImageProcessorFast"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/efficientloftr/modular_efficientloftr.py",
"license": "Apache License 2.0",
"lines": 58,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
huggingface/transformers:src/transformers/cli/chat.py | # Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import json
import os
import platform
import re
import string
import time
from collections.abc import AsyncIterator
from typing import Annotated, Any
from urllib.parse import urljoin, urlparse
import httpx
import typer
import yaml
from huggingface_hub import AsyncInferenceClient, ChatCompletionStreamOutput
from transformers import GenerationConfig
from transformers.utils import is_rich_available
try:
import readline # noqa importing this enables GNU readline capabilities
except ImportError:
# some platforms may not support readline: https://docs.python.org/3/library/readline.html
pass
if platform.system() != "Windows":
import pwd
if is_rich_available():
from rich.console import Console
from rich.live import Live
from rich.markdown import Markdown
# Default local endpoint; `Chat` only runs a health check when the user points at this host/port.
DEFAULT_HTTP_ENDPOINT = {"hostname": "localhost", "port": 8000}
# NOTE(review): these two character sets appear unused within this module — confirm against other
# modules before removing.
ALLOWED_KEY_CHARS = set(string.ascii_letters + string.whitespace)
ALLOWED_VALUE_CHARS = set(
    string.ascii_letters + string.digits + string.whitespace + r".!\"#$%&'()*+,\-/:<=>?@[]^_`{|}~"
)
# Built-in prompts for the `!example NAME` command; can be replaced via `--examples-path`.
DEFAULT_EXAMPLES = {
    "llama": {"text": "There is a Llama in my lawn, how can I get rid of it?"},
    "code": {
        "text": (
            "Write a Python function that integrates any Python function f(x) numerically over an arbitrary "
            "interval [x_start, x_end]."
        ),
    },
    "helicopter": {"text": "How many helicopters can a human eat in one sitting?"},
    "numbers": {"text": "Count to 10 but skip every number ending with an 'e'"},
    "birds": {"text": "Why aren't birds real?"},
    "socks": {"text": "Why is it important to eat socks after meditating?"},
    "numbers2": {"text": "Which number is larger, 9.9 or 9.11?"},
}
# Printed at the start of a chat session
HELP_STRING_MINIMAL = """
**TRANSFORMERS CHAT INTERFACE**
Chat interface to try out a model. Besides chatting with the model, here are some basic commands:
- **!help**: shows all available commands (set generation settings, save chat, etc.)
- **!status**: shows the current status of the model and generation settings
- **!clear**: clears the current conversation and starts a new one
- **!exit**: closes the interface
"""
# Printed when the user types `help` in the chat session
HELP_STRING = f"""
**TRANSFORMERS CHAT INTERFACE HELP**
Full command list:
- **!help**: shows this help message
- **!clear**: clears the current conversation and starts a new one
- **!status**: shows the current status of the model and generation settings
- **!example {{NAME}}**: loads example named `{{NAME}}` from the config and uses it as the user input.
Available example names: `{"`, `".join(DEFAULT_EXAMPLES.keys())}`
- **!set {{ARG_1}}={{VALUE_1}} {{ARG_2}}={{VALUE_2}}** ...: changes the system prompt or generation settings (multiple
settings are separated by a space). Accepts the same flags and format as the `generate_flags` CLI argument.
If you're a new user, check this basic flag guide: https://huggingface.co/docs/transformers/llm_tutorial#common-options
- **!save {{SAVE_NAME}} (optional)**: saves the current chat and settings to file by default to
`./chat_history/{{MODEL_ID}}/chat_{{DATETIME}}.json` or `{{SAVE_NAME}}` if provided
- **!exit**: closes the interface
"""
class RichInterface:
    """Console front-end for the chat session, built on `rich`.

    Renders the streamed model output live as Markdown and provides small
    helpers for user input, colored messages, confirmation prompts, and the
    help/status screens.
    """

    def __init__(self, model_id: str, user_id: str):
        # model_id and user_id are display labels used in the prompt headers.
        self._console = Console()
        self.model_id = model_id
        self.user_id = user_id

    async def stream_output(self, stream: AsyncIterator[ChatCompletionStreamOutput]) -> tuple[str, str | Any | None]:
        """Consume a chat-completion stream, rendering it live, and return (full_text, finish_reason).

        NOTE(review): `stream` is awaited once before iteration, so callers pass the
        un-awaited result of `client.chat_completion(..., stream=True)` — confirm.
        """
        self._console.print(f"[bold blue]<{self.model_id}>:")
        with Live(console=self._console, refresh_per_second=4) as live:
            text = ""
            finish_reason: str | None = None
            async for token in await stream:
                outputs = token.choices[0].delta.content
                # Keep the previous value when the chunk carries no finish_reason attribute.
                finish_reason = getattr(token.choices[0], "finish_reason", finish_reason)
                if not outputs:
                    continue
                # Escapes single words encased in <>, e.g. <think> -> \<think\>, for proper rendering in Markdown.
                # It only escapes single words that may have `_`, optionally following a `/` (e.g. </think>)
                outputs = re.sub(r"<(/*)(\w*)>", r"\<\1\2\>", outputs)
                text += outputs
                # Render the accumulated text as Markdown
                # NOTE: this is a workaround for the rendering "unstandard markdown"
                # in rich. The chatbots output treat "\n" as a new line for
                # better compatibility with real-world text. However, rendering
                # in markdown would break the format. It is because standard markdown
                # treat a single "\n" in normal text as a space.
                # Our workaround is adding two spaces at the end of each line.
                # This is not a perfect solution, as it would
                # introduce trailing spaces (only) in code block, but it works well
                # especially for console output, because in general the console does not
                # care about trailing spaces.
                lines = []
                for line in text.splitlines():
                    lines.append(line)
                    if line.startswith("```"):
                        # Code block marker - do not add trailing spaces, as it would
                        # break the syntax highlighting
                        lines.append("\n")
                    else:
                        lines.append(" \n")
                markdown = Markdown("".join(lines).strip(), code_theme="github-dark")
                # Update the Live console output
                live.update(markdown, refresh=True)
        self._console.print()
        return text, finish_reason

    def input(self) -> str:
        """Gets user input from the console."""
        # NOTE: the local name shadows the builtin `input`; harmless in this scope.
        input = self._console.input(f"[bold red]<{self.user_id}>:\n")
        self._console.print()
        return input

    def clear(self):
        """Clears the console."""
        self._console.clear()

    def print_user_message(self, text: str):
        """Prints a user message to the console."""
        self._console.print(f"[bold red]<{self.user_id}>:[/ bold red]\n{text}")
        self._console.print()

    def print_color(self, text: str, color: str):
        """Prints text in a given color to the console."""
        self._console.print(f"[bold {color}]{text}")
        self._console.print()

    def confirm(self, message: str, default: bool = False) -> bool:
        """Displays a yes/no prompt to the user, returning True for confirmation."""
        default_hint = "Y/n" if default else "y/N"
        response = self._console.input(f"[bold yellow]{message} ({default_hint}): ")
        self._console.print()
        response = response.strip().lower()
        # An empty answer (just pressing Enter) selects the default.
        if not response:
            return default
        return response in {"y", "yes"}

    def print_help(self, minimal: bool = False):
        """Prints the help message to the console."""
        self._console.print(Markdown(HELP_STRING_MINIMAL if minimal else HELP_STRING))
        self._console.print()

    def print_status(self, config: GenerationConfig):
        """Prints the status of the model and generation settings to the console."""
        self._console.print(f"[bold blue]Model: {self.model_id}\n")
        self._console.print(f"[bold blue]{config}")
        self._console.print()
class Chat:
    """Chat with a model from the command line."""

    # Defining a class to help with internal state but in practice it's just a method to call
    # TODO: refactor into a proper module with helpers + 1 main method
    def __init__(
        self,
        model_id: Annotated[str, typer.Argument(help="ID of the model to use (e.g. 'HuggingFaceTB/SmolLM3-3B').")],
        base_url: Annotated[
            str | None, typer.Argument(help="Base url to connect to (e.g. http://localhost:8000/v1).")
        ] = f"http://{DEFAULT_HTTP_ENDPOINT['hostname']}:{DEFAULT_HTTP_ENDPOINT['port']}",
        generate_flags: Annotated[
            list[str] | None,
            typer.Argument(
                help=(
                    "Flags to pass to `generate`, using a space as a separator between flags. Accepts booleans, numbers, "
                    "and lists of integers, more advanced parameterization should be set through --generation-config. "
                    "Example: `transformers chat <base_url> <model_id> max_new_tokens=100 do_sample=False eos_token_id=[1,2]`. "
                    "If you're a new user, check this basic flag guide: "
                    "https://huggingface.co/docs/transformers/llm_tutorial#common-options"
                )
            ),
        ] = None,
        # General settings
        user: Annotated[
            str | None,
            typer.Option(help="Username to display in chat interface. Defaults to the current user's name."),
        ] = None,
        system_prompt: Annotated[str | None, typer.Option(help="System prompt.")] = None,
        save_folder: Annotated[str, typer.Option(help="Folder to save chat history.")] = "./chat_history/",
        examples_path: Annotated[str | None, typer.Option(help="Path to a yaml file with examples.")] = None,
        # Generation settings
        generation_config: Annotated[
            str | None,
            typer.Option(
                help="Path to a local generation config file or to a HuggingFace repo containing a `generation_config.json` file. Other generation settings passed as CLI arguments will be applied on top of this generation config."
            ),
        ] = None,
    ) -> None:
        """Chat with a model from the command line."""
        self.base_url = base_url
        parsed = urlparse(self.base_url)
        # Only health-check when pointing at the default local `transformers serve` endpoint;
        # remote endpoints may not expose /health.
        if parsed.hostname == DEFAULT_HTTP_ENDPOINT["hostname"] and parsed.port == DEFAULT_HTTP_ENDPOINT["port"]:
            self.check_health(self.base_url)

        self.model_id = model_id
        self.system_prompt = system_prompt
        self.save_folder = save_folder

        # Generation settings: file/repo config first, then defaults, then CLI flags on top.
        config = load_generation_config(generation_config)
        config.update(do_sample=True, max_new_tokens=256)  # some default values
        config.update(**parse_generate_flags(generate_flags))
        self.config = config
        self.settings = {"base_url": base_url, "model_id": model_id, "config": self.config.to_dict()}

        # User settings
        self.user = user if user is not None else get_username()

        # Load examples
        if examples_path:
            with open(examples_path) as f:
                self.examples = yaml.safe_load(f)
        else:
            self.examples = DEFAULT_EXAMPLES

        # Check requirements
        if not is_rich_available():
            raise ImportError("You need to install rich to use the chat interface. (`pip install rich`)")

        # Run chat session
        asyncio.run(self._inner_run())

    @staticmethod
    def check_health(url):
        """Ping `{url}/health` and raise a `ValueError` if the server is unreachable or unhealthy."""
        health_url = urljoin(url + "/", "health")
        try:
            output = httpx.get(health_url)
            if output.status_code != 200:
                raise ValueError(
                    f"The server running on {url} returned status code {output.status_code} on health check (/health)."
                )
        except httpx.ConnectError:
            # FIX: the two adjacent literals previously concatenated to "in aseparate shell"
            raise ValueError(
                f"No server currently running on {url}. To run a local server, please run `transformers serve` in a "
                "separate shell. Find more information here: https://huggingface.co/docs/transformers/serving"
            )
        return True

    def handle_non_exit_user_commands(
        self,
        user_input: str,
        interface: RichInterface,
        examples: dict[str, dict[str, str]],
        config: GenerationConfig,
        chat: list[dict],
    ) -> tuple[list[dict], GenerationConfig]:
        """
        Handles all user commands except for `!exit`. May update the chat history (e.g. reset it) or the
        generation config (e.g. set a new flag).

        Returns the (possibly replaced) chat history, whether the command was valid, and the config.
        """
        valid_command = True

        if user_input == "!clear":
            chat = new_chat_history(self.system_prompt)
            interface.clear()

        elif user_input == "!help":
            interface.print_help()

        # `!save` on its own or with a single filename argument (FIX: was `< 2`, which made
        # `!save NAME` unreachable and the `split_input[1]` branch dead code)
        elif user_input.startswith("!save") and len(user_input.split()) <= 2:
            split_input = user_input.split()
            filename = (
                split_input[1]
                if len(split_input) == 2
                else os.path.join(self.save_folder, self.model_id, f"chat_{time.strftime('%Y-%m-%d_%H-%M-%S')}.json")
            )
            # save_chat returns the absolute path of the written file; show it to the user
            saved_path = save_chat(filename=filename, chat=chat, settings=self.settings)
            interface.print_color(text=f"Chat saved to {saved_path}!", color="green")

        elif user_input.startswith("!set"):
            # splits the new args into a list of strings, each string being a `flag=value` pair (same format as
            # `generate_flags`)
            new_generate_flags = user_input[4:].strip()
            new_generate_flags = new_generate_flags.split()
            # sanity check: each member in the list must have an =
            for flag in new_generate_flags:
                if "=" not in flag:
                    interface.print_color(
                        text=(
                            f"Invalid flag format, missing `=` after `{flag}`. Please use the format "
                            "`arg_1=value_1 arg_2=value_2 ...`."
                        ),
                        color="red",
                    )
                    break
            else:
                # Update config from user flags
                config.update(**parse_generate_flags(new_generate_flags))

        elif user_input.startswith("!example") and len(user_input.split()) == 2:
            example_name = user_input.split()[1]
            if example_name in examples:
                interface.clear()
                chat = []
                interface.print_user_message(examples[example_name]["text"])
                chat.append({"role": "user", "content": examples[example_name]["text"]})
            else:
                example_error = (
                    f"Example {example_name} not found in list of available examples: {list(examples.keys())}."
                )
                interface.print_color(text=example_error, color="red")

        elif user_input == "!status":
            interface.print_status(config=config)

        else:
            valid_command = False
            interface.print_color(text=f"'{user_input}' is not a valid command. Showing help message.", color="red")
            interface.print_help()

        return chat, valid_command, config

    async def _inner_run(self):
        """Main REPL loop: read input, dispatch `!` commands, stream model replies."""
        interface = RichInterface(model_id=self.model_id, user_id=self.user)
        interface.clear()
        chat = new_chat_history(self.system_prompt)
        # Starts the session with a minimal help message at the top, so that a user doesn't get stuck
        interface.print_help(minimal=True)
        config = self.config
        async with AsyncInferenceClient(base_url=self.base_url) as client:
            # When set, used as the next "user" turn instead of prompting (e.g. auto-continue).
            pending_user_input: str | None = None
            while True:
                try:
                    if pending_user_input is not None:
                        user_input = pending_user_input
                        pending_user_input = None
                        interface.print_user_message(user_input)
                    else:
                        user_input = interface.input()

                    # User commands
                    if user_input == "!exit":
                        break
                    elif user_input == "!clear":
                        chat = new_chat_history(self.system_prompt)
                        interface.clear()
                        continue
                    elif user_input == "!help":
                        interface.print_help()
                        continue
                    # FIX: was `< 2`, which made `!save NAME` fall through as an invalid command
                    elif user_input.startswith("!save") and len(user_input.split()) <= 2:
                        split_input = user_input.split()
                        filename = (
                            split_input[1]
                            if len(split_input) == 2
                            else os.path.join(
                                self.save_folder, self.model_id, f"chat_{time.strftime('%Y-%m-%d_%H-%M-%S')}.json"
                            )
                        )
                        saved_path = save_chat(filename=filename, chat=chat, settings=self.settings)
                        interface.print_color(text=f"Chat saved to {saved_path}!", color="green")
                        continue
                    elif user_input.startswith("!set"):
                        # splits the new args into a list of strings, each string being a `flag=value` pair (same format as
                        # `generate_flags`)
                        new_generate_flags = user_input[4:].strip()
                        new_generate_flags = new_generate_flags.split()
                        # sanity check: each member in the list must have an =
                        for flag in new_generate_flags:
                            if "=" not in flag:
                                interface.print_color(
                                    text=(
                                        f"Invalid flag format, missing `=` after `{flag}`. Please use the format "
                                        "`arg_1=value_1 arg_2=value_2 ...`."
                                    ),
                                    color="red",
                                )
                                break
                        else:
                            # Update config from user flags
                            config.update(**parse_generate_flags(new_generate_flags))
                        continue
                    elif user_input.startswith("!example") and len(user_input.split()) == 2:
                        example_name = user_input.split()[1]
                        if example_name in self.examples:
                            interface.clear()
                            chat = []
                            interface.print_user_message(self.examples[example_name]["text"])
                            chat.append({"role": "user", "content": self.examples[example_name]["text"]})
                            # No `continue` here: fall through so the example gets answered by the model.
                        else:
                            example_error = f"Example {example_name} not found in list of available examples: {list(self.examples.keys())}."
                            interface.print_color(text=example_error, color="red")
                            # FIX: skip generation on an unknown example name instead of
                            # re-generating on the unchanged history
                            continue
                    elif user_input == "!status":
                        interface.print_status(config=config)
                        continue
                    elif user_input.startswith("!"):
                        interface.print_color(
                            text=f"'{user_input}' is not a valid command. Showing help message.", color="red"
                        )
                        interface.print_help()
                        continue
                    else:
                        chat.append({"role": "user", "content": user_input})

                    stream = client.chat_completion(
                        chat,
                        stream=True,
                        model=self.model_id,
                        extra_body={
                            "generation_config": config.to_json_string(),
                            "model": self.model_id,
                        },
                    )
                    model_output, finish_reason = await interface.stream_output(stream)
                    chat.append({"role": "assistant", "content": model_output})
                    if finish_reason == "length":
                        interface.print_color("Generation stopped after reaching the token limit.", "yellow")
                        if interface.confirm("Continue generating?"):
                            # FIX: removed a stray trailing curly quote from this prompt
                            pending_user_input = "Please continue. Do not repeat text."
                            continue
                except KeyboardInterrupt:
                    break
def load_generation_config(generation_config: str | None) -> GenerationConfig:
    """Resolve the `--generation-config` CLI value into a `GenerationConfig`.

    `None` yields default settings; a value containing ".json" is treated as a
    local file path; anything else is treated as a Hub repo id.
    """
    if generation_config is None:
        return GenerationConfig()
    if ".json" not in generation_config:  # Hub repo containing generation_config.json
        return GenerationConfig.from_pretrained(generation_config)
    # Local file: split into containing folder and file name
    dirname, filename = os.path.split(generation_config)
    return GenerationConfig.from_pretrained(dirname, filename)
def parse_generate_flags(generate_flags: list[str] | None) -> dict:
"""Parses the generate flags from the user input into a dictionary of `generate` kwargs."""
if generate_flags is None or len(generate_flags) == 0:
return {}
# Assumption: `generate_flags` is a list of strings, each string being a `flag=value` pair, that can be parsed
# into a json string if we:
# 1. Add quotes around each flag name
generate_flags_as_dict = {'"' + flag.split("=")[0] + '"': flag.split("=")[1] for flag in generate_flags}
# 2. Handle types:
# 2. a. booleans should be lowercase, None should be null
generate_flags_as_dict = {
k: v.lower() if v.lower() in ["true", "false"] else v for k, v in generate_flags_as_dict.items()
}
generate_flags_as_dict = {k: "null" if v == "None" else v for k, v in generate_flags_as_dict.items()}
# 2. b. strings should be quoted
def is_number(s: str) -> bool:
# handle negative numbers
s = s.removeprefix("-")
return s.replace(".", "", 1).isdigit()
generate_flags_as_dict = {k: f'"{v}"' if not is_number(v) else v for k, v in generate_flags_as_dict.items()}
# 2. c. [no processing needed] lists are lists of ints because `generate` doesn't take lists of strings :)
# We also mention in the help message that we only accept lists of ints for now.
# 3. Join the result into a comma separated string
generate_flags_string = ", ".join([f"{k}: {v}" for k, v in generate_flags_as_dict.items()])
# 4. Add the opening/closing brackets
generate_flags_string = "{" + generate_flags_string + "}"
# 5. Remove quotes around boolean/null and around lists
generate_flags_string = generate_flags_string.replace('"null"', "null")
generate_flags_string = generate_flags_string.replace('"true"', "true")
generate_flags_string = generate_flags_string.replace('"false"', "false")
generate_flags_string = generate_flags_string.replace('"[', "[")
generate_flags_string = generate_flags_string.replace(']"', "]")
# 6. Replace the `=` with `:`
generate_flags_string = generate_flags_string.replace("=", ":")
try:
processed_generate_flags = json.loads(generate_flags_string)
except json.JSONDecodeError:
raise ValueError(
"Failed to convert `generate_flags` into a valid JSON object."
"\n`generate_flags` = {generate_flags}"
"\nConverted JSON string = {generate_flags_string}"
)
return processed_generate_flags
def new_chat_history(system_prompt: str | None = None) -> list[dict]:
"""Returns a new chat conversation."""
return [{"role": "system", "content": system_prompt}] if system_prompt else []
def save_chat(filename: str, chat: list[dict], settings: dict) -> str:
    """Saves the chat history and settings to a file.

    Returns the absolute path of the written file.
    """
    folder = os.path.dirname(filename)
    # FIX: `os.makedirs("")` raises FileNotFoundError, so only create directories when the
    # filename actually has a directory component.
    if folder:
        os.makedirs(folder, exist_ok=True)
    with open(filename, "w") as f:
        json.dump({"settings": settings, "chat_history": chat}, f, indent=4)
    return os.path.abspath(filename)
def get_username() -> str:
    """Returns the username of the current user."""
    # `pwd` is POSIX-only, so Windows falls back to os.getlogin().
    if platform.system() != "Windows":
        return pwd.getpwuid(os.getuid()).pw_name
    return os.getlogin()
if __name__ == "__main__":
Chat(model_id="meta-llama/Llama-3.2-3b-Instruct")
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/cli/chat.py",
"license": "Apache License 2.0",
"lines": 474,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/cli/download.py | # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Annotated
import typer
def download(
    model_id: Annotated[str, typer.Argument(help="The model ID to download")],
    cache_dir: Annotated[str | None, typer.Option(help="Directory where to save files.")] = None,
    force_download: Annotated[
        bool, typer.Option(help="If set, the files will be downloaded even if they are already cached locally.")
    ] = False,
    trust_remote_code: Annotated[
        bool,
        typer.Option(
            help="Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine"
        ),
    ] = False,
):
    """Download a model and its tokenizer from the Hub."""
    from ..models.auto import AutoModel, AutoTokenizer

    # Both the model and tokenizer downloads share the same hub options.
    hub_kwargs = {
        "cache_dir": cache_dir,
        "force_download": force_download,
        "trust_remote_code": trust_remote_code,
    }
    AutoModel.from_pretrained(model_id, **hub_kwargs)
    AutoTokenizer.from_pretrained(model_id, **hub_kwargs)
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/cli/download.py",
"license": "Apache License 2.0",
"lines": 36,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/cli/system.py | # Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains commands to print information about the environment and version.
Usage:
transformers env
transformers version
"""
import contextlib
import io
import os
import platform
from typing import Annotated
import huggingface_hub
import typer
from .. import __version__
from ..integrations.deepspeed import is_deepspeed_available
from ..utils import (
is_accelerate_available,
is_torch_available,
is_torch_hpu_available,
is_torch_npu_available,
is_torch_xpu_available,
)
def env(
    accelerate_config_file: Annotated[
        str | None,
        typer.Argument(help="The accelerate config file to use for the default values in the launching script."),
    ] = None,
) -> dict:
    """Print a copy-pastable summary of the environment (for bug reports) and return it as a dict."""
    import safetensors

    safetensors_version = safetensors.__version__

    accelerate_version = "not installed"
    accelerate_config = accelerate_config_str = "not found"
    if is_accelerate_available():
        import accelerate
        from accelerate.commands.config import default_config_file, load_config_from_file

        accelerate_version = accelerate.__version__
        # Get the default from the config file.
        if accelerate_config_file is not None or os.path.isfile(default_config_file):
            accelerate_config = load_config_from_file(accelerate_config_file).to_dict()
        accelerate_config_str = (
            "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
            if isinstance(accelerate_config, dict)
            else f"\t{accelerate_config}"
        )

    pt_version = "not installed"
    pt_cuda_available = "NA"
    pt_accelerator = "NA"
    if is_torch_available():
        import torch

        pt_version = torch.__version__
        pt_cuda_available = torch.cuda.is_available()
        pt_xpu_available = is_torch_xpu_available()
        pt_npu_available = is_torch_npu_available()
        pt_hpu_available = is_torch_hpu_available()
        # Report a single accelerator label, in priority order: CUDA > XPU > NPU > HPU.
        if pt_cuda_available:
            pt_accelerator = "CUDA"
        elif pt_xpu_available:
            pt_accelerator = "XPU"
        elif pt_npu_available:
            pt_accelerator = "NPU"
        elif pt_hpu_available:
            pt_accelerator = "HPU"

    deepspeed_version = "not installed"
    if is_deepspeed_available():
        # Redirect command line output to silence deepspeed import output.
        with contextlib.redirect_stdout(io.StringIO()):
            import deepspeed

            deepspeed_version = deepspeed.__version__

    info = {
        "`transformers` version": __version__,
        "Platform": platform.platform(),
        "Python version": platform.python_version(),
        "Huggingface_hub version": huggingface_hub.__version__,
        "Safetensors version": f"{safetensors_version}",
        "Accelerate version": f"{accelerate_version}",
        "Accelerate config": f"{accelerate_config_str}",
        "DeepSpeed version": f"{deepspeed_version}",
        "PyTorch version (accelerator?)": f"{pt_version} ({pt_accelerator})",
        "Using distributed or parallel set-up in script?": "<fill in>",
    }
    if is_torch_available():
        # The `pt_*_available` flags were set in the `is_torch_available()` block above.
        if pt_cuda_available:
            info["Using GPU in script?"] = "<fill in>"
            info["GPU type"] = torch.cuda.get_device_name()
        elif pt_xpu_available:
            info["Using XPU in script?"] = "<fill in>"
            info["XPU type"] = torch.xpu.get_device_name()
        elif pt_hpu_available:
            info["Using HPU in script?"] = "<fill in>"
            info["HPU type"] = torch.hpu.get_device_name()
        elif pt_npu_available:
            info["Using NPU in script?"] = "<fill in>"
            info["NPU type"] = torch.npu.get_device_name()
            info["CANN version"] = torch.version.cann

    print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
    print(_format_dict(info))
    return info
def version() -> None:
    """Print CLI version."""
    # `__version__` is imported at module level from the `transformers` package.
    print(__version__)
def _format_dict(d: dict) -> str:
return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/cli/system.py",
"license": "Apache License 2.0",
"lines": 115,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/cli/transformers.py | # Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Transformers CLI."""
from huggingface_hub import check_cli_update, typer_factory
from transformers.cli.add_fast_image_processor import add_fast_image_processor
from transformers.cli.add_new_model_like import add_new_model_like
from transformers.cli.chat import Chat
from transformers.cli.download import download
from transformers.cli.serve import Serve
from transformers.cli.system import env, version
# Root CLI application; `typer_factory` is huggingface_hub's preconfigured typer builder.
app = typer_factory(help="Transformers CLI")

# Register each subcommand. `Chat` and `Serve` are registered under explicit names
# since their identifiers are capitalized class-style callables.
app.command()(add_fast_image_processor)
app.command()(add_new_model_like)
app.command(name="chat")(Chat)
app.command()(download)
app.command()(env)
app.command(name="serve")(Serve)
app.command()(version)
def main():
    """CLI entry point: warn about outdated installs, then dispatch to the typer app."""
    check_cli_update("transformers")
    app()


if __name__ == "__main__":
    main()
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/cli/transformers.py",
"license": "Apache License 2.0",
"lines": 34,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:tests/cli/test_chat.py | # Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import tempfile
from transformers.cli.chat import new_chat_history, parse_generate_flags, save_chat
def test_help(cli):
    """`transformers chat --help` exits cleanly and shows the command description."""
    result = cli("chat", "--help")
    assert result.exit_code == 0
    assert "Chat with a model from the command line." in result.output
def test_save_and_clear_chat():
    """Saving a chat writes a JSON file containing both the history and the settings."""
    history = [{"role": "user", "content": "hi"}]
    settings = {"foo": "bar"}
    with tempfile.TemporaryDirectory() as tmp_path:
        filename = os.path.join(tmp_path, "chat.json")
        save_chat(filename, history, settings)
        assert os.path.isfile(filename)
        with open(filename, "r") as f:
            data = json.load(f)
    assert data["chat_history"] == history
    assert data["settings"] == settings
def test_new_chat_history():
    """Without a prompt the history is empty; with one it holds a single system message."""
    assert new_chat_history() == []
    expected = [{"role": "system", "content": "prompt"}]
    assert new_chat_history("prompt") == expected
def test_parse_generate_flags():
    """Flags given as `key=value` strings are parsed into typed values."""
    flags = parse_generate_flags(["temperature=0.5", "max_new_tokens=10"])
    assert flags["temperature"] == 0.5
    assert flags["max_new_tokens"] == 10
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/cli/test_chat.py",
"license": "Apache License 2.0",
"lines": 37,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:tests/cli/test_download.py | # Copyright 2025-present, the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tempfile
from transformers.testing_utils import require_torch
@require_torch
def test_cli_download(cli):
    """Downloading a tiny model populates the standard hub cache layout."""
    with tempfile.TemporaryDirectory() as tmpdir:
        result = cli("download", "hf-internal-testing/tiny-random-gptj", "--cache-dir", tmpdir)
        assert result.exit_code == 0
        # The hub cache stores each repo under models--<org>--<name> with three subfolders.
        model_dir = os.path.join(tmpdir, "models--hf-internal-testing--tiny-random-gptj")
        for subfolder in ("blobs", "refs", "snapshots"):
            assert os.path.exists(os.path.join(model_dir, subfolder))
@require_torch
def test_cli_download_trust_remote(cli, caplog, capsys):
    """Downloading a repo with custom code succeeds when --trust-remote-code is passed."""
    caplog.set_level(100000)
    # ^ hack to avoid an issue happening only in CI. We don't check logs anyway so it's fine.
    # Source: https://github.com/pallets/click/issues/824#issuecomment-562581313
    with capsys.disabled():
        with tempfile.TemporaryDirectory() as tmpdir:
            output = cli(
                "download",
                "hf-internal-testing/test_dynamic_model_with_tokenizer",
                "--trust-remote-code",
                "--cache-dir",
                tmpdir,
            )
            assert output.exit_code == 0
            # check if the model files are downloaded correctly
            model_dir = os.path.join(tmpdir, "models--hf-internal-testing--test_dynamic_model_with_tokenizer")
            assert os.path.exists(os.path.join(model_dir, "blobs"))
            assert os.path.exists(os.path.join(model_dir, "refs"))
            assert os.path.exists(os.path.join(model_dir, "snapshots"))
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/cli/test_download.py",
"license": "Apache License 2.0",
"lines": 46,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:tests/cli/test_system.py | # Copyright 2025-present, the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from transformers import __version__
def test_cli_env(cli):
    """`transformers env` succeeds and reports the expected environment fields."""
    result = cli("env")
    assert result.exit_code == 0
    for expected in ("Python version", "Platform", "Using distributed or parallel set-up in script?"):
        assert expected in result.output
def test_cli_version(cli):
    """`transformers version` prints exactly the package version."""
    result = cli("version")
    assert result.exit_code == 0
    assert result.output.strip() == __version__
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/cli/test_system.py",
"license": "Apache License 2.0",
"lines": 24,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:benchmark_v2/framework/benchmark_config.py | import hashlib
import itertools
import json
import logging
from functools import lru_cache
from typing import Any
import torch
from transformers.generation.configuration_utils import CompileConfig
from transformers.utils import is_torch_accelerator_available
from transformers.utils.import_utils import is_flash_attn_2_available, is_kernels_available
# `kernels` is an optional dependency; record whether kernelization can be benchmarked.
KERNELIZATION_AVAILABLE = False
try:
    from kernels import Mode, kernelize  # noqa: F401

    KERNELIZATION_AVAILABLE = True
except ImportError:
    pass
logger = logging.getLogger(__name__)
@lru_cache
def is_fa2_or_kernel_available() -> bool:
    """Return True if flash_attn_2 is installed or a fallback kernel can be fetched.

    Cached because the kernel lookup may touch disk/network and the answer
    cannot change within a single benchmark run.
    """
    # Early return if flash_attn_2 is available
    if is_flash_attn_2_available():
        return True
    # Early return if kernels is not available
    if not is_kernels_available():
        logger.warning(
            "flash_attention_2 is not available. kernels is not installed. Benchmarking flash_attention_2 will not "
            "be possible."
        )
        return False
    # If kernels is available, try to get the flash_attn_2 kernel
    try:
        from kernels import get_kernel

        # TODO: Pass the 'version' kwarg to specify the binary version once kernels >= 0.12.0 is supported.
        get_kernel("kernels-community/flash-attn2")
    except Exception:
        # Fixed: the two string fragments previously concatenated without a
        # separating space ("...available.Benchmarking...").
        logger.warning(
            "flash_attention_2 is not available. kernels is installed, but the flash_attn kernel is not available. "
            "Benchmarking flash_attention_2 will not be possible."
        )
        return False
    return True
class BenchmarkConfig:
    """Configuration for a single benchmark scenario."""

    # Candidate values used when generating configuration sweeps (see `get_config_by_level`).
    all_attn_implementations = ["flash_attention_2", "eager", "sdpa", "flex_attention"]
    all_compiled_modes = [None, "default", "reduce-overhead", "max-autotune", "max-autotune-no-cudagraphs"]

    def __init__(
        self,
        warmup_iterations: int = 5,
        measurement_iterations: int = 20,
        gpu_monitoring: bool = True,  # NOTE: you may want to disable this at times as we have observed it could heavily slow down benchmarks on AMD
        continuous_batching: bool = False,
        batch_size: int = 1,
        sequence_length: int = 128,
        num_tokens_to_generate: int = 128,
        attn_implementation: str = "eager",
        compile_kwargs: dict[str, Any] | None = None,
        kernelize: bool = False,
        name: str | None = None,
        skip_validity_check: bool = False,
    ) -> None:
        """Build a scenario config; invalid option combinations are fixed up by `check_validity`."""
        # Benchmark parameters
        self.warmup_iterations = warmup_iterations
        self.measurement_iterations = measurement_iterations
        self.gpu_monitoring = gpu_monitoring
        self.continuous_batching = continuous_batching
        # Input parameters
        self.batch_size = batch_size
        self.sequence_length = sequence_length
        self.num_tokens_to_generate = num_tokens_to_generate
        # Generation parameters
        self.attn_implementation = attn_implementation
        # Optimization parameters
        if compile_kwargs is None:
            self.compile_config = None
        else:
            # Compile with fullgraph unless the caller explicitly opted out.
            compile_kwargs["fullgraph"] = compile_kwargs.get("fullgraph", True)
            self.compile_config = CompileConfig(**compile_kwargs)
        self.kernelize = kernelize
        # Constant parameters
        self.dtype = "torch.bfloat16"
        self.device = torch.accelerator.current_accelerator().type if is_torch_accelerator_available() else "cuda"
        # Must run before `infer_name` so the name reflects the (possibly corrected) options.
        self.check_validity(skip_validity_check)
        self.name = name if name is not None else self.infer_name()

    def check_validity(self, skip_validity_check: bool = False) -> None:
        """Mutate the config in place to remove unsupported option combinations (no-op if skipped)."""
        if skip_validity_check:
            return
        # If flash_attention_2 is selected but not available, default to SDPA
        if self.attn_implementation == "flash_attention_2" and not is_fa2_or_kernel_available():
            logger.error("Flash attention is not available. Defaulting to SDPA.")
            self.attn_implementation = "sdpa"
        # The combination of flash_attention_2, compile and generate is not supported # FIXME: support it
        if (
            not self.continuous_batching
            and self.attn_implementation == "flash_attention_2"
            and self.compile_config is not None
        ):
            logger.error(
                "The combination of flash_attention_2, compile and generate is not supported. Turning off compile."
            )
            self.compile_config = None
        # Continuous batching does not support flex attention as an attention implementation # FIXME: support it
        if self.attn_implementation == "flex_attention" and self.continuous_batching:
            logger.error(
                "Disabling continuous batching because of invalid configuration: flex attention is not supported."
            )
            self.continuous_batching = False
        # Continuous batching supports compile mode "default" or "max-autotune-no-cudagraphs"
        if (
            self.continuous_batching
            and self.compile_config is not None
            and self.compile_config.mode not in ["default", "max-autotune-no-cudagraphs"]
        ):
            logger.error(
                f"You have continuous batching and compile enabled, but {self.compile_config.mode = } is not supported."
                " Supported modes are: default, max-autotune-no-cudagraphs. Changing to default."
            )
            self.compile_config.mode = "default"

    @property
    def hash(self) -> str:
        # Stable digest of the config contents, used to identify identical scenarios.
        return hashlib.sha256(json.dumps(self.to_dict()).encode()).hexdigest()

    def infer_name(self, compact: bool = True) -> str:
        """Infer a human-readable name for the benchmark config, either compact or verbose."""
        if compact:
            iter_str = f"w{self.warmup_iterations}_i{self.measurement_iterations}"
            gpu_monitor_str = "monitored" if self.gpu_monitoring else "unmonitored"
            dimensions_str = f"b{self.batch_size}_s{self.sequence_length}_n{self.num_tokens_to_generate}"
            attn_code = self.attn_implementation
            compile_str = f"compiled_{self.compile_config.mode}" if self.compile_config is not None else "uncompiled"
            kernelize_str = "kernelized" if self.kernelize else "unkernelized"
            continuous_batching_str = "cb" if self.continuous_batching else "generate"
            sep = "-"
        else:
            iter_str = f"{self.warmup_iterations} warmup, {self.measurement_iterations} iterations"
            gpu_monitor_str = ("with" if self.gpu_monitoring else "no") + " GPU monitoring"
            dimensions_str = f"batch size {self.batch_size}, sequence length {self.sequence_length}, {self.num_tokens_to_generate} generated tokens"
            attn_code = f"{self.attn_implementation} attention"
            compile_str = "compiled" if self.compile_config is not None else "not compiled"
            kernelize_str = "kernelized" if self.kernelize else "not kernelized"
            continuous_batching_str = "continuous batching" if self.continuous_batching else "regular generate"
            sep = ", "
        return sep.join(
            [iter_str, gpu_monitor_str, dimensions_str, attn_code, compile_str, kernelize_str, continuous_batching_str]
        )

    def to_dict(self) -> dict[str, Any]:
        """Serialize the config; note `dtype`/`device` are derived constants and not included."""
        return {
            "name": self.name,
            "warmup_iterations": self.warmup_iterations,
            "measurement_iterations": self.measurement_iterations,
            "gpu_monitoring": self.gpu_monitoring,
            "continuous_batching": self.continuous_batching,
            "batch_size": self.batch_size,
            "sequence_length": self.sequence_length,
            "num_tokens_to_generate": self.num_tokens_to_generate,
            "attn_implementation": self.attn_implementation,
            "compile_kwargs": self.compile_config.to_dict() if self.compile_config is not None else None,
            "kernelize": self.kernelize,
        }

    @classmethod
    def from_dict(cls, data: dict[str, Any], skip_validity_check: bool = False) -> "BenchmarkConfig":
        """Deserialize a config produced by `to_dict`, filling in defaults for missing keys."""
        # NOTE(review): `gpu_monitoring` defaults to False here but True in `__init__` —
        # looks intentional for deserialization, but confirm against callers.
        return cls(
            warmup_iterations=data.get("warmup_iterations", 5),
            measurement_iterations=data.get("measurement_iterations", 20),
            gpu_monitoring=data.get("gpu_monitoring", False),
            continuous_batching=data.get("continuous_batching", False),
            batch_size=data.get("batch_size", 1),
            sequence_length=data.get("sequence_length", 128),
            num_tokens_to_generate=data.get("num_tokens_to_generate", 128),
            attn_implementation=data.get("attn_implementation", "eager"),
            compile_kwargs=data.get("compile_kwargs"),
            kernelize=data.get("kernelize", False),
            name=data.get("name"),
            skip_validity_check=skip_validity_check,
        )
def adapt_configs(
    configs: list[BenchmarkConfig],
    warmup_iterations: int | list[int] = 5,
    measurement_iterations: int | list[int] = 20,
    batch_size: int | list[int] = 1,
    sequence_length: int | list[int] = 128,
    num_tokens_to_generate: int | list[int] = 128,
    gpu_monitoring: bool | list[bool] = True,
) -> list[BenchmarkConfig]:
    """Clone every config once per combination of the given parameters.

    Scalar arguments are treated as one-element lists, so passing lists for some
    parameters expands the configs over the cartesian product of those values.
    """

    def ensure_list(value):
        # Scalars participate in the cartesian product as singletons.
        return value if isinstance(value, list) else [value]

    combinations = itertools.product(
        ensure_list(warmup_iterations),
        ensure_list(measurement_iterations),
        ensure_list(batch_size),
        ensure_list(sequence_length),
        ensure_list(num_tokens_to_generate),
        ensure_list(gpu_monitoring),
    )
    adapted_configs = []
    for warmup_iters, measurement_iters, bs, seqlen, ntok, monitor in combinations:
        for base_config in configs:
            data = base_config.to_dict()
            data.update(
                warmup_iterations=warmup_iters,
                measurement_iterations=measurement_iters,
                batch_size=bs,
                sequence_length=seqlen,
                num_tokens_to_generate=ntok,
                gpu_monitoring=monitor,
            )
            # Drop the old name so it gets re-inferred from the updated values.
            data.pop("name", None)
            adapted_configs.append(BenchmarkConfig.from_dict(data))
    return adapted_configs
def get_config_by_level(level: int) -> list[BenchmarkConfig]:
    """Return a list of benchmark configs whose coverage grows with `level`.

    Levels 0-2 add a few curated configs; level 3 generates all combinations of
    attention implementation / kernelization / continuous batching; level 4
    additionally sweeps every compile mode.
    """
    configs = []
    # Early return if level is greater than 3: we generate all combinations of configs, maybe even w/ all compile modes
    if level >= 3:
        for attn_implementation in BenchmarkConfig.all_attn_implementations:
            # Usually there is not much to gain by compiling with other modes, but we allow it for level 4
            compile_modes = BenchmarkConfig.all_compiled_modes if level >= 4 else [None, "default"]
            for cm in compile_modes:
                compile_kwargs = {"mode": cm} if cm is not None else None
                # A set is used so kernelize=True is only tried when kernelization is
                # available ({False, False} collapses to {False}). NOTE(review): set
                # iteration order is unspecified — confirm ordering does not matter downstream.
                for kernelize_on in {False, KERNELIZATION_AVAILABLE}:
                    for cb_on in [False, True]:
                        configs.append(
                            BenchmarkConfig(
                                attn_implementation=attn_implementation,
                                compile_kwargs=compile_kwargs,
                                kernelize=kernelize_on,
                                continuous_batching=cb_on,
                            )
                        )
        return configs
    # Otherwise, we add the configs for the given level
    if level >= 0:
        configs.append(BenchmarkConfig(attn_implementation="flex_attention", compile_kwargs={}))
    if level >= 1:
        configs.append(BenchmarkConfig(attn_implementation="flash_attention_2"))
        configs.append(BenchmarkConfig(attn_implementation="eager", compile_kwargs={}))
        configs.append(BenchmarkConfig(attn_implementation="flash_attention_2", continuous_batching=True))
    if level >= 2:
        configs.append(BenchmarkConfig(attn_implementation="sdpa", compile_kwargs={}))
        configs.append(BenchmarkConfig(attn_implementation="flex_attention", compile_kwargs={}, kernelize=True))
        configs.append(BenchmarkConfig(attn_implementation="flash_attention_2", kernelize=True))
        configs.append(BenchmarkConfig(attn_implementation="sdpa", continuous_batching=True))
    return configs
| {
"repo_id": "huggingface/transformers",
"file_path": "benchmark_v2/framework/benchmark_config.py",
"license": "Apache License 2.0",
"lines": 242,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
huggingface/transformers:benchmark_v2/framework/benchmark_runner.py | import gc
import json
import logging
import os
import pathlib
import re
import tempfile
import time
from datetime import datetime
from queue import Queue
from typing import Any
import torch
from datasets import Dataset
from huggingface_hub import HfApi
from tqdm import trange
from transformers import (
AutoModelForCausalLM,
AutoTokenizer,
GenerationConfig,
GenerationMixin,
is_torch_xpu_available,
)
from transformers.generation.streamers import BaseStreamer
from transformers.utils import is_torch_accelerator_available
from .benchmark_config import BenchmarkConfig
from .data_classes import BenchmarkMetadata, BenchmarkResult, GPURawMetrics, pretty_print_dict
from .hardware_metrics import GPUMonitor
# `kernels` is an optional dependency; fall back to None sentinels so callers can
# test `kernelize is not None` before using it.
try:
    from kernels import Mode, kernelize  # noqa: F401
except ImportError:
    kernelize = None
    Mode = None
DEFAULT_PROMPT = "\n".join([
"The French Revolution was a period of political and societal change in France that began with the Estates General of 1789 and ended with the Coup of 18 Brumaire on 9 November 1799.",
"Many of the revolution's ideas are considered fundamental principles of liberal democracy, and its values remain central to modern French political discourse.",
"It was caused by a combination of social, political, and economic factors which the existing regime proved unable to manage.",
"Financial crisis and widespread social distress led to the convocation of the Estates General in May 1789, its first meeting since 1614.",
"The representatives of the Third Estate broke away and re-constituted themselves as a National Assembly in June.",
"The Storming of the Bastille in Paris on 14 July led to a series of radical measures by the Assembly, including the abolition of feudalism, state control over the Catholic Church in France, and issuing the Declaration of the Rights of Man and of the Citizen.",
"The next three years were dominated by a struggle for political control.",
"King Louis XVI's attempted flight to Varennes in June 1791 further discredited the monarchy, and military defeats after the outbreak of the French Revolutionary Wars in April 1792 led to the insurrection of 10 August 1792.",
"As a result, the monarchy was replaced by the French First Republic in September, followed by the execution of Louis XVI himself in January 1793.",
"After another revolt in June 1793, the constitution was suspended, and political power passed from the National Convention to the Committee of Public Safety, dominated by radical Jacobins led by Maximilien Robespierre.",
"About 16,000 people were sentenced by the Revolutionary Tribunal and executed in the Reign of Terror, which ended in July 1794 with the Thermidorian Reaction.",
"Weakened by external threats and internal opposition, the Committee of Public Safety was replaced in November 1795 by the Directory.",
"Its instability ended in the coup of 18 Brumaire and the establishment of the Consulate, with Napoleon Bonaparte as First Consul.",
]) # fmt: skip
PUSH_TO_HUB_TOKEN = os.getenv("PUSH_TO_HUB_TOKEN", None)
def compact_json_numeric_arrays(data: dict) -> str:
    """Serialize `data` as indented JSON, collapsing arrays of plain numbers onto one line.

    Non-numeric arrays keep the default one-element-per-line formatting.
    Previously the pattern only matched unsigned decimal numbers, so arrays
    containing negative values or scientific notation were never compacted.
    """
    # A JSON number: optional sign, digits, optional fraction, optional exponent.
    number = r"-?\d+(?:\.\d+)?(?:[eE][+-]?\d+)?"
    # Match arrays that contain only numbers, whitespace, commas, and newlines
    pattern = rf"\[\s*\n\s*((?:{number}\s*,\s*)*{number})\s*\n\s*\]"

    def replace_numeric_array(match):
        # Collapse internal whitespace/newlines while keeping the commas.
        compact_content = re.sub(r"\s+", " ", match.group(1)).strip()
        return f"[{compact_content}]"

    return re.sub(pattern, replace_numeric_array, json.dumps(data, indent=4, default=str), flags=re.DOTALL)
def get_git_revision() -> str:
    """Return the current git commit hash of the repository containing this file.

    Handles both an attached HEAD (``ref: refs/heads/...``) and a detached HEAD
    (where HEAD contains the commit hash directly); falls back to ``packed-refs``
    when the loose ref file was packed by ``git gc``. The original implementation
    crashed with FileNotFoundError in both of those cases.
    """
    base_path = pathlib.Path(__file__).parent.parent.parent
    git_dir = base_path / ".git"
    head_content = (git_dir / "HEAD").read_text().strip()
    if not head_content.startswith("ref: "):
        # Detached HEAD: the file holds the commit hash itself.
        return head_content
    ref = head_content.removeprefix("ref: ")
    ref_file = git_dir / ref
    if ref_file.is_file():
        return ref_file.read_text().strip()
    # The loose ref may have been packed; look it up in packed-refs.
    packed_refs = git_dir / "packed-refs"
    if packed_refs.is_file():
        for line in packed_refs.read_text().splitlines():
            if not line.startswith("#") and line.endswith(ref):
                return line.split(" ", 1)[0]
    raise FileNotFoundError(f"Could not resolve git ref {ref!r} in {git_dir}")
def flush_memory(flush_compile: bool = True) -> None:
    """Flush GPU memory and run garbage collection. If the flush_compile flag is set, we also clear
    everything related to the compile cache."""
    gc.collect()
    # If needed, flush everything related to torch.compile
    if flush_compile:
        # Dynamo resets
        torch._dynamo.reset()
        torch._dynamo.reset_code_caches()
        # NOTE(review): the attributes below are private torch internals and may not exist
        # on every torch version, hence the defensive `hasattr` checks.
        if hasattr(torch._inductor, "codecache"):
            # Clear FX graph cache
            if hasattr(torch._inductor.codecache, "FxGraphCache"):
                torch._inductor.codecache.FxGraphCache.clear()
            # Clear PyCodeCache
            if hasattr(torch._inductor.codecache, "PyCodeCache"):
                torch._inductor.codecache.PyCodeCache.cache_clear()
            # Clear TritonFuture cache (for async compilation)
            if hasattr(torch._inductor.codecache, "TritonFuture"):
                if hasattr(torch._inductor.codecache.TritonFuture, "_compile_cache"):
                    torch._inductor.codecache.TritonFuture._compile_cache.clear()
    # Clear device cache
    if torch.cuda.is_available():
        torch.cuda.empty_cache()
        torch.cuda.synchronize()
    elif is_torch_xpu_available():
        torch.xpu.empty_cache()
        torch.xpu.synchronize()
    gc.collect()
class BenchmarkStreamer(BaseStreamer):
    """Streamer that records a wall-clock timestamp for every generated token batch."""

    def __init__(self, **kwargs) -> None:
        self.timeout = kwargs.pop("timeout", 10)
        self.timestamps = []  # one perf_counter reading per `put`/`end` call
        self.text_queue = Queue()
        self.stop_signal = None

    def put(self, value):
        """Record the arrival time of `value` and queue it for consumers."""
        self.timestamps.append(time.perf_counter())
        self.text_queue.put(value)

    def end(self):
        """Record the final timestamp and enqueue the stop sentinel."""
        self.timestamps.append(time.perf_counter())
        self.text_queue.put(self.stop_signal)

    def __iter__(self):
        return self

    def __next__(self):
        token = self.text_queue.get(timeout=self.timeout)
        if token == self.stop_signal:
            raise StopIteration()
        return token
class BenchmarkRunner:
"""Main benchmark runner that coordinates benchmark execution."""
    def __init__(
        self,
        logger: logging.Logger,
        output_dir: str | None = None,
        branch_name: str | None = None,
        commit_id: str | None = None,
        commit_message: str | None = None,
    ) -> None:
        """Create a runner; results are written under `output_dir` (defaults to `<repo>/benchmark_results`)."""
        # Those stay constant for the whole run
        self.logger = logger
        if output_dir is None:
            output_dir = os.path.join(os.path.dirname(os.path.dirname(__file__)), "benchmark_results")
        self.output_dir = output_dir
        self.branch_name = branch_name
        # Resolved from the local .git directory when not provided explicitly.
        self.commit_id = get_git_revision() if commit_id is None else commit_id
        self.commit_message = commit_message
        os.makedirs(self.output_dir, exist_ok=True)
        self.profile_dir = None
        # Attributes that are reset for each model
        self._setup_for = ""
        # Attributes that are reset for each run
        self.model: GenerationMixin | None = None
        self.device_type = torch.accelerator.current_accelerator().type if is_torch_accelerator_available() else "cuda"
        self.torch_accelerator_module = getattr(torch, self.device_type, torch.cuda)
    def cleanup(self) -> None:
        """Drop the current model and flush GPU / compile caches between scenarios."""
        del self.model
        self.model = None
        flush_memory()
    def setup_benchmark(self, model_id: str, config: BenchmarkConfig) -> None:
        """Prepare tokenizer, inputs, generation config and model for the given scenario."""
        # Some attributes only need to be set once per model
        if self._setup_for != model_id:
            self.tokenizer = AutoTokenizer.from_pretrained(model_id)
            # We set the EOS token to the padding token for open-ended generation
            self.tokenizer.eos_token = self.tokenizer.pad_token
            self._setup_for = model_id
        # Prepare inputs
        self.inputs = self.tokenizer(
            [DEFAULT_PROMPT for _ in range(config.batch_size)],
            return_tensors="pt",
            max_length=config.sequence_length,
            truncation=True,
            return_attention_mask=True,
        ).to(config.device)
        self.inputs["use_cache"] = True
        # Prepare generation config; do_sample=False keeps runs deterministic.
        generation_config_kwargs = {
            "do_sample": False,
            "max_new_tokens": config.num_tokens_to_generate,
        }
        # Add compile config if found
        if config.compile_config is not None:
            generation_config_kwargs.update(compile_config=config.compile_config)
            # To trigger compile in generate, we need to set the cache to static
            if not config.continuous_batching:
                generation_config_kwargs.update(cache_implementation="static")
        generation_config = GenerationConfig(**generation_config_kwargs)
        # Load model
        self.logger.debug(f"Loading model {model_id} on device {config.device}...")
        dtype = getattr(torch, config.dtype.removeprefix("torch."))
        # Only kernelize when the optional `kernels` package could actually be imported.
        use_kernels = config.kernelize and kernelize is not None and Mode is not None
        self.model = AutoModelForCausalLM.from_pretrained(
            model_id,
            dtype=dtype,
            attn_implementation=config.attn_implementation,
            generation_config=generation_config,
            use_kernels=use_kernels,
            device_map=config.device,
        )
        self.model = self.model.eval()
def run_benchmark(self, config: BenchmarkConfig, num_tokens_to_profile: int = 0) -> BenchmarkResult | None:
    """Run a single benchmark with the given model ID and config.

    Returns the accumulated `BenchmarkResult`, or `None` when the initial
    validation run reports a negative latency (scenario not runnable).
    """
    with torch.no_grad():
        self.logger.info(f"Running benchmark scenario: {config.name}")
        self.logger.debug(f"Full config: {config.to_dict()}")
        # Quick validation: try one measurement first to see if this scenario works
        flush_memory()
        e2e_latency = self.time_generate(config, warmup=True)[0]
        if e2e_latency < 0:
            self.logger.warning(f"Skipping config {config.name}: {e2e_latency = }")
            return None
        # Warmup runs: results are discarded, only side effects (caches, compile) matter
        self.logger.info(f"Warming up with {config.warmup_iterations} iterations...")
        for _ in trange(config.warmup_iterations, desc="Warmup"):
            self.time_generate(config, warmup=True)
        self.logger.info("Warmup over.")
        # Measurement runs
        result = BenchmarkResult()
        self.logger.info(f"Benchmarking with {config.measurement_iterations} iterations.")
        for _ in trange(config.measurement_iterations, desc="Benchmarking"):
            e2e_latency, timestamps, shape_and_decoded_output, gpu_metrics = self.time_generate(
                config, warmup=False
            )
            result.accumulate(e2e_latency, timestamps, shape_and_decoded_output, gpu_metrics)
        self.logger.info("Benchmarking done. Cleaning up.")
        # Profile if needed
        if num_tokens_to_profile > 0:
            self.profile_generate(num_tokens_to_profile, config.name)
        return result
def time_generate(
    self, config: BenchmarkConfig, warmup: bool
) -> tuple[float, list[float], str, GPURawMetrics | None]:
    """Time one generation pass.

    Returns (end-to-end latency in seconds, per-sequence token timestamps
    re-based to the generation start, a "shape | decoded text" summary string,
    raw GPU metrics or None when monitoring is off).
    """
    # Prepare gpu monitoring if needed (never during warmup)
    if config.gpu_monitoring and not warmup:
        gpu_monitor = GPUMonitor(logger=self.logger)
        gpu_monitor.start()
    else:
        gpu_monitor = None
    # Generate and time
    if config.continuous_batching:
        inputs = self.inputs["input_ids"].tolist()
        wall_time_0 = time.perf_counter()
        outputs = self.model.generate_batch(inputs, allow_block_sharing=False, record_timestamps=True)
    else:
        streamer = BenchmarkStreamer()
        wall_time_0 = time.perf_counter()
        outputs = self.model.generate(**self.inputs, streamer=streamer)
    wall_time_1 = time.perf_counter()
    gpu_metrics = gpu_monitor.stop_and_collect() if gpu_monitor is not None else None
    # Retrieve timestamps and results in a way that allows similar post-processing
    input_tokens = self.inputs["input_ids"].size(-1)
    if config.continuous_batching:
        timestamps = [output.timestamps[:] for output in outputs.values()]
        results = torch.tensor([output.generated_tokens[:] for output in outputs.values()])
    else:
        timestamps = [streamer.timestamps[1:]]  # skip the first timestamp because it's the input tokens
        results = outputs[:, input_tokens:]  # keep only the newly generated tokens
    # Release the raw outputs before flushing so memory can actually be reclaimed
    outputs = None
    flush_memory(flush_compile=False)
    # Check if generation had the right number of tokens
    if results.size(-1) != config.num_tokens_to_generate:
        raise RuntimeError(f"Generated {results.size(-1)} tokens, expected {config.num_tokens_to_generate}")
    # Decode outputs (first sequence only, as a representative sample)
    decoded_output = self.tokenizer.decode(results[0], skip_special_tokens=True)
    shape_and_decoded_output = f"{tuple(results.shape)} | {decoded_output}"
    # Compute metrics: timestamps become relative to the generation start
    e2e_latency = wall_time_1 - wall_time_0
    timestamps = torch.tensor(timestamps).sub(wall_time_0).tolist()
    self.logger.info(
        f"Time generate done in {e2e_latency:.2f} seconds. Memory usage: {self.torch_accelerator_module.memory_allocated() / 1024**2:.2f} MB"
    )
    return e2e_latency, timestamps, shape_and_decoded_output, gpu_metrics
def profile_generate(self, num_tokens_to_profile: int, config_name: str) -> None:
    """Profile the latency of a call to model.generate() with the given (inputs) and (max_new_tokens)."""
    # Profile CPU activity plus whichever accelerator backend is active
    activities = [torch.profiler.ProfilerActivity.CPU]
    if self.device_type == "cuda":
        activities.append(torch.profiler.ProfilerActivity.CUDA)
    elif self.device_type == "xpu":
        activities.append(torch.profiler.ProfilerActivity.XPU)
    profiler = torch.profiler.profile(
        activities=activities,
        record_shapes=True,
    )
    with profiler as prof:
        _ = self.model.generate(
            **self.inputs,
            max_new_tokens=num_tokens_to_profile,
        )
    # Lazily create the sibling "<output_dir>_profiles" directory on first use
    if self.profile_dir is None:
        self.profile_dir = self.output_dir + "_profiles"
        os.makedirs(self.profile_dir, exist_ok=True)
    # One chrome-trace JSON per benchmark config
    prof.export_chrome_trace(f"{self.profile_dir}/{config_name}.json")
@torch.inference_mode()
def run_benchmarks(
    self,
    model_id: str,
    benchmark_configs: list[BenchmarkConfig],
    num_tokens_to_profile: int = 0,
    pretty_print_summary: bool = True,
    summarized: bool = True,
) -> tuple[str, dict[str, Any]]:
    """Run multiple benchmarks for the given model ID and list of benchmark configs.

    Returns a (timestamp, results) tuple, where results maps each config hash to
    its metadata, measurements and config. Failed runs are kept with
    success=False and an empty BenchmarkResult.
    """
    all_results = {}
    # Single timestamp for the whole campaign so every incremental save shares it
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    start_time = time.perf_counter()
    n_configs = len(benchmark_configs)
    for i, config in enumerate(benchmark_configs):
        # Skip if already run (configs are deduplicated by their hash)
        if config.hash in all_results:
            self.logger.info(f"Skipping duplicate config {config.name} for model {model_id} ({i + 1}/{n_configs})")
            continue
        # Otherwise, run the benchmark
        self.setup_benchmark(model_id, config)
        self.logger.info(
            f"Running benchmark of model {model_id} with scenario: {config.name} ({i + 1}/{n_configs})"
        )
        # Launch benchmark in a try/except block to avoid stopping the whole run if one benchmark fails
        try:
            result = self.run_benchmark(config, num_tokens_to_profile)
        except Exception as e:
            self.logger.error(f"Error running with scenario: {config.name}:\n{repr(e)}")
            result = None
        # Memoize: failures are recorded too, with success=False and empty measurements
        all_results[config.hash] = {
            "metadata": BenchmarkMetadata(
                model_id=model_id,
                branch_name=self.branch_name,
                commit_id=self.commit_id,
                commit_message=self.commit_message,
                success=result is not None,
            ),
            "measurements": result if result is not None else BenchmarkResult(),
            "config": config,
        }
        # Cleanup model and save results incrementally after each config
        self.cleanup()
        self.save_results(model_id, all_results, timestamp=timestamp, summarized=summarized)
    if len(all_results) < 1:
        raise RuntimeError("No benchmark was run successfully")
    if pretty_print_summary:
        print()
        print("=" * 100)
        print(f"Finished benchmarks in {time.perf_counter() - start_time:.2f} seconds")
        print(f"Total number of benchmarks: {len(all_results)}")
        print("First run metadata:")
        first_key = list(all_results.keys())[0]
        first_metadata = all_results[first_key]["metadata"].to_dict()
        # Hardware info is printed inline with the other metadata keys
        hardware_info = first_metadata.pop("hardware_info")
        pretty_print_dict(first_metadata | hardware_info, tabs=1)
        for result in all_results.values():
            print("=" * 100)
            print(f"Config: {result['config'].infer_name(compact=False)}\n")
            result["measurements"].pprint(
                batch_size=result["config"].batch_size,
                num_generated_tokens=result["config"].num_tokens_to_generate,
                tabs=1,
            )
        print("=" * 100)
    return (timestamp, all_results)
def save_results(self, model_name: str, results: dict, timestamp: str = "", summarized: bool = True) -> str:
    """Save benchmark results to JSON file.

    Results land in `<output_dir>/<model_name>/<model_name>_benchmark_<timestamp>.json`;
    the file path is returned.
    """
    # Group files under a per-model directory, with "/" sanitized out of the name
    model_name = model_name.replace("/", "_")
    model_dir = os.path.join(self.output_dir, model_name)
    os.makedirs(model_dir, exist_ok=True)
    # Fall back to the current time when no campaign timestamp was provided
    if not timestamp:
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    filepath = os.path.join(model_dir, f"{model_name}_benchmark_{timestamp}.json")
    # Serialize every entry (metadata / measurements / config) to plain dicts
    converted_results = {
        cfg_hash: {
            "metadata": entry["metadata"].to_dict(),
            "measurements": entry["measurements"].to_dict(summarized=summarized),
            "config": entry["config"].to_dict(),
        }
        for cfg_hash, entry in results.items()
    }
    # Save to JSON file
    with open(filepath, "w") as f:
        f.write(compact_json_numeric_arrays(converted_results))
    self.logger.info(f"Results saved to {filepath}")
    return filepath
def push_results_to_hub(self, dataset_id: str, results: dict[Any, Any], timestamp: str) -> None:
    """Upload benchmark results to a Hub dataset repo, once summarized and once in full.

    Raises:
        ValueError: if the PUSH_TO_HUB_TOKEN environment variable is not set.
    """
    if PUSH_TO_HUB_TOKEN is None:
        raise ValueError(
            "PUSH_TO_HUB_TOKEN is not set, cannot push results to the Hub. When setting dataset_id, please also set the PUSH_TO_HUB_TOKEN environment variable."
        )
    api = HfApi()
    n_results = len(results)
    # Each run is uploaded twice: a compact summarized file and a full one
    for summarized in [False, True]:
        self.logger.info(f"Pushing {n_results} results to: {dataset_id} with {summarized = }")
        rows = []
        for cfg_hash, entry in results.items():
            row = {
                "benchmark_config_hash": cfg_hash,
                "config": entry["config"].to_dict(),
                "measurements": entry["measurements"].to_dict(summarized=summarized),
                "metadata": entry["metadata"].to_dict(),
            }
            rows.append(row)
        ds = Dataset.from_list(rows)
        with tempfile.TemporaryDirectory() as tmp:
            file_name = "summarized_results" if summarized else "full_results"
            jsonl_path = os.path.join(tmp, f"{file_name}.jsonl")
            # Write JSONL by hand to guarantee one row per line
            with open(jsonl_path, "w") as f:
                json_lines = []
                for ex in ds:
                    json_lines.append(json.dumps(ex, ensure_ascii=False))
                f.write("\n".join(json_lines))
            # NOTE: we expect the repository to already exist
            timestamp = timestamp if timestamp else datetime.now().strftime("%Y%m%d_%H%M%S")
            file_name = file_name + "/" + f"benchmark_run_{timestamp}.jsonl"
            api.upload_file(
                path_or_fileobj=jsonl_path,
                path_in_repo=file_name,
                repo_id=dataset_id,
                repo_type="dataset",
                token=PUSH_TO_HUB_TOKEN,
            )
        self.logger.info(f"Successfully uploaded results to: {dataset_id} with {summarized = }")
| {
"repo_id": "huggingface/transformers",
"file_path": "benchmark_v2/framework/benchmark_runner.py",
"license": "Apache License 2.0",
"lines": 405,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
huggingface/transformers:benchmark_v2/framework/data_classes.py | from dataclasses import dataclass
from datetime import datetime, timezone
from typing import Any
import numpy as np
from .hardware_metrics import GPURawMetrics, HardwareInfo
def compute_basic_statistics(measurements: list[float]) -> dict[str, float]:
    """Compute summary statistics (avg/std/min/med/max/p95) over `measurements`.

    Returns all-zero statistics for an empty input. Values are plain Python
    floats (the original returned numpy scalars despite the annotation, and
    repeated the empty-input guard once per statistic).
    """
    keys = ("avg", "std", "min", "med", "max", "p95")
    if not measurements:
        # Single early guard instead of one `if measurements else 0` per entry;
        # numpy raises/warns on empty input for min/max/percentile.
        return dict.fromkeys(keys, 0)
    values = np.asarray(measurements, dtype=float)
    return {
        "avg": float(np.mean(values)),
        "std": float(np.std(values)),
        "min": float(np.min(values)),
        "med": float(np.median(values)),
        "max": float(np.max(values)),
        "p95": float(np.percentile(values, 95)),
    }
def add_unit_to_duration(stats: dict[str, float]) -> dict[str, str]:
    """Format each duration (in seconds) into a human-readable string with a unit.

    Note: destructive — the input dict is updated in place and then returned.
    """

    def _format(value: float) -> str:
        # Largest unit first; the first matching threshold wins.
        if value > 3600:
            return f"{(value / 3600):.2f}hr"
        if value > 60:
            return f"{(value / 60):.2f}min"
        if value > 1:
            return f"{value:.2f}s"
        if value > 1e-3:
            return f"{(value * 1e3):.2f}ms"
        if value > 1e-6:
            return f"{(value * 1e6):.2f}us"
        return f"{(value * 1e9):.2f}ns"

    for key, value in list(stats.items()):
        stats[key] = _format(value)
    return stats
def equalize_lengths_and_collate(stats: dict[str, dict[str, str]]) -> dict[str, str]:
    """Pad each statistic to a shared column width, then collate each entry into one line.

    Note: This operation is destructive as it will update values in place before returning a new correctly formatted dict
    """
    stat_keys = ("avg", "std", "min", "med", "max", "p95")
    # First pass: left-justify every column to the width of its widest value.
    for stat_key in stat_keys:
        widest = max(len(entry[stat_key]) for entry in stats.values())
        for entry in stats.values():
            entry[stat_key] = entry[stat_key].ljust(widest, " ")
    # Second pass: join the aligned "key=value" pairs into one line per entry.
    collated = {}
    for name, entry in stats.items():
        collated[name] = " ".join(f"{stat_key}={entry[stat_key]}" for stat_key in stat_keys)
    return collated
def pretty_print_dict(data: dict[str, str], tabs: int = 0) -> None:
    """Print `data` one `key...: value` line at a time, keys dot-padded so values align.

    `tabs` controls how much indentation is prepended to every line.
    """
    indent = " " * tabs
    # Pad keys with trailing dots to one column past the longest key.
    width = max(len(key) for key in data.keys())
    for key, value in data.items():
        print(f"{indent}{key.ljust(width + 1, '.')}: {value}")
@dataclass
class BenchmarkMetadata:
    """Metadata collected for each benchmark run."""

    model_id: str
    timestamp: str
    branch_name: str
    commit_id: str
    commit_message: str
    hardware_info: HardwareInfo
    success: bool

    def __init__(
        self, model_id: str, commit_id: str, branch_name: str = "main", commit_message: str = "", success: bool = True
    ) -> None:
        # This explicit __init__ takes precedence over the dataclass-generated one:
        # timestamp and hardware_info are derived here rather than passed by callers.
        self.model_id = model_id
        self.commit_id = commit_id
        self.branch_name = branch_name
        self.commit_message = commit_message
        self.success = success
        # Recorded once at construction time, UTC, ISO-8601 formatted.
        self.timestamp = datetime.now(timezone.utc).isoformat()
        # Hardware is probed once per metadata object.
        self.hardware_info = HardwareInfo()

    def to_dict(self) -> dict[str, Any]:
        """Serialize to plain JSON-compatible types (hardware info nested as a dict)."""
        return {
            "model_id": self.model_id,
            "timestamp": self.timestamp,
            "branch_name": self.branch_name,
            "commit_id": self.commit_id,
            "commit_message": self.commit_message,
            "hardware_info": self.hardware_info.to_dict(),
            "success": self.success,
        }
class BenchmarkResult:
    """Result from a series of benchmark runs."""

    def __init__(self) -> None:
        # All lists below are index-aligned: entry i describes measurement run i.
        self.e2e_latency = []
        self._timestamps = []
        self.time_to_first_token = []
        self.inter_token_latency = []
        self.shape_and_decoded_outputs = []
        self.gpu_metrics = []

    def accumulate(
        self,
        e2e_latency: float,
        timestamps: list[list[float]],
        shape_and_decoded_output: str,
        gpu_metrics: GPURawMetrics | None,
    ) -> None:
        """Record the measurements of one benchmark run."""
        self.e2e_latency.append(e2e_latency)
        self._timestamps.append(timestamps)
        self._accumulate_ttft_and_itl(timestamps)
        self.shape_and_decoded_outputs.append(shape_and_decoded_output)
        self.gpu_metrics.append(gpu_metrics)

    def _accumulate_ttft_and_itl(self, timestamps: list[list[float]]) -> None:
        """Derive time-to-first-token and inter-token latency from per-token timestamps.

        NOTE(review): `np.array` requires every inner sequence to have the same
        number of timestamps (indexed as 2D below) — confirm for continuous batching.
        """
        timestamps = np.array(timestamps)
        # Earliest first-token time across sequences ("tftt" is a historical typo for ttft)
        tftt = np.min(timestamps[:, 0])
        # Mean (last - first) span per sequence, divided by the number of token gaps
        itl = np.mean(timestamps[:, -1] - timestamps[:, 0]) / (timestamps.shape[1] - 1)
        self.time_to_first_token.append(tftt)
        self.inter_token_latency.append(itl)

    def to_dict(self, summarized: bool = False) -> dict[str, Any]:
        """Serialize to plain types; `summarized=True` drops the bulky per-run payloads."""
        # Save GPU metrics as None if it contains only None values or if we are summarizing
        # NOTE(review): a mix of None and real metrics would break the list branch —
        # assumed to be all-or-nothing per result.
        if summarized or all(gm is None for gm in self.gpu_metrics):
            gpu_metrics = None
        else:
            gpu_metrics = [gm.to_dict() for gm in self.gpu_metrics]
        return {
            "e2e_latency": self.e2e_latency,
            "time_to_first_token": self.time_to_first_token,
            "inter_token_latency": self.inter_token_latency,
            "shape_and_decoded_outputs": self.shape_and_decoded_outputs,
            "gpu_metrics": gpu_metrics,
            "timestamps": None if summarized else self._timestamps,
        }

    @classmethod
    def from_dict(cls, data: dict[str, Any]) -> "BenchmarkResult":
        """Rebuild an instance from `to_dict` output (inverse, modulo summarization losses)."""
        # Handle GPU metrics, which is saved as None if it contains only None values
        if data["gpu_metrics"] is None:
            gpu_metrics = [None for _ in range(len(data["e2e_latency"]))]
        else:
            gpu_metrics = [GPURawMetrics.from_dict(gm) for gm in data["gpu_metrics"]]
        # Handle timestamps, which can be saved as None to reduce file size
        if data["timestamps"] is None:
            timestamps = [None for _ in range(len(data["e2e_latency"]))]
        else:
            timestamps = data["timestamps"]
        # Create a new instance and accumulate the data
        new_instance = cls()
        new_instance.e2e_latency = data["e2e_latency"]
        new_instance._timestamps = timestamps
        new_instance.time_to_first_token = data["time_to_first_token"]
        new_instance.inter_token_latency = data["inter_token_latency"]
        new_instance.shape_and_decoded_outputs = data["shape_and_decoded_outputs"]
        new_instance.gpu_metrics = gpu_metrics
        return new_instance

    def get_throughput(self, total_generated_tokens: int) -> list[float]:
        """Per-run throughput in tokens per second."""
        return [total_generated_tokens / e2e_latency for e2e_latency in self.e2e_latency]

    def pprint(self, batch_size: int = 0, num_generated_tokens: int = 0, tabs: int = 0) -> None:
        """Pretty-print aligned statistics; throughput is only shown when batch_size > 0."""
        measurements = {
            "E2E Latency": add_unit_to_duration(compute_basic_statistics(self.e2e_latency)),
            "Time to First Token": add_unit_to_duration(compute_basic_statistics(self.time_to_first_token)),
        }
        if len(self.inter_token_latency) > 0:
            measurements["Inter-Token Latency"] = add_unit_to_duration(
                compute_basic_statistics(self.inter_token_latency)
            )
        if batch_size > 0:
            throughput_stats = compute_basic_statistics(self.get_throughput(batch_size * num_generated_tokens))
            measurements["Throughput"] = {key: f"{value:.2f}tok/s" for key, value in throughput_stats.items()}
        dict_to_pprint = equalize_lengths_and_collate(measurements)
        pretty_print_dict(dict_to_pprint, tabs=tabs)
| {
"repo_id": "huggingface/transformers",
"file_path": "benchmark_v2/framework/data_classes.py",
"license": "Apache License 2.0",
"lines": 152,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
huggingface/transformers:benchmark_v2/framework/hardware_metrics.py | import logging
import subprocess
import sys
import time
from dataclasses import dataclass
from enum import Enum
from logging import Logger
from multiprocessing import Pipe, Process
from multiprocessing.connection import Connection
from transformers.utils.import_utils import is_cuda_platform, is_rocm_platform
if is_cuda_platform():
import pynvml
if is_rocm_platform():
import amdsmi
import psutil
import torch
from transformers.utils import is_torch_accelerator_available
_logger = logging.getLogger(__name__)
# Helpers to retrieve information about the hardware
def get_device_name_and_memory_total() -> tuple[str, float]:
    """Returns the name and memory total of GPU 0."""
    if is_torch_accelerator_available():
        device_type = torch.accelerator.current_accelerator().type
    else:
        device_type = "cuda"
    # Fall back to the CUDA module when torch has no submodule for this device type
    accelerator = getattr(torch, device_type, torch.cuda)
    properties = accelerator.get_device_properties(0)
    return properties.name, properties.total_memory / 1024**3
class HardwareInfo:
"""A class to hold information about the hardware."""
def __init__(self) -> None:
# Retrieve GPU stats
try:
self.gpu_name, self.gpu_memory_total_gb = get_device_name_and_memory_total()
except Exception:
self.gpu_name, self.gpu_memory_total_gb = None, None
# Retrieve python, torch and CUDA version
self.python_version = f"{sys.version.split()[0]}"
self.torch_version = torch.__version__
if hasattr(torch, "cuda") and torch.cuda.is_available():
self.cuda_version = torch.version.cuda
else:
self.cuda_version = None
# Retrieve general hardware information
self.cpu_count = psutil.cpu_count()
self.memory_total_mb = int(psutil.virtual_memory().total / (1024 * 1024))
def to_dict(self) -> dict[str, None | int | float | str]:
return {
"gpu_name": self.gpu_name,
"gpu_memory_total_gb": self.gpu_memory_total_gb,
"python_version": self.python_version,
"torch_version": self.torch_version,
}
# Functions to get information about the GPU
def get_amd_gpu_stats(device_handle) -> tuple[int, float]:
    """Get AMD GPU stats (utilization %, VRAM used in GB) using amdsmi library."""
    activity = amdsmi.amdsmi_get_gpu_activity(device_handle)
    vram = amdsmi.amdsmi_get_gpu_vram_usage(device_handle)
    # Convert bytes to GB for the memory reading
    return int(activity["gfx_activity"]), float(vram["vram_used"]) / 1024**3
def get_intel_xpu_stats() -> tuple[int, float]:
    """Returns the utilization and memory used of an Intel XPU"""
    # xpu-smi outputs CSV format: Timestamp, DeviceId, GPU Memory Utilization (%), GPU Memory Used (MiB)
    raw = subprocess.check_output(["xpu-smi", "dump", "-m", "5,18", "-n", "1"])
    lines = raw.decode("utf-8").strip().split("\n")
    # Collect (device_id, utilization, memory MiB) for every card with valid readings,
    # skipping the CSV header line
    per_card = []
    for line in lines[1:]:
        fields = line.split(",")
        if len(fields) < 4:
            continue
        device_id = fields[1].strip()
        utilization_str = fields[2].strip()
        memory_used_str = fields[3].strip()
        if "N/A" in (utilization_str, memory_used_str):
            continue
        per_card.append((device_id, int(float(utilization_str)), float(memory_used_str)))
    if not per_card:
        return 0, 0.0
    # Report the busiest card, converting MiB to GB
    _, utilization, memory_used_mib = max(per_card, key=lambda card: card[1])
    return utilization, memory_used_mib / 1024
def get_nvidia_gpu_stats(device_handle) -> tuple[int, float]:
    """Returns the utilization and memory used (in GB) of an NVIDIA GPU using pynvml."""
    gpu_utilization = pynvml.nvmlDeviceGetUtilizationRates(device_handle).gpu
    used_bytes = pynvml.nvmlDeviceGetMemoryInfo(device_handle).used
    # Memory is reported in bytes; convert to GB
    return int(gpu_utilization), float(used_bytes / 1024**3)
# Simple data classes to hold the raw GPU metrics
class GPUMonitoringStatus(Enum):
    """Status of GPU monitoring."""

    SUCCESS = "success"  # samples were collected without issue
    FAILED = "failed"  # the monitoring process crashed or could not report back
    NO_GPUS_AVAILABLE = "no_gpus_available"  # no (supported) GPU was detected
    NO_SAMPLES_COLLECTED = "no_samples_collected"  # worker ran but gathered nothing
@dataclass
class GPURawMetrics:
    """Raw values for GPU utilization and memory used."""

    utilization: list[float]  # in percent
    memory_used: list[float]  # in GB
    timestamps: list[float]  # in seconds
    timestamp_0: float  # in seconds
    monitoring_status: GPUMonitoringStatus

    def to_dict(self) -> dict[str, None | int | float | str]:
        """Serialize to JSON-compatible types (the status enum becomes its string value)."""
        payload = {
            field: getattr(self, field)
            for field in ("utilization", "memory_used", "timestamps", "timestamp_0")
        }
        payload["monitoring_status"] = self.monitoring_status.value
        return payload

    @classmethod
    def from_dict(cls, data: dict[str, None | int | float | str]) -> "GPURawMetrics":
        """Create a GPURawMetrics instance from a dictionary (inverse of `to_dict`)."""
        return cls(
            data["utilization"],
            data["memory_used"],
            data["timestamps"],
            data["timestamp_0"],
            GPUMonitoringStatus(data["monitoring_status"]),
        )
# Main class, used to monitor the GPU utilization during benchmark execution
class GPUMonitor:
    """Monitor GPU utilization during benchmark execution using a separate process."""

    def __init__(self, sample_interval_sec: float = 0.05, logger: Logger | None = None):
        """Detect the GPU vendor and prepare (but do not start) monitoring.

        Args:
            sample_interval_sec: delay between two consecutive samples, in seconds.
            logger: optional logger; falls back to this module's logger.
        """
        self.sample_interval_sec = sample_interval_sec
        self.logger = logger if logger is not None else _logger
        # gpu_type stays None when no supported GPU is found, which makes start() a no-op
        self.gpu_type = None
        self.process = None
        device_type = torch.accelerator.current_accelerator().type if is_torch_accelerator_available() else "cuda"
        torch_accelerator_module = getattr(torch, device_type, torch.cuda)
        self.num_available_gpus = torch_accelerator_module.device_count()
        if self.num_available_gpus == 0:
            self.logger.warning(f"No GPUs detected by torch.{device_type}.device_count().")
            return
        # Determine GPU type from the device name reported by torch
        device_name, _ = get_device_name_and_memory_total()
        if "amd" in device_name.lower():
            self.gpu_type = "amd"
        elif "nvidia" in device_name.lower():
            self.gpu_type = "nvidia"
        elif "intel" in device_name.lower() or device_type == "xpu":
            self.gpu_type = "intel"
        else:
            self.logger.warning(f"Unsupported GPU for monitoring: {device_name}")

    @staticmethod
    def _monitor_worker(gpu_type: str, sample_interval_sec: float, connection: Connection):
        """Worker process for GPU monitoring."""
        gpu_utilization = []
        gpu_memory_used = []
        timestamps = []
        device_handle = None
        # Initialize GPU-specific monitoring (the intel path needs no handle: it shells out to xpu-smi)
        if gpu_type == "amd":
            amdsmi.amdsmi_init()
            device_handle = amdsmi.amdsmi_get_processor_handles()[0]
        elif gpu_type == "nvidia":
            pynvml.nvmlInit()
            device_handle = pynvml.nvmlDeviceGetHandleByIndex(0)
        # Signal ready
        try:
            connection.send(0)
        except Exception:
            # Parent side is gone; nothing to monitor for
            return
        # Monitoring loop: sample until the parent sends anything through the pipe
        stop = False
        while not stop:
            try:
                if gpu_type == "amd":
                    utilization, memory_used = get_amd_gpu_stats(device_handle)
                elif gpu_type == "nvidia":
                    utilization, memory_used = get_nvidia_gpu_stats(device_handle)
                elif gpu_type == "intel":
                    utilization, memory_used = get_intel_xpu_stats()
                else:
                    break
                gpu_utilization.append(utilization)
                gpu_memory_used.append(memory_used)
                timestamps.append(time.time())
            except Exception as e:
                # Skips failed measurements
                _logger.debug(f"Failed to collect GPU metrics sample: {e}")
            # poll() doubles as the sampling sleep AND the stop-signal check
            stop = connection.poll(sample_interval_sec)
        # Cleanup
        if gpu_type == "amd":
            try:
                amdsmi.amdsmi_shut_down()
            except Exception as e:
                _logger.debug(f"Failed to shutdown AMD GPU monitoring: {e}")
        elif gpu_type == "nvidia":
            try:
                pynvml.nvmlShutdown()
            except Exception as e:
                _logger.debug(f"Failed to shutdown NVIDIA GPU monitoring: {e}")
        # Send results back
        try:
            connection.send((gpu_utilization, gpu_memory_used, timestamps))
        except Exception as e:
            _logger.error(f"Failed to send GPU monitoring results: {e}")
        connection.close()

    def start(self):
        """Start monitoring GPU metrics in a separate process."""
        if self.gpu_type is None:
            self.logger.debug("GPU monitoring skipped (no supported GPU)")
            return
        self.child_connection, self.parent_connection = Pipe()
        # daemon=True so a crashed benchmark never leaves the worker process behind
        self.process = Process(
            target=GPUMonitor._monitor_worker,
            args=(self.gpu_type, self.sample_interval_sec, self.child_connection),
            daemon=True,
        )
        self.process.start()
        # Wait for worker to signal ready
        if self.process.is_alive():
            self.parent_connection.recv()
        self.logger.debug("GPU monitoring started (multiprocessing)")

    def stop_and_collect(self) -> GPURawMetrics:
        """Stop monitoring and return collected metrics."""
        # No GPU available or unsupported GPU
        if self.process is None:
            return GPURawMetrics(
                utilization=[],
                memory_used=[],
                timestamps=[],
                timestamp_0=0.0,
                monitoring_status=GPUMonitoringStatus.NO_GPUS_AVAILABLE,
            )
        # Process crashed before we could collect results
        process_failed = False
        if not self.process.is_alive():
            process_failed = True
            gpu_utilization, gpu_memory_used, timestamps = [], [], []
        else:
            # Signal stop: any message wakes the worker's poll()
            self.parent_connection.send(0)
            # Get results
            try:
                gpu_utilization, gpu_memory_used, timestamps = self.parent_connection.recv()
            except Exception:
                process_failed = True
                gpu_utilization, gpu_memory_used, timestamps = [], [], []
        self.parent_connection.close()
        self.process.join(timeout=2.0)
        if self.process.is_alive():
            self.process.terminate()
        if gpu_utilization:
            # Re-base timestamps so the first sample is at t=0
            timestamp_0 = timestamps[0]
            metrics = GPURawMetrics(
                utilization=gpu_utilization,
                memory_used=gpu_memory_used,
                timestamps=[t - timestamp_0 for t in timestamps],
                timestamp_0=timestamp_0,
                monitoring_status=GPUMonitoringStatus.SUCCESS,
            )
            self.logger.debug(f"GPU monitoring completed: {len(gpu_utilization)} samples collected")
        elif process_failed:
            metrics = GPURawMetrics(
                utilization=[],
                memory_used=[],
                timestamps=[],
                timestamp_0=0.0,
                monitoring_status=GPUMonitoringStatus.FAILED,
            )
            self.logger.warning("GPU monitoring failed (process crashed or timed out)")
        else:
            metrics = GPURawMetrics(
                utilization=[],
                memory_used=[],
                timestamps=[],
                timestamp_0=0.0,
                monitoring_status=GPUMonitoringStatus.NO_SAMPLES_COLLECTED,
            )
        return metrics
| {
"repo_id": "huggingface/transformers",
"file_path": "benchmark_v2/framework/hardware_metrics.py",
"license": "Apache License 2.0",
"lines": 273,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
huggingface/transformers:src/transformers/models/video_llama_3/modular_video_llama_3.py | # Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections.abc import Callable
from dataclasses import dataclass
from typing import Any, Optional, Union
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import LayerNorm
from ... import initialization as init
from ...cache_utils import Cache
from ...configuration_utils import PreTrainedConfig
from ...feature_extraction_utils import BatchFeature
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
SizeDict,
get_image_size,
make_flat_list_of_images,
valid_images,
validate_preprocess_arguments,
)
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, ModelOutput
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...tokenization_utils_base import PreTokenizedInput, TextInput
from ...utils import (
TensorType,
auto_docstring,
can_return_tuple,
logging,
)
from ...utils.generic import is_flash_attention_requested, merge_with_config_defaults
from ...utils.output_capturing import capture_outputs
from ...video_utils import (
VideoInput,
group_videos_by_shape,
reorder_videos,
)
from ..auto import CONFIG_MAPPING, AutoConfig
from ..auto.modeling_auto import AutoModel
from ..qwen2_vl.image_processing_qwen2_vl import Qwen2VLImageProcessor, Qwen2VLImageProcessorKwargs, smart_resize
from ..qwen2_vl.image_processing_qwen2_vl_fast import (
Qwen2VLImageProcessorFast,
)
from ..qwen2_vl.modeling_qwen2_vl import (
Qwen2VLForConditionalGeneration,
Qwen2VLModel,
Qwen2VLPreTrainedModel,
TransformersKwargs,
VisionRotaryEmbedding,
apply_rotary_pos_emb_vision,
eager_attention_forward,
)
from ..qwen2_vl.processing_qwen2_vl import (
Qwen2VLProcessor,
Qwen2VLProcessorKwargs,
)
from ..qwen2_vl.video_processing_qwen2_vl import (
Qwen2VLVideoProcessor,
Qwen2VLVideoProcessorInitKwargs,
)
from ..siglip.configuration_siglip import SiglipVisionConfig
from ..siglip.modeling_siglip import (
SiglipAttention,
SiglipEncoder,
SiglipEncoderLayer,
SiglipMLP,
)
logger = logging.get_logger(__name__)
class VideoLlama3VisionConfig(SiglipVisionConfig):
    """
    Configuration class for [`VideoLlama3VisionModel`]. It is used to instantiate a
    VideoLLaMA3 vision encoder according to the specified arguments, defining the model architecture.
    Instantiating a configuration with the defaults will yield a similar configuration to that of
    VideoLLaMA3-2B [lkhl/VideoLLaMA3-2B-Image-HF](https://huggingface.co/lkhl/VideoLLaMA3-2B-Image-HF).

    Args:
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        intermediate_size (`int`, *optional*, defaults to 3072):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        num_channels (`int`, *optional*, defaults to 3):
            Number of channels in the input images.
        patch_size (`int`, *optional*, defaults to 16):
            The size (resolution) of each patch.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu_pytorch_tanh"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"` and `"gelu_new"` `"quick_gelu"` are supported.
        layer_norm_eps (`float`, *optional*, defaults to 1e-06):
            The epsilon used by the layer normalization layers.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
    """

    model_type = "video_llama_3_vision"
    base_config_key = "vision_config"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        patch_size=16,
        hidden_act="gelu_pytorch_tanh",
        layer_norm_eps=1e-6,
        attention_dropout=0.0,
        initializer_range=0.02,
        **kwargs,
    ):
        # Forward the shared vision-encoder arguments to the SigLIP parent config.
        shared_kwargs = dict(
            hidden_size=hidden_size,
            intermediate_size=intermediate_size,
            num_hidden_layers=num_hidden_layers,
            num_attention_heads=num_attention_heads,
            num_channels=num_channels,
            patch_size=patch_size,
            hidden_act=hidden_act,
            layer_norm_eps=layer_norm_eps,
            attention_dropout=attention_dropout,
        )
        super().__init__(**shared_kwargs, **kwargs)
        self.initializer_range = initializer_range
        # The fixed `image_size` attribute set by the SigLIP parent is not part of
        # this config's interface, so it is removed after initialization.
        del self.image_size
class VideoLlama3Config(PreTrainedConfig):
    """
    Configuration class for [`VideoLlama3Model`]. It is used to instantiate a
    VideoLLaMA3 model according to the specified arguments, defining the model architecture.
    Instantiating a configuration with the defaults will yield a similar configuration to that of
    VideoLLaMA3-2B [lkhl/VideoLLaMA3-2B-Image-HF](https://huggingface.co/lkhl/VideoLLaMA3-2B-Image-HF).

    Args:
        text_config (`Union[PreTrainedConfig, dict]`, *optional*, defaults to `Qwen2Config`):
            The config object or dictionary of the text backbone.
        vision_config (`Union[PreTrainedConfig, dict]`, *optional*, defaults to `VideoLlama3VisionConfig`):
            The config object or dictionary of the vision backbone.
        image_token_id (`int`, *optional*, defaults to 151655):
            The image token index to encode the image prompt.
        video_token_id (`int`, *optional*, defaults to 151656):
            The video token index to encode the image prompt.
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether to tie weight embeddings
    """

    model_type = "video_llama_3"
    sub_configs = {"vision_config": VideoLlama3VisionConfig, "text_config": AutoConfig}
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        image_token_id=151655,
        video_token_id=151656,
        tie_word_embeddings=False,
        **kwargs,
    ):
        # Resolve the vision config: None -> defaults, dict -> build from the
        # sub-config class, config instance -> use as-is.
        vision_cls = self.sub_configs["vision_config"]
        if vision_config is None:
            self.vision_config = vision_cls()
        elif isinstance(vision_config, dict):
            self.vision_config = vision_cls(**vision_config)
        elif isinstance(vision_config, PreTrainedConfig):
            self.vision_config = vision_config
        else:
            raise ValueError(
                f"vision_config must be of type `dict` or `PreTrainedConfig`, but got {type(vision_config)}."
            )
        # Resolve the text config the same way, defaulting to Qwen2.
        if text_config is None:
            self.text_config = CONFIG_MAPPING["qwen2"]()
        elif isinstance(text_config, dict):
            self.text_config = CONFIG_MAPPING[text_config["model_type"]](**text_config)
        elif isinstance(text_config, PreTrainedConfig):
            self.text_config = text_config
        else:
            raise ValueError(f"text_config must be of type `dict` or `PreTrainedConfig`, but got {type(text_config)}.")
        self.image_token_id = image_token_id
        self.video_token_id = video_token_id
        self.tie_word_embeddings = tie_word_embeddings
        super().__init__(**kwargs)
class VideoLlama3VisionRotaryEmbedding(VisionRotaryEmbedding):
    def forward(self, grid_thw, merge_sizes) -> tuple[torch.Tensor, torch.Tensor]:
        """Compute cos/sin rotary embeddings for a packed sequence of image/video patches.

        For every (t, h, w) grid, row and column indices are reordered to match the
        window-major (`merge_size` x `merge_size`) patch layout, repeated over the `t`
        frames, and used to index a table of rotary frequencies.
        """
        all_ids = []
        for (t, h, w), merge_size in zip(grid_thw, merge_sizes):
            row_ids = torch.arange(h).unsqueeze(1).expand(-1, w)
            col_ids = torch.arange(w).unsqueeze(0).expand(h, -1)
            # Reorder both index grids from row-major to window-major order.
            window_ids = []
            for ids in (row_ids, col_ids):
                ids = ids.reshape(
                    h // merge_size,
                    merge_size,
                    w // merge_size,
                    merge_size,
                )
                window_ids.append(ids.permute(0, 2, 1, 3).flatten())
            all_ids.append(torch.stack(window_ids, dim=-1).repeat(t, 1))
        pos_ids = torch.cat(all_ids, dim=0)

        # Frequency table covering the largest spatial extent across all grids.
        max_hw = grid_thw[:, 1:].max()
        seq = torch.arange(max_hw, device=self.inv_freq.device, dtype=self.inv_freq.dtype)
        freqs = torch.outer(seq, self.inv_freq)
        rotary_pos_emb = freqs[pos_ids].flatten(1)
        emb = torch.cat((rotary_pos_emb, rotary_pos_emb), dim=-1)
        return emb.cos(), emb.sin()
class VideoLlama3VisionEmbeddings(nn.Module):
    """Patch-embedding layer: a strided Conv2d applied to pre-extracted pixel patches."""

    def __init__(self, config: VideoLlama3VisionConfig) -> None:
        super().__init__()
        self.config = config
        self.embed_dim = config.hidden_size
        self.patch_size = config.patch_size
        # Kernel and stride both equal the patch size, so each patch yields one embedding.
        self.patch_embedding = nn.Conv2d(
            in_channels=config.num_channels,
            out_channels=self.embed_dim,
            kernel_size=self.patch_size,
            stride=self.patch_size,
            padding="valid",
        )

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        # (num_patches, C * P * P) -> (num_patches, C, P, P)
        patches = hidden_states.view(-1, self.config.num_channels, self.patch_size, self.patch_size)
        # Conv output is (num_patches, embed_dim, 1, 1); flatten to (num_patches, embed_dim).
        return self.patch_embedding(patches).view(-1, self.embed_dim)
class VideoLlama3VisionMLP(SiglipMLP):
    # Identical to SigLIP's feed-forward block; subclassed only so the generated
    # modeling file gets a model-specific class name.
    pass
class VideoLlama3VisionAttention(SiglipAttention):
    """Multi-headed self-attention over a packed (batch-free) sequence of vision patches."""

    def __init__(self, config):
        super().__init__(config)
        # No grouped-query attention in the vision tower: keys/values use the same heads as queries.
        self.num_key_value_groups = 1
        self.scaling = self.head_dim**-0.5
        self.attention_dropout = config.attention_dropout
        # Drop the SigLIP attribute names; the equivalents above (`scaling`,
        # `attention_dropout`) are the names the shared attention interface expects.
        del self.scale
        del self.dropout

    def forward(
        self,
        hidden_states: torch.Tensor,
        cu_seqlens: torch.Tensor,
        position_embeddings: tuple[torch.Tensor, torch.Tensor],
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple[torch.Tensor, torch.Tensor | None]:
        """
        Args:
            hidden_states (`torch.Tensor`):
                Input to the layer of shape `(seq_len, embed_dim)`.
            cu_seqlens (`torch.Tensor` of shape `(num_images_or_videos + 1,)`):
                The cumulative sequence lengths of each image or video feature.
            position_embeddings (`tuple(torch.Tensor, torch.Tensor)` of shape `(num_patches, head_dim // 2)`):
                The cosine and sine position embeddings for vision attention.
        """
        seq_length = hidden_states.shape[0]
        # Project to (seq_len, num_heads, head_dim); note there is no batch dimension here.
        query_states = self.q_proj(hidden_states).view(seq_length, self.num_heads, self.head_dim)
        key_states = self.k_proj(hidden_states).view(seq_length, self.num_heads, self.head_dim)
        value_states = self.v_proj(hidden_states).view(seq_length, self.num_heads, self.head_dim)
        cos, sin = position_embeddings
        query_states, key_states = apply_rotary_pos_emb_vision(query_states, key_states, cos, sin)
        # Move heads first and add a singleton batch dim: (1, num_heads, seq_len, head_dim).
        query_states = query_states.transpose(0, 1).unsqueeze(0)
        key_states = key_states.transpose(0, 1).unsqueeze(0)
        value_states = value_states.transpose(0, 1).unsqueeze(0)
        attention_interface: Callable = ALL_ATTENTION_FUNCTIONS.get_interface(
            self.config._attn_implementation, eager_attention_forward
        )
        if is_flash_attention_requested(self.config):
            # Flash Attention 2: Use cu_seqlens for variable length attention
            max_seqlen = (cu_seqlens[1:] - cu_seqlens[:-1]).max()
            attn_output, attn_weights = attention_interface(
                self,
                query_states,
                key_states,
                value_states,
                attention_mask=None,
                scaling=self.scaling,
                dropout=0.0 if not self.training else self.attention_dropout,
                cu_seq_lens_q=cu_seqlens,
                cu_seq_lens_k=cu_seqlens,
                max_length_q=max_seqlen,
                max_length_k=max_seqlen,
                is_causal=False,
                **kwargs,
            )
        else:
            # Other implementations: Process each chunk separately
            # so that patches never attend across image/video boundaries.
            lengths = cu_seqlens[1:] - cu_seqlens[:-1]
            splits = [
                torch.split(tensor, lengths.tolist(), dim=2) for tensor in (query_states, key_states, value_states)
            ]
            attn_outputs, attn_weights = [], []
            for q, k, v in zip(*splits):
                attn_output, attn_weight = attention_interface(
                    self,
                    q,
                    k,
                    v,
                    attention_mask=None,
                    scaling=self.scaling,
                    dropout=0.0 if not self.training else self.attention_dropout,
                    is_causal=False,
                    **kwargs,
                )
                attn_outputs.append(attn_output)
                attn_weights.append(attn_weight)
            # NOTE(review): in this branch `attn_weights` is returned as a Python list
            # of per-chunk tensors rather than a single tensor.
            attn_output = torch.cat(attn_outputs, dim=1)
        attn_output = attn_output.reshape(seq_length, -1).contiguous()
        attn_output = self.out_proj(attn_output)
        return attn_output, attn_weights
class VideoLlama3VisionEncoderLayer(SiglipEncoderLayer):
    """Pre-norm transformer layer: attention and MLP, each wrapped in a residual connection."""

    def __init__(self, config: VideoLlama3VisionConfig):
        super().__init__(config)
        self.self_attn = VideoLlama3VisionAttention(config=config)
        self.mlp = VideoLlama3VisionMLP(config=config)

    def forward(
        self,
        hidden_states: torch.Tensor,
        cu_seqlens: torch.Tensor,
        position_embeddings: tuple[torch.Tensor, torch.Tensor],
        **kwargs: Unpack[TransformersKwargs],
    ) -> torch.Tensor:
        r"""
        cu_seqlens (`torch.Tensor` of shape `(num_images_or_videos + 1,)`):
            The cumulative sequence lengths of each image or video feature.
        position_embeddings (`tuple(torch.Tensor, torch.Tensor)` of shape `(num_patches, head_dim // 2)`):
            The cosine and sine position embeddings for vision attention.
        """
        # Attention sub-block with residual connection.
        attn_out, _ = self.self_attn(
            self.layer_norm1(hidden_states),
            cu_seqlens=cu_seqlens,
            position_embeddings=position_embeddings,
            **kwargs,
        )
        hidden_states = hidden_states + attn_out
        # Feed-forward sub-block with residual connection.
        return hidden_states + self.mlp(self.layer_norm2(hidden_states))
class VideoLlama3VisionEncoder(SiglipEncoder):
    """Stack of `VideoLlama3VisionEncoderLayer`s applied sequentially to the patch sequence."""

    def __init__(self, config: VideoLlama3VisionConfig):
        super().__init__(config)
        self.layers = nn.ModuleList(
            VideoLlama3VisionEncoderLayer(config) for _ in range(config.num_hidden_layers)
        )

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        hidden_states: torch.Tensor,
        cu_seqlens: torch.Tensor,
        position_embeddings: tuple[torch.Tensor, torch.Tensor],
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple | BaseModelOutput:
        r"""
        cu_seqlens (`torch.Tensor` of shape `(num_images_or_videos + 1,)`):
            The cumulative sequence lengths of each image or video feature.
        position_embeddings (`tuple(torch.Tensor, torch.Tensor)` of shape `(num_patches, head_dim // 2)`):
            The cosine and sine position embeddings for vision attention.
        """
        for layer_module in self.layers:
            hidden_states = layer_module(
                hidden_states,
                cu_seqlens=cu_seqlens,
                position_embeddings=position_embeddings,
                **kwargs,
            )
        return BaseModelOutput(last_hidden_state=hidden_states)
class VideoLlama3PreTrainedModel(Qwen2VLPreTrainedModel):
    config: VideoLlama3Config
    _no_split_modules = ["VideoLlama3VisionEncoderLayer"]

    def _init_weights(self, module):
        # Standard transformers initialization for common module types.
        PreTrainedModel._init_weights(self, module)
        # The rotary `inv_freq` buffer is deterministic, so recompute it
        # (1 / theta^(2i/dim)) instead of leaving it randomly initialized.
        if isinstance(module, VideoLlama3VisionRotaryEmbedding):
            inv_freq = 1.0 / (module.theta ** (torch.arange(0, module.dim, 2, dtype=torch.float) / module.dim))
            init.copy_(module.inv_freq, inv_freq)
class VideoLlama3VisionModel(VideoLlama3PreTrainedModel):
    """Vision tower: patch embeddings + rotary-position transformer encoder + spatial downsampling."""

    config: VideoLlama3VisionConfig
    main_input_name = "pixel_values"
    input_modalities = ("image",)
    _can_record_outputs = {
        "hidden_states": VideoLlama3VisionEncoderLayer,
        "attentions": VideoLlama3VisionAttention,
    }

    def __init__(self, config: VideoLlama3VisionConfig):
        super().__init__(config)
        head_dim = config.hidden_size // config.num_attention_heads
        # Rotary table covers half the head dim; cos/sin are duplicated to full size downstream.
        self.rotary_pos_emb = VideoLlama3VisionRotaryEmbedding(head_dim // 2)
        self.embeddings = VideoLlama3VisionEmbeddings(config)
        self.encoder = VideoLlama3VisionEncoder(config)
        self.post_layernorm = LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.post_init()

    def get_input_embeddings(self) -> VideoLlama3VisionEmbeddings:
        return self.embeddings.patch_embedding

    def pixel_unshuffle(
        self,
        hidden_states: torch.Tensor,
        grid_thw: torch.Tensor,
        merge_sizes: torch.Tensor,
    ):
        """Spatially downsample each image/video's features by its `merge_size` factor."""
        # Split the packed sequence into per-image/video chunks of t*h*w patches each.
        hidden_states_chunks = hidden_states.split(grid_thw.prod(dim=1).tolist(), dim=0)
        outputs = []
        for hidden_states, (t, h, w), merge_size in zip(hidden_states_chunks, grid_thw, merge_sizes):
            c = hidden_states.shape[-1]
            # Undo the window-major (merge_size x merge_size) ordering back to a row-major h x w grid.
            hidden_states = hidden_states.view(t, h // merge_size, w // merge_size, merge_size, merge_size, c).permute(
                0, 1, 3, 2, 4, 5
            )
            # (t, h, w, c) -> channels-first for interpolation.
            hidden_states = hidden_states.reshape(t, h, w, c).permute(0, 3, 1, 2)
            # Bilinear downsampling by `merge_size` along both spatial axes.
            hidden_states = torch.nn.functional.interpolate(
                hidden_states, size=(h // merge_size, w // merge_size), mode="bilinear"
            )
            # Flatten back to a (num_downsampled_patches, c) sequence.
            hidden_states = hidden_states.permute(0, 2, 3, 1).view(-1, c)
            outputs.append(hidden_states)
        return torch.cat(outputs, dim=0)

    @merge_with_config_defaults
    @capture_outputs(tie_last_hidden_states=False)
    @auto_docstring
    def forward(
        self,
        pixel_values: torch.Tensor,
        grid_thw: torch.Tensor,
        merge_sizes: torch.Tensor,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple | BaseModelOutput:
        r"""
        grid_thw (`torch.LongTensor` of shape `(num_images_or_videos, 3)`):
            The temporal, height and width dimensions of feature shape for each image. Each row contains [t, h, w] values.
        merge_sizes (`torch.Tensor` of shape `(num_images_or_videos,)`):
            The spatial downsampling ratio of each image or video feature.
        """
        position_embeddings = self.rotary_pos_emb(grid_thw, merge_sizes)
        # Per-frame sequence lengths (h * w), repeated t times per image/video, then cumulated.
        cu_seqlens = torch.repeat_interleave(grid_thw[:, 1] * grid_thw[:, 2], grid_thw[:, 0]).cumsum(
            dim=0,
            # Select dtype based on the following factors:
            #  - FA2 requires that cu_seqlens_q must have dtype int32
            #  - torch.onnx.export requires that cu_seqlens_q must have same dtype as grid_thw
            # See https://github.com/huggingface/transformers/pull/34852 for more information
            dtype=grid_thw.dtype if torch.jit.is_tracing() else torch.int32,
        )
        # Prepend the 0 offset so cu_seqlens[i]:cu_seqlens[i+1] bounds chunk i.
        cu_seqlens = torch.nn.functional.pad(cu_seqlens, (1, 0), value=0)
        hidden_states = self.embeddings(pixel_values.type(self.dtype))
        encoder_outputs: BaseModelOutput = self.encoder(
            hidden_states,
            cu_seqlens=cu_seqlens,
            position_embeddings=position_embeddings,
            **kwargs,
        )
        last_hidden_state = encoder_outputs[0]
        last_hidden_state = self.post_layernorm(last_hidden_state)
        last_hidden_state = self.pixel_unshuffle(last_hidden_state, grid_thw, merge_sizes)
        return BaseModelOutput(last_hidden_state=last_hidden_state)
class VideoLlama3Projector(nn.Module):
    """Two-layer MLP mapping vision features into the language model's hidden space."""

    def __init__(self, config: VideoLlama3Config) -> None:
        super().__init__()
        vision_dim = config.vision_config.hidden_size
        text_dim = config.text_config.hidden_size
        # Keep the checkpoint layout: readout.0 (Linear) -> readout.1 (GELU) -> readout.2 (Linear).
        self.readout = nn.Sequential(
            nn.Linear(vision_dim, text_dim),
            nn.GELU(),
            nn.Linear(text_dim, text_dim),
        )

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        return self.readout(hidden_states)
@dataclass
@auto_docstring(
    custom_intro="""
    Base class for VideoLLaMA3 outputs, with hidden states and attentions.
    """
)
class VideoLlama3ModelOutputWithPast(ModelOutput):
    r"""
    past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
        Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
        `(batch_size, num_heads, sequence_length, embed_size_per_head)`)
        Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
        `past_key_values` input) to speed up sequential decoding.
    image_hidden_states (`torch.FloatTensor`, *optional*):
        A `torch.FloatTensor` of size `(num_images_features, hidden_size)`.
        image_hidden_states of the model produced by the vision encoder and after projecting the last hidden state.
    video_hidden_states (`torch.FloatTensor`, *optional*):
        A `torch.FloatTensor` of size `(num_video_features, hidden_size)`.
        video_hidden_states of the model produced by the vision encoder and after projecting the last hidden state.
    """

    # All fields default to None so partial outputs can be constructed depending on flags.
    last_hidden_state: torch.FloatTensor | None = None
    past_key_values: list[torch.FloatTensor] | None = None
    hidden_states: tuple[torch.FloatTensor] | None = None
    attentions: tuple[torch.FloatTensor] | None = None
    image_hidden_states: torch.FloatTensor | None = None
    video_hidden_states: torch.FloatTensor | None = None
class VideoLlama3Model(Qwen2VLModel):
    """Composite model: vision tower -> projector -> language model, with image/video token splicing."""

    _checkpoint_conversion_mapping = {}
    _can_compile_fullgraph = False

    def __init__(self, config: VideoLlama3Config):
        PreTrainedModel.__init__(self, config)
        self.vision_model = AutoModel.from_config(config.vision_config)
        self.projector = VideoLlama3Projector(config)
        self.language_model = AutoModel.from_config(config.text_config)
        self.post_init()

    # VideoLLaMA3 uses plain 1D positions, so the Qwen2-VL 3D-RoPE helpers are disabled.
    def get_rope_index(self):
        raise AttributeError("Not needed for VideoLLaMA3")

    def get_vision_position_ids(self):
        raise AttributeError("Not needed for VideoLLaMA3")

    def compute_3d_position_ids(self):
        raise AttributeError("Not needed for VideoLLaMA3")

    @can_return_tuple
    @auto_docstring
    def get_video_features(
        self,
        pixel_values_videos: torch.FloatTensor,
        video_grid_thw: torch.LongTensor,
        video_merge_sizes: torch.LongTensor,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple | BaseModelOutputWithPooling:
        r"""
        pixel_values_videos (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):
            The tensors corresponding to the input videos.
        video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*):
            The temporal, height and width of feature shape of each video in LLM.
        video_merge_sizes (`torch.Tensor` of shape `(num_videos,)`):
            The spatial downsampling ratio of each video feature.
        """
        # Videos go through exactly the same path as images.
        return self.get_image_features(
            pixel_values=pixel_values_videos,
            image_grid_thw=video_grid_thw,
            image_merge_sizes=video_merge_sizes,
            **kwargs,
        )

    @can_return_tuple
    @auto_docstring
    def get_image_features(
        self,
        pixel_values: torch.FloatTensor,
        image_grid_thw: torch.LongTensor,
        image_merge_sizes: torch.LongTensor,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple | BaseModelOutputWithPooling:
        r"""
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):
            The tensors corresponding to the input images.
        image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*):
            The temporal, height and width of feature shape of each image in LLM.
        image_merge_sizes (`torch.Tensor` of shape `(num_images,)`):
            The spatial downsampling ratio of each image feature.
        """
        vision_outputs = self.vision_model(
            pixel_values=pixel_values,
            grid_thw=image_grid_thw,
            merge_sizes=image_merge_sizes,
            return_dict=True,
            **kwargs,
        )
        last_hidden_state = vision_outputs.last_hidden_state
        # Project vision features into the language model's hidden space.
        image_embeds = self.projector(last_hidden_state)
        # Per-image feature counts after spatial downsampling by merge_size**2.
        split_sizes = image_grid_thw.prod(dim=1) // (image_merge_sizes**2)
        image_embeds = torch.split(image_embeds, split_sizes.tolist())
        # Stash the per-image tuple on the output; callers read `.pooler_output`.
        vision_outputs.pooler_output = image_embeds
        return vision_outputs

    @can_return_tuple
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: Cache | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        use_cache: bool | None = None,
        pixel_values: torch.Tensor | None = None,
        image_grid_thw: torch.LongTensor | None = None,
        image_merge_sizes: torch.LongTensor | None = None,
        pixel_values_videos: torch.FloatTensor | None = None,
        video_grid_thw: torch.LongTensor | None = None,
        video_merge_sizes: torch.LongTensor | None = None,
        video_compression_mask: torch.BoolTensor | None = None,
        cache_position: torch.LongTensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple | VideoLlama3ModelOutputWithPast:
        r"""
        image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*):
            The temporal, height and width of feature shape of each image in LLM.
        image_merge_sizes (`torch.Tensor` of shape `(num_images,)`):
            The spatial downsampling ratio of each image feature.
        video_grid_thw (`torch.Tensor` of shape `(num_videos, 3)`):
            The temporal, height and width of feature shape of each video before vision encoder.
        video_merge_sizes (`torch.Tensor` of shape `(num_videos,)`):
            The spatial downsampling ratio of each video feature.
        video_compression_mask (`torch.BoolTensor` of shape `(num_video_features,)`, *optional*):
            The mask to indicate which video features are kept after token compression.
        """
        if inputs_embeds is None:
            inputs_embeds = self.get_input_embeddings()(input_ids)
        image_embeds = None
        if pixel_values is not None:
            image_embeds = self.get_image_features(
                pixel_values, image_grid_thw, image_merge_sizes, return_dict=True
            ).pooler_output
            image_embeds = torch.cat(image_embeds, dim=0).to(inputs_embeds.device, inputs_embeds.dtype)
            # Replace image placeholder token embeddings with the projected image features.
            image_mask, _ = self.get_placeholder_mask(
                input_ids, inputs_embeds=inputs_embeds, image_features=image_embeds
            )
            inputs_embeds = inputs_embeds.masked_scatter(image_mask, image_embeds)
        video_embeds = None
        if pixel_values_videos is not None:
            video_embeds = self.get_video_features(
                pixel_values_videos, video_grid_thw, video_merge_sizes, return_dict=True
            ).pooler_output
            video_embeds = torch.cat(video_embeds, dim=0).to(inputs_embeds.device, inputs_embeds.dtype)
            # Keep only the video features that survived token compression, if a mask was given.
            if video_compression_mask is not None:
                video_embeds = video_embeds[video_compression_mask.to(video_embeds.device)]
            _, video_mask = self.get_placeholder_mask(
                input_ids, inputs_embeds=inputs_embeds, video_features=video_embeds
            )
            inputs_embeds = inputs_embeds.masked_scatter(video_mask, video_embeds)
        # Run the language model on the spliced embeddings (input_ids intentionally dropped).
        outputs = self.language_model(
            input_ids=None,
            position_ids=position_ids,
            attention_mask=attention_mask,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            cache_position=cache_position,
            **kwargs,
        )
        return VideoLlama3ModelOutputWithPast(
            last_hidden_state=outputs.last_hidden_state,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            image_hidden_states=image_embeds,
            video_hidden_states=video_embeds,
        )
@dataclass
@auto_docstring(
    custom_intro="""
    Base class for VideoLLaMA3 causal language model (or autoregressive) outputs.
    """
)
class VideoLlama3CausalLMOutputWithPast(ModelOutput):
    r"""
    loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
        Language modeling loss (for next-token prediction).
    logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
        Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
    past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
        Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
        `(batch_size, num_heads, sequence_length, embed_size_per_head)`)
        Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
        `past_key_values` input) to speed up sequential decoding.
    image_hidden_states (`torch.FloatTensor`, *optional*):
        A `torch.FloatTensor` of size `(num_images_features, hidden_size)`.
        image_hidden_states of the model produced by the vision encoder and after projecting the last hidden state.
    video_hidden_states (`torch.FloatTensor`, *optional*):
        A `torch.FloatTensor` of size `(num_video_features, hidden_size)`.
        video_hidden_states of the model produced by the vision encoder and after projecting the last hidden state.
    """

    # All fields default to None so partial outputs can be constructed depending on flags.
    loss: torch.FloatTensor | None = None
    logits: torch.FloatTensor | None = None
    past_key_values: list[torch.FloatTensor] | None = None
    hidden_states: tuple[torch.FloatTensor] | None = None
    attentions: tuple[torch.FloatTensor] | None = None
    image_hidden_states: torch.FloatTensor | None = None
    video_hidden_states: torch.FloatTensor | None = None
class VideoLlama3ForConditionalGeneration(Qwen2VLForConditionalGeneration):
    """VideoLLaMA3 model with a language-modeling head, plus generation plumbing for visual inputs."""

    _checkpoint_conversion_mapping = {}
    _can_compile_fullgraph = False

    def __init__(self, config: VideoLlama3Config):
        super().__init__(config)  # just to add type hint on config

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: Cache | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        labels: torch.LongTensor | None = None,
        use_cache: bool | None = None,
        pixel_values: torch.Tensor | None = None,
        image_grid_thw: torch.LongTensor | None = None,
        image_merge_sizes: torch.LongTensor | None = None,
        pixel_values_videos: torch.FloatTensor | None = None,
        video_grid_thw: torch.LongTensor | None = None,
        video_merge_sizes: torch.LongTensor | None = None,
        video_compression_mask: torch.BoolTensor | None = None,
        cache_position: torch.LongTensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple | VideoLlama3CausalLMOutputWithPast:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
        image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*):
            The temporal, height and width of feature shape of each image in LLM.
        image_merge_sizes (`torch.Tensor` of shape `(num_images,)`):
            The spatial downsampling ratio of each image feature.
        video_grid_thw (`torch.Tensor` of shape `(num_videos, 3)`):
            The temporal, height and width of feature shape of each video before vision encoder.
        video_merge_sizes (`torch.Tensor` of shape `(num_videos,)`):
            The spatial downsampling ratio of each video feature.
        video_compression_mask (`torch.BoolTensor` of shape `(num_video_features,)`, *optional*):
            The mask to indicate which video features are kept after token compression.
        """
        outputs = self.model(
            input_ids=input_ids,
            pixel_values=pixel_values,
            image_grid_thw=image_grid_thw,
            image_merge_sizes=image_merge_sizes,
            pixel_values_videos=pixel_values_videos,
            video_grid_thw=video_grid_thw,
            video_merge_sizes=video_merge_sizes,
            video_compression_mask=video_compression_mask,
            position_ids=position_ids,
            attention_mask=attention_mask,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            return_dict=True,
            cache_position=cache_position,
            **kwargs,
        )
        hidden_states = outputs[0]
        logits = self.lm_head(hidden_states)
        loss = None
        if labels is not None:
            loss = self.loss_function(
                logits=logits, labels=labels, vocab_size=self.config.text_config.vocab_size, **kwargs
            )
        return VideoLlama3CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            image_hidden_states=outputs.image_hidden_states,
            video_hidden_states=outputs.video_hidden_states,
        )

    def prepare_inputs_for_generation(
        self,
        input_ids,
        past_key_values=None,
        attention_mask=None,
        inputs_embeds=None,
        cache_position=None,
        position_ids=None,
        use_cache=True,
        pixel_values: torch.Tensor | None = None,
        image_grid_thw: torch.LongTensor | None = None,
        image_merge_sizes: torch.LongTensor | None = None,
        pixel_values_videos: torch.FloatTensor | None = None,
        video_grid_thw: torch.LongTensor | None = None,
        video_merge_sizes: torch.LongTensor | None = None,
        video_compression_mask: torch.BoolTensor | None = None,
        is_first_iteration: bool | None = False,
        **kwargs,
    ):
        # Overwritten -- in specific circumstances we don't want to forward image inputs to the model
        model_inputs = super().prepare_inputs_for_generation(
            input_ids,
            past_key_values=past_key_values,
            attention_mask=attention_mask,
            inputs_embeds=inputs_embeds,
            cache_position=cache_position,
            position_ids=position_ids,
            pixel_values=pixel_values,
            image_grid_thw=image_grid_thw,
            image_merge_sizes=image_merge_sizes,
            pixel_values_videos=pixel_values_videos,
            video_grid_thw=video_grid_thw,
            video_merge_sizes=video_merge_sizes,
            video_compression_mask=video_compression_mask,
            use_cache=use_cache,
            is_first_iteration=is_first_iteration,
            **kwargs,
        )
        # Visual features are already merged into the KV cache after the prefill step,
        # so later decoding iterations must not re-encode them.
        if not is_first_iteration and use_cache:
            model_inputs["pixel_values"] = None
            model_inputs["pixel_values_videos"] = None
        return model_inputs

    # VideoLLaMA3 uses plain 1D positions; the inherited Qwen2-VL helper does not apply.
    def _prepare_position_ids_for_generation(self):
        raise AttributeError("Not needed for VideoLLaMA3")

    def _get_image_nums_and_video_nums(
        self,
        input_ids: torch.LongTensor | None,
        inputs_embeds: torch.Tensor | None = None,
        image_grid_thw: torch.LongTensor | None = None,
        image_merge_sizes: torch.LongTensor | None = None,
        video_grid_thw: torch.LongTensor | None = None,
        video_merge_sizes: torch.LongTensor | None = None,
        video_compression_mask: torch.BoolTensor | None = None,
    ) -> tuple[torch.Tensor, torch.Tensor]:
        """
        Get the number of images and videos for each sample to calculate the separation length of the sample tensor.
        These parameters are not passed through the processor to avoid unpredictable impacts from interface modifications.
        Args:
            input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
                Indices of input sequence tokens in the vocabulary.
        Returns:
            image_nums (`torch.LongTensor` of shape `(batch_size, num_images_sample)`)
            video_nums (`torch.LongTensor` of shape `(batch_size, num_videos_sample)`)
        """
        # NOTE(review): despite the docstring, this returns two Python lists of per-sample counts.
        image_token_id = self.config.image_token_id
        video_token_id = self.config.video_token_id
        if inputs_embeds is not None:
            # No token ids available: detect placeholder positions by comparing embeddings
            # against the embedding of the image/video token (first feature only).
            image_mask = (
                inputs_embeds
                == self.get_input_embeddings()(
                    torch.tensor(image_token_id, dtype=torch.long, device=inputs_embeds.device)
                )
            )[..., 0]
            video_mask = (
                inputs_embeds
                == self.get_input_embeddings()(
                    torch.tensor(video_token_id, dtype=torch.long, device=inputs_embeds.device)
                )
            )[..., 0]
        else:
            image_mask = input_ids == image_token_id
            video_mask = input_ids == video_token_id
        # Per-image/video feature counts after spatial downsampling (and optional compression).
        if image_grid_thw is not None:
            num_image_features = image_grid_thw.prod(dim=1) // (image_merge_sizes**2)
        else:
            num_image_features = []
        if video_grid_thw is not None:
            num_video_features = video_grid_thw.prod(dim=1) // (video_merge_sizes**2)
            if video_compression_mask is not None:
                num_video_features = video_compression_mask.split(num_video_features.tolist())
                num_video_features = [mask.sum() for mask in num_video_features]
        else:
            num_video_features = []
        # Walk the flat per-image/video feature counts and attribute consecutive entries
        # to each batch sample until its placeholder-token count is consumed.
        image_nums, video_nums = [], []
        start_image_idx, start_video_idx = 0, 0
        for num_image_tokens, num_video_tokens in zip(image_mask.sum(dim=1), video_mask.sum(dim=1)):
            cu_num_features = 0
            image_idx = start_image_idx
            while image_idx < len(num_image_features) and cu_num_features < num_image_tokens:
                cu_num_features += num_image_features[image_idx]
                image_idx += 1
            # NOTE(review): `assert` is stripped under `python -O`; a ValueError would be sturdier.
            assert cu_num_features == num_image_tokens, (
                "The number of image tokens does not match the number of image features."
            )
            image_nums.append(image_idx - start_image_idx)
            start_image_idx = image_idx
            cu_num_features = 0
            video_idx = start_video_idx
            while video_idx < len(num_video_features) and cu_num_features < num_video_tokens:
                cu_num_features += num_video_features[video_idx]
                video_idx += 1
            assert cu_num_features == num_video_tokens, (
                "The number of video tokens does not match the number of video features."
            )
            video_nums.append(video_idx - start_video_idx)
            start_video_idx = video_idx
        return image_nums, video_nums

    def _expand_inputs_for_generation(
        self,
        expand_size: int = 1,
        is_encoder_decoder: bool = False,
        input_ids: torch.LongTensor | None = None,
        **model_kwargs,
    ) -> tuple[torch.LongTensor, dict[str, Any]]:
        # Overwritten -- Support for expanding tensors without a batch size dimension
        # e.g., pixel_values, image_grid_thw, pixel_values_videos, video_grid_thw, second_per_grid_t
        # pixel_values.shape[0] is sum(seqlen_images for samples)
        # image_grid_thw.shape[0] is sum(num_images for samples)
        if expand_size == 1:
            return input_ids, model_kwargs
        # Keys whose tensors are packed along dim 0 (no batch dim) and need sample-aware expansion.
        visual_keys = [
            "pixel_values",
            "image_grid_thw",
            "image_merge_sizes",
            "pixel_values_videos",
            "video_grid_thw",
            "video_merge_sizes",
            "video_compression_mask",
        ]

        def _repeat_interleave_samples(x, lengths, repeat_times):
            # Split the packed tensor into per-sample chunks and repeat each chunk in place.
            samples = torch.split(x, lengths)
            repeat_args = [repeat_times] + [1] * (x.dim() - 1)
            result = torch.cat([sample.repeat(*repeat_args) for sample in samples], dim=0)
            return result

        def _expand_dict_for_generation_visual(dict_to_expand):
            image_grid_thw = model_kwargs.get("image_grid_thw", None)
            video_grid_thw = model_kwargs.get("video_grid_thw", None)
            video_merge_sizes = model_kwargs.get("video_merge_sizes", None)
            video_compression_mask = model_kwargs.get("video_compression_mask", None)
            image_nums, video_nums = self._get_image_nums_and_video_nums(
                input_ids,
                inputs_embeds=model_kwargs.get("inputs_embeds", None),
                image_grid_thw=image_grid_thw,
                image_merge_sizes=model_kwargs.get("image_merge_sizes", None),
                video_grid_thw=video_grid_thw,
                video_merge_sizes=video_merge_sizes,
                video_compression_mask=video_compression_mask,
            )
            for key in dict_to_expand:
                if key == "pixel_values":
                    # split images into samples
                    samples = torch.split(image_grid_thw, list(image_nums))
                    # compute the sequence length of images for each sample
                    lengths = [torch.prod(sample, dim=1).sum() for sample in samples]
                    dict_to_expand[key] = _repeat_interleave_samples(
                        dict_to_expand[key], lengths=lengths, repeat_times=expand_size
                    )
                elif key == "image_grid_thw":
                    # get the num of images for each sample
                    lengths = list(image_nums)
                    dict_to_expand[key] = _repeat_interleave_samples(
                        dict_to_expand[key], lengths=lengths, repeat_times=expand_size
                    )
                elif key == "image_merge_sizes":
                    lengths = list(image_nums)
                    dict_to_expand[key] = _repeat_interleave_samples(
                        dict_to_expand[key], lengths=lengths, repeat_times=expand_size
                    )
                elif key == "pixel_values_videos":
                    samples = torch.split(video_grid_thw, list(video_nums))
                    lengths = [torch.prod(sample, dim=1).sum() for sample in samples]
                    dict_to_expand[key] = _repeat_interleave_samples(
                        dict_to_expand[key], lengths=lengths, repeat_times=expand_size
                    )
                elif key == "video_compression_mask":
                    # One mask entry per downsampled feature: t*h*w // merge_size**2 per video.
                    samples = torch.split(video_grid_thw, list(video_nums))
                    merge_sizes = torch.split(video_merge_sizes, list(video_nums))
                    lengths = [
                        (torch.prod(sample, dim=1) // merge_size**2).sum()
                        for sample, merge_size in zip(samples, merge_sizes)
                    ]
                    dict_to_expand[key] = _repeat_interleave_samples(
                        dict_to_expand[key], lengths=lengths, repeat_times=expand_size
                    )
                elif key == "video_grid_thw":
                    lengths = list(video_nums)
                    dict_to_expand[key] = _repeat_interleave_samples(
                        dict_to_expand[key], lengths=lengths, repeat_times=expand_size
                    )
                elif key == "video_merge_sizes":
                    lengths = list(video_nums)
                    dict_to_expand[key] = _repeat_interleave_samples(
                        dict_to_expand[key], lengths=lengths, repeat_times=expand_size
                    )
            return dict_to_expand

        def _expand_dict_for_generation(dict_to_expand):
            # Regular batched tensors expand along dim 0; visual keys were handled above.
            for key in dict_to_expand:
                if (
                    key != "cache_position"
                    and dict_to_expand[key] is not None
                    and isinstance(dict_to_expand[key], torch.Tensor)
                    and key not in visual_keys
                ):
                    dict_to_expand[key] = dict_to_expand[key].repeat_interleave(expand_size, dim=0)
            return dict_to_expand

        # Visual expansion must run before `input_ids` is expanded, since it needs the
        # original ids to count placeholder tokens per sample.
        model_kwargs = _expand_dict_for_generation_visual(model_kwargs)
        if input_ids is not None:
            input_ids = input_ids.repeat_interleave(expand_size, dim=0)
        model_kwargs = _expand_dict_for_generation(model_kwargs)
        if is_encoder_decoder:
            if model_kwargs.get("encoder_outputs") is None:
                raise ValueError("If `is_encoder_decoder` is True, make sure that `encoder_outputs` is defined.")
            model_kwargs["encoder_outputs"] = _expand_dict_for_generation(model_kwargs["encoder_outputs"])
        return input_ids, model_kwargs
class VideoLlama3ProcessorKwargs(Qwen2VLProcessorKwargs):
    # Processor-level default kwargs. `return_metadata=True` forces the video
    # processor to always emit `video_metadata`, which `VideoLlama3Processor.__call__`
    # needs to build per-frame "Time <t>s:" timestamp prompts.
    _defaults = {
        "text_kwargs": {
            "padding": False,
            "return_mm_token_type_ids": False,
        },
        "videos_kwargs": {"return_metadata": True},
    }
class VideoLlama3Processor(Qwen2VLProcessor):
    def __call__(
        self,
        images: ImageInput = None,
        text: TextInput | PreTokenizedInput | list[TextInput] | list[PreTokenizedInput] = None,
        videos: VideoInput = None,
        **kwargs: Unpack[VideoLlama3ProcessorKwargs],
    ) -> BatchFeature:
        """
        Prepare text, images and videos for the model.

        Each image placeholder in `text` is expanded to the exact number of vision
        tokens produced by the image processor; each video placeholder is expanded
        to per-frame `"Time <t>s:<tokens>"` prompts whose per-frame token counts
        follow the video compression mask.

        Args:
            images (`ImageInput`, *optional*):
                Images to preprocess with `self.image_processor`.
            text (`str`, `list[str]`, *optional*):
                Prompt(s) containing one `self.image_token` / `self.video_token`
                placeholder per image / video.
            videos (`VideoInput`, *optional*):
                Videos to preprocess with `self.video_processor`.

        Returns:
            [`BatchFeature`]: Tokenized text inputs merged with the image and video features.
        """
        output_kwargs = self._merge_kwargs(
            VideoLlama3ProcessorKwargs,
            tokenizer_init_kwargs=self.tokenizer.init_kwargs,
            **kwargs,
        )

        image_inputs = videos_inputs = {}
        if images is not None:
            image_inputs = self.image_processor(images=images, **output_kwargs["images_kwargs"])
            image_grid_thw = image_inputs["image_grid_thw"]
            image_merge_sizes = image_inputs["image_merge_sizes"]
        else:
            image_grid_thw = image_merge_sizes = []

        if videos is not None:
            videos_inputs = self.video_processor(videos=videos, **output_kwargs["videos_kwargs"])
            # Tokens per video after spatial merging: prod(t, h, w) // merge_size**2.
            num_video_tokens = [
                grid_thw.prod() // merge_size**2
                for grid_thw, merge_size in zip(videos_inputs["video_grid_thw"], videos_inputs["video_merge_sizes"])
            ]
            video_compression_masks = videos_inputs["video_compression_mask"].split(num_video_tokens)

            # Metadata is always requested from the video processor (see
            # `VideoLlama3ProcessorKwargs._defaults`) but only surfaced to the
            # caller when explicitly asked for.
            # NOTE(review): this checks the raw top-level `kwargs`, not the merged
            # `output_kwargs["videos_kwargs"]` — confirm nested `return_metadata`
            # requests are meant to be ignored here.
            if not kwargs.get("return_metadata"):
                video_metadata = videos_inputs.pop("video_metadata")
            else:
                video_metadata = videos_inputs["video_metadata"]

            timestamps = []
            for metadata in video_metadata:
                if metadata.fps is None:
                    logger.warning_once(
                        "VideoLLaMA3 requires frame timestamps to construct prompts, but the `fps` of the input video could not be inferred. "
                        "Probably `video_metadata` was missing from inputs and you passed pre-sampled frames. "
                        "Defaulting to `fps=1`. Please provide `video_metadata` for more accurate results."
                    )
                metadata.fps = 1 if metadata.fps is None else metadata.fps
                timestamps.append(metadata.timestamps)
        else:
            video_compression_masks = timestamps = []

        if not isinstance(text, list):
            text = [text]

        text = text.copy()  # below lines change text in-place

        if images is not None:
            image_index = 0
            for i in range(len(text)):
                # Replace each image token with as many temporary placeholders as
                # the image contributes vision tokens, then restore the real token.
                while self.image_token in text[i]:
                    num_image_tokens = image_grid_thw[image_index].prod() // (image_merge_sizes[image_index] ** 2)
                    text[i] = text[i].replace(self.image_token, "<|placeholder|>" * num_image_tokens, 1)
                    image_index += 1
                text[i] = text[i].replace("<|placeholder|>", self.image_token)

        if videos is not None:
            video_index = 0
            for i in range(len(text)):
                while self.video_token in text[i]:
                    # Split this video's compression mask evenly across frames to
                    # get the number of surviving tokens per frame.
                    frame_compression_masks = video_compression_masks[video_index].split(
                        len(video_compression_masks[video_index]) // len(timestamps[video_index])
                    )
                    num_frame_tokens = [x.sum() for x in frame_compression_masks]
                    frame_prompts = [
                        f"Time {t:.1f}s:" + "<|placeholder|>" * n
                        for n, t in zip(num_frame_tokens, timestamps[video_index])
                    ]
                    text[i] = text[i].replace(self.video_token, ",".join(frame_prompts), 1)
                    video_index += 1
                text[i] = text[i].replace("<|placeholder|>", self.video_token)

        return_tensors = output_kwargs["text_kwargs"].pop("return_tensors", None)
        return_mm_token_type_ids = output_kwargs["text_kwargs"].pop("return_mm_token_type_ids", False)
        text_inputs = self.tokenizer(text, **output_kwargs["text_kwargs"], return_tensors=None)
        self._check_special_mm_tokens(text, text_inputs, modalities=["image", "video"])

        if return_mm_token_type_ids:
            # 0 = text, 1 = image token, 2 = video token.
            array_ids = np.array(text_inputs["input_ids"])
            mm_token_type_ids = np.zeros_like(text_inputs["input_ids"])
            mm_token_type_ids[array_ids == self.image_token_id] = 1
            mm_token_type_ids[array_ids == self.video_token_id] = 2
            text_inputs["mm_token_type_ids"] = mm_token_type_ids.tolist()

        return BatchFeature(data={**text_inputs, **image_inputs, **videos_inputs}, tensor_type=return_tensors)

    def model_input_names(self):
        # NOTE(review): presumably a modular-converter marker that strips the
        # parent's `model_input_names` property from the generated file rather
        # than a runtime method — confirm against the generated processing file.
        raise AttributeError("VideoLlama doesn't need to override it")
class VideoLlama3ImageProcessorKwargs(Qwen2VLImageProcessorKwargs):
    # No extra kwargs: VideoLLaMA3 reuses the Qwen2-VL image-processor kwargs
    # unchanged; the subclass exists for naming / auto-class mapping.
    pass
class VideoLlama3ImageProcessor(Qwen2VLImageProcessor):
    r"""
    Constructs a VideoLLaMA3 image processor that dynamically resizes images based on the original images.

    Args:
        do_resize (`bool`, *optional*, defaults to `True`):
            Whether to resize the image's (height, width) dimensions.
        size (`dict[str, int]`, *optional*, defaults to `{"shortest_edge": 56 * 56, "longest_edge": 28 * 28 * 1280}`):
            Size of the image after resizing. `shortest_edge` and `longest_edge` keys must be present.
        resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`):
            Resampling filter to use when resizing the image.
        do_rescale (`bool`, *optional*, defaults to `True`):
            Whether to rescale the image by the specified scale `rescale_factor`.
        rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
            Scale factor to use if rescaling the image.
        do_normalize (`bool`, *optional*, defaults to `True`):
            Whether to normalize the image.
        image_mean (`float` or `list[float]`, *optional*, defaults to `[0.5, 0.5, 0.5]`):
            Mean to use if normalizing the image. This is a float or list of floats for each channel in the image.
        image_std (`float` or `list[float]`, *optional*, defaults to `[0.5, 0.5, 0.5]`):
            Standard deviation to use if normalizing the image. This is a float or list of floats for each channel in the image.
        do_convert_rgb (`bool`, *optional*, defaults to `True`):
            Whether to convert the image to RGB.
        min_pixels (`int`, *optional*, defaults to `56 * 56`):
            The min pixels of the image to resize the image.
        max_pixels (`int`, *optional*, defaults to `28 * 28 * 1280`):
            The max pixels of the image to resize the image.
        patch_size (`int`, *optional*, defaults to 14):
            The spatial patch size of the vision encoder.
        temporal_patch_size (`int`, *optional*, defaults to 1):
            The temporal patch size of the vision encoder.
        merge_size (`int`, *optional*, defaults to 1):
            The merge size of the vision encoder to llm encoder.
    """

    model_input_names = ["pixel_values", "image_grid_thw", "image_merge_sizes"]
    valid_kwargs = VideoLlama3ImageProcessorKwargs

    def __init__(
        self,
        do_resize: bool = True,
        size: dict[str, int] | None = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_rescale: bool = True,
        rescale_factor: int | float = 1 / 255,
        do_normalize: bool = True,
        image_mean: float | list[float] | None = None,
        image_std: float | list[float] | None = None,
        do_convert_rgb: bool = True,
        min_pixels: int | None = None,
        max_pixels: int | None = None,
        patch_size: int = 14,
        temporal_patch_size: int = 1,
        merge_size: int = 1,
        **kwargs,
    ) -> None:
        super().__init__(
            do_resize=do_resize,
            size=size,
            resample=resample,
            do_rescale=do_rescale,
            rescale_factor=rescale_factor,
            do_normalize=do_normalize,
            image_mean=image_mean,
            image_std=image_std,
            do_convert_rgb=do_convert_rgb,
            min_pixels=min_pixels,
            max_pixels=max_pixels,
            patch_size=patch_size,
            temporal_patch_size=temporal_patch_size,
            merge_size=merge_size,
            **kwargs,
        )
        # Override the defaults applied by `super().__init__` with the
        # ImageNet-standard mean/std ([0.5, 0.5, 0.5]) used by VideoLLaMA3.
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
        # Still images carry no temporal dimension in VideoLLaMA3.
        if self.temporal_patch_size != 1:
            raise ValueError("`temporal_patch_size` must be 1 for VideoLLaMA3")

    def preprocess(
        self,
        images: ImageInput,
        videos: VideoInput | None = None,  # NOTE(review): accepted but unused below — presumably kept for signature compat; videos go through the video processor
        do_resize: bool | None = None,
        size: dict[str, int] | None = None,
        min_pixels: int | None = None,
        max_pixels: int | None = None,
        resample: PILImageResampling | None = None,
        do_rescale: bool | None = None,
        rescale_factor: float | None = None,
        do_normalize: bool | None = None,
        image_mean: float | list[float] | None = None,
        image_std: float | list[float] | None = None,
        patch_size: int | None = None,
        temporal_patch_size: int | None = None,
        merge_size: int | None = None,
        do_convert_rgb: bool | None = None,
        return_tensors: str | TensorType | None = None,
        data_format: ChannelDimension | None = ChannelDimension.FIRST,
        input_data_format: str | ChannelDimension | None = None,
    ):
        """
        Args:
            images (`ImageInput`):
                Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
                passing in images with pixel values between 0 and 1, set `do_rescale=False`.
            videos (`VideoInput`):
                Video to preprocess. Expects a single or batch of videos with pixel values ranging from 0 to 255. If
                passing in videos with pixel values between 0 and 1, set `do_rescale=False`.
            do_resize (`bool`, *optional*, defaults to `self.do_resize`):
                Whether to resize the image.
            size (`dict[str, int]`, *optional*, defaults to `self.size`):
                Size of the image after resizing. Shortest edge of the image is resized to size["shortest_edge"], with
                the longest edge resized to keep the input aspect ratio.
            resample (`int`, *optional*, defaults to `self.resample`):
                Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
                has an effect if `do_resize` is set to `True`.
            do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
                Whether to rescale the image.
            rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
                Rescale factor to rescale the image by if `do_rescale` is set to `True`.
            do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
                Whether to normalize the image.
            image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
                Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`.
            image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
                Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to
                `True`.
            min_pixels (`int`, *optional*, defaults to `self.min_pixels`):
                The min pixels of the image to resize the image.
            max_pixels (`int`, *optional*, defaults to `self.max_pixels`):
                The max pixels of the image to resize the image.
            patch_size (`int`, *optional*, defaults to `self.patch_size`):
                The spatial patch size of the vision encoder.
            temporal_patch_size (`int`, *optional*, defaults to `self.temporal_patch_size`):
                The temporal patch size of the vision encoder.
            merge_size (`int`, *optional*, defaults to `self.merge_size`):
                The merge size of the vision encoder to llm encoder.
            do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
                Whether to convert the image to RGB.
            return_tensors (`str` or `TensorType`, *optional*):
                The type of tensors to return. Can be one of:
                - Unset: Return a list of `np.ndarray`.
                - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
                - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
            data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
                The channel dimension format for the output image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - Unset: Use the channel dimension format of the input image.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format for the input image. If unset, the channel dimension format is inferred
                from the input image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
        """
        # Resolution bounds precedence: explicit `size` > explicit min/max pixels
        # (backward compat) > the processor's stored `self.size`.
        min_pixels = min_pixels if min_pixels is not None else self.min_pixels
        max_pixels = max_pixels if max_pixels is not None else self.max_pixels

        if size is not None:
            if "shortest_edge" not in size or "longest_edge" not in size:
                raise ValueError("size must contain 'shortest_edge' and 'longest_edge' keys.")
            # NOTE(review): only `min_pixels` is refreshed from `size` here and
            # neither variable is used again below (`size` is what `_preprocess`
            # consumes) — this assignment looks vestigial; confirm before removing.
            min_pixels = size["shortest_edge"]
        elif min_pixels is not None and max_pixels is not None:
            # backward compatibility: override size with min_pixels and max_pixels if they are provided
            size = {"shortest_edge": min_pixels, "longest_edge": max_pixels}
        else:
            size = {**self.size}

        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        patch_size = patch_size if patch_size is not None else self.patch_size
        temporal_patch_size = temporal_patch_size if temporal_patch_size is not None else self.temporal_patch_size
        merge_size = merge_size if merge_size is not None else self.merge_size
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        if images is not None:
            images = self.fetch_images(images)
            images = make_flat_list_of_images(images)

        if images is not None and not valid_images(images):
            raise ValueError("Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor")

        validate_preprocess_arguments(
            rescale_factor=rescale_factor,
            do_normalize=do_normalize,
            image_mean=image_mean,
            image_std=image_std,
            do_resize=do_resize,
            size=size,
            resample=resample,
        )

        data = {}
        if images is not None:
            # Each image yields a variable number of flattened patches plus its
            # (t, h, w) grid; patches from all images are concatenated.
            pixel_values, vision_grid_thws = [], []
            for image in images:
                patches, image_grid_thw = self._preprocess(
                    image,
                    do_resize=do_resize,
                    size=size,
                    resample=resample,
                    do_rescale=do_rescale,
                    rescale_factor=rescale_factor,
                    do_normalize=do_normalize,
                    image_mean=image_mean,
                    image_std=image_std,
                    patch_size=patch_size,
                    temporal_patch_size=temporal_patch_size,
                    merge_size=merge_size,
                    data_format=data_format,
                    do_convert_rgb=do_convert_rgb,
                    input_data_format=input_data_format,
                )
                pixel_values.extend(patches)
                vision_grid_thws.append(image_grid_thw)
            # All images in a call share the same merge size; emit one entry per image.
            data.update(
                {
                    "pixel_values": np.array(pixel_values),
                    "image_grid_thw": np.array(vision_grid_thws),
                    "image_merge_sizes": np.array([merge_size] * len(vision_grid_thws)),
                }
            )

        return BatchFeature(data=data, tensor_type=return_tensors)
class VideoLlama3ImageProcessorFast(Qwen2VLImageProcessorFast):
    # VideoLLaMA3 defaults: ImageNet-standard normalization, no temporal
    # patching for still images, and no spatial token merging.
    image_mean = IMAGENET_STANDARD_MEAN
    image_std = IMAGENET_STANDARD_STD
    temporal_patch_size = 1
    merge_size = 1
    valid_kwargs = VideoLlama3ImageProcessorKwargs
    model_input_names = [
        "pixel_values",
        "image_grid_thw",
        "image_merge_sizes",
    ]

    def _preprocess_image_like_inputs(
        self,
        images: ImageInput,
        do_convert_rgb: bool,
        input_data_format: ChannelDimension,
        device: Union[str, "torch.device"] | None = None,
        **kwargs: Unpack[VideoLlama3ImageProcessorKwargs],
    ) -> BatchFeature:
        """
        Preprocess images and append the per-image `image_merge_sizes` tensor.

        Args:
            images (`ImageInput`): Images to preprocess.
            do_convert_rgb (`bool`): Whether to convert the images to RGB.
            input_data_format (`ChannelDimension`): Channel dimension format of the input images.
            device (`str` or `torch.device`, *optional*): Device to process the images on.

        Returns:
            [`BatchFeature`]: `pixel_values`, `image_grid_thw` and `image_merge_sizes`.
        """
        # Still images have no temporal dimension in VideoLLaMA3.
        if kwargs["temporal_patch_size"] != 1:
            raise ValueError("`temporal_patch_size` must be 1 for VideoLLaMA3")
        # Prepare input images
        images = self._prepare_image_like_inputs(
            images=images, do_convert_rgb=do_convert_rgb, input_data_format=input_data_format, device=device
        )
        # (Removed a dead `batch_feature = BatchFeature()` store that was
        # immediately overwritten by the call below.)
        batch_feature = self._preprocess(images, **kwargs)
        # One entry per image; every image in a call shares the configured merge size.
        batch_feature["image_merge_sizes"] = torch.tensor(
            [kwargs["merge_size"]] * batch_feature.image_grid_thw.size(0),
            dtype=batch_feature.image_grid_thw.dtype,
            device=batch_feature.image_grid_thw.device,
        )
        return batch_feature
class VideoLlama3VideoProcessorInitKwargs(Qwen2VLVideoProcessorInitKwargs):
    # Whether to drop near-duplicate video tokens based on inter-frame pixel
    # differences (computed by the video processor's `_get_compression_mask`).
    use_token_compression: bool | None
class VideoLlama3VideoProcessor(Qwen2VLVideoProcessor):
    use_token_compression = True
    image_mean = IMAGENET_STANDARD_MEAN
    image_std = IMAGENET_STANDARD_STD
    temporal_patch_size = 1
    max_frames = 180
    return_metadata = True
    valid_kwargs = VideoLlama3VideoProcessorInitKwargs
    model_input_names = ["pixel_values_videos", "video_grid_thw", "video_merge_sizes", "video_compression_mask"]

    def _get_compression_mask(
        self,
        pixel_values_videos: torch.FloatTensor,
        video_grid_thw: torch.LongTensor,
        video_merge_sizes: torch.LongTensor,
        threshold: float | None = 0.1,
        min_tokens: int | None = 1,
    ) -> torch.BoolTensor:
        """
        Get the compression mask for video tokens based on pixel differences.

        Args:
            pixel_values_videos (`torch.FloatTensor` of shape `(total_patches, patch_dim)`):
                Flattened patches of all videos concatenated along the first dimension
                (`total_patches == video_grid_thw.prod(dim=1).sum()`).
            video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`):
                The temporal, height and width of feature shape of each video in LLM.
            video_merge_sizes (`torch.Tensor` of shape `(num_videos,)`):
                The spatial downsampling ratio of each video feature.
            threshold (`float`, *optional*, defaults to 0.1):
                The threshold to determine whether a token should be kept based on pixel differences.
            min_tokens (`int`, *optional*, defaults to 1):
                The minimum number of tokens to keep for each frame.

        Returns:
            `torch.BoolTensor`: one entry per (merged) video token, `True` where the token is kept.
        """
        videos = pixel_values_videos.split(video_grid_thw.prod(dim=1).tolist(), dim=0)
        compression_masks = []
        for images, grid_size, merge_size in zip(videos, video_grid_thw, video_merge_sizes):
            t, h, w = grid_size
            if t == 1:
                # Single-frame video: nothing to compare against, keep every token.
                num_tokens = images.size(0) // (merge_size**2)
                compression_masks.append(torch.ones((num_tokens,), dtype=torch.bool, device=images.device))
            else:
                # NOTE: video token compressor
                # Compare each spatial token with the corresponding token of the
                # previous frame; tokens whose mean absolute pixel difference
                # (scaled back to the 0-255 range) falls below `threshold` are
                # marked redundant.
                images = images.view(t, (h // merge_size) * (w // merge_size), -1)
                pixel_diff = images[1:] - images[:-1]
                pixel_diff = torch.abs(pixel_diff).mean(dim=-1) * 255
                # The first frame has no predecessor: force it above threshold so it is always kept.
                pixel_diff = torch.cat([torch.full_like(pixel_diff[0:1], threshold + 1), pixel_diff], dim=0)
                mask = pixel_diff > threshold
                # Guarantee at least `min_tokens` surviving tokens per frame.
                padding_ids = torch.nonzero(mask.sum(dim=1) < min_tokens)[:, 0]
                mask[padding_ids, :min_tokens] = 1
                compression_masks.append(mask.flatten())
        return torch.cat(compression_masks)

    def _preprocess(
        self,
        videos: list["torch.Tensor"],
        do_convert_rgb: bool,
        do_resize: bool,
        size: SizeDict,
        interpolation: Optional["F.InterpolationMode"],
        do_rescale: bool,
        rescale_factor: float,
        do_normalize: bool,
        image_mean: float | list[float] | None,
        image_std: float | list[float] | None,
        patch_size: int | None = None,
        temporal_patch_size: int | None = None,
        merge_size: int | None = None,
        use_token_compression: bool | None = None,
        return_tensors: str | TensorType | None = None,
        device: Optional["torch.device"] = None,
        **kwargs,
    ):
        """
        Resize, rescale, normalize and patchify videos, then compute the token
        compression mask.

        Returns a [`BatchFeature`] with `pixel_values_videos`, `video_grid_thw`,
        `video_merge_sizes` and `video_compression_mask`.
        """
        # Group videos by size for batched resizing
        grouped_videos, grouped_videos_index = group_videos_by_shape(videos)
        resized_videos_grouped = {}
        for shape, stacked_videos in grouped_videos.items():
            height, width = get_image_size(stacked_videos[0], channel_dim=ChannelDimension.FIRST)
            resized_height, resized_width = height, width
            if do_resize:
                # Budget the pixel count across frames: `shape[0]` is the number
                # of frames for this group.
                resized_height, resized_width = smart_resize(
                    height,
                    width,
                    factor=patch_size * merge_size,
                    min_pixels=size["shortest_edge"],
                    max_pixels=size["longest_edge"] // shape[0],
                )
                stacked_videos = self.resize(
                    image=stacked_videos,
                    size=SizeDict(height=resized_height, width=resized_width),
                    interpolation=interpolation,
                )
            resized_videos_grouped[shape] = stacked_videos
        resized_videos = reorder_videos(resized_videos_grouped, grouped_videos_index)

        # Group videos by size for further processing
        # Needed in case do_resize is False, or resize returns videos with different sizes
        grouped_videos, grouped_videos_index = group_videos_by_shape(resized_videos)
        processed_videos_grouped = {}
        processed_grids = {}
        for shape, stacked_videos in grouped_videos.items():
            resized_height, resized_width = get_image_size(stacked_videos[0], channel_dim=ChannelDimension.FIRST)

            # Fused rescale and normalize
            stacked_videos = self.rescale_and_normalize(
                stacked_videos, do_rescale, rescale_factor, do_normalize, image_mean, image_std
            )
            patches = stacked_videos

            # Check that videos have `num_frames` divisible by `temporal_patch_size`
            if patches.shape[1] % temporal_patch_size != 0:
                # Pad by repeating the last frame. Fixed: use the local
                # `temporal_patch_size` override consistently with the rest of
                # this method instead of `self.temporal_patch_size`.
                repeats = patches[:, -1:].repeat(1, temporal_patch_size - 1, 1, 1, 1)
                patches = torch.cat([patches, repeats], dim=1)

            batch_size, grid_t, channel = patches.shape[:3]
            grid_t = grid_t // temporal_patch_size
            grid_h, grid_w = resized_height // patch_size, resized_width // patch_size
            # Split each frame into (merge-block, patch) tiles so that patches
            # belonging to the same merge block end up adjacent after flattening.
            patches = patches.view(
                batch_size,
                grid_t,
                temporal_patch_size,
                channel,
                grid_h // merge_size,
                merge_size,
                patch_size,
                grid_w // merge_size,
                merge_size,
                patch_size,
            )
            patches = patches.permute(0, 1, 4, 7, 5, 8, 3, 2, 6, 9)
            flatten_patches = patches.reshape(
                batch_size,
                grid_t * grid_h * grid_w,
                channel * temporal_patch_size * patch_size * patch_size,
            )
            processed_videos_grouped[shape] = flatten_patches
            processed_grids[shape] = [[grid_t, grid_h, grid_w]] * batch_size

        processed_videos = reorder_videos(processed_videos_grouped, grouped_videos_index)
        processed_grids = reorder_videos(processed_grids, grouped_videos_index)
        pixel_values_videos = torch.cat(processed_videos, dim=0)
        video_grid_thw = torch.tensor(processed_grids)
        # One merge size per video; all videos in a call share `merge_size`.
        video_merge_sizes = torch.full(
            (video_grid_thw.size(0),), merge_size, dtype=video_grid_thw.dtype, device=video_grid_thw.device
        )
        if use_token_compression:
            video_compression_mask = self._get_compression_mask(
                pixel_values_videos=pixel_values_videos,
                video_grid_thw=video_grid_thw,
                video_merge_sizes=video_merge_sizes,
            )
        else:
            # No compression: keep every merged token.
            num_video_tokens = video_grid_thw.prod(-1).sum() // (merge_size**2)
            video_compression_mask = torch.ones(
                (num_video_tokens,), dtype=torch.bool, device=pixel_values_videos.device
            )

        return BatchFeature(
            data={
                "pixel_values_videos": pixel_values_videos,
                "video_grid_thw": video_grid_thw,
                "video_merge_sizes": video_merge_sizes,
                "video_compression_mask": video_compression_mask,
            },
            tensor_type=return_tensors,
        )
# Public API re-exported by the `video_llama_3` model package.
__all__ = [
    "VideoLlama3VisionConfig",
    "VideoLlama3Config",
    "VideoLlama3VisionModel",
    "VideoLlama3PreTrainedModel",
    "VideoLlama3Model",
    "VideoLlama3ForConditionalGeneration",
    "VideoLlama3Processor",
    "VideoLlama3ImageProcessor",
    "VideoLlama3ImageProcessorFast",
    "VideoLlama3VideoProcessor",
]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/video_llama_3/modular_video_llama_3.py",
"license": "Apache License 2.0",
"lines": 1492,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.