sample_id stringlengths 21 196 | text stringlengths 105 936k | metadata dict | category stringclasses 6
values |
|---|---|---|---|
huggingface/transformers:src/transformers/models/hunyuan_v1_dense/configuration_hunyuan_v1_dense.py | # Copyright (C) 2025 THL A29 Limited, a Tencent company and the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""HunYuanDenseV1 model configuration"""
from ...configuration_utils import PreTrainedConfig
from ...modeling_rope_utils import RopeParameters
from ...utils import logging
logger = logging.get_logger(__name__)
class HunYuanDenseV1Config(PreTrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`HunYuanDenseV1Model`]. It is used to instantiate an
    HunYuan model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of the HunYuan-7B.
    Hunyuan-7B-Instruct [tencent/Hunyuan-7B-Instruct](https://huggingface.co/tencent/Hunyuan-7B-Instruct).

    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 290943):
            Vocabulary size of the HunYuan model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`HunYuanDenseV1Model`]
        hidden_size (`int`, *optional*, defaults to 4096):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 11008):
            Dimension of the MLP representations or shared MLP representations.
        num_hidden_layers (`int`, *optional*, defaults to 32):
            Number of hidden layers in the Transformer decoder.
        num_attention_heads (`int`, *optional*, defaults to 32):
            Number of attention heads for each attention layer in the Transformer decoder.
        num_key_value_heads (`int`, *optional*):
            This is the number of key_value heads that should be used to implement Grouped Query Attention. If
            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
            `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
            by meanpooling all the original heads within that group. For more details checkout [this
            paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to
            `num_attention_heads`.
        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the decoder.
        max_position_embeddings (`int`, *optional*, defaults to 2048):
            The maximum sequence length that this model might ever be used with.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        rms_norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon used by the rms normalization layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        pad_token_id (`int`, *optional*, defaults to 0):
            Padding token id.
        bos_token_id (`int`, *optional*, defaults to 1):
            Beginning of stream token id.
        eos_token_id (`int`, *optional*, defaults to 2):
            End of stream token id.
        eod_token_id (`int`, *optional*, defaults to 3):
            Token ID representing the end-of-document marker. Used to indicate the termination of a text sequence.
            Example: In multi-document processing, this token helps the model distinguish between separate documents.
        pretraining_tp (`int`, *optional*, defaults to 1):
            Experimental feature. Tensor parallelism rank used during pretraining. Please refer to [this
            document](https://huggingface.co/docs/transformers/parallelism) to understand more about it. This value is
            necessary to ensure exact reproducibility of the pretraining results. Please refer to [this
            issue](https://github.com/pytorch/pytorch/issues/76232).
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether to tie weight embeddings
        rope_parameters (`RopeParameters`, *optional*):
            Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain
            a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE
            with longer `max_position_embeddings`.
        attention_bias (`bool`, *optional*, defaults to `False`):
            Whether to use a bias in the query, key, value and output projection layers during self-attention.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        head_dim (`int`, *optional*):
            The attention head dimension. NOTE(review): previously documented as defaulting to 128, but the
            signature defaults to `None`; presumably the modeling code derives it from
            `hidden_size // num_attention_heads` when unset — confirm there.
    """

    model_type = "hunyuan_v1_dense"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size: int | None = 290943,
        hidden_size: int | None = 4096,
        intermediate_size: int | None = 11008,
        num_hidden_layers: int | None = 32,
        num_attention_heads: int | None = 32,
        num_key_value_heads: int | None = None,
        hidden_act: str | None = "silu",
        max_position_embeddings: int | None = 2048,
        initializer_range: float | None = 0.02,
        rms_norm_eps: float | None = 1e-5,
        use_cache: bool | None = True,
        pad_token_id: int | None = 0,
        bos_token_id: int | None = 1,
        eos_token_id: int | None = 2,
        eod_token_id: int | None = 3,
        pretraining_tp: int | None = 1,
        tie_word_embeddings: bool | None = False,
        rope_parameters: RopeParameters | dict[str, RopeParameters] | None = None,
        attention_bias: bool | None = False,
        attention_dropout: float | None = 0.0,
        head_dim: int | None = None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.head_dim = head_dim
        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.attention_bias = attention_bias
        self.attention_dropout = attention_dropout
        self.rope_parameters = rope_parameters
        self.tie_word_embeddings = tie_word_embeddings
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        # Fix: `eod_token_id` was accepted but never stored, so the documented
        # end-of-document id was silently dropped and did not serialize.
        self.eod_token_id = eod_token_id
        super().__init__(**kwargs)


__all__ = ["HunYuanDenseV1Config"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/hunyuan_v1_dense/configuration_hunyuan_v1_dense.py",
"license": "Apache License 2.0",
"lines": 136,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/hunyuan_v1_dense/modular_hunyuan_v1_dense.py | # Copyright (C) 2025 THL A29 Limited, a Tencent company and the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch HunYuanDenseV1 model."""
from collections.abc import Callable
import torch
from torch import nn
from transformers.cache_utils import Cache
from transformers.utils import (
logging,
)
from ... import initialization as init
from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...utils import TransformersKwargs
from ..llama.modeling_llama import (
LlamaAttention,
LlamaDecoderLayer,
LlamaForCausalLM,
LlamaForSequenceClassification,
LlamaMLP,
LlamaModel,
LlamaPreTrainedModel,
LlamaRMSNorm,
LlamaRotaryEmbedding,
apply_rotary_pos_emb,
eager_attention_forward,
)
from .configuration_hunyuan_v1_dense import HunYuanDenseV1Config
logger = logging.get_logger(__name__)
class HunYuanDenseV1RMSNorm(LlamaRMSNorm):
    # Inherits Llama's RMSNorm unchanged; subclassed only to carry the model-prefixed name.
    pass
class HunYuanDenseV1MLP(LlamaMLP):
    def __init__(self, config: HunYuanDenseV1Config, layer_idx=None, is_shared_mlp=False):
        """Gated MLP; re-declares the three bias-free projections explicitly.

        Args:
            config: Model configuration providing `hidden_size` / `intermediate_size`.
            layer_idx: Index of the owning decoder layer (stored, not otherwise used here).
            is_shared_mlp: NOTE(review): accepted but unused in this block — presumably
                meaningful for shared-MLP variants; confirm against callers.
        """
        super().__init__(config)
        self.layer_idx = layer_idx
        # Bias-free projections mirroring the parent layout.
        self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
class HunYuanDenseV1Attention(LlamaAttention):
    """Llama attention extended with per-head RMSNorm on the rotated query/key states."""

    def __init__(self, config: HunYuanDenseV1Config, layer_idx: int):
        super().__init__(config, layer_idx)
        # Applied in forward() *after* the rotary embedding.
        self.query_layernorm = HunYuanDenseV1RMSNorm(self.head_dim, eps=config.rms_norm_eps)
        self.key_layernorm = HunYuanDenseV1RMSNorm(self.head_dim, eps=config.rms_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: tuple[torch.Tensor, torch.Tensor],
        attention_mask: torch.Tensor | None,
        past_key_values: Cache | None = None,
        cache_position: torch.LongTensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple[torch.Tensor, torch.Tensor]:
        """Run self-attention and return `(attn_output, attn_weights)`."""
        input_shape = hidden_states.shape[:-1]
        hidden_shape = (*input_shape, -1, self.head_dim)
        # Project then reshape to (batch, heads, seq, head_dim).
        query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        cos, sin = position_embeddings
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
        # Diff from Llama: q/k are RMS-normalized after RoPE is applied.
        query_states = self.query_layernorm(query_states)
        key_states = self.key_layernorm(key_states)
        if past_key_values is not None:
            # sin and cos are specific to RoPE models; cache_position needed for the static cache
            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
            key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
        # Dispatch to the configured attention backend (eager fallback).
        attention_interface: Callable = ALL_ATTENTION_FUNCTIONS.get_interface(
            self.config._attn_implementation, eager_attention_forward
        )
        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=0.0 if not self.training else self.attention_dropout,
            scaling=self.scaling,
            **kwargs,
        )
        # Merge heads back into the hidden dimension before the output projection.
        attn_output = attn_output.reshape(*input_shape, -1).contiguous()
        attn_output = self.o_proj(attn_output)
        return attn_output, attn_weights
class HunYuanDenseV1DecoderLayer(LlamaDecoderLayer):
    def __init__(self, config: HunYuanDenseV1Config, layer_idx: int):
        """Standard Llama decoder layer; additionally records its own layer index."""
        super().__init__(config, layer_idx)
        self.layer_idx = layer_idx
class HunYuanDenseV1PreTrainedModel(LlamaPreTrainedModel, PreTrainedModel):
    @torch.no_grad()
    def _init_weights(self, module):
        """Run the generic init, then (re)compute rotary `inv_freq` buffers.

        Rotary modules are matched by class name so both this model's and any
        derived rotary embeddings are covered.
        """
        PreTrainedModel._init_weights(self, module)
        # DynamicNTKAlphaRotary - unique to this model
        if "RotaryEmbedding" in module.__class__.__name__ and hasattr(module, "original_inv_freq"):
            if module.rope_type == "dynamic" and module.config.rope_parameters.get("alpha"):
                # NTK-by-alpha scaling: stretch the RoPE base by alpha^(dim/(dim-2)).
                dim = module.config.head_dim
                rope_theta = module.config.rope_parameters["rope_theta"]
                alpha = module.config.rope_parameters["alpha"]
                base = rope_theta * alpha ** (dim / (dim - 2))
                buffer_value = 1.0 / (base ** (torch.arange(0, dim, 2).float() / dim))
            else:
                # Otherwise fall back to the standard transformers RoPE initializers.
                rope_fn = (
                    ROPE_INIT_FUNCTIONS[module.rope_type]
                    if module.rope_type != "default"
                    else module.compute_default_rope_parameters
                )
                buffer_value, _ = rope_fn(module.config)
            # Overwrite both the live and the pristine copies of the buffer.
            init.copy_(module.inv_freq, buffer_value)
            init.copy_(module.original_inv_freq, buffer_value)
class HunYuanDenseV1RotaryEmbedding(LlamaRotaryEmbedding):
    """Rotary position embedding with HunYuan's dynamic NTK-by-`alpha` variant."""

    def __init__(self, config: HunYuanDenseV1Config, device=None):
        # Bug fix: the unbound call `nn.Module.__init__()` was missing the instance
        # argument and raised TypeError at construction; it must receive `self`.
        nn.Module.__init__(self)
        self.max_seq_len_cached = config.max_position_embeddings
        self.original_max_seq_len = config.max_position_embeddings
        self.config = config
        self.rope_type = self.config.rope_parameters["rope_type"]
        # Diff from Llama - DynamicNTKAlphaRotary
        if self.rope_type == "dynamic" and self.config.rope_parameters.get("alpha"):
            self.dim = config.head_dim
            # NTK-by-alpha: stretch the RoPE base by alpha^(dim/(dim-2)).
            base = self.config.rope_parameters["rope_theta"] * self.config.rope_parameters["alpha"] ** (
                self.config.head_dim / (self.config.head_dim - 2)
            )
            inv_freq = 1.0 / (base ** (torch.arange(0, self.dim, 2).float().to(device) / self.config.head_dim))
            self.attention_scaling = 1.0
        else:
            # Standard transformers RoPE initialization path.
            rope_init_fn: Callable = self.compute_default_rope_parameters
            if self.rope_type != "default":
                rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
            inv_freq, self.attention_scaling = rope_init_fn(self.config, device)
        self.register_buffer("inv_freq", inv_freq, persistent=False)
        # Keep a pristine copy so dynamic scaling can be reset later.
        self.register_buffer("original_inv_freq", inv_freq.clone(), persistent=False)
class HunYuanDenseV1Model(LlamaModel):
    # No changes relative to the Llama implementation.
    pass


class HunYuanDenseV1ForCausalLM(LlamaForCausalLM):
    # No changes relative to the Llama implementation.
    pass


class HunYuanDenseV1ForSequenceClassification(LlamaForSequenceClassification):
    # No changes relative to the Llama implementation.
    pass


# Public API of this module.
__all__ = [
    "HunYuanDenseV1ForCausalLM",
    "HunYuanDenseV1Model",
    "HunYuanDenseV1PreTrainedModel",
    "HunYuanDenseV1ForSequenceClassification",
]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/hunyuan_v1_dense/modular_hunyuan_v1_dense.py",
"license": "Apache License 2.0",
"lines": 152,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/hunyuan_v1_moe/configuration_hunyuan_v1_moe.py | # Copyright (C) 2025 THL A29 Limited, a Tencent company and the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""HunYuanMoEV1 model configuration"""
from ...configuration_utils import PreTrainedConfig
from ...modeling_rope_utils import RopeParameters
from ...utils import logging
logger = logging.get_logger(__name__)
class HunYuanMoEV1Config(PreTrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`HunYuanMoEV1Model`]. It is used to instantiate an
    HunYuan model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of the HunYuan-A13B.
    Hunyuan-A13B-Instruct [tencent/Hunyuan-A13B-Instruct](https://huggingface.co/tencent/Hunyuan-A13B-Instruct).

    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 290943):
            Vocabulary size of the HunYuan model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`HunYuanMoEV1Model`]
        hidden_size (`int`, *optional*, defaults to 4096):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 11008):
            Dimension of the MLP representations or shared MLP representations.
        num_hidden_layers (`int`, *optional*, defaults to 32):
            Number of hidden layers in the Transformer decoder.
        num_attention_heads (`int`, *optional*, defaults to 32):
            Number of attention heads for each attention layer in the Transformer decoder.
        num_key_value_heads (`int`, *optional*):
            This is the number of key_value heads that should be used to implement Grouped Query Attention. If
            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
            `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
            by meanpooling all the original heads within that group. For more details checkout [this
            paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to
            `num_attention_heads`.
        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the decoder.
        max_position_embeddings (`int`, *optional*, defaults to 2048):
            The maximum sequence length that this model might ever be used with.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        rms_norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon used by the rms normalization layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        pad_token_id (`int`, *optional*, defaults to 0):
            Padding token id.
        bos_token_id (`int`, *optional*, defaults to 1):
            Beginning of stream token id.
        eos_token_id (`int`, *optional*, defaults to 2):
            End of stream token id.
        eod_token_id (`int`, *optional*, defaults to 3):
            Token ID representing the end-of-document marker. Used to indicate the termination of a text sequence.
            Example: In multi-document processing, this token helps the model distinguish between separate documents.
        sep_token_id (`int`, *optional*, defaults to 4):
            Token ID representing the separator token (`[SEP]`), used to demarcate boundaries between different text segments.
        pretraining_tp (`int`, *optional*, defaults to 1):
            Experimental feature. Tensor parallelism rank used during pretraining. Please refer to [this
            document](https://huggingface.co/docs/transformers/parallelism) to understand more about it. This value is
            necessary to ensure exact reproducibility of the pretraining results. Please refer to [this
            issue](https://github.com/pytorch/pytorch/issues/76232).
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether to tie weight embeddings
        rope_parameters (`RopeParameters`, *optional*):
            Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain
            a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE
            with longer `max_position_embeddings`.
        attention_bias (`bool`, *optional*, defaults to `False`):
            Whether to use a bias in the query, key, value and output projection layers during self-attention.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        num_experts (`int` or `List`, *optional*, defaults to 1):
            The number of experts for moe. If it is a list, it will be used as the number of experts for each layer.
        moe_topk (`int` or `List`, *optional*, defaults to 1):
            Number of experts selected per token (Top-K routing). List form enables layer-wise customization.
        head_dim (`int`, *optional*):
            The attention head dimension. NOTE(review): previously documented as defaulting to 128, but the
            signature defaults to `None`; presumably derived downstream when unset — confirm in the modeling file.
    """

    model_type = "hunyuan_v1_moe"
    keys_to_ignore_at_inference = ["past_key_values"]
    # Map the generic MoE attribute names used elsewhere in transformers onto
    # this config's native field names.
    attribute_map = {
        "num_experts_per_tok": "moe_topk",
        "num_local_experts": "num_experts",
    }

    def __init__(
        self,
        vocab_size: int | None = 290943,
        hidden_size: int | None = 4096,
        intermediate_size: int | None = 11008,
        num_hidden_layers: int | None = 32,
        num_attention_heads: int | None = 32,
        num_key_value_heads: int | None = None,
        hidden_act: str | None = "silu",
        max_position_embeddings: int | None = 2048,
        initializer_range: float | None = 0.02,
        rms_norm_eps: float | None = 1e-5,
        use_cache: bool | None = True,
        pad_token_id: int | None = 0,
        bos_token_id: int | None = 1,
        eos_token_id: int | None = 2,
        eod_token_id: int | None = 3,
        sep_token_id: int | None = 4,
        pretraining_tp: int | None = 1,
        tie_word_embeddings: bool | None = False,
        rope_parameters: RopeParameters | dict[str, RopeParameters] | None = None,
        attention_bias: bool | None = False,
        attention_dropout: float | None = 0.0,
        num_experts: int | list = 1,
        moe_topk: int | list = 1,
        head_dim: int | None = None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_experts = num_experts
        self.moe_topk = moe_topk
        self.head_dim = head_dim
        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.attention_bias = attention_bias
        self.attention_dropout = attention_dropout
        self.rope_parameters = rope_parameters
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        # Fix: `eod_token_id` was accepted but never stored, so the documented
        # end-of-document id was silently dropped and did not serialize.
        self.eod_token_id = eod_token_id
        self.sep_token_id = sep_token_id
        self.tie_word_embeddings = tie_word_embeddings
        super().__init__(**kwargs)

    def _rope_parameters_validation(self):
        """
        Validate the `rope_parameters` configuration.

        Raises `ValueError` when the dict shape, the `type` field, or the
        `factor`/`alpha` scaling fields are malformed.
        """
        if self.rope_parameters is None:
            return
        if not isinstance(self.rope_parameters, dict) or len(self.rope_parameters) != 2:
            raise ValueError(
                "`rope_parameters` must be a dictionary with two fields, `type` and `factor` or `type` and `alpha`,"
                f"got {self.rope_parameters}"
            )
        # NOTE(review): this validation reads the legacy `type` key, while the rotary
        # embedding code reads `rope_type`/`rope_theta` — confirm which schema is expected.
        rope_parameters_type = self.rope_parameters.get("type", None)
        rope_parameters_factor = self.rope_parameters.get("factor", None)
        rope_parameters_alpha = self.rope_parameters.get("alpha", None)
        if rope_parameters_type is None or rope_parameters_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_parameters`'s type field must be one of ['linear', 'dynamic'], got {rope_parameters_type}"
            )
        if rope_parameters_factor is None and rope_parameters_alpha is None:
            # Fix: reworded the previously garbled error message
            # ("factor or alpha field must be have one, got both of none").
            raise ValueError("`rope_parameters` must define either the `factor` or the `alpha` field, got neither")
        if rope_parameters_factor is not None:
            if not isinstance(rope_parameters_factor, float) or rope_parameters_factor <= 1.0:
                raise ValueError(
                    f"`rope_parameters`'s factor field must be a float > 1.0, got {rope_parameters_factor}"
                )
        if rope_parameters_alpha is not None:
            if not isinstance(rope_parameters_alpha, float) or rope_parameters_alpha <= 1.0:
                raise ValueError(f"`rope_parameters`'s alpha field must be a float > 1.0, got {rope_parameters_alpha}")


__all__ = ["HunYuanMoEV1Config"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/hunyuan_v1_moe/configuration_hunyuan_v1_moe.py",
"license": "Apache License 2.0",
"lines": 180,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/hunyuan_v1_moe/modular_hunyuan_v1_moe.py | # Copyright (C) 2025 THL A29 Limited, a Tencent company and the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch HunYuanMoEV1 model."""
from collections.abc import Callable
import torch
import torch.nn.functional as F
from torch import nn
from ... import initialization as init
from ...cache_utils import Cache
from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...utils import TransformersKwargs, logging
from ..hunyuan_v1_dense.modeling_hunyuan_v1_dense import HunYuanDenseV1RotaryEmbedding
from ..llama.modeling_llama import (
LlamaAttention,
LlamaDecoderLayer,
LlamaForCausalLM,
LlamaForSequenceClassification,
LlamaMLP,
LlamaModel,
LlamaPreTrainedModel,
LlamaRMSNorm,
apply_rotary_pos_emb,
eager_attention_forward,
)
from ..mixtral.modeling_mixtral import MixtralExperts
from .configuration_hunyuan_v1_moe import HunYuanMoEV1Config
logger = logging.get_logger(__name__)
class HunYuanMoEV1RMSNorm(LlamaRMSNorm):
    # Inherits Llama's RMSNorm unchanged; subclassed only to carry the model-prefixed name.
    pass
class HunYuanMoEV1MLP(LlamaMLP):
    def __init__(self, config: HunYuanMoEV1Config):
        """Gated MLP; re-declares the three bias-free projections explicitly.

        Used both as the per-expert shape template and as the always-on
        shared MLP inside `HunYuanMoEV1Moe`.
        """
        super().__init__(config)
        self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
class HunYuanMoEV1Attention(LlamaAttention):
    """Llama attention extended with per-head RMSNorm on the rotated query/key states."""

    def __init__(self, config: HunYuanMoEV1Config, layer_idx: int):
        super().__init__(config, layer_idx)
        # Applied in forward() *after* the rotary embedding.
        self.query_layernorm = HunYuanMoEV1RMSNorm(self.head_dim, eps=config.rms_norm_eps)
        self.key_layernorm = HunYuanMoEV1RMSNorm(self.head_dim, eps=config.rms_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: tuple[torch.Tensor, torch.Tensor],
        attention_mask: torch.Tensor | None,
        past_key_values: Cache | None = None,
        cache_position: torch.LongTensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple[torch.Tensor, torch.Tensor]:
        """Run self-attention and return `(attn_output, attn_weights)`."""
        input_shape = hidden_states.shape[:-1]
        hidden_shape = (*input_shape, -1, self.head_dim)
        # Project then reshape to (batch, heads, seq, head_dim).
        query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        cos, sin = position_embeddings
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
        # Diff from Llama: q/k are RMS-normalized after RoPE is applied.
        query_states = self.query_layernorm(query_states)
        key_states = self.key_layernorm(key_states)
        if past_key_values is not None:
            # sin and cos are specific to RoPE models; cache_position needed for the static cache
            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
            key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
        # Dispatch to the configured attention backend (eager fallback).
        attention_interface: Callable = ALL_ATTENTION_FUNCTIONS.get_interface(
            self.config._attn_implementation, eager_attention_forward
        )
        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=0.0 if not self.training else self.attention_dropout,
            scaling=self.scaling,
            **kwargs,
        )
        # Merge heads back into the hidden dimension before the output projection.
        attn_output = attn_output.reshape(*input_shape, -1).contiguous()
        attn_output = self.o_proj(attn_output)
        return attn_output, attn_weights
class HunYuanMoEV1Gate(nn.Module):
    """Router that maps hidden states to per-token expert logits."""

    def __init__(self, config: HunYuanMoEV1Config, layer_idx: int | None = None):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        # `num_experts` may be a single int or a per-layer list.
        num_experts = config.num_experts if isinstance(config.num_experts, int) else config.num_experts[layer_idx]
        # Router weights are created in float32; forward() upcasts inputs to match.
        self.wg = nn.Linear(config.hidden_size, num_experts, bias=False, dtype=torch.float32)

    def forward(self, hidden_states):
        """Return routing logits of shape (batch*seq, num_experts)."""
        # Fix: the unpacked `bsz`/`seq_len` locals were unused; only the hidden
        # dimension is needed to flatten the token axis.
        hidden_size = hidden_states.shape[-1]
        hidden_states = hidden_states.reshape(-1, hidden_size)
        if self.wg.weight.dtype == torch.float32:
            hidden_states = hidden_states.float()
        logits = self.wg(hidden_states)
        return logits
class HunYuanMoEV1Experts(MixtralExperts):
    # Inherits Mixtral's expert bank unchanged; subclassed only to carry the model-prefixed name.
    pass
class HunYuanMoEV1Moe(nn.Module):
    """Mixture-of-experts block whose routed output is summed with an always-on shared MLP."""

    def __init__(self, config: HunYuanMoEV1Config, layer_idx: int | None = None):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        # `num_experts` / `moe_topk` may be single ints or per-layer lists.
        self.num_experts = config.num_experts if isinstance(config.num_experts, int) else config.num_experts[layer_idx]
        self.top_k = config.moe_topk if isinstance(config.moe_topk, int) else config.moe_topk[layer_idx]
        self.gate = HunYuanMoEV1Gate(config, layer_idx=layer_idx)
        self.experts = HunYuanMoEV1Experts(config)
        self.shared_mlp = HunYuanMoEV1MLP(config)

    def route_tokens_to_experts(self, hidden_states):
        # Despite the parameter name, forward() passes the router *logits* here,
        # shape (tokens, num_experts); softmax over dim=1 is the experts axis.
        routing_weights = F.softmax(hidden_states, dim=1, dtype=torch.float)
        routing_weights, selected_experts = torch.topk(routing_weights, self.top_k, dim=-1)
        # Renormalize the retained top-k weights so they sum to 1 per token.
        routing_weights /= routing_weights.sum(dim=-1, keepdim=True)
        return selected_experts, routing_weights.to(hidden_states.dtype)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        """Return shared-MLP output plus the top-k expert mixture, same shape as input."""
        batch_size, sequence_length, hidden_dim = hidden_states.shape
        # The shared MLP processes every token regardless of routing.
        hidden_states_mlp = self.shared_mlp(hidden_states)
        router_logits = self.gate(hidden_states)
        # Flatten to (tokens, hidden) for the expert dispatch.
        hidden_states = hidden_states.view(-1, hidden_dim)
        selected_experts, routing_weights = self.route_tokens_to_experts(router_logits)
        final_hidden_states = self.experts(hidden_states, selected_experts, routing_weights).reshape(
            batch_size, sequence_length, hidden_dim
        )
        return final_hidden_states + hidden_states_mlp
class HunYuanMoEV1DecoderLayer(LlamaDecoderLayer):
    def __init__(self, config: HunYuanMoEV1Config, layer_idx: int):
        """Llama decoder layer with the dense MLP replaced by the MoE block."""
        super().__init__(config, layer_idx)
        self.hidden_size = config.hidden_size
        # Swap in this model's attention (q/k layernorm) and MoE feed-forward.
        self.self_attn = HunYuanMoEV1Attention(config=config, layer_idx=layer_idx)
        self.mlp = HunYuanMoEV1Moe(config, layer_idx=layer_idx)
        self.input_layernorm = HunYuanMoEV1RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.post_attention_layernorm = HunYuanMoEV1RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.layer_idx = layer_idx
class HunYuanMoEV1PreTrainedModel(LlamaPreTrainedModel):
    @torch.no_grad()
    def _init_weights(self, module):
        """Run the generic init, then handle fused experts and rotary buffers specially."""
        PreTrainedModel._init_weights(self, module)
        if isinstance(module, HunYuanMoEV1Experts):
            # Expert projections are initialized here explicitly — presumably they are
            # raw parameter tensors not covered by the generic Linear init; confirm
            # against MixtralExperts.
            init.normal_(module.gate_up_proj, mean=0.0, std=self.config.initializer_range)
            init.normal_(module.down_proj, mean=0.0, std=self.config.initializer_range)
        # DynamicNTKAlphaRotary - unique to this model
        elif "RotaryEmbedding" in module.__class__.__name__ and hasattr(module, "original_inv_freq"):
            if module.rope_type == "dynamic" and module.config.rope_parameters.get("alpha"):
                # NTK-by-alpha scaling: stretch the RoPE base by alpha^(dim/(dim-2)).
                dim = module.config.head_dim
                rope_theta = module.config.rope_parameters["rope_theta"]
                alpha = module.config.rope_parameters["alpha"]
                base = rope_theta * alpha ** (dim / (dim - 2))
                buffer_value = 1.0 / (base ** (torch.arange(0, dim, 2).float() / dim))
            else:
                # Otherwise fall back to the standard transformers RoPE initializers.
                rope_fn = (
                    ROPE_INIT_FUNCTIONS[module.rope_type]
                    if module.rope_type != "default"
                    else module.compute_default_rope_parameters
                )
                buffer_value, _ = rope_fn(module.config)
            # Overwrite both the live and the pristine copies of the buffer.
            init.copy_(module.inv_freq, buffer_value)
            init.copy_(module.original_inv_freq, buffer_value)
class HunYuanMoEV1RotaryEmbedding(HunYuanDenseV1RotaryEmbedding):
pass
class HunYuanMoEV1Model(LlamaModel):
pass
class HunYuanMoEV1ForCausalLM(LlamaForCausalLM):
pass
class HunYuanMoEV1ForSequenceClassification(LlamaForSequenceClassification):
    """HunYuanMoEV1 with a sequence-classification head, as in Llama."""

    pass
# Public symbols exported by this modular file.
__all__ = [
    "HunYuanMoEV1ForCausalLM",
    "HunYuanMoEV1Model",
    "HunYuanMoEV1PreTrainedModel",
    "HunYuanMoEV1ForSequenceClassification",
]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/hunyuan_v1_moe/modular_hunyuan_v1_moe.py",
"license": "Apache License 2.0",
"lines": 179,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:tests/models/hunyuan_v1_dense/test_modeling_hunyuan_v1_dense.py | # Copyright (C) 2024 THL A29 Limited, a Tencent company and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch HunYuanDenseV1 model."""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import (
cleanup,
require_torch,
slow,
torch_device,
)
if is_torch_available():
from transformers import (
HunYuanDenseV1Model,
)
from ...causal_lm_tester import CausalLMModelTest, CausalLMModelTester
class HunYuanDenseV1ModelTester(CausalLMModelTester):
    # Reuses the shared causal-LM test harness; only the base model class differs.
    if is_torch_available():
        base_model_class = HunYuanDenseV1Model
@require_torch
class HunYuanDenseV1ModelTest(CausalLMModelTest, unittest.TestCase):
    model_tester_class = HunYuanDenseV1ModelTester

    def is_pipeline_test_to_skip(
        self,
        pipeline_test_case_name,
        config_class,
        model_architecture,
        tokenizer_name,
        image_processor_name,
        feature_extractor_name,
        processor_name,
    ):
        # Unconditionally skip every pipeline test for this model.
        return True
@require_torch
class HunYuanDenseV1IntegrationTest(unittest.TestCase):
    """Slow integration tests for HunYuanDenseV1.

    Currently a placeholder: the generation test is skipped until a public
    dense checkpoint is available.
    """

    def setUp(self):
        # Release accelerator memory and run GC before each test.
        cleanup(torch_device, gc_collect=True)

    def tearDown(self):
        cleanup(torch_device, gc_collect=True)

    @slow
    @unittest.skip(reason="TODO: needs a new Dense model checkpoint")
    def test_model_generation(self):
        # Fix: the previous placeholder ended with `return True`, which pytest
        # flags (PytestReturnNotNoneWarning) and which silently reports the
        # test as passing. Skipping makes the placeholder status explicit.
        pass
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/hunyuan_v1_dense/test_modeling_hunyuan_v1_dense.py",
"license": "Apache License 2.0",
"lines": 54,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:tests/models/hunyuan_v1_moe/test_modeling_hunyuan_v1_moe.py | # Copyright (C) 2024 THL A29 Limited, a Tencent company and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch HunYuanMoEV1 model."""
import unittest
import pytest
import torch
from transformers import is_torch_available
from transformers.testing_utils import (
cleanup,
require_torch,
slow,
torch_device,
)
if is_torch_available():
from transformers import AutoTokenizer, HunYuanMoEV1ForCausalLM, HunYuanMoEV1Model
from ...causal_lm_tester import CausalLMModelTest, CausalLMModelTester
class HunYuanMoEV1ModelTester(CausalLMModelTester):
    # Reuses the shared causal-LM test harness; only the base model class differs.
    if is_torch_available():
        base_model_class = HunYuanMoEV1Model
@require_torch
class HunYuanMoEV1ModelTest(CausalLMModelTest, unittest.TestCase):
    # NOTE(review): presumably disabled because MoE routing can leave some
    # experts without gradients in the tiny test model — confirm.
    test_all_params_have_gradient = False
    model_tester_class = HunYuanMoEV1ModelTester

    def is_pipeline_test_to_skip(
        self,
        pipeline_test_case_name,
        config_class,
        model_architecture,
        tokenizer_name,
        image_processor_name,
        feature_extractor_name,
        processor_name,
    ):
        # Unconditionally skip every pipeline test for this model.
        return True

    @unittest.skip("Hunyuan model Unsupported")
    @pytest.mark.torch_compile_test
    def test_generate_compilation_all_outputs(self):
        pass

    @unittest.skip("Hunyuan model Unsupported")
    @pytest.mark.torch_compile_test
    def test_generate_compile_model_forward(self):
        pass

    @unittest.skip("Hunyuan model Unsupported")
    def test_generate_from_inputs_embeds_with_static_cache(self):
        pass

    @unittest.skip("Hunyuan model Unsupported")
    def test_generate_with_static_cache(self):
        pass
@require_torch
class HunYuanMoEV1IntegrationTest(unittest.TestCase):
    def setUp(self):
        # Free accelerator memory and run GC between tests.
        cleanup(torch_device, gc_collect=True)

    def tearDown(self):
        cleanup(torch_device, gc_collect=True)

    @slow
    def test_model_generation(self):
        # The instruct model opens a "<think>" block; we compare the first 10
        # generated tokens that follow it.
        EXPECTED_ANSWER = "\nOkay, I need to write a"
        prompt = "Write a short summary of the benefits of regular exercise"
        tokenizer = AutoTokenizer.from_pretrained("tencent/Hunyuan-A13B-Instruct")
        model = HunYuanMoEV1ForCausalLM.from_pretrained(
            "tencent/Hunyuan-A13B-Instruct", device_map="auto", dtype=torch.bfloat16
        )
        messages = [
            {"role": "user", "content": prompt},
        ]
        tokenized_chat = tokenizer.apply_chat_template(
            messages,
            tokenize=True,
            add_generation_prompt=True,
            return_tensors="pt",
        ).to(model.device)
        # top_k=1 makes decoding deterministic.
        generated_ids = model.generate(**tokenized_chat, max_new_tokens=10, top_k=1)
        text = tokenizer.decode(generated_ids[0])
        output = text.split("<think>")[1]
        self.assertEqual(EXPECTED_ANSWER, output)
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/hunyuan_v1_moe/test_modeling_hunyuan_v1_moe.py",
"license": "Apache License 2.0",
"lines": 86,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:tests/utils/test_attention_visualizer.py | # Copyright 2025 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import builtins
import io
import re
import unittest
from transformers.testing_utils import require_torch
from transformers.utils.attention_visualizer import AttentionMaskVisualizer
ANSI_RE = re.compile(r"\x1b\[[0-9;]*m")
def _normalize(s: str) -> str:
# drop ANSI (colors may be disabled on CI), normalize line endings,
# and strip trailing spaces without touching alignment inside lines
s = ANSI_RE.sub("", s)
s = s.replace("\r\n", "\n").replace("\r", "\n")
return "\n".join(line.rstrip() for line in s.split("\n")).strip()
@require_torch
class AttentionMaskVisualizerTester(unittest.TestCase):
    """Test suite for AttentionMaskVisualizer"""

    def test_paligemma_multimodal_visualization(self):
        """Test AttentionMaskVisualizer with PaliGemma multimodal model"""
        model_name = "hf-internal-testing/namespace_google_repo_name_paligemma-3b-pt-224"
        input_text = "<img> What is in this image?"
        buf = io.StringIO()
        orig_print = builtins.print

        # Redirect every print() into `buf` so the visualizer's console
        # output can be captured and compared.
        def _print(*args, **kwargs):
            kwargs.setdefault("file", buf)
            orig_print(*args, **kwargs)

        try:
            builtins.print = _print
            visualizer = AttentionMaskVisualizer(model_name)
            visualizer(input_text)
        finally:
            # Always restore the builtin, even if the visualizer raises.
            builtins.print = orig_print
        output = buf.getvalue()
        expected_output = """
##########################################################################################################################################################################################################################################
## Attention visualization for \033[1mpaligemma:hf-internal-testing/namespace_google_repo_name_paligemma-3b-pt-224\033[0m PaliGemmaModel ##
##########################################################################################################################################################################################################################################
\033[92m■\033[0m: i == j (diagonal) \033[93m■\033[0m: token_type_ids
Attention Matrix
\033[93m'<image>'\033[0m: 0 \033[93m■\033[0m ⬚ ⬚ ⬚ ⬚ ⬚ ⬚ ⬚ ⬚ ⬚ ⬚ ⬚ ⬚ ⬚ |
\033[93m'<image>'\033[0m: 1 \033[93m■\033[0m \033[93m■\033[0m \033[93m■\033[0m \033[93m■\033[0m \033[93m■\033[0m ⬚ ⬚ ⬚ ⬚ ⬚ ⬚ ⬚ ⬚ ⬚ |
\033[93m'<image>'\033[0m: 2 \033[93m■\033[0m \033[93m■\033[0m \033[93m■\033[0m \033[93m■\033[0m \033[93m■\033[0m ⬚ ⬚ ⬚ ⬚ ⬚ ⬚ ⬚ ⬚ ⬚ |
\033[93m'<image>'\033[0m: 3 \033[93m■\033[0m \033[93m■\033[0m \033[93m■\033[0m \033[93m■\033[0m \033[93m■\033[0m ⬚ ⬚ ⬚ ⬚ ⬚ ⬚ ⬚ ⬚ ⬚ |
\033[93m'<image>'\033[0m: 4 \033[93m■\033[0m \033[93m■\033[0m \033[93m■\033[0m \033[93m■\033[0m \033[93m■\033[0m ⬚ ⬚ ⬚ ⬚ ⬚ ⬚ ⬚ ⬚ ⬚ |
'<bos>' : 5 ■ ■ ■ ■ ■ \033[92m■\033[0m ⬚ ⬚ ⬚ ⬚ ⬚ ⬚ ⬚ ⬚ |
'▁What' : 6 ■ ■ ■ ■ ■ ■ \033[92m■\033[0m ⬚ ⬚ ⬚ ⬚ ⬚ ⬚ ⬚ |
'▁is' : 7 ■ ■ ■ ■ ■ ■ ■ \033[92m■\033[0m ⬚ ⬚ ⬚ ⬚ ⬚ ⬚ |
'▁in' : 8 ■ ■ ■ ■ ■ ■ ■ ■ \033[92m■\033[0m ⬚ ⬚ ⬚ ⬚ ⬚ |
'▁this' : 9 ■ ■ ■ ■ ■ ■ ■ ■ ■ \033[92m■\033[0m ⬚ ⬚ ⬚ ⬚ |
'▁image' : 10 ■ ■ ■ ■ ■ ■ ■ ■ ■ ■ \033[92m■\033[0m ⬚ ⬚ ⬚ |
'?' : 11 ■ ■ ■ ■ ■ ■ ■ ■ ■ ■ ■ \033[92m■\033[0m ⬚ ⬚ |
'\\n' : 12 ■ ■ ■ ■ ■ ■ ■ ■ ■ ■ ■ ■ \033[92m■\033[0m ⬚ |
'<eos>' : 13 ■ ■ ■ ■ ■ ■ ■ ■ ■ ■ ■ ■ ■ \033[92m■\033[0m |
##########################################################################################################################################################################################################################################
""" # noqa
        self.assertEqual(_normalize(output), _normalize(expected_output))

    def test_llama_text_only_visualization(self):
        """Test AttentionMaskVisualizer with Llama text-only model"""
        model_name = "hf-internal-testing/namespace_meta-llama_repo_name_Llama-2-7b-hf"
        input_text = "Plants create energy through a process known as"
        buf = io.StringIO()
        orig_print = builtins.print

        # Same print-capture trick as above.
        def _print(*args, **kwargs):
            kwargs.setdefault("file", buf)
            orig_print(*args, **kwargs)

        try:
            builtins.print = _print
            visualizer = AttentionMaskVisualizer(model_name)
            visualizer(input_text)
        finally:
            builtins.print = orig_print
        output = buf.getvalue()
        expected_output = """
##########################################################################################################################################################################################################
## Attention visualization for \033[1mllama:hf-internal-testing/namespace_meta-llama_repo_name_Llama-2-7b-hf\033[0m LlamaModel ##
##########################################################################################################################################################################################################
\033[92m■\033[0m: i == j (diagonal) \033[93m■\033[0m: token_type_ids
Attention Matrix
'▁Pl' : 0 \033[92m■\033[0m ⬚ ⬚ ⬚ ⬚ ⬚ ⬚ ⬚ ⬚ |
'ants' : 1 ■ \033[92m■\033[0m ⬚ ⬚ ⬚ ⬚ ⬚ ⬚ ⬚ |
'▁create' : 2 ■ ■ \033[92m■\033[0m ⬚ ⬚ ⬚ ⬚ ⬚ ⬚ |
'▁energy' : 3 ■ ■ ■ \033[92m■\033[0m ⬚ ⬚ ⬚ ⬚ ⬚ |
'▁through': 4 ■ ■ ■ ■ \033[92m■\033[0m ⬚ ⬚ ⬚ ⬚ |
'▁a' : 5 ■ ■ ■ ■ ■ \033[92m■\033[0m ⬚ ⬚ ⬚ |
'▁process': 6 ■ ■ ■ ■ ■ ■ \033[92m■\033[0m ⬚ ⬚ |
'▁known' : 7 ■ ■ ■ ■ ■ ■ ■ \033[92m■\033[0m ⬚ |
'▁as' : 8 ■ ■ ■ ■ ■ ■ ■ ■ \033[92m■\033[0m |
##########################################################################################################################################################################################################
""" # noqa
        self.assertEqual(_normalize(output), _normalize(expected_output))
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/utils/test_attention_visualizer.py",
"license": "Apache License 2.0",
"lines": 102,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:src/transformers/models/florence2/modular_florence2.py | # Copyright 2025 Microsoft and the HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import re
from collections.abc import Callable
from dataclasses import dataclass
from typing import Any
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from ... import initialization as init
from ...activations import ACT2FN
from ...cache_utils import Cache
from ...configuration_utils import PreTrainedConfig
from ...feature_extraction_utils import BatchFeature
from ...image_utils import ImageInput
from ...modeling_outputs import BaseModelOutputWithPooling, Seq2SeqLMOutput, Seq2SeqModelOutput
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import MultiModalData, ProcessorMixin, Unpack
from ...tokenization_utils_base import PreTokenizedInput, TextInput
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, is_torch_available, logging
from ...utils.generic import merge_with_config_defaults
from ...utils.output_capturing import capture_outputs
from ..auto import CONFIG_MAPPING, AutoConfig
from ..bart.modeling_bart import eager_attention_forward, shift_tokens_right
from ..beit.modeling_beit import BeitDropPath
from ..llama4.modeling_llama4 import Llama4VisionMLP
from ..llava.modeling_llava import LlavaForConditionalGeneration, LlavaModel, LlavaPreTrainedModel
from ..llava.processing_llava import LlavaProcessorKwargs
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class Florence2VisionConfig(PreTrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`Florence2VisionModel`]. It is used to instantiate a Florence2VisionModel
    according to the specified arguments, defining the model architecture. Instantiating a configuration with the
    defaults will yield a similar configuration to that of the Florence2VisionModel architecture.
    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.
    Args:
        in_channels (`int`, *optional*, defaults to 3):
            Number of input image channels.
        depths (`Tuple[int]`, *optional*, defaults to `(1, 1, 9, 1)`):
            The depth of the model.
        patch_size (`Tuple[int]`, *optional*, defaults to `(7, 3, 3, 3)`):
            The patch size of the image.
        patch_stride (`Tuple[int]`, *optional*, defaults to `(4, 2, 2, 2)`):
            The patch stride of the image.
        patch_padding (`Tuple[int]`, *optional*, defaults to `(3, 1, 1, 1)`):
            The patch padding of the image.
        patch_prenorm (`Tuple[bool]`, *optional*, defaults to `(False, True, True, True)`):
            Whether to apply layer normalization before the patch embedding layer.
        embed_dim (`Tuple[int]`, *optional*, defaults to `(128, 256, 512, 1024)`):
            The dimension of the embedding layer.
        num_heads (`Tuple[int]`, *optional*, defaults to `(4, 8, 16, 32)`):
            The number of attention heads.
        num_groups (`Tuple[int]`, *optional*, defaults to `(4, 8, 16, 32)`):
            The number of groups.
        window_size (`int`, *optional*, defaults to 12):
            The window size of the model.
        drop_path_rate (`float`, *optional*, defaults to 0.1):
            The dropout rate of the drop path layer.
        mlp_ratio (`float`, *optional*, defaults to 4.0):
            Ratio of mlp hidden dim to embedding dim.
        qkv_bias (`bool`, *optional*, defaults to `True`):
            If True, add a learnable bias to query, key, value.
        activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"silu"` and `"gelu_new"` are supported.
        projection_dim (`int`, *optional*, defaults to 1024):
            The dimension of the projection layer.
        max_temporal_embeddings (`int`, *optional*, defaults to 100):
            The configuration of the visual temporal embedding.
        max_position_embeddings (`int`, *optional*, defaults to 50):
            The configuration of the image position embedding.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
    Example:
    ```python
    >>> from transformers import Florence2VisionConfig, Florence2VisionModel
    >>> # Initializing a Florence2 Vision style configuration
    >>> configuration = Florence2VisionConfig()
    >>> # Initializing a model (with random weights)
    >>> model = Florence2VisionModel(configuration)
    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "florence_vision"

    def __init__(
        self,
        in_channels=3,
        depths=(1, 1, 9, 1),
        patch_size=(7, 3, 3, 3),
        patch_stride=(4, 2, 2, 2),
        patch_padding=(3, 1, 1, 1),
        patch_prenorm=(False, True, True, True),
        embed_dim=(128, 256, 512, 1024),
        num_heads=(4, 8, 16, 32),
        num_groups=(4, 8, 16, 32),
        window_size=12,
        drop_path_rate=0.1,
        mlp_ratio=4.0,
        qkv_bias=True,
        activation_function="gelu",
        projection_dim=1024,
        max_temporal_embeddings=100,
        max_position_embeddings=50,
        initializer_range=0.02,
        **kwargs,
    ):
        # Per-stage tuples are stored as lists so the config serializes to JSON.
        self.in_channels = in_channels
        self.depths = list(depths)
        self.patch_size = list(patch_size)
        self.patch_stride = list(patch_stride)
        self.patch_padding = list(patch_padding)
        self.patch_prenorm = list(patch_prenorm)
        self.embed_dim = list(embed_dim)
        self.num_heads = list(num_heads)
        self.num_groups = list(num_groups)
        self.window_size = window_size
        self.drop_path_rate = drop_path_rate
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.projection_dim = projection_dim
        self.max_temporal_embeddings = max_temporal_embeddings
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.activation_function = activation_function
        super().__init__(**kwargs)
class Florence2Config(PreTrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`Florence2ForConditionalGeneration`]. It is used to instantiate an
    Florence-2 model according to the specified arguments, defining the model architecture.
    Instantiating a configuration with the defaults will yield a similar configuration to that of the Florence-2
    [florence-community/Florence-2-base](https://huggingface.co/florence-community/Florence-2-base) architecture.
    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.
    Args:
        text_config (`dict`, *optional*):
            Dictionary of configuration options used to initialize [`AutoConfig`].
        vision_config (`dict`, *optional*):
            Dictionary of configuration options used to initialize [`Florence2VisionConfig`].
        image_token_id (`int`, *optional*, defaults to 51289):
            The image token index to encode the image prompt.
        is_encoder_decoder (`bool`, *optional*, defaults to `True`):
            Whether the model is used as an encoder/decoder or not.
        tie_word_embeddings (`bool`, *optional*, defaults to `True`):
            Whether to tie weight embeddings
    Example:
    ```python
    >>> from transformers import Florence2ForConditionalGeneration, Florence2Config, CLIPVisionConfig, BartConfig
    >>> # Initializing a clip-like vision config
    >>> vision_config = CLIPVisionConfig()
    >>> # Initializing a Bart config
    >>> text_config = BartConfig()
    >>> # Initializing a Florence-2 configuration
    >>> configuration = Florence2Config(vision_config, text_config)
    >>> # Initializing a model from the florence-2 configuration
    >>> model = Florence2ForConditionalGeneration(configuration)
    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "florence2"
    sub_configs = {
        "text_config": AutoConfig,
        "vision_config": Florence2VisionConfig,
    }

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        image_token_id=51289,
        is_encoder_decoder=True,
        tie_word_embeddings=True,
        **kwargs,
    ):
        # The text backbone defaults to BART when no model_type is given.
        if isinstance(text_config, dict):
            text_config["model_type"] = text_config.get("model_type", "bart")
            text_config = CONFIG_MAPPING[text_config["model_type"]](**text_config)
        elif text_config is None:
            text_config = CONFIG_MAPPING["bart"]()
        if isinstance(vision_config, dict):
            vision_config = Florence2VisionConfig(**vision_config)
        elif vision_config is None:
            logger.info("vision_config is None. Initializing the Florence2VisionConfig with default values.")
            vision_config = Florence2VisionConfig()
        self.text_config = text_config
        self.vision_config = vision_config
        self.image_token_id = image_token_id
        self.tie_word_embeddings = tie_word_embeddings
        super().__init__(
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )
class Florence2ProcessorKwargs(LlavaProcessorKwargs):
    """Processor kwargs for Florence-2; identical to the Llava processor kwargs."""

    pass
@auto_docstring
class Florence2Processor(ProcessorMixin):
    def __init__(
        self,
        image_processor=None,
        tokenizer=None,
        num_additional_image_tokens: int = 0,
        post_processor_config: dict | None = None,
        **kwargs,
    ):
        r"""
        num_additional_image_tokens (`int`, *optional*, defaults to 0):
            Number of additional tokens added to the image embeddings, such as CLS (+1). If the backbone has no CLS or other
            extra tokens appended, no need to set this arg.
        post_processor_config (`dict`, *optional*, defaults to `None`):
            Task-specific parsing rules for [`Florence2PostProcessor`], e.g. regex patterns,
            thresholds, or banned tokens.
        """
        # Maps each task token to the post-processing strategy applied to the generated text.
        self.tasks_answer_post_processing_type = {
            "<OCR>": "pure_text",
            "<OCR_WITH_REGION>": "ocr",
            "<CAPTION>": "pure_text",
            "<DETAILED_CAPTION>": "pure_text",
            "<MORE_DETAILED_CAPTION>": "pure_text",
            "<OD>": "description_with_bboxes",
            "<DENSE_REGION_CAPTION>": "description_with_bboxes",
            "<CAPTION_TO_PHRASE_GROUNDING>": "phrase_grounding",
            "<REFERRING_EXPRESSION_SEGMENTATION>": "polygons",
            "<REGION_TO_SEGMENTATION>": "polygons",
            "<OPEN_VOCABULARY_DETECTION>": "description_with_bboxes_or_polygons",
            "<REGION_TO_CATEGORY>": "pure_text",
            "<REGION_TO_DESCRIPTION>": "pure_text",
            "<REGION_TO_OCR>": "pure_text",
            "<REGION_PROPOSAL>": "bboxes",
        }
        # Task tokens that expand to a fixed natural-language prompt.
        self.task_prompts_without_inputs = {
            "<OCR>": "What is the text in the image?",
            "<OCR_WITH_REGION>": "What is the text in the image, with regions?",
            "<CAPTION>": "What does the image describe?",
            "<DETAILED_CAPTION>": "Describe in detail what is shown in the image.",
            "<MORE_DETAILED_CAPTION>": "Describe with a paragraph what is shown in the image.",
            "<OD>": "Locate the objects with category name in the image.",
            "<DENSE_REGION_CAPTION>": "Locate the objects in the image, with their descriptions.",
            "<REGION_PROPOSAL>": "Locate the region proposals in the image.",
        }
        # Task tokens whose prompt template consumes the rest of the user text as {input}.
        self.task_prompts_with_input = {
            "<CAPTION_TO_PHRASE_GROUNDING>": "Locate the phrases in the caption: {input}",
            "<REFERRING_EXPRESSION_SEGMENTATION>": "Locate {input} in the image with mask",
            "<REGION_TO_SEGMENTATION>": "What is the polygon mask of region {input}",
            "<OPEN_VOCABULARY_DETECTION>": "Locate {input} in the image.",
            "<REGION_TO_CATEGORY>": "What is the region {input}?",
            "<REGION_TO_DESCRIPTION>": "What does the region {input} describe?",
            "<REGION_TO_OCR>": "What text is in the region {input}?",
        }
        self.num_image_tokens = image_processor.image_seq_length
        self.num_additional_image_tokens = num_additional_image_tokens
        self.post_processor_config = post_processor_config
        self.post_processor = Florence2PostProcessor(config=post_processor_config, tokenizer=tokenizer)
        self.image_token = tokenizer.image_token
        self.image_token_id = tokenizer.image_token_id
        super().__init__(image_processor, tokenizer, **kwargs)

    def _construct_prompts(self, text: str | list[str]) -> list[str]:
        """
        Construct prompts by replacing task tokens with corresponding prompt strings.
        """
        if isinstance(text, str):
            text = [text]
        prompts = []
        for prompt in text:
            # Check for tasks without inputs
            for task_token, task_prompt in self.task_prompts_without_inputs.items():
                if task_token in prompt:
                    if prompt != task_token:
                        raise ValueError(f"Task token {task_token} should be the only content in the prompt.")
                    prompt = task_prompt
                    break
            # Check for tasks with inputs
            for task_token, task_prompt in self.task_prompts_with_input.items():
                if task_token in prompt:
                    input_text = prompt.replace(task_token, "").strip()
                    prompt = task_prompt.format(input=input_text)
                    break
            prompts.append(prompt)
        return prompts

    @auto_docstring
    def __call__(
        self,
        images: ImageInput | None = None,
        text: TextInput | PreTokenizedInput | list[TextInput] | list[PreTokenizedInput] = None,
        **kwargs: Unpack[Florence2ProcessorKwargs],
    ) -> BatchFeature:
        r"""
        Returns:
            [`BatchFeature`]: A [`BatchFeature`] with the following fields:
            - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
            - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
              `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
              `None`).
            - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
        """
        if images is None and text is None:
            raise ValueError("You have to specify at least one of `images` or `text`.")
        output_kwargs = self._merge_kwargs(
            Florence2ProcessorKwargs,
            tokenizer_init_kwargs=self.tokenizer.init_kwargs,
            **kwargs,
        )
        image_inputs = {}
        if images is not None:
            image_inputs = self.image_processor(images, **output_kwargs["images_kwargs"])
        # Images without text: fall back to one empty prompt per image.
        if text is None:
            logger.warning_once("You are using Florence-2 without a text prefix.")
            text = [""] * (1 if not isinstance(images, list) else len(images))
        elif isinstance(text, str):
            text = [text]
        if not isinstance(text, list) or not all(isinstance(token, str) for token in text):
            raise ValueError("`text` must be a string or list of strings.")
        if isinstance(images, list) and len(images) != len(text):
            raise ValueError(f"Number of images ({len(images)}) must match number of texts ({len(text)}).")
        prompt_strings = self._construct_prompts(text)
        # Add image tokens and special tokens if images are provided
        if image_inputs.get("pixel_values") is not None:
            # Replace the image token with the expanded image token sequence
            expanded_image_prompts = []
            for sample in prompt_strings:
                sample = (
                    self.image_token * self.num_image_tokens
                    + self.tokenizer.bos_token
                    + sample
                    + self.tokenizer.eos_token
                )
                expanded_image_prompts.append(sample)
            prompt_strings = expanded_image_prompts
        # Construct and tokenize prompts
        output_kwargs["text_kwargs"].pop("add_special_tokens", None)
        return_tensors = output_kwargs["text_kwargs"].pop("return_tensors", None)
        return_mm_token_type_ids = output_kwargs["text_kwargs"].pop("return_mm_token_type_ids", False)
        # Special tokens were added manually above, so the tokenizer must not add them again.
        text_inputs = self.tokenizer(
            prompt_strings, **output_kwargs["text_kwargs"], add_special_tokens=False, return_tensors=None
        )
        self._check_special_mm_tokens(prompt_strings, text_inputs, modalities=["image"])
        if return_mm_token_type_ids:
            # token_type_id 1 marks image-token positions, 0 marks text.
            array_ids = np.array(text_inputs["input_ids"])
            mm_token_type_ids = np.zeros_like(text_inputs["input_ids"])
            mm_token_type_ids[array_ids == self.image_token_id] = 1
            text_inputs["mm_token_type_ids"] = mm_token_type_ids.tolist()
        return BatchFeature(data={**image_inputs, **text_inputs}, tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """
        This method forwards all its arguments to BartTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please
        refer to the docstring of this method for more information.
        """
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """
        This method forwards all its arguments to BartTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to
        the docstring of this method for more information.
        """
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # Union of tokenizer and image-processor input names, de-duplicated but order-preserving.
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    def _get_num_multimodal_tokens(self, image_sizes=None, **kwargs):
        """
        Computes the number of placeholder tokens needed for multimodal inputs with the given sizes.
        Args:
            image_sizes (`list[list[int]]`, *optional*):
                The input sizes formatted as (height, width) per each image.
        Returns:
            `MultiModalData`: A `MultiModalData` object holding number of tokens per each of the provided
            input modalities, along with other useful data.
        """
        vision_data = {}
        if image_sizes is not None:
            # Every image uses the same fixed number of tokens and a single patch.
            num_image_tokens = [self.num_image_tokens] * len(image_sizes)
            num_image_patches = [1] * len(image_sizes)
            vision_data.update({"num_image_tokens": num_image_tokens, "num_image_patches": num_image_patches})
        return MultiModalData(**vision_data)

    def post_process_image_text_to_text(self, generated_outputs, skip_special_tokens=False, **kwargs):
        """
        Post-processes the output of `Florence2ForConditionalGeneration` to only return the text output.
        Args:
            generated_outputs (`torch.Tensor` or `np.ndarray`):
                The output of the model. The output is expected to be a tensor of shape `(batch_size, sequence_length)`
                containing the token ids of the generated sequences.
            skip_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not to remove special tokens in the output. Argument passed to the tokenizer's `batch_decode` method.
            **kwargs:
                Additional arguments to be passed to the tokenizer's `batch_decode method`.
        Returns:
            `list[str]`: The decoded text output.
        """
        return self.batch_decode(generated_outputs, skip_special_tokens=skip_special_tokens, **kwargs)

    def post_process_generation(self, text=None, sequence=None, task=None, image_size=None) -> dict[str, Any]:
        """
        Post-process generation outputs based on the task.
        Args:
            text (`str`, *optional*):
                Generated text.
            sequence (`Union[List[int], torch.Tensor]`, *optional*):
                Generated token sequence.
            task (`str`, *optional*):
                The task for post-processing.
            image_size (`Tuple[int, int]`, *optional*):
                Image size for dequantization.
        Returns:
            `Dict[str, Any]`: Post-processed results keyed by task.
        """
        if task is None:
            raise ValueError("`task` must be provided for post-processing.")
        # Unknown tasks fall back to plain-text post-processing.
        post_proc_type = self.tasks_answer_post_processing_type.get(task, "pure_text")
        parsed = self.post_processor(
            text=text,
            sequence=sequence,
            image_size=image_size,
            parse_tasks=[post_proc_type],
        )[post_proc_type]
        if post_proc_type == "pure_text":
            final_answer = parsed.replace("<s>", "").replace("</s>", "").strip()
        elif post_proc_type in ["description_with_bboxes", "bboxes"]:
            bboxes = [inst["bbox"] for inst in parsed]
            labels = [inst["cat_name"] for inst in parsed]
            final_answer = {"bboxes": bboxes, "labels": labels}
            # Scores are optional; include them only if the parser produced them.
            if parsed and "score" in parsed[0]:
                final_answer["scores"] = [inst["score"] for inst in parsed]
        elif post_proc_type == "ocr":
            quad_boxes = [inst["quad_box"] for inst in parsed]
            labels = [inst["text"] for inst in parsed]
            final_answer = {"quad_boxes": quad_boxes, "labels": labels}
        elif post_proc_type == "phrase_grounding":
            # One phrase may ground to several boxes; repeat the label per box.
            bboxes = []
            labels = []
            for inst in parsed:
                for bbox in inst["bbox"]:
                    bboxes.append(bbox)
                    labels.append(inst["cat_name"])
            final_answer = {"bboxes": bboxes, "labels": labels}
        elif post_proc_type in ["description_with_polygons", "polygons"]:
            polygons = [inst["polygons"] for inst in parsed]
            labels = [inst["cat_name"] for inst in parsed]
            final_answer = {"polygons": polygons, "labels": labels}
        elif post_proc_type == "description_with_bboxes_or_polygons":
            # Mixed output: route each instance to the bbox or polygon bucket.
            bboxes = []
            bboxes_labels = []
            polygons = []
            polygons_labels = []
            for inst in parsed:
                label = inst["cat_name"]
                if "polygons" in inst:
                    polygons.append(inst["polygons"])
                    polygons_labels.append(label)
                else:
                    bboxes.append(inst["bbox"])
                    bboxes_labels.append(label)
            final_answer = {
                "bboxes": bboxes,
                "bboxes_labels": bboxes_labels,
                "polygons": polygons,
                "polygons_labels": polygons_labels,
            }
        else:
            raise ValueError(f"Unknown post-processing type: {post_proc_type}")
        return {task: final_answer}
class Florence2PostProcessor:
    """
    Post-processor for Florence-2 model outputs. Parses generated text into structured results for various tasks
    like object detection, OCR, phrase grounding, etc.

    Args:
        config (`dict`, *optional*):
            Per-task parsing configuration. Entries may carry a regex `PATTERN`, an `AREA_THRESHOLD`, and a list of
            `banned_grounding_tokens` under the `phrase_grounding` key.
        tokenizer (`PreTrainedTokenizer`):
            The tokenizer used for decoding model outputs.
    """

    def __init__(self, config, tokenizer):
        self.tokenizer = tokenizer
        self.parse_task_config = config or {}
        # Phrases matching any of these strings are dropped from phrase-grounding results.
        self.banned_grounding_tokens = set(
            self.parse_task_config.get("phrase_grounding", {}).get("banned_grounding_tokens", [])
        )
        self.all_special_tokens = set(self.tokenizer.all_special_tokens)
        # Coordinates are encoded as <loc_*> tokens over a fixed (width_bins, height_bins) grid.
        self.quantize_bins = (1000, 1000)

    def quantize(self, locations: "torch.Tensor", size: tuple[int, int]) -> "torch.Tensor":
        """
        Quantize locations.

        Args:
            locations (`torch.Tensor`):
                Tensor of shape (N, 4) for boxes or (N, 2) for points/coordinates.
            size (`tuple[int, int]`):
                Original image size (width, height).

        Returns:
            `torch.Tensor`: Quantized locations as integers.
        """
        bins_w, bins_h = self.quantize_bins
        size_w, size_h = size
        per_bin_w = size_w / bins_w
        per_bin_h = size_h / bins_h
        if locations.shape[-1] == 4:  # Bounding boxes: [xmin, ymin, xmax, ymax]
            xmin, ymin, xmax, ymax = locations.split(1, dim=-1)
            # Floor into a bin index, clamped to the valid bin range.
            q_xmin = (xmin / per_bin_w).floor().clamp(0, bins_w - 1)
            q_ymin = (ymin / per_bin_h).floor().clamp(0, bins_h - 1)
            q_xmax = (xmax / per_bin_w).floor().clamp(0, bins_w - 1)
            q_ymax = (ymax / per_bin_h).floor().clamp(0, bins_h - 1)
            return torch.cat([q_xmin, q_ymin, q_xmax, q_ymax], dim=-1).int()
        elif locations.shape[-1] == 2:  # Points/coordinates: [x, y]
            x, y = locations.split(1, dim=-1)
            q_x = (x / per_bin_w).floor().clamp(0, bins_w - 1)
            q_y = (y / per_bin_h).floor().clamp(0, bins_h - 1)
            return torch.cat([q_x, q_y], dim=-1).int()
        else:
            raise ValueError(f"Unsupported location shape: last dim must be 2 or 4, got {locations.shape[-1]}.")

    def dequantize(self, locations: "torch.Tensor", size: tuple[int, int]) -> "torch.Tensor":
        """
        Dequantize locations back to original scale.

        Args:
            locations (`torch.Tensor`):
                Quantized tensor of shape (N, 4) for boxes or (N, 2) for points/coordinates.
            size (`tuple[int, int]`):
                Original image size (width, height).

        Returns:
            `torch.Tensor`: Dequantized locations, truncated to integer pixel coordinates by `.int()`.
        """
        bins_w, bins_h = self.quantize_bins
        size_w, size_h = size
        per_bin_w = size_w / bins_w
        per_bin_h = size_h / bins_h
        # Add 0.5 to use the center position of the bin as the coordinate.
        if locations.shape[-1] == 4:  # Bounding boxes
            xmin, ymin, xmax, ymax = locations.split(1, dim=-1)
            dq_xmin = (xmin + 0.5) * per_bin_w
            dq_ymin = (ymin + 0.5) * per_bin_h
            dq_xmax = (xmax + 0.5) * per_bin_w
            dq_ymax = (ymax + 0.5) * per_bin_h
            # NOTE(review): `.int()` truncates, so the +0.5 bin-center offset loses sub-pixel precision.
            return torch.cat([dq_xmin, dq_ymin, dq_xmax, dq_ymax], dim=-1).int()
        elif locations.shape[-1] == 2:  # Points/coordinates
            x, y = locations.split(1, dim=-1)
            dq_x = (x + 0.5) * per_bin_w
            dq_y = (y + 0.5) * per_bin_h
            return torch.cat([dq_x, dq_y], dim=-1).int()
        else:
            raise ValueError(f"Unsupported location shape: last dim must be 2 or 4, got {locations.shape[-1]}.")

    def decode_with_spans(self, token_ids: list[int]) -> tuple[str, list[tuple[int, int]]]:
        """
        Decode token IDs to text and compute character spans.

        Args:
            token_ids (`list[int]`):
                list of token IDs to decode.

        Returns:
            `tuple[str, list[tuple[int, int]]]`: Decoded text and list of spans (start, end) for each token.
        """
        filtered_tokens = self.tokenizer.convert_ids_to_tokens(token_ids, skip_special_tokens=False)
        text = ""
        spans = []
        for token in filtered_tokens:
            # Special tokens are kept verbatim so <loc_*> markers survive for the regex parsers.
            if token in self.all_special_tokens:
                sub_text = token
            else:
                sub_text = self.tokenizer.convert_tokens_to_string([token])
            # Character span of this token in the concatenated text (half-open interval).
            span = (len(text), len(text) + len(sub_text))
            text += sub_text
            spans.append(span)
        return text, spans

    def parse_ocr_from_text_and_spans(
        self, text: str, pattern: str | None, image_size: tuple[int, int], area_threshold: float = 0.0
    ) -> list[dict[str, Any]]:
        """
        Parse OCR results with quadrilateral boxes.

        Args:
            text (`str`):
                The generated text.
            pattern (`str`, *optional*):
                Regex pattern for matching. Falls back to a default text + 8-coordinate pattern when `None`.
            image_size (`tuple[int, int]`):
                Image size (width, height).
            area_threshold (`float`, *optional*, defaults to 0.0):
                Minimum area threshold for filtering boxes, as a fraction of the image area.

        Returns:
            `list[dict[str, Any]]`: list of instances with 'quad_box' and 'text'.
        """
        text = text.replace("<s>", "").replace("</s>", "").replace("<pad>", "")
        if pattern is None:
            # One text chunk followed by 8 <loc_*> values: 4 (x, y) corners of a quadrilateral.
            pattern = r"(.+?)<loc_(\d+)><loc_(\d+)><loc_(\d+)><loc_(\d+)><loc_(\d+)><loc_(\d+)><loc_(\d+)><loc_(\d+)>"
        matches = re.findall(pattern, text)
        instances = []
        width, height = image_size
        for content, *quad_str in matches:
            quad_bins = [int(i) for i in quad_str]
            # Reshape to (4, 2) corner points for dequantization, then back to a flat list of 8 coords.
            quad_box = self.dequantize(torch.tensor(quad_bins).reshape(-1, 2), size=image_size).flatten().tolist()
            if area_threshold > 0:
                x_coords = quad_box[0::2]
                y_coords = quad_box[1::2]
                # Apply the Shoelace formula
                area = 0.5 * abs(
                    sum(x_coords[i] * y_coords[i + 1] - x_coords[i + 1] * y_coords[i] for i in range(4 - 1))
                )
                if area < (width * height) * area_threshold:
                    continue
            instances.append({"quad_box": quad_box, "text": content.strip()})
        return instances

    def parse_phrase_grounding_from_text_and_spans(
        self, text: str, image_size: tuple[int, int]
    ) -> list[dict[str, Any]]:
        """
        Parse phrase grounding results.

        Args:
            text (`str`):
                The generated text.
            image_size (`tuple[int, int]`):
                Image size (width, height).

        Returns:
            `list[dict[str, Any]]`: list of instances with 'bbox' and 'cat_name'.
        """
        text = text.replace("<s>", "").replace("</s>", "").replace("<pad>", "")
        # A phrase is non-tag text followed by at least one 4-coordinate box.
        phrase_pattern = r"([^<]+(?:<loc_\d+>){4,})"
        phrases = re.findall(phrase_pattern, text)
        # Leading text of the phrase, up to (but excluding) the first structural tag.
        text_pattern = r"^\s*(.*?)(?=<od>|</od>|<box>|</box>|<bbox>|</bbox>|<loc_)"
        box_pattern = r"<loc_(\d+)><loc_(\d+)><loc_(\d+)><loc_(\d+)>"
        instances = []
        for phrase_text in phrases:
            phrase_text = phrase_text.replace("<ground>", "", 1).replace("<obj>", "", 1)
            if not phrase_text:
                continue
            match = re.search(text_pattern, phrase_text)
            if not match:
                continue
            phrase = match.group().strip()
            if phrase in self.banned_grounding_tokens:
                continue
            boxes_matches = list(re.finditer(box_pattern, phrase_text))
            if not boxes_matches:
                continue
            bbox_bins = [[int(m.group(j)) for j in range(1, 5)] for m in boxes_matches]
            bboxes = self.dequantize(torch.tensor(bbox_bins), size=image_size).tolist()
            # Strip non-ASCII characters from the phrase label.
            phrase = phrase.encode("ascii", "ignore").decode("ascii")
            # Note: one instance per phrase — 'bbox' holds the list of all boxes for that phrase.
            instances.append({"bbox": bboxes, "cat_name": phrase})
        return instances

    def _find_matched_token_indices(self, cur_span: tuple[int, int], token_spans: list[tuple[int, int]]) -> list[int]:
        # Indices of all token spans that overlap `cur_span` (interval intersection test).
        return [i for i, span in enumerate(token_spans) if not (span[1] <= cur_span[0] or span[0] >= cur_span[1])]

    def parse_description_with_bboxes_from_text_and_spans(
        self,
        text: str,
        image_size: tuple[int, int],
        allow_empty_phrase: bool = False,
    ) -> list[dict[str, Any]]:
        """
        Parse descriptions with bounding boxes.

        Args:
            text (`str`):
                The generated text.
            image_size (`tuple[int, int]`):
                Image size (width, height).
            allow_empty_phrase (`bool`, *optional*, defaults to `False`):
                Allow phrases without text.

        Returns:
            `list[dict[str, Any]]`: list of instances with 'bbox', 'cat_name', and optional 'score'.
        """
        text = text.replace("<s>", "").replace("</s>", "").replace("<pad>", "")
        if allow_empty_phrase:
            # Boxes only — no leading text required (used for the "bboxes" task).
            pattern = r"(?:(?:<loc_\d+>){4,})"
        else:
            pattern = r"([^<]+(?:<loc_\d+>){4,})"
        phrases = re.findall(pattern, text)
        text_pattern = r"^\s*(.*?)(?=<od>|</od>|<box>|</box>|<bbox>|</bbox>|<loc_)"
        box_pattern = r"<loc_(\d+)><loc_(\d+)><loc_(\d+)><loc_(\d+)>"
        instances = []
        for phrase_text in phrases:
            phrase_text = phrase_text.replace("<ground>", "", 1).replace("<obj>", "", 1)
            if not phrase_text and not allow_empty_phrase:
                continue
            match = re.search(text_pattern, phrase_text)
            if not match:
                continue
            phrase = match.group().strip()
            boxes_matches = list(re.finditer(box_pattern, phrase_text))
            if not boxes_matches:
                continue
            bbox_bins = [[int(m.group(j)) for j in range(1, 5)] for m in boxes_matches]
            bboxes = self.dequantize(torch.tensor(bbox_bins), size=image_size).tolist()
            phrase = phrase.encode("ascii", "ignore").decode("ascii")
            # Unlike phrase grounding, each box becomes its own instance sharing the phrase label.
            for bbox in bboxes:
                instance = {"bbox": bbox, "cat_name": phrase}
                instances.append(instance)
        return instances

    def parse_description_with_polygons_from_text_and_spans(
        self,
        text: str,
        image_size: tuple[int, int],
        allow_empty_phrase: bool = False,
        polygon_sep_token: str = "<sep>",
        polygon_start_token: str = "<poly>",
        polygon_end_token: str = "</poly>",
        with_box_at_start: bool = False,
    ) -> list[dict[str, Any]]:
        """
        Parse descriptions with polygons.

        Args:
            text (`str`):
                The generated text.
            image_size (`tuple[int, int]`):
                Image size (width, height).
            allow_empty_phrase (`bool`, *optional*, defaults to `False`):
                Allow phrases without text.
            polygon_sep_token (`str`, *optional*, defaults to "<sep>"):
                Token separating polygons.
            polygon_start_token (`str`, *optional*, defaults to "<poly>"):
                Start token for polygons.
            polygon_end_token (`str`, *optional*, defaults to "</poly>"):
                End token for polygons.
            with_box_at_start (`bool`, *optional*, defaults to `False`):
                Whether a bounding box is at the start of polygons.

        Returns:
            `list[dict[str, Any]]`: list of instances with 'polygons', 'cat_name', and optional 'bbox'.
        """
        text = text.replace("<s>", "").replace("</s>", "").replace("<pad>", "")
        if allow_empty_phrase:
            pattern = rf"(?:(?:<loc_\d+>|{re.escape(polygon_sep_token)}|{re.escape(polygon_start_token)}|{re.escape(polygon_end_token)}){{4,}})"
        else:
            pattern = rf"([^<]+(?:<loc_\d+>|{re.escape(polygon_sep_token)}|{re.escape(polygon_start_token)}|{re.escape(polygon_end_token)}){{4,}})"
        phrases = re.findall(pattern, text)
        phrase_pattern = r"^\s*(.*?)(?=<od>|</od>|<box>|</box>|<bbox>|</bbox>|<loc_|<poly>)"
        # Everything between <poly> ... </poly> is one polygon group.
        poly_instance_pattern = rf"{re.escape(polygon_start_token)}(.*?){re.escape(polygon_end_token)}"
        # A run of <loc_*> tokens terminated by a separator or end of string is one polygon.
        box_pattern = rf"((?:<loc_\d+>)+)(?:{re.escape(polygon_sep_token)}|$)"
        instances = []
        for phrase_text in phrases:
            # Drop a single leading stray <loc_*> token, if any, before extracting the phrase.
            phrase_text_strip = re.sub(r"^<loc_\d+>", "", phrase_text, count=1)
            if not phrase_text_strip and not allow_empty_phrase:
                continue
            match = re.search(phrase_pattern, phrase_text_strip)
            if not match:
                continue
            phrase = match.group().strip()
            if polygon_start_token in phrase_text and polygon_end_token in phrase_text:
                poly_instances = [m.group(1) for m in re.finditer(poly_instance_pattern, phrase_text)]
            else:
                # No explicit polygon delimiters: treat the whole phrase text as one polygon group.
                poly_instances = [phrase_text]
            for poly_inst in poly_instances:
                poly_matches = list(re.finditer(box_pattern, poly_inst))
                if len(poly_matches) == 0:
                    continue
                bbox = []
                polygons = []
                for poly_match in poly_matches:
                    poly_str = poly_match.group(1)
                    poly_bins = [int(m.group(1)) for m in re.finditer(r"<loc_(\d+)>", poly_str)]
                    # Optionally peel off a leading 4-value bounding box before the polygon coords.
                    if with_box_at_start and not bbox:
                        if len(poly_bins) > 4:
                            bbox = poly_bins[:4]
                            poly_bins = poly_bins[4:]
                        else:
                            bbox = [0, 0, 0, 0]
                    # Polygons need (x, y) pairs; drop a dangling odd coordinate.
                    if len(poly_bins) % 2 == 1:
                        poly_bins = poly_bins[:-1]
                    poly_coords = (
                        self.dequantize(torch.tensor(poly_bins).reshape(-1, 2), size=image_size).flatten().tolist()
                    )
                    polygons.append(poly_coords)
                instance = {"cat_name": phrase, "polygons": polygons}
                if bbox:
                    instance["bbox"] = self.dequantize(torch.tensor([bbox]), size=image_size)[0].tolist()
                instances.append(instance)
        return instances

    def __call__(self, text=None, sequence=None, image_size=None, parse_tasks=None) -> dict[str, Any]:
        """
        Process model output and parse into task-specific results.

        Args:
            text (`Optional[str]`, *optional*):
                Generated text. Either this or `sequence` must be provided.
            sequence (`Optional[Union[list[int], torch.Tensor]]`, *optional*):
                Token sequence. Either this or `text` must be provided.
            image_size (`Optional[tuple[int, int]]`, *optional*):
                Image size (width, height) required for dequantization.
            parse_tasks (`Optional[Union[str, list[str]]]`, *optional*):
                Specific tasks to parse. If None, parse all supported tasks.

        Returns:
            `dict[str, Any]`: Parsed results for each task, including the raw 'text'.
        """
        if parse_tasks is not None:
            parse_tasks = [parse_tasks] if isinstance(parse_tasks, str) else parse_tasks
            for task in parse_tasks:
                if task not in self.parse_task_config.keys():
                    raise ValueError(f"Unsupported parse task: {task}")
        if (text is None and sequence is None) or (text is not None and sequence is not None):
            raise ValueError("Exactly one of 'text' or 'sequence' must be provided.")
        if sequence is not None:
            if isinstance(sequence, torch.Tensor):
                sequence = sequence.tolist()
            sequence = sequence[1:] if sequence[0] == self.tokenizer.bos_token_id else sequence  # Skip BOS if present
            text, _ = self.decode_with_spans(sequence)
        parsed_dict = {"text": text}
        tasks_to_parse = parse_tasks or self.parse_task_config.keys()
        for task in tasks_to_parse:
            config = self.parse_task_config[task]
            pattern = config.get("PATTERN")
            if task == "ocr":
                parsed_dict["ocr"] = self.parse_ocr_from_text_and_spans(
                    text, pattern=pattern, image_size=image_size, area_threshold=config.get("AREA_THRESHOLD", 0.0)
                )
            elif task == "phrase_grounding":
                parsed_dict["phrase_grounding"] = self.parse_phrase_grounding_from_text_and_spans(
                    text, image_size=image_size
                )
            elif task == "pure_text":
                parsed_dict["pure_text"] = text
            elif task == "description_with_bboxes":
                parsed_dict["description_with_bboxes"] = self.parse_description_with_bboxes_from_text_and_spans(
                    text, image_size=image_size
                )
            elif task == "description_with_polygons":
                parsed_dict["description_with_polygons"] = self.parse_description_with_polygons_from_text_and_spans(
                    text, image_size=image_size
                )
            elif task == "polygons":
                parsed_dict["polygons"] = self.parse_description_with_polygons_from_text_and_spans(
                    text, image_size=image_size, allow_empty_phrase=True
                )
            elif task == "bboxes":
                parsed_dict["bboxes"] = self.parse_description_with_bboxes_from_text_and_spans(
                    text, image_size=image_size, allow_empty_phrase=True
                )
            elif task == "description_with_bboxes_or_polygons":
                # The output format is not known in advance; dispatch on the presence of <poly>.
                if "<poly>" in text:
                    instances = self.parse_description_with_polygons_from_text_and_spans(text, image_size=image_size)
                else:
                    instances = self.parse_description_with_bboxes_from_text_and_spans(text, image_size=image_size)
                parsed_dict["description_with_bboxes_or_polygons"] = instances
            else:
                raise ValueError(f"task {task} is not supported")
        return parsed_dict
class Florence2VisionDropPath(BeitDropPath):
    # Same implementation as BeitDropPath (stochastic depth), re-exported under a Florence-2 name.
    pass
class Florence2VisionLearnedAbsolutePositionEmbedding2D(nn.Module):
    """
    Learned 2D absolute position embeddings up to a fixed maximum grid size.

    Rows and columns each own a learned table; the column half and the row half
    are concatenated channel-wise to give one embedding per spatial location.
    """

    def __init__(self, config: Florence2Config):
        super().__init__()
        max_positions = config.vision_config.max_position_embeddings
        channels = config.vision_config.embed_dim[-1]
        half = channels // 2
        # Row table fills `half` channels; column table fills the remainder.
        self.row_embeddings = nn.Embedding(max_positions, half)
        self.column_embeddings = nn.Embedding(max_positions, channels - half)

    def forward(self, pixel_values, pixel_mask=None):
        height, width = pixel_values.shape[-2:]
        col_ids = torch.arange(width, device=pixel_values.device)
        row_ids = torch.arange(height, device=pixel_values.device)
        col_embed = self.column_embeddings(col_ids)  # (width, channels - half)
        row_embed = self.row_embeddings(row_ids)  # (height, half)
        # Broadcast each 1D table over the other axis, then concatenate on channels.
        grid = torch.cat(
            [
                col_embed.unsqueeze(0).expand(height, -1, -1),
                row_embed.unsqueeze(1).expand(-1, width, -1),
            ],
            dim=-1,
        )
        # (H, W, C) -> (1, C, H, W), repeated over the batch dimension.
        grid = grid.permute(2, 0, 1).unsqueeze(0)
        return grid.repeat(pixel_values.shape[0], 1, 1, 1)
class Florence2VisionPositionalEmbeddingCosine1D(nn.Module):
    """
    Fixed (non-learned) 1D sinusoidal position embeddings.

    A table of shape (max_temporal_embeddings, embed_dim) is precomputed once —
    sine values on even channels, cosine values on odd channels — and stored as
    a buffer. The forward pass slices the first `seq_len` rows.
    """

    def __init__(self, config: Florence2Config):
        super().__init__()
        self.embed_dim = config.vision_config.embed_dim[-1]
        self.max_seq_len = config.vision_config.max_temporal_embeddings
        sine, cosine = self.get_sinusoid_embeddings(
            max_positions=self.max_seq_len,
            embed_dim=self.embed_dim,
        )
        table = torch.empty((self.max_seq_len, self.embed_dim))
        table[:, 0::2] = sine
        table[:, 1::2] = cosine
        # Constant buffer: part of the state dict but never trained.
        self.register_buffer("pos_idx_to_embed", table)

    @staticmethod
    def get_sinusoid_embeddings(max_positions: int, embed_dim: int):
        half_dim = embed_dim // 2
        # Geometric ladder of frequencies, one per sine/cosine channel pair.
        scale = math.log(10000) / half_dim
        freqs = torch.exp(torch.arange(half_dim, dtype=torch.int64).float() * -scale)
        angles = torch.arange(max_positions, dtype=torch.float).unsqueeze(1) * freqs.unsqueeze(0)
        return torch.sin(angles), torch.cos(angles)

    def forward(self, seq_embeds: torch.Tensor) -> torch.Tensor:
        num_positions = seq_embeds.size(1)
        if num_positions > self.max_seq_len:
            raise ValueError(f"Maximum sequence length {self.max_seq_len}, got {num_positions}")
        return self.pos_idx_to_embed[0:num_positions, :]
class Florence2VisionMLP(Llama4VisionMLP):
    # Llama4-style vision MLP re-sized per DaViT stage: fc1 expands the stage width by
    # `mlp_ratio`, fc2 projects it back; activation comes from the config.
    def __init__(self, config: Florence2VisionConfig, stage_idx: int):
        super().__init__(config)
        self.fc1 = nn.Linear(config.embed_dim[stage_idx], int(config.embed_dim[stage_idx] * config.mlp_ratio))
        self.activation_fn = ACT2FN[config.activation_function]
        self.fc2 = nn.Linear(int(config.embed_dim[stage_idx] * config.mlp_ratio), config.embed_dim[stage_idx])
class Florence2VisionConvEmbed(nn.Module):
    """Image to Patch Embedding"""

    def __init__(self, config: Florence2VisionConfig, stage_idx: int):
        super().__init__()
        self.config = config
        self.stage_idx = stage_idx
        self.patch_size = config.patch_size[stage_idx]
        # Stage 0 reads raw image channels; deeper stages read the previous stage's features.
        self.in_channels = config.in_channels if stage_idx == 0 else config.embed_dim[stage_idx - 1]
        self.embed_dim = config.embed_dim[stage_idx]
        self.stride = config.patch_stride[stage_idx]
        self.padding = config.patch_padding[stage_idx]
        self.pre_norm = config.patch_prenorm[stage_idx]
        self.conv = nn.Conv2d(
            self.in_channels,
            self.embed_dim,
            kernel_size=self.patch_size,
            stride=self.stride,
            padding=self.padding,
        )
        # LayerNorm is applied to the input channels (pre-norm) or output channels (post-norm).
        self.norm = nn.LayerNorm(self.in_channels if self.pre_norm else self.embed_dim)

    def _normalize_channels_last(self, feature_map: torch.Tensor) -> torch.Tensor:
        # LayerNorm normalizes the last axis, so move channels there and back.
        feature_map = feature_map.permute(0, 2, 3, 1)
        feature_map = self.norm(feature_map)
        return feature_map.permute(0, 3, 1, 2)

    def forward(self, hidden_states: torch.Tensor):
        if self.norm and self.pre_norm:
            hidden_states = self._normalize_channels_last(hidden_states)
        hidden_states = self.conv(hidden_states)
        if self.norm and not self.pre_norm:
            hidden_states = self._normalize_channels_last(hidden_states)
        return hidden_states
class Florence2VisionChannelAttention(nn.Module):
    # Grouped channel attention: after the permute below, the per-group channel axis plays
    # the role of the attention "sequence" and the token axis plays the "feature" axis, so
    # attention mixes channels within each group rather than tokens.
    def __init__(self, config: Florence2VisionConfig, stage_idx: int):
        super().__init__()
        self.config = config
        self.dim = config.embed_dim[stage_idx]
        self.groups = config.num_groups[stage_idx]
        self.qkv = nn.Linear(self.dim, self.dim * 3, bias=config.qkv_bias)
        self.proj = nn.Linear(self.dim, self.dim)
        # Read by the attention interface; channel attention is never causal.
        self.is_causal = False

    def forward(self, hidden_states: torch.Tensor):
        batch_size, num_tokens, hidden_size = hidden_states.shape
        # Reshape for grouped channel attention
        qkv = self.qkv(hidden_states).reshape(batch_size, num_tokens, 3, self.groups, hidden_size // self.groups)
        # (batch, tokens, 3, groups, ch_per_group) -> (3, batch, groups, ch_per_group, tokens)
        qkv = qkv.permute(2, 0, 3, 4, 1)
        query, key, value = qkv.unbind(0)
        # Scaling uses the token count, which is the feature dimension in this transposed layout.
        scale = num_tokens**-0.5
        # Channel-to-channel attention within groups:
        attention_interface: Callable = ALL_ATTENTION_FUNCTIONS.get_interface(
            self.config._attn_implementation, eager_attention_forward
        )
        hidden_states, _ = attention_interface(
            self,
            query,
            key,
            value,
            attention_mask=None,
            scaling=scale,
        )
        # NOTE(review): this permute assumes the interface returns (batch, ch_per_group, tokens, groups)
        # after its internal transpose — restores (batch, tokens, groups, ch_per_group) before flattening.
        hidden_states = hidden_states.permute(0, 3, 2, 1)
        hidden_states = hidden_states.reshape(batch_size, num_tokens, hidden_size)
        # Final projection
        hidden_states = self.proj(hidden_states)
        return hidden_states
class Florence2VisionChannelBlock(nn.Module):
    """
    Channel-mixing block: a depthwise-conv + grouped channel-attention sub-block,
    then a depthwise-conv + FFN sub-block, each with a residual connection and
    optional stochastic depth.
    """

    def __init__(
        self,
        config: Florence2VisionConfig,
        stage_idx: int,
        drop_path_rate: float,
    ):
        super().__init__()
        self.config = config
        channels = config.embed_dim[stage_idx]
        # Both convs are 3x3 depthwise (one filter per channel).
        self.conv1 = nn.Conv2d(channels, channels, kernel_size=3, padding=1, groups=channels)
        self.norm1 = nn.LayerNorm(channels)
        self.channel_attn = Florence2VisionChannelAttention(config=config, stage_idx=stage_idx)
        self.drop_path1 = Florence2VisionDropPath(drop_path_rate) if drop_path_rate > 0.0 else nn.Identity()
        self.conv2 = nn.Conv2d(channels, channels, kernel_size=3, padding=1, groups=channels)
        self.norm2 = nn.LayerNorm(channels)
        self.ffn = Florence2VisionMLP(config=config, stage_idx=stage_idx)
        self.drop_path2 = Florence2VisionDropPath(drop_path_rate) if drop_path_rate > 0.0 else nn.Identity()

    def forward(self, hidden_states: torch.Tensor):
        batch_size, channels, height, width = hidden_states.shape

        # Sub-block 1: depthwise conv (residual), then grouped channel attention.
        hidden_states = hidden_states + self.conv1(hidden_states)
        tokens = hidden_states.flatten(2).transpose(1, 2)
        attended = self.channel_attn(self.norm1(tokens))
        tokens = tokens + self.drop_path1(attended)
        hidden_states = tokens.transpose(1, 2).view(batch_size, channels, height, width)

        # Sub-block 2: depthwise conv (residual), then FFN.
        hidden_states = hidden_states + self.conv2(hidden_states)
        tokens = hidden_states.flatten(2).transpose(1, 2)
        tokens = tokens + self.drop_path2(self.ffn(self.norm2(tokens)))
        return tokens.transpose(1, 2).view(batch_size, channels, height, width)
class Florence2VisionWindowAttention(nn.Module):
    # Local self-attention over non-overlapping spatial windows. Input is channels-last
    # (batch, height, width, channels); output is flattened tokens (batch, height*width, channels).
    def __init__(self, config: Florence2VisionConfig, stage_idx: int):
        super().__init__()
        self.config = config
        self.dim = config.embed_dim[stage_idx]
        self.window_size = config.window_size
        self.num_heads = config.num_heads[stage_idx]
        head_dim = self.dim // self.num_heads
        self.scale = head_dim**-0.5
        self.qkv = nn.Linear(self.dim, self.dim * 3, bias=config.qkv_bias)
        self.proj = nn.Linear(self.dim, self.dim)
        # Read by the attention interface; window attention is never causal.
        self.is_causal = False

    def forward(self, hidden_states: torch.Tensor):
        batch_size, height, width, embed_dim = hidden_states.shape
        # Pad the input if necessary
        # (only right/bottom, so that H and W become multiples of the window size).
        pad_left = pad_top = 0
        pad_right = (self.window_size - width % self.window_size) % self.window_size
        pad_bottom = (self.window_size - height % self.window_size) % self.window_size
        hidden_states = F.pad(hidden_states, (0, 0, pad_left, pad_right, pad_top, pad_bottom))
        _, padded_height, padded_width, _ = hidden_states.shape
        # Partition input into non-overlapping windows (for local spatial attention in DaViT)
        hidden_states = hidden_states.view(
            batch_size,
            padded_height // self.window_size,
            self.window_size,
            padded_width // self.window_size,
            self.window_size,
            embed_dim,
        )
        # Collapse (batch, h_windows, w_windows) into one axis: each window is an attention batch.
        windowed_hidden_states = hidden_states.permute(0, 1, 3, 2, 4, 5).contiguous()
        windowed_hidden_states = windowed_hidden_states.view(-1, self.window_size * self.window_size, embed_dim)
        # Generate Q, K, V for each window
        num_windows_per_batch, num_tokens_per_window, embed_dim = windowed_hidden_states.shape
        qkv = self.qkv(windowed_hidden_states).reshape(
            num_windows_per_batch, num_tokens_per_window, 3, self.num_heads, embed_dim // self.num_heads
        )
        qkv = qkv.permute(2, 0, 3, 1, 4)
        query, key, value = qkv.unbind(0)
        attention_interface: Callable = ALL_ATTENTION_FUNCTIONS.get_interface(
            self.config._attn_implementation, eager_attention_forward
        )
        windowed_hidden_states, _ = attention_interface(
            self,
            query,
            key,
            value,
            attention_mask=None,
            scaling=self.scale,
        )
        windowed_hidden_states = windowed_hidden_states.view(num_windows_per_batch, num_tokens_per_window, embed_dim)
        windowed_hidden_states = self.proj(windowed_hidden_states)
        # Merge windows back to original spatial layout
        windowed_hidden_states = windowed_hidden_states.view(-1, self.window_size, self.window_size, embed_dim)
        hidden_states = windowed_hidden_states.view(
            -1,
            padded_height // self.window_size,
            padded_width // self.window_size,
            self.window_size,
            self.window_size,
            embed_dim,
        )
        hidden_states = hidden_states.permute(0, 1, 3, 2, 4, 5).contiguous()
        hidden_states = hidden_states.view(-1, padded_height, padded_width, embed_dim)
        # Crop away the padding, then flatten the spatial grid into a token axis.
        hidden_states = hidden_states[:, :height, :width, :].contiguous()
        hidden_states = hidden_states.view(batch_size, height * width, embed_dim)
        return hidden_states
class Florence2VisionSpatialBlock(nn.Module):
    """
    Spatial-mixing block: a depthwise-conv + windowed self-attention sub-block,
    then a depthwise-conv + FFN sub-block, each with a residual connection and
    optional stochastic depth.
    """

    def __init__(
        self,
        config: Florence2VisionConfig,
        stage_idx: int,
        drop_path_rate: float,
    ):
        super().__init__()
        channels = config.embed_dim[stage_idx]
        # Both convs are 3x3 depthwise (one filter per channel).
        self.conv1 = nn.Conv2d(channels, channels, kernel_size=3, padding=1, groups=channels)
        self.norm1 = nn.LayerNorm(channels)
        self.window_attn = Florence2VisionWindowAttention(config=config, stage_idx=stage_idx)
        self.drop_path1 = Florence2VisionDropPath(drop_path_rate) if drop_path_rate > 0.0 else nn.Identity()
        self.conv2 = nn.Conv2d(channels, channels, kernel_size=3, padding=1, groups=channels)
        self.norm2 = nn.LayerNorm(channels)
        self.ffn = Florence2VisionMLP(config=config, stage_idx=stage_idx)
        self.drop_path2 = Florence2VisionDropPath(drop_path_rate) if drop_path_rate > 0.0 else nn.Identity()

    def forward(self, hidden_states: torch.Tensor):
        batch_size, channels, height, width = hidden_states.shape

        # Sub-block 1: depthwise conv (residual), then window attention on the (H, W) grid.
        hidden_states = hidden_states + self.conv1(hidden_states)
        tokens = hidden_states.flatten(2).transpose(1, 2)
        normed = self.norm1(tokens).view(batch_size, height, width, channels)
        tokens = tokens + self.drop_path1(self.window_attn(normed))
        hidden_states = tokens.transpose(1, 2).view(batch_size, channels, height, width)

        # Sub-block 2: depthwise conv (residual), then FFN.
        hidden_states = hidden_states + self.conv2(hidden_states)
        tokens = hidden_states.flatten(2).transpose(1, 2)
        tokens = tokens + self.drop_path2(self.ffn(self.norm2(tokens)))
        return tokens.transpose(1, 2).view(batch_size, channels, height, width)
class Florence2VisionBlock(nn.Module):
    """One backbone unit: spatial (window-attention) mixing followed by channel mixing."""

    def __init__(
        self,
        config: Florence2VisionConfig,
        stage_idx: int,
        spatial_drop_path_rate: float,
        channel_drop_path_rate: float,
    ):
        super().__init__()
        self.spatial_block = Florence2VisionSpatialBlock(
            config=config,
            stage_idx=stage_idx,
            drop_path_rate=spatial_drop_path_rate,
        )
        self.channel_block = Florence2VisionChannelBlock(
            config=config,
            stage_idx=stage_idx,
            drop_path_rate=channel_drop_path_rate,
        )

    def forward(self, hidden_states: torch.Tensor):
        # Spatial mixing first, then channel mixing.
        return self.channel_block(self.spatial_block(hidden_states))
@auto_docstring
class Florence2VisionPreTrainedModel(PreTrainedModel):
    # Base pretrained-model class for the Florence-2 vision backbone.
    config_class = Florence2VisionConfig
    main_input_name = "pixel_values"
    input_modalities = ("image",)
    _supports_sdpa = True
    _supports_flash_attn = True
    _supports_flex_attn = True
    _can_compile_fullgraph = True
    # Maps output names to the submodule types whose outputs can be recorded.
    _can_record_outputs = {
        "hidden_states": Florence2VisionBlock,
        "attentions": [Florence2VisionChannelAttention, Florence2VisionWindowAttention],
    }
@auto_docstring
class Florence2VisionBackbone(Florence2VisionPreTrainedModel):
    def __init__(self, config: Florence2VisionConfig):
        super().__init__(config)
        self.config = config
        self.embed_dim = config.embed_dim
        self.num_heads = config.num_heads
        self.num_groups = config.num_groups
        self.num_stages = len(self.embed_dim)
        if not (self.num_stages == len(self.num_heads) == len(self.num_groups)):
            raise ValueError(
                f"Expected self.num_stages ({self.num_stages}) == "
                f"len(self.num_heads) ({len(self.num_heads)}) == "
                f"len(self.num_groups) ({len(self.num_groups)})"
            )
        # Stochastic-depth schedule: two rates per block (spatial then channel), increasing linearly.
        drop_rates = [
            rate.item() for rate in torch.linspace(0, config.drop_path_rate, sum(config.depths) * 2, device="cpu")
        ]
        stage_convs = []
        stage_blocks = []
        rate_cursor = 0
        for stage_idx in range(self.num_stages):
            # Each stage starts with a conv patch embedding, followed by its blocks.
            stage_convs.append(Florence2VisionConvEmbed(config=config, stage_idx=stage_idx))
            stage_blocks.append(
                nn.ModuleList(
                    Florence2VisionBlock(
                        config=config,
                        stage_idx=stage_idx,
                        spatial_drop_path_rate=drop_rates[rate_cursor + 2 * block_idx],
                        channel_drop_path_rate=drop_rates[rate_cursor + 2 * block_idx + 1],
                    )
                    for block_idx in range(config.depths[stage_idx])
                )
            )
            rate_cursor += 2 * config.depths[stage_idx]
        self.convs = nn.ModuleList(stage_convs)
        self.blocks = nn.ModuleList(stage_blocks)
        # Initialize weights and apply final processing
        self.post_init()

    @merge_with_config_defaults
    @capture_outputs
    def forward(
        self, hidden_states: torch.Tensor, **kwargs: Unpack[TransformersKwargs]
    ) -> tuple | BaseModelOutputWithPooling:
        # Run every stage in order: patch embedding, then that stage's blocks.
        for patch_embed, stage in zip(self.convs, self.blocks):
            hidden_states = patch_embed(hidden_states)
            for block in stage:
                hidden_states = block(hidden_states)
        return BaseModelOutputWithPooling(last_hidden_state=hidden_states)
class Florence2MultiModalProjector(nn.Module):
    """
    Projects backbone image features into the language model's embedding space,
    after adding learned 2D position and fixed temporal embeddings.
    """

    def __init__(self, config: Florence2Config):
        super().__init__()
        embedding_dim = config.vision_config.embed_dim[-1]
        projection_dim = config.vision_config.projection_dim
        self.vision_embedding_dim = embedding_dim
        self.vision_projection_dim = projection_dim
        self.image_projection = nn.Linear(embedding_dim, projection_dim, bias=False)
        self.image_proj_norm = nn.LayerNorm(projection_dim)
        self.image_position_embed = Florence2VisionLearnedAbsolutePositionEmbedding2D(config=config)
        self.visual_temporal_embed = Florence2VisionPositionalEmbeddingCosine1D(config=config)

    def forward(self, image_features):
        # Add 2D position embeddings, then flatten the spatial grid into a token axis.
        embedded = image_features + self.image_position_embed(image_features)
        tokens = embedded.flatten(2).transpose(1, 2)
        # Temporal embedding for a single frame, broadcast over all spatial tokens.
        temporal = self.visual_temporal_embed(tokens[:, :1, :]).unsqueeze(1)
        fused = (tokens + temporal).unsqueeze(1)
        # Mean over spatial tokens and over the (singleton) temporal axis, concatenated.
        spatial_features = fused.mean(dim=2)
        temporal_features = fused.mean(dim=1)
        combined = torch.cat([spatial_features, temporal_features], dim=1)
        return self.image_proj_norm(self.image_projection(combined))
@dataclass
@auto_docstring(
    custom_intro="""
    Base class for Florence-2 base model's outputs that also contains : pre-computed hidden states that can speed up sequential
    decoding.
    """
)
class Florence2Seq2SeqModelOutput(Seq2SeqModelOutput):
    r"""
    image_hidden_states (`torch.FloatTensor`, *optional*):
        A `torch.FloatTensor` of size `(batch_size, num_image_tokens, hidden_size)`.
        image_hidden_states of the model produced by the vision encoder and after projecting the last hidden state.
    """

    # Extends Seq2SeqModelOutput with the projected vision features (optional).
    image_hidden_states: torch.FloatTensor | None = None
@dataclass
@auto_docstring(
    custom_intro="""
    Base class for Florence-2 model's outputs that also contains : pre-computed hidden states that can speed up sequential
    decoding.
    """
)
class Florence2Seq2SeqLMOutput(Seq2SeqLMOutput):
    r"""
    loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
        Language modeling loss (for next-token prediction).
    logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
        Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
    image_hidden_states (`torch.FloatTensor`, *optional*):
        A `torch.FloatTensor` of size `(batch_size, num_image_tokens, hidden_size)`.
        image_hidden_states of the model produced by the vision encoder and after projecting the last hidden state.
    """

    # Extends Seq2SeqLMOutput with the projected vision features (optional).
    image_hidden_states: tuple[torch.FloatTensor, ...] | None = None
@auto_docstring
class Florence2PreTrainedModel(LlavaPreTrainedModel):
    config_class = Florence2Config
    base_model_prefix = "model"
    _supports_attention_backend = False

    def _init_weights(self, module):
        PreTrainedModel._init_weights(self, module)
        # The sinusoidal position table is a constant buffer, so rebuild it deterministically
        # (exactly as the module's __init__ does) instead of leaving it uninitialized.
        if isinstance(module, Florence2VisionPositionalEmbeddingCosine1D):
            sine, cosine = module.get_sinusoid_embeddings(
                max_positions=module.max_seq_len,
                embed_dim=module.embed_dim,
            )
            table = torch.empty((module.max_seq_len, module.embed_dim))
            # Even channels hold sine values, odd channels cosine values.
            table[:, 0::2] = sine
            table[:, 1::2] = cosine
            init.copy_(module.pos_idx_to_embed, table)
@auto_docstring(
    custom_intro="""
    Florence-2 is a vision model for captioning, detection, and segmentation.
    """
)
class Florence2Model(LlavaModel):
    _checkpoint_conversion_mapping = {}

    def __init__(self, config: Florence2Config):
        super().__init__(config)
        # Replace Llava's default vision tower with the Florence-2 vision backbone.
        self.vision_tower = Florence2VisionBackbone(config=config.vision_config)

    def get_encoder(self, modality=None):
        # With no modality requested, return the text (seq2seq) encoder so that
        # `generate()` can run the encoder pass; otherwise defer to the parent.
        if modality is None:
            return self.language_model.get_encoder()
        else:
            return super().get_encoder(modality=modality)

    @can_return_tuple
    @auto_docstring(
        custom_intro="Obtains image last hidden states from the vision tower and apply multimodal projection."
    )
    def get_image_features(
        self, pixel_values: torch.Tensor, **kwargs: Unpack[TransformersKwargs]
    ) -> tuple | BaseModelOutputWithPooling:
        r"""
        pixel_values (`torch.FloatTensor` of shape `(batch_size, channels, height, width)`):
            The tensors corresponding to the input images.
        """
        image_outputs = self.vision_tower(pixel_values, return_dict=True, **kwargs)
        # Overwrite `pooler_output` with the projected last hidden state so callers
        # can read the multimodal-projected features from a single field.
        image_outputs.pooler_output = self.multi_modal_projector(image_outputs.last_hidden_state)
        return image_outputs

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        pixel_values: torch.FloatTensor | None = None,
        attention_mask: torch.Tensor | None = None,
        decoder_input_ids: torch.LongTensor | None = None,
        decoder_attention_mask: torch.LongTensor | None = None,
        decoder_inputs_embeds: torch.FloatTensor | None = None,
        encoder_outputs: list[torch.FloatTensor] | None = None,
        past_key_values: Cache | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        use_cache: bool | None = None,
        output_attentions: bool | None = None,
        output_hidden_states: bool | None = None,
        return_dict: bool | None = None,
        cache_position: torch.LongTensor | None = None,
        **kwargs,
    ) -> tuple | Florence2Seq2SeqModelOutput:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # Encoder pass is skipped entirely when precomputed `encoder_outputs` are given.
        if encoder_outputs is None:
            if (input_ids is None) ^ (inputs_embeds is not None):
                raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
            if inputs_embeds is None:
                inputs_embeds = self.get_input_embeddings()(input_ids)
            if pixel_values is not None:
                # Project image features and scatter them into the positions of the
                # image placeholder tokens in the text embedding sequence.
                image_features = self.get_image_features(pixel_values, return_dict=True).pooler_output
                image_features = image_features.to(inputs_embeds.device, inputs_embeds.dtype)
                special_image_mask = self.get_placeholder_mask(
                    input_ids, inputs_embeds=inputs_embeds, image_features=image_features
                )
                inputs_embeds = inputs_embeds.masked_scatter(special_image_mask, image_features)
            encoder_outputs = self.language_model.encoder(
                attention_mask=attention_mask,
                inputs_embeds=inputs_embeds,
                output_attentions=output_attentions,
                output_hidden_states=output_hidden_states,
                return_dict=True,
            )

        # Default decoder input: a single decoder-start token per batch element.
        # NOTE(review): this branch dereferences `inputs_embeds`, which is only set
        # when the encoder path above ran — calling with precomputed
        # `encoder_outputs` and no decoder inputs would fail here; confirm callers
        # always supply decoder inputs alongside `encoder_outputs`.
        if decoder_input_ids is None:
            decoder_start_token_id = self.config.text_config.decoder_start_token_id
            decoder_input_ids = torch.ones((inputs_embeds.size()[0], 1), dtype=torch.long, device=inputs_embeds.device)
            decoder_input_ids *= decoder_start_token_id

        decoder_outputs = self.language_model.decoder(
            input_ids=decoder_input_ids,
            attention_mask=decoder_attention_mask,
            encoder_hidden_states=encoder_outputs[0],
            encoder_attention_mask=attention_mask,
            past_key_values=past_key_values,
            inputs_embeds=decoder_inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            cache_position=cache_position,
            return_dict=True,
        )

        # NOTE(review): `image_features` is only bound when the encoder path ran; if
        # `encoder_outputs` and `pixel_values` are both passed, this line would raise
        # NameError — presumably callers never combine the two; verify.
        return Florence2Seq2SeqModelOutput(
            last_hidden_state=decoder_outputs.last_hidden_state,
            past_key_values=decoder_outputs.past_key_values,
            decoder_hidden_states=decoder_outputs.hidden_states,
            decoder_attentions=decoder_outputs.attentions,
            cross_attentions=decoder_outputs.cross_attentions,
            encoder_last_hidden_state=encoder_outputs.last_hidden_state,
            encoder_hidden_states=encoder_outputs.hidden_states,
            encoder_attentions=encoder_outputs.attentions,
            image_hidden_states=image_features if pixel_values is not None else None,
        )
@auto_docstring(
    custom_intro="""
    Florence-2 is a vision model for captioning, detection, and segmentation.
    """
)
class Florence2ForConditionalGeneration(LlavaForConditionalGeneration):
    _checkpoint_conversion_mapping = {}
    # The LM head is tied to the shared embedding table of the seq2seq language model.
    _tied_weights_keys = {
        "lm_head.weight": "model.language_model.shared.weight",
    }

    @auto_docstring
    def get_image_features(
        self, pixel_values: torch.Tensor, **kwargs: Unpack[TransformersKwargs]
    ) -> tuple | BaseModelOutputWithPooling:
        # Thin delegation to the backbone model (vision tower + projector).
        return self.model.get_image_features(pixel_values=pixel_values, **kwargs)

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        pixel_values: torch.FloatTensor | None = None,
        attention_mask: torch.Tensor | None = None,
        decoder_input_ids: torch.LongTensor | None = None,
        decoder_attention_mask: torch.LongTensor | None = None,
        encoder_outputs: list[torch.FloatTensor] | None = None,
        past_key_values: Cache | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        decoder_inputs_embeds: torch.FloatTensor | None = None,
        labels: torch.LongTensor | None = None,
        use_cache: bool | None = None,
        output_attentions: bool | None = None,
        output_hidden_states: bool | None = None,
        return_dict: bool | None = None,
        cache_position: torch.LongTensor | None = None,
        logits_to_keep: int | torch.Tensor = 0,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple | Florence2Seq2SeqLMOutput:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Example:

        ```python
        >>> from PIL import Image
        >>> import httpx
        >>> from io import BytesIO

        >>> from transformers import AutoProcessor, Florence2ForConditionalGeneration

        >>> model = Florence2ForConditionalGeneration.from_pretrained("florence-community/Florence-2-large")
        >>> processor = AutoProcessor.from_pretrained("florence-community/Florence-2-large")

        >>> prompt = "<CAPTION>"
        >>> url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/car.jpg"
        >>> with httpx.stream("GET", url) as response:
        ...     image = Image.open(BytesIO(response.read()))

        >>> inputs = processor(text=prompt, images=image, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(**inputs, max_length=100)
        >>> processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "A green car parked in front of a yellow building."
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if labels is not None:
            # Training: caching is pointless (and disabled) when computing a loss.
            if use_cache:
                logger.warning("The `use_cache` argument is changed to `False` since `labels` is provided.")
            use_cache = False
            # Teacher forcing: derive decoder inputs by right-shifting the labels when
            # the caller did not provide decoder inputs explicitly.
            if decoder_input_ids is None and decoder_inputs_embeds is None:
                decoder_input_ids = shift_tokens_right(
                    labels, self.config.text_config.pad_token_id, self.config.text_config.decoder_start_token_id
                )

        outputs = self.model(
            input_ids=input_ids,
            pixel_values=pixel_values,
            attention_mask=attention_mask,
            decoder_input_ids=decoder_input_ids,
            encoder_outputs=encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            decoder_inputs_embeds=decoder_inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=True,
            cache_position=cache_position,
            **kwargs,
        )

        hidden_states = outputs[0]
        # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
        slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
        logits = self.lm_head(hidden_states[:, slice_indices, :])

        loss = None
        if labels is not None:
            loss = self.loss_function(
                logits=logits, labels=labels, vocab_size=self.config.text_config.vocab_size, **kwargs
            )

        return Florence2Seq2SeqLMOutput(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            decoder_hidden_states=outputs.decoder_hidden_states,
            decoder_attentions=outputs.decoder_attentions,
            cross_attentions=outputs.cross_attentions,
            encoder_last_hidden_state=outputs.encoder_last_hidden_state,
            encoder_hidden_states=outputs.encoder_hidden_states,
            encoder_attentions=outputs.encoder_attentions,
            image_hidden_states=outputs.image_hidden_states,
        )

    def get_placeholder_mask(
        self, input_ids: torch.LongTensor, inputs_embeds: torch.FloatTensor, image_features: torch.FloatTensor
    ):
        # Thin delegation: the backbone model knows where the image placeholder tokens are.
        return self.model.get_placeholder_mask(
            input_ids=input_ids, inputs_embeds=inputs_embeds, image_features=image_features
        )

    def _prepare_encoder_decoder_kwargs_for_generation(
        self,
        inputs_tensor: torch.Tensor,
        model_kwargs,
        model_input_name: str | None,
        generation_config,
    ) -> dict[str, Any]:
        # override to handle merging image and text embeddings before passing to language encoder
        inputs_embeds = model_kwargs.pop("inputs_embeds", None)
        pixel_values = model_kwargs.pop("pixel_values", None)
        if inputs_embeds is None:
            inputs_embeds = self.get_input_embeddings()(inputs_tensor)

        if pixel_values is not None:
            # Same image merge as in `Florence2Model.forward`: scatter projected image
            # features into the placeholder-token positions.
            image_features = self.get_image_features(pixel_values, return_dict=True).pooler_output
            image_features = image_features.to(inputs_embeds.device, inputs_embeds.dtype)
            special_image_mask = self.get_placeholder_mask(
                inputs_tensor, inputs_embeds=inputs_embeds, image_features=image_features
            )
            inputs_embeds = inputs_embeds.masked_scatter(special_image_mask, image_features)

        # Pass `None` as inputs_tensor: the merged embeddings above replace the ids.
        model_kwargs["inputs_embeds"] = inputs_embeds
        model_kwargs = super()._prepare_encoder_decoder_kwargs_for_generation(
            None, model_kwargs, model_input_name, generation_config
        )
        # Drop the embeddings afterwards so decoding steps don't reuse them.
        model_kwargs.pop("inputs_embeds", None)
        return model_kwargs
# Public API of this modular file; the codegen pipeline re-exports these names.
__all__ = [
    "Florence2Config",
    "Florence2Processor",
    "Florence2VisionConfig",
    "Florence2Model",
    "Florence2ForConditionalGeneration",
    "Florence2PreTrainedModel",
    "Florence2VisionBackbone",
    "Florence2VisionPreTrainedModel",
]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/florence2/modular_florence2.py",
"license": "Apache License 2.0",
"lines": 1536,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:tests/models/florence2/test_modeling_florence2.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch Florence2 model."""
import unittest
import requests
from transformers import (
AutoProcessor,
Florence2Config,
Florence2ForConditionalGeneration,
Florence2Model,
is_torch_available,
is_vision_available,
)
from transformers.testing_utils import (
cleanup,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
class Florence2VisionText2TextModelTester:
    """Builds tiny Florence-2 configs and inputs for the common model tests."""

    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=8,
        seq_length=13,
        encoder_seq_length=18,
        is_training=True,
        vocab_size=99,
        max_position_embeddings=64,
        encoder_layers=1,
        encoder_ffn_dim=16,
        decoder_layers=1,
        decoder_ffn_dim=16,
        num_attention_heads=1,
        d_model=16,
        activation_function="gelu",
        dropout=0.1,
        eos_token_id=2,
        bos_token_id=0,
        pad_token_id=1,
        image_token_id=4,
        depths=[1],
        patch_size=[7],
        patch_stride=[4],
        patch_padding=[3],
        patch_prenorm=[False],
        embed_dim=[16],
        num_heads=[1],
        num_groups=[1],
        window_size=12,
        drop_path_rate=0.1,
        projection_dim=16,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.is_training = is_training
        self.num_hidden_layers = decoder_layers
        self.hidden_size = d_model

        # Language model configs
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.encoder_layers = encoder_layers
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_ffn_dim = decoder_ffn_dim
        self.num_attention_heads = num_attention_heads
        self.d_model = d_model
        self.activation_function = activation_function
        self.dropout = dropout
        self.eos_token_id = eos_token_id
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.image_token_id = image_token_id

        # Vision model configs
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.depths = depths
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.patch_prenorm = patch_prenorm
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.num_groups = num_groups
        self.window_size = window_size
        self.projection_dim = projection_dim

        self.num_channels = 3
        self.num_image_tokens = 5
        # Decoder sequence length accounts for the image placeholder tokens.
        self.seq_length = seq_length + self.num_image_tokens
        self.encoder_seq_length = encoder_seq_length

    def get_config(self):
        """Return a tiny `Florence2Config` (BART text backbone + DaViT-style vision)."""
        text_config = {
            "model_type": "bart",
            "vocab_size": self.vocab_size,
            "max_position_embeddings": self.max_position_embeddings,
            "encoder_layers": self.encoder_layers,
            "encoder_ffn_dim": self.encoder_ffn_dim,
            "encoder_attention_heads": self.num_attention_heads,
            "decoder_layers": self.decoder_layers,
            "decoder_ffn_dim": self.decoder_ffn_dim,
            "decoder_attention_heads": self.num_attention_heads,
            "d_model": self.d_model,
            "activation_function": self.activation_function,
            "dropout": self.dropout,
            "attention_dropout": self.dropout,
            "activation_dropout": self.dropout,
            "eos_token_id": self.eos_token_id,
            "bos_token_id": self.bos_token_id,
            "pad_token_id": self.pad_token_id,
        }
        vision_config = {
            "drop_path_rate": self.drop_path_rate,
            "patch_size": self.patch_size,
            "depths": self.depths,
            "patch_stride": self.patch_stride,
            "patch_padding": self.patch_padding,
            "patch_prenorm": self.patch_prenorm,
            "embed_dim": self.embed_dim,
            "num_heads": self.num_heads,
            "num_groups": self.num_groups,
            "window_size": self.window_size,
            "activation_function": self.activation_function,
            "projection_dim": self.projection_dim,
        }
        return Florence2Config(
            text_config=text_config,
            vision_config=vision_config,
            image_token_id=self.image_token_id,
            initializer_range=0.02,
        )

    def prepare_config_and_inputs(self):
        """Build random pixel values and token ids with image placeholders up front."""
        pixel_values = floats_tensor(
            [
                self.batch_size,
                self.num_channels,
                self.image_size,
                self.image_size,
            ]
        )
        input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size - 1) + 1
        # Remove accidental image tokens, then place the real ones at the start and
        # terminate each sequence with EOS.
        input_ids[input_ids == self.image_token_id] = self.pad_token_id
        input_ids[:, : self.num_image_tokens] = self.image_token_id
        input_ids[:, -1] = self.eos_token_id
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        decoder_attention_mask = decoder_input_ids.ne(self.pad_token_id)
        inputs_dict = {
            "input_ids": input_ids,
            "pixel_values": pixel_values,
            "decoder_input_ids": decoder_input_ids,
            "decoder_attention_mask": decoder_attention_mask,
        }
        config = self.get_config()
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def create_and_check_florence2_model_fp16_forward(self, config, input_ids, pixel_values, attention_mask):
        """Smoke-check that an fp16 autocast forward pass produces finite logits."""
        model = Florence2ForConditionalGeneration(config=config)
        model.to(torch_device)
        model.eval()
        with torch.autocast(device_type="cuda", dtype=torch.float16):
            logits = model(
                input_ids=input_ids,
                attention_mask=attention_mask,
                pixel_values=pixel_values.to(torch.float16),
                return_dict=True,
            )["logits"]
        self.parent.assertFalse(torch.isnan(logits).any().item())
    # Skipped: BART-based backbones always tie weights, so this common test cannot pass.
    @unittest.skip(
        reason="This architecture (bart) has tied weights by default and there is no way to remove it, check: https://github.com/huggingface/transformers/pull/31771#issuecomment-2210915245"
    )
    def test_load_save_without_tied_weights(self):
        pass

    # Skipped: SDPA cannot use the flash kernel here (unsupported qkv stride).
    @unittest.skip(reason="SDPA can't dispatch on flash due to unsupported qkv stride")
    def test_sdpa_can_dispatch_on_flash(self):
        pass
@require_torch
class Florence2ForConditionalGenerationModelTest(
    ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    """
    Model tester for `Florence2ForConditionalGeneration`.
    """

    all_model_classes = (Florence2Model, Florence2ForConditionalGeneration) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "image-text-to-text": Florence2ForConditionalGeneration,
            "any-to-any": Florence2ForConditionalGeneration,
        }
        if is_torch_available()
        else {}
    )
    skip_test_image_features_output_shape = True  # Florence2 uses index -3 for hidden_size instead of -1
    has_attentions = False
    _is_composite = True

    def setUp(self):
        self.model_tester = Florence2VisionText2TextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=Florence2Config, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    # Skipped: BART-based backbones always tie weights, so this common test cannot pass.
    @unittest.skip(
        reason="Backnone architecture (BART) has tied weights by default and there is no way to remove it, check: https://github.com/huggingface/transformers/pull/31771#issuecomment-2210915245"
    )
    def test_load_save_without_tied_weights(self):
        pass
def prepare_img():
    """Download and return the Australia street-scene test image."""
    image_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg?download=true"
    response = requests.get(image_url, stream=True)
    return Image.open(response.raw)
@slow
@require_torch
class Florence2ForConditionalGenerationIntegrationTest(unittest.TestCase):
    def setUp(self):
        # Download the two reference images used across the integration tests:
        # a street scene (image1) and a car (image2).
        self.image1 = Image.open(
            requests.get(
                "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg?download=true",
                stream=True,
            ).raw
        )
        self.image2 = Image.open(
            requests.get(
                "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/car.jpg?download=true",
                stream=True,
            ).raw
        )
    def tearDown(self):
        # Release accelerator memory and run GC between tests.
        cleanup(torch_device, gc_collect=True)
    def test_base_model_inference_eager(self):
        """Detailed-caption generation with the base checkpoint, eager attention."""
        model_name = "florence-community/Florence-2-base"
        processor = AutoProcessor.from_pretrained(model_name)
        model = Florence2ForConditionalGeneration.from_pretrained(model_name, attn_implementation="eager").to(
            torch_device
        )

        prompt = "<DETAILED_CAPTION>"
        inputs = processor(images=self.image1, text=prompt, return_tensors="pt")
        inputs.to(device=torch_device)
        # The processor expands the task prompt and prepends the image placeholder tokens.
        EXPECTED_INPUT_IDS = [[processor.image_token_id] * processor.num_image_tokens + [0, 47066, 21700, 11, 4617, 99, 16, 2343, 11, 5, 2274, 4, 2]]  # fmt: skip
        self.assertEqual(inputs["input_ids"].tolist(), EXPECTED_INPUT_IDS)

        predictions = model.generate(**inputs, max_new_tokens=100)
        EXPECTED_PREDICTION_IDS = [[2, 0, 133, 2274, 924, 10, 912, 1203, 2828, 15, 5, 526, 9, 10, 2014, 11, 35910, 6, 188, 469, 412, 4, 20, 2014, 16, 9321, 19, 3413, 6, 3980, 6, 8, 19638, 6, 8, 89, 32, 82, 3051, 15, 5, 2767, 22609, 4, 20, 6360, 16, 7097, 11, 5, 3618, 4, 2]]  # fmt: skip
        self.assertEqual(predictions.tolist(), EXPECTED_PREDICTION_IDS)

        generated_text = processor.batch_decode(predictions, skip_special_tokens=True)[0]
        EXPECTED_GENERATED_TEXT = "The image shows a stop sign sitting on the side of a street in Chinatown, New York City. The street is lined with buildings, trees, and statues, and there are people walking on the footpath. The sky is visible in the background."  # fmt: skip
        self.assertEqual(generated_text, EXPECTED_GENERATED_TEXT)
    def test_base_model_batching_inference_eager(self):
        """Batched region-proposal + open-vocabulary detection, eager attention."""
        model_name = "florence-community/Florence-2-base"
        processor = AutoProcessor.from_pretrained(model_name)
        model = Florence2ForConditionalGeneration.from_pretrained(model_name, attn_implementation="eager").to(
            torch_device
        )

        images = [self.image1, self.image2]
        prompts = ["<REGION_PROPOSAL>", "<OPEN_VOCABULARY_DETECTION>wheels"]
        inputs = processor(images=images, text=prompts, padding="longest", return_tensors="pt")
        # Second prompt is shorter, hence right-padded with pad id 1.
        EXPECTED_INPUT_IDS = [
            [processor.image_token_id] * processor.num_image_tokens + [0, 574, 22486, 5, 976, 5327, 11, 5, 2274, 4, 2],
            [processor.image_token_id] * processor.num_image_tokens + [0, 574, 22486, 10562, 11, 5, 2274, 4, 2, 1, 1],
        ]
        self.assertEqual(inputs["input_ids"].tolist(), EXPECTED_INPUT_IDS)

        inputs.to(device=torch_device)
        predictions = model.generate(**inputs, do_sample=False, max_new_tokens=100)
        EXPECTED_PREDICTION_IDS = [
            [2, 0, 50269, 50269, 51267, 50980, 50269, 50269, 50688, 50942, 50269, 50333, 50633, 50941, 51033, 50269, 51267, 50934, 50794, 50814, 51190, 51032, 50432, 50402, 50634, 50692, 50269, 50334, 50340, 50927, 51224, 50417, 51267, 50930, 51075, 50944, 51159, 51028, 50836, 50947, 50915, 51030, 2],
            [2, 0, 28884, 2507, 50413, 50839, 51139, 51047, 28884, 2507, 50980, 50842, 51135, 51043, 28884, 2507, 50417, 50848, 50573, 51043, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
        ]  # fmt: skip
        self.assertEqual(predictions.tolist(), EXPECTED_PREDICTION_IDS)

        generated_texts = processor.batch_decode(predictions, skip_special_tokens=False)
        EXPECTED_GENERATED_TEXTS = [
            "</s><s><loc_0><loc_0><loc_998><loc_711><loc_0><loc_0><loc_419><loc_673><loc_0><loc_64><loc_364><loc_672><loc_764><loc_0><loc_998><loc_665><loc_525><loc_545><loc_921><loc_763><loc_163><loc_133><loc_365><loc_423><loc_0><loc_65><loc_71><loc_658><loc_955><loc_148><loc_998><loc_661><loc_806><loc_675><loc_890><loc_759><loc_567><loc_678><loc_646><loc_761></s>",
            "</s><s>wheels<loc_144><loc_570><loc_870><loc_778>wheels<loc_711><loc_573><loc_866><loc_774>wheels<loc_148><loc_579><loc_304><loc_774></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
        ]
        self.assertEqual(generated_texts, EXPECTED_GENERATED_TEXTS)

        # <loc_*> tokens decode back into pixel-space bounding boxes.
        parsed_answer_0 = processor.post_process_generation(
            generated_texts[0], task="<REGION_PROPOSAL>", image_size=(images[0].width, images[0].height)
        )
        EXPECTED_PARSED_ANSWER_0 = {
            "<REGION_PROPOSAL>": {
                "bboxes": [
                    [0, 0, 1298, 623],
                    [0, 0, 545, 589],
                    [0, 56, 473, 589],
                    [993, 0, 1298, 582],
                    [683, 477, 1197, 668],
                    [212, 116, 475, 370],
                    [0, 57, 92, 576],
                    [1242, 130, 1298, 579],
                    [1048, 591, 1157, 665],
                    [737, 594, 840, 667],
                ],
                "labels": ["", "", "", "", "", "", "", "", "", ""],
            }
        }
        self.assertEqual(parsed_answer_0, EXPECTED_PARSED_ANSWER_0)

        parsed_answer_1 = processor.post_process_generation(
            generated_texts[1], task="<OPEN_VOCABULARY_DETECTION>", image_size=(images[1].width, images[1].height)
        )
        EXPECTED_PARSED_ANSWER_1 = {"<OPEN_VOCABULARY_DETECTION>": {"bboxes": [[92, 273, 557, 373], [455, 275, 554, 371], [95, 278, 194, 371]], "bboxes_labels": ["wheels", "wheels", "wheels"], "polygons": [], "polygons_labels": []}}  # fmt: skip
        self.assertEqual(parsed_answer_1, EXPECTED_PARSED_ANSWER_1)
    def test_base_model_inference_sdpa(self):
        """Referring-expression segmentation with the base checkpoint, SDPA attention."""
        model_name = "florence-community/Florence-2-base"
        processor = AutoProcessor.from_pretrained(model_name)
        model = Florence2ForConditionalGeneration.from_pretrained(model_name, attn_implementation="sdpa").to(
            torch_device
        )

        prompt = "<REFERRING_EXPRESSION_SEGMENTATION>a car"
        inputs = processor(images=self.image2, text=prompt, return_tensors="pt")
        inputs.to(device=torch_device)
        EXPECTED_INPUT_IDS = [[processor.image_token_id] * processor.num_image_tokens + [0, 574, 22486, 10, 512, 11, 5, 2274, 19, 11445, 2]]  # fmt: skip
        self.assertEqual(inputs["input_ids"].tolist(), EXPECTED_INPUT_IDS)

        predictions = model.generate(**inputs, do_sample=False, max_new_tokens=100)
        EXPECTED_PREDICTION_IDS = [[2, 0, 50548, 50648, 50551, 50648, 50559, 50641, 50562, 50641, 50567, 50637, 50570, 50637, 50575, 50633, 50579, 50631, 50584, 50629, 50589, 50627, 50593, 50624, 50600, 50622, 50606, 50620, 50612, 50618, 50618, 50616, 50625, 50614, 50634, 50612, 50645, 50610, 50659, 50608, 50678, 50606, 50758, 50606, 50783, 50608, 50797, 50610, 50808, 50612, 50816, 50614, 50822, 50616, 50828, 50618, 50835, 50620, 50841, 50622, 50847, 50624, 50853, 50629, 50858, 50635, 50861, 50641, 50864, 50648, 50867, 50654, 50870, 50660, 50872, 50666, 50875, 50670, 50877, 50677, 50880, 50683, 50883, 50689, 50886, 50695, 50889, 50702, 50895, 50710, 50900, 50714, 50905, 50716, 50908, 50720, 50908, 50725, 50911, 50729, 2]]  # fmt: skip
        self.assertEqual(predictions.tolist(), EXPECTED_PREDICTION_IDS)

        generated_text = processor.batch_decode(predictions, skip_special_tokens=False)[0]
        EXPECTED_GENERATED_TEXT = "</s><s><loc_279><loc_379><loc_282><loc_379><loc_290><loc_372><loc_293><loc_372><loc_298><loc_368><loc_301><loc_368><loc_306><loc_364><loc_310><loc_362><loc_315><loc_360><loc_320><loc_358><loc_324><loc_355><loc_331><loc_353><loc_337><loc_351><loc_343><loc_349><loc_349><loc_347><loc_356><loc_345><loc_365><loc_343><loc_376><loc_341><loc_390><loc_339><loc_409><loc_337><loc_489><loc_337><loc_514><loc_339><loc_528><loc_341><loc_539><loc_343><loc_547><loc_345><loc_553><loc_347><loc_559><loc_349><loc_566><loc_351><loc_572><loc_353><loc_578><loc_355><loc_584><loc_360><loc_589><loc_366><loc_592><loc_372><loc_595><loc_379><loc_598><loc_385><loc_601><loc_391><loc_603><loc_397><loc_606><loc_401><loc_608><loc_408><loc_611><loc_414><loc_614><loc_420><loc_617><loc_426><loc_620><loc_433><loc_626><loc_441><loc_631><loc_445><loc_636><loc_447><loc_639><loc_451><loc_639><loc_456><loc_642><loc_460></s>"  # fmt: skip
        self.assertEqual(generated_text, EXPECTED_GENERATED_TEXT)

        # Segmentation tasks decode the <loc_*> stream into polygon vertex lists.
        parsed_answer = processor.post_process_generation(
            generated_text,
            task="<REFERRING_EXPRESSION_SEGMENTATION>",
            image_size=(self.image2.width, self.image2.height),
        )
        EXPECTED_PARSED_ANSWER = {'<REFERRING_EXPRESSION_SEGMENTATION>': {'polygons': [[[178, 182, 180, 182, 185, 178, 187, 178, 191, 176, 192, 176, 196, 174, 198, 174, 201, 173, 205, 172, 207, 170, 212, 169, 216, 168, 219, 167, 223, 166, 228, 165, 233, 164, 240, 163, 249, 162, 262, 162, 313, 162, 329, 162, 338, 163, 345, 164, 350, 165, 354, 166, 358, 167, 362, 168, 366, 169, 370, 170, 374, 173, 377, 175, 379, 178, 381, 182, 383, 185, 384, 187, 386, 190, 388, 192, 389, 196, 391, 198, 393, 201, 395, 204, 397, 208, 400, 211, 404, 213, 407, 214, 409, 216, 409, 219, 411, 221]]], 'labels': ['']}}  # fmt: skip
        self.assertEqual(parsed_answer, EXPECTED_PARSED_ANSWER)
    def test_base_model_batching_inference_sdpa(self):
        """Batched OCR + object detection with the base checkpoint, SDPA attention."""
        model_name = "florence-community/Florence-2-base"
        processor = AutoProcessor.from_pretrained(model_name)
        model = Florence2ForConditionalGeneration.from_pretrained(model_name, attn_implementation="sdpa").to(
            torch_device
        )

        images = [self.image1, self.image2]
        prompts = ["<OCR>", "<OD>"]
        inputs = processor(images=images, text=prompts, padding="longest", return_tensors="pt")
        EXPECTED_INPUT_IDS = [
            [processor.image_token_id] * processor.num_image_tokens + [0, 2264, 16, 5, 2788, 11, 5, 2274, 116, 2, 1, 1, 1],
            [processor.image_token_id] * processor.num_image_tokens + [0, 574, 22486, 5, 8720, 19, 4120, 766, 11, 5, 2274, 4, 2],
        ]  # fmt: skip
        self.assertEqual(inputs["input_ids"].tolist(), EXPECTED_INPUT_IDS)

        inputs.to(device=torch_device)
        predictions = model.generate(**inputs, do_sample=False, max_new_tokens=100)
        EXPECTED_PREDICTION_IDS = [
            [2, 0, 47643, 47240, 6382, 47643, 7405, 495, 211, 2571, 4014, 5733, 36714, 11582, 11582, 36714, 18164, 9357, 36714, 6248, 3602, 37127, 27969, 7471, 44636, 23171, 41907, 27, 16948, 45895, 11582, 45262, 18537, 530, 791, 384, 229, 791, 5733, 565, 3048, 673, 10932, 5733, 565, 11120, 673, 2],
            [2, 0, 5901, 50322, 50602, 51202, 51043, 11219, 3679, 50694, 50772, 50743, 50784, 13630, 50978, 50845, 51134, 51041, 50419, 50853, 50578, 51042, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
        ]  # fmt: skip
        self.assertEqual(predictions.tolist(), EXPECTED_PREDICTION_IDS)

        generated_texts = processor.batch_decode(predictions, skip_special_tokens=False)
        EXPECTED_GENERATED_TEXTS = [
            "</s><s>中文中BBD DATSTOP第福科技有限公司KU O KUOPTUSOyesOPTUSTO</s>",
            "</s><s>car<loc_53><loc_333><loc_933><loc_774>door handle<loc_425><loc_503><loc_474><loc_515>wheel<loc_709><loc_576><loc_865><loc_772><loc_150><loc_584><loc_309><loc_773></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
        ]  # fmt: skip
        self.assertEqual(generated_texts, EXPECTED_GENERATED_TEXTS)

        parsed_answer = processor.post_process_generation(
            generated_texts[1], task="<OD>", image_size=(images[1].width, images[1].height)
        )
        EXPECTED_PARSED_ANSWER = {'<OD>': {'bboxes': [[34, 160, 597, 371], [272, 241, 303, 247], [454, 276, 553, 370], [96, 280, 198, 371]], 'labels': ['car', 'door handle', 'wheel', 'wheel']}}  # fmt: skip
        self.assertEqual(parsed_answer, EXPECTED_PARSED_ANSWER)
    def test_large_model_inference_eager(self):
        """Detailed-caption generation with the large checkpoint, eager attention."""
        model_name = "florence-community/Florence-2-large"
        processor = AutoProcessor.from_pretrained(model_name)
        model = Florence2ForConditionalGeneration.from_pretrained(model_name, attn_implementation="eager").to(
            torch_device
        )

        prompt = "<DETAILED_CAPTION>"
        inputs = processor(images=self.image1, text=prompt, return_tensors="pt")
        inputs.to(device=torch_device)
        EXPECTED_INPUT_IDS = [[processor.image_token_id] * processor.num_image_tokens + [0, 47066, 21700, 11, 4617, 99, 16, 2343, 11, 5, 2274, 4, 2]]  # fmt: skip
        self.assertEqual(inputs["input_ids"].tolist(), EXPECTED_INPUT_IDS)

        predictions = model.generate(**inputs, do_sample=False, max_new_tokens=100)
        EXPECTED_PREDICTION_IDS = [[2, 0, 133, 2274, 924, 10, 909, 512, 1428, 159, 10, 2014, 9321, 19, 6764, 3413, 4, 96, 5, 39299, 6, 89, 16, 10, 1275, 912, 1203, 2828, 15, 5, 526, 9, 5, 921, 6, 8, 11, 5, 3618, 6, 89, 32, 1104, 19638, 6, 3980, 6, 8, 10, 699, 2440, 6360, 4, 2]]  # fmt: skip
        self.assertEqual(predictions.tolist(), EXPECTED_PREDICTION_IDS)

        generated_text = processor.batch_decode(predictions, skip_special_tokens=True)[0]
        EXPECTED_GENERATED_TEXT = "The image shows a black car driving down a street lined with tall buildings. In the foreground, there is a red stop sign sitting on the side of the road, and in the background, there are white statues, trees, and a clear blue sky."  # fmt: skip
        self.assertEqual(generated_text, EXPECTED_GENERATED_TEXT)
    def test_large_model_batching_inference_eager(self):
        """Batched generation with mixed tasks under eager attention.

        Runs <REGION_PROPOSAL> and <OPEN_VOCABULARY_DETECTION> on two different images in one batch,
        then checks both the raw generations and the task-specific post-processed answers.
        """
        model_name = "florence-community/Florence-2-large"
        processor = AutoProcessor.from_pretrained(model_name)
        model = Florence2ForConditionalGeneration.from_pretrained(model_name, attn_implementation="eager").to(
            torch_device
        )
        images = [self.image1, self.image2]
        prompts = ["<REGION_PROPOSAL>", "<OPEN_VOCABULARY_DETECTION>car"]
        # padding="longest" right-pads the shorter prompt with pad id 1.
        inputs = processor(images=images, text=prompts, padding="longest", return_tensors="pt")
        EXPECTED_INPUT_IDS = [
            [processor.image_token_id] * processor.num_image_tokens + [0, 574, 22486, 5, 976, 5327, 11, 5, 2274, 4, 2],
            [processor.image_token_id] * processor.num_image_tokens + [0, 574, 22486, 512, 11, 5, 2274, 4, 2, 1, 1],
        ]  # fmt: skip
        self.assertEqual(inputs["input_ids"].tolist(), EXPECTED_INPUT_IDS)
        inputs.to(device=torch_device)
        predictions = model.generate(**inputs, max_new_tokens=100)
        EXPECTED_PREDICTION_IDS = [
            [2, 0, 0, 0, 50269, 50269, 51268, 50944, 50269, 50269, 50631, 50940, 50269, 50269, 50575, 50940, 51032, 50269, 51268, 50932, 50793, 50813, 51190, 51031, 50432, 50401, 50632, 50691, 51071, 50943, 51159, 51027, 50835, 50946, 50915, 51029, 2],
            [2, 0, 5901, 50321, 50603, 51201, 51043, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
        ]  # fmt: skip
        self.assertEqual(predictions.tolist(), EXPECTED_PREDICTION_IDS)
        generated_texts = processor.batch_decode(predictions, skip_special_tokens=False)
        EXPECTED_GENERATED_TEXTS = [
            "</s><s><s><s><loc_0><loc_0><loc_999><loc_675><loc_0><loc_0><loc_362><loc_671><loc_0><loc_0><loc_306><loc_671><loc_763><loc_0><loc_999><loc_663><loc_524><loc_544><loc_921><loc_762><loc_163><loc_132><loc_363><loc_422><loc_802><loc_674><loc_890><loc_758><loc_566><loc_677><loc_646><loc_760></s>",
            "</s><s>car<loc_52><loc_334><loc_932><loc_774></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
        ]
        self.assertEqual(generated_texts, EXPECTED_GENERATED_TEXTS)
        # Post-processing converts the quantized <loc_*> tokens back to pixel coordinates per image size.
        parsed_answer_0 = processor.post_process_generation(
            generated_texts[0], task="<REGION_PROPOSAL>", image_size=(images[0].width, images[0].height)
        )
        EXPECTED_PARSED_ANSWER_0 = {
            "<REGION_PROPOSAL>": {
                "bboxes": [
                    [0, 0, 1299, 591],
                    [0, 0, 471, 588],
                    [0, 0, 398, 588],
                    [992, 0, 1299, 581],
                    [681, 476, 1197, 667],
                    [212, 116, 472, 370],
                    [1043, 590, 1157, 664],
                    [736, 593, 840, 666],
                ],
                "labels": ["", "", "", "", "", "", "", ""],
            }
        }
        self.assertEqual(parsed_answer_0, EXPECTED_PARSED_ANSWER_0)
        parsed_answer_1 = processor.post_process_generation(
            generated_texts[1], task="<OPEN_VOCABULARY_DETECTION>", image_size=(images[1].width, images[1].height)
        )
        EXPECTED_PARSED_ANSWER_1 = {'<OPEN_VOCABULARY_DETECTION>': {'bboxes': [[33, 160, 596, 371]], 'bboxes_labels': ['car'], 'polygons': [], 'polygons_labels': []}}  # fmt: skip
        self.assertEqual(parsed_answer_1, EXPECTED_PARSED_ANSWER_1)
    def test_large_model_inference_sdpa(self):
        """Single-image <REFERRING_EXPRESSION_SEGMENTATION> with the large checkpoint under SDPA attention.

        Pins the input ids, generated polygon token ids, decoded text, and the parsed polygon output.
        """
        model_name = "florence-community/Florence-2-large"
        processor = AutoProcessor.from_pretrained(model_name)
        model = Florence2ForConditionalGeneration.from_pretrained(model_name, attn_implementation="sdpa").to(
            torch_device
        )
        prompt = "<REFERRING_EXPRESSION_SEGMENTATION>a car"
        inputs = processor(images=self.image2, text=prompt, return_tensors="pt")
        inputs.to(device=torch_device)
        EXPECTED_INPUT_IDS = [[processor.image_token_id] * processor.num_image_tokens + [0, 574, 22486, 10, 512, 11, 5, 2274, 19, 11445, 2]]  # fmt: skip
        self.assertEqual(inputs["input_ids"].tolist(), EXPECTED_INPUT_IDS)
        predictions = model.generate(**inputs, max_new_tokens=100)
        EXPECTED_PREDICTION_IDS = [[2, 0, 0, 0, 50548, 50646, 50551, 50644, 50554, 50644, 50562, 50637, 50565, 50637, 50570, 50633, 50573, 50633, 50578, 50629, 50582, 50627, 50587, 50625, 50592, 50623, 50597, 50621, 50603, 50619, 50609, 50616, 50615, 50614, 50622, 50612, 50629, 50610, 50639, 50608, 50651, 50606, 50667, 50604, 50695, 50602, 50750, 50602, 50778, 50604, 50793, 50606, 50805, 50608, 50812, 50610, 50818, 50612, 50825, 50614, 50831, 50616, 50837, 50619, 50844, 50621, 50848, 50623, 50854, 50627, 50857, 50631, 50861, 50637, 50864, 50644, 50867, 50650, 50870, 50656, 50873, 50662, 50875, 50668, 50878, 50673, 50879, 50679, 50883, 50685, 50886, 50691, 50889, 50698, 50892, 50704, 50898, 50712, 50903, 50714, 2]]  # fmt: skip
        self.assertEqual(predictions.tolist(), EXPECTED_PREDICTION_IDS)
        # Special tokens are kept here because the <loc_*> tokens carry the polygon coordinates.
        generated_text = processor.batch_decode(predictions, skip_special_tokens=False)[0]
        EXPECTED_GENERATED_TEXT = "</s><s><s><s><loc_279><loc_377><loc_282><loc_375><loc_285><loc_375><loc_293><loc_368><loc_296><loc_368><loc_301><loc_364><loc_304><loc_364><loc_309><loc_360><loc_313><loc_358><loc_318><loc_356><loc_323><loc_354><loc_328><loc_352><loc_334><loc_350><loc_340><loc_347><loc_346><loc_345><loc_353><loc_343><loc_360><loc_341><loc_370><loc_339><loc_382><loc_337><loc_398><loc_335><loc_426><loc_333><loc_481><loc_333><loc_509><loc_335><loc_524><loc_337><loc_536><loc_339><loc_543><loc_341><loc_549><loc_343><loc_556><loc_345><loc_562><loc_347><loc_568><loc_350><loc_575><loc_352><loc_579><loc_354><loc_585><loc_358><loc_588><loc_362><loc_592><loc_368><loc_595><loc_375><loc_598><loc_381><loc_601><loc_387><loc_604><loc_393><loc_606><loc_399><loc_609><loc_404><loc_610><loc_410><loc_614><loc_416><loc_617><loc_422><loc_620><loc_429><loc_623><loc_435><loc_629><loc_443><loc_634><loc_445></s>"  # fmt: skip
        self.assertEqual(generated_text, EXPECTED_GENERATED_TEXT)
        parsed_answer = processor.post_process_generation(
            generated_text,
            task="<REFERRING_EXPRESSION_SEGMENTATION>",
            image_size=(self.image2.width, self.image2.height),
        )
        EXPECTED_PARSED_ANSWER = {'<REFERRING_EXPRESSION_SEGMENTATION>': {'polygons': [[[178, 181, 180, 180, 182, 180, 187, 176, 189, 176, 192, 174, 194, 174, 198, 173, 200, 172, 203, 171, 207, 170, 210, 169, 214, 168, 217, 166, 221, 165, 226, 164, 230, 163, 237, 162, 244, 162, 255, 161, 272, 160, 308, 160, 326, 161, 335, 162, 343, 162, 347, 163, 351, 164, 356, 165, 360, 166, 363, 168, 368, 169, 370, 170, 374, 172, 376, 174, 379, 176, 381, 180, 383, 183, 384, 186, 386, 188, 388, 191, 390, 194, 390, 197, 393, 199, 395, 202, 397, 206, 399, 209, 402, 212, 406, 213]]], 'labels': ['']}}  # fmt: skip
        self.assertEqual(parsed_answer, EXPECTED_PARSED_ANSWER)
    def test_large_model_batching_inference_sdpa(self):
        """Batched generation with mixed tasks (<OCR_WITH_REGION> / <CAPTION>) under SDPA attention.

        Checks input ids, generated token ids, decoded texts, and the parsed OCR quad boxes.
        """
        model_name = "florence-community/Florence-2-large"
        processor = AutoProcessor.from_pretrained(model_name)
        model = Florence2ForConditionalGeneration.from_pretrained(model_name, attn_implementation="sdpa").to(
            torch_device
        )
        images = [self.image1, self.image2]
        prompts = ["<OCR_WITH_REGION>", "<CAPTION>"]
        # padding="longest" right-pads the shorter prompt with pad id 1.
        inputs = processor(images=images, text=prompts, padding="longest", return_tensors="pt")
        EXPECTED_INPUT_IDS = [
            [processor.image_token_id] * processor.num_image_tokens + [0, 2264, 16, 5, 2788, 11, 5, 2274, 6, 19, 3806, 116, 2],
            [processor.image_token_id] * processor.num_image_tokens + [0, 2264, 473, 5, 2274, 6190, 116, 2, 1, 1, 1, 1, 1],
        ]  # fmt: skip
        self.assertEqual(inputs["input_ids"].tolist(), EXPECTED_INPUT_IDS)
        inputs.to(device=torch_device)
        predictions = model.generate(**inputs, max_new_tokens=100)
        EXPECTED_PREDICTION_IDS = [
            [2, 0, 0, 0, 47643, 47240, 7487, 47643, 50802, 50337, 50922, 50337, 50922, 50397, 50802, 50397, 4652, 50270, 50372, 50288, 50372, 50288, 50394, 50270, 50394, 495, 2571, 50401, 50455, 50446, 50457, 50446, 50483, 50401, 50482, 4014, 5733, 50446, 50495, 50614, 50493, 50614, 50596, 50446, 50600, 530, 791, 673, 51230, 50640, 51261, 50640, 51261, 50666, 51230, 50666, 5733, 565, 3048, 50389, 50683, 50461, 50684, 50461, 50719, 50389, 50717, 7111, 230, 5061, 33893, 50707, 50668, 50755, 50668, 50755, 50682, 50707, 50682, 10932, 50290, 50708, 50333, 50706, 50334, 50751, 50290, 50753, 4652, 51128, 50704, 51149, 50704, 51149, 50729, 51128, 50729, 2],
            [2, 0, 102, 2272, 512, 9181, 11, 760, 9, 10, 5718, 745, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
        ]  # fmt: skip
        self.assertEqual(predictions.tolist(), EXPECTED_PREDICTION_IDS)
        # Special tokens are kept: the OCR output interleaves text spans with <loc_*> quad-box tokens.
        generated_texts = processor.batch_decode(predictions, skip_special_tokens=False)
        EXPECTED_GENERATED_TEXTS = [
            "</s><s><s><s>中新中<loc_533><loc_68><loc_653><loc_68><loc_653><loc_128><loc_533><loc_128>88<loc_1><loc_103><loc_19><loc_103><loc_19><loc_125><loc_1><loc_125>DAT<loc_132><loc_186><loc_177><loc_188><loc_177><loc_214><loc_132><loc_213>STOP<loc_177><loc_226><loc_345><loc_224><loc_345><loc_327><loc_177><loc_331>KUO<loc_961><loc_371><loc_992><loc_371><loc_992><loc_397><loc_961><loc_397>OPTUS<loc_120><loc_414><loc_192><loc_415><loc_192><loc_450><loc_120><loc_448>OD COUKT<loc_438><loc_399><loc_486><loc_399><loc_486><loc_413><loc_438><loc_413>yes<loc_21><loc_439><loc_64><loc_437><loc_65><loc_482><loc_21><loc_484>88<loc_859><loc_435><loc_880><loc_435><loc_880><loc_460><loc_859><loc_460></s>",
            "</s><s>a green car parked in front of a yellow building</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
        ]  # fmt: skip
        self.assertEqual(generated_texts, EXPECTED_GENERATED_TEXTS)
        parsed_answer = processor.post_process_generation(
            generated_texts[0], task="<OCR_WITH_REGION>", image_size=(images[0].width, images[0].height)
        )
        EXPECTED_PARSED_ANSWER = {'<OCR_WITH_REGION>': {'quad_boxes': [[693, 60, 849, 60, 849, 112, 693, 112], [1, 90, 25, 90, 25, 109, 1, 109], [172, 163, 230, 165, 230, 187, 172, 187], [230, 198, 449, 196, 449, 286, 230, 290], [1249, 325, 1290, 325, 1290, 348, 1249, 348], [156, 363, 250, 363, 250, 394, 156, 392], [570, 349, 632, 349, 632, 362, 570, 362], [27, 385, 83, 383, 85, 422, 27, 424], [1117, 381, 1144, 381, 1144, 403, 1117, 403]], 'labels': ['中新中', '88', 'DAT', 'STOP', 'KUO', 'OPTUS', 'OD COUKT', 'yes', '88']}}  # fmt: skip
        self.assertEqual(parsed_answer, EXPECTED_PARSED_ANSWER)
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/florence2/test_modeling_florence2.py",
"license": "Apache License 2.0",
"lines": 499,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:tests/models/florence2/test_processing_florence2.py | # Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import Florence2Processor
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available
from ...test_processing_common import ProcessorTesterMixin
if is_torch_available():
import torch
@require_torch
@require_vision
class Florence2ProcessorTest(ProcessorTesterMixin, unittest.TestCase):
    """Tests for `Florence2Processor`: prompt construction, coordinate (de)quantization,
    and the per-task post-processing parsers."""

    processor_class = Florence2Processor

    @classmethod
    def _setup_image_processor(cls):
        """Build the image processor component for the mixin, with no image tokens in the text sequence."""
        image_processor_class = cls._get_component_class_from_processor("image_processor")
        image_processor = image_processor_class.from_pretrained("florence-community/Florence-2-base")
        image_processor.image_seq_length = 0
        return image_processor

    @classmethod
    def _setup_tokenizer(cls):
        """Build the tokenizer component and register the `<image>` placeholder token id on it."""
        tokenizer_class = cls._get_component_class_from_processor("tokenizer")
        tokenizer = tokenizer_class.from_pretrained("florence-community/Florence-2-base")
        tokenizer.image_token = "<image>"
        tokenizer.image_token_id = tokenizer.encode(tokenizer.image_token, add_special_tokens=False)[0]
        return tokenizer

    @unittest.skip("Florence2Processor adds prefix and suffix tokens to the text")
    def test_tokenizer_defaults(self):
        pass

    @staticmethod
    def prepare_processor_dict():
        """Return a post-processor configuration covering every Florence2 parsing task."""
        return {
            "post_processor_config": {
                "ocr": {
                    # Captures a text span followed by its four <loc_*> quad-box corner coordinates.
                    "pattern": r"(.+?)<loc_(\d+)><loc_(\d+)><loc_(\d+)><loc_(\d+)><loc_(\d+)><loc_(\d+)><loc_(\d+)><loc_(\d+)>",
                    "area_threshold": 0.0,
                },
                "phrase_grounding": {"banned_grounding_tokens": ["the image"]},
                "pure_text": {},
                "description_with_bboxes": {},
                "description_with_polygons": {},
                "polygons": {},
                "bboxes": {},
                "description_with_bboxes_or_polygons": {},
            }
        }

    def test_construct_prompts(self):
        """Task tokens are rewritten to their natural-language prompts; plain text passes through."""
        processor = self.processor_class.from_pretrained(self.tmpdirname)
        # Test single text without task token
        text = "This is a simple text."
        prompts = processor._construct_prompts(text)
        self.assertEqual(prompts, [text])
        # Test list of texts with task without input
        texts = ["<OCR>", "<CAPTION>"]
        prompts = processor._construct_prompts(texts)
        EXPECTED_PROMPTS_WITHOUT_INPUT = ["What is the text in the image?", "What does the image describe?"]
        self.assertEqual(prompts, EXPECTED_PROMPTS_WITHOUT_INPUT)
        # Test task with input
        texts = ["<CAPTION_TO_PHRASE_GROUNDING> a red car"]
        prompts = processor._construct_prompts(texts)
        EXPECTED_PROMPTS_WITH_INPUT = ["Locate the phrases in the caption: a red car"]
        self.assertEqual(prompts, EXPECTED_PROMPTS_WITH_INPUT)
        # Test invalid prompt with task token not alone
        with self.assertRaises(ValueError):
            processor._construct_prompts("<OCR> extra text")

    def test_quantizer_quantize_dequantize(self):
        """Quantize/dequantize round-trips boxes and points; coordinates are clamped to the image size."""
        processor = self.processor_class.from_pretrained(self.tmpdirname)
        # Test bounding box quantization and dequantization
        boxes = torch.tensor([[0, 0, 30, 40], [500, 550, 600, 690], [750, 1121, 851, 1239]], dtype=torch.int32)
        size = (800, 1200)
        quantized_boxes = processor.post_processor.quantize(boxes, size)
        dequantized_boxes = processor.post_processor.dequantize(quantized_boxes, size)
        # Out-of-range coordinates come back clipped to width-1 / height-1.
        EXPECTED_DEQUANTIZED_BBOX = torch.tensor(
            [[0, 0, 30, 40], [500, 550, 600, 690], [750, 1121, 799, 1199]], dtype=torch.int32
        )
        self.assertTrue(torch.allclose(dequantized_boxes, EXPECTED_DEQUANTIZED_BBOX))
        # Test points quantization and dequantization
        points = torch.tensor([[0, 0], [300, 400], [850, 1250]], dtype=torch.int32)
        quantized_points = processor.post_processor.quantize(points, size)
        dequantized_points = processor.post_processor.dequantize(quantized_points, size)
        EXPECTED_DEQUANTIZED_POINTS = torch.tensor([[0, 0], [300, 400], [799, 1199]], dtype=torch.int32)
        self.assertTrue(torch.allclose(dequantized_points, EXPECTED_DEQUANTIZED_POINTS))
        # Test invalid shape
        with self.assertRaises(ValueError):
            processor.post_processor.quantize(torch.tensor([[1, 2, 3]]), size)

    def test_post_process_parse_description_with_bboxes_from_text_and_spans(self):
        """Bbox parser handles both unlabeled (`allow_empty_phrase`) and labeled generations."""
        processor = self.processor_class.from_pretrained(self.tmpdirname)
        # Trailing incomplete coordinate groups (fewer than 4 <loc_*>) are dropped.
        text_without_phrase = "</s><s><loc_53><loc_334><loc_933><loc_775><loc_711><loc_203><loc_906><loc_546><loc_585><loc_309><loc_774><loc_709><loc_577></s><pad>"
        image_size = (1000, 1000)
        parsed_text_without_phrase = processor.post_processor.parse_description_with_bboxes_from_text_and_spans(
            text_without_phrase, image_size=image_size, allow_empty_phrase=True
        )
        EXPECTED_PARSED_TEXT_WITHOUT_PHRASE = [
            {"bbox": [53, 334, 933, 775], "cat_name": ""},
            {"bbox": [711, 203, 906, 546], "cat_name": ""},
            {"bbox": [585, 309, 774, 709], "cat_name": ""},
        ]
        self.assertEqual(parsed_text_without_phrase, EXPECTED_PARSED_TEXT_WITHOUT_PHRASE)
        text_with_phrase = (
            "</s><s>car<loc_53><loc_334><loc_933><loc_775>door handle<loc_425><loc_504><loc_474><loc_516></s><pad>"
        )
        image_size = (1000, 1000)
        parsed_text_with_phrase = processor.post_processor.parse_description_with_bboxes_from_text_and_spans(
            text_with_phrase, image_size=image_size, allow_empty_phrase=False
        )
        EXPECTED_PARSED_TEXT_WITH_PHRASE = [
            {"bbox": [53, 334, 933, 775], "cat_name": "car"},
            {"bbox": [425, 504, 474, 516], "cat_name": "door handle"},
        ]
        self.assertEqual(parsed_text_with_phrase, EXPECTED_PARSED_TEXT_WITH_PHRASE)

    def test_post_process_parse_description_with_polygons_from_text_and_spans(self):
        """Polygon parser handles both unlabeled and labeled polygon generations."""
        processor = self.processor_class.from_pretrained(self.tmpdirname)
        text_without_phrase = "<loc_279><loc_379><loc_282><loc_379><loc_290><loc_373><loc_293><loc_373><loc_298><loc_369><loc_301><loc_369>"
        image_size = (1000, 1000)
        parsed_text_without_phrase = processor.post_processor.parse_description_with_polygons_from_text_and_spans(
            text_without_phrase, image_size=image_size, allow_empty_phrase=True
        )
        EXPECTED_PARSED_TEXT_WITHOUT_PHRASE = [
            {
                "cat_name": "",
                "polygons": [[279, 379, 282, 379, 290, 373, 293, 373, 298, 369, 301, 369]],
            }
        ]
        self.assertEqual(parsed_text_without_phrase, EXPECTED_PARSED_TEXT_WITHOUT_PHRASE)
        text_with_phrase = (
            "Hello<loc_769><loc_248><loc_771><loc_234><loc_773><loc_206><loc_773><loc_198><loc_771><loc_193>"
        )
        image_size = (1000, 1000)
        parsed_text_with_phrase = processor.post_processor.parse_description_with_polygons_from_text_and_spans(
            text_with_phrase, image_size=image_size, allow_empty_phrase=False
        )
        EXPECTED_PARSED_TEXT_WITH_PHRASE = [
            {
                "cat_name": "Hello",
                "polygons": [[769, 248, 771, 234, 773, 206, 773, 198, 771, 193]],
            }
        ]
        self.assertEqual(parsed_text_with_phrase, EXPECTED_PARSED_TEXT_WITH_PHRASE)

    def test_post_process_parse_ocr_from_text_and_spans(self):
        """OCR parser extracts (text, quad_box) pairs and drops boxes below the area threshold."""
        processor = self.processor_class.from_pretrained(self.tmpdirname)
        text = "</s><s>Hello<loc_100><loc_100><loc_200><loc_100><loc_200><loc_200><loc_100><loc_200>World<loc_300><loc_300><loc_400><loc_300><loc_400><loc_400><loc_300><loc_400></s>"
        image_size = (1000, 1000)
        parsed = processor.post_processor.parse_ocr_from_text_and_spans(
            text, pattern=None, image_size=image_size, area_threshold=0.0
        )
        EXPECTED_PARSED_OCR = [
            {"quad_box": [100, 100, 200, 100, 200, 200, 100, 200], "text": "Hello"},
            {"quad_box": [300, 300, 400, 300, 400, 400, 300, 400], "text": "World"},
        ]
        self.assertEqual(parsed, EXPECTED_PARSED_OCR)
        # Test with area threshold filtering
        small_text = "Small<loc_1><loc_1><loc_2><loc_2><loc_2><loc_2><loc_1><loc_1>"
        parsed_small = processor.post_processor.parse_ocr_from_text_and_spans(
            small_text, pattern=None, image_size=image_size, area_threshold=0.01
        )
        EXPECTED_PARSED_OCR_SMALL = []
        self.assertEqual(parsed_small, EXPECTED_PARSED_OCR_SMALL)

    def test_post_process_parse_phrase_grounding_from_text_and_spans(self):
        """Phrase-grounding parser groups multiple boxes per phrase and skips banned phrases."""
        processor = self.processor_class.from_pretrained(self.tmpdirname)
        text = "</s><s>red car<loc_53><loc_334><loc_933><loc_775><loc_711><loc_203><loc_906><loc_546>sky<loc_0><loc_0><loc_1000><loc_300></s>"
        image_size = (1000, 1000)
        parsed = processor.post_processor.parse_phrase_grounding_from_text_and_spans(text, image_size=image_size)
        EXPECTED_PARSED_PHRASE_GROUNDING = [
            {"bbox": [[53, 334, 933, 775], [711, 203, 906, 546]], "cat_name": "red car"},
            {"bbox": [[0, 0, 1000, 300]], "cat_name": "sky"},
        ]
        self.assertEqual(parsed, EXPECTED_PARSED_PHRASE_GROUNDING)
        # "the image" is in `banned_grounding_tokens` (see prepare_processor_dict), so it is filtered out.
        blacklisted_text = "the image<loc_100><loc_100><loc_200><loc_200>"
        parsed_blacklisted = processor.post_processor.parse_phrase_grounding_from_text_and_spans(
            blacklisted_text, image_size=image_size
        )
        EXPECTED_PARSED_BLACKLISTED = []
        self.assertEqual(parsed_blacklisted, EXPECTED_PARSED_BLACKLISTED)

    def test_post_process_generation(self):
        """End-to-end `post_process_generation` dispatches to the right parser per task token."""
        processor = self.processor_class.from_pretrained(self.tmpdirname)
        # Test pure_text task
        text = "<s>Hello world</s>"
        cap_result = processor.post_process_generation(text=text, task="<CAPTION>", image_size=None)
        EXPECTED_PURE_TEXT_RESULT = {"<CAPTION>": "Hello world"}
        self.assertEqual(cap_result, EXPECTED_PURE_TEXT_RESULT)
        # Test description_with_bboxes task
        text = "car<loc_53><loc_334><loc_933><loc_775>"
        od_result = processor.post_process_generation(text=text, task="<OD>", image_size=(1000, 1000))
        EXPECTED_BBOXES_RESULT = {"<OD>": {"bboxes": [[53, 334, 933, 775]], "labels": ["car"]}}
        self.assertEqual(od_result, EXPECTED_BBOXES_RESULT)
        # Test OCR task
        text = "Hello<loc_100><loc_100><loc_200><loc_100><loc_200><loc_200><loc_100><loc_200>"
        ocr_result = processor.post_process_generation(text=text, task="<OCR_WITH_REGION>", image_size=(1000, 1000))
        EXPECTED_OCR_RESULT = {
            "<OCR_WITH_REGION>": {"quad_boxes": [[100, 100, 200, 100, 200, 200, 100, 200]], "labels": ["Hello"]}
        }
        self.assertEqual(ocr_result, EXPECTED_OCR_RESULT)
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/florence2/test_processing_florence2.py",
"license": "Apache License 2.0",
"lines": 204,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:src/transformers/models/metaclip_2/convert_metaclip_2_to_hf.py | """
This script allows you to convert MetaCLIP 2 (worldwide) checkpoints from the
original repository to the Hugging Face format.
URL: https://github.com/facebookresearch/MetaCLIP
To convert:
1. git clone the MetaCLIP repository
2. place it in the same directory as this script
3. move the conversion script to the MetaCLIP repository.
Then run the script with:
```bash
cd MetaCLIP
python convert_metaclip_2_to_hf.py --checkpoint_path /path/to/checkpoint --model_name ViT-H-14-quickgelu-worldwide
```
"""
import argparse
import os
import torch
from PIL import Image
# Import MetaCLIP modules
from src.mini_clip.factory import create_model_and_transforms
from transformers import (
AutoTokenizer,
CLIPImageProcessor,
CLIPProcessor,
MetaClip2Config,
MetaClip2Model,
)
def load_metaclip2_checkpoint(checkpoint_path: str, model_name: str) -> tuple[torch.nn.Module, object]:
    """Load a MetaCLIP 2 model and its image preprocessing transform from a checkpoint.

    Args:
        checkpoint_path: Path to the original MetaCLIP 2 checkpoint file.
        model_name: MetaCLIP config name (e.g. "ViT-H-14-quickgelu-worldwide").

    Returns:
        A `(model, preprocess)` tuple: the model in eval mode and the image
        preprocessing transform produced by the MetaCLIP factory.
    """
    # NOTE: the previous return annotation claimed `torch.nn.Module`, but this
    # function has always returned the (model, preprocess) pair.
    print(f"Loading MetaCLIP 2 model: {model_name}")

    # Worldwide checkpoints require the dedicated WorldWideCLIP model class.
    model_name_with_class = model_name
    if "worldwide" in model_name.lower():
        model_name_with_class = f"{model_name}@WorldWideCLIP"
        print("Using WorldWideCLIP class for worldwide model")

    # Create model using the factory
    model, _, preprocess = create_model_and_transforms(model_name_with_class, pretrained=checkpoint_path, device="cpu")
    model.eval()

    return model, preprocess
def create_hf_config(tokenizer: AutoTokenizer, model_name: str) -> tuple[MetaClip2Config, int]:
    """Create a Hugging Face `MetaClip2Config` for a known MetaCLIP model name.

    This is based on the configs found at https://github.com/facebookresearch/MetaCLIP/tree/main/src/mini_clip/model_configs.

    Args:
        tokenizer: Tokenizer whose `eos_token_id` is written into the text config.
        model_name: One of the supported MetaCLIP worldwide model names.

    Returns:
        A `(config, image_size)` tuple: the assembled config and the input image resolution.

    Raises:
        ValueError: If `model_name` is not one of the supported models.
    """
    print("Creating Hugging Face config...")

    # Vision config
    vision_configs = {
        "ViT-H-14-quickgelu-worldwide": {
            "image_size": 224,
            "patch_size": 14,
            "hidden_size": 1280,
            "intermediate_size": 1280 * 4,
            "num_attention_heads": 16,
            "num_hidden_layers": 32,
            "hidden_act": "quick_gelu",
            "projection_dim": 1024,
        },
        "ViT-H-14-378-worldwide": {
            "image_size": 378,
            "patch_size": 14,
            "hidden_size": 1280,
            "intermediate_size": 1280 * 4,
            "num_attention_heads": 16,
            "num_hidden_layers": 32,
            "hidden_act": "gelu",
            "projection_dim": 1024,
        },
        "ViT-bigG-14-worldwide": {
            "image_size": 224,
            "patch_size": 14,
            "hidden_size": 1664,
            "intermediate_size": 8192,
            "num_attention_heads": 16,
            "num_hidden_layers": 48,
            "hidden_act": "gelu",
            "projection_dim": 1280,
        },
        "ViT-bigG-14-378-worldwide": {
            "image_size": 378,
            "patch_size": 14,
            "hidden_size": 1664,
            "intermediate_size": 8192,
            "num_attention_heads": 16,
            "num_hidden_layers": 48,
            "hidden_act": "gelu",
            "projection_dim": 1280,
        },
    }

    # Fail early with an actionable message instead of a bare KeyError below.
    if model_name not in vision_configs:
        raise ValueError(
            f"Unknown model name: {model_name!r}. Supported models: {', '.join(sorted(vision_configs))}"
        )

    vision_config = vision_configs[model_name]
    image_size = vision_config["image_size"]

    # Text config (same key set as vision_configs; vocab is the XLM-V tokenizer's).
    text_configs = {
        "ViT-H-14-quickgelu-worldwide": {
            "hidden_size": 1024,
            "intermediate_size": 1024 * 4,
            "num_attention_heads": 16,
            "num_hidden_layers": 24,
            "max_position_embeddings": 77,
            "vocab_size": 901629,
            "eos_token_id": tokenizer.eos_token_id,
            "hidden_act": "quick_gelu",
            "projection_dim": 1024,
        },
        "ViT-H-14-378-worldwide": {
            "hidden_size": 1024,
            "intermediate_size": 1024 * 4,
            "num_attention_heads": 16,
            "num_hidden_layers": 24,
            "max_position_embeddings": 77,
            "vocab_size": 901629,
            "eos_token_id": tokenizer.eos_token_id,
            "hidden_act": "gelu",
            "projection_dim": 1024,
        },
        "ViT-bigG-14-worldwide": {
            "hidden_size": 1280,
            "intermediate_size": 1280 * 4,
            "num_attention_heads": 20,
            "num_hidden_layers": 32,
            "max_position_embeddings": 77,
            "vocab_size": 901629,
            "eos_token_id": tokenizer.eos_token_id,
            "hidden_act": "gelu",
            "projection_dim": 1280,
        },
        "ViT-bigG-14-378-worldwide": {
            "hidden_size": 1280,
            "intermediate_size": 1280 * 4,
            "num_attention_heads": 20,
            "num_hidden_layers": 32,
            "max_position_embeddings": 77,
            "vocab_size": 901629,
            "eos_token_id": tokenizer.eos_token_id,
            "hidden_act": "gelu",
            "projection_dim": 1280,
        },
    }

    text_config = text_configs[model_name]
    projection_dim = text_config["projection_dim"]

    # Create config
    config = MetaClip2Config(
        vision_config=vision_config,
        text_config=text_config,
        projection_dim=projection_dim,
    )

    return config, image_size
def convert_state_dict(metaclip_state_dict: dict[str, torch.Tensor]) -> dict[str, torch.Tensor]:
    """Translate a MetaCLIP checkpoint state dict into the Hugging Face MetaClip2 key layout.

    Two passes are made over the input:
      1. Rename every key (projections are transposed to fit `nn.Linear`); the fused
         attention qkv tensors are deliberately left out here.
      2. Split each fused `attn.in_proj_{weight,bias}` tensor into separate
         q/k/v projection tensors under the renamed layer prefix.

    Args:
        metaclip_state_dict: The original MetaCLIP model's state dict.

    Returns:
        A new dict keyed with Hugging Face parameter names.
    """
    print("Converting state dict...")

    # Keys with a one-to-one rename (the two projection matrices are handled separately).
    direct_renames = {
        "token_embedding.weight": "text_model.embeddings.token_embedding.weight",
        "positional_embedding": "text_model.embeddings.position_embedding.weight",
        "ln_final.weight": "text_model.final_layer_norm.weight",
        "ln_final.bias": "text_model.final_layer_norm.bias",
    }
    # Substring renames shared by the vision and text transformer layers (first match wins).
    layer_renames = (
        ("attn.out_proj", "self_attn.out_proj"),
        ("ln_1", "layer_norm1"),
        ("ln_2", "layer_norm2"),
        ("mlp.c_fc", "mlp.fc1"),
        ("mlp.c_proj", "mlp.fc2"),
    )
    # Substring renames for the non-layer parts of the vision tower.
    vision_renames = (
        ("conv1", "embeddings.patch_embedding"),
        ("class_embedding", "embeddings.class_embedding"),
        ("positional_embedding", "embeddings.position_embedding.weight"),
        ("ln_pre", "pre_layrnorm"),  # HF CLIP really uses this (misspelled) attribute name
        ("ln_post", "post_layernorm"),
    )

    def _first_match_replace(name: str, pairs) -> str:
        # Apply the first matching (old, new) substitution, mirroring an if/elif chain.
        for old, new in pairs:
            if old in name:
                return name.replace(old, new)
        return name

    hf_state_dict = {}
    for key, value in metaclip_state_dict.items():
        if key == "visual.proj":
            # MetaCLIP computes x @ proj while nn.Linear computes x @ weight.T,
            # so the stored weight must be proj.T.
            hf_state_dict["visual_projection.weight"] = value.T
            continue
        if key == "text_projection":
            # Same transpose reasoning as the visual projection.
            hf_state_dict["text_projection.weight"] = value.T
            continue
        if key in direct_renames:
            hf_state_dict[direct_renames[key]] = value
            continue
        if key.startswith("visual."):
            name = key.replace("visual.", "vision_model.")
            if "transformer.resblocks" in name:
                name = name.replace("transformer.resblocks", "encoder.layers")
                if "attn.in_proj" in name and ("weight" in name or "bias" in name):
                    continue  # fused qkv tensors are split in the second pass
                name = _first_match_replace(name, layer_renames)
            else:
                name = _first_match_replace(name, vision_renames)
            hf_state_dict[name] = value
            continue
        if key.startswith("transformer."):
            name = key.replace("transformer.", "text_model.encoder.")
            if "resblocks" in name:
                name = name.replace("resblocks", "layers")
                if "attn.in_proj" in name:
                    continue  # fused qkv tensors are split in the second pass
                name = _first_match_replace(name, layer_renames)
            hf_state_dict[name] = value
            continue
        # Anything unrecognized (e.g. logit_scale) keeps its original key.
        hf_state_dict[key] = value

    # Second pass: split each fused qkv tensor into separate q/k/v projections.
    for key, value in metaclip_state_dict.items():
        if "attn.in_proj_weight" in key:
            suffix = "weight"
        elif "attn.in_proj_bias" in key:
            suffix = "bias"
        else:
            continue
        prefix = key.replace(f"attn.in_proj_{suffix}", "")
        if key.startswith("visual."):
            prefix = prefix.replace("visual.transformer.resblocks", "vision_model.encoder.layers")
        else:
            prefix = prefix.replace("transformer.resblocks", "text_model.encoder.layers")
        third = value.shape[0] // 3
        hf_state_dict[f"{prefix}self_attn.q_proj.{suffix}"] = value[:third]
        hf_state_dict[f"{prefix}self_attn.k_proj.{suffix}"] = value[third : 2 * third]
        hf_state_dict[f"{prefix}self_attn.v_proj.{suffix}"] = value[2 * third :]

    return hf_state_dict
def verify_conversion(
    original_model, hf_model, preprocess, image_processor, tokenizer, test_image_path: str | None = None
) -> bool:
    """Verify that the conversion produces the same outputs.

    Runs one image (from `test_image_path`, or a solid-red dummy) and three fixed
    captions through both the original MetaCLIP model and the converted HF model,
    and compares the resulting image-text logits.

    Returns:
        True when the logit matrices have the same shape and match within atol=1e-4.
    """
    print("Verifying conversion...")

    # Create test image
    if test_image_path and os.path.exists(test_image_path):
        image = Image.open(test_image_path)
    else:
        # Create a dummy image
        image = Image.new("RGB", (224, 224), color="red")

    # Verify image processor: HF pixel_values must equal the original preprocess output.
    processed_image = preprocess(image).unsqueeze(0)
    pixel_values = image_processor(image, return_tensors="pt").pixel_values
    print("Shape of pixel_values:", pixel_values.shape)
    print("Shape of processed_image:", processed_image.shape)
    assert torch.allclose(pixel_values, processed_image)

    # Use tokenizer to get input_ids
    texts = ["a cat", "a dog", "a bird"]
    token_inputs = tokenizer(texts, return_tensors="pt", padding="max_length", truncation=True, max_length=77)
    input_ids = token_inputs.input_ids
    print(f"Processed text shape: {input_ids.shape}")
    print(f"Processed image shape: {processed_image.shape}")

    with torch.no_grad():
        # Original model outputs
        orig_image_features = original_model.encode_image(processed_image)
        orig_text_features = original_model.encode_text(input_ids)
        # Normalize and compute logits (mirrors the HF model's internal cosine-similarity scoring)
        orig_image_features = orig_image_features / orig_image_features.norm(dim=-1, keepdim=True)
        orig_text_features = orig_text_features / orig_text_features.norm(dim=-1, keepdim=True)
        orig_logits = original_model.logit_scale.exp() * orig_image_features @ orig_text_features.T

    print(f"Original text features: {orig_text_features[0][:5].tolist()}")
    print(f"Original image features: {orig_image_features[0][:5].tolist()}")

    with torch.no_grad():
        hf_outputs = hf_model(input_ids=input_ids, pixel_values=pixel_values)
        hf_logits = hf_outputs.logits_per_image

    # Debug: Check HF model features
    print(f"HF text features: {hf_outputs.text_embeds[0][:5].tolist()}")
    print(f"HF image features: {hf_outputs.image_embeds[0][:5].tolist()}")
    print(f"HF model EOS token ID: {hf_model.config.text_config.eos_token_id}")

    # Compare outputs
    print(f"Original logits: {orig_logits}")
    print(f"HF logits: {hf_logits}")
    print(f"Logit scale - Original: {original_model.logit_scale.exp():.6f}, HF: {hf_model.logit_scale.exp():.6f}")

    # Check if they're close
    if orig_logits.shape == hf_logits.shape and torch.allclose(orig_logits, hf_logits, atol=1e-4):
        print("[SUCCESS] Conversion verified! Outputs match.")
        return True
    else:
        print("[FAIL] Conversion failed! Outputs don't match.")
        if orig_logits.numel() > 0 and hf_logits.numel() > 0:
            print(f"Max difference: {(orig_logits - hf_logits).abs().max()}")
        return False
def push_to_hub(hf_model: MetaClip2Model, processor: CLIPProcessor, repo_name: str):
    """Upload the converted model and its processor to the Hugging Face Hub.

    Failures are reported on stdout instead of raised, so the calling script can
    still finish its remaining steps.
    """
    print(f"Pushing to hub: {repo_name}")
    try:
        # Push the model weights first, then the processor (same order as the local save).
        for artifact in (hf_model, processor):
            artifact.push_to_hub(repo_name)
        print(f"[SUCCESS] Successfully pushed to {repo_name}")
    except Exception as e:
        # Best-effort upload: report the error and continue rather than abort.
        print(f"[FAIL] Failed to push to hub: {e}")
def main():
    """Command-line driver: convert a MetaCLIP 2 checkpoint to the Hugging Face
    format, verify it against the original model, then optionally save and push it."""
    parser = argparse.ArgumentParser(description="Convert MetaCLIP 2 to Hugging Face format")
    parser.add_argument("--checkpoint_path", required=True, help="Path to MetaCLIP 2 checkpoint")
    parser.add_argument("--model_name", required=True, help="MetaCLIP model name (e.g., ViT-H-14-quickgelu-worldwide)")
    parser.add_argument("--output_dir", default="./converted_models", help="Output directory for converted model")
    parser.add_argument("--push_to_hub", action="store_true", help="Push to Hugging Face Hub")
    parser.add_argument("--hub_repo_name", help="Hub repository name")
    parser.add_argument("--test_image", help="Path to test image for verification")
    args = parser.parse_args()

    # Reference implementation that the converted weights are diffed against.
    orig_model, orig_preprocess = load_metaclip2_checkpoint(args.checkpoint_path, args.model_name)

    # The HF config derives its eos token id from this tokenizer.
    tokenizer = AutoTokenizer.from_pretrained("facebook/xlm-v-base")
    hf_config, image_size = create_hf_config(tokenizer=tokenizer, model_name=args.model_name)

    # Build a processor matching the resolved image resolution.
    image_processor = CLIPImageProcessor(
        size={"height": image_size, "width": image_size}, crop_size={"height": image_size, "width": image_size}
    )
    processor = CLIPProcessor(image_processor=image_processor, tokenizer=tokenizer)

    # Instantiate the HF model and load the remapped checkpoint weights into it.
    hf_model = MetaClip2Model(hf_config)
    remapped_weights = convert_state_dict(orig_model.state_dict())
    for param_name, param in hf_model.named_parameters():
        print(param_name, param.shape)
    hf_model.load_state_dict(remapped_weights)

    # Stop before saving/pushing anything if the two models disagree.
    if not verify_conversion(orig_model, hf_model, orig_preprocess, image_processor, tokenizer, args.test_image):
        print("Conversion verification failed. Please check the conversion logic.")
        return

    if args.output_dir:
        os.makedirs(args.output_dir, exist_ok=True)
        hf_model.save_pretrained(args.output_dir)
        processor.save_pretrained(args.output_dir)

    if args.push_to_hub and args.hub_repo_name:
        push_to_hub(hf_model, processor, args.hub_repo_name)
# Allow running this converter directly as a script.
if __name__ == "__main__":
    main()
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/metaclip_2/convert_metaclip_2_to_hf.py",
"license": "Apache License 2.0",
"lines": 358,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
huggingface/transformers:src/transformers/models/metaclip_2/modular_metaclip_2.py | import torch
from torch import nn
from ... import initialization as init
from ...masking_utils import create_causal_mask
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling
from ...processing_utils import Unpack
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, logging
from ..clip.configuration_clip import CLIPConfig, CLIPTextConfig, CLIPVisionConfig
from ..clip.modeling_clip import (
CLIPMLP,
CLIPAttention,
CLIPForImageClassification,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextEmbeddings,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPTextTransformer,
CLIPVisionEmbeddings,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
logger = logging.get_logger(__name__)

# Reference checkpoint / config class names used for documentation examples.
_CHECKPOINT_FOR_DOC = "facebook/metaclip-2-worldwide-huge-quickgelu"
_CONFIG_FOR_DOC = "MetaClip2Config"
class MetaClip2TextConfig(CLIPTextConfig):
    r"""
    This is the configuration class to store the configuration of a [`MetaClip2TextModel`]. It is used to instantiate
    a MetaClip2 text encoder according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the MetaClip2
    [facebook/metaclip-2-worldwide-huge-quickgelu](https://huggingface.co/facebook/metaclip-2-worldwide-huge-quickgelu) architecture.

    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 49408):
            Vocabulary size of the MetaClip2 text model. Defines the number of different tokens that can be represented by
            the `inputs_ids` passed when calling [`MetaClip2TextModel`].
        hidden_size (`int`, *optional*, defaults to 512):
            Dimensionality of the encoder layers and the pooler layer.
        intermediate_size (`int`, *optional*, defaults to 2048):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        projection_dim (`int`, *optional*, defaults to 512):
            Dimensionality of text and vision projection layers.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 8):
            Number of attention heads for each attention layer in the Transformer encoder.
        max_position_embeddings (`int`, *optional*, defaults to 77):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"`, `"gelu_new"` and `"quick_gelu"` are supported.
        layer_norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon used by the layer normalization layers.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        initializer_factor (`float`, *optional*, defaults to 1.0):
            A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
            testing).
        pad_token_id (`int`, *optional*, defaults to 1):
            Padding token id.
        bos_token_id (`int`, *optional*, defaults to 49406):
            Beginning of stream token id.
        eos_token_id (`int`, *optional*, defaults to 49407):
            End of stream token id.

    Example:

    ```python
    >>> from transformers import MetaClip2TextConfig, MetaClip2TextModel

    >>> # Initializing a MetaClip2TextConfig with facebook/metaclip-2-worldwide-huge-quickgelu style configuration
    >>> configuration = MetaClip2TextConfig()

    >>> # Initializing a MetaClip2TextModel (with random weights) from the facebook/metaclip-2-worldwide-huge-quickgelu style configuration
    >>> model = MetaClip2TextModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    # No overrides: every field and default is inherited unchanged from CLIPTextConfig.
class MetaClip2VisionConfig(CLIPVisionConfig):
    r"""
    This is the configuration class to store the configuration of a [`MetaClip2VisionModel`]. It is used to instantiate a MetaClip2
    vision encoder according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of the vision encoder of the MetaClip2
    [facebook/metaclip-2-worldwide-huge-quickgelu](https://huggingface.co/facebook/metaclip-2-worldwide-huge-quickgelu) architecture.

    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.

    Args:
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        intermediate_size (`int`, *optional*, defaults to 3072):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        projection_dim (`int`, *optional*, defaults to 512):
            Dimensionality of text and vision projection layers.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        num_channels (`int`, *optional*, defaults to 3):
            The number of input channels.
        image_size (`int`, *optional*, defaults to 224):
            The size (resolution) of each image.
        patch_size (`int`, *optional*, defaults to 32):
            The size (resolution) of each patch.
        hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"`, `"gelu_new"` and `"quick_gelu"` are supported.
        layer_norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon used by the layer normalization layers.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        initializer_factor (`float`, *optional*, defaults to 1.0):
            A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
            testing).

    Example:

    ```python
    >>> from transformers import MetaClip2VisionConfig, MetaClip2VisionModel

    >>> # Initializing a MetaClip2VisionConfig with facebook/metaclip-2-worldwide-huge-quickgelu style configuration
    >>> configuration = MetaClip2VisionConfig()

    >>> # Initializing a MetaClip2VisionModel (with random weights) from the facebook/metaclip-2-worldwide-huge-quickgelu style configuration
    >>> model = MetaClip2VisionModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    # No overrides: every field and default is inherited unchanged from CLIPVisionConfig.
class MetaClip2Config(CLIPConfig):
    r"""
    [`MetaClip2Config`] is the configuration class to store the configuration of a [`MetaClip2Model`]. It is used to
    instantiate a MetaClip2 model according to the specified arguments, defining the text model and vision model configs.
    Instantiating a configuration with the defaults will yield a similar configuration to that of the MetaClip2
    [facebook/metaclip-2-worldwide-huge-quickgelu](https://huggingface.co/facebook/metaclip-2-worldwide-huge-quickgelu) architecture.

    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.

    Args:
        text_config (`dict`, *optional*):
            Dictionary of configuration options used to initialize [`MetaClip2TextConfig`].
        vision_config (`dict`, *optional*):
            Dictionary of configuration options used to initialize [`MetaClip2VisionConfig`].
        projection_dim (`int`, *optional*, defaults to 512):
            Dimensionality of text and vision projection layers.
        logit_scale_init_value (`float`, *optional*, defaults to 2.6592):
            The initial value of the *logit_scale* parameter. Default is used as per the original MetaClip2 implementation.
        kwargs (*optional*):
            Dictionary of keyword arguments.

    Example:

    ```python
    >>> from transformers import MetaClip2Config, MetaClip2Model

    >>> # Initializing a MetaClip2Config with facebook/metaclip-2-worldwide-huge-quickgelu style configuration
    >>> configuration = MetaClip2Config()

    >>> # Initializing a MetaClip2Model (with random weights) from the facebook/metaclip-2-worldwide-huge-quickgelu style configuration
    >>> model = MetaClip2Model(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config

    >>> # We can also initialize a MetaClip2Config from a MetaClip2TextConfig and a MetaClip2VisionConfig
    >>> from transformers import MetaClip2TextConfig, MetaClip2VisionConfig

    >>> # Initializing a MetaClip2Text and MetaClip2Vision configuration
    >>> config_text = MetaClip2TextConfig()
    >>> config_vision = MetaClip2VisionConfig()

    >>> config = MetaClip2Config(text_config=config_text, vision_config=config_vision)
    ```"""

    # No overrides: composition of text/vision configs is inherited unchanged from CLIPConfig.
class MetaClip2TextEmbeddings(CLIPTextEmbeddings):
    # Identical to the CLIP implementation; the subclass only renames the component for MetaClip2.
    pass


class MetaClip2VisionEmbeddings(CLIPVisionEmbeddings):
    # Identical to the CLIP implementation; the subclass only renames the component for MetaClip2.
    pass


class MetaClip2Attention(CLIPAttention):
    # Identical to the CLIP implementation; the subclass only renames the component for MetaClip2.
    pass


class MetaClip2MLP(CLIPMLP):
    # Identical to the CLIP implementation; the subclass only renames the component for MetaClip2.
    pass
@auto_docstring
class MetaClip2PreTrainedModel(CLIPPreTrainedModel):
    base_model_prefix = "metaclip_2"

    @torch.no_grad()
    def _init_weights(self, module):
        """Initialize the weights of `module`, one CLIP-style scheme per component type."""
        factor = self.config.initializer_factor
        if isinstance(module, MetaClip2TextEmbeddings):
            # Token/position embeddings: plain normal init scaled by the global factor.
            init.normal_(module.token_embedding.weight, mean=0.0, std=factor * 0.02)
            init.normal_(module.position_embedding.weight, mean=0.0, std=factor * 0.02)
            init.copy_(module.position_ids, torch.arange(module.position_ids.shape[-1]).expand((1, -1)))
        elif isinstance(module, MetaClip2VisionEmbeddings):
            factor = self.config.initializer_factor
            init.normal_(module.class_embedding, mean=0.0, std=module.embed_dim**-0.5 * factor)
            init.normal_(module.patch_embedding.weight, std=module.config.initializer_range * factor)
            init.normal_(module.position_embedding.weight, std=module.config.initializer_range * factor)
            init.copy_(module.position_ids, torch.arange(module.position_ids.shape[-1]).expand((1, -1)))
        elif isinstance(module, MetaClip2Attention):
            factor = self.config.initializer_factor
            # Projections into attention are additionally scaled down by network depth.
            in_proj_std = (module.embed_dim**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor
            out_proj_std = (module.embed_dim**-0.5) * factor
            init.normal_(module.q_proj.weight, std=in_proj_std)
            init.normal_(module.k_proj.weight, std=in_proj_std)
            init.normal_(module.v_proj.weight, std=in_proj_std)
            init.normal_(module.out_proj.weight, std=out_proj_std)
        elif isinstance(module, MetaClip2MLP):
            factor = self.config.initializer_factor
            # fc2 (the depth-scaled projection) gets a smaller std than fc1.
            in_proj_std = (module.config.hidden_size**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor
            fc_std = (2 * module.config.hidden_size) ** -0.5 * factor
            init.normal_(module.fc1.weight, std=fc_std)
            init.normal_(module.fc2.weight, std=in_proj_std)
        elif isinstance(module, MetaClip2Model):
            # Both modality projections are scaled by their input embedding width.
            init.normal_(
                module.text_projection.weight,
                std=module.text_embed_dim**-0.5 * self.config.initializer_factor,
            )
            init.normal_(
                module.visual_projection.weight,
                std=module.vision_embed_dim**-0.5 * self.config.initializer_factor,
            )
        elif isinstance(module, MetaClip2VisionModelWithProjection):
            init.normal_(
                module.visual_projection.weight,
                std=self.config.hidden_size**-0.5 * self.config.initializer_factor,
            )
        elif isinstance(module, MetaClip2TextModelWithProjection):
            init.normal_(
                module.text_projection.weight,
                std=self.config.hidden_size**-0.5 * self.config.initializer_factor,
            )
        elif isinstance(module, MetaClip2ForImageClassification):
            init.normal_(
                module.classifier.weight,
                std=self.config.vision_config.hidden_size**-0.5 * self.config.initializer_factor,
            )
        # Note: plain `if` (not `elif`) — these run in addition to any branch above.
        if isinstance(module, nn.LayerNorm):
            init.zeros_(module.bias)
            init.ones_(module.weight)
        if isinstance(module, nn.Linear) and module.bias is not None:
            init.zeros_(module.bias)
class MetaClip2TextTransformer(CLIPTextTransformer):
    def forward(
        self,
        input_ids,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.Tensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> BaseModelOutputWithPooling:
        """Encode token ids into per-token hidden states plus an EOS-pooled embedding."""
        input_shape = input_ids.size()
        # Collapse any leading batch dimensions down to (batch, seq_len).
        input_ids = input_ids.view(-1, input_shape[-1])
        hidden_states = self.embeddings(input_ids=input_ids, position_ids=position_ids)
        # Merge the (optional) padding mask with a causal mask over the sequence.
        attention_mask = create_causal_mask(
            config=self.config,
            inputs_embeds=hidden_states,
            attention_mask=attention_mask,
            cache_position=torch.arange(hidden_states.shape[1], device=hidden_states.device),
            past_key_values=None,
        )
        # Drop any caller-supplied `is_causal` to avoid a duplicate keyword: it is set explicitly below.
        kwargs.pop("is_causal", None)
        encoder_outputs: BaseModelOutput = self.encoder(
            inputs_embeds=hidden_states,
            attention_mask=attention_mask,
            is_causal=True,
            **kwargs,
        )
        last_hidden_state = encoder_outputs.last_hidden_state
        last_hidden_state = self.final_layer_norm(last_hidden_state)
        # Use robust pooling like CLIP - finds the first EOS token position per sequence
        # (argmax over the boolean mask returns the index of the first True per row).
        pooled_output = last_hidden_state[
            torch.arange(last_hidden_state.shape[0], device=last_hidden_state.device),
            (input_ids.to(dtype=torch.int, device=last_hidden_state.device) == self.eos_token_id).int().argmax(dim=-1),
        ]
        return BaseModelOutputWithPooling(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
class MetaClip2TextModel(CLIPTextModel):
    """
    The text model from MetaClip2 without any head or projection on top.

    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)

    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
    and behavior.

    Args:
        config ([`MetaClip2TextConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.

    Examples:

    ```python
    >>> from transformers import AutoTokenizer, MetaClip2TextModel

    >>> model = MetaClip2TextModel.from_pretrained("facebook/metaclip-2-worldwide-huge-quickgelu")
    >>> tokenizer = AutoTokenizer.from_pretrained("facebook/metaclip-2-worldwide-huge-quickgelu")

    >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt")

    >>> outputs = model(**inputs)
    >>> last_hidden_state = outputs.last_hidden_state
    >>> pooled_output = outputs.pooler_output  # pooled (EOS token) states
    ```"""

    def forward(
        self,
        input_ids: torch.Tensor | None = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.Tensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ):
        r"""
        Examples:

        ```python
        >>> from transformers import AutoTokenizer, MetaClip2TextModel

        >>> model = MetaClip2TextModel.from_pretrained("facebook/metaclip-2-worldwide-huge-quickgelu")
        >>> tokenizer = AutoTokenizer.from_pretrained("facebook/metaclip-2-worldwide-huge-quickgelu")

        >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt")

        >>> outputs = model(**inputs)
        >>> last_hidden_state = outputs.last_hidden_state
        >>> pooled_output = outputs.pooler_output  # pooled (EOS token) states
        ```"""
        # Pure delegation: the inherited CLIPTextModel forward does all the work.
        return super().forward(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            **kwargs,
        )
class MetaClip2TextModelWithProjection(CLIPTextModelWithProjection):
    """
    MetaClip2 text model with a projection layer on top (a linear layer on top of the pooled output).

    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)

    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
    and behavior.

    Args:
        config ([`MetaClip2TextConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.

    Examples:

    ```python
    >>> from transformers import AutoTokenizer, MetaClip2TextModelWithProjection

    >>> model = MetaClip2TextModelWithProjection.from_pretrained("facebook/metaclip-2-worldwide-huge-quickgelu")
    >>> tokenizer = AutoTokenizer.from_pretrained("facebook/metaclip-2-worldwide-huge-quickgelu")

    >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt")

    >>> outputs = model(**inputs)
    >>> text_embeds = outputs.text_embeds
    ```"""

    def forward(
        self,
        input_ids: torch.Tensor | None = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.Tensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ):
        r"""
        Examples:

        ```python
        >>> from transformers import AutoTokenizer, MetaClip2TextModelWithProjection

        >>> model = MetaClip2TextModelWithProjection.from_pretrained("facebook/metaclip-2-worldwide-huge-quickgelu")
        >>> tokenizer = AutoTokenizer.from_pretrained("facebook/metaclip-2-worldwide-huge-quickgelu")

        >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt")

        >>> outputs = model(**inputs)
        >>> text_embeds = outputs.text_embeds
        ```"""
        # Pure delegation: the inherited CLIPTextModelWithProjection forward does all the work.
        return super().forward(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            **kwargs,
        )
class MetaClip2Model(CLIPModel):
    """
    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)

    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
    and behavior.

    Args:
        config ([`MetaClip2Config`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.

    Examples:

    ```python
    >>> from PIL import Image
    >>> import httpx
    >>> from io import BytesIO

    >>> from transformers import AutoProcessor, MetaClip2Model

    >>> model = MetaClip2Model.from_pretrained("facebook/metaclip-2-worldwide-huge-quickgelu")
    >>> processor = AutoProcessor.from_pretrained("facebook/metaclip-2-worldwide-huge-quickgelu")

    >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    >>> with httpx.stream("GET", url) as response:
    ...     image = Image.open(BytesIO(response.read()))

    >>> inputs = processor(
    ...     text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt", padding=True
    ... )

    >>> outputs = model(**inputs)
    >>> logits_per_image = outputs.logits_per_image  # this is the image-text similarity score
    >>> probs = logits_per_image.softmax(dim=1)  # we can take the softmax to get the label probabilities
    ```"""

    def __init__(self, config: MetaClip2Config):
        super().__init__(config)
        text_config = config.text_config
        vision_config = config.vision_config
        self.projection_dim = config.projection_dim
        self.text_embed_dim = text_config.hidden_size
        self.vision_embed_dim = vision_config.hidden_size
        # Build the sub-models, then keep only their inner transformer modules.
        text_model = MetaClip2TextModel._from_config(text_config)
        self.text_model = text_model.text_model
        vision_model = MetaClip2VisionModel._from_config(vision_config)
        self.vision_model = vision_model.vision_model
        # Bias-free projections map both modalities into the shared `projection_dim` space.
        self.visual_projection = nn.Linear(self.vision_embed_dim, self.projection_dim, bias=False)
        self.text_projection = nn.Linear(self.text_embed_dim, self.projection_dim, bias=False)
        # Learnable temperature for the image-text similarity logits.
        self.logit_scale = nn.Parameter(torch.tensor(self.config.logit_scale_init_value))
        # Initialize weights and apply final processing
        self.post_init()

    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        pixel_values: torch.FloatTensor | None = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        return_loss: bool | None = None,
        interpolate_pos_encoding: bool = False,
        **kwargs: Unpack[TransformersKwargs],
    ):
        r"""
        return_loss (`bool`, *optional*):
            Whether or not to return the contrastive loss.

        Examples:

        ```python
        >>> from PIL import Image
        >>> import httpx
        >>> from io import BytesIO

        >>> from transformers import AutoProcessor, MetaClip2Model

        >>> model = MetaClip2Model.from_pretrained("facebook/metaclip-2-worldwide-huge-quickgelu")
        >>> processor = AutoProcessor.from_pretrained("facebook/metaclip-2-worldwide-huge-quickgelu")

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> with httpx.stream("GET", url) as response:
        ...     image = Image.open(BytesIO(response.read()))

        >>> inputs = processor(
        ...     text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt", padding=True
        ... )

        >>> outputs = model(**inputs)
        >>> logits_per_image = outputs.logits_per_image  # this is the image-text similarity score
        >>> probs = logits_per_image.softmax(dim=1)  # we can take the softmax to get the label probabilities
        ```"""
        # Pure delegation: the inherited CLIPModel forward does all the work.
        return super().forward(
            input_ids=input_ids,
            pixel_values=pixel_values,
            attention_mask=attention_mask,
            position_ids=position_ids,
            return_loss=return_loss,
            interpolate_pos_encoding=interpolate_pos_encoding,
            **kwargs,
        )

    @can_return_tuple
    @auto_docstring
    def get_text_features(
        self,
        input_ids: torch.Tensor,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.Tensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple | BaseModelOutputWithPooling:
        r"""
        Examples:

        ```python
        >>> from transformers import AutoTokenizer, MetaClip2Model

        >>> model = MetaClip2Model.from_pretrained("facebook/metaclip-2-worldwide-huge-quickgelu")
        >>> tokenizer = AutoTokenizer.from_pretrained("facebook/metaclip-2-worldwide-huge-quickgelu")

        >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt")
        >>> text_features = model.get_text_features(**inputs)
        ```"""
        # Always request dict output here; `@can_return_tuple` converts back for tuple callers.
        return super().get_text_features(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            return_dict=True,
            **kwargs,
        )

    @can_return_tuple
    @auto_docstring
    def get_image_features(
        self,
        pixel_values: torch.FloatTensor | None = None,
        interpolate_pos_encoding: bool = False,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple | BaseModelOutputWithPooling:
        r"""
        Examples:

        ```python
        >>> from PIL import Image
        >>> import httpx
        >>> from io import BytesIO

        >>> from transformers import AutoProcessor, MetaClip2Model

        >>> model = MetaClip2Model.from_pretrained("facebook/metaclip-2-worldwide-huge-quickgelu")
        >>> processor = AutoProcessor.from_pretrained("facebook/metaclip-2-worldwide-huge-quickgelu")

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> with httpx.stream("GET", url) as response:
        ...     image = Image.open(BytesIO(response.read()))

        >>> inputs = processor(images=image, return_tensors="pt")

        >>> image_features = model.get_image_features(**inputs)
        ```"""
        # Always request dict output here; `@can_return_tuple` converts back for tuple callers.
        return super().get_image_features(
            pixel_values=pixel_values,
            interpolate_pos_encoding=interpolate_pos_encoding,
            return_dict=True,
            **kwargs,
        )
class MetaClip2VisionModel(CLIPVisionModel):
    """
    The vision model from MetaClip2 without any head or projection on top.

    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)

    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
    and behavior.

    Args:
        config ([`MetaClip2VisionConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.

    Examples:

    ```python
    >>> from PIL import Image
    >>> import httpx
    >>> from io import BytesIO

    >>> from transformers import AutoProcessor, MetaClip2VisionModel

    >>> model = MetaClip2VisionModel.from_pretrained("facebook/metaclip-2-worldwide-huge-quickgelu")
    >>> processor = AutoProcessor.from_pretrained("facebook/metaclip-2-worldwide-huge-quickgelu")

    >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    >>> with httpx.stream("GET", url) as response:
    ...     image = Image.open(BytesIO(response.read()))

    >>> inputs = processor(images=image, return_tensors="pt")

    >>> outputs = model(**inputs)
    >>> last_hidden_state = outputs.last_hidden_state
    >>> pooled_output = outputs.pooler_output  # pooled CLS states
    ```"""

    def forward(
        self,
        pixel_values: torch.FloatTensor | None = None,
        interpolate_pos_encoding: bool = False,
        **kwargs: Unpack[TransformersKwargs],
    ):
        r"""
        Examples:

        ```python
        >>> from PIL import Image
        >>> import httpx
        >>> from io import BytesIO

        >>> from transformers import AutoProcessor, MetaClip2VisionModel

        >>> model = MetaClip2VisionModel.from_pretrained("facebook/metaclip-2-worldwide-huge-quickgelu")
        >>> processor = AutoProcessor.from_pretrained("facebook/metaclip-2-worldwide-huge-quickgelu")

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> with httpx.stream("GET", url) as response:
        ...     image = Image.open(BytesIO(response.read()))

        >>> inputs = processor(images=image, return_tensors="pt")

        >>> outputs = model(**inputs)
        >>> last_hidden_state = outputs.last_hidden_state
        >>> pooled_output = outputs.pooler_output  # pooled CLS states
        ```"""
        # Pure delegation: the inherited CLIPVisionModel forward does all the work.
        return super().forward(
            pixel_values=pixel_values,
            interpolate_pos_encoding=interpolate_pos_encoding,
            **kwargs,
        )
class MetaClip2VisionModelWithProjection(CLIPVisionModelWithProjection):
    """
    MetaClip2 vision model with a projection layer on top (a linear layer on top of the pooled output).

    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)

    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
    and behavior.

    Args:
        config ([`MetaClip2VisionConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.

    Examples:

    ```python
    >>> from PIL import Image
    >>> import httpx
    >>> from io import BytesIO

    >>> from transformers import AutoProcessor, MetaClip2VisionModelWithProjection

    >>> model = MetaClip2VisionModelWithProjection.from_pretrained("facebook/metaclip-2-worldwide-huge-quickgelu")
    >>> processor = AutoProcessor.from_pretrained("facebook/metaclip-2-worldwide-huge-quickgelu")

    >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    >>> with httpx.stream("GET", url) as response:
    ...     image = Image.open(BytesIO(response.read()))

    >>> inputs = processor(images=image, return_tensors="pt")

    >>> outputs = model(**inputs)
    >>> image_embeds = outputs.image_embeds
    ```"""

    def forward(
        self,
        pixel_values: torch.FloatTensor | None = None,
        interpolate_pos_encoding: bool = False,
        **kwargs: Unpack[TransformersKwargs],
    ):
        r"""
        Examples:

        ```python
        >>> from PIL import Image
        >>> import httpx
        >>> from io import BytesIO

        >>> from transformers import AutoProcessor, MetaClip2VisionModelWithProjection

        >>> model = MetaClip2VisionModelWithProjection.from_pretrained("facebook/metaclip-2-worldwide-huge-quickgelu")
        >>> processor = AutoProcessor.from_pretrained("facebook/metaclip-2-worldwide-huge-quickgelu")

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> with httpx.stream("GET", url) as response:
        ...     image = Image.open(BytesIO(response.read()))

        >>> inputs = processor(images=image, return_tensors="pt")

        >>> outputs = model(**inputs)
        >>> image_embeds = outputs.image_embeds
        ```"""
        # Pure delegation: the inherited CLIPVisionModelWithProjection forward does all the work.
        return super().forward(
            pixel_values=pixel_values,
            interpolate_pos_encoding=interpolate_pos_encoding,
            **kwargs,
        )
class MetaClip2ForImageClassification(CLIPForImageClassification):
    # Pass-through subclass: behavior comes entirely from CLIPForImageClassification;
    # only the model name/type differs.
    pass
# Explicit public API of this module.
__all__ = [
    "MetaClip2Config",
    "MetaClip2TextConfig",
    "MetaClip2VisionConfig",
    "MetaClip2Model",
    "MetaClip2PreTrainedModel",
    "MetaClip2TextModel",
    "MetaClip2TextModelWithProjection",
    "MetaClip2VisionModel",
    "MetaClip2VisionModelWithProjection",
    "MetaClip2ForImageClassification",
]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/metaclip_2/modular_metaclip_2.py",
"license": "Apache License 2.0",
"lines": 624,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
huggingface/transformers:tests/models/metaclip_2/test_modeling_metaclip_2.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch MetaClip2 model."""
import inspect
import tempfile
import unittest
import numpy as np
import pytest
import requests
from parameterized import parameterized
from transformers import MetaClip2Config, MetaClip2TextConfig, MetaClip2VisionConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import (
is_torch_available,
is_vision_available,
)
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import (
TEST_EAGER_MATCHES_SDPA_INFERENCE_PARAMETERIZATION,
ModelTesterMixin,
floats_tensor,
ids_tensor,
is_flaky,
random_attention_mask,
)
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MetaClip2ForImageClassification,
MetaClip2Model,
MetaClip2TextModel,
MetaClip2TextModelWithProjection,
MetaClip2VisionModel,
MetaClip2VisionModelWithProjection,
)
if is_vision_available():
from PIL import Image
from transformers import CLIPProcessor
class MetaClip2VisionModelTester:
    """Builds a tiny MetaClip2 vision config plus random pixel inputs for the common model tests."""

    def __init__(
        self,
        parent,
        batch_size=12,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        hidden_size=32,
        projection_dim=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        dropout=0.1,
        attention_dropout=0.1,
        initializer_range=0.02,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.scope = scope

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        # Random pixel values shaped (batch, channels, H, W).
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        """Return a MetaClip2VisionConfig mirroring this tester's hyperparameters."""
        return MetaClip2VisionConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            projection_dim=self.projection_dim,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            dropout=self.dropout,
            attention_dropout=self.attention_dropout,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values):
        """Forward a batch through MetaClip2VisionModel and check output shapes."""
        model = MetaClip2VisionModel(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
        image_size = (self.image_size, self.image_size)
        patch_size = (self.patch_size, self.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_model_with_projection(self, config, pixel_values):
        """Same as above but for the projection variant: checks `image_embeds` shape."""
        model = MetaClip2VisionModelWithProjection(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
        image_size = (self.image_size, self.image_size)
        patch_size = (self.patch_size, self.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size))
        self.parent.assertEqual(result.image_embeds.shape, (self.batch_size, self.projection_dim))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict

    @parameterized.expand(TEST_EAGER_MATCHES_SDPA_INFERENCE_PARAMETERIZATION)
    def test_eager_matches_sdpa_inference(self, *args):
        # NOTE(review): this parameterized test method sits on the tester class rather
        # than a unittest.TestCase — confirm it is intentionally placed/discovered here.
        return getattr(ModelTesterMixin, self._testMethodName)(self)
class MetaClip2ModelTesterMixin(ModelTesterMixin):
    """
    ModelTesterMixin variant tailored to MetaClip2 models.

    MetaClip2 checkpoints may be text-only, vision-only, or composite (text + vision),
    produce different output logits, and are not supposed to be used or tested with
    padding_side="left" — hence the SDPA dispatch check is overridden here to verify
    the attention implementation on each sub-model as well as on the top-level config.
    """

    def test_sdpa_can_dispatch_composite_models(self):
        for model_class in self.all_model_classes:
            config, _ = self.model_tester.prepare_config_and_inputs_for_common()
            base_model = model_class(config)

            with tempfile.TemporaryDirectory() as save_dir:
                base_model.save_pretrained(save_dir)

                # SDPA is the default implementation, but request it explicitly for clarity.
                sdpa_model = model_class.from_pretrained(save_dir, attn_implementation="sdpa")
                sdpa_model = sdpa_model.eval().to(torch_device)

                # Reload the same weights with eager attention.
                eager_model = model_class.from_pretrained(
                    save_dir,
                    attn_implementation="eager",
                )
                eager_model = eager_model.eval().to(torch_device)

                # Composite models must propagate the implementation to every tower present.
                for tower_name in ("vision_model", "text_model"):
                    if hasattr(sdpa_model, tower_name):
                        self.assertTrue(getattr(sdpa_model, tower_name).config._attn_implementation == "sdpa")
                        self.assertTrue(getattr(eager_model, tower_name).config._attn_implementation == "eager")

                self.assertTrue(sdpa_model.config._attn_implementation == "sdpa")
                self.assertTrue(eager_model.config._attn_implementation == "eager")
@require_torch
class MetaClip2VisionModelTest(MetaClip2ModelTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as MetaClip2 does not use input_ids, inputs_embeds,
    attention_mask and seq_length.
    """

    all_model_classes = (MetaClip2VisionModel, MetaClip2VisionModelWithProjection) if is_torch_available() else ()
    test_resize_embeddings = False

    def setUp(self):
        self.model_tester = MetaClip2VisionModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=MetaClip2VisionConfig, has_text_modality=False, hidden_size=37
        )

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MetaClip2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_get_set_embeddings(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            # Input embeddings must exist (patch embedding); output embeddings are optional.
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_with_projection(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_with_projection(*config_and_inputs)

    @unittest.skip(reason="This module does not support standalone training")
    def test_training(self):
        pass

    @unittest.skip(reason="This module does not support standalone training")
    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(reason="This module does not support standalone training")
    def test_training_gradient_checkpointing_use_reentrant_false(self):
        pass

    @unittest.skip(reason="This module does not support standalone training")
    def test_training_gradient_checkpointing_use_reentrant_true(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        model_name = "facebook/metaclip-2-worldwide-huge-quickgelu"
        model = MetaClip2VisionModel.from_pretrained(model_name)
        self.assertIsNotNone(model)

    @slow
    def test_model_with_projection_from_pretrained(self):
        model_name = "facebook/metaclip-2-worldwide-huge-quickgelu"
        model = MetaClip2VisionModelWithProjection.from_pretrained(model_name)
        self.assertIsNotNone(model)
        self.assertTrue(hasattr(model, "visual_projection"))

    @parameterized.expand(TEST_EAGER_MATCHES_SDPA_INFERENCE_PARAMETERIZATION)
    @is_flaky()
    def test_eager_matches_sdpa_inference(self, *args):
        # adding only flaky decorator here and call the parent test method
        return getattr(ModelTesterMixin, self._testMethodName)(self)

    def test_sdpa_can_dispatch_composite_models(self):
        super().test_sdpa_can_dispatch_composite_models()
class MetaClip2TextModelTester:
    """Builds a tiny MetaClip2 text config plus random token inputs for the common model tests."""

    def __init__(
        self,
        parent,
        batch_size=12,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        projection_dim=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        eos_token_id=2,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.eos_token_id = eos_token_id
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        # ensure that the last token is the eos token
        input_ids[:, -1] = self.eos_token_id
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        if input_mask is not None:
            batch_size, seq_length = input_mask.shape
            # Rewrite the mask as left-aligned attention: each row attends to a random,
            # non-empty prefix (start index drawn from [1, seq_length - 1)) and masks the rest.
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0

        config = self.get_config()

        return config, input_ids, input_mask

    def get_config(self):
        """Return a MetaClip2TextConfig mirroring this tester's hyperparameters."""
        return MetaClip2TextConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            projection_dim=self.projection_dim,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            dropout=self.dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            eos_token_id=self.eos_token_id,
        )

    def create_and_check_model(self, config, input_ids, input_mask):
        """Forward with and without the mask; shape checks run on the mask-free result."""
        model = MetaClip2TextModel(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(input_ids, attention_mask=input_mask)
            result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_model_with_projection(self, config, input_ids, input_mask):
        """Same as above but for the projection variant: checks `text_embeds` shape."""
        model = MetaClip2TextModelWithProjection(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(input_ids, attention_mask=input_mask)
            result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.text_embeds.shape, (self.batch_size, self.projection_dim))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MetaClip2TextModelTest(MetaClip2ModelTesterMixin, unittest.TestCase):
    """Common-suite tests for the MetaClip2 text tower (with and without projection head)."""

    all_model_classes = (MetaClip2TextModel, MetaClip2TextModelWithProjection) if is_torch_available() else ()
    model_split_percents = [0.5, 0.8, 0.9]

    def setUp(self):
        self.model_tester = MetaClip2TextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MetaClip2TextConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_with_projection(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_with_projection(*config_and_inputs)

    @unittest.skip(reason="This module does not support standalone training")
    def test_training(self):
        pass

    @unittest.skip(reason="This module does not support standalone training")
    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(reason="This module does not support standalone training")
    def test_training_gradient_checkpointing_use_reentrant_false(self):
        pass

    @unittest.skip(reason="This module does not support standalone training")
    def test_training_gradient_checkpointing_use_reentrant_true(self):
        pass

    @unittest.skip(reason="MetaClip2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        model_name = "facebook/metaclip-2-worldwide-huge-quickgelu"
        model = MetaClip2TextModel.from_pretrained(model_name)
        self.assertIsNotNone(model)

    @slow
    def test_model_with_projection_from_pretrained(self):
        model_name = "facebook/metaclip-2-worldwide-huge-quickgelu"
        model = MetaClip2TextModelWithProjection.from_pretrained(model_name)
        self.assertIsNotNone(model)
        self.assertTrue(hasattr(model, "text_projection"))

    @parameterized.expand(TEST_EAGER_MATCHES_SDPA_INFERENCE_PARAMETERIZATION)
    @slow
    @is_flaky()
    def test_eager_matches_sdpa_inference(self, *args):
        # adding only flaky decorator here and call the parent test method
        return getattr(ModelTesterMixin, self._testMethodName)(self)

    def test_sdpa_can_dispatch_composite_models(self):
        super().test_sdpa_can_dispatch_composite_models()

    def test_sdpa_can_dispatch_on_flash(self):
        self.skipTest(
            reason="MetaClip2TextModel has two attention masks: `causal_attention_mask` and `attention_mask`"
        )
class MetaClip2ModelTester:
    """Combines the text and vision testers to drive tests of the full MetaClip2 dual-encoder model."""

    def __init__(self, parent, text_kwargs=None, vision_kwargs=None, is_training=True):
        if text_kwargs is None:
            text_kwargs = {}
        if vision_kwargs is None:
            vision_kwargs = {}

        self.parent = parent
        self.text_model_tester = MetaClip2TextModelTester(parent, **text_kwargs)
        self.vision_model_tester = MetaClip2VisionModelTester(parent, **vision_kwargs)
        self.batch_size = self.text_model_tester.batch_size  # need bs for batching_equivalence test
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, attention_mask, pixel_values) for one forward pass."""
        # The sub-testers' configs are deliberately discarded: get_config() rebuilds them
        # below so the combined MetaClip2Config stays the single source of truth.
        _, input_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs()
        _, pixel_values = self.vision_model_tester.prepare_config_and_inputs()
        config = self.get_config()
        return config, input_ids, attention_mask, pixel_values

    def get_config(self):
        """Build the combined config from the two sub-testers (projection_dim fixed to 64)."""
        return MetaClip2Config(
            text_config=self.text_model_tester.get_config().to_dict(),
            vision_config=self.vision_model_tester.get_config().to_dict(),
            projection_dim=64,
        )

    def create_and_check_model(self, config, input_ids, attention_mask, pixel_values):
        """Forward a text+image batch and verify similarity-logit shapes."""
        model = MetaClip2Model(config).to(torch_device).eval()
        with torch.no_grad():
            result = model(input_ids, pixel_values, attention_mask)
        # logits_per_image is (num_images, num_texts); logits_per_text its transpose.
        self.parent.assertEqual(
            result.logits_per_image.shape, (self.vision_model_tester.batch_size, self.text_model_tester.batch_size)
        )
        self.parent.assertEqual(
            result.logits_per_text.shape, (self.text_model_tester.batch_size, self.vision_model_tester.batch_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask, pixel_values = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "pixel_values": pixel_values,
            "return_loss": True,
        }
        return config, inputs_dict
@require_torch
class MetaClip2ModelTest(MetaClip2ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-suite and pipeline tests for the composite (text + vision) MetaClip2 model."""

    all_model_classes = (MetaClip2Model,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": MetaClip2Model, "image-feature-extraction": MetaClip2VisionModel}
        if is_torch_available()
        else {}
    )
    additional_model_inputs = ["pixel_values"]
    test_resize_embeddings = False
    test_attention_outputs = False
    # Composite model: holds separately-configured text and vision sub-models.
    _is_composite = True

    def setUp(self):
        self.model_tester = MetaClip2ModelTester(self)
        common_properties = ["projection_dim", "logit_scale_init_value"]
        self.config_tester = ConfigTester(
            self, config_class=MetaClip2Config, has_text_modality=False, common_properties=common_properties
        )

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="Hidden_states is tested in individual model tests")
    def test_hidden_states_output(self):
        pass

    @unittest.skip(reason="Inputs_embeds is tested in individual model tests")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Retain_grad is tested in individual model tests")
    def test_retain_grad_hidden_states_attentions(self):
        pass

    @unittest.skip(reason="MetaClip2Model does not have input/output embeddings")
    def test_model_get_set_embeddings(self):
        pass

    def test_load_vision_text_config(self):
        """Sub-configs saved as part of the combined config must round-trip individually."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        # Save MetaClip2Config and check if we can load MetaClip2VisionConfig from it
        with tempfile.TemporaryDirectory() as tmp_dir_name:
            config.save_pretrained(tmp_dir_name)
            vision_config = MetaClip2VisionConfig.from_pretrained(tmp_dir_name)
            self.assertDictEqual(config.vision_config.to_dict(), vision_config.to_dict())

        # Save MetaClip2Config and check if we can load MetaClip2TextConfig from it
        with tempfile.TemporaryDirectory() as tmp_dir_name:
            config.save_pretrained(tmp_dir_name)
            text_config = MetaClip2TextConfig.from_pretrained(tmp_dir_name)
            self.assertDictEqual(config.text_config.to_dict(), text_config.to_dict())

    @slow
    def test_model_from_pretrained(self):
        model_name = "facebook/metaclip-2-worldwide-huge-quickgelu"
        model = MetaClip2Model.from_pretrained(model_name)
        self.assertIsNotNone(model)

    @parameterized.expand(TEST_EAGER_MATCHES_SDPA_INFERENCE_PARAMETERIZATION)
    @slow
    @is_flaky()
    def test_eager_matches_sdpa_inference(self, *args):
        # adding only flaky decorator here and call the parent test method
        return getattr(ModelTesterMixin, self._testMethodName)(self)

    def test_sdpa_can_dispatch_composite_models(self):
        super().test_sdpa_can_dispatch_composite_models()

    def test_sdpa_can_dispatch_on_flash(self):
        self.skipTest(
            reason="MetaClip2 text tower has two attention masks: `causal_attention_mask` and `attention_mask`"
        )

    def test_sdpa_can_compile_dynamic(self):
        self.skipTest(reason="MetaClip2 model can't be compiled dynamic, error in metaclip_2_loss`")

    @unittest.skip(reason="The MetaCLIP2 family currently does not work with output_attentions.")
    def test_get_text_features_attentions(self):
        # This test should no longer be skipped once this architecture is refactored to work with output_attentions.
        pass

    @unittest.skip(reason="The MetaCLIP2 family currently does not work with output_hidden_states.")
    def test_get_text_features_hidden_states(self):
        # This test should no longer be skipped once this architecture is refactored to work with output_hidden_states.
        pass

    @unittest.skip(reason="The MetaCLIP2 family currently does not work with output_attentions.")
    def test_get_image_features_attentions(self):
        # This test should no longer be skipped once this architecture is refactored to work with output_attentions.
        pass

    @unittest.skip(reason="The MetaCLIP2 family currently does not work with output_hidden_states.")
    def test_get_image_features_hidden_states(self):
        # This test should no longer be skipped once this architecture is refactored to work with output_hidden_states.
        pass
class MetaClip2ForImageClassificationModelTester(MetaClip2ModelTester):
    """Vision-only tester for the image-classification head: reuses the combined
    tester's config but feeds pixel values exclusively."""

    def __init__(self, parent):
        super().__init__(parent)
        # The classification head sits on the vision tower, so mirror its geometry here.
        vision_tester = self.vision_model_tester
        for shared_attr in ("batch_size", "num_hidden_layers", "hidden_size", "seq_length"):
            setattr(self, shared_attr, getattr(vision_tester, shared_attr))

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values); the vision sub-tester's own config is discarded."""
        _, pixel_values = self.vision_model_tester.prepare_config_and_inputs()
        return self.get_config(), pixel_values

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values = self.prepare_config_and_inputs()
        return config, {"pixel_values": pixel_values}
@require_torch
class MetaClip2ForImageClassificationModelTest(MetaClip2ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-suite and pipeline tests for MetaClip2ForImageClassification."""

    all_model_classes = (MetaClip2ForImageClassification,) if is_torch_available() else ()
    pipeline_model_mapping = {"image-classification": MetaClip2ForImageClassification} if is_torch_available() else {}
    test_resize_embeddings = False
    test_attention_outputs = False

    def setUp(self):
        self.model_tester = MetaClip2ForImageClassificationModelTester(self)

    @unittest.skip(reason="MetaClip2ForImageClassification does not support inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MetaClip2ForImageClassification does not support inputs_embeds")
    def test_model_get_set_embeddings(self):
        pass

    @pytest.mark.xfail(reason="This architecture seems to not compute gradients for some layer.")
    def test_training_gradient_checkpointing(self):
        super().test_training_gradient_checkpointing()

    @pytest.mark.xfail(reason="This architecture seems to not compute gradients for some layer.")
    def test_training_gradient_checkpointing_use_reentrant_false(self):
        super().test_training_gradient_checkpointing_use_reentrant_false()

    @pytest.mark.xfail(reason="This architecture seems to not compute gradients for some layer.")
    def test_training_gradient_checkpointing_use_reentrant_true(self):
        super().test_training_gradient_checkpointing_use_reentrant_true()

    @parameterized.expand(TEST_EAGER_MATCHES_SDPA_INFERENCE_PARAMETERIZATION)
    @slow
    @is_flaky()
    def test_eager_matches_sdpa_inference(self, *args):
        # adding only flaky decorator here and call the parent test method
        return getattr(ModelTesterMixin, self._testMethodName)(self)

    def test_sdpa_can_dispatch_composite_models(self):
        super().test_sdpa_can_dispatch_composite_models()
# We will verify our results on an image of cute cats
def prepare_img():
    """Download and return the standard COCO test image used by the integration test."""
    response = requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True)
    return Image.open(response.raw)
@require_vision
@require_torch
class MetaClip2ModelIntegrationTest(unittest.TestCase):
    """End-to-end inference check against the released worldwide-huge checkpoint."""

    @slow
    def test_inference(self):
        checkpoint = "facebook/metaclip-2-worldwide-huge-quickgelu"
        model = MetaClip2Model.from_pretrained(checkpoint, attn_implementation="sdpa").to(torch_device)
        processor = CLIPProcessor.from_pretrained(checkpoint)

        image = prepare_img()
        batch = processor(
            text=["a photo of a cat", "a photo of a dog"], images=image, padding=True, return_tensors="pt"
        ).to(torch_device)

        # forward pass
        with torch.no_grad():
            result = model(**batch)

        # Similarity logits: one row per image / one column per text, and the transpose.
        num_images = batch.pixel_values.shape[0]
        num_texts = batch.input_ids.shape[0]
        self.assertEqual(result.logits_per_image.shape, torch.Size((num_images, num_texts)))
        self.assertEqual(result.logits_per_text.shape, torch.Size((num_texts, num_images)))

        # Reference values captured from a known-good run of this checkpoint.
        expected_logits = torch.tensor([[19.9531, 13.5910]], device=torch_device)
        torch.testing.assert_close(result.logits_per_image, expected_logits, rtol=1e-3, atol=1e-3)
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/metaclip_2/test_modeling_metaclip_2.py",
"license": "Apache License 2.0",
"lines": 589,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:src/transformers/models/kosmos2_5/configuration_kosmos2_5.py | # Copyright 2024 Microsoft Research and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""KOSMOS-2.5 model configuration"""
from ...configuration_utils import PreTrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
class Kosmos2_5TextConfig(PreTrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`Kosmos2_5TextModel`]. It is used to instantiate a
    KOSMOS-2.5 text decoder according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the text decoder of the KOSMOS-2.5
    [microsoft/kosmos-2.5](https://huggingface.co/microsoft/kosmos-2.5) architecture.

    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 108481):
            Vocabulary size of the Kosmos2_5 model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`Kosmos2_5Model`].
        max_position_embeddings (`int`, *optional*, defaults to 4096):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        embed_dim (`int`, *optional*, defaults to 1536):
            Dimensionality of the layers and the pooler layer.
        layers (`int`, *optional*, defaults to 24):
            Number of hidden layers in the Transformer encoder.
        ffn_dim (`int`, *optional*, defaults to 6144):
            Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
        attention_heads (`int`, *optional*, defaults to 16):
            Number of attention heads for each attention layer in the Transformer encoder.
        activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"silu"` and `"gelu_new"` are supported.
        dropout (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        activation_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for activations inside the fully connected layer.
        layerdrop (`float`, *optional*, defaults to 0.0):
            The LayerDrop probability for the decoder. See the [LayerDrop paper](see
            https://huggingface.co/papers/1909.11556) for more details.
        layer_norm_eps (`float`, *optional*, defaults to 1e-5):
            The epsilon used by the layer normalization layers.
        init_std (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        scale_embedding (`bool`, *optional*, defaults to `True`):
            Scale embeddings by dividing by sqrt(embed_dim).
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models).
        pad_token_id (`int`, *optional*, defaults to 1):
            Padding token id.
        bos_token_id (`int`, *optional*, defaults to 0):
            Beginning-of-stream token id.
        eos_token_id (`int`, *optional*, defaults to 2):
            End-of-stream token id.
    """

    model_type = "kosmos_2_5_text_model"
    base_config_key = "text_config"
    keys_to_ignore_at_inference = ["past_key_values"]
    # Map the generic Transformers attribute names onto this config's KOSMOS-specific ones.
    attribute_map = {
        "num_attention_heads": "attention_heads",
        "hidden_size": "embed_dim",
        "num_hidden_layers": "layers",
    }

    def __init__(
        self,
        vocab_size=108481,
        max_position_embeddings=4096,
        embed_dim=1536,
        layers=24,
        ffn_dim=6144,
        attention_heads=16,
        activation_function="gelu",
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        layerdrop=0.0,
        layer_norm_eps=1e-5,
        init_std=0.02,
        scale_embedding=True,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.embed_dim = embed_dim
        self.layers = layers
        self.ffn_dim = ffn_dim
        self.attention_heads = attention_heads
        self.activation_function = activation_function
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.init_std = init_std
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
class Kosmos2_5VisionConfig(PreTrainedConfig):
    r"""
    Configuration class for [`Kosmos2_5VisionModel`]. Instantiating it with the default arguments yields a
    configuration similar to the vision encoder of the KOSMOS-2.5
    [microsoft/kosmos-2.5](https://huggingface.co/microsoft/kosmos-2.5) architecture.

    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.

    Args:
        hidden_size (`int`, *optional*, defaults to 1536):
            Dimensionality of the encoder layers and the pooler layer.
        patch_embed_hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the input patch_embedding layer in the Transformer encoder.
        intermediate_size (`int`, *optional*, defaults to 3968):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        head_dim (`int`, *optional*, defaults to 64):
            Dimensionality of the key, query, value projections per attention head.
        num_hidden_layers (`int`, *optional*, defaults to 18):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 24):
            Number of attention heads for each attention layer in the Transformer encoder.
        dense_act_fn (`str` or `function`, *optional*, defaults to `"gelu_new"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"` and `"gelu_new"` are supported.
        layer_norm_eps (`float`, *optional*, defaults to 1e-06):
            The epsilon used by the layer normalization layers.
        dropout_rate (`float`, *optional*, defaults to 0.0):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        max_num_patches (`int`, *optional*, defaults to 4096):
            Maximum sequence length (here number of patches) supported by the model.
        initializer_factor (`float`, *optional*, defaults to 1.0):
            A factor scaling all initializer ranges.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.

    Example:

    ```python
    >>> from transformers import Kosmos2_5VisionConfig, Kosmos2_5VisionModel

    >>> # Initializing a Kosmos2_5VisionConfig with microsoft/kosmos-2.5 style configuration
    >>> configuration = Kosmos2_5VisionConfig()

    >>> # Initializing a Kosmos2_5VisionModel (with random weights) from the microsoft/kosmos-2.5 style configuration
    >>> model = Kosmos2_5VisionModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "kosmos_2_5_vision_model"
    base_config_key = "vision_config"

    def __init__(
        self,
        hidden_size=1536,
        patch_embed_hidden_size=768,
        intermediate_size=3968,
        head_dim=64,
        num_hidden_layers=18,
        num_attention_heads=24,
        dense_act_fn="gelu_new",
        layer_norm_eps=1e-6,
        dropout_rate=0.0,
        attention_dropout=0.0,
        max_num_patches=4096,
        initializer_factor=1.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        # Transformer geometry.
        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.head_dim = head_dim
        self.max_num_patches = max_num_patches
        # Regularization and activations.
        self.dropout_rate = dropout_rate
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        # Weight-initialization scaling.
        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range
class Kosmos2_5Config(PreTrainedConfig):
    r"""
    Configuration class for [`Kosmos2_5Model`]. Instantiating it with the default arguments yields a configuration
    similar to the KOSMOS-2.5
    [microsoft/kosmos-2.5](https://huggingface.co/microsoft/kosmos-2.5) architecture.

    Args:
        text_config (`dict`, *optional*):
            Dictionary of configuration options used to initialize [`Kosmos2_5TextConfig`].
        vision_config (`dict`, *optional*):
            Dictionary of configuration options used to initialize [`Kosmos2_5VisionConfig`].
        latent_query_num (`int`, *optional*, defaults to 2048):
            The number of latent query tokens that represent the image features used in the text decoder component.
        tie_word_embeddings (`bool`, *optional*, defaults to `True`):
            Whether the model's input and output word embeddings should be tied.
    """

    model_type = "kosmos-2.5"
    sub_configs = {"text_config": Kosmos2_5TextConfig, "vision_config": Kosmos2_5VisionConfig}

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        latent_query_num=2048,
        tie_word_embeddings=True,
        **kwargs,
    ):
        # Each sub-config may arrive as `None` (use defaults) or as a plain dict (deserialize it).
        if text_config is None:
            text_config = Kosmos2_5TextConfig()
            logger.info("`text_config` is `None`. initializing the `Kosmos2_5TextConfig` with default values.")
        elif isinstance(text_config, dict):
            text_config = Kosmos2_5TextConfig(**text_config)

        if vision_config is None:
            vision_config = Kosmos2_5VisionConfig()
            logger.info("`vision_config` is `None`. initializing the `Kosmos2_5VisionConfig` with default values.")
        elif isinstance(vision_config, dict):
            vision_config = Kosmos2_5VisionConfig(**vision_config)

        self.text_config = text_config
        self.vision_config = vision_config
        self.latent_query_num = latent_query_num
        self.tie_word_embeddings = tie_word_embeddings
        super().__init__(**kwargs)
# Only the composite config is public; the sub-configs are reachable via `Kosmos2_5Config.sub_configs`.
__all__ = ["Kosmos2_5Config"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/kosmos2_5/configuration_kosmos2_5.py",
"license": "Apache License 2.0",
"lines": 227,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/kosmos2_5/convert_kosmos2_5.py | # Copyright 2024 Microsoft Research and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from fairseq.checkpoint_utils import load_checkpoint_to_cpu
from transformers import Kosmos2_5Config, Kosmos2_5ForConditionalGeneration
# Fragment substitutions applied to original checkpoint keys to obtain the HF
# parameter names. Order matters: longer, more specific prefixes come first so
# they win over their shorter substrings (e.g. "gpt_model.decoder.output_projection"
# before "gpt_model.decoder").
KEYS_TO_MODIFY_MAPPING = {
    "gpt_model.decoder.output_projection": "text_model.lm_head",
    "gpt_model.decoder": "text_model.model",
    "img_connector": "image_to_text_projection",
    "img_model.embeddings": "vision_model.embeddings",
    "img_model.encoder": "vision_model.encoder",
    "img_model.layernorm": "vision_model.layernorm",
    "img_model": "vision_model",
    "ln_pre": "pre_layrnorm",
    "ln_post": "post_layernorm",
    "transformer.resblocks": "encoder.layers",
    "ts_attn": "self_attn",
    "ln_1": "layer_norm1",
    "ln_2": "layer_norm2",
    "c_fc": "fc1",
    "c_proj": "fc2",
}

# Checkpoint entries with no counterpart in the HF model.
KEYS_TO_IGNORE = [
    # this buffer in the original code is only used to send weights to the desired device
    "gpt_model.decoder.embed_positions._float_tensor",
    # this weight is never used in the forward pass of the original KOSMOS-2.5
    "gpt_model.decoder.self_attn_sope.scale",
]


def rename_key(key):
    """Apply every matching fragment substitution in `KEYS_TO_MODIFY_MAPPING` to `key`."""
    for old_fragment, new_fragment in KEYS_TO_MODIFY_MAPPING.items():
        if old_fragment in key:
            key = key.replace(old_fragment, new_fragment)
    return key
def convert_kosmos2_5_checkpoint_to_pytorch(checkpoint_path, pytorch_dump_folder_path):
    """Convert an original fairseq KOSMOS-2.5 checkpoint into a HF `Kosmos2_5ForConditionalGeneration` dump.

    Args:
        checkpoint_path: Path to the original fairseq checkpoint file.
        pytorch_dump_folder_path: Output directory passed to `save_pretrained`.
    """
    state = load_checkpoint_to_cpu(checkpoint_path)
    state_dict = state["model"]
    state_dict_keys = list(state_dict.keys())

    config = Kosmos2_5Config()
    # This is necessary to match the results given by the original demo
    config.text_config.no_repeat_ngram_size = 3
    model = Kosmos2_5ForConditionalGeneration(config)

    # convert (by renaming keys), skipping entries that have no HF counterpart
    converted_state_dict = {}
    for key in state_dict_keys:
        if key in KEYS_TO_IGNORE:
            continue
        renamed_key = rename_key(key)
        converted_state_dict[renamed_key] = state_dict[key]

    # check weight loading: `strict=True` verifies the renamed keys match the HF
    # model's parameter names exactly.
    model.load_state_dict(converted_state_dict, strict=True)

    # save the result
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Both arguments are optional and fall back to local default paths.
    parser.add_argument(
        "--kosmos2_5_checkpoint_path",
        default="ckpt.pt",
        type=str,
        required=False,
        help="Path to the official PyTorch dump.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="ckpt",
        type=str,
        required=False,
        help="Path to the output PyTorch model.",
    )
    args = parser.parse_args()
    convert_kosmos2_5_checkpoint_to_pytorch(args.kosmos2_5_checkpoint_path, args.pytorch_dump_folder_path)
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/kosmos2_5/convert_kosmos2_5.py",
"license": "Apache License 2.0",
"lines": 84,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/kosmos2_5/image_processing_kosmos2_5.py | # Copyright 2024 Microsoft Research and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Image processor class for Kosmos2_5."""
import math
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import (
convert_to_rgb,
normalize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_flat_list_of_images,
to_numpy_array,
valid_images,
)
from ...processing_utils import ImagesKwargs
from ...utils import TensorType, is_torch_available, logging
from ...utils.import_utils import requires_backends
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
DEFAULT_FONT_PATH = "ybelkada/fonts"
class Kosmos2_5ImageProcessorKwargs(ImagesKwargs, total=False):
    r"""
    patch_size (`Dict[str, int]`, *optional*, defaults to `{"height": 16, "width": 16}`):
        The patch size to use for the image. According to Kosmos2_5 paper and code, the patch size is 16x16.
    max_patches (`int`, *optional*, defaults to 4096):
        The maximum number of patches to extract from the image as per the
        [KOSMOS 2.5 paper](https://huggingface.co/papers/2309.11419).
    """

    # `total=False`: both keys are optional when this kwargs TypedDict is populated.
    patch_size: dict[str, int]
    max_patches: int
# Copied from transformers.models.pix2struct.image_processing_pix2struct.torch_extract_patches
def torch_extract_patches(image_tensor, patch_height, patch_width):
    """
    Extract non-overlapping patches from a single image tensor.

    Args:
        image_tensor (torch.Tensor):
            The image tensor to extract patches from.
        patch_height (int):
            The height of the patches to extract.
        patch_width (int):
            The width of the patches to extract.

    Returns:
        Tensor of shape (1, `rows`, `columns`, `num_channels` x `patch_height` x `patch_width`).
    """
    requires_backends(torch_extract_patches, ["torch"])

    batched = image_tensor.unsqueeze(0)
    # unfold with stride == kernel size yields disjoint patches, one per column
    unfolded = torch.nn.functional.unfold(batched, (patch_height, patch_width), stride=(patch_height, patch_width))
    unfolded = unfolded.reshape(batched.size(0), batched.size(1), patch_height, patch_width, -1)
    # move the patch index up front, then flatten each patch into one feature vector
    grid = unfolded.permute(0, 4, 2, 3, 1).reshape(
        batched.size(2) // patch_height,
        batched.size(3) // patch_width,
        batched.size(1) * patch_height * patch_width,
    )
    return grid.unsqueeze(0)
# similar to transformers.models.pix2struct.image_processing_pix2struct.Pix2StructImageProcessor, but delete is_vqa and additionally return width and height after resizing
class Kosmos2_5ImageProcessor(BaseImageProcessor):
    r"""
    Constructs a Kosmos2_5 image processor.

    Args:
        do_convert_rgb (`bool`, *optional*, defaults to `True`):
            Whether to convert the image to RGB.
        do_normalize (`bool`, *optional*, defaults to `True`):
            Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
            method. According to Kosmos2_5 paper and code, the image is normalized with its own mean and standard
            deviation.
        patch_size (`Dict[str, int]`, *optional*, defaults to `{"height": 16, "width": 16}`):
            The patch size to use for the image. According to Kosmos2_5 paper and code, the patch size is 16x16.
        max_patches (`int`, *optional*, defaults to 4096):
            The maximum number of patches to extract from the image as per the
            [KOSMOS 2.5 paper](https://huggingface.co/papers/2309.11419).
    """

    model_input_names = ["flattened_patches"]
    valid_kwargs = Kosmos2_5ImageProcessorKwargs

    def __init__(
        self,
        do_convert_rgb: bool = True,
        do_normalize: bool = True,
        patch_size: dict[str, int] | None = None,
        max_patches: int = 4096,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = max_patches

    def extract_flattened_patches(
        self,
        image: np.ndarray,
        max_patches: int,
        patch_size: dict,
        input_data_format: str | ChannelDimension | None = None,
        **kwargs,
    ) -> tuple:
        """
        Resize an image so it tiles into at most `max_patches` patches, then extract the flattened patches.

        Args:
            image (`np.ndarray`):
                Image to extract flattened patches from.
            max_patches (`int`):
                Maximum number of patches to extract.
            patch_size (`dict`):
                Dictionary containing the patch height and width.
            input_data_format (`str` or `ChannelDimension`, *optional*):
                The channel dimension format of the input image. If not provided, it will be inferred.

        Returns:
            A 5-tuple of:
            - result (`np.ndarray`): `max_patches` flattened patches (zero-padded past `rows * columns`), each
              prefixed with its 1-based (row, column) coordinates.
            - resized_width (`int`), resized_height (`int`): image size after the aspect-preserving resize.
            - rows (`int`), columns (`int`): the patch-grid dimensions actually extracted.
        """
        requires_backends(self.extract_flattened_patches, "torch")

        # convert to torch
        image = to_channel_dimension_format(image, ChannelDimension.FIRST, input_data_format)
        image = torch.from_numpy(image)

        patch_height, patch_width = patch_size["height"], patch_size["width"]
        image_height, image_width = get_image_size(image, ChannelDimension.FIRST)

        # maximize scale s.t. the resized image tiles into at most `max_patches` whole patches
        scale = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width))
        num_feasible_rows = max(min(math.floor(scale * image_height / patch_height), max_patches), 1)
        num_feasible_cols = max(min(math.floor(scale * image_width / patch_width), max_patches), 1)
        resized_height = max(num_feasible_rows * patch_height, 1)
        resized_width = max(num_feasible_cols * patch_width, 1)

        image = torch.nn.functional.interpolate(
            image.unsqueeze(0),
            size=(resized_height, resized_width),
            mode="bilinear",
            align_corners=False,
            antialias=True,
        ).squeeze(0)

        # [1, rows, columns, patch_height * patch_width * image_channels]
        patches = torch_extract_patches(image, patch_height, patch_width)

        patches_shape = patches.shape
        rows = patches_shape[1]
        columns = patches_shape[2]
        depth = patches_shape[3]

        # [rows * columns, patch_height * patch_width * image_channels]
        patches = patches.reshape([rows * columns, depth])

        # [rows * columns, 1]
        row_ids = (
            torch.arange(rows, device=patches.device)
            .reshape([rows, 1])
            .repeat(1, columns)
            .reshape([rows * columns, 1])
        )
        col_ids = (
            torch.arange(columns, device=patches.device)
            .reshape([1, columns])
            .repeat(rows, 1)
            .reshape([rows * columns, 1])
        )

        # Offset by 1 so the ids do not contain zeros, which represent padding.
        row_ids += 1
        col_ids += 1

        # Prepare additional patch features.
        # [rows * columns, 1]
        row_ids = row_ids.to(torch.float32)
        col_ids = col_ids.to(torch.float32)

        # [rows * columns, 2 + patch_height * patch_width * image_channels]
        result = torch.cat([row_ids, col_ids, patches], -1)

        # [max_patches, 2 + patch_height * patch_width * image_channels]
        result = torch.nn.functional.pad(result, [0, 0, 0, max_patches - (rows * columns)]).float()

        result = to_numpy_array(result)

        return result, resized_width, resized_height, rows, columns

    # Copied from transformers.models.pix2struct.image_processing_pix2struct.Pix2StructImageProcessor.normalize
    def normalize(
        self,
        image: np.ndarray,
        data_format: str | ChannelDimension | None = None,
        input_data_format: str | ChannelDimension | None = None,
        **kwargs,
    ) -> np.ndarray:
        """
        Normalize an image. image = (image - image_mean) / image_std.

        The mean and std are computed from the image itself (per-image normalization); the std is clamped away
        from zero for near-constant images.

        Args:
            image (`np.ndarray`):
                Image to normalize.
            data_format (`str` or `ChannelDimension`, *optional*):
                The channel dimension format for the output image. If unset, the channel dimension format of the input
                image is used.
            input_data_format (`str` or `ChannelDimension`, *optional*):
                The channel dimension format of the input image. If not provided, it will be inferred.
        """
        if image.dtype == np.uint8:
            image = image.astype(np.float32)

        # take mean across the whole `image`
        mean = np.mean(image)
        std = np.std(image)
        adjusted_stddev = max(std, 1.0 / math.sqrt(np.prod(image.shape)))

        return normalize(
            image,
            mean=mean,
            std=adjusted_stddev,
            data_format=data_format,
            input_data_format=input_data_format,
            **kwargs,
        )

    def preprocess(
        self,
        images: ImageInput,
        do_convert_rgb: bool | None = None,
        do_normalize: bool | None = None,
        max_patches: int | None = None,
        patch_size: dict[str, int] | None = None,
        return_tensors: str | TensorType | None = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        input_data_format: str | ChannelDimension | None = None,
        **kwargs,
    ) -> BatchFeature:
        """
        Preprocess an image or batch of images. The processor first computes the maximum possible number of
        aspect-ratio preserving patches of size `patch_size` that can be extracted from the image. It then pads the
        image with zeros to make the image respect the constraint of `max_patches`.

        Args:
            images (`ImageInput`):
                Image to preprocess. Expects a single or batch of images.
            do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
                Whether to convert the image to RGB.
            do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
                Whether to normalize the image.
            max_patches (`int`, *optional*, defaults to `self.max_patches`):
                Maximum number of patches to extract.
            patch_size (`dict`, *optional*, defaults to `self.patch_size`):
                Dictionary containing the patch height and width.
            return_tensors (`str` or `TensorType`, *optional*):
                The type of tensors to return. Can be one of:
                    - Unset: Return a list of `np.ndarray`.
                    - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
                    - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
            data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
                The channel dimension format for the output image. Can be one of:
                    - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                    - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                    - Unset: Use the channel dimension format of the input image.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format for the input image. If unset, the channel dimension format is inferred
                from the input image. Can be one of:
                    - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                    - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                    - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
        """
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        patch_size = patch_size if patch_size is not None else self.patch_size
        max_patches = max_patches if max_patches is not None else self.max_patches

        # NOTE(review): the explicit `data_format` parameter is never used below; only a stray
        # `data_format` passed through `kwargs` is rejected here.
        if kwargs.get("data_format") is not None:
            raise ValueError("data_format is not an accepted input as the outputs are ")

        images = make_flat_list_of_images(images)
        if not valid_images(images):
            raise ValueError("Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if input_data_format is None:
            # We assume that all images have the same channel dimension format.
            input_data_format = infer_channel_dimension_format(images[0])

        flattened_patches, width, height, rows, cols, attention_masks = [], [], [], [], [], []
        for image in images:
            if do_normalize:
                image = self.normalize(image=image, input_data_format=input_data_format)
            # convert to torch tensor and permute
            patches, resized_width, resized_height, n_rows, n_columns = self.extract_flattened_patches(
                image=image,
                max_patches=max_patches,
                patch_size=patch_size,
                input_data_format=input_data_format,
            )
            flattened_patches.append(patches)
            width.append(resized_width)
            height.append(resized_height)
            rows.append(n_rows)
            cols.append(n_columns)
            # create attention mask in numpy: a patch row is real iff it is not all-zero padding
            attention_masks.append((patches.sum(axis=-1) != 0).astype(np.float32))

        encoded_outputs = BatchFeature(
            data={
                "flattened_patches": flattened_patches,
                "attention_mask": attention_masks,
                "width": width,
                "height": height,
                "rows": rows,
                "cols": cols,
            },
            tensor_type=return_tensors,
        )

        return encoded_outputs
# Public API of this module.
__all__ = ["Kosmos2_5ImageProcessor"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/kosmos2_5/image_processing_kosmos2_5.py",
"license": "Apache License 2.0",
"lines": 302,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/kosmos2_5/image_processing_kosmos2_5_fast.py | # Copyright 2025 Microsoft Research and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fast Image processor class for Kosmos2_5."""
import math
import torch
from ...image_processing_utils import BatchFeature
from ...image_processing_utils_fast import (
BaseImageProcessorFast,
group_images_by_shape,
reorder_images,
)
from ...image_utils import ChannelDimension, ImageInput, get_image_size
from ...processing_utils import Unpack
from ...utils import TensorType, auto_docstring
from .image_processing_kosmos2_5 import Kosmos2_5ImageProcessorKwargs
# Similar to transformers.models.pix2struct.image_processing_pix2struct.torch_extract_patches but dealing with a batch of images directly.
def torch_extract_patches(image_tensor, patch_height, patch_width):
    """
    Extract non-overlapping patches from a batch of image tensors.

    Args:
        image_tensor (torch.Tensor):
            The image tensor to extract patches from, shape (batch_size, channels, height, width).
        patch_height (int):
            The height of the patches to extract.
        patch_width (int):
            The width of the patches to extract.

    Returns:
        Tensor of shape (batch_size, `rows`, `columns`, `num_channels` x `patch_height` x `patch_width`).
    """
    batch_size, num_channels = image_tensor.size(0), image_tensor.size(1)
    rows = image_tensor.size(2) // patch_height
    columns = image_tensor.size(3) // patch_width

    # unfold with stride == kernel size yields disjoint patches, one per column
    flat = torch.nn.functional.unfold(image_tensor, (patch_height, patch_width), stride=(patch_height, patch_width))
    flat = flat.reshape(batch_size, num_channels, patch_height, patch_width, -1)
    # move the patch index up front, then flatten each patch into one feature vector
    return flat.permute(0, 4, 2, 3, 1).reshape(batch_size, rows, columns, num_channels * patch_height * patch_width)
@auto_docstring
class Kosmos2_5ImageProcessorFast(BaseImageProcessorFast):
    # To be checked against the slow image processor
    # None values left after checking can be removed
    do_normalize = True
    do_convert_rgb = True
    patch_size = {"height": 16, "width": 16}
    max_patches = 4096
    rescale_factor = None
    valid_kwargs = Kosmos2_5ImageProcessorKwargs

    def __init__(self, **kwargs: Unpack[Kosmos2_5ImageProcessorKwargs]):
        super().__init__(**kwargs)

    @auto_docstring
    def preprocess(self, images: ImageInput, **kwargs: Unpack[Kosmos2_5ImageProcessorKwargs]) -> BatchFeature:
        # return super().preprocess(images, **kwargs)
        # TODO: revert once the issue is fixed: https://huggingface.slack.com/archives/C02TXKQQLE5/p1743411133979019
        # NOTE(review): `image_mean`/`image_std` passed here are placeholders; `normalize` below
        # replaces them with per-image statistics.
        return super().preprocess(images, image_mean=0.0, image_std=0.0, **kwargs)

    def normalize(
        self,
        image: "torch.Tensor",
        **kwargs,
    ) -> "torch.Tensor":
        """
        Normalize an image. image = (image - image_mean) / image_std.

        The mean and std are computed per image over all of its elements; the std is clamped away from zero
        for near-constant images.

        Args:
            image (`torch.Tensor`):
                Batch of images to normalize, shape (batch_size, n_channels, height, width).
        """
        # Q: should we keep this?
        if image.dtype == torch.uint8:
            image = image.to(dtype=torch.float32)

        # take mean across the whole `image` except the batch dim (= 0).
        dim = list(range(1, image.ndim))
        mean = torch.mean(image, dim=dim)
        std = torch.std(image, dim=dim)
        # num_elements in a single image
        num_elements = torch.tensor(torch.numel(image[0]))
        adjusted_stddev = torch.max(std, 1.0 / torch.sqrt(num_elements))

        # change `image` from [batch_size, n_channels, width, height] to [n_channels, batch_size, width, height]
        image = torch.transpose(image, 0, 1)
        # 'torchvision.transforms.Normalize` works on the usual channel dimension (dim=1) which is the batch
        # dimension before we use `transpose`.
        image = super().normalize(
            image,
            mean=mean,
            std=adjusted_stddev,
            **kwargs,
        )
        # back to [batch_size, n_channels, width, height]
        normalized_image = torch.transpose(image, 0, 1)

        return normalized_image

    def extract_flattened_patches(
        self,
        image: "torch.Tensor",
        max_patches: int,
        patch_size: dict,
    ) -> tuple:
        """
        Resize a batch of same-sized images and extract flattened patches from them.

        Args:
            image (`torch.Tensor`):
                Batch of images to extract flattened patches from, shape (batch_size, n_channels, height, width).
            max_patches (`int`):
                Maximum number of patches to extract.
            patch_size (`dict`):
                Dictionary containing the patch height and width.

        Returns:
            A 5-tuple of:
            - result (`torch.Tensor`): `(batch_size, max_patches, 2 + patch_dim)` flattened, zero-padded patches,
              each prefixed with its 1-based (row, column) coordinates.
            - resized_width (`int`), resized_height (`int`): image size after the aspect-preserving resize.
            - rows (`int`), columns (`int`): the patch-grid dimensions actually extracted.
        """
        patch_height, patch_width = patch_size["height"], patch_size["width"]
        image_height, image_width = get_image_size(image, ChannelDimension.FIRST)

        # maximize scale s.t. the resized image tiles into at most `max_patches` whole patches
        scale = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width))
        num_feasible_rows = max(min(math.floor(scale * image_height / patch_height), max_patches), 1)
        num_feasible_cols = max(min(math.floor(scale * image_width / patch_width), max_patches), 1)
        resized_height = max(num_feasible_rows * patch_height, 1)
        resized_width = max(num_feasible_cols * patch_width, 1)

        image = torch.nn.functional.interpolate(
            image,
            size=(resized_height, resized_width),
            mode="bilinear",
            align_corners=False,
            antialias=True,
        )

        # [batch_size, rows, columns, patch_height * patch_width * image_channels]
        patches = torch_extract_patches(image, patch_height, patch_width)

        patches_shape = patches.shape
        batch_size = patches_shape[0]
        rows = patches_shape[1]
        columns = patches_shape[2]
        depth = patches_shape[3]

        # [batch_size, rows * columns, patch_height * patch_width * image_channels]
        patches = patches.reshape([batch_size, rows * columns, depth])

        # [rows * columns, 1]
        row_ids = (
            torch.arange(rows, device=patches.device)
            .reshape([rows, 1])
            .repeat(1, columns)
            .reshape([rows * columns, 1])
        )
        col_ids = (
            torch.arange(columns, device=patches.device)
            .reshape([1, columns])
            .repeat(rows, 1)
            .reshape([rows * columns, 1])
        )

        # Offset by 1 so the ids do not contain zeros, which represent padding.
        row_ids += 1
        col_ids += 1

        # Prepare additional patch features.
        # [batch_size, rows * columns, 1]
        row_ids = row_ids.unsqueeze(0).repeat(batch_size, 1, 1).to(torch.float32)
        col_ids = col_ids.unsqueeze(0).repeat(batch_size, 1, 1).to(torch.float32)

        # [rows * columns, 2 + patch_height * patch_width * image_channels]
        result = torch.cat([row_ids, col_ids, patches], -1)

        # [batch_size, max_patches, 2 + patch_height * patch_width * image_channels]
        result = torch.nn.functional.pad(result, [0, 0, 0, max_patches - (rows * columns)]).float()

        return result, resized_width, resized_height, rows, columns

    def _preprocess(
        self,
        images: list["torch.Tensor"],
        do_normalize: bool,
        max_patches: int,
        patch_size: dict[str, int],
        disable_grouping: bool | None,
        return_tensors: str | TensorType | None,
        **kwargs,
    ) -> BatchFeature:
        # Q: should we have this?
        if kwargs.get("data_format") is not None:
            raise ValueError("data_format is not an accepted input as the outputs are ")

        width, height, rows, cols, attention_masks = [], [], [], [], []
        obj_idx_to_new_index_map = {}
        current_index = -1

        # Group images by size for batched resizing
        processed_image_patches_grouped = {}
        grouped_images, grouped_images_index = group_images_by_shape(images, disable_grouping=disable_grouping)
        for shape, stacked_images in grouped_images.items():
            # TODO: if it's possible to do in batch mode
            if do_normalize:
                stacked_images = self.normalize(stacked_images, **kwargs)
            # TODO: we need this to be in batch from
            # convert to torch tensor and permute
            patches, resized_width, resized_height, n_rows, n_columns = self.extract_flattened_patches(
                image=stacked_images,
                max_patches=max_patches,
                patch_size=patch_size,
            )
            n_of_stacked_images = stacked_images.size()[0]
            width.extend([resized_width] * n_of_stacked_images)
            height.extend([resized_height] * n_of_stacked_images)
            rows.extend([n_rows] * n_of_stacked_images)
            cols.extend([n_columns] * n_of_stacked_images)
            # create attention mask in numpy
            attention_masks.extend(list((patches.sum(axis=-1) != 0).to(dtype=torch.float32)))

            processed_image_patches_grouped[shape] = list(patches)
            # remember which slot of the per-group metadata lists each patch tensor fills, keyed by object id
            for x in processed_image_patches_grouped[shape]:
                current_index += 1
                obj_idx_to_new_index_map[id(x)] = current_index

        processed_images = reorder_images(processed_image_patches_grouped, grouped_images_index)
        # map original image order -> grouped-output index so the metadata lists line up with the images
        orig_idx_to_new_idx_map = {
            orig_idx: obj_idx_to_new_index_map[id(image)] for orig_idx, image in enumerate(processed_images)
        }
        flattened_patches = processed_images
        width = [width[orig_idx_to_new_idx_map[orig_idx]] for orig_idx in orig_idx_to_new_idx_map]
        height = [height[orig_idx_to_new_idx_map[orig_idx]] for orig_idx in orig_idx_to_new_idx_map]
        rows = [rows[orig_idx_to_new_idx_map[orig_idx]] for orig_idx in orig_idx_to_new_idx_map]
        cols = [cols[orig_idx_to_new_idx_map[orig_idx]] for orig_idx in orig_idx_to_new_idx_map]

        encoded_outputs = BatchFeature(
            data={
                "flattened_patches": flattened_patches,
                "attention_mask": attention_masks,
                "width": width,
                "height": height,
                "rows": rows,
                "cols": cols,
            },
            tensor_type=return_tensors,
        )

        return encoded_outputs
# Public API of this module.
__all__ = ["Kosmos2_5ImageProcessorFast"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/kosmos2_5/image_processing_kosmos2_5_fast.py",
"license": "Apache License 2.0",
"lines": 231,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/kosmos2_5/modeling_kosmos2_5.py | # Copyright 2024 Microsoft Research and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch KOSMOS-2.5 model."""
import math
from collections.abc import Callable
from dataclasses import dataclass
from typing import Any
import torch
from torch import nn
from ... import initialization as init
from ...activations import ACT2FN
from ...cache_utils import Cache, DynamicCache
from ...generation import GenerationMixin
from ...masking_utils import create_causal_mask
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import (
BaseModelOutput,
BaseModelOutputWithPast,
BaseModelOutputWithPastAndCrossAttentions,
BaseModelOutputWithPooling,
CausalLMOutputWithCrossAttentions,
)
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...utils import (
ModelOutput,
TransformersKwargs,
add_start_docstrings,
add_start_docstrings_to_model_forward,
can_return_tuple,
logging,
replace_return_docstrings,
)
from ...utils.generic import is_flash_attention_requested
from .configuration_kosmos2_5 import (
Kosmos2_5Config,
Kosmos2_5TextConfig,
Kosmos2_5VisionConfig,
)
logger = logging.get_logger(__name__)
# Name of the config class used by `replace_return_docstrings`; the library convention
# is to store the class *name* as a string, not the class object itself.
_CONFIG_FOR_DOC = "Kosmos2_5Config"
# Copied from transformers.models.kosmos2.modeling_kosmos2._expand_mask
def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: int | None = None) -> torch.Tensor:
    """
    Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.

    The result is an *additive* bias: positions with mask value 1 (attend) become 0.0 and
    positions with value 0 (padding) become the most negative value representable in `dtype`.
    """
    bsz, src_len = mask.size()
    # Default the target length to the source length (plain self-attention).
    tgt_len = tgt_len if tgt_len is not None else src_len
    expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)
    # Flip polarity: 1 -> 0.0 (keep), 0 -> 1.0 (to be masked out below).
    inverted_mask = 1.0 - expanded_mask
    # Nonzero entries (previously padding) are replaced with dtype's minimum.
    return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min)
KOSMOS2_5_START_DOCSTRING = r"""
This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
etc.)
This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
and behavior.
Parameters:
config ([`Kosmos2_5Config`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
KOSMOS2_5_VISION_INPUTS_DOCSTRING = r"""
Args:
flattened_patches (`torch.FloatTensor` of shape `(batch_size, max_patches, 2 + patch_height * patch_width * image_channels)`):
Flattened patches of the images. `flattened_patches` can be obtained using [`AutoImageProcessor`]. See
[`Kosmos2_5ImageProcessor.__call__`] for details.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
"""
KOSMOS2_5_TEXT_INPUTS_DOCSTRING = r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
it.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
image_embeds: (`torch.FloatTensor` of shape `(batch_size, latent_query_num, hidden_size)`, *optional*):
Sequence of hidden-states at the output of `Kosmos2ImageToTextProjection`.
image_embeds_position_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to indicate the location in a sequence to insert the image features . Mask values selected in `[0,
1]`:
- 1 for places where to put the image features,
- 0 for places that are not for image features (i.e. for text tokens).
past_key_values (`Cache` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
`decoder_input_ids` of shape `(batch_size, sequence_length)`.
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
model's internal embedding lookup matrix.
position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
config.max_position_embeddings - 1]`.
[What are position IDs?](../glossary#position-ids)
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
`past_key_values`).
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
"""
KOSMOS2_5_INPUTS_DOCSTRING = r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
it.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
flattened_patches (`torch.FloatTensor` of shape `(batch_size, max_patches, 2 + patch_height * patch_width * image_channels)`):
Flattened patches of the images. `flattened_patches` can be obtained using [`AutoImageProcessor`]. See
[`Kosmos2_5ImageProcessor.__call__`] for details.
width (`torch.FloatTensor` of shape `(batch_size,)`):
The original width (before resizing) of each image in the batch. This can be obtained using
[`AutoImageProcessor`]. See [`Kosmos2_5ImageProcessor.__call__`] for details.
height (`torch.FloatTensor` of shape `(batch_size,)`):
The original height (before resizing) of each image in the batch. This can be obtained using
[`AutoImageProcessor`]. See [`Kosmos2_5ImageProcessor.__call__`] for details.
image_embeds_position_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to indicate the location in a sequence to insert the image features . Mask values selected in `[0,
1]`:
- 1 for places where to put the image features,
- 0 for places that are not for image features (i.e. for text tokens).
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
past_key_values (`Cache` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
`decoder_input_ids` of shape `(batch_size, sequence_length)`.
image_embeds: (`torch.FloatTensor` of shape `(batch_size, latent_query_num, hidden_size)`, *optional*):
Sequence of hidden-states at the output of `Kosmos2ImageToTextProjection`.
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
model's internal embedding lookup matrix.
position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
config.max_position_embeddings - 1]`.
[What are position IDs?](../glossary#position-ids)
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
`past_key_values`).
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
"""
@dataclass
class Kosmos2_5ModelOutput(ModelOutput):
    """
    Base class for text model's outputs that also contains a pooling of the last hidden states.
    Args:
        last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the model.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
            one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
            Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
        width (`torch.FloatTensor` of shape `(batch_size,)`):
            The original width (before resizing) of each image in the batch.
        height (`torch.FloatTensor` of shape `(batch_size,)`):
            The original height (before resizing) of each image in the batch.
        image_embeds (`torch.FloatTensor` of shape `(batch_size, latent_query_num, hidden_size)`, *optional*):
            Sequence of hidden-states at the output of `Kosmos2ImageToTextProjection`.
        projection_attentions (`tuple(torch.FloatTensor)`, *optional*):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.
            Attentions weights given by `Kosmos2ImageToTextProjection`, after the attention softmax, used to compute
            the weighted average in the self-attention heads.
        vision_model_output(`BaseModelOutputWithPooling`, *optional*):
            The output of the [`Kosmos2VisionModel`].
        past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
            It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
            Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if
            `config.is_encoder_decoder=True` in the cross-attention blocks) that can be used (see `past_key_values`
            input) to speed up sequential decoding.
    """
    last_hidden_state: torch.FloatTensor | None = None
    past_key_values: Cache | None = None
    hidden_states: tuple[torch.FloatTensor] | None = None
    attentions: tuple[torch.FloatTensor] | None = None
    width: torch.FloatTensor | None = None
    height: torch.FloatTensor | None = None
    image_embeds: torch.FloatTensor | None = None
    projection_attentions: tuple[torch.FloatTensor] | None = None
    # Annotation made optional: the field defaults to None like every other field here.
    vision_model_output: BaseModelOutputWithPooling | None = None
    def to_tuple(self) -> tuple[Any]:
        # Flatten to a plain tuple; the nested vision output is itself flattened recursively.
        return tuple((self[k] if k != "vision_model_output" else getattr(self, k).to_tuple()) for k in self.keys())
@dataclass
class Kosmos2_5ForConditionalGenerationModelOutput(ModelOutput):
    """
    Model output class for `Kosmos2_5ForConditionalGeneration`.
    Args:
        loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
            Language modeling loss (for next-token prediction).
        logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
            one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
            Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
        width (`torch.FloatTensor` of shape `(batch_size,)`):
            The original width (before resizing) of each image in the batch.
        height (`torch.FloatTensor` of shape `(batch_size,)`):
            The original height (before resizing) of each image in the batch.
        image_embeds (`torch.FloatTensor` of shape `(batch_size, latent_query_num, hidden_size)`, *optional*):
            Sequence of hidden-states at the output of `Kosmos2ImageToTextProjection`.
        projection_attentions (`tuple(torch.FloatTensor)`, *optional*):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.
            Attentions weights given by `Kosmos2ImageToTextProjection`, after the attention softmax, used to compute
            the weighted average in the self-attention heads.
        vision_model_output(`BaseModelOutputWithPooling`, *optional*):
            The output of the [`Kosmos2VisionModel`].
        past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
            It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
            Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if
            `config.is_encoder_decoder=True` in the cross-attention blocks) that can be used (see `past_key_values`
            input) to speed up sequential decoding.
    """
    loss: torch.FloatTensor | None = None
    logits: torch.FloatTensor | None = None
    past_key_values: Cache | None = None
    hidden_states: tuple[torch.FloatTensor] | None = None
    attentions: tuple[torch.FloatTensor] | None = None
    width: torch.FloatTensor | None = None
    height: torch.FloatTensor | None = None
    image_embeds: torch.FloatTensor | None = None
    projection_attentions: tuple[torch.FloatTensor] | None = None
    # Annotation made optional: the field defaults to None like every other field here.
    vision_model_output: BaseModelOutputWithPooling | None = None
    def to_tuple(self) -> tuple[Any]:
        # Flatten to a plain tuple; the nested vision output is itself flattened recursively.
        return tuple((self[k] if k != "vision_model_output" else getattr(self, k).to_tuple()) for k in self.keys())
# Copied from transformers.models.pix2struct.modeling_pix2struct.Pix2StructLayerNorm with Pix2Struct->Kosmos2_5
class Kosmos2_5LayerNorm(nn.Module):
    """RMS-style layer norm: scales by a learned weight; no mean subtraction, no bias."""
    def __init__(self, hidden_size, eps=1e-6):
        """
        Construct a layernorm module in the T5 style. No bias and no subtraction of mean.
        """
        super().__init__()
        # Learned per-feature scale, initialized to 1 (identity scaling at init).
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps
    def forward(self, hidden_states):
        # T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
        # Square Layer Normalization https://huggingface.co/papers/1910.07467 thus variance is calculated
        # w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
        # half-precision inputs is done in fp32
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        # convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype)
        return self.weight * hidden_states
# similar to transformers.models.pix2struct.modeling_pix2struct.Pix2StructVisionEmbeddings but with `inplace=False`
# TODO: check with krip
class Kosmos2_5VisionEmbeddings(nn.Module):
    """Projects flattened image patches and adds learned row/column position embeddings."""
    def __init__(self, config: "Kosmos2_5VisionConfig") -> None:
        super().__init__()
        self.config = config
        self.patch_projection = nn.Linear(config.patch_embed_hidden_size, config.hidden_size)
        self.row_embedder = nn.Embedding(config.max_num_patches, config.hidden_size)
        self.column_embedder = nn.Embedding(config.max_num_patches, config.hidden_size)
        self.dropout = nn.Dropout(config.dropout_rate, inplace=False)
    def forward(self, flattened_patches: torch.Tensor) -> torch.Tensor:
        # Input layout is `(batch, seq, 2 + patch_values)`: channels 0 and 1 carry the
        # patch's row and column index; the remaining channels carry the pixel values.
        row_idx = flattened_patches[:, :, 0].long()
        col_idx = flattened_patches[:, :, 1].long()
        patch_values = flattened_patches[:, :, 2:]
        projected = self.patch_projection(patch_values)
        row_pos = self.row_embedder(row_idx).to(projected.device)
        col_pos = self.column_embedder(col_idx).to(projected.device)
        # Sum content and the two positional signals, then apply (non-inplace) dropout.
        return self.dropout(projected + row_pos + col_pos)
# Copied from transformers.models.t5.modeling_t5.T5DenseGatedActDense with T5DenseGatedActDense->Pix2StructVisionMlp,T5Config->Pix2StructVisionConfig,config.d_model->config.hidden_size,dropout_rate->dropout_rate
class Kosmos2_5VisionMlp(nn.Module):
    """Gated feed-forward block (T5 `DenseGatedActDense` style): wo(act(wi_0(x)) * wi_1(x))."""
    def __init__(self, config: Kosmos2_5VisionConfig):
        super().__init__()
        self.wi_0 = nn.Linear(config.hidden_size, config.intermediate_size, bias=False)
        self.wi_1 = nn.Linear(config.hidden_size, config.intermediate_size, bias=False)
        self.wo = nn.Linear(config.intermediate_size, config.hidden_size, bias=False)
        self.dropout = nn.Dropout(config.dropout_rate)
        self.act = ACT2FN[config.dense_act_fn]
        # Ignore copy
        self.config = config
    def forward(self, hidden_states):
        # Gated activation: elementwise product of the activated and the linear projection.
        hidden_gelu = self.act(self.wi_0(hidden_states))
        hidden_linear = self.wi_1(hidden_states)
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states)
        # To make 8bit quantization work for google/flan-t5-xxl, self.wo is kept in float32.
        # See https://github.com/huggingface/transformers/issues/20287
        # we also make sure the weights are not in `int8` in case users will force `_keep_in_fp32_modules` to be `None``
        if (
            isinstance(self.wo.weight, torch.Tensor)
            and hidden_states.dtype != self.wo.weight.dtype
            and self.wo.weight.dtype != torch.int8
        ):
            hidden_states = hidden_states.to(self.wo.weight.dtype)
        hidden_states = self.wo(hidden_states)
        return hidden_states
def eager_attention_forward(
module: nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: torch.Tensor | None,
scaling: float,
dropout: float = 0.0,
**kwargs,
):
# this weight maybe overflow with fp16
attn_weights = torch.matmul(query, key.transpose(2, 3)) * scaling
if attention_mask is not None:
attn_weights = attn_weights + attention_mask
attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
attn_output = torch.matmul(attn_weights, value)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, attn_weights
class Kosmos2_5VisionAttention(nn.Module):
    """Multi-head self-attention for the Kosmos-2.5 vision encoder (bias-free projections)."""
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.hidden_size = config.hidden_size
        self.head_dim = config.head_dim
        self.n_heads = config.num_attention_heads
        self.dropout = config.attention_dropout
        self.inner_dim = self.n_heads * self.head_dim
        self.is_causal = False
        self.scaling = self.head_dim**-0.5
        self.query = nn.Linear(self.hidden_size, self.inner_dim, bias=False)
        self.key = nn.Linear(self.hidden_size, self.inner_dim, bias=False)
        self.value = nn.Linear(self.hidden_size, self.inner_dim, bias=False)
        self.output = nn.Linear(self.inner_dim, self.hidden_size, bias=False)
        self.gradient_checkpointing = False
    def forward(
        self,
        hidden_states,
        attention_mask=None,
        **kwargs: Unpack[TransformersKwargs],
    ):
        """
        Self-attention block
        """
        input_shape = hidden_states.shape[:-1]
        hidden_shape = (*input_shape, -1, self.head_dim)
        query_states = self.query(hidden_states).view(hidden_shape).transpose(1, 2)
        key_states = self.key(hidden_states).view(hidden_shape).transpose(1, 2)
        value_states = self.value(hidden_states).view(hidden_shape).transpose(1, 2)
        # Fix: `ALL_ATTENTION_FUNCTIONS` is a mapping-style registry, so the previous
        # `getattr(ALL_ATTENTION_FUNCTIONS, impl, eager_attention_forward)` lookup could never
        # find a backend stored as a mapping key and silently fell back to eager attention.
        # Use the same `get_interface` accessor as `Kosmos2_5TextAttention` below.
        attention_interface: Callable = ALL_ATTENTION_FUNCTIONS.get_interface(
            self.config._attn_implementation, eager_attention_forward
        )
        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=0.0 if not self.training else self.dropout,
            scaling=self.scaling,
            **kwargs,
        )
        attn_output = attn_output.reshape(*input_shape, -1)
        attn_output = self.output(attn_output)
        return attn_output, attn_weights
class Kosmos2_5VisionLayer(GradientCheckpointingLayer):
    """One pre-norm transformer encoder layer: LN -> self-attention -> residual, LN -> MLP -> residual."""
    def __init__(self, config: Kosmos2_5VisionConfig) -> None:
        super().__init__()
        self.config = config
        self.attention = Kosmos2_5VisionAttention(config)
        self.mlp = Kosmos2_5VisionMlp(config)
        self.pre_mlp_layer_norm = Kosmos2_5LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.pre_attention_layer_norm = Kosmos2_5LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: torch.Tensor | None = None,
        output_attentions: bool = False,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple[torch.Tensor, torch.Tensor] | tuple[torch.Tensor]:
        # Pre-norm self-attention followed by a residual connection.
        normed_input = self.pre_attention_layer_norm(hidden_states)
        attention_output, self_attn_weights = self.attention(
            normed_input,
            attention_mask=attention_mask,
            output_attentions=output_attentions,
            **kwargs,
        )
        hidden_states = attention_output + hidden_states
        # Pre-norm MLP followed by a second residual connection.
        layer_output = self.mlp(self.pre_mlp_layer_norm(hidden_states)) + hidden_states
        # Attention weights are only appended when the caller asked for them.
        return (layer_output, self_attn_weights) if output_attentions else (layer_output,)
# Adapted from transformers.models.pix2struct.modeling_pix2struct.Pix2StructVisionEncoder with Pix2Struct->Kosmos2_5
class Kosmos2_5VisionEncoder(nn.Module):
    """Stack of `Kosmos2_5VisionLayer`s producing a `BaseModelOutput`."""
    def __init__(self, config: Kosmos2_5VisionConfig) -> None:
        super().__init__()
        self.config = config
        self.layer = nn.ModuleList([Kosmos2_5VisionLayer(config) for _ in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False
    def _prepare_attention_mask(self, attention_mask, input_shape, inputs_embeds):
        # Flash attention consumes the 2D padding mask as-is, and only needs it at all
        # when at least one position is actually padded (contains a 0.0 entry).
        if is_flash_attention_requested(self.config):
            if attention_mask is not None and 0.0 in attention_mask:
                return attention_mask
            return None
        # Eager/SDPA paths expect an additive 4D bias: [bsz, seq_len] -> [bsz, 1, tgt_len, src_len].
        if attention_mask is not None:
            return _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to(inputs_embeds.device)
    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: torch.Tensor | None = None,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        **kwargs: Unpack[TransformersKwargs],
    ) -> BaseModelOutput:
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None
        attention_mask = self._prepare_attention_mask(attention_mask, hidden_states.shape[:2], hidden_states)
        for encoder_layer in self.layer:
            # Record the input of each layer (i.e. embeddings first, then every intermediate state).
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
            layer_outputs = encoder_layer(hidden_states, attention_mask, output_attentions, **kwargs)
            hidden_states = layer_outputs[0]
            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)
        # Also record the final hidden state.
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)
        return BaseModelOutput(
            last_hidden_state=hidden_states,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
        )
# Copied from transformers.models.kosmos2.modeling_kosmos2.Kosmos2TextSinusoidalPositionalEmbedding with Kosmos2->Kosmos2_5
class Kosmos2_5TextSinusoidalPositionalEmbedding(nn.Module):
    """This module produces sinusoidal positional embeddings of any length."""
    # Copied from transformers.models.m2m_100.modeling_m2m_100.M2M100SinusoidalPositionalEmbedding.__init__
    def __init__(self, num_positions: int, embedding_dim: int, padding_idx: int | None = None):
        super().__init__()
        # fairseq convention: real positions start after the padding index, hence extra slots.
        self.offset = 2
        self.num_positions = num_positions
        self.embedding_dim = embedding_dim
        self.padding_idx = padding_idx
        self.make_weights(num_positions + self.offset, embedding_dim, padding_idx)
    # Copied from transformers.models.m2m_100.modeling_m2m_100.M2M100SinusoidalPositionalEmbedding.make_weights
    def make_weights(self, num_embeddings: int, embedding_dim: int, padding_idx: int | None = None):
        """(Re)build the sinusoidal table as a non-persistent buffer with `num_embeddings` rows."""
        emb_weights = self.get_embedding(num_embeddings, embedding_dim, padding_idx)
        if hasattr(self, "weights"):
            # in forward put the weights on the correct dtype and device of the param
            emb_weights = emb_weights.to(dtype=self.weights.dtype, device=self.weights.device)
        self.register_buffer("weights", emb_weights, persistent=False)
    @staticmethod
    # Copied from transformers.models.m2m_100.modeling_m2m_100.M2M100SinusoidalPositionalEmbedding.get_embedding
    def get_embedding(num_embeddings: int, embedding_dim: int, padding_idx: int | None = None):
        """
        Build sinusoidal embeddings.
        This matches the implementation in tensor2tensor, but differs slightly from the description in Section 3.5 of
        "Attention Is All You Need".
        """
        half_dim = embedding_dim // 2
        emb = math.log(10000) / (half_dim - 1)
        emb = torch.exp(torch.arange(half_dim, dtype=torch.int64).float() * -emb)
        emb = torch.arange(num_embeddings, dtype=torch.int64).float().unsqueeze(1) * emb.unsqueeze(0)
        # First half of each row holds the sines, second half the cosines.
        emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(num_embeddings, -1)
        if embedding_dim % 2 == 1:
            # zero pad
            emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1)
        if padding_idx is not None:
            # The padding position gets an all-zero embedding.
            emb[padding_idx, :] = 0
        return emb.to(torch.get_default_dtype())
    @torch.no_grad()
    def forward(
        self,
        input_ids: torch.Tensor | None = None,
        inputs_embeds: torch.Tensor | None = None,
        past_key_values_length: int = 0,
        position_ids: torch.Tensor | None = None,
    ):
        if input_ids is not None:
            bsz, seq_len = input_ids.size()
            if position_ids is None:
                # Create the position ids from the input token ids. Any padded tokens remain padded.
                position_ids = self.create_position_ids_from_input_ids(
                    input_ids, self.padding_idx, past_key_values_length
                ).to(input_ids.device)
        else:
            bsz, seq_len = inputs_embeds.size()[:-1]
            if position_ids is None:
                position_ids = self.create_position_ids_from_inputs_embeds(
                    inputs_embeds, past_key_values_length, self.padding_idx
                )
        # expand embeddings if needed
        max_pos = self.padding_idx + 1 + seq_len + past_key_values_length
        if max_pos > self.weights.size(0):
            self.make_weights(max_pos + self.offset, self.embedding_dim, self.padding_idx)
        return self.weights.index_select(0, position_ids.view(-1)).view(bsz, seq_len, self.weights.shape[-1]).detach()
    @staticmethod
    # Copied from transformers.models.m2m_100.modeling_m2m_100.M2M100SinusoidalPositionalEmbedding.create_position_ids_from_inputs_embeds
    def create_position_ids_from_inputs_embeds(inputs_embeds, past_key_values_length, padding_idx):
        """
        We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.
        Args:
            inputs_embeds: torch.Tensor
        Returns: torch.Tensor
        """
        input_shape = inputs_embeds.size()[:-1]
        sequence_length = input_shape[1]
        position_ids = torch.arange(
            padding_idx + 1, sequence_length + padding_idx + 1, dtype=torch.long, device=inputs_embeds.device
        )
        return position_ids.unsqueeze(0).expand(input_shape).contiguous() + past_key_values_length
    @staticmethod
    # Copied from transformers.models.roberta.modeling_roberta.RobertaEmbeddings.create_position_ids_from_input_ids
    def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0):
        """
        Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols
        are ignored. This is modified from fairseq's `utils.make_positions`.
        Args:
            x: torch.Tensor x:
        Returns: torch.Tensor
        """
        # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.
        mask = input_ids.ne(padding_idx).int()
        incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask
        return incremental_indices.long() + padding_idx
# Copied from transformers.models.kosmos2.modeling_kosmos2.Kosmos2TextFFN with Kosmos2->Kosmos2_5
class Kosmos2_5TextFFN(nn.Module):
    """Feed-forward block of the text decoder: fc1 -> activation -> LayerNorm -> fc2, with dropout."""
    def __init__(self, config: Kosmos2_5TextConfig):
        super().__init__()
        self.dropout = config.dropout
        self.activation_fn = ACT2FN[config.activation_function]
        self.activation_dropout = config.activation_dropout
        self.fc1 = nn.Linear(config.embed_dim, config.ffn_dim)
        self.fc2 = nn.Linear(config.ffn_dim, config.embed_dim)
        # LayerNorm applied on the intermediate (ffn_dim-sized) activations before fc2.
        self.ffn_layernorm = nn.LayerNorm(config.ffn_dim, eps=config.layer_norm_eps)
    def forward(self, hidden_states):
        hidden_states = self.activation_fn(self.fc1(hidden_states))
        hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
        hidden_states = self.ffn_layernorm(hidden_states)
        hidden_states = self.fc2(hidden_states)
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
        return hidden_states
class Kosmos2_5TextAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""
    def __init__(
        self,
        config,
        embed_dim: int,
        num_heads: int,
        dropout: float = 0.0,
        is_decoder: bool = False,
        bias: bool = True,
        is_causal=True,
        layer_idx: int | None = None,
    ):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.dropout = dropout
        self.head_dim = embed_dim // num_heads
        if (self.head_dim * num_heads) != self.embed_dim:
            raise ValueError(
                f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
                f" and `num_heads`: {num_heads})."
            )
        self.scaling = self.head_dim**-0.5
        self.is_decoder = is_decoder
        self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.is_causal = is_causal
    def forward(
        self,
        hidden_states: torch.Tensor,  # text part
        encoder_hidden_states: torch.Tensor | None = None,  # image part
        attention_mask: torch.Tensor | None = None,
        past_key_values: Cache | None = None,
        cache_position: torch.LongTensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple[torch.Tensor, torch.Tensor | None, tuple[torch.Tensor] | None]:
        """Self- or cross-attention: keys/values come from `encoder_hidden_states` when given, else from `hidden_states`."""
        input_shape = hidden_states.shape[:-1]
        hidden_shape = (*input_shape, -1, self.head_dim)
        # use encoder_hidden_states if cross attention
        is_cross_attention = encoder_hidden_states is not None
        current_states = encoder_hidden_states if is_cross_attention else hidden_states
        # Key/value sequence length may differ from the query length in cross-attention.
        current_input_shape = current_states.shape[:-1]
        current_hidden_shape = (*current_input_shape, -1, self.head_dim)
        key_states = self.k_proj(current_states).view(current_hidden_shape).transpose(1, 2)
        value_states = self.v_proj(current_states).view(current_hidden_shape).transpose(1, 2)
        query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        # Apply `self.scaling`
        query_states = self.scaling * query_states
        if past_key_values is not None:
            # sin and cos are specific to RoPE models; cache_position needed for the static cache
            cache_kwargs = {"cache_position": cache_position}
            key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
        attention_interface: Callable = ALL_ATTENTION_FUNCTIONS.get_interface(
            self.config._attn_implementation, eager_attention_forward
        )
        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=0.0 if not self.training else self.dropout,
            scaling=1.0,  # We don't use `self.scaling` as it's already applied to `query_states` above .
            **kwargs,
        )
        attn_output = attn_output.reshape(*input_shape, -1).contiguous()
        attn_output = self.out_proj(attn_output)
        return attn_output, attn_weights
class Kosmos2_5TextBlock(GradientCheckpointingLayer):
    """Pre-norm decoder layer: self-attention then feed-forward, each with a residual connection."""

    def __init__(self, config: Kosmos2_5TextConfig, layer_idx: int):
        super().__init__()
        self.embed_dim = config.embed_dim
        self.layer_idx = layer_idx
        self.self_attn = Kosmos2_5TextAttention(
            config,
            embed_dim=self.embed_dim,
            num_heads=config.attention_heads,
            dropout=config.attention_dropout,
            is_decoder=True,
            is_causal=True,
            layer_idx=layer_idx,
        )
        self.dropout = config.dropout
        self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
        self.ffn = Kosmos2_5TextFFN(config)
        self.final_layer_norm = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)

    # Adapted from transformers.models.kosmos2.modeling_kosmos2.Kosmos2TextBlock.forward
    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: torch.Tensor | None = None,
        past_key_values: Cache | None = None,
        output_attentions: bool | None = False,
        use_cache: bool | None = True,
        cache_position: torch.LongTensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple[torch.FloatTensor, tuple[torch.FloatTensor, torch.FloatTensor] | None]:
        # --- self-attention sub-block (pre-norm + residual) ---
        attn_input = self.self_attn_layer_norm(hidden_states)
        attn_output, self_attn_weights = self.self_attn(
            hidden_states=attn_input,
            attention_mask=attention_mask,
            past_key_values=past_key_values,
            output_attentions=output_attentions,
            use_cache=use_cache,
            cache_position=cache_position,
            **kwargs,
        )
        attn_output = nn.functional.dropout(attn_output, p=self.dropout, training=self.training)
        hidden_states = hidden_states + attn_output

        # --- feed-forward sub-block (pre-norm + residual) ---
        hidden_states = hidden_states + self.ffn(self.final_layer_norm(hidden_states))

        # Attention weights are surfaced only on request.
        return (hidden_states, self_attn_weights) if output_attentions else (hidden_states,)
# Adapted from transformers.models.kosmos2.modeling_kosmos2.Kosmos2TextTransformer with Kosmos2->Kosmos2_5
class Kosmos2_5TextTransformer(nn.Module):
    """
    Transformer decoder consisting of `config.layers` layers. Each layer is a [`Kosmos2_5TextBlock`].

    There is no cross attention here: image features are spliced directly into the input
    embeddings at the positions flagged by `image_embeds_position_mask`, and a 2-way
    segment embedding distinguishes image positions from text positions.

    Args:
        config: Kosmos2_5TextConfig
    """

    def __init__(self, config: Kosmos2_5TextConfig):
        super().__init__()
        self.config = config
        self.dropout = config.dropout
        self.layerdrop = config.layerdrop
        # Optionally scale embeddings by sqrt(d) as in the original Transformer.
        self.embed_scale = math.sqrt(config.embed_dim) if config.scale_embedding else 1.0
        self.embed_tokens = nn.Embedding(config.vocab_size, config.embed_dim, padding_idx=config.pad_token_id)
        self.embed_positions = Kosmos2_5TextSinusoidalPositionalEmbedding(
            num_positions=config.max_position_embeddings,
            embedding_dim=config.embed_dim,
            padding_idx=config.pad_token_id,
        )
        # Ignore copy
        # Segment embedding: id 1 for image-token positions, id 0 for text positions.
        self.segment_emb = nn.Embedding(2, config.embed_dim)
        self.layers = nn.ModuleList([Kosmos2_5TextBlock(config, layer_idx) for layer_idx in range(config.layers)])
        self.layer_norm = nn.LayerNorm(config.embed_dim, config.layer_norm_eps)
        self.gradient_checkpointing = False

    def forward(
        self,
        input_ids: torch.Tensor | None = None,
        attention_mask: torch.Tensor | None = None,
        image_embeds: torch.Tensor | None = None,
        image_embeds_position_mask: torch.Tensor | None = None,
        past_key_values: Cache | None = None,
        inputs_embeds: torch.Tensor | None = None,
        position_ids: torch.Tensor | None = None,
        use_cache: bool | None = None,
        output_attentions: bool | None = None,
        output_hidden_states: bool | None = None,
        cache_position: torch.LongTensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> BaseModelOutputWithPastAndCrossAttentions:
        """Run the decoder stack and return a `BaseModelOutputWithPast`."""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError(
                "You cannot specify both input_ids and inputs_embeds at the same time, and must specify either one"
            )
        if self.gradient_checkpointing and self.training and use_cache:
            logger.warning_once(
                "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
            )
            use_cache = False
        # The argument `inputs_embeds` should be the one without being multiplied by `self.embed_scale`.
        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)
        # Ignore copy
        if image_embeds is not None:
            # Splice projected image features into the token embeddings at the masked positions.
            inputs_embeds = inputs_embeds.clone()
            inputs_embeds[image_embeds_position_mask == 1] = image_embeds.to(inputs_embeds.device).view(
                -1, image_embeds.shape[-1]
            )
        inputs_embeds = inputs_embeds * self.embed_scale
        # embed positions
        # NOTE(review): `past_key_values_length` is fixed at 0 here — position offsets during
        # cached decoding appear to be handled via `position_ids` upstream (see
        # `prepare_inputs_for_generation`); confirm.
        positions = self.embed_positions(
            input_ids=input_ids,
            inputs_embeds=inputs_embeds,
            past_key_values_length=0,
            position_ids=position_ids,
        )
        positions = positions.to(inputs_embeds.device)
        # Ignore copy
        if image_embeds_position_mask is not None:
            # Map every non-zero entry to segment id 1 (image); zeros stay segment id 0 (text).
            image_embeds_position_mask = image_embeds_position_mask.ne(0).long()
            segment_embeds = self.segment_emb(image_embeds_position_mask).to(positions.device)
            positions += segment_embeds
        else:
            # No image positions: add the text-segment (id 0) embedding everywhere,
            # broadcasting the (bsz, 1, dim) embedding over the sequence dimension.
            bsz = positions.size(0)
            zero_emb = self.segment_emb(
                torch.zeros((bsz, 1), dtype=torch.long, device=self.segment_emb.weight.device)
            ).to(positions.device)
            positions += zero_emb
        hidden_states = inputs_embeds + positions
        # Embedding dropout. FIX: the previous version applied this dropout a second time
        # after building the causal mask, doubling the effective drop probability during
        # training; it is now applied exactly once.
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
        if use_cache and past_key_values is None:
            past_key_values = DynamicCache(config=self.config)
        if cache_position is None:
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position = torch.arange(
                past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
            )
        causal_mask = create_causal_mask(
            config=self.config,
            inputs_embeds=inputs_embeds,
            attention_mask=attention_mask,
            cache_position=cache_position,
            past_key_values=past_key_values,
        )
        # decoder layers
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None
        for decoder_layer in self.layers:
            if output_hidden_states:
                all_hidden_states += (hidden_states,)
            layer_outputs = decoder_layer(
                hidden_states,
                attention_mask=causal_mask,
                past_key_values=past_key_values,
                output_attentions=output_attentions,
                use_cache=use_cache,
                cache_position=cache_position,
                **kwargs,
            )
            hidden_states = layer_outputs[0]
            if output_attentions:
                all_self_attns += (layer_outputs[1],)
        # add final layer norm
        hidden_states = self.layer_norm(hidden_states)
        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)
        output = BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values if use_cache else None,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
        )
        return output
class Kosmos2_5ImageToTextProjection(nn.Module):
    """The layer that transforms the image model's output to part of the text model's input (namely, image features)"""

    def __init__(self, config: Kosmos2_5Config):
        super().__init__()
        self.dense = nn.Linear(config.vision_config.hidden_size, config.text_config.embed_dim)
        self.latent_query = nn.Parameter(torch.randn(config.latent_query_num, config.text_config.embed_dim))
        # Ignore copy
        self.x_attn = Kosmos2_5TextAttention(
            config.text_config,
            config.text_config.embed_dim,
            config.text_config.attention_heads,
            dropout=config.text_config.attention_dropout,
            is_decoder=False,
            is_causal=False,
        )

    def forward(self, features):
        projected = self.dense(features)
        # Broadcast the learned latent queries across the batch: shape [batch, latent_query_num, h_dim].
        queries = self.latent_query.unsqueeze(0).expand(projected.size(0), -1, -1)
        # The latent queries cross-attend over the projected image features
        # concatenated with the queries themselves.
        attended, attn_weights = self.x_attn(
            hidden_states=queries,
            encoder_hidden_states=torch.cat([projected, queries], dim=1),
            past_key_values=None,
            attention_mask=None,
            output_attentions=None,
            is_causal=False,
        )
        return attended, attn_weights
class Kosmos2_5PreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = Kosmos2_5Config
    input_modalities = ("image", "text")
    supports_gradient_checkpointing = True
    # Modules that must stay whole when sharding the model across devices.
    _no_split_modules = ["Kosmos2_5VisionLayer", "Kosmos2_5TextBlock"]
    _supports_flash_attn = True
    _supports_cache_class = True
    _supports_sdpa = True
    _supports_attention_backend = True

    @torch.no_grad()
    def _init_weights(self, module):
        """Initialize the weights"""
        # Pick the init std from the config of the concrete model class being initialized.
        if isinstance(self, Kosmos2_5VisionModel):
            init_factor = self.config.initializer_factor
            std = self.config.initializer_range * init_factor
        elif isinstance(self, (Kosmos2_5TextModel, Kosmos2_5TextForCausalLM)):
            std = self.config.init_std
        elif isinstance(self, (Kosmos2_5Model, Kosmos2_5ForConditionalGeneration)):
            std = self.config.text_config.init_std
        # NOTE(review): a subclass not matched by the branches above would leave `std`
        # unbound and fail on the first Linear/Embedding below — confirm all concrete
        # subclasses are covered by the isinstance chain.
        if isinstance(module, nn.Linear):
            init.normal_(module.weight, mean=0.0, std=std)
            if module.bias is not None:
                init.zeros_(module.bias)
        elif isinstance(module, nn.Embedding):
            init.normal_(module.weight, mean=0.0, std=std)
            # Here we need the check explicitly, as we slice the weight in the `zeros_` call, so it looses the flag
            if module.padding_idx is not None and not getattr(module.weight, "_is_hf_initialized", False):
                init.zeros_(module.weight[module.padding_idx])
        elif isinstance(module, (nn.LayerNorm, Kosmos2_5LayerNorm)):
            init.ones_(module.weight)
            if getattr(module, "bias", None) is not None:
                init.zeros_(module.bias)
        elif isinstance(module, Kosmos2_5ImageToTextProjection):
            # Latent queries use a unit normal, independent of `std`.
            init.normal_(module.latent_query, mean=0.0, std=1.0)
        elif isinstance(module, Kosmos2_5TextSinusoidalPositionalEmbedding):
            # Sinusoidal tables are deterministic: recompute them and copy into the buffer.
            emb_weights = module.get_embedding(
                module.num_positions + module.offset, module.embedding_dim, module.padding_idx
            )
            init.copy_(module.weights, emb_weights)
class Kosmos2_5VisionModel(Kosmos2_5PreTrainedModel):
    """Pix2Struct-style vision encoder operating on flattened image patches."""

    config_class = Kosmos2_5VisionConfig
    # FIX: this is the vision tower — it consumes image patches, not text. The previous
    # value `("text",)` contradicted the `flattened_patches` input handled in `forward`.
    input_modalities = ("image",)

    # Copied from transformers.models.pix2struct.modeling_pix2struct.Pix2StructVisionModel.__init__ with Pix2Struct->Kosmos2_5
    def __init__(self, config: Kosmos2_5VisionConfig):
        super().__init__(config)
        self.config = config
        self.embeddings = Kosmos2_5VisionEmbeddings(config)
        self.encoder = Kosmos2_5VisionEncoder(config)
        self.layernorm = Kosmos2_5LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        # Initialize weights and apply final processing
        self.post_init()

    # Copied from transformers.models.pix2struct.modeling_pix2struct.Pix2StructVisionModel.get_input_embeddings
    def get_input_embeddings(self):
        return self.embeddings.patch_projection

    # Similar to transformers.models.pix2struct.modeling_pix2struct.Pix2StructVisionModel.forward without docstring
    def forward(
        self,
        flattened_patches: torch.Tensor | None = None,
        attention_mask: torch.Tensor | None = None,
        output_attentions: bool | None = None,
        output_hidden_states: bool | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> BaseModelOutputWithPooling:
        """Encode flattened patches; all-zero padding rows are masked out automatically."""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        if flattened_patches is None:
            raise ValueError("You have to specify flattened_patches")
        if attention_mask is None:
            # check where `flattened_patches` is not 0
            attention_mask = (flattened_patches.sum(dim=-1) != 0).float()
        embedding_output = self.embeddings(flattened_patches)
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            **kwargs,
        )
        sequence_output = encoder_outputs.last_hidden_state
        sequence_output = self.layernorm(sequence_output)
        return BaseModelOutput(
            last_hidden_state=sequence_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
# Adapted from transformers.models.kosmos2.modeling_kosmos2.Kosmos2TextModel with KOSMOS2->KOSMOS2_5
class Kosmos2_5TextModel(Kosmos2_5PreTrainedModel):
    """Bare Kosmos-2.5 text decoder: a thin wrapper delegating to `Kosmos2_5TextTransformer`."""

    config_class = Kosmos2_5TextConfig
    input_modalities = ("text",)

    def __init__(self, config: Kosmos2_5TextConfig):
        super().__init__(config)
        self.model = Kosmos2_5TextTransformer(config)
        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self) -> nn.Module:
        # The token embedding table lives on the inner transformer.
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    @add_start_docstrings_to_model_forward(KOSMOS2_5_TEXT_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BaseModelOutputWithPastAndCrossAttentions, config_class=Kosmos2_5TextConfig)
    def forward(
        self,
        input_ids: torch.Tensor | None = None,
        attention_mask: torch.Tensor | None = None,
        image_embeds: torch.Tensor | None = None,
        image_embeds_position_mask: torch.Tensor | None = None,
        past_key_values: Cache | None = None,
        inputs_embeds: torch.Tensor | None = None,
        position_ids: torch.Tensor | None = None,
        use_cache: bool | None = None,
        output_attentions: bool | None = None,
        output_hidden_states: bool | None = None,
        cache_position: torch.LongTensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> BaseModelOutputWithPastAndCrossAttentions:
        r"""
        Returns:
        """
        # Pure delegation: all default-resolution and validation happens inside
        # Kosmos2_5TextTransformer.forward.
        return self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            image_embeds=image_embeds,
            image_embeds_position_mask=image_embeds_position_mask,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            position_ids=position_ids,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            cache_position=cache_position,
            **kwargs,
        )
@add_start_docstrings(
    """
    KOSMOS-2.5 Model for generating text and image features. The model consists of a vision encoder and a language model.
    """,
    KOSMOS2_5_START_DOCSTRING,
)
class Kosmos2_5Model(Kosmos2_5PreTrainedModel):
    config_class = Kosmos2_5Config

    def __init__(self, config: Kosmos2_5Config):
        super().__init__(config)
        # Each sub-model is built from its own sub-config so it can also be used standalone.
        self.text_model = Kosmos2_5TextModel._from_config(config.text_config)
        self.vision_model = Kosmos2_5VisionModel._from_config(config.vision_config)
        # Maps vision features into the text embedding space via latent-query cross-attention.
        self.image_to_text_projection = Kosmos2_5ImageToTextProjection(config)
        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self) -> nn.Module:
        return self.text_model.model.embed_tokens

    def set_input_embeddings(self, value):
        self.text_model.model.embed_tokens = value

    @can_return_tuple
    @add_start_docstrings_to_model_forward(KOSMOS2_5_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=Kosmos2_5ModelOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: torch.Tensor | None = None,
        flattened_patches: torch.Tensor | None = None,
        width: torch.Tensor | None = None,
        height: torch.Tensor | None = None,
        image_embeds_position_mask: torch.Tensor | None = None,
        attention_mask: torch.Tensor | None = None,
        past_key_values: Cache | None = None,
        image_embeds: torch.Tensor | None = None,
        inputs_embeds: torch.Tensor | None = None,
        position_ids: torch.Tensor | None = None,
        use_cache: bool | None = None,
        output_attentions: bool | None = None,
        output_hidden_states: bool | None = None,
        cache_position: torch.LongTensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> Kosmos2_5ModelOutput:
        r"""
        Returns:
        Examples:
        ```python
        >>> from PIL import Image
        >>> import httpx
        >>> from io import BytesIO
        >>> from transformers import AutoProcessor, Kosmos2_5Model
        >>> model = Kosmos2_5Model.from_pretrained("microsoft/kosmos2.5")
        >>> processor = AutoProcessor.from_pretrained("microsoft/kosmos2.5")
        >>> url = "https://huggingface.co/microsoft/kosmos2.5/resolve/main/snowman.jpg"
        >>> with httpx.stream("GET", url) as response:
        ...     image = Image.open(BytesIO(response.read()))
        >>> text = (
        ...     "<grounding> An image of<phrase> a snowman</phrase><object><patch_index_0044><patch_index_0863>"
        ...     "</object> warming himself by<phrase> a fire</phrase><object><patch_index_0005><patch_index_0911>"
        ...     "</object>"
        ... )
        >>> inputs = processor(text=text, images=image, return_tensors="pt", add_eos_token=True)
        >>> last_hidden_state = model(
        ...     flattened_patches=inputs["flattened_patches"],
        ...     input_ids=inputs["input_ids"],
        ...     attention_mask=inputs["attention_mask"],
        ...     image_embeds_position_mask=inputs["image_embeds_position_mask"],
        ... ).last_hidden_state
        >>> list(last_hidden_state.shape)
        [1, 91, 2048]
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        vision_model_output = None
        projection_attentions = None
        # Compute image features only when not supplied pre-computed by the caller.
        if image_embeds is None:
            if flattened_patches is not None:
                vision_model_output = self.vision_model(
                    flattened_patches=flattened_patches,
                    output_attentions=output_attentions,
                    output_hidden_states=output_hidden_states,
                    **kwargs,
                )
                # normalized features
                image_embeds = nn.functional.normalize(vision_model_output.last_hidden_state, dim=-1)
                # Project the normalized vision features into the text embedding space.
                image_embeds, projection_attentions = self.image_to_text_projection(image_embeds)
        outputs = self.text_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            image_embeds=image_embeds,
            image_embeds_position_mask=image_embeds_position_mask,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            position_ids=position_ids,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            cache_position=cache_position,
            **kwargs,
        )
        # Bundle the text-decoder outputs together with the vision-side intermediates.
        return Kosmos2_5ModelOutput(
            last_hidden_state=outputs.last_hidden_state,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            width=width,
            height=height,
            image_embeds=image_embeds,
            projection_attentions=projection_attentions,
            vision_model_output=vision_model_output,
        )
@add_start_docstrings(
    """
    The text model from KOSMOS-2.5 with a language modeling head on top (linear layer with weights tied to the input
    embeddings).
    """,
    KOSMOS2_5_START_DOCSTRING,
)
class Kosmos2_5TextForCausalLM(Kosmos2_5PreTrainedModel, GenerationMixin):
    config_class = Kosmos2_5TextConfig
    input_modalities = ("text",)
    # The LM head shares its weight matrix with the input token embedding table.
    _tied_weights_keys = {"lm_head.weight": "model.embed_tokens.weight"}

    def __init__(self, config: Kosmos2_5TextConfig):
        super().__init__(config)
        self.model = Kosmos2_5TextTransformer(config)
        self.lm_head = nn.Linear(in_features=config.embed_dim, out_features=config.vocab_size, bias=False)
        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self) -> nn.Module:
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    def get_output_embeddings(self) -> nn.Module:
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    @add_start_docstrings_to_model_forward(KOSMOS2_5_TEXT_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=Kosmos2_5TextConfig)
    def forward(
        self,
        input_ids: torch.Tensor | None = None,
        attention_mask: torch.Tensor | None = None,
        image_embeds: torch.Tensor | None = None,
        image_embeds_position_mask: torch.Tensor | None = None,
        position_ids: torch.Tensor | None = None,
        past_key_values: Cache | None = None,
        inputs_embeds: torch.Tensor | None = None,
        labels: torch.LongTensor | None = None,
        use_cache: bool | None = None,
        output_attentions: bool | None = None,
        output_hidden_states: bool | None = None,
        logits_to_keep: int | torch.Tensor = 0,
        **kwargs: Unpack[TransformersKwargs],
    ) -> CausalLMOutputWithCrossAttentions:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
            `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are
            ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
        Returns:
        """
        if labels is not None:
            if use_cache:
                logger.warning("The `use_cache` argument is changed to `False` since `labels` is provided.")
            # Training with labels never needs the KV cache.
            use_cache = False
        outputs: BaseModelOutputWithPastAndCrossAttentions = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            image_embeds=image_embeds,
            image_embeds_position_mask=image_embeds_position_mask,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            position_ids=position_ids,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            **kwargs,
        )
        hidden_states = outputs.last_hidden_state
        # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
        slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
        logits = self.lm_head(hidden_states[:, slice_indices, :])
        loss = None
        if labels is not None:
            loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)
        return CausalLMOutputWithCrossAttentions(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

    def prepare_inputs_for_generation(
        self,
        input_ids,
        inputs_embeds=None,
        image_embeds=None,
        image_embeds_position_mask=None,
        past_key_values=None,
        attention_mask=None,
        use_cache=None,
        cache_position=None,
        position_ids=None,
        is_first_iteration=False,
        **model_kwargs,
    ):
        # Overwritten -- in specific circumstances we don't want to forward image inputs to the model
        model_inputs = super().prepare_inputs_for_generation(
            input_ids,
            inputs_embeds=inputs_embeds,
            image_embeds=image_embeds,
            image_embeds_position_mask=image_embeds_position_mask,
            past_key_values=past_key_values,
            attention_mask=attention_mask,
            use_cache=use_cache,
            cache_position=cache_position,
            position_ids=position_ids,
            is_first_iteration=is_first_iteration,
            **model_kwargs,
        )
        # Pixel values are used only in the first iteration if available
        # In subsequent iterations, they are already cached
        if past_key_values is not None and past_key_values.get_seq_length() > 0:
            # Image features are already baked into the cache; do not re-feed them.
            model_inputs["image_embeds"] = None
            model_inputs["image_embeds_position_mask"] = None
            # Kosmos2.5 starts position_ids at `pad_token_id`...
            # presumably mirroring the padding-offset convention of the sinusoidal
            # position embedding — TODO confirm against Kosmos2_5TextSinusoidalPositionalEmbedding.
            if model_inputs.get("position_ids") is not None:
                # NOTE: we need this op out-of-place, otherwise it modifies the `model_kwargs` dict used in `generate` in-place!
                model_inputs["position_ids"] = model_inputs["position_ids"] + 1 + self.config.pad_token_id
        # appending `False` to `image_embeds_position_mask` (because `input_ids` grows during generation)
        elif image_embeds_position_mask is not None:
            batch_size, seq_len = inputs_embeds.size()[:-1] if inputs_embeds is not None else input_ids.size()
            mask_len = image_embeds_position_mask.size()[-1]
            # NOTE(review): `input_ids.device` is used below even when shape was taken from
            # `inputs_embeds` — this would fail if `input_ids` is None; confirm callers always pass ids.
            model_inputs["image_embeds_position_mask"] = torch.cat(
                (
                    image_embeds_position_mask,
                    torch.zeros(size=(batch_size, seq_len - mask_len), dtype=torch.bool, device=input_ids.device),
                ),
                dim=1,
            )
            # Kosmos2.5 has offset for position ids, so we need to create them correctly in PositionEmbedding layer
            model_inputs.pop("position_ids", None)
        return model_inputs
@add_start_docstrings(
    """
    KOSMOS-2.5 Model for generating text and bounding boxes given an image. The model consists of a vision encoder and a
    language model.
    """,
    KOSMOS2_5_START_DOCSTRING,
)
class Kosmos2_5ForConditionalGeneration(Kosmos2_5PreTrainedModel, GenerationMixin):
    config_class = Kosmos2_5Config

    def __init__(self, config: Kosmos2_5Config):
        super().__init__(config)
        # Causal LM over text, fed with projected image features.
        self.text_model = Kosmos2_5TextForCausalLM(config.text_config)
        self.vision_model = Kosmos2_5VisionModel(config.vision_config)
        # Maps vision features into the text embedding space via latent-query cross-attention.
        self.image_to_text_projection = Kosmos2_5ImageToTextProjection(config)
        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self) -> nn.Module:
        return self.text_model.model.embed_tokens

    def set_input_embeddings(self, value):
        self.text_model.model.embed_tokens = value

    def get_output_embeddings(self) -> nn.Module:
        return self.text_model.get_output_embeddings()

    def set_output_embeddings(self, new_embeddings):
        self.text_model.set_output_embeddings(new_embeddings)

    @can_return_tuple
    @add_start_docstrings_to_model_forward(KOSMOS2_5_INPUTS_DOCSTRING)
    @replace_return_docstrings(
        output_type=Kosmos2_5ForConditionalGenerationModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: torch.Tensor | None = None,
        flattened_patches: torch.Tensor | None = None,
        width: torch.Tensor | None = None,
        height: torch.Tensor | None = None,
        image_embeds_position_mask: torch.Tensor | None = None,
        attention_mask: torch.Tensor | None = None,
        past_key_values: Cache | None = None,
        image_embeds: torch.Tensor | None = None,
        inputs_embeds: torch.Tensor | None = None,
        position_ids: torch.Tensor | None = None,
        labels: torch.LongTensor | None = None,
        use_cache: bool | None = None,
        output_attentions: bool | None = None,
        output_hidden_states: bool | None = None,
        logits_to_keep: int | torch.Tensor = 0,
        **kwargs: Unpack[TransformersKwargs],
    ) -> Kosmos2_5ForConditionalGenerationModelOutput:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
            `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are
            ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
        Returns:
        Examples:
        ```python
        >>> from PIL import Image
        >>> import httpx
        >>> from io import BytesIO
        >>> import torch
        >>> from transformers import AutoProcessor, Kosmos2_5ForConditionalGeneration
        >>> repo = "microsoft/kosmos-2.5"
        >>> device = "cuda:0"
        >>> dtype = torch.bfloat16  # torch.float16
        >>> model = Kosmos2_5ForConditionalGeneration.from_pretrained(repo, device_map=device, dtype=dtype)
        >>> processor = AutoProcessor.from_pretrained(repo)
        >>> url = "https://huggingface.co/microsoft/kosmos-2.5/resolve/main/receipt_00008.png"
        >>> with httpx.stream("GET", url) as response:
        ...     image = Image.open(BytesIO(response.read()))
        >>> prompt = "<ocr>"  # <md>
        >>> inputs = processor(text=prompt, images=image, return_tensors="pt")
        >>> height, width = inputs.pop("height"), inputs.pop("width")
        >>> inputs = {k: v.to(device) if v is not None else None for k, v in inputs.items()}
        >>> inputs["flattened_patches"] = inputs["flattened_patches"].to(dtype)
        >>> generated_ids = model.generate(**inputs,max_new_tokens=1024)
        >>> generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
        >>> generated_text
        '<ocr><bbox><x_53><y_573><x_69><y_606></bbox>1\n<bbox><x_79><y_573><x_464><y_612></bbox>[REG] BLACK SAKURA\n<bbox><x_690><y_569><x_810><y_606></bbox>45,455\n<bbox><x_53><y_614><x_69><y_648></bbox>1\n<bbox><x_79><y_614><x_468><y_650></bbox>COOKIE DOH SAUCES\n<bbox><x_788><y_609><x_812><y_644></bbox>0\n<bbox><x_50><y_658><x_69><y_693></bbox>1\n<bbox><x_79><y_658><x_358><y_693></bbox>NATA DE COCO\n<bbox><x_790><y_652><x_814><y_687></bbox>0\n<bbox><x_31><y_742><x_820><y_781></bbox>Sub Total 45,455\n<bbox><x_27><y_781><x_822><y_827></bbox>PB1 (10%) 4,545\n<bbox><x_27><y_826><x_824><y_872></bbox>Rounding 0\n<bbox><x_24><y_872><x_827><y_921></bbox>Total 50,000\n<bbox><x_17><y_1056><x_836><y_1108></bbox>Card Payment 50,000\n'
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        vision_model_output = None
        projection_attentions = None
        # Compute image features only when not supplied pre-computed by the caller.
        if image_embeds is None:
            if flattened_patches is not None:
                vision_model_output = self.vision_model(
                    flattened_patches=flattened_patches,
                    output_attentions=output_attentions,
                    output_hidden_states=output_hidden_states,
                    **kwargs,
                )
                # Normalize the vision features, then project them into the text embedding space.
                image_embeds = nn.functional.normalize(vision_model_output.last_hidden_state, dim=-1)
                image_embeds, projection_attentions = self.image_to_text_projection(image_embeds)
        lm_outputs: CausalLMOutputWithCrossAttentions = self.text_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            image_embeds=image_embeds,
            image_embeds_position_mask=image_embeds_position_mask,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            position_ids=position_ids,
            labels=labels,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            logits_to_keep=logits_to_keep,
            **kwargs,
        )
        # Bundle the LM outputs together with the vision-side intermediates.
        return Kosmos2_5ForConditionalGenerationModelOutput(
            loss=lm_outputs.loss,
            logits=lm_outputs.logits,
            past_key_values=lm_outputs.past_key_values,
            hidden_states=lm_outputs.hidden_states,
            attentions=lm_outputs.attentions,
            width=width,
            height=height,
            image_embeds=image_embeds,
            projection_attentions=projection_attentions,
            vision_model_output=vision_model_output,
        )

    def prepare_inputs_for_generation(
        self,
        input_ids,
        flattened_patches=None,
        image_embeds=None,
        image_embeds_position_mask=None,
        past_key_values=None,
        attention_mask=None,
        use_cache=None,
        cache_position=None,
        position_ids=None,
        is_first_iteration=False,
        **model_kwargs,
    ):
        # Overwritten -- in specific circumstances we don't want to forward image inputs to the model
        # Delegate the cache/position bookkeeping to the inner causal LM.
        model_inputs = self.text_model.prepare_inputs_for_generation(
            input_ids,
            image_embeds=image_embeds,
            image_embeds_position_mask=image_embeds_position_mask,
            past_key_values=past_key_values,
            attention_mask=attention_mask,
            use_cache=use_cache,
            cache_position=cache_position,
            position_ids=position_ids,
            is_first_iteration=is_first_iteration,
            **model_kwargs,
        )
        if is_first_iteration or not use_cache:
            # If we're in cached decoding stage, `flattened_patches` should be `None` because `input_ids` do not contain special image token anymore
            # Otherwise we need `flattened_patches` to be passed to model
            model_inputs["flattened_patches"] = flattened_patches
        return model_inputs
# Public symbols re-exported by the `kosmos2_5` module.
# NOTE(review): Kosmos2_5TextModel, Kosmos2_5TextForCausalLM and Kosmos2_5VisionModel
# are defined above but not exported here — confirm this is intentional.
__all__ = [
    "Kosmos2_5ForConditionalGeneration",
    "Kosmos2_5Model",
    "Kosmos2_5PreTrainedModel",
]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/kosmos2_5/modeling_kosmos2_5.py",
"license": "Apache License 2.0",
"lines": 1422,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/kosmos2_5/processing_kosmos2_5.py | # Copyright 2024 Microsoft Research and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Processor class for Kosmos2_5.
"""
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessingKwargs, ProcessorMixin, Unpack
from ...tokenization_utils_base import TextInput
from ...utils import auto_docstring, is_torch_available
if is_torch_available():
import torch
class Kosmos2_5ProcessorKwargs(ProcessingKwargs, total=False):
    """Typed kwargs accepted by `Kosmos2_5Processor.__call__`, with per-modality defaults."""

    # Defaults merged into user-supplied kwargs by `ProcessorMixin._merge_kwargs`.
    _defaults = {
        "text_kwargs": dict(padding=True, return_token_type_ids=False, stride=0, truncation=True),
        "images_kwargs": dict(max_patches=4096),
        "common_kwargs": dict(return_tensors="pt"),
    }
@auto_docstring
class Kosmos2_5Processor(ProcessorMixin):
    """
    Wraps a Kosmos-2.5 image processor and tokenizer into a single processor.

    Prepends an image-placeholder prompt (`<image>` + `num_image_tokens` placeholder
    tokens + `</image>`) to each text and builds the `image_embeds_position_mask`
    marking where image embeddings are spliced into the token sequence.
    """

    def __init__(self, image_processor, tokenizer, num_image_tokens: int = 2048):
        r"""
        num_image_tokens (`int`, *optional*, defaults to 2048):
            Number of image tokens used as a placeholder.
        """
        self.image_start_token = tokenizer.boi_token  # "<image>" : fixed token for the start of image
        self.image_end_token = tokenizer.eoi_token  # "</image>" : fixed token for the end of image
        self.image_token = tokenizer.image_token  # "<s>" : within a <image> ... </image> pair, these <s> tokens indicate they are positions reserved for an image
        self.num_image_tokens = num_image_tokens
        super().__init__(image_processor, tokenizer)

    @auto_docstring
    def __call__(
        self,
        images: ImageInput | None = None,
        text: TextInput | list[TextInput] = None,
        **kwargs: Unpack[Kosmos2_5ProcessorKwargs],
    ) -> BatchFeature:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")
        if images is None:
            raise ValueError("Kosmos2_5Processor requires images to be passed.")
        output_kwargs = self._merge_kwargs(
            Kosmos2_5ProcessorKwargs,
            tokenizer_init_kwargs=self.tokenizer.init_kwargs,
            **kwargs,
        )
        encoding = BatchFeature()
        # `images` is guaranteed non-None here (validated above), so no further guard is needed.
        image_encoding = self.image_processor(images, **output_kwargs["images_kwargs"])
        # `rows`/`cols` are patch-grid bookkeeping from the image processor; the model does not consume them.
        image_encoding.pop("rows")
        image_encoding.pop("cols")
        encoding.update(image_encoding)
        if text is not None:
            # Prefix every text with the placeholder span: <bos><image><s>*num_image_tokens</image>
            prompt = f"{self.tokenizer.bos_token}{self.image_start_token}{self.image_token * self.num_image_tokens}{self.image_end_token}"
            if isinstance(text, str):
                text = [prompt + text]
            else:
                text = [prompt + t for t in text]
            # Renamed from `input` to avoid shadowing the builtin.
            text_encoding = self.tokenizer(text, **output_kwargs["text_kwargs"])
            batch_size, seq_len = text_encoding.input_ids.shape
            # Mask convention: 0 -> text position, 1 -> image-embedding position,
            # -1 -> special tokens (boi after bos, and eoi right after the placeholders).
            image_embeds_position_mask = [0, -1] + [1] * self.num_image_tokens + [-1]
            image_embeds_position_mask += [0] * (seq_len - len(image_embeds_position_mask))
            image_embeds_position_mask = (
                torch.LongTensor(image_embeds_position_mask).unsqueeze(0).repeat(batch_size, 1)
            )
            encoding.update(
                {
                    "input_ids": text_encoding.input_ids,
                    "attention_mask": text_encoding.attention_mask,
                    "image_embeds_position_mask": image_embeds_position_mask,
                }
            )
        return encoding

    def batch_decode(self, *args, **kwargs):
        """
        This method forwards all its arguments to Kosmos2_5TokenizerFast's [`~PreTrainedTokenizer.batch_decode`].
        Please refer to the docstring of this method for more information.
        """
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """
        This method forwards all its arguments to Kosmos2_5TokenizerFast's [`~PreTrainedTokenizer.decode`]. Please
        refer to the docstring of this method for more information.
        """
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        """Combined, order-preserving de-duplicated input names of tokenizer and image processor."""
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
# Public API of this module, used by `from ... import *` and the lazy loader.
__all__ = ["Kosmos2_5Processor"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/kosmos2_5/processing_kosmos2_5.py",
"license": "Apache License 2.0",
"lines": 109,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:tests/models/kosmos2_5/test_image_processing_kosmos2_5.py | # Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import pytest
from transformers.image_utils import load_image
from transformers.testing_utils import require_torch, require_torch_accelerator, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_torchvision_available, is_vision_available
from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs
from ...test_processing_common import url_to_local_path
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import Kosmos2_5ImageProcessor
if is_torchvision_available():
from transformers import Kosmos2_5ImageProcessorFast
class Kosmos2_5ImageProcessingTester:
    """Holds hyper-parameters and input builders shared by the Kosmos-2.5 image-processing tests."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_normalize=True,
        do_convert_rgb=True,
        patch_size=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        # Fall back to the defaults used by the reference checkpoint when not overridden.
        self.size = {"height": 20, "width": 20} if size is None else size
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = [512, 1024, 2048, 4096]
        self.patch_size = {"height": 16, "width": 16} if patch_size is None else patch_size

    def prepare_image_processor_dict(self):
        """Kwargs forwarded to the image-processor constructor under test."""
        return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}

    def prepare_dummy_image(self):
        """Resolve and load a fixed RGB test image."""
        image_url = url_to_local_path(
            "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
        )
        return load_image(image_url).convert("RGB")

    def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        """Build a batch of random test images in the requested container type."""
        return prepare_image_inputs(
            batch_size=self.batch_size,
            num_channels=self.num_channels,
            min_resolution=self.min_resolution,
            max_resolution=self.max_resolution,
            equal_resolution=equal_resolution,
            numpify=numpify,
            torchify=torchify,
        )
@require_torch
@require_vision
class Kosmos2_5ImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase):
    """Shape and slow/fast-equivalence tests for the Kosmos-2.5 image processors."""

    image_processing_class = Kosmos2_5ImageProcessor if is_vision_available() else None
    fast_image_processing_class = Kosmos2_5ImageProcessorFast if is_torchvision_available() else None

    def setUp(self):
        super().setUp()
        self.image_processor_tester = Kosmos2_5ImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def _expected_hidden_dim(self):
        # Each flattened patch holds patch_h * patch_w * channels pixel values plus
        # 2 leading slots for the (row, col) patch coordinates.
        tester = self.image_processor_tester
        return (tester.patch_size["height"] * tester.patch_size["width"]) * tester.num_channels + 2

    def _check_flattened_patch_shapes(self, image_inputs, **processor_kwargs):
        """Shared body of the `test_call_*` tests: checks the `flattened_patches` shape
        for a single image and for a batch, across all configured `max_patches` values."""
        image_processor = self.image_processing_class(**self.image_processor_dict)
        expected_hidden_dim = self._expected_hidden_dim()
        for max_patch in self.image_processor_tester.max_patches:
            # Not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch, **processor_kwargs
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))
            # Batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch, **processor_kwargs
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )

    # Overwrite from the common test to use `flattened_patches` instead of `pixel_values`.
    # TODO: enhance the common test to avoid overwriting
    @require_vision
    @require_torch
    def test_slow_fast_equivalence(self):
        if not self.test_slow_image_processor or not self.test_fast_image_processor:
            self.skipTest(reason="Skipping slow/fast equivalence test")
        if self.image_processing_class is None or self.fast_image_processing_class is None:
            self.skipTest(reason="Skipping slow/fast equivalence test as one of the image processors is not defined")
        dummy_image = load_image(url_to_local_path("http://images.cocodataset.org/val2017/000000039769.jpg"))
        image_processor_slow = self.image_processing_class(**self.image_processor_dict)
        image_processor_fast = self.fast_image_processing_class(**self.image_processor_dict)
        encoding_slow = image_processor_slow(dummy_image, return_tensors="pt")
        encoding_fast = image_processor_fast(dummy_image, return_tensors="pt")
        self.assertTrue(torch.allclose(encoding_slow.flattened_patches, encoding_fast.flattened_patches, atol=1e-1))
        self.assertLessEqual(
            torch.mean(torch.abs(encoding_slow.flattened_patches - encoding_fast.flattened_patches)).item(), 1e-3
        )

    # Overwrite from the common test to use `flattened_patches` instead of `pixel_values`.
    # TODO: enhance the common test to avoid overwriting
    @require_vision
    @require_torch
    def test_slow_fast_equivalence_batched(self):
        if not self.test_slow_image_processor or not self.test_fast_image_processor:
            self.skipTest(reason="Skipping slow/fast equivalence test")
        if self.image_processing_class is None or self.fast_image_processing_class is None:
            self.skipTest(reason="Skipping slow/fast equivalence test as one of the image processors is not defined")
        if hasattr(self.image_processor_tester, "do_center_crop") and self.image_processor_tester.do_center_crop:
            self.skipTest(
                reason="Skipping as do_center_crop is True and center_crop functions are not equivalent for fast and slow processors"
            )
        dummy_images = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, torchify=True)
        image_processor_slow = self.image_processing_class(**self.image_processor_dict)
        image_processor_fast = self.fast_image_processing_class(**self.image_processor_dict)
        encoding_slow = image_processor_slow(dummy_images, return_tensors="pt")
        encoding_fast = image_processor_fast(dummy_images, return_tensors="pt")
        self.assertTrue(torch.allclose(encoding_slow.flattened_patches, encoding_fast.flattened_patches, atol=1e-1))
        self.assertLessEqual(
            torch.mean(torch.abs(encoding_slow.flattened_patches - encoding_fast.flattened_patches)).item(), 1e-3
        )

    # Overwrite from the common test to use `flattened_patches` instead of `pixel_values`.
    # TODO: enhance the common test to avoid overwriting + fix this compile test.
    # NOTE(review): this processor emits `flattened_patches`, not `pixel_values`; the
    # attribute accesses below likely need updating when this test is unskipped — confirm.
    @unittest.skip("Failing with `AttributeError: 'StrictLessThan' object has no attribute 'diff'`.")
    @slow
    @require_torch_accelerator
    @require_vision
    @pytest.mark.torch_compile_test
    def test_can_compile_fast_image_processor(self):
        if self.fast_image_processing_class is None:
            self.skipTest("Skipping compilation test as fast image processor is not defined")
        torch.compiler.reset()
        input_image = torch.randint(0, 255, (3, 224, 224), dtype=torch.uint8)
        image_processor = self.fast_image_processing_class(**self.image_processor_dict)
        output_eager = image_processor(input_image, device=torch_device, return_tensors="pt")
        image_processor = torch.compile(image_processor, mode="reduce-overhead")
        output_compiled = image_processor(input_image, device=torch_device, return_tensors="pt")
        self._assert_slow_fast_tensors_equivalence(
            output_eager.pixel_values, output_compiled.pixel_values, atol=1e-4, rtol=1e-4, mean_atol=1e-5
        )

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_expected_patches(self):
        # Regression check of the mean patch value for a fixed reference image.
        dummy_image = self.image_processor_tester.prepare_dummy_image()
        image_processor = self.image_processing_class(**self.image_processor_dict)
        max_patch = 2048
        inputs = image_processor(dummy_image, return_tensors="pt", max_patches=max_patch)
        self.assertTrue(torch.allclose(inputs.flattened_patches.mean(), torch.tensor(0.0606), atol=1e-3, rtol=1e-3))

    def test_call_pil(self):
        image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        self._check_flattened_patch_shapes(image_inputs)

    def test_call_numpy(self):
        image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        self._check_flattened_patch_shapes(image_inputs)

    def test_call_numpy_4_channels(self):
        # Temporarily switch the tester to 4-channel inputs; restore even if assertions fail.
        self.image_processor_tester.num_channels = 4
        try:
            image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, numpify=True)
            for image in image_inputs:
                self.assertIsInstance(image, np.ndarray)
            self._check_flattened_patch_shapes(image_inputs, input_data_format="channels_last")
        finally:
            self.image_processor_tester.num_channels = 3

    def test_call_pytorch(self):
        image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        self._check_flattened_patch_shapes(image_inputs)
@require_torch
@require_vision
class Kosmos2_5ImageProcessingTestFourChannels(ImageProcessingTestMixin, unittest.TestCase):
    """Variant of the image-processing tests feeding 4-channel (e.g. RGBA) inputs."""

    image_processing_class = Kosmos2_5ImageProcessor if is_vision_available() else None
    fast_image_processing_class = Kosmos2_5ImageProcessorFast if is_torchvision_available() else None

    def setUp(self):
        super().setUp()
        self.image_processor_tester = Kosmos2_5ImageProcessingTester(self, num_channels=4)
        # 4-channel inputs are reduced to 3 channels by the processor.
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    # Overwrite from the common test to use `flattened_patches` instead of `pixel_values`.
    # TODO: enhance the common test to avoid overwriting
    @unittest.skip(reason="Kosmos2_5ImageProcessor does not support 4 channels yet")  # FIXME Amy
    @require_vision
    @require_torch
    def test_slow_fast_equivalence(self):
        if not self.test_slow_image_processor or not self.test_fast_image_processor:
            self.skipTest(reason="Skipping slow/fast equivalence test")
        if self.image_processing_class is None or self.fast_image_processing_class is None:
            self.skipTest(reason="Skipping slow/fast equivalence test as one of the image processors is not defined")
        dummy_image = load_image(url_to_local_path("http://images.cocodataset.org/val2017/000000039769.jpg"))
        image_processor_slow = self.image_processing_class(**self.image_processor_dict)
        image_processor_fast = self.fast_image_processing_class(**self.image_processor_dict)
        encoding_slow = image_processor_slow(dummy_image, return_tensors="pt")
        encoding_fast = image_processor_fast(dummy_image, return_tensors="pt")
        self.assertTrue(torch.allclose(encoding_slow.flattened_patches, encoding_fast.flattened_patches, atol=1e-1))
        self.assertLessEqual(
            torch.mean(torch.abs(encoding_slow.flattened_patches - encoding_fast.flattened_patches)).item(), 1e-3
        )

    @unittest.skip(reason="Kosmos2_5ImageProcessor does not support 4 channels yet")
    def test_slow_fast_equivalence_batched(self):
        return super().test_slow_fast_equivalence_batched()

    @unittest.skip(reason="Kosmos2_5ImageProcessor does not support 4 channels yet")
    def test_can_compile_fast_image_processor(self):
        return super().test_can_compile_fast_image_processor()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # PIL RGBA inputs are converted to RGB, hence `num_channels - 1` in the hidden dim.
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * (self.image_processor_tester.num_channels - 1)
        ) + 2
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )
            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )

    @unittest.skip(reason="Kosmos2_5ImageProcessor does not support 4 channels yet")  # FIXME Amy
    def test_call_numpy(self):
        return super().test_call_numpy()

    @unittest.skip(reason="Kosmos2_5ImageProcessor does not support 4 channels yet")  # FIXME Amy
    def test_call_pytorch(self):
        return super().test_call_pytorch()

    @unittest.skip(
        reason="Kosmos2_5ImageProcessor does treat numpy and PIL 4 channel images consistently"
    )  # FIXME Amy
    def test_call_numpy_4_channels(self):
        # Fixed copy-paste bug: previously delegated to `super().test_call_pytorch()`.
        return super().test_call_numpy_4_channels()
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/kosmos2_5/test_image_processing_kosmos2_5.py",
"license": "Apache License 2.0",
"lines": 349,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:tests/models/kosmos2_5/test_modeling_kosmos2_5.py | # Copyright 2024 Microsoft Research and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch KOSMOS-2.5 model."""
import copy
import inspect
import tempfile
import unittest
import numpy as np
import pytest
import requests
from parameterized import parameterized
from transformers import AutoProcessor, Kosmos2_5Config
from transformers.models.kosmos2_5.configuration_kosmos2_5 import (
Kosmos2_5TextConfig,
Kosmos2_5VisionConfig,
)
from transformers.testing_utils import (
require_flash_attn,
require_torch,
require_torch_accelerator,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_torch_available, is_vision_available
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import (
ModelTesterMixin,
floats_tensor,
ids_tensor,
random_attention_mask,
)
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import Kosmos2_5ForConditionalGeneration, Kosmos2_5Model
if is_vision_available():
from PIL import Image
class Kosmos2_5VisionModelTester:
    """Builds tiny vision configs and random inputs for Kosmos-2.5 vision-model tests."""

    def __init__(
        self,
        parent,
        batch_size=6,
        image_size=32,
        patch_size=4,
        num_channels=3,
        is_training=True,
        hidden_size=32,
        intermediate_size=64,
        num_hidden_layers=2,
        num_attention_heads=4,
        dropout=0,
        attention_dropout=0,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_embed_hidden_size = patch_size * patch_size * num_channels
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.scope = scope
        # As in ViT: sequence length is the patch count plus one [CLS] position.
        self.seq_length = (image_size // patch_size) ** 2 + 1

    def prepare_config_and_inputs(self):
        """Return a tiny vision config plus random flattened patches (+2 coordinate slots)."""
        flattened_patches = floats_tensor([self.batch_size, self.seq_length, self.patch_embed_hidden_size + 2])
        return self.get_config(), flattened_patches

    def get_config(self):
        return Kosmos2_5VisionConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            intermediate_size=self.intermediate_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            patch_embed_hidden_size=self.patch_embed_hidden_size,
            dropout=self.dropout,
            attention_dropout=self.attention_dropout,
        )

    def prepare_config_and_inputs_for_common(self):
        config, flattened_patches = self.prepare_config_and_inputs()
        return config, {"flattened_patches": flattened_patches}
class Kosmos2_5TextModelTester:
    """Builds tiny text configs and random inputs for Kosmos-2.5 text-model tests."""

    def __init__(
        self,
        parent,
        batch_size=6,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        ffn_dim=64,
        num_hidden_layers=2,
        num_attention_heads=4,
        dropout=0,
        attention_dropout=0,
        max_position_embeddings=512,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.ffn_dim = ffn_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Return a tiny text config, random input ids, and (optionally) a left-contiguous attention mask."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length]) if self.use_input_mask else None
        if input_mask is not None:
            # Rewrite each row so attended positions form a contiguous prefix of random length.
            num_rows, row_len = input_mask.shape
            start_indices = np.random.randint(1, row_len - 1, size=(num_rows,))
            for row_idx, start in enumerate(start_indices):
                input_mask[row_idx, :start] = 1
                input_mask[row_idx, start:] = 0
        return self.get_config(), input_ids, input_mask

    def get_config(self):
        return Kosmos2_5TextConfig(
            vocab_size=self.vocab_size,
            embed_dim=self.hidden_size,
            ffn_dim=self.ffn_dim,
            layers=self.num_hidden_layers,
            attention_heads=self.num_attention_heads,
            dropout=self.dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
        )

    def prepare_config_and_inputs_for_common(self):
        config, input_ids, input_mask = self.prepare_config_and_inputs()
        return config, {"input_ids": input_ids, "attention_mask": input_mask}
class Kosmos2_5ModelTester:
    """Combines the text and vision testers to exercise the full Kosmos-2.5 model."""

    def __init__(
        self,
        parent,
        text_kwargs=None,
        vision_kwargs=None,
        latent_query_num=3,
        is_training=True,
    ):
        self.parent = parent
        self.text_model_tester = Kosmos2_5TextModelTester(parent, **(text_kwargs or {}))
        self.vision_model_tester = Kosmos2_5VisionModelTester(parent, **(vision_kwargs or {}))
        # need bs for batching_equivalence test
        self.batch_size = self.text_model_tester.batch_size
        self.seq_length = self.text_model_tester.seq_length
        self.latent_query_num = latent_query_num
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        """Return the combined config plus text inputs, attention mask, image position mask and patches."""
        text_config, input_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs()
        vision_config, flattened_patches = self.vision_model_tester.prepare_config_and_inputs()
        # Mark the `latent_query_num` positions right after the first token as image-embedding slots.
        image_embeds_position_mask = torch.zeros_like(input_ids)
        image_embeds_position_mask[:, 1 : 1 + self.latent_query_num] = 1
        return (
            self.get_config(),
            input_ids,
            attention_mask,
            image_embeds_position_mask,
            flattened_patches,
        )

    def get_config(self):
        return Kosmos2_5Config(
            self.text_model_tester.get_config().to_dict(),
            self.vision_model_tester.get_config().to_dict(),
            latent_query_num=self.latent_query_num,
        )

    def create_and_check_model(
        self,
        config,
        input_ids,
        attention_mask,
        image_embeds_position_mask,
        flattened_patches,
    ):
        """Run a forward pass and verify hidden-state and image-embedding shapes."""
        model = Kosmos2_5Model(config).to(torch_device).eval()
        with torch.no_grad():
            result = model(input_ids, flattened_patches, image_embeds_position_mask, attention_mask)
        text_tester = self.text_model_tester
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (text_tester.batch_size, text_tester.seq_length, text_tester.hidden_size),
        )
        self.parent.assertEqual(
            result.image_embeds.shape,
            (text_tester.batch_size, self.latent_query_num, text_tester.hidden_size),
        )

    def prepare_config_and_inputs_for_common(self):
        (
            config,
            input_ids,
            attention_mask,
            image_embeds_position_mask,
            flattened_patches,
        ) = self.prepare_config_and_inputs()
        inputs_dict = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "image_embeds_position_mask": image_embeds_position_mask,
            "flattened_patches": flattened_patches,
        }
        return config, inputs_dict
@require_torch
class Kosmos2_5ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (Kosmos2_5Model, Kosmos2_5ForConditionalGeneration) if is_torch_available() else ()
all_generative_model_classes = (Kosmos2_5ForConditionalGeneration,) if is_torch_available() else ()
pipeline_model_mapping = (
{
"feature-extraction": Kosmos2_5Model,
}
if is_torch_available()
else {}
)
test_resize_embeddings = False
test_attention_outputs = False
_is_composite = True
def is_pipeline_test_to_skip(
self,
pipeline_test_casse_name,
config_class,
model_architecture,
tokenizer_name,
processor_name,
):
return pipeline_test_casse_name == "ImageToTextPipelineTests"
def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
inputs_dict = copy.deepcopy(inputs_dict)
if return_labels:
if model_class.__name__ == "Kosmos2_5ForConditionalGeneration":
inputs_dict["labels"] = torch.zeros(
(
self.model_tester.text_model_tester.batch_size,
self.model_tester.text_model_tester.seq_length,
),
dtype=torch.long,
device=torch_device,
)
if model_class.__name__ in [
"Kosmos2_5Model",
"Kosmos2_5ForConditionalGeneration",
]:
bs, _ = inputs_dict["input_ids"].shape
seqlen = self.model_tester.text_model_tester.seq_length
inputs_dict["input_ids"] = torch.arange(seqlen, device=torch_device).unsqueeze(0).expand(bs, seqlen)
inputs_dict["input_ids"] = inputs_dict["input_ids"] % self.model_tester.text_model_tester.vocab_size
inputs_dict["attention_mask"] = torch.ones((bs, seqlen), device=torch_device)
inputs_dict["image_embeds_position_mask"] = torch.zeros((bs, seqlen), device=torch_device)
inputs_dict["image_embeds_position_mask"][:, : self.model_tester.latent_query_num] = 1
return inputs_dict
def setUp(self):
self.model_tester = Kosmos2_5ModelTester(self)
self.config_tester = ConfigTester(self, config_class=Kosmos2_5Config, hidden_size=37)
@unittest.skip("KOSMOS-2.5 doesn't support padding")
def test_eager_padding_matches_padding_free_with_position_ids(self):
pass
@unittest.skip("KOSMOS-2.5 doesn't support padding")
def test_sdpa_padding_matches_padding_free_with_position_ids(self):
pass
@parameterized.expand([("random",), ("same",)])
@pytest.mark.generate
@unittest.skip(
"Kosmos-2.5 doesn't support assisted generation due to the need to extend `image_embeds_position_mask` length."
)
def test_assisted_decoding_matches_greedy_search(self):
pass
@pytest.mark.generate
@unittest.skip(
"Kosmos-2.5 doesn't support assisted generation due to the need to extend `image_embeds_position_mask` length."
)
def test_assisted_decoding_sample(self):
pass
@unittest.skip(
"Kosmos-2.5 doesn't support assisted generation due to the need to extend `image_embeds_position_mask` length."
)
def test_prompt_lookup_decoding_matches_greedy_search(self):
pass
@unittest.skip(reason="Kosmos2-3 has no separate base model without a head.")
def test_model_base_model_prefix(self):
pass
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_forward_signature(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
signature = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = ["input_ids"]
self.assertListEqual(arg_names[:1], expected_arg_names)
def test_load_save_without_tied_weights(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
config.text_config.tie_word_embeddings = False
for model_class in self.all_model_classes:
model = model_class(config)
with tempfile.TemporaryDirectory() as d:
model.save_pretrained(d)
model_reloaded, infos = model_class.from_pretrained(d, output_loading_info=True)
# Checking the state dicts are correct
reloaded_state = model_reloaded.state_dict()
for k, v in model.state_dict().items():
self.assertIn(k, reloaded_state, f"Key {k} is missing from reloaded")
torch.testing.assert_close(
v,
reloaded_state[k],
msg=lambda x: f"{model_class.__name__}: Tensor {k}: {x}",
)
# Checking there was no complain of missing weights
self.assertEqual(infos["missing_keys"], set())
# overwrite from common in order to use `self.model_tester.text_model_tester.num_hidden_layers`
def test_hidden_states_output(self):
def check_hidden_states_output(inputs_dict, config, model_class):
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
hidden_states = outputs.hidden_states
expected_num_layers = getattr(
self.model_tester,
"expected_num_hidden_layers",
self.model_tester.text_model_tester.num_hidden_layers + 1,
)
self.assertEqual(len(hidden_states), expected_num_layers)
seq_length = self.model_tester.text_model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:]),
[seq_length, self.model_tester.text_model_tester.hidden_size],
)
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
inputs_dict["output_hidden_states"] = True
check_hidden_states_output(inputs_dict, config, model_class)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
check_hidden_states_output(inputs_dict, config, model_class)
@slow
def test_model_from_pretrained(self):
model_name = "microsoft/kosmos-2.5"
model = Kosmos2_5Model.from_pretrained(model_name)
self.assertIsNotNone(model)
    @unittest.skip(reason="Does not work on the tiny model as we keep hitting edge cases.")
    def test_model_parallelism(self):
        # Inherited parallelism test disabled for this model's tiny test configuration.
        pass
    # TODO: ydshieh
    @require_torch_accelerator
    @slow
    @unittest.skip(reason="_update_causal_mask is not implemented yet which fails this test")
    def test_sdpa_can_dispatch_on_flash(self):
        # Re-enable once `_update_causal_mask` is implemented for Kosmos-2.5.
        pass
    # TODO: vasqu
    @unittest.skip(reason="why the heck does this have bigger tols")
    def test_eager_matches_sdpa_inference_24_fp32_pad_left_output_attentions(self):
        # NOTE(review): skipped pending investigation of why this case needs larger tolerances.
        pass
    # TODO: ydshieh
    @unittest.skip(reason=" the model hasn't been added to auto class")
    def test_flash_attn_2_from_config(self):
        # Re-enable once the model is registered with the auto classes.
        pass
    @unittest.skip("This test is currently not well designed for multimodal model (float type as an input).")
    def test_flash_attn_2_fp32_ln(self):
        # Common test assumes integer-only inputs; Kosmos-2.5 also takes float patches.
        pass
    @unittest.skip("This test is currently not well designed for multimodal model (float type as an input).")
    def test_flash_attention_2_padding_matches_padding_free_with_position_ids(self):
        # Same limitation as `test_flash_attn_2_fp32_ln`: float image inputs break the common test.
        pass
    @unittest.skip("Kosmos 2.5 is multimodel and has specific input shapes.")
    def test_flash_attn_2_generate_reuse_cache(self):
        # Disabled: the common test's generic input shapes don't match this model's multimodal inputs.
        pass
    @pytest.mark.generate
    @parameterized.expand([("greedy", 1), ("beam search", 2)])
    @unittest.skip(
        "KOSMOS-2.5 doesn't support inputs embeds. The test isn't skipped by checking input args because KOSMOS-2 has `generate()` overwritten",
    )
    def test_generate_from_inputs_embeds(self):
        # Both parameterized variants (greedy / beam search) are skipped; `generate()` is overridden upstream.
        pass
@pytest.mark.generate
def test_left_padding_compatibility(self):
# Overwrite -- Kosmos-2.5 needs to prepare `image_embeds_position_mask`, and it must be padded accordingly
_, inputs_dict = self.prepare_config_and_inputs_for_generate()
input_ids = inputs_dict["input_ids"]
def _prepare_image_embeds_position_mask(input_ids, pad_size):
image_embeds_position_mask = torch.zeros(
input_ids.shape[0], input_ids.shape[1] + pad_size, device=torch_device, dtype=input_ids.dtype
)
image_embeds_position_mask[:, (pad_size + 1) : pad_size + 1 + self.model_tester.latent_query_num] = 1
return image_embeds_position_mask
# `image_embeds_position_mask` is randomly generated in `prepare_config_and_inputs_for_generate`, and it must
# match its padded version for the test to be valid -- we need to pass both
unpadded_custom_inputs = {"image_embeds_position_mask": _prepare_image_embeds_position_mask(input_ids, 0)}
padded_custom_inputs = {"image_embeds_position_mask": _prepare_image_embeds_position_mask(input_ids, 32)}
super().test_left_padding_compatibility(
unpadded_custom_inputs=unpadded_custom_inputs, padded_custom_inputs=padded_custom_inputs
)
@require_vision
@require_torch
@slow
class Kosmos2_5ModelIntegrationTest(unittest.TestCase):
    """Slow integration tests that run the real `microsoft/kosmos-2.5` checkpoint end to end.

    Expected generations are keyed by GPU generation (CUDA compute capability major
    version) because different hardware produces slightly different logits.
    """

    # This variable is used to determine which CUDA device are we using for our runners (A10 or T4)
    # Depending on the hardware we get different logits / generations
    cuda_compute_capability_major_version = None
    @classmethod
    def setUpClass(cls):
        # Record the GPU generation once so each test can pick hardware-specific expectations.
        if is_torch_available() and torch.cuda.is_available():
            # 8 is for A100 / A10 and 7 for T4
            cls.cuda_compute_capability_major_version = torch.cuda.get_device_capability()[0]
    def run_example(self, prompt, image, model, processor):
        """Run one prompt+image generation and return `(generated_ids, decoded_text)`."""
        inputs = processor(text=prompt, images=image, return_tensors="pt")
        # Move tensors to the test device; entries that are None are kept as None.
        inputs = {k: v.to(torch_device) if v is not None else None for k, v in inputs.items()}
        # Patches must match the model dtype (e.g. bfloat16) before the forward pass.
        inputs["flattened_patches"] = inputs["flattened_patches"].to(model.dtype)
        generation_outputs = model.generate(
            **inputs,
            max_new_tokens=1024,
        )
        generated_ids = generation_outputs
        generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)
        return generated_ids, generated_text
    def test_eager(self):
        """OCR and markdown generation on a receipt image with the eager attention backend."""
        url = "https://huggingface.co/microsoft/kosmos-2.5/resolve/main/receipt_00008.png"
        image = Image.open(requests.get(url, stream=True).raw)
        dtype = torch.bfloat16
        repo = "microsoft/kosmos-2.5"
        model = Kosmos2_5ForConditionalGeneration.from_pretrained(
            repo, device_map=torch_device, dtype=dtype, attn_implementation="eager"
        )
        processor = AutoProcessor.from_pretrained(repo)
        prompt = "<ocr>"
        generated_ids, generated_text = self.run_example(prompt, image, model, processor)
        # NOTE(review): only capability 8 (A100/A10) is covered here; this test would
        # KeyError on T4 (capability 7) or CPU (None) -- confirm it only runs on A10/A100 runners.
        EXPECTED_TEXT = {
            8: [
                "<bbox><x_53><y_573><x_69><y_606></bbox>1\n<bbox><x_79><y_573><x_464><y_611></bbox>[REG] BLACK SAKURA\n<bbox><x_690><y_569><x_810><y_606></bbox>45,455\n<bbox><x_53><y_614><x_69><y_648></bbox>1\n<bbox><x_79><y_614><x_468><y_651></bbox>COOKIE DOH SAUCES\n<bbox><x_788><y_609><x_812><y_642></bbox>0\n<bbox><x_50><y_658><x_69><y_693></bbox>1\n<bbox><x_79><y_658><x_358><y_693></bbox>NATA DE COCO\n<bbox><x_790><y_652><x_814><y_683></bbox>0\n<bbox><x_31><y_742><x_820><y_781></bbox>Sub Total 45,455\n<bbox><x_27><y_781><x_822><y_827></bbox>PB1 (10%) 4,545\n<bbox><x_27><y_826><x_824><y_872></bbox>Rounding 0\n<bbox><x_24><y_872><x_827><y_921></bbox>Total 50,000\n<bbox><x_17><y_1056><x_836><y_1108></bbox>Card Payment 50,000\n"
            ]
        }
        self.assertListEqual(generated_text, EXPECTED_TEXT[self.cuda_compute_capability_major_version])
        prompt = "<md>"
        generated_ids, generated_text = self.run_example(prompt, image, model, processor)
        EXPECTED_TEXT = {
            8: [
                "- **1 \\[REG\\] BLACK SAKURA** 45,455\n- **1 COOKIE DOH SAUCES** 0\n- **1 NATA DE COCO** 0\n- **Sub Total** 45,455\n- **PB1 (10%)** 4,545\n- **Rounding** 0\n- **Total** **50,000**\n\nCard Payment 50,000"
            ],
        }
        self.assertListEqual(generated_text, EXPECTED_TEXT[self.cuda_compute_capability_major_version])
    def test_sdpa(self):
        """Same OCR/markdown checks as `test_eager`, but with the SDPA attention backend."""
        url = "https://huggingface.co/microsoft/kosmos-2.5/resolve/main/receipt_00008.png"
        image = Image.open(requests.get(url, stream=True).raw)
        dtype = torch.bfloat16
        repo = "microsoft/kosmos-2.5"
        model = Kosmos2_5ForConditionalGeneration.from_pretrained(
            repo, device_map=torch_device, dtype=dtype, attn_implementation="sdpa"
        )
        processor = AutoProcessor.from_pretrained(repo)
        prompt = "<ocr>"
        generated_ids, generated_text = self.run_example(prompt, image, model, processor)
        # Expectations keyed by GPU generation: 7 = T4, 8 = A100/A10.
        EXPECTED_TEXT = {
            7: [
                "<bbox><x_53><y_573><x_69><y_606></bbox>1\n<bbox><x_79><y_573><x_464><y_611></bbox>[REG] BLACK SAKURA\n<bbox><x_690><y_569><x_810><y_606></bbox>45,455\n<bbox><x_53><y_614><x_69><y_648></bbox>1\n<bbox><x_79><y_614><x_468><y_651></bbox>COOKIE DOH SAUCES\n<bbox><x_788><y_609><x_812><y_642></bbox>0\n<bbox><x_50><y_658><x_69><y_693></bbox>1\n<bbox><x_79><y_658><x_358><y_693></bbox>NATA DE COCO\n<bbox><x_790><y_652><x_814><y_683></bbox>0\n<bbox><x_31><y_742><x_820><y_781></bbox>Sub Total 45,455\n<bbox><x_27><y_781><x_822><y_827></bbox>PB1 (10%) 4,545\n<bbox><x_27><y_826><x_824><y_872></bbox>Rounding 0\n<bbox><x_24><y_872><x_827><y_921></bbox>Total 50,000\n<bbox><x_17><y_1056><x_836><y_1108></bbox>Card Payment 50,000\n",
            ],
            8: [
                "<bbox><x_53><y_573><x_69><y_606></bbox>1\n<bbox><x_79><y_573><x_464><y_611></bbox>[REG] BLACK SAKURA\n<bbox><x_690><y_569><x_810><y_606></bbox>45,455\n<bbox><x_53><y_614><x_69><y_648></bbox>1\n<bbox><x_79><y_614><x_468><y_651></bbox>COOKIE DOH SAUCES\n<bbox><x_788><y_609><x_812><y_642></bbox>0\n<bbox><x_50><y_658><x_69><y_693></bbox>1\n<bbox><x_79><y_658><x_358><y_693></bbox>NATA DE COCO\n<bbox><x_790><y_652><x_814><y_683></bbox>0\n<bbox><x_31><y_742><x_820><y_781></bbox>Sub Total 45,455\n<bbox><x_27><y_781><x_822><y_827></bbox>PB1 (10%) 4,545\n<bbox><x_27><y_826><x_824><y_872></bbox>Rounding 0\n<bbox><x_24><y_872><x_827><y_921></bbox>Total 50,000\n<bbox><x_17><y_1056><x_836><y_1108></bbox>Card Payment 50,000\n"
            ],
        }
        self.assertListEqual(generated_text, EXPECTED_TEXT[self.cuda_compute_capability_major_version])
        prompt = "<md>"
        generated_ids, generated_text = self.run_example(prompt, image, model, processor)
        EXPECTED_TEXT = {
            7: [
                "- **1 \\[REG\\] BLACK SAKURA** 45,455\n- **1 COOKIE DOH SAUCES** 0\n- **1 NATA DE COCO** 0\n- **Sub Total** 45,455\n- **PB1 (10%)** 4,545\n- **Rounding** 0\n- **Total** **50,000**\n\nCard Payment 50,000"
            ],
            8: [
                "- **1 \\[REG\\] BLACK SAKURA** 45,455\n- **1 COOKIE DOH SAUCES** 0\n- **1 NATA DE COCO** 0\n- **Sub Total** 45,455\n- **PB1 (10%)** 4,545\n- **Rounding** 0\n- **Total** **50,000**\n\nCard Payment 50,000"
            ],
        }
        self.assertListEqual(generated_text, EXPECTED_TEXT[self.cuda_compute_capability_major_version])
    @require_flash_attn
    @require_torch_accelerator
    @pytest.mark.flash_attn_test
    @slow
    def test_FA2(self):
        """Same OCR/markdown checks with the FlashAttention-2 backend (accelerator only)."""
        url = "https://huggingface.co/microsoft/kosmos-2.5/resolve/main/receipt_00008.png"
        image = Image.open(requests.get(url, stream=True).raw)
        dtype = torch.bfloat16
        repo = "microsoft/kosmos-2.5"
        model = Kosmos2_5ForConditionalGeneration.from_pretrained(
            repo,
            device_map=torch_device,
            dtype=dtype,
            attn_implementation="flash_attention_2",
        )
        processor = AutoProcessor.from_pretrained(repo)
        prompt = "<ocr>"
        generated_ids, generated_text = self.run_example(prompt, image, model, processor)
        # Note: bbox coordinates differ slightly from the eager/sdpa expectations above.
        EXPECTED_TEXT = [
            "<bbox><x_53><y_573><x_69><y_606></bbox>1\n<bbox><x_79><y_573><x_464><y_612></bbox>[REG] BLACK SAKURA\n<bbox><x_690><y_569><x_812><y_606></bbox>45,455\n<bbox><x_53><y_614><x_69><y_650></bbox>1\n<bbox><x_79><y_614><x_468><y_650></bbox>COOKIE DOH SAUCES\n<bbox><x_788><y_610><x_813><y_644></bbox>0\n<bbox><x_50><y_658><x_65><y_693></bbox>1\n<bbox><x_76><y_658><x_358><y_693></bbox>NATA DE COCO\n<bbox><x_790><y_652><x_815><y_687></bbox>0\n<bbox><x_31><y_742><x_822><y_781></bbox>Sub Total 45,455\n<bbox><x_27><y_780><x_822><y_827></bbox>PB1 (10%) 4,545\n<bbox><x_27><y_826><x_824><y_874></bbox>Rounding 0\n<bbox><x_24><y_872><x_827><y_921></bbox>Total 50,000\n<bbox><x_17><y_1056><x_835><y_1108></bbox>Card Payment 50,000\n"
        ]
        self.assertListEqual(generated_text, EXPECTED_TEXT)
        prompt = "<md>"
        generated_ids, generated_text = self.run_example(prompt, image, model, processor)
        # A10 gives the 1st one, but A100 gives the 2nd one
        EXPECTED_TEXT = [
            "- **1 \\[REG\\] BLACK SAKURA** 45,455\n- **1 COOKIE DOH SAUCES** 0\n- **1 NATA DE COCO** 0\n\n<table>\n<thead>\n<tr>\n<th>\nSub Total\n</th>\n<th>\n45,455\n</th>\n</tr>\n</thead>\n<tbody>\n<tr>\n<td>\nPB1 (10%)\n</td>\n<td>\n4,545\n</td>\n</tr>\n<tr>\n<td>\nRounding\n</td>\n<td>\n0\n</td>\n</tr>\n<tr>\n<td>\n<strong>\nTotal\n</strong>\n</td>\n<td>\n<strong>\n50,000\n</strong>\n</td>\n</tr>\n</tbody>\n</table>\n\nCard Payment 50,000",
            "- **1 \\[REG\\] BLACK SAKURA** 45,455\n- **1 COOKIE DOH SAUCES** 0\n- **1 NATA DE COCO** 0\n- **Sub Total** 45,455\n- **PB1 (10%)** 4,545\n- **Rounding** 0\n- **Total** **50,000**\n",
        ]
        self.assertIn(generated_text[0], EXPECTED_TEXT)
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/kosmos2_5/test_modeling_kosmos2_5.py",
"license": "Apache License 2.0",
"lines": 553,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:tests/models/kosmos2_5/test_processor_kosmos2_5.py | # Copyright 2024 Microsoft Research and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
from tempfile import TemporaryDirectory
import numpy as np
import pytest
from transformers.image_utils import load_image
from transformers.testing_utils import (
require_torch,
require_vision,
)
from transformers.utils import is_vision_available
from ...test_processing_common import ProcessorTesterMixin, url_to_local_path
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
AutoTokenizer,
Kosmos2_5ImageProcessor,
Kosmos2_5Processor,
)
@require_vision
class Kosmos2_5ProcessorTest(ProcessorTesterMixin, unittest.TestCase):
    """Tests for `Kosmos2_5Processor`, which pairs a Pix2Struct-style image processor
    (emitting `flattened_patches`) with a tokenizer. Several common-mixin tests are
    rewritten here because the processor outputs `flattened_patches` rather than
    `pixel_values` and does not use `rescale_factor`.
    """

    processor_class = Kosmos2_5Processor
    # Name of the image-input key in processor outputs (overrides the mixin default)
    images_input_name = "flattened_patches"
    model_id = "microsoft/kosmos-2.5"
    @unittest.skip("Kosmos2_5Processor removes 'rows' and 'cols' from the output")
    def test_image_processor_defaults(self):
        pass
    # NOTE(review): "procesor" in this test name is a typo for "processor"; kept as-is
    # since renaming would change the collected test id.
    def test_image_procesor_load_save_reload(self):
        # make sure load from Hub repo. -> save -> reload locally work
        image_processor = Kosmos2_5ImageProcessor.from_pretrained("microsoft/kosmos-2.5")
        with TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(tmp_dir)
            reloaded_image_processor = Kosmos2_5ImageProcessor.from_pretrained(tmp_dir)
            assert image_processor.to_dict() == reloaded_image_processor.to_dict()
            assert image_processor.to_json_string() == reloaded_image_processor.to_json_string()
    def test_can_load_various_tokenizers(self):
        """AutoProcessor must resolve the same tokenizer class as AutoTokenizer."""
        for checkpoint in ["microsoft/kosmos-2.5"]:
            processor = AutoProcessor.from_pretrained(checkpoint)
            tokenizer = AutoTokenizer.from_pretrained(checkpoint)
            self.assertEqual(processor.tokenizer.__class__, tokenizer.__class__)
    @require_torch
    def test_model_input_names(self):
        """Check the exact set/order of output keys and that empty input raises."""
        image_processor = self.get_component("image_processor")
        tokenizer = self.get_component("tokenizer")
        processor = Kosmos2_5Processor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "This is a test"
        image_input = self.prepare_image_inputs()
        # both image and text
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(
            list(inputs.keys()),
            [
                "flattened_patches",
                "attention_mask",
                "width",
                "height",
                "input_ids",
                "image_embeds_position_mask",
            ],
        )
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    @require_torch
    @require_vision
    def test_image_processor_defaults_preserved_by_image_kwargs(self):
        # Rewrite as KOSMOS-2.5 processor return "flattened_patches" and not "pixel_values"
        if "image_processor" not in self.processor_class.get_attributes():
            self.skipTest(f"image_processor attribute not present in {self.processor_class}")
        image_processor = self.get_component("image_processor", max_patches=1024, patch_size={"height": 8, "width": 8})
        tokenizer = self.get_component("tokenizer", max_length=117, padding="max_length")
        processor = self.processor_class(tokenizer=tokenizer, image_processor=image_processor)
        self.skip_processor_without_typed_kwargs(processor)
        input_str = self.prepare_text_inputs()
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        # 194 = 8 * 8 * 3 patch values + 2 leading columns, presumably row/col coordinates -- confirm
        self.assertEqual(len(inputs["flattened_patches"][0][0]), 194)
    @require_torch
    @require_vision
    def test_kwargs_overrides_default_image_processor_kwargs(self):
        # Rewrite as KOSMOS-2.5 processor return "flattened_patches" and not "pixel_values"
        if "image_processor" not in self.processor_class.get_attributes():
            self.skipTest(f"image_processor attribute not present in {self.processor_class}")
        image_processor = self.get_component("image_processor", max_patches=4096)
        tokenizer = self.get_component("tokenizer", max_length=117, padding="max_length")
        processor = self.processor_class(tokenizer=tokenizer, image_processor=image_processor)
        self.skip_processor_without_typed_kwargs(processor)
        input_str = self.prepare_text_inputs()
        image_input = self.prepare_image_inputs()
        # Call-time max_patches=1024 must win over the component default of 4096
        inputs = processor(text=input_str, images=image_input, max_patches=1024)
        self.assertEqual(len(inputs["flattened_patches"][0]), 1024)
    @require_torch
    @require_vision
    def test_unstructured_kwargs(self):
        # Rewrite as KOSMOS-2.5 processor doesn't use `rescale_factor`
        if "image_processor" not in self.processor_class.get_attributes():
            self.skipTest(f"image_processor attribute not present in {self.processor_class}")
        image_processor = self.get_component("image_processor")
        tokenizer = self.get_component("tokenizer")
        processor = self.processor_class(tokenizer=tokenizer, image_processor=image_processor)
        self.skip_processor_without_typed_kwargs(processor)
        input_str = self.prepare_text_inputs()
        image_input = self.prepare_image_inputs()
        # Flat kwargs must be routed to the right sub-processor (images vs. text)
        inputs = processor(
            text=input_str,
            images=image_input,
            return_tensors="pt",
            max_patches=1024,
            padding="max_length",
            max_length=76,
        )
        self.assertEqual(inputs["flattened_patches"].shape[1], 1024)
        self.assertEqual(len(inputs["input_ids"][0]), 76)
    @require_torch
    @require_vision
    def test_unstructured_kwargs_batched(self):
        # Rewrite as KOSMOS-2.5 processor doesn't use `rescale_factor`
        if "image_processor" not in self.processor_class.get_attributes():
            self.skipTest(f"image_processor attribute not present in {self.processor_class}")
        image_processor = self.get_component("image_processor")
        tokenizer = self.get_component("tokenizer")
        processor = self.processor_class(tokenizer=tokenizer, image_processor=image_processor)
        self.skip_processor_without_typed_kwargs(processor)
        input_str = self.prepare_text_inputs(batch_size=2)
        image_input = self.prepare_image_inputs(batch_size=2)
        inputs = processor(
            text=input_str,
            images=image_input,
            return_tensors="pt",
            max_patches=1024,
            padding="longest",
            max_length=76,
        )
        self.assertEqual(inputs["flattened_patches"].shape[1], 1024)
        self.assertEqual(len(inputs["input_ids"][0]), 76)
    @require_torch
    @require_vision
    def test_structured_kwargs_nested(self):
        # Rewrite as KOSMOS-2.5 processor doesn't use `rescale_factor`
        if "image_processor" not in self.processor_class.get_attributes():
            self.skipTest(f"image_processor attribute not present in {self.processor_class}")
        image_processor = self.get_component("image_processor")
        tokenizer = self.get_component("tokenizer")
        processor = self.processor_class(tokenizer=tokenizer, image_processor=image_processor)
        self.skip_processor_without_typed_kwargs(processor)
        input_str = self.prepare_text_inputs()
        image_input = self.prepare_image_inputs()
        # Define the kwargs for each modality
        all_kwargs = {
            "common_kwargs": {"return_tensors": "pt"},
            "images_kwargs": {"max_patches": 1024},
            "text_kwargs": {"padding": "max_length", "max_length": 76},
        }
        inputs = processor(text=input_str, images=image_input, **all_kwargs)
        self.skip_processor_without_typed_kwargs(processor)
        self.assertEqual(inputs["flattened_patches"].shape[1], 1024)
        self.assertEqual(len(inputs["input_ids"][0]), 76)
    @require_torch
    @require_vision
    def test_structured_kwargs_nested_from_dict(self):
        # Rewrite as KOSMOS-2.5 processor doesn't use `rescale_factor`
        if "image_processor" not in self.processor_class.get_attributes():
            self.skipTest(f"image_processor attribute not present in {self.processor_class}")
        image_processor = self.get_component("image_processor")
        tokenizer = self.get_component("tokenizer")
        processor = self.processor_class(tokenizer=tokenizer, image_processor=image_processor)
        self.skip_processor_without_typed_kwargs(processor)
        input_str = self.prepare_text_inputs()
        image_input = self.prepare_image_inputs()
        # Define the kwargs for each modality
        all_kwargs = {
            "common_kwargs": {"return_tensors": "pt"},
            "images_kwargs": {"max_patches": 1024},
            "text_kwargs": {"padding": "max_length", "max_length": 76},
        }
        inputs = processor(text=input_str, images=image_input, **all_kwargs)
        self.assertEqual(inputs["flattened_patches"].shape[1], 1024)
        self.assertEqual(len(inputs["input_ids"][0]), 76)
    @require_torch
    def test_full_processor(self):
        """End-to-end check of the processor against reference values: token ids,
        image position mask, attention mask, and flattened-patch contents, for a
        single image and for a batch of two."""
        url = url_to_local_path("https://huggingface.co/microsoft/kosmos-2.5/resolve/main/receipt_00008.png")
        processor = AutoProcessor.from_pretrained("microsoft/kosmos-2.5")
        texts = ["<md>", "<ocr>"]
        # Task-prompt token ids for "<md>" and "<ocr>" respectively
        expected_input_ids = [
            [100288],
            [100282],
        ]
        expected_attention_mask = [[1], [1]]
        image = load_image(url)
        # To match the official (microsoft) Kosmos-2 demo from which the expected values here are grabbed
        image_path = os.path.join(self.tmpdirname, "image.png")
        image.save(image_path)
        image = Image.open(image_path)
        # test single image
        outputs = processor(images=image, text=texts[0])
        # Layout: BOS, image-start token, 2048 image placeholder slots, image-end token, prompt tokens
        self.assertListEqual(
            outputs.input_ids[0].numpy().tolist(),
            [0, 100283] + [0] * 2048 + [100284] + expected_input_ids[0],
        )
        self.assertListEqual(
            outputs.image_embeds_position_mask[0].numpy().tolist(),
            [0, -1] + [1] * 2048 + [-1] + [0] * (len(expected_input_ids[0])),
        )
        self.assertListEqual(
            outputs.attention_mask[0].numpy().tolist(),
            [1, 1] + [1] * 2048 + [1] + expected_attention_mask[0],
        )
        # First 10 values of flattened patch rows 1 and 200 (leading pair presumably row/col coords)
        EXPECTED_FP_1 = [
            1.0,
            2.0,
            -2.9527735710144043,
            -2.672085762023926,
            -2.9933173656463623,
            -2.905944585800171,
            -2.5891761779785156,
            -2.8751866817474365,
            -2.962153434753418,
            -2.588062047958374,
        ]
        EXPECTED_FP_200 = [
            4.0,
            45.0,
            1.5713728666305542,
            1.584628939628601,
            1.3589054346084595,
            1.6515952348709106,
            1.7014952898025513,
            1.3731343746185303,
            1.6010395288467407,
            1.6607422828674316,
        ]
        self.assertTupleEqual(outputs.flattened_patches.shape, (1, 4096, 770))
        np.testing.assert_allclose(
            outputs.flattened_patches[0][1][:10].numpy().tolist(),
            EXPECTED_FP_1,
            atol=1e-4,
        )
        np.testing.assert_allclose(
            outputs.flattened_patches[0][200][:10].numpy().tolist(),
            EXPECTED_FP_200,
            atol=1e-4,
        )
        # test a batch of images and texts, right padding
        outputs = processor(images=[image, image], text=texts)
        self.assertListEqual(
            outputs.input_ids[1].numpy().tolist(),
            [0, 100283] + [0] * 2048 + [100284] + expected_input_ids[1],
        )
        self.assertListEqual(
            outputs.image_embeds_position_mask[1].numpy().tolist(),
            [0, -1] + [1] * 2048 + [-1] + [0] * (len(expected_input_ids[1])),
        )
        self.assertListEqual(
            outputs.attention_mask[1].numpy().tolist(),
            [1, 1] + [1] * 2048 + [1] + expected_attention_mask[1],
        )
        self.assertTupleEqual(outputs.flattened_patches.shape, (2, 4096, 770))
        # Second batch element must match the single-image reference values
        np.testing.assert_allclose(
            outputs.flattened_patches[1][1][:10].numpy().tolist(),
            EXPECTED_FP_1,
            atol=1e-4,
        )
        np.testing.assert_allclose(
            outputs.flattened_patches[1][200][:10].numpy().tolist(),
            EXPECTED_FP_200,
            atol=1e-4,
        )
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/kosmos2_5/test_processor_kosmos2_5.py",
"license": "Apache License 2.0",
"lines": 281,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:src/transformers/models/ovis2/configuration_ovis2.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ...configuration_utils import PreTrainedConfig
from ..qwen2.configuration_qwen2 import Qwen2Config
class Ovis2VisionConfig(PreTrainedConfig):
    r"""
    Configuration class for [`Ovis2VisionModel`]. It stores the arguments that define the vision-tower
    architecture; instantiating it with the defaults yields a configuration similar to the vision tower
    used by Ovis2.

    Args:
        hidden_size (`int`, *optional*, defaults to 1024):
            Dimensionality of the encoder layers and the pooler layer.
        intermediate_size (`int`, *optional*, defaults to 2816):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        num_hidden_layers (`int`, *optional*, defaults to 24):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 8):
            Number of attention heads for each attention layer in the Transformer encoder.
        num_channels (`int`, *optional*, defaults to 3):
            Number of channels in the input images.
        image_size (`int`, *optional*, defaults to 224):
            The size (resolution) of each image.
        patch_size (`int`, *optional*, defaults to 14):
            The size (resolution) of each patch.
        rms_norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon used by the RMSNorm layers.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        qkv_bias (`bool`, *optional*, defaults to `False`):
            Whether to add a learnable bias to the query, key, and value sequences at each attention head.
        mlp_bias (`bool`, *optional*, defaults to `False`):
            Whether to add a learnable bias to the MLP layers.
        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"` and `"gelu_new"` `"quick_gelu"` are supported.
        vocab_size (`int`, *optional*, defaults to 16384):
            Vocabulary size of the Vision Transformer.
        hidden_stride (`int`, *optional*, defaults to 1):
            The stride of the hidden layer in the Vision Transformer.
        num_visual_indicator_tokens (`int`, *optional*, defaults to 5):
            Number of visual indicator tokens.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated normal initializer for initializing all weight matrices.
        tokenize_function (`str`, *optional*, defaults to `"softmax"`):
            The function used to tokenize the visual indicator tokens.
    """

    base_config_key = "vision_config"

    def __init__(
        self,
        hidden_size: int = 1024,
        intermediate_size: int = 2816,
        num_hidden_layers: int = 24,
        num_attention_heads: int = 8,
        num_channels: int = 3,
        image_size: int = 224,
        patch_size: int = 14,
        rms_norm_eps: float = 1e-5,
        attention_dropout: float = 0.0,
        qkv_bias: bool = False,
        mlp_bias: bool = False,
        hidden_act="silu",
        vocab_size=16384,
        hidden_stride=1,
        num_visual_indicator_tokens=5,
        initializer_range=0.02,
        tokenize_function="softmax",
        **kwargs,
    ):
        super().__init__(**kwargs)
        # Transformer trunk geometry
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        # Input / patch-embedding geometry
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        # Normalization, activation and attention details
        self.rms_norm_eps = rms_norm_eps
        self.attention_dropout = attention_dropout
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias
        self.mlp_bias = mlp_bias
        # Visual-token head and initialization
        self.vocab_size = vocab_size
        self.hidden_stride = hidden_stride
        self.num_visual_indicator_tokens = num_visual_indicator_tokens
        self.tokenize_function = tokenize_function
        self.initializer_range = initializer_range
class Ovis2Config(PreTrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`Ovis2ForConditionalGeneration`]. It is used to instantiate a
    Ovis2 model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of Ovis2.
    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.
    e.g. [thisisiron/Ovis2-1B-hf](https://huggingface.co/thisisiron/Ovis2-1B-hf)
    Args:
        vision_config (`Union[AutoConfig, dict]`, *optional*, defaults to `Ovis2VisionConfig`):
            The config object or dictionary of the vision backbone.
        text_config (`Union[AutoConfig, dict]`, *optional*, defaults to `Qwen2Config`):
            The config object or dictionary of the text backbone.
        image_token_id (`int`, *optional*, defaults to 151665):
            The image token id to encode the image prompt.
        visual_indicator_token_ids (`List[int]`, *optional*, defaults to `[151666, 151667, 151668, 151669, 151670]`):
            The visual indicator token ids to encode the image prompt.
        vocab_size (`int`, *optional*, defaults to 151643):
            Vocabulary size of the text model.
        hidden_size (`int`, *optional*, defaults to 1536):
            Dimensionality of the encoder layers and the pooler layer.
        tie_word_embeddings (`bool`, *optional*, defaults to `True`):
            Whether to tie weight embeddings
    ```python
    >>> from transformers import Ovis2ForConditionalGeneration, Ovis2Config
    >>> # Initializing a Ovis2 style configuration
    >>> configuration = Ovis2Config()
    >>> # Initializing a model from the Ovis2-2B style configuration
    >>> model = Ovis2ForConditionalGeneration(configuration)
    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```
    """

    model_type = "ovis2"
    sub_configs = {"text_config": Qwen2Config, "vision_config": Ovis2VisionConfig}

    def __init__(
        self,
        vision_config=None,
        text_config=None,
        image_token_id=151665,
        visual_indicator_token_ids=None,
        vocab_size=151643,
        hidden_size=1536,
        tie_word_embeddings=True,
        **kwargs,
    ):
        # Avoid a shared mutable default argument: the previous `[...]` default was a single
        # list object stored on every instance, so mutating one config's ids would have leaked
        # into all configs created afterwards. `None` here means "use the standard five ids".
        if visual_indicator_token_ids is None:
            visual_indicator_token_ids = [151666, 151667, 151668, 151669, 151670]
        # Normalize `vision_config` to an `Ovis2VisionConfig` instance (mirrors `text_config` below).
        if isinstance(vision_config, dict):
            self.vision_config = Ovis2VisionConfig(**vision_config)
        elif isinstance(vision_config, Ovis2VisionConfig):
            self.vision_config = vision_config
        elif vision_config is None:
            # Default vision tower sized to the number of visual indicator tokens requested here.
            self.vision_config = Ovis2VisionConfig(num_visual_indicator_tokens=len(visual_indicator_token_ids))
        # Normalize `text_config` to a `Qwen2Config` instance.
        if isinstance(text_config, dict):
            self.text_config = Qwen2Config(**text_config)
        elif isinstance(text_config, Qwen2Config):
            self.text_config = text_config
        elif text_config is None:
            self.text_config = Qwen2Config()
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.image_token_id = image_token_id
        self.visual_indicator_token_ids = visual_indicator_token_ids
        self.tie_word_embeddings = tie_word_embeddings
        super().__init__(**kwargs)
__all__ = ["Ovis2VisionConfig", "Ovis2Config"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/ovis2/configuration_ovis2.py",
"license": "Apache License 2.0",
"lines": 162,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/ovis2/convert_ovis2_weights_to_hf.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import re
from io import BytesIO
import httpx
import torch
from PIL import Image
from transformers import (
AutoModelForCausalLM,
AutoModelForImageTextToText,
AutoProcessor,
AutoTokenizer,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING_NAMES
from transformers.models.ovis2.configuration_ovis2 import Ovis2Config, Ovis2VisionConfig
from transformers.models.ovis2.image_processing_ovis2 import Ovis2ImageProcessor
from transformers.models.ovis2.modeling_ovis2 import Ovis2ForConditionalGeneration
from transformers.models.ovis2.processing_ovis2 import Ovis2Processor
from transformers.models.qwen2.configuration_qwen2 import Qwen2Config
# Constants
CONTEXT_LENGTH = 32768  # maximum multimodal sequence length ("multimodal_max_length" in the original config)
# fmt: off
# Mapping from original model key patterns to HF key patterns.
# NOTE: `convert_orig2hf` applies these with `re.sub` sequentially, in
# declaration order, so more specific patterns must come before generic ones.
ORIGINAL_TO_HF_MAPPING = {
    r"trunk.blocks\.(\d+)\.norm_1": r"encoder.layers.\1.rms_norm1",
    r"trunk.blocks\.(\d+)\.norm_2": r"encoder.layers.\1.rms_norm2",
    r"trunk.blocks\.(\d+)\.attn.proj": r"encoder.layers.\1.attention.out_proj",
    r"visual_tokenizer": r"model.vision_tower",
    r"backbone": r"transformer",
    r"preprocessor": r"embeddings",
    r"patchifier.proj": r"patch_embedding",
    r"patchifier.norm": r"rms_norm",
    r"trunk.post_trunk_norm": r"rms_norm",
    r"trunk.blocks": r"encoder.layers",
    r"mlp.fc1": r"ffn.gate_proj",
    r"mlp.fc2": r"ffn.down_proj",
    r"mlp.fc3": r"ffn.up_proj",
    r"head.0": r"head_linear",
    r"head.1": r"head_norm",
    r"vte.weight": r"model.visual_embeddings_table.weight",
    r"llm.model": r"model.language_model",
    r"llm.lm_head": r"lm_head",
}
# fmt: on
# Special tokens for the tokenizer.
# Image placeholder/layout tokens added as `additional_special_tokens`.
SPECIAL_TOKENS = [
    "<IMG_ATOM>",
    "<IMG_START>",
    "<IMG_GRID>",
    "<IMG_COL>",
    "<IMG_ROW>",
    "<IMG_END>",
]
# Configuration keys to ignore when converting.
# These are remote-code-only fields that have no equivalent in the HF configs.
UNNECESSARY_CONFIG_KEYS = [
    "_name_or_path",
    "_attn_implementation_autoset",
    "auto_map",
    "use_bfloat16",
    "use_flash_attn",
    "qk_normalization",
    "bias",
    "norm_type",
]
# Chat template for the tokenizer.
# Jinja template using Qwen2-style <|im_start|>/<|im_end|> markup; every
# `{"type": "image"}` content entry is rendered as an `<image>` placeholder.
CHAT_TEMPLATE = (
    "<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n"
    "{% for message in messages %}"
    "{{'<|im_start|>' + message['role'] + '\n'}}"
    "{% if message['content'] is string %}"
    "{{ message['content'] }}"
    "{% else %}"
    "{% for content in message['content'] %}"
    "{% if content['type'] == 'image' %}"
    "{{ '<image>\n' }}"
    "{% elif content['type'] == 'text' %}"
    "{{ content['text'] }}"
    "{% endif %}"
    "{% endfor %}"
    "{% endif %}"
    "{{'<|im_end|>\n'}}"
    "{% endfor %}"
    "{% if add_generation_prompt %}"
    "{{'<|im_start|>assistant\n' }}"
    "{% endif %}"
)
def create_tokenizer(model_name_or_path, save_dir):
    """
    Create and configure a tokenizer for the Ovis2 model.

    Args:
        model_name_or_path: Path to the source model or tokenizer
        save_dir: Directory to save the tokenizer to (currently unused here;
            saving is performed via the processor in `main` — kept for
            interface compatibility)

    Returns:
        The configured tokenizer
    """
    tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, return_token_type_ids=False)
    tokenizer.model_max_length = CONTEXT_LENGTH
    tokenizer.add_special_tokens({"additional_special_tokens": SPECIAL_TOKENS})
    tokenizer.chat_template = CHAT_TEMPLATE
    # Plain attribute assignment is the idiomatic form of setattr() with a
    # constant attribute name.
    tokenizer.image_token = "<IMG_ATOM>"  # 151665
    tokenizer.image_token_id = tokenizer.convert_tokens_to_ids(tokenizer.image_token)
    return tokenizer
def create_image_processor(save_dir):
    """
    Build the Ovis2 image processor used by the converted checkpoint.

    Args:
        save_dir: Directory to save the image processor to (not used here;
            saving happens via the processor in `main`)

    Returns:
        The configured image processor
    """
    return Ovis2ImageProcessor(
        crop_to_patches=True,
        size={"height": 448, "width": 448},
    )
def extract_vision_config_from_original(orig_config):
    """
    Extract and format vision configuration from the original model config.

    Args:
        orig_config: Original model configuration

    Returns:
        dict: Cleaned vision configuration dictionary
    """
    visual_tokenizer_config = orig_config.visual_tokenizer_config.to_dict()
    backbone_config = orig_config.visual_tokenizer_config.backbone_config
    # Flatten the required backbone fields onto the vision config instead of
    # repeating one assignment per attribute.
    for field in (
        "hidden_size",
        "intermediate_size",
        "num_attention_heads",
        "num_hidden_layers",
        "rms_norm_eps",
        "image_size",
        "num_channels",
        "patch_size",
        "qkv_bias",
    ):
        visual_tokenizer_config[field] = getattr(backbone_config, field)
    # Remove keys that have no HF config equivalent.
    return {k: v for k, v in visual_tokenizer_config.items() if k not in UNNECESSARY_CONFIG_KEYS}
def get_ovis2_config(model_name_or_path):
    """
    Build an Ovis2 configuration from the original (remote-code) model.

    Args:
        model_name_or_path: Path to the original model

    Returns:
        Ovis2Config: Configuration for the HF implementation
    """
    original_model = AutoModelForCausalLM.from_pretrained(
        model_name_or_path,
        trust_remote_code=True,
    )
    orig_config = original_model.config

    # Strip remote-code-only keys from the LLM config before re-instantiation.
    llm_config = {
        key: value
        for key, value in orig_config.llm_config.to_dict().items()
        if key not in UNNECESSARY_CONFIG_KEYS
    }

    # Vision config is flattened/cleaned by the dedicated helper.
    vision_config_dict = extract_vision_config_from_original(orig_config)

    return Ovis2Config(
        text_config=Qwen2Config(**llm_config),
        vision_config=Ovis2VisionConfig(**vision_config_dict),
        hidden_size=llm_config["hidden_size"],
        vocab_size=llm_config["vocab_size"],
        initializer_range=llm_config["initializer_range"],
    )
def load_orig_state_dict(model_name_or_path):
    """
    Load the state dictionary from the original model.

    Args:
        model_name_or_path: Path to the original model

    Returns:
        dict: Original model state dictionary
    """
    original_model = AutoModelForCausalLM.from_pretrained(
        model_name_or_path,
        dtype=torch.bfloat16,
        trust_remote_code=True,
    )
    # eval() has no effect on the weights themselves; it matches the original
    # conversion flow.
    return original_model.eval().state_dict()
def convert_orig2hf(state_dict, dim):
    """
    Convert original state dictionary keys to HF format.

    Args:
        state_dict: Original state dictionary
        dim: Hidden dimension for splitting QKV weights

    Returns:
        dict: Converted state dictionary for HF model
    """
    converted = {}
    for orig_key, tensor in state_dict.items():
        # Rewrite the key through every mapping pattern, in declaration order.
        hf_key = orig_key
        for pattern, replacement in ORIGINAL_TO_HF_MAPPING.items():
            hf_key = re.sub(pattern, replacement, hf_key)
        if "attn.qkv" in hf_key:
            # The fused QKV projection is split into separate Q, K, V weights.
            converted[hf_key.replace("attn.qkv", "attention.q_proj")] = tensor[:dim]
            converted[hf_key.replace("attn.qkv", "attention.k_proj")] = tensor[dim : 2 * dim]
            converted[hf_key.replace("attn.qkv", "attention.v_proj")] = tensor[-dim:]
        elif "pos_embed" in hf_key:
            # Drop the leading batch axis of the positional-embedding table.
            converted[hf_key.replace("pos_embed", "position_embedding.weight")] = tensor[0]
        else:
            converted[hf_key] = tensor
    return converted
def convert_model(model_name_or_path):
    """
    Convert the original Ovis2 checkpoint into the HF model format.

    Saving is performed by the caller (`main`), not here.

    Args:
        model_name_or_path: Path to the original model

    Returns:
        The converted model with weights loaded
    """
    config = get_ovis2_config(model_name_or_path)
    config.architectures = ["Ovis2ForConditionalGeneration"]
    # Load and convert weights
    orig_state_dict = load_orig_state_dict(model_name_or_path)
    # QKV splitting uses the vision tower hidden size.
    new_state_dict = convert_orig2hf(orig_state_dict, config.vision_config.hidden_size)
    # Create model and load converted weights
    model = Ovis2ForConditionalGeneration(config)
    # strict=False so mismatches are reported below instead of raising.
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)
    # Report any issues with weight loading
    if missing_keys:
        print(f"Missing keys: {missing_keys}")
    if unexpected_keys:
        print(f"Unexpected keys: {unexpected_keys}")
    return model
def main():
    """Process command line arguments and execute the conversion pipeline.

    Steps: parse args -> build tokenizer/image processor -> convert weights ->
    save model + processor -> optionally push to hub -> run a generation
    smoke test (requires a CUDA device).
    """
    parser = argparse.ArgumentParser(description="Convert Ovis2 model to HF format")
    parser.add_argument(
        "--model_name_or_path",
        default="AIDC-AI/Ovis2-2B",
        choices=[
            "AIDC-AI/Ovis2-1B",
            "AIDC-AI/Ovis2-2B",
            "AIDC-AI/Ovis2-4B",
            "AIDC-AI/Ovis2-8B",
            "AIDC-AI/Ovis2-16B",
            "AIDC-AI/Ovis2-34B",
        ],
        help="Location of original Ovis2 model",
    )
    parser.add_argument("--save_dir", default="Ovis2-2B-hf", help="Location to write HF model and processors")
    parser.add_argument("--hub_dir", default="thisisiron/Ovis2-2B-hf", help="Hub repository name if pushing to hub")
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether to push the converted model to the Hugging Face hub"
    )
    args = parser.parse_args()
    # Execute conversion pipeline
    print(f"Converting model from {args.model_name_or_path} to {args.save_dir}")
    # If already included in the transformers library, remove to avoid duplication.
    if "aimv2" in CONFIG_MAPPING_NAMES:
        CONFIG_MAPPING_NAMES.pop("aimv2")
    tokenizer = create_tokenizer(
        model_name_or_path=args.model_name_or_path,
        save_dir=args.save_dir,
    )
    image_processor = create_image_processor(
        save_dir=args.save_dir,
    )
    os.makedirs(args.save_dir, exist_ok=True)
    # Convert and save the model
    model = convert_model(model_name_or_path=args.model_name_or_path)
    model.save_pretrained(args.save_dir)
    # Save the processor (the tokenizer/image processor are persisted through it)
    processor = Ovis2Processor(tokenizer=tokenizer, image_processor=image_processor, chat_template=CHAT_TEMPLATE)
    processor.save_pretrained(args.save_dir)
    # Push to hub if requested
    if args.push_to_hub:
        model_name = args.hub_dir.split("/")[-1]
        processor.push_to_hub(model_name)
        model.push_to_hub(model_name)
    # --- Smoke test: reload the saved artifacts and run one generation ---
    # NOTE(review): this unconditionally requires a CUDA device.
    model = (
        AutoModelForImageTextToText.from_pretrained(
            args.save_dir,
            dtype=torch.bfloat16,
        )
        .eval()
        .to("cuda:0")
    )
    processor = AutoProcessor.from_pretrained(args.save_dir)
    messages = [
        {
            "role": "user",
            "content": [
                {"type": "image"},
                {"type": "text", "text": "Describe the image."},
            ],
        },
    ]
    url = "http://images.cocodataset.org/val2014/COCO_val2014_000000537955.jpg"
    with httpx.stream("GET", url) as response:
        image = Image.open(BytesIO(response.read()))
    messages = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
    print(messages)
    inputs = processor(
        images=[image],
        text=messages,
        return_tensors="pt",
    )
    inputs = inputs.to("cuda:0")
    inputs["pixel_values"] = inputs["pixel_values"].to(torch.bfloat16)
    with torch.inference_mode():
        output_ids = model.generate(**inputs, max_new_tokens=128, do_sample=False)
    # Strip the prompt tokens from each sequence (note: the comprehension target
    # `output_ids` shadows the outer variable; behavior is correct but subtle).
    generated_ids = [output_ids[len(input_ids) :] for input_ids, output_ids in zip(inputs.input_ids, output_ids)]
    output_text = processor.batch_decode(generated_ids, skip_special_tokens=True)
    print(output_text)
if __name__ == "__main__":
main()
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/ovis2/convert_ovis2_weights_to_hf.py",
"license": "Apache License 2.0",
"lines": 330,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/ovis2/image_processing_ovis2.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import lru_cache
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
infer_channel_dimension_format,
is_scaled_image,
make_flat_list_of_images,
to_numpy_array,
valid_images,
validate_preprocess_arguments,
)
from ...processing_utils import ImagesKwargs
from ...utils import TensorType, filter_out_non_signature_kwargs, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
class Ovis2ImageProcessorKwargs(ImagesKwargs, total=False):
    """
    Extra keyword arguments accepted by `Ovis2ImageProcessor.preprocess` on top of the base image kwargs.

    crop_to_patches (`bool`, *optional*, defaults to `False`):
        Whether to crop the image to patches. Can be overridden by the `crop_to_patches` parameter in the
        `preprocess` method.
    min_patches (`int`, *optional*, defaults to 1):
        The minimum number of patches to be extracted from the image. Only has an effect if `crop_to_patches` is
        set to `True`. Can be overridden by the `min_patches` parameter in the `preprocess` method.
    max_patches (`int`, *optional*, defaults to 12):
        The maximum number of patches to be extracted from the image. Only has an effect if `crop_to_patches` is
        set to `True`. Can be overridden by the `max_patches` parameter in the `preprocess` method.
    use_covering_area_grid (`bool`, *optional*, defaults to `True`):
        Whether to use the covering area grid to determine the number of patches. Only has an effect if
        `crop_to_patches` is set to `True`. Can be overridden by the `use_covering_area_grid` parameter in the
        `preprocess` method.
    """
    crop_to_patches: bool
    min_patches: int
    max_patches: int
    use_covering_area_grid: bool
# Similar to image_processing_mllama.get_all_supported_aspect_ratios
@lru_cache(maxsize=10)
def get_all_supported_aspect_ratios(min_image_tiles: int, max_image_tiles: int) -> list[tuple[int, int]]:
    """
    Enumerate every tile grid allowed by the tile-count bounds.

    A grid is a (width, height) pair of tile counts; all pairs whose product
    lies in `[min_image_tiles, max_image_tiles]` are returned, ordered by
    total tile count (stable with respect to generation order).

    Args:
        min_image_tiles (`int`):
            The minimum number of tiles allowed.
        max_image_tiles (`int`):
            The maximum number of tiles allowed.

    Returns:
        `List[Tuple[int, int]]`: Valid (width, height) tile configurations.

    Example:
        >>> get_all_supported_aspect_ratios(1, 4)
        [(1, 1), (1, 2), (2, 1), (1, 3), (3, 1), (1, 4), (2, 2), (4, 1)]
    """
    candidates = [
        (cols, rows)
        for cols in range(1, max_image_tiles + 1)
        for rows in range(1, max_image_tiles + 1)
        if min_image_tiles <= cols * rows <= max_image_tiles
    ]
    candidates.sort(key=lambda grid: grid[0] * grid[1])
    return candidates
@lru_cache(maxsize=100)
def get_optimal_tiled_canvas(
    original_image_size: tuple[int, int],
    target_tile_size: tuple[int, int],
    min_image_tiles: int,
    max_image_tiles: int,
) -> tuple[int, int]:
    """
    Pick the tile grid whose aspect ratio is closest to the original image's.

    Ties on aspect-ratio difference are broken in favor of the later (larger)
    grid, but only while the combined tile area stays below twice the original
    image area, which avoids unnecessarily excessive tiling.
    """
    image_height, image_width = original_image_size
    tile_height, tile_width = target_tile_size
    image_ratio = image_width / image_height
    image_area = image_width * image_height

    chosen_grid = (1, 1)
    smallest_diff = float("inf")
    for candidate in get_all_supported_aspect_ratios(min_image_tiles, max_image_tiles):
        diff = abs(image_ratio - candidate[0] / candidate[1])
        if diff < smallest_diff:
            smallest_diff = diff
            chosen_grid = candidate
        elif diff == smallest_diff and image_area > 0.5 * tile_height * tile_width * candidate[0] * candidate[1]:
            # Same ratio fit: take the larger grid while coverage is still cheap.
            chosen_grid = candidate
    return chosen_grid
def compute_patch_covering_area(left: int, upper: int, right: int, lower: int, side: int) -> float:
    """Return the area of the region after uniformly shrinking it (if needed) so its longer edge fits `side`."""
    width = right - left
    height = lower - upper
    longer, shorter = max(width, height), min(width, height)
    if longer > side:
        # Scale both edges by the same factor so the longer edge equals `side`.
        shorter = shorter / longer * side
        longer = side
    return longer * shorter
def split_image_into_grid(h: int, w: int, grid: tuple[int, int]) -> list[tuple[int, int, int, int]]:
    """Split an (h, w) image into `grid` = (rows, cols) tile regions as (left, upper, right, lower) boxes.

    The last row/column absorbs any remainder so the tiles exactly cover the image.
    """
    n_rows, n_cols = grid
    tile_h = h // n_rows
    tile_w = w // n_cols
    regions = []
    for row in range(n_rows):
        for col in range(n_cols):
            left = col * tile_w
            upper = row * tile_h
            right = w if col == n_cols - 1 else (col + 1) * tile_w
            lower = h if row == n_rows - 1 else (row + 1) * tile_h
            regions.append((left, upper, right, lower))
    return regions
@lru_cache(maxsize=100)
def get_min_tile_covering_grid(
    image_size: tuple[int, int],
    target_patch_size: int,
    max_image_tiles: int,
    covering_threshold: float = 0.9,
) -> tuple[int, int]:
    """
    Choose the tile grid that covers the image well with the fewest tiles.

    Grids whose covering ratio exceeds `covering_threshold` are preferred,
    with the smallest tile count (ties broken by higher covering) winning.
    If no grid clears the threshold, the best-covering grid is returned.
    """
    height, width = image_size
    total_area = width * height

    scored = []
    for grid in get_all_supported_aspect_ratios(1, max_image_tiles):
        regions = split_image_into_grid(height, width, grid)
        covering = sum(compute_patch_covering_area(*region, target_patch_size) for region in regions) / total_area
        scored.append((grid, covering))

    covering_enough = [entry for entry in scored if entry[1] > covering_threshold]
    if covering_enough:
        # Prefer fewer tiles; among equal tile counts, prefer better covering.
        return min(covering_enough, key=lambda entry: (entry[0][0] * entry[0][1], -entry[1]))[0]
    # Fallback: best covering ratio even if below threshold, then fewer tiles.
    return min(scored, key=lambda entry: (-entry[1], entry[0][0] * entry[0][1]))[0]
class Ovis2ImageProcessor(BaseImageProcessor):
r"""
Constructs a Ovis2 image processor.
Args:
do_resize (`bool`, *optional*, defaults to `True`):
Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by the
`do_resize` parameter in the `preprocess` method.
size (`dict`, *optional*, defaults to `{"height": 384, "width": 384}`):
Size of the output image after resizing. Can be overridden by the `size` parameter in the `preprocess`
method.
crop_to_patches (`bool`, *optional*, defaults to `False`):
Whether to crop the image to patches. Can be overridden by the `crop_to_patches` parameter in the
`preprocess` method.
min_patches (`int`, *optional*, defaults to 1):
The minimum number of patches to be extracted from the image. Only has an effect if `crop_to_patches` is
set to `True`. Can be overridden by the `min_patches` parameter in the `preprocess` method.
max_patches (`int`, *optional*, defaults to 12):
The maximum number of patches to be extracted from the image. Only has an effect if `crop_to_patches` is
set to `True`. Can be overridden by the `max_patches` parameter in the `preprocess` method.
resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`):
Resampling filter to use if resizing the image. Only has an effect if `do_resize` is set to `True`. Can be
overridden by the `resample` parameter in the `preprocess` method.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the
`do_rescale` parameter in the `preprocess` method.
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
Scale factor to use if rescaling the image. Only has an effect if `do_rescale` is set to `True`. Can be
overridden by the `rescale_factor` parameter in the `preprocess` method.
do_normalize (`bool`, *optional*, defaults to `True`):
Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
method. Can be overridden by the `do_normalize` parameter in the `preprocess` method.
image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`):
Mean to use if normalizing the image. This is a float or list of floats the length of the number of
channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method. Can be
overridden by the `image_mean` parameter in the `preprocess` method.
image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`):
Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
Can be overridden by the `image_std` parameter in the `preprocess` method.
do_convert_rgb (`bool`, *optional*, defaults to `True`):
Whether to convert the image to RGB.
use_covering_area_grid (`bool`, *optional*, defaults to `True`):
Whether to use the covering area grid to determine the number of patches. Only has an effect if
`crop_to_patches` is set to `True`. Can be overridden by the `use_covering_area_grid` parameter in the
`preprocess` method.
"""
model_input_names = ["pixel_values"]
valid_kwargs = Ovis2ImageProcessorKwargs
def __init__(
self,
do_resize: bool = True,
size: dict[str, int] | None = None,
crop_to_patches: bool = False,
min_patches: int = 1,
max_patches: int = 12,
resample: PILImageResampling = PILImageResampling.BICUBIC,
do_rescale: bool = True,
rescale_factor: int | float = 1 / 255,
do_normalize: bool = True,
image_mean: float | list[float] | None = None,
image_std: float | list[float] | None = None,
do_convert_rgb: bool = True,
use_covering_area_grid: bool = True,
**kwargs,
) -> None:
super().__init__(**kwargs)
size = size if size is not None else {"height": 384, "width": 384}
size = get_size_dict(size, default_to_square=True)
self.do_resize = do_resize
self.size = size
self.crop_to_patches = crop_to_patches
self.min_patches = min_patches
self.max_patches = max_patches
self.resample = resample
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.do_normalize = do_normalize
self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
self.do_convert_rgb = do_convert_rgb
def resize(
self,
image: np.ndarray,
size: dict[str, int],
resample: PILImageResampling = PILImageResampling.BICUBIC,
data_format: str | ChannelDimension | None = None,
input_data_format: str | ChannelDimension | None = None,
**kwargs,
) -> np.ndarray:
"""
Resize an image to `(size["height"], size["width"])`.
Args:
image (`np.ndarray`):
Image to resize.
size (`Dict[str, int]`):
Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
`PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BICUBIC`.
data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the output image. If unset, the channel dimension format of the input
image is used. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
Returns:
`np.ndarray`: The resized image.
"""
size = get_size_dict(size)
if "height" not in size or "width" not in size:
raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
output_size = (size["height"], size["width"])
return resize(
image,
size=output_size,
resample=resample,
data_format=data_format,
input_data_format=input_data_format,
**kwargs,
)
    @filter_out_non_signature_kwargs()
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool | None = None,
        size: dict[str, int] | None = None,
        crop_to_patches: bool | None = None,
        min_patches: int | None = None,
        max_patches: int | None = None,
        resample: PILImageResampling | None = None,
        do_rescale: bool | None = None,
        rescale_factor: float | None = None,
        do_normalize: bool | None = None,
        image_mean: float | list[float] | None = None,
        image_std: float | list[float] | None = None,
        return_tensors: str | TensorType | None = None,
        do_convert_rgb: bool | None = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        input_data_format: str | ChannelDimension | None = None,
        use_covering_area_grid: bool = True,
    ) -> PIL.Image.Image:
        """
        Preprocess an image or batch of images.

        Args:
            images (`ImageInput`):
                Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
                passing in images with pixel values between 0 and 1, set `do_rescale=False`.
            do_resize (`bool`, *optional*, defaults to `self.do_resize`):
                Whether to resize the image.
            size (`Dict[str, int]`, *optional*, defaults to `self.size`):
                Controls the size of the image after `resize`. The shortest edge of the image is resized to
                `size["shortest_edge"]` whilst preserving the aspect ratio. If the longest edge of this resized image
                is > `int(size["shortest_edge"] * (1333 / 800))`, then the image is resized again to make the longest
                edge equal to `int(size["shortest_edge"] * (1333 / 800))`.
            crop_to_patches (`bool`, *optional*, defaults to `self.crop_to_patches`):
                Whether to crop the image to patches.
            min_patches (`int`, *optional*, defaults to `self.min_patches`):
                The minimum number of patches to be extracted from the image. Only has an effect if `crop_to_patches` is
                set to `True`.
            max_patches (`int`, *optional*, defaults to `self.max_patches`):
                The maximum number of patches to be extracted from the image. Only has an effect if `crop_to_patches` is
                set to `True`.
            resample (`PILImageResampling`, *optional*, defaults to `self.resample`):
                Resampling filter to use if resizing the image. Only has an effect if `do_resize` is set to `True`.
            do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
                Whether to rescale the image values between [0 - 1].
            rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
                Rescale factor to rescale the image by if `do_rescale` is set to `True`.
            do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
                Whether to normalize the image.
            image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
                Image mean to normalize the image by if `do_normalize` is set to `True`.
            image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
                Image standard deviation to normalize the image by if `do_normalize` is set to `True`.
            do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
                Whether to convert the image to RGB.
            return_tensors (`str` or `TensorType`, *optional*):
                The type of tensors to return. Can be one of:
                - Unset: Return a list of `np.ndarray`.
                - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
                - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
            data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
                The channel dimension format for the output image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - Unset: Use the channel dimension format of the input image.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format for the input image. If unset, the channel dimension format is inferred
                from the input image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
            use_covering_area_grid (`bool`, *optional*, defaults to `True`):
                Whether to use the covering area grid to determine the number of patches. Only has an effect if
                `crop_to_patches` is set to `True`.
        """
        # Resolve every unset argument from the instance defaults.
        # NOTE(review): unlike the other flags, `use_covering_area_grid` does not
        # fall back to an instance attribute — confirm this is intentional.
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_to_patches = crop_to_patches if crop_to_patches is not None else self.crop_to_patches
        min_patches = min_patches if min_patches is not None else self.min_patches
        max_patches = max_patches if max_patches is not None else self.max_patches
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        images = self.fetch_images(images)
        images = make_flat_list_of_images(images)
        if not valid_images(images):
            raise ValueError("Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor")
        validate_preprocess_arguments(
            do_rescale=do_rescale,
            rescale_factor=rescale_factor,
            do_normalize=do_normalize,
            image_mean=image_mean,
            image_std=image_std,
            do_resize=do_resize,
            size=size,
            resample=resample,
        )
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_rescale and is_scaled_image(images[0]):
            logger.warning_once(
                "It looks like you are trying to rescale already rescaled images. If the input"
                " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
            )
        if input_data_format is None:
            # We assume that all images have the same channel dimension format.
            input_data_format = infer_channel_dimension_format(images[0])
        if crop_to_patches and max_patches > 1:
            # `crop_image_to_patches` returns (patch_list, grid) pairs; the two
            # list comprehensions below unzip them into flat `images` + `grids`.
            images = [
                self.crop_image_to_patches(
                    image,
                    min_patches=min_patches,
                    max_patches=max_patches,
                    patch_size=size,
                    data_format=input_data_format,
                    use_covering_area_grid=use_covering_area_grid,
                )
                for image in images
            ]
            grids = [grid for _, grid in images]
            images = [image for images_list, _ in images for image in images_list]
        else:
            grids = [(1, 1)] * len(images)
        # Per-image pipeline: resize -> rescale -> normalize -> channel layout.
        for i, image in enumerate(images):
            if do_resize:
                images[i] = self.resize(image, size=size, resample=resample, input_data_format=input_data_format)
            if do_rescale:
                images[i] = self.rescale(image=images[i], scale=rescale_factor, input_data_format=input_data_format)
            if do_normalize:
                images[i] = self.normalize(
                    image=images[i],
                    mean=image_mean,
                    std=image_std,
                    input_data_format=input_data_format,
                )
            images[i] = to_channel_dimension_format(images[i], data_format, input_channel_dim=input_data_format)
        encoded_outputs = BatchFeature(data={"pixel_values": images, "grids": grids}, tensor_type=return_tensors)
        return encoded_outputs
def crop_image_to_patches(
    self,
    images: np.ndarray,
    min_patches: int,
    max_patches: int,
    use_covering_area_grid: bool = True,
    patch_size: tuple | int | dict | None = None,
    data_format: ChannelDimension | None = None,
    covering_threshold: float = 0.9,
):
    """
    Crop the image to patches and return a list of cropped images.

    The number of patches and their grid arrangement are determined by the original image size,
    the target patch size and the minimum and maximum number of patches.
    The aspect ratio of the patches grid is chosen to be the closest to the original image aspect ratio.

    Args:
        images (`np.ndarray`):
            The image to be cropped.
        min_patches (`int`):
            The minimum number of patches to be extracted from the image.
        max_patches (`int`):
            The maximum number of patches to be extracted from the image.
        use_covering_area_grid (`bool`, *optional*, defaults to `True`):
            Whether to use the covering area grid to determine the number of patches.
        patch_size (`int`, `Tuple[int, int]`, `dict`, *optional*):
            The size of the output patches. NOTE(review): the body indexes
            `patch_size["height"]` / `patch_size["width"]`, so only the dict form is
            actually supported here — confirm before passing an int or tuple.
        data_format (`ChannelDimension`, *optional*):
            The format of the image data. If `None`, the format is inferred from the input image.
        covering_threshold (`float`, *optional*, defaults to `0.9`):
            The threshold for the covering area grid. If the covering area is less than this value, the grid is
            considered invalid.

    Returns:
        `Tuple[list[np.ndarray], tuple[int, int]]`: The list of cropped images (with a leading
        thumbnail when more than one patch is produced) and the `(num_rows, num_columns)` grid.
    """
    if data_format is None:
        data_format = infer_channel_dimension_format(images)
    # Work channels-first internally; each patch is converted back to `data_format` below.
    images = to_channel_dimension_format(images, ChannelDimension.FIRST, data_format)
    patch_size_height, patch_size_width = patch_size["height"], patch_size["width"]
    original_height, original_width = images.shape[-2:]
    if use_covering_area_grid:
        # Use the original OVIS2 approach: compute the minimal number of tiles that cover at least 90% of the image area
        num_columns, num_rows = get_min_tile_covering_grid(
            (original_height, original_width),
            target_patch_size=patch_size_height,  # square patch size
            max_image_tiles=max_patches,
            covering_threshold=covering_threshold,
        )
    else:
        # find the closest aspect ratio to the target
        num_columns, num_rows = get_optimal_tiled_canvas(
            (original_height, original_width),
            (patch_size_height, patch_size_width),
            min_patches,
            max_patches,
        )
    # calculate the target width and height
    target_width = patch_size_width * num_columns
    target_height = patch_size_height * num_rows
    num_blocks = num_columns * num_rows

    # resize the image so that each patch is of patch_size
    resized_image = self.resize(
        images,
        {"height": target_height, "width": target_width},
        data_format=ChannelDimension.FIRST,
        input_data_format=ChannelDimension.FIRST,
    )
    # split the image into patches
    processed_images = []
    for i in range(num_blocks):
        column = i % num_columns
        row = i // num_columns
        # (left, top, right, bottom) pixel coordinates of the current tile.
        box = (
            column * patch_size_width,
            row * patch_size_height,
            (column + 1) * patch_size_width,
            (row + 1) * patch_size_height,
        )
        # split the image
        patch_image = resized_image[..., box[1] : box[3], box[0] : box[2]]
        patch_image = to_channel_dimension_format(patch_image, data_format, ChannelDimension.FIRST)
        processed_images.append(patch_image)

    if len(processed_images) != 1:
        # Prepend a global "thumbnail" view of the whole image when the image was tiled.
        thumbnail_img = self.resize(
            images, patch_size, data_format=data_format, input_data_format=ChannelDimension.FIRST
        )
        processed_images.insert(0, thumbnail_img)

    return processed_images, (num_rows, num_columns)
__all__ = ["Ovis2ImageProcessor"]  # public API of this module
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/ovis2/image_processing_ovis2.py",
"license": "Apache License 2.0",
"lines": 523,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/ovis2/image_processing_ovis2_fast.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
import torch
import torchvision.transforms.v2.functional as tvF
from ...image_processing_utils import BatchFeature
from ...image_processing_utils_fast import (
BaseImageProcessorFast,
group_images_by_shape,
reorder_images,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ImageInput,
PILImageResampling,
SizeDict,
)
from ...processing_utils import Unpack
from ...utils import (
TensorType,
auto_docstring,
)
from .image_processing_ovis2 import Ovis2ImageProcessorKwargs, get_min_tile_covering_grid, get_optimal_tiled_canvas
@auto_docstring
class Ovis2ImageProcessorFast(BaseImageProcessorFast):
    # Defaults mirror the slow `Ovis2ImageProcessor`.
    resample = PILImageResampling.BICUBIC
    image_mean = OPENAI_CLIP_MEAN
    image_std = OPENAI_CLIP_STD
    size = {"height": 384, "width": 384}
    default_to_square = None
    do_resize = True
    do_rescale = True
    do_normalize = True
    do_convert_rgb = True
    crop_to_patches = False
    min_patches = 1
    max_patches = 12
    use_covering_area_grid = True
    valid_kwargs = Ovis2ImageProcessorKwargs

    @auto_docstring
    def preprocess(self, images: ImageInput, **kwargs: Unpack[Ovis2ImageProcessorKwargs]) -> BatchFeature:
        return super().preprocess(images, **kwargs)

    def crop_image_to_patches(
        self,
        images: "torch.Tensor",
        min_patches: int,
        max_patches: int,
        use_covering_area_grid: bool = True,
        covering_threshold: float = 0.9,
        patch_size: tuple | int | dict | None = None,
        interpolation: Optional["tvF.InterpolationMode"] = None,
    ):
        """
        Crop the images to patches and return a list of cropped images.

        The number of patches and their grid arrangement are determined by the original image size,
        the target patch size and the minimum and maximum number of patches.
        The aspect ratio of the patches grid is chosen to be the closest to the original image aspect ratio.

        Args:
            images (`torch.Tensor`):
                The images to be cropped, stacked along the first dimension (all sharing one shape).
            min_patches (`int`):
                The minimum number of patches to be extracted from the image.
            max_patches (`int`):
                The maximum number of patches to be extracted from the image.
            use_covering_area_grid (`bool`, *optional*, defaults to `True`):
                Whether to use the original OVIS2 approach: compute the minimal number of tiles that cover at least 90%
                of the image area. If `False`, the closest aspect ratio to the target is used.
            covering_threshold (`float`, *optional*, defaults to `0.9`):
                The threshold for the covering area. Only has an effect if `use_covering_area_grid` is set to `True`.
            patch_size (`int`, `Tuple[int, int]`, `dict`, *optional*):
                The size of the output patches; accessed via `.height` / `.width` (a `SizeDict`).
            interpolation (`InterpolationMode`):
                Resampling filter to use if resizing the image.

        Returns:
            `Tuple[torch.Tensor, list[list[int]]]`: The patches stacked per image and the
            `[num_rows, num_columns]` grid used for every image in the batch.
        """
        num_image = images.shape[0]
        patch_size_height, patch_size_width = patch_size.height, patch_size.width
        original_height, original_width = images.shape[-2:]
        if use_covering_area_grid:
            # Use the original OVIS2 approach: compute the minimal number of tiles that cover at least 90% of the image area
            num_columns, num_rows = get_min_tile_covering_grid(
                (original_height, original_width),
                target_patch_size=patch_size_height,  # square patch size
                max_image_tiles=max_patches,
                covering_threshold=covering_threshold,
            )
        else:
            # find the closest aspect ratio to the target
            num_columns, num_rows = get_optimal_tiled_canvas(
                (original_height, original_width), (patch_size_height, patch_size_width), min_patches, max_patches
            )
        # calculate the target width and height
        target_width = patch_size_width * num_columns
        target_height = patch_size_height * num_rows
        num_blocks = num_columns * num_rows

        # resize the image so that each patch is of patch_size
        resized_image = self.resize(
            images, SizeDict(height=target_height, width=target_width), interpolation=interpolation
        )
        # split the image into patches
        processed_images = []
        for i in range(num_blocks):
            column = i % num_columns
            row = i // num_columns
            # (left, top, right, bottom) pixel coordinates of the current tile.
            box = (
                column * patch_size_width,
                row * patch_size_height,
                (column + 1) * patch_size_width,
                (row + 1) * patch_size_height,
            )
            # split the image
            patch_image = resized_image[..., box[1] : box[3], box[0] : box[2]]
            processed_images.append(patch_image)

        if len(processed_images) != 1:
            # Prepend a global "thumbnail" view of the whole image when the image was tiled.
            thumbnail_img = self.resize(images, patch_size, interpolation=interpolation)
            processed_images.insert(0, thumbnail_img)

        # (num_patches, num_images, ...) -> (num_images, num_patches, ...)
        processed_images = torch.stack(processed_images, dim=0).transpose(0, 1).contiguous()
        grid = [[num_rows, num_columns] for _ in range(num_image)]

        return processed_images, grid

    def _preprocess(
        self,
        images: list["torch.Tensor"],
        do_resize: bool,
        size: SizeDict,
        crop_to_patches: bool,
        min_patches: int,
        max_patches: int,
        use_covering_area_grid: bool,
        interpolation: Optional["tvF.InterpolationMode"],
        do_center_crop: bool,
        crop_size: SizeDict,
        do_rescale: bool,
        rescale_factor: float,
        do_normalize: bool,
        image_mean: float | list[float] | None,
        image_std: float | list[float] | None,
        disable_grouping: bool | None,
        return_tensors: str | TensorType | None,
        **kwargs,
    ) -> BatchFeature:
        """Run the fast pipeline: optional patch cropping, resize, optional center crop,
        fused rescale+normalize; returns pixel values and per-image patch grids."""
        if crop_to_patches and max_patches > 1:
            # Group same-shaped images so cropping runs batched per shape.
            grouped_images, grouped_images_index = group_images_by_shape(images, disable_grouping=disable_grouping)
            processed_images_grouped = {}
            grids = {}
            for shape, stacked_images in grouped_images.items():
                stacked_images, grid = self.crop_image_to_patches(
                    stacked_images,
                    min_patches,
                    max_patches,
                    patch_size=size,
                    use_covering_area_grid=use_covering_area_grid,
                    interpolation=interpolation,
                )
                processed_images_grouped[shape] = stacked_images
                grids[shape] = grid
            images = reorder_images(processed_images_grouped, grouped_images_index)
            # Flatten per-image patch stacks into one flat list of patch tensors.
            images = [image for images_list in images for image in images_list]
            grids = reorder_images(grids, grouped_images_index)
        else:
            # No tiling: every image is its own 1x1 grid.
            grids = [[1, 1] for _ in range(len(images))]

        # Group images by size for batched resizing
        grouped_images, grouped_images_index = group_images_by_shape(images, disable_grouping=disable_grouping)
        resized_images_grouped = {}
        for shape, stacked_images in grouped_images.items():
            if do_resize:
                stacked_images = self.resize(image=stacked_images, size=size, interpolation=interpolation)
            resized_images_grouped[shape] = stacked_images
        resized_images = reorder_images(resized_images_grouped, grouped_images_index)

        # Group images by size for further processing
        # Needed in case do_resize is False, or resize returns images with different sizes
        grouped_images, grouped_images_index = group_images_by_shape(resized_images, disable_grouping=disable_grouping)
        processed_images_grouped = {}
        for shape, stacked_images in grouped_images.items():
            if do_center_crop:
                stacked_images = self.center_crop(stacked_images, crop_size)
            # Fused rescale and normalize
            stacked_images = self.rescale_and_normalize(
                stacked_images, do_rescale, rescale_factor, do_normalize, image_mean, image_std
            )
            processed_images_grouped[shape] = stacked_images
        processed_images = reorder_images(processed_images_grouped, grouped_images_index)

        return BatchFeature(data={"pixel_values": processed_images, "grids": grids}, tensor_type=return_tensors)
__all__ = ["Ovis2ImageProcessorFast"]  # public API of this module
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/ovis2/image_processing_ovis2_fast.py",
"license": "Apache License 2.0",
"lines": 197,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/ovis2/modular_ovis2.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from dataclasses import dataclass
import torch
from torch import nn
from ... import initialization as init
from ...cache_utils import Cache
from ...generation import GenerationMixin
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling
from ...modeling_utils import PreTrainedModel
from ...processing_utils import Unpack
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple
from ...utils.generic import merge_with_config_defaults
from ...utils.output_capturing import capture_outputs
from ..aimv2.modeling_aimv2 import Aimv2Attention, Aimv2EncoderLayer
from ..auto import AutoModel
from ..llama.modeling_llama import LlamaMLP, LlamaRMSNorm
from ..llava.modeling_llava import LlavaForConditionalGeneration, LlavaModel
from ..llava_next.modeling_llava_next import LlavaNextCausalLMOutputWithPast, LlavaNextModelOutputWithPast
from ..siglip.modeling_siglip import SiglipEncoder, SiglipVisionEmbeddings
from .configuration_ovis2 import Ovis2Config, Ovis2VisionConfig
def hard_softmax(logits: torch.Tensor, dim: int):
    """Return a one-hot argmax in the forward pass with softmax gradients (straight-through)."""
    soft = logits.softmax(dim)
    argmax_idx = soft.max(dim, keepdim=True)[1]
    one_hot = torch.zeros_like(logits, memory_format=torch.legacy_contiguous_format)
    one_hot.scatter_(dim, argmax_idx, 1.0)
    # Forward value is the one-hot vector; the gradient flows through the softmax term.
    return one_hot - soft.detach() + soft
@dataclass
@auto_docstring
class BaseModelOutputWithVisualIndicatorFeatures(BaseModelOutputWithPooling):
    r"""
    visual_indicator_features (`torch.FloatTensor` of shape `(batch_size, visual_indicator_size)`):
        Visual indicator features extracted from the model, which can be used for auxiliary tasks or further processing.
    """

    # Embeddings for the visual-indicator tokens; `None` when not computed.
    visual_indicator_features: torch.FloatTensor | None = None
# Reused unchanged from LlavaNext; redefined only to live in the Ovis2 namespace.
class Ovis2ModelOutputWithPast(LlavaNextModelOutputWithPast):
    pass
# Reused unchanged from LlavaNext; redefined only to live in the Ovis2 namespace.
class Ovis2CausalLMOutputWithPast(LlavaNextCausalLMOutputWithPast):
    pass
# RMS normalization reused unchanged from Llama.
class Ovis2RMSNorm(LlamaRMSNorm):
    pass
# MLP reused unchanged from Llama; used inside the vision tower.
class Ovis2VisionMLP(LlamaMLP):
    pass
class Ovis2VisionEmbeddings(SiglipVisionEmbeddings):
    """Siglip-style patch embeddings with an extra RMS norm applied before the position embedding."""

    def __init__(self, config: Ovis2VisionConfig):
        super().__init__(config)
        self.rms_norm = Ovis2RMSNorm(config.hidden_size, config.rms_norm_eps)

    def interpolate_pos_encoding(self):
        raise NotImplementedError("Not needed for Ovis2")

    def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor:
        # Cast pixels to the conv kernel's dtype so mixed-precision inputs work.
        conv_dtype = self.patch_embedding.weight.dtype
        patches = self.patch_embedding(pixel_values.to(dtype=conv_dtype))
        # (batch, hidden, h, w) -> (batch, h*w, hidden), then normalize.
        tokens = self.rms_norm(patches.flatten(2).transpose(1, 2))
        return tokens + self.position_embedding(self.position_ids)
# Attention reused unchanged from Aimv2.
class Ovis2VisionAttention(Aimv2Attention):
    pass
class Ovis2VisionEncoderLayer(Aimv2EncoderLayer):
    # Swaps in the Ovis2 attention implementation, keeping the rest of the Aimv2 layer.
    def __init__(self, config: Ovis2VisionConfig):
        # NOTE(review): parent __init__ is called without `config`; this file is a modular
        # definition resolved by the transformers modular converter — confirm if editing by hand.
        super().__init__()
        self.attention = Ovis2VisionAttention(config)
class Ovis2VisionEncoder(SiglipEncoder):
    """Stack of Ovis2 vision encoder layers applied in sequence."""

    def __init__(self, config: Ovis2VisionConfig):
        super().__init__(config)
        num_layers = config.num_hidden_layers
        self.layers = nn.ModuleList(Ovis2VisionEncoderLayer(config) for _ in range(num_layers))

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        inputs_embeds,
        attention_mask: torch.Tensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> BaseModelOutput:
        # Thread the activations through every layer in order.
        states = inputs_embeds
        for layer in self.layers:
            states = layer(states, attention_mask, **kwargs)
        return BaseModelOutput(last_hidden_state=states)
class Ovis2VisionTransformer(nn.Module):
    """Vision backbone: patch embeddings, encoder stack, and a final RMS norm."""

    def __init__(self, config: Ovis2VisionConfig):
        super().__init__()
        self.config = config
        self.embeddings = Ovis2VisionEmbeddings(config)
        self.encoder = Ovis2VisionEncoder(config)
        self.rms_norm = Ovis2RMSNorm(config.hidden_size, config.rms_norm_eps)
        self.gradient_checkpointing = False

    @can_return_tuple
    def forward(
        self,
        pixel_values,
        attention_mask: torch.Tensor | None = None,
        **kwargs,
    ):
        embedded = self.embeddings(pixel_values)
        encoder_outputs: BaseModelOutput = self.encoder(
            inputs_embeds=embedded,
            attention_mask=attention_mask,
            **kwargs,
        )
        # Normalize the final hidden states before they reach the visual tokenizer head.
        normalized = self.rms_norm(encoder_outputs.last_hidden_state)
        return BaseModelOutput(last_hidden_state=normalized)
class Ovis2VisualEmbeddingTable(nn.Embedding):
    """Embedding table that also accepts soft tokens.

    Integer ids go through the normal embedding lookup; floating-point inputs are
    treated as weights over the vocabulary and mapped with a matmul against the table.
    """

    def forward(self, visual_tokens: torch.Tensor) -> torch.Tensor:
        integer_dtypes = [torch.int8, torch.int16, torch.int32, torch.int64, torch.long]
        if visual_tokens.dtype in integer_dtypes:
            # Hard token ids: standard lookup.
            return super().forward(visual_tokens)
        # Soft tokens: weighted combination of the embedding rows.
        return torch.matmul(visual_tokens, self.weight)
class Ovis2PreTrainedModel(PreTrainedModel):
    """Base class wiring Ovis2 models into the transformers loading/initialization machinery."""

    config: Ovis2Config
    base_model_prefix = "model"
    input_modalities = ("image", "text")
    supports_gradient_checkpointing = True
    _no_split_modules = ["Ovis2VisionAttention"]
    _skip_keys_device_placement = "past_key_values"
    _supports_cache_class = True
    _supports_flash_attn = True
    _supports_flex_attn = True
    _supports_sdpa = True

    _can_compile_fullgraph = True
    _supports_attention_backend = True

    def _init_weights(self, module):
        # Delegate standard init, then (re)materialize the position_ids buffer of the
        # vision embeddings as 0..N-1 with a leading batch dimension.
        super()._init_weights(module)
        if isinstance(module, Ovis2VisionEmbeddings):
            init.copy_(module.position_ids, torch.arange(module.position_ids.shape[-1]).expand((1, -1)))
class Ovis2VisionModel(Ovis2PreTrainedModel):
    """Vision tower + visual tokenizer head.

    Encodes pixel values, optionally merges `hidden_stride x hidden_stride` token
    neighborhoods, and maps the result to a (soft) distribution over the visual vocabulary.
    """

    config: Ovis2VisionConfig

    _can_record_outputs = {
        "hidden_states": Ovis2VisionEncoderLayer,
        "attentions": Ovis2VisionAttention,
    }

    def __init__(self, config: Ovis2VisionConfig):
        super().__init__(config)
        self.config = config
        self.transformer = Ovis2VisionTransformer(config)
        self.num_visual_indicator_tokens = config.num_visual_indicator_tokens
        self.vocab_size = config.vocab_size
        # Projects (stride-merged) hidden states onto the visual vocabulary, excluding the
        # indicator tokens, which are appended later by Ovis2Model.get_image_features.
        self.head_linear = nn.Linear(
            config.hidden_size * config.hidden_stride * config.hidden_stride,
            self.vocab_size - self.num_visual_indicator_tokens,
            bias=False,
        )
        self.head_norm = nn.LayerNorm(self.vocab_size - self.num_visual_indicator_tokens)

        self.post_init()

    @merge_with_config_defaults
    @capture_outputs
    def forward(
        self, pixel_values: torch.FloatTensor, **kwargs: Unpack[TransformersKwargs]
    ) -> tuple | BaseModelOutputWithVisualIndicatorFeatures:
        """Encode `pixel_values`; returns hidden states (`last_hidden_state`) and the
        visual-token probabilities (`pooler_output`).

        Raises:
            ValueError: if the token grid is not square, or `config.tokenize_function`
                is not one of `'gumbel_argmax'`, `'st_argmax'`, `'softmax'`.
        """
        outputs = self.transformer(pixel_values, **kwargs)
        last_hidden_state = outputs[0]

        if self.config.hidden_stride > 1:
            # Merge each hidden_stride x hidden_stride neighborhood of patch tokens into one token.
            num_images, seq_len, hidden_dim = last_hidden_state.shape
            hidden_stride = self.config.hidden_stride
            sqrt_l = int(math.sqrt(seq_len))
            if sqrt_l * sqrt_l != seq_len:
                raise ValueError("Token sequence length must be a perfect square")
            pad_size = (hidden_stride - (sqrt_l % hidden_stride)) % hidden_stride
            # NOTE(review): for pad_size > 0 this pads the flattened sequence and the batch
            # dimensions rather than the 2-D token grid — with standard configs pad_size == 0;
            # confirm before relying on non-divisible grid sizes.
            last_hidden_state = nn.functional.pad(last_hidden_state, (0, 0, 0, pad_size, 0, pad_size), "constant", 0)
            sqrt_l += pad_size
            last_hidden_state = last_hidden_state.reshape(
                num_images, sqrt_l // hidden_stride, hidden_stride, sqrt_l // hidden_stride, hidden_stride, hidden_dim
            )
            last_hidden_state = last_hidden_state.permute(0, 1, 3, 2, 4, 5)
            last_hidden_state = last_hidden_state.reshape(
                num_images, -1, hidden_stride * hidden_stride * hidden_dim
            )  # (n, (sqrt_l//hs)^2, hs^2*d)

        logits = self.head_linear(last_hidden_state)
        logits = self.head_norm(logits)

        if self.config.tokenize_function == "gumbel_argmax":
            prob_token = nn.functional.gumbel_softmax(logits, dim=-1, hard=True)
        elif self.config.tokenize_function == "st_argmax":
            prob_token = hard_softmax(logits, dim=-1)
        elif self.config.tokenize_function == "softmax":
            prob_token = nn.functional.softmax(logits, dim=-1)
        else:
            # Fix: an unsupported value previously surfaced as an opaque UnboundLocalError below.
            raise ValueError(
                f"Invalid `tokenize_function`: {self.config.tokenize_function!r}. "
                "Expected one of 'gumbel_argmax', 'st_argmax' or 'softmax'."
            )

        return BaseModelOutputWithVisualIndicatorFeatures(
            last_hidden_state=last_hidden_state,
            pooler_output=prob_token,
        )
class Ovis2Model(LlavaModel):
    """Multimodal backbone: vision tower + visual embedding table + language model."""

    _checkpoint_conversion_mapping = {}

    def __init__(self, config: Ovis2Config):
        super().__init__(config)
        self.vision_tower = Ovis2VisionModel(config.vision_config)
        # Maps (soft) visual-vocabulary tokens into the language-model embedding space.
        self.visual_embeddings_table = Ovis2VisualEmbeddingTable(config.vision_config.vocab_size, config.hidden_size)
        self.visual_vocab_size = config.vision_config.vocab_size
        self.vocab_size = config.vocab_size
        self.visual_indicator_token_ids = config.visual_indicator_token_ids
        self.language_model = AutoModel.from_config(config.text_config)
        # Ovis2 uses the embedding table above instead of Llava's projector.
        del self.multi_modal_projector

    @can_return_tuple
    @auto_docstring(
        custom_intro="Obtains image last hidden states from the vision tower and apply multimodal projection."
    )
    def get_image_features(
        self,
        pixel_values: torch.FloatTensor,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple | BaseModelOutputWithVisualIndicatorFeatures:
        image_outputs = self.vision_tower(pixel_values, return_dict=True, **kwargs)
        image_features = image_outputs.pooler_output
        batch_size, img_seq_len, _ = image_features.shape
        # Zero-pad the probability vectors for the indicator-token slots so they line up
        # with the full visual embedding table.
        padding_tensor = torch.zeros(
            (batch_size, img_seq_len, self.vision_tower.num_visual_indicator_tokens),
            dtype=image_features.dtype,
            device=image_features.device,
            requires_grad=False,
            layout=image_features.layout,
        )
        image_features = torch.cat([image_features, padding_tensor], dim=2)
        image_features = self.visual_embeddings_table(image_features)

        # Indicator tokens occupy the last `num_visual_indicator_tokens` rows of the table.
        visual_indicator = torch.arange(
            self.visual_vocab_size - self.vision_tower.num_visual_indicator_tokens,
            self.visual_vocab_size,
            dtype=torch.long,
        ).to(image_features.device)
        image_outputs.pooler_output = image_features
        image_outputs.visual_indicator_features = self.visual_embeddings_table(visual_indicator)

        return image_outputs

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        pixel_values: torch.FloatTensor | None = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: Cache | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        # `labels` is accepted for signature compatibility; it is not used in this forward.
        labels: torch.LongTensor | None = None,
        use_cache: bool | None = None,
        output_attentions: bool | None = None,
        output_hidden_states: bool | None = None,
        return_dict: bool | None = None,
        cache_position: torch.LongTensor | None = None,
        logits_to_keep: int | torch.Tensor = 0,
        **kwargs,
    ) -> tuple | Ovis2ModelOutputWithPast:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

        if inputs_embeds is None:
            inputs_embeds = self.get_input_embeddings()(input_ids)

        if pixel_values is not None:
            image_outputs = self.get_image_features(pixel_values=pixel_values, return_dict=True)
            image_features = image_outputs.pooler_output
            visual_indicator_features = image_outputs.visual_indicator_features

            # Scatter image features into the placeholder positions of the text embeddings.
            special_image_mask = self.get_placeholder_mask(
                input_ids,
                inputs_embeds=inputs_embeds,
                image_features=image_features,
            )
            inputs_embeds = inputs_embeds.masked_scatter(special_image_mask, image_features)

            # Overwrite every visual-indicator token position with its dedicated embedding.
            for i, visual_indicator_id in enumerate(self.visual_indicator_token_ids):
                if input_ids is None:
                    # Embeds-only path: locate indicator positions by comparing embedding vectors.
                    mask = inputs_embeds == self.get_input_embeddings()(
                        torch.tensor(visual_indicator_id, dtype=torch.long, device=inputs_embeds.device)
                    )
                    mask = mask.all(-1)
                else:
                    mask = (input_ids == visual_indicator_id).to(inputs_embeds.device)
                if mask.any():
                    inputs_embeds[mask] = (
                        visual_indicator_features[i]
                        .expand_as(inputs_embeds[mask])
                        .to(inputs_embeds.device, inputs_embeds.dtype)
                    )

        outputs = self.language_model(
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=True,
            cache_position=cache_position,
            logits_to_keep=logits_to_keep,
            **kwargs,
        )

        return Ovis2ModelOutputWithPast(
            last_hidden_state=outputs.last_hidden_state,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            image_hidden_states=image_features if pixel_values is not None else None,
        )
@auto_docstring
class Ovis2ForConditionalGeneration(LlavaForConditionalGeneration, GenerationMixin):
    """Ovis2 model with a language-modeling head for conditional text generation."""

    _checkpoint_conversion_mapping = {}

    def __init__(self, config: Ovis2Config):
        super().__init__(config)
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

    @auto_docstring
    def get_image_features(
        self, pixel_values: torch.FloatTensor, **kwargs: Unpack[TransformersKwargs]
    ) -> tuple | BaseModelOutputWithVisualIndicatorFeatures:
        # Thin delegation to the inner Ovis2Model.
        return self.model.get_image_features(pixel_values=pixel_values, **kwargs)

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        pixel_values: torch.FloatTensor | None = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: Cache | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        labels: torch.LongTensor | None = None,
        use_cache: bool | None = None,
        output_attentions: bool | None = None,
        output_hidden_states: bool | None = None,
        return_dict: bool | None = None,
        cache_position: torch.LongTensor | None = None,
        logits_to_keep: int | torch.Tensor = 0,
        **kwargs,
    ) -> tuple | Ovis2CausalLMOutputWithPast:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Example:

        ```python
        >>> from PIL import Image
        >>> import httpx
        >>> from io import BytesIO

        >>> from transformers import AutoProcessor, Ovis2ForConditionalGeneration

        >>> model = Ovis2ForConditionalGeneration.from_pretrained("thisisiron/Ovis2-2B-hf")
        >>> processor = AutoProcessor.from_pretrained("thisisiron/Ovis2-2B-hf")

        >>> prompt = "<|im_start|>user\n<image>\nDescribe the image.<|im_end|>\n<|im_start|>assistant\n"
        >>> url = "http://images.cocodataset.org/val2014/COCO_val2014_000000537955.jpg"
        >>> with httpx.stream("GET", url) as response:
        ...     image = Image.open(BytesIO(response.read()))

        >>> inputs = processor(images=image, text=prompt, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(**inputs, max_new_tokens=15)
        >>> processor.batch_decode(generate_ids, skip_special_tokens=True)[0]
        "user\n\nDescribe the image.\nassistant\nThe image features a brown dog standing on a wooden floor, looking up with"
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        outputs = self.model(
            input_ids=input_ids,
            pixel_values=pixel_values,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=True,
            cache_position=cache_position,
            **kwargs,
        )

        hidden_states = outputs[0]
        # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
        slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
        logits = self.lm_head(hidden_states[:, slice_indices, :])

        loss = None
        if labels is not None:
            loss = self.loss_function(
                logits=logits, labels=labels, vocab_size=self.config.text_config.vocab_size, **kwargs
            )

        return Ovis2CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            image_hidden_states=outputs.image_hidden_states,
        )
__all__ = ["Ovis2PreTrainedModel", "Ovis2Model", "Ovis2ForConditionalGeneration"]  # public API of this module
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/ovis2/modular_ovis2.py",
"license": "Apache License 2.0",
"lines": 391,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/ovis2/processing_ovis2.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ...feature_extraction_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessingKwargs, ProcessorMixin, Unpack
from ...tokenization_utils_base import PreTokenizedInput, TextInput
from ...utils import auto_docstring, logging
logger = logging.get_logger(__name__)  # module-level logger
class Ovis2ProcessorKwargs(ProcessingKwargs, total=False):
    # Per-modality defaults merged with caller kwargs by `ProcessorMixin._merge_kwargs`.
    _defaults = {
        "text_kwargs": {
            "padding": False,
        },
        "image_kwargs": {},
    }
@auto_docstring
class Ovis2Processor(ProcessorMixin):
    def __init__(
        self,
        image_processor=None,
        tokenizer=None,
        chat_template=None,
        image_token="<image>",
        image_seq_length=256,
        **kwargs,
    ):
        r"""
        image_token (`str`, *optional*, defaults to `"<image>"`):
            Special token used to denote image location.
        image_seq_length (`int`, *optional*, defaults to 256):
            The number of image tokens to be used for each image in the input.
        """
        self.image_seq_length = image_seq_length
        # Prefer the token registered on the tokenizer so the two stay in sync.
        self.image_token = tokenizer.image_token if hasattr(tokenizer, "image_token") else image_token
        self.image_token_id = (
            tokenizer.image_token_id
            if getattr(tokenizer, "image_token_id", None)
            else tokenizer.convert_tokens_to_ids(self.image_token)
        )
        super().__init__(image_processor, tokenizer, chat_template=chat_template, **kwargs)

    @auto_docstring
    def __call__(
        self,
        images: ImageInput | None = None,
        text: TextInput | PreTokenizedInput | list[TextInput] | list[PreTokenizedInput] = None,
        **kwargs: Unpack[Ovis2ProcessorKwargs],
    ) -> BatchFeature:
        r"""
        Returns:
            [`BatchFeature`]: A [`BatchFeature`] with the following fields:

            - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
            - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
              `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
              `None`).
            - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
            - **image_sizes** -- Size of each image that will be used to unpad an image. Returned when `images` is not `None`.
        """
        output_kwargs = self._merge_kwargs(
            Ovis2ProcessorKwargs,
            tokenizer_init_kwargs=self.tokenizer.init_kwargs,
            **kwargs,
        )
        if isinstance(text, str):
            text = [text]
        # Fix: the original condition used `and`, which silently accepted a list of
        # non-strings and crashed later in the tokenizer with a confusing error.
        elif not isinstance(text, list) or not all(isinstance(t, str) for t in text):
            raise TypeError("Invalid input text. Please provide a string, or a list of strings")

        image_inputs = {}
        if images is not None:
            image_inputs = self.image_processor(images, **output_kwargs["images_kwargs"])
            image_grids = image_inputs.pop("grids").tolist()
            # Replace each image placeholder with the expanded patch-token layout.
            text = self._expand_image_tokens(text, image_grids)

        text_inputs = self.tokenizer(text, **output_kwargs["text_kwargs"])
        return BatchFeature(data={**text_inputs, **image_inputs})

    def _expand_image_tokens(
        self,
        text: list[TextInput],
        grids: list[list[int]],
    ):
        """Expand every image placeholder into the `<IMG_*>` token layout for its grid.

        Each image contributes a thumbnail run (`<IMG_START>` + atom tokens + `<IMG_GRID>`)
        and, when the image was tiled (`rows * cols > 1`), one run of atom tokens per tile
        separated by `<IMG_COL>` / `<IMG_ROW>`, terminated by `<IMG_END>`.
        """
        processed_text = []
        grid_index = 0
        for sample in text:
            # Consume one grid per placeholder, in order of appearance.
            # Fix: use the configured `self.image_token` instead of a hardcoded "<image>",
            # keeping this consistent with `__init__`.
            while self.image_token in sample:
                grid = grids[grid_index]
                row, col = grid[0], grid[1]
                placeholder = f"<IMG_START>{'<IMG_ATOM>' * self.image_seq_length}<IMG_GRID>"
                if row * col > 1:
                    for r in range(row):
                        for c in range(col):
                            placeholder += f"{'<IMG_ATOM>' * self.image_seq_length}"
                            if c < col - 1:
                                placeholder += "<IMG_COL>"
                        if r < row - 1:
                            placeholder += "<IMG_ROW>"
                placeholder += "<IMG_END>"
                sample = sample.replace(self.image_token, placeholder, 1)
                grid_index += 1
            processed_text.append(sample)

        return processed_text

    def batch_decode(self, *args, **kwargs):
        """
        This method forwards all its arguments to Qwen2TokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please
        refer to the docstring of this method for more information.
        """
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """
        This method forwards all its arguments to Qwen2TokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to
        the docstring of this method for more information.
        """
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        """Combined tokenizer and image-processor input names expected by the model."""
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(tokenizer_input_names) + list(image_processor_input_names)
# Explicit public API of this module.
__all__ = ["Ovis2Processor"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/ovis2/processing_ovis2.py",
"license": "Apache License 2.0",
"lines": 127,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:tests/models/ovis2/test_image_processing_ovis2.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers.image_utils import SizeDict
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_torchvision_available, is_vision_available
from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import Ovis2ImageProcessor
if is_torchvision_available():
from transformers import Ovis2ImageProcessorFast
class Ovis2ImageProcessingTester(unittest.TestCase):
    """Produces image-processor kwargs and synthetic image inputs for the Ovis2 tests."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        do_pad=False,
        image_mean=None,
        image_std=None,
        do_convert_rgb=True,
    ):
        super().__init__()
        # Avoid mutable default arguments (lists shared across instances): resolve
        # the list/dict defaults per instance instead.
        if image_mean is None:
            image_mean = [0.48145466, 0.4578275, 0.40821073]
        if image_std is None:
            image_std = [0.26862954, 0.26130258, 0.27577711]
        size = size if size is not None else {"height": 20, "width": 20}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
        self.do_convert_rgb = do_convert_rgb

    def prepare_image_processor_dict(self):
        """Kwargs used to instantiate the (slow or fast) image processor under test."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_convert_rgb": self.do_convert_rgb,
            "do_pad": self.do_pad,
        }

    def expected_output_image_shape(self, images):
        """Expected (channels, height, width) of each processed image."""
        return self.num_channels, self.size["height"], self.size["width"]

    def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        """Random PIL / numpy / torch images with resolutions in [min_resolution, max_resolution]."""
        return prepare_image_inputs(
            batch_size=self.batch_size,
            num_channels=self.num_channels,
            min_resolution=self.min_resolution,
            max_resolution=self.max_resolution,
            equal_resolution=equal_resolution,
            numpify=numpify,
            torchify=torchify,
        )
@require_torch
@require_vision
class Ovis2ProcessingTest(ImageProcessingTestMixin, unittest.TestCase):
    """Slow/fast image-processor tests for Ovis2, including patch-cropping behavior."""

    image_processing_class = Ovis2ImageProcessor if is_vision_available() else None
    fast_image_processing_class = Ovis2ImageProcessorFast if is_torchvision_available() else None

    def setUp(self):
        super().setUp()
        self.image_processor_tester = Ovis2ImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        # Every registered processor class must expose the standard config attributes.
        for processor_cls in self.image_processor_list:
            processor = processor_cls(**self.image_processor_dict)
            for attribute in ("do_resize", "size", "do_normalize", "image_mean", "image_std", "do_convert_rgb"):
                self.assertTrue(hasattr(processor, attribute))

    def _assert_slow_fast_match(self, images):
        # Shared body of the slow/fast equivalence checks below.
        slow_processor = self.image_processing_class(**self.image_processor_dict, crop_to_patches=True)
        fast_processor = self.fast_image_processing_class(**self.image_processor_dict, crop_to_patches=True)
        slow_encoding = slow_processor(images, return_tensors="pt")
        fast_encoding = fast_processor(images, return_tensors="pt")
        # torch.testing.assert_close(slow_encoding.num_patches, fast_encoding.num_patches)
        self.assertTrue(torch.allclose(slow_encoding.pixel_values, fast_encoding.pixel_values, atol=1e-1))
        self.assertLessEqual(
            torch.mean(torch.abs(slow_encoding.pixel_values - fast_encoding.pixel_values)).item(), 1e-3
        )

    def test_slow_fast_equivalence_crop_to_patches(self):
        sample = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, torchify=True)[0]
        self._assert_slow_fast_match(sample)

    def test_slow_fast_equivalence_batched_crop_to_patches(self):
        # Two equal-resolution groups sandwiching a mixed-resolution group.
        samples = self.image_processor_tester.prepare_image_inputs(equal_resolution=True, torchify=True)
        samples += self.image_processor_tester.prepare_image_inputs(equal_resolution=False, torchify=True)
        samples += self.image_processor_tester.prepare_image_inputs(equal_resolution=True, torchify=True)
        self._assert_slow_fast_match(samples)

    def test_crop_to_patches(self):
        # Slow processor operates on a single numpy image.
        slow_processor = self.image_processor_list[0](**self.image_processor_dict)
        np_image = self.image_processor_tester.prepare_image_inputs(equal_resolution=True, numpify=True)[0]
        patches, grid = slow_processor.crop_image_to_patches(
            np_image,
            min_patches=1,
            max_patches=6,
            patch_size={"height": 20, "width": 20},
        )
        self.assertEqual(len(patches), 5)
        self.assertEqual(patches[0].shape[:2], (20, 20))
        self.assertEqual(len(grid), 2)  # (row, col)

        # Fast processor operates on a batched tensor.
        fast_processor = self.image_processor_list[1](**self.image_processor_dict)
        tensor_image = self.image_processor_tester.prepare_image_inputs(equal_resolution=True, torchify=True)[0]
        patches, grid = fast_processor.crop_image_to_patches(
            tensor_image.unsqueeze(0),
            min_patches=1,
            max_patches=6,
            patch_size=SizeDict(height=20, width=20),
        )
        self.assertEqual(len(patches[0]), 5)
        self.assertEqual(patches.shape[-2:], (20, 20))
        self.assertEqual(len(grid[0]), 2)
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/ovis2/test_image_processing_ovis2.py",
"license": "Apache License 2.0",
"lines": 149,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:tests/models/ovis2/test_modeling_ovis2.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import requests
from transformers import (
AutoProcessor,
Ovis2Config,
Ovis2ForConditionalGeneration,
Ovis2Model,
is_torch_available,
is_vision_available,
)
from transformers.testing_utils import (
cleanup,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import (
ModelTesterMixin,
floats_tensor,
ids_tensor,
)
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
class Ovis2VisionText2TextModelTester:
    """Builds a tiny Ovis2 config plus random multimodal inputs for the common model tests."""

    def __init__(
        self,
        parent,
        seq_length=7,
        text_config=None,
        is_training=True,
        vision_config=None,
        image_token_id=1,
        visual_indicator_token_ids=None,
        vocab_size=99,
        hidden_size=64,
        ignore_id=-100,
    ):
        # Avoid mutable default arguments (dicts/lists shared across instances):
        # build the default sub-configs per instance instead.
        if text_config is None:
            text_config = {
                "model_type": "qwen2",
                "seq_length": 7,
                "is_training": True,
                "use_labels": True,
                "vocab_size": 99,
                "hidden_size": 64,
                "num_hidden_layers": 2,
                "num_attention_heads": 4,
                "num_key_value_heads": 4,
                "intermediate_size": 54,
                "hidden_act": "gelu",
                "max_position_embeddings": 580,
                "initializer_range": 0.02,
                "num_labels": 3,
                "pad_token_id": 0,
            }
        if vision_config is None:
            vision_config = {
                "image_size": 32,
                "patch_size": 8,
                "num_channels": 3,
                "hidden_size": 64,
                "vocab_size": 99,
                "num_hidden_layers": 2,
                "num_attention_heads": 4,
                "intermediate_size": 54,
                "attention_dropout": 0.0,
                "hidden_act": "silu",
                "qkv_bias": False,
                "hidden_stride": 2,
                "tokenize_function": "softmax",
            }
        if visual_indicator_token_ids is None:
            visual_indicator_token_ids = []
        self.parent = parent
        self.text_config = text_config
        self.vision_config = vision_config
        self.image_token_id = image_token_id
        self.visual_indicator_token_ids = visual_indicator_token_ids
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        # One image contributes (image_size / (patch_size * hidden_stride))**2 tokens.
        self.image_seq_length = (
            vision_config["image_size"] // (vision_config["patch_size"] * vision_config["hidden_stride"])
        ) ** 2
        self.seq_length = seq_length + self.image_seq_length
        self.is_training = is_training

        self.num_attention_heads = text_config["num_attention_heads"]
        self.num_hidden_layers = text_config["num_hidden_layers"]
        self.pad_token_id = text_config["pad_token_id"]
        self.ignore_id = ignore_id
        self.batch_size = 3
        self.num_channels = 3

    def get_config(self):
        """Assemble the composite Ovis2 config from the text/vision sub-configs."""
        return Ovis2Config(
            text_config=self.text_config,
            vision_config=self.vision_config,
            image_token_id=self.image_token_id,
            visual_indicator_token_ids=self.visual_indicator_token_ids,
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
        )

    def prepare_config_and_inputs(self):
        """Return the config together with random pixel values."""
        pixel_values = floats_tensor(
            [
                self.batch_size,
                self.vision_config["num_channels"],
                self.vision_config["image_size"],
                self.vision_config["image_size"],
            ]
        )
        config = self.get_config()
        return config, pixel_values

    def prepare_config_and_inputs_for_common(self):
        """Return config plus a full inputs dict (ids, mask, labels, pixel values)."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        vocab_range = self.vocab_size - 2
        # Shift ids by 2 so special ids 0/1 (pad / image token) are not produced randomly.
        input_ids = ids_tensor([self.batch_size, self.seq_length], vocab_range) + 2
        # The leading positions hold the image placeholder tokens.
        input_ids[:, : self.image_seq_length] = config.image_token_id
        attention_mask = torch.ones(input_ids.shape, dtype=torch.long).to(torch_device)
        labels = torch.zeros((self.batch_size, self.seq_length), dtype=torch.long, device=torch_device)
        # Image positions do not contribute to the loss.
        labels[:, : self.image_seq_length] = self.ignore_id
        inputs_dict = {
            "pixel_values": pixel_values,
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "labels": labels,
        }
        return config, inputs_dict
@require_torch
class Ovis2ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Common-suite tests for `Ovis2Model` and `Ovis2ForConditionalGeneration`.
    """

    all_model_classes = (
        (
            Ovis2Model,
            Ovis2ForConditionalGeneration,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"image-text-to-text": Ovis2ForConditionalGeneration, "any-to-any": Ovis2ForConditionalGeneration}
        if is_torch_available()
        else {}
    )
    # Ovis2 post-processes the last_hidden_state to hidden_size * hidden_stride**2
    skip_test_image_features_output_shape = True
    _is_composite = True

    def setUp(self):
        self.model_tester = Ovis2VisionText2TextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=Ovis2Config, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_inputs_embeds(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device).eval()

            prepared = self._prepare_for_class(inputs_dict, model_class)
            token_ids = prepared.pop("input_ids")
            prepared.pop("pixel_values")

            embedding_layer = model.get_input_embeddings()
            prepared["inputs_embeds"] = embedding_layer(token_ids)

            with torch.no_grad():
                model(**prepared)

    # overwrite inputs_embeds tests because we need to delete "pixel values" for LVLMs
    # while some other models require pixel_values to be present
    def test_inputs_embeds_matches_input_ids(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device).eval()

            prepared = self._prepare_for_class(inputs_dict, model_class)
            token_ids = prepared.pop("input_ids")
            prepared.pop("pixel_values")

            embeds = model.get_input_embeddings()(token_ids)

            with torch.no_grad():
                out_from_ids = model(input_ids=token_ids, **prepared)[0]
                out_from_embeds = model(inputs_embeds=embeds, **prepared)[0]
            torch.testing.assert_close(out_from_embeds, out_from_ids)
@require_torch
@slow
class Ovis2IntegrationTest(unittest.TestCase):
    """End-to-end generation tests against the released `thisisiron/Ovis2-2B-hf` checkpoint.

    These tests download images over HTTP and load the full pretrained model, so they are
    marked `@slow` and require network access.
    """

    def setUp(self):
        # The processor is shared by every test; models are loaded per-test.
        self.processor = AutoProcessor.from_pretrained(
            "thisisiron/Ovis2-2B-hf",
        )
        # COCO image used by most tests below.
        url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        self.image = Image.open(requests.get(url, stream=True).raw)
        self.prompt_image = ""
        self.messages = [
            {
                "role": "user",
                "content": [
                    {"type": "image"},
                    {"type": "text", "text": "What do you see in this image?"},
                ],
            }
        ]
        # Rendered (untokenized) chat prompt with the generation prefix appended.
        self.text = self.processor.apply_chat_template(self.messages, add_generation_prompt=True, tokenize=False)

    def tearDown(self):
        # Release accelerator memory between tests.
        cleanup(torch_device, gc_collect=True)

    def test_small_model_integration_test(self):
        # Single image + single prompt: checks input expansion, pixel shape, and decoded text.
        model = Ovis2ForConditionalGeneration.from_pretrained(
            "thisisiron/Ovis2-2B-hf", dtype="bfloat16", device_map=torch_device
        )

        inputs = self.processor(images=self.image, text=self.text, return_tensors="pt").to(
            torch_device, torch.bfloat16
        )

        self.assertTrue(inputs.input_ids.shape[1] == 1314)  # should expand num-image-tokens times
        self.assertTrue(inputs.pixel_values.shape == torch.Size([5, 3, 448, 448]))

        inputs = inputs.to(torch_device)

        output = model.generate(**inputs, max_new_tokens=64)

        EXPECTED_DECODED_TEXT = 'system\nYou are a helpful assistant.\nuser\n\nWhat do you see in this image?\nassistant\nI see two cats lying on a pink blanket. There are also two remote controls on the blanket.' # fmt: skip
        self.assertEqual(
            self.processor.decode(output[0], skip_special_tokens=True),
            EXPECTED_DECODED_TEXT,
        )

    def test_small_model_integration_test_batch(self):
        # Same prompt wrapped in a batch of one; decoded output must match the single case.
        model = Ovis2ForConditionalGeneration.from_pretrained(
            "thisisiron/Ovis2-2B-hf", dtype="bfloat16", device_map=torch_device
        )

        inputs = self.processor(
            text=[self.text],
            images=self.image,
            return_tensors="pt",
            padding=True,
        ).to(torch_device, torch.bfloat16)

        output = model.generate(**inputs, max_new_tokens=20)

        EXPECTED_DECODED_TEXT = ['system\nYou are a helpful assistant.\nuser\n\nWhat do you see in this image?\nassistant\nI see two cats lying on a pink blanket. There are also two remote controls on the blanket.'] # fmt: skip
        self.assertEqual(
            self.processor.batch_decode(output, skip_special_tokens=True),
            EXPECTED_DECODED_TEXT,
        )

    def test_small_model_integration_test_multi_image(self):
        # Two images in one prompt.
        # related to (#29835)
        model = Ovis2ForConditionalGeneration.from_pretrained(
            "thisisiron/Ovis2-2B-hf",
            dtype="bfloat16",
            device_map=torch_device,
        )

        url = "http://images.cocodataset.org/val2014/COCO_val2014_000000537955.jpg"
        image = Image.open(requests.get(url, stream=True).raw)

        prompt = [
            {
                "role": "user",
                "content": [
                    {"type": "image"},
                    {"type": "image"},
                    {"type": "text", "text": "What do you see in these images?"},
                ],
            }
        ]
        text = self.processor.apply_chat_template(prompt, add_generation_prompt=True, tokenize=False)
        inputs = self.processor(text=text, images=[self.image, image], return_tensors="pt").to(
            torch_device, torch.bfloat16
        )

        output = model.generate(**inputs, max_new_tokens=40)

        EXPECTED_DECODED_TEXT = 'system\nYou are a helpful assistant.\nuser\n\n\nWhat do you see in these images?\nassistant\nIn the first image, I see two cats lying on a pink blanket with remote controls nearby. The second image shows a dog standing on a wooden floor near a kitchen cabinet.' # fmt: skip
        self.assertEqual(
            self.processor.decode(output[0], skip_special_tokens=True),
            EXPECTED_DECODED_TEXT,
        )

    def test_small_model_integration_test_batch_different_resolutions(self):
        # Batch of two prompts whose images have different resolutions (padding path).
        model = Ovis2ForConditionalGeneration.from_pretrained(
            "thisisiron/Ovis2-2B-hf", dtype="bfloat16", device_map=torch_device
        )

        lowres_url = "http://images.cocodataset.org/val2014/COCO_val2014_000000537955.jpg"
        lowres_img = Image.open(requests.get(lowres_url, stream=True).raw).resize((320, 240))

        inputs = self.processor(
            text=[self.text, self.text],
            images=[lowres_img, self.image],
            return_tensors="pt",
            padding=True,
        ).to(torch_device, torch.bfloat16)

        output = model.generate(**inputs, max_new_tokens=20)

        EXPECTED_DECODED_TEXT = [
            'system\nYou are a helpful assistant.\nuser\n\nWhat do you see in this image?\nassistant\nAnswer: I see a brown dog standing on a wooden floor in what appears to be a kitchen.',
            'system\nYou are a helpful assistant.\nuser\n\nWhat do you see in this image?\nassistant\nI see two cats lying on a pink blanket. There are also two remote controls on the blanket.'
        ] # fmt: skip
        self.assertEqual(
            self.processor.batch_decode(output, skip_special_tokens=True),
            EXPECTED_DECODED_TEXT,
        )

    def test_small_model_integration_test_batch_matches_single(self):
        # Batched generation must produce the same text as running the first sample alone.
        model = Ovis2ForConditionalGeneration.from_pretrained(
            "thisisiron/Ovis2-2B-hf",
            dtype="bfloat16",
            device_map=torch_device,
        )

        lowres_url = "https://4.img-dpreview.com/files/p/TS560x560~forums/56876524/03975b28741443319e9a94615e35667e"
        lowres_img = Image.open(requests.get(lowres_url, stream=True).raw)

        inputs_batched = self.processor(
            text=[self.text, self.text],
            images=[self.image, lowres_img],
            return_tensors="pt",
            padding=True,
        ).to(torch_device, torch.bfloat16)

        inputs_single = self.processor(text=self.text, images=self.image, return_tensors="pt", padding=True).to(
            torch_device, torch.bfloat16
        )

        output_batched = model.generate(**inputs_batched, max_new_tokens=50)
        output_single = model.generate(**inputs_single, max_new_tokens=50)

        self.assertEqual(
            self.processor.decode(output_batched[0], skip_special_tokens=True),
            self.processor.decode(output_single[0], skip_special_tokens=True),
        )
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/ovis2/test_modeling_ovis2.py",
"license": "Apache License 2.0",
"lines": 323,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:tests/models/ovis2/test_processor_ovis2.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import unittest
from transformers.testing_utils import require_av, require_vision
from transformers.utils import is_vision_available
from ...test_processing_common import ProcessorTesterMixin
if is_vision_available():
from transformers import (
AutoProcessor,
Ovis2Processor,
)
@require_vision
class Ovis2ProcessorTest(ProcessorTesterMixin, unittest.TestCase):
    """Processor-level tests for Ovis2: JSON serialization round-trips and chat-template handling."""

    processor_class = Ovis2Processor

    @classmethod
    def _setup_tokenizer(cls):
        # Resolve the concrete tokenizer class declared by the processor, then load the
        # tokenizer files of the released Ovis2-1B checkpoint.
        tokenizer_class = cls._get_component_class_from_processor("tokenizer")
        return tokenizer_class.from_pretrained("thisisiron/Ovis2-1B-hf")

    @staticmethod
    def prepare_processor_dict():
        # Extra kwargs the processor is constructed with during the mixin tests.
        return {
            "chat_template": "<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n'}}{% if message['content'] is string %}{{ message['content'] }}{% else %}{% for content in message['content'] %}{% if content['type'] == 'image' %}{{ '<image>\n' }}{% elif content['type'] == 'text' %}{{ content['text'] }}{% endif %}{% endfor %}{% endif %}{{'<|im_end|>\n'}}{% endfor %}{% if add_generation_prompt %}{{'<|im_start|>assistant\n' }}{% endif %}",
        } # fmt: skip

    def test_processor_to_json_string(self):
        processor = self.get_processor()
        obj = json.loads(processor.to_json_string())

        for key, value in self.prepare_processor_dict().items():
            # chat_template is tested separately because it is saved in a separate file
            if key != "chat_template":
                self.assertEqual(obj[key], value)
                self.assertEqual(getattr(processor, key, None), value)

    def test_chat_template_is_saved(self):
        processor_loaded = self.processor_class.from_pretrained(self.tmpdirname)
        processor_dict_loaded = json.loads(processor_loaded.to_json_string())
        # chat templates aren't serialized to json in processors
        self.assertFalse("chat_template" in processor_dict_loaded)

        # they have to be saved as a separate file and loaded back from that file,
        # so we check that the same template is loaded
        processor_dict = self.prepare_processor_dict()
        self.assertTrue(processor_loaded.chat_template == processor_dict.get("chat_template", None))

    def test_chat_template(self):
        # Rendering only (no tokenization): the image entry becomes "<image>\n" in the prompt.
        processor = AutoProcessor.from_pretrained("thisisiron/Ovis2-1B-hf")
        expected_prompt = "<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n<|im_start|>user\n<image>\nWhat is shown in this image?<|im_end|>\n<|im_start|>assistant\n"

        messages = [
            {
                "role": "user",
                "content": [
                    {"type": "image"},
                    {"type": "text", "text": "What is shown in this image?"},
                ],
            },
        ]

        formatted_prompt = processor.apply_chat_template(messages, add_generation_prompt=True)
        self.assertEqual(expected_prompt, formatted_prompt)

    @require_av
    def test_chat_template_dict(self):
        processor = AutoProcessor.from_pretrained("thisisiron/Ovis2-1B-hf")
        messages = [
            {
                "role": "user",
                "content": [
                    {"type": "image"},
                    {"type": "text", "text": "What is shown in this image?"},
                ],
            },
        ]

        # Tokenized output must match the pinned token ids.
        formatted_prompt_tokenized = processor.apply_chat_template(messages, add_generation_prompt=True, tokenize=True)
        expected_output = [[151644, 8948, 198, 2610, 525, 264, 10950, 17847, 13, 151645, 198, 151644, 872, 198, 27, 1805, 397, 3838, 374, 6839, 304, 419, 2168, 30, 151645, 198, 151644, 77091, 198]] # fmt: skip
        self.assertListEqual(expected_output, formatted_prompt_tokenized)

        # return_dict=True yields only the text-side inputs here.
        out_dict = processor.apply_chat_template(messages, add_generation_prompt=True, tokenize=True, return_dict=True)
        self.assertListEqual(list(out_dict.keys()), ["input_ids", "attention_mask"])
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/ovis2/test_processor_ovis2.py",
"license": "Apache License 2.0",
"lines": 83,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:src/transformers/models/xcodec/configuration_xcodec.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Xcodec model configuration"""
import math
import numpy as np
from ...configuration_utils import PreTrainedConfig
from ..auto import CONFIG_MAPPING, AutoConfig
class XcodecConfig(PreTrainedConfig):
    r"""
    This is the configuration class to store the configuration of an [`XcodecModel`]. It is used to instantiate a
    Xcodec model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of the
    [Manel/X-Codec](https://huggingface.co/Manel/X-Codec) architecture.

    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.

    Args:
        target_bandwidths (`List[float]`, *optional*, defaults to `[0.5, 1, 1.5, 2, 4]`):
            The range of different bandwidths (in kbps) the model can encode audio with.
        sample_rate (`int`, *optional*, defaults to 16000):
            The sampling rate at which the audio waveform should be digitalized, in hertz (Hz).
        kernel_size (`int`, *optional*, defaults to 3):
            Kernel size for the initial semantic convolution.
        channel_ratios (`List[float]`, *optional*, defaults to `[1, 1]`):
            Expansion factors for the number of output channels in each semantic block.
        strides (`List[int]`, *optional*, defaults to `[1, 1]`):
            Strides for each semantic encoder block.
        block_dilations (`List[int]`, *optional*, defaults to `[1, 1]`):
            Dilation factors for the residual units in semantic blocks.
        unit_kernel_size (`int`, *optional*, defaults to 3):
            Kernel size inside each ResidualUnit in semantic blocks.
        codebook_size (`int`, *optional*, defaults to 1024):
            Number of entries in each residual quantizer's codebook.
        codebook_dim (`int`, *optional*):
            Dimensionality of each codebook vector. Defaults to sum of hidden size of acoustic and semantic models.
        initializer_range (`float`, *optional*, defaults to 0.02):
            Standard deviation of the truncated normal initializer for all weight matrices.
        acoustic_model_config (`Union[Dict, DacConfig]`, *optional*):
            An instance of the configuration for the acoustic (DAC) model.
        semantic_model_config (`Union[Dict, HubertConfig, WavLMConfig]`, *optional*):
            An instance of the configuration object for the semantic (HuBERT) model.

    Example:

    ```python
    >>> from transformers import XcodecModel, XcodecConfig

    >>> # Initializing configuration
    >>> configuration = XcodecConfig()

    >>> # Initializing a model (with random weights) from the configuration
    >>> model = XcodecModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "xcodec"
    sub_configs = {
        "acoustic_model_config": AutoConfig,
        "semantic_model_config": AutoConfig,
    }

    _default_acoustic_model_config_kwargs = {
        "encoder_hidden_size": 64,
        # NOTE: original DAC uses [2, 4, 8, 8] `downsampling ratios`, namely reverse of `upsampling_ratios`
        # (not sure if intentional by Xcodec but we keep it)
        "downsampling_ratios": [8, 5, 4, 2],
        "decoder_hidden_size": 1024,
        "upsampling_ratios": [8, 5, 4, 2],
        "hidden_size": 256,
    }
    _default_semantic_model_config_kwargs = {}

    def __init__(
        self,
        target_bandwidths: list[float] | None = None,
        sample_rate: int = 16000,
        kernel_size: int = 3,
        channel_ratios: list[float] | None = None,
        strides: list[int] | None = None,
        block_dilations: list[int] | None = None,
        unit_kernel_size: int = 3,
        codebook_size: int = 1024,
        codebook_dim: int | None = None,
        initializer_range: float = 0.02,
        acoustic_model_config=None,
        semantic_model_config=None,
        **kwargs,
    ):
        # Avoid mutable default arguments (list objects shared across instances):
        # resolve the list-valued defaults here, mirroring `target_bandwidths`.
        if channel_ratios is None:
            channel_ratios = [1, 1]
        if strides is None:
            strides = [1, 1]
        if block_dilations is None:
            block_dilations = [1, 1]
        if target_bandwidths is None:
            target_bandwidths = [0.5, 1, 1.5, 2, 4]

        # The acoustic sub-config may arrive as a dict, a ready config object, or None
        # (in which case the DAC defaults above are used).
        if isinstance(acoustic_model_config, dict):
            acoustic_model_config["model_type"] = acoustic_model_config.get("model_type", "dac")
            acoustic_model_config = CONFIG_MAPPING[acoustic_model_config["model_type"]](
                **{**self._default_acoustic_model_config_kwargs, **acoustic_model_config}
            )
        elif acoustic_model_config is None:
            acoustic_model_config = CONFIG_MAPPING["dac"](**self._default_acoustic_model_config_kwargs)
        self.acoustic_model_config = acoustic_model_config

        # Same resolution logic for the semantic sub-config (HuBERT by default).
        if isinstance(semantic_model_config, dict):
            semantic_model_config["model_type"] = semantic_model_config.get("model_type", "hubert")
            semantic_model_config = CONFIG_MAPPING[semantic_model_config["model_type"]](
                **{**self._default_semantic_model_config_kwargs, **semantic_model_config}
            )
        elif semantic_model_config is None:
            semantic_model_config = CONFIG_MAPPING["hubert"](**self._default_semantic_model_config_kwargs)
        self.semantic_model_config = semantic_model_config

        self.target_bandwidths = target_bandwidths
        self.sample_rate = sample_rate
        self.kernel_size = kernel_size
        self.channel_ratios = channel_ratios
        self.strides = strides
        self.block_dilations = block_dilations
        self.unit_kernel_size = unit_kernel_size
        self.codebook_size = codebook_size
        self.initializer_range = initializer_range

        # The quantizer runs on concatenated acoustic + semantic features by default.
        if codebook_dim is None:
            codebook_dim = self.acoustic_model_config.hidden_size + self.semantic_model_config.hidden_size
        self.codebook_dim = codebook_dim

        super().__init__(**kwargs)

    @property
    def frame_rate(self) -> int:
        """Number of codec frames produced per second of audio."""
        return math.ceil(self.sample_rate / self.hop_length)

    @property
    def semantic_hidden_size(self) -> int:
        """Hidden size of the semantic sub-model."""
        return self.semantic_model_config.hidden_size

    @property
    def hop_length(self) -> int:
        """Total downsampling factor of the acoustic encoder (audio samples per frame)."""
        return int(np.prod(self.acoustic_model_config.downsampling_ratios))

    @property
    def codebook_nbits(self) -> int:
        """Number of bits needed to index one codebook entry."""
        return math.ceil(math.log2(self.codebook_size))

    @property
    def hidden_size(self) -> int:
        """Combined feature dimension: acoustic + semantic hidden sizes."""
        return self.acoustic_model_config.hidden_size + self.semantic_model_config.hidden_size

    @property
    def num_quantizers(self) -> int:
        """Number of residual quantizers needed to reach the largest target bandwidth."""
        return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * self.codebook_nbits))
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/xcodec/configuration_xcodec.py",
"license": "Apache License 2.0",
"lines": 142,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/xcodec/convert_xcodec_weights_to_hf.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import io
import re
import torch
import yaml
from transformers import (
AutoConfig,
DacFeatureExtractor,
XcodecConfig,
XcodecModel,
logging,
)
# Surface conversion progress at INFO level.
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# Original checkpoints pickle io.BytesIO objects alongside the tensors (see `safe_load`);
# allow-listing the type lets `torch.load(..., weights_only=True)` deserialize them.
torch.serialization.add_safe_globals([io.BytesIO])
# Regex -> HF module-path templates for the DAC-style acoustic encoder.
# Captured integers are ModuleList indices that `convert_old_keys_to_new_keys`
# re-bases (outer block index shifted by -1, residual-unit index by +1).
MAPPING_ACOUSTIC_ENCODER = {
    r"^block\.0": ["conv1"],
    r"^block\.(\d+)\.block\.(\d+)\.block\.0": ["block", "res_unit", "snake1"],
    r"^block\.(\d+)\.block\.(\d+)\.block\.1": ["block", "res_unit", "conv1"],
    r"^block\.(\d+)\.block\.(\d+)\.block\.2": ["block", "res_unit", "snake2"],
    r"^block\.(\d+)\.block\.(\d+)\.block\.3": ["block", "res_unit", "conv2"],
    r"^block\.(\d+)\.block\.3": ["block", "snake1"],
    r"^block\.(\d+)\.block\.4": ["block", "conv1"],
    r"^block\.5": ["snake1"],
    r"^block\.6": ["conv2"],
}
# Same idea for the acoustic decoder; its residual-unit index is shifted by -1
# instead because the original inner list starts with snake + transposed conv.
MAPPING_ACOUSTIC_DECODER = {
    r"^model\.0": ["conv1"],
    r"^model\.(\d+)\.block\.0": ["block", "snake1"],
    r"^model\.(\d+)\.block\.1": ["block", "conv_t1"],
    r"^model\.(\d+)\.block\.(\d+)\.block\.0": ["block", "res_unit", "snake1"],
    r"^model\.(\d+)\.block\.(\d+)\.block\.1": ["block", "res_unit", "conv1"],
    r"^model\.(\d+)\.block\.(\d+)\.block\.2": ["block", "res_unit", "snake2"],
    r"^model\.(\d+)\.block\.(\d+)\.block\.3": ["block", "res_unit", "conv2"],
    r"^model\.5": ["snake1"],
    r"^model\.6": ["conv2"],
}
# Plain substring rewrites for the semantic encoder/decoder convolution wrappers.
MAPPING_SEMANTIC_ENCODER = {
    "conv.conv.": "conv.",
    "conv1.conv.": "conv1.",
    "conv2.conv.": "conv2.",
}
MAPPING_SEMANTIC_DECODER = {
    "conv1.conv.": "conv1.",
    "conv2.conv.": "conv2.",
    "conv.conv.": "conv.",
}
# Residual vector quantizer renames (EnCodec-style names -> HF names).
MAPPING_QUANTIZER = {
    "quantizer.vq.layers": "quantizer.quantizers",
    "._codebook.": ".codebook.",
}
def safe_load(path: str) -> dict[str, torch.Tensor]:
    """
    Load a checkpoint and keep only the tensor entries, dropping any io.BytesIO blobs.
    """
    raw_checkpoint = torch.load(path, map_location="cpu", weights_only=True)
    tensors_only: dict[str, torch.Tensor] = {}
    for name, obj in raw_checkpoint.items():
        if isinstance(obj, io.BytesIO):
            continue
        tensors_only[name] = obj
    return tensors_only
def _rewrite_weight_norm(key: str) -> str:
if key.endswith("weight_g"):
return key[: -len("weight_g")] + "parametrizations.weight.original0"
if key.endswith("weight_v"):
return key[: -len("weight_v")] + "parametrizations.weight.original1"
return key
def _map_acoustic_key(layer_key, mapping, unit_index_offset):
    """
    Map one acoustic encoder/decoder sub-key through the regex `mapping`.

    Captured integers are ModuleList indices: the outer block index is re-based by -1
    (the first conv is stored separately in HF), and the residual-unit index is shifted
    by `unit_index_offset` (+1 for the encoder, -1 for the decoder, whose inner list
    starts with snake + transposed conv). Returns the rewritten sub-key with weight-norm
    suffixes translated, or `None` when no pattern matches (the key is dropped).
    """
    for pattern, path_parts in mapping.items():
        pattern_match = re.match(pattern, layer_key)
        if pattern_match is None:
            continue
        digit_indices = [int(group) for group in pattern_match.groups() if group is not None]
        remainder = layer_key[pattern_match.end() :]
        if len(path_parts) == 1:
            mapped_subkey = f"{path_parts[0]}{remainder}"
        elif len(path_parts) == 2:
            mapped_subkey = f"{path_parts[0]}.{digit_indices[0] - 1}.{path_parts[1]}{remainder}"
        else:
            outer_idx, unit_idx = digit_indices
            mapped_subkey = (
                f"{path_parts[0]}.{outer_idx - 1}.{path_parts[1]}{unit_idx + unit_index_offset}.{path_parts[2]}{remainder}"
            )
        return _rewrite_weight_norm(mapped_subkey)
    return None


def convert_old_keys_to_new_keys(original_state_dict: dict[str, torch.Tensor]) -> dict[str, torch.Tensor]:
    """
    Rename original Xcodec checkpoint keys to the HF module layout.

    Keys that match no known prefix or no acoustic pattern are silently dropped,
    mirroring the original conversion behavior.
    """
    converted_checkpoint: dict[str, torch.Tensor] = {}
    for old_key, value in original_state_dict.items():
        if old_key.startswith("encoder."):
            # encoder residual units are numbered from 1 in the HF layout
            mapped = _map_acoustic_key(old_key[len("encoder.") :], MAPPING_ACOUSTIC_ENCODER, unit_index_offset=1)
            if mapped is not None:
                converted_checkpoint[f"acoustic_encoder.{mapped}"] = value
        elif old_key.startswith("decoder_2."):
            # decoder residual units follow snake + conv_t, hence the -1 shift
            mapped = _map_acoustic_key(old_key[len("decoder_2.") :], MAPPING_ACOUSTIC_DECODER, unit_index_offset=-1)
            if mapped is not None:
                converted_checkpoint[f"acoustic_decoder.{mapped}"] = value
        elif old_key.startswith("encoder_semantic."):
            semantic_key = old_key[len("encoder_semantic.") :]
            for old, new in MAPPING_SEMANTIC_ENCODER.items():
                semantic_key = semantic_key.replace(old, new)
            converted_checkpoint[f"encoder_semantic.{semantic_key}"] = value
        elif old_key.startswith("decoder_semantic."):
            semantic_key = old_key[len("decoder_semantic.") :]
            for old, new in MAPPING_SEMANTIC_DECODER.items():
                semantic_key = semantic_key.replace(old, new)
            converted_checkpoint[f"decoder_semantic.{semantic_key}"] = value
        elif old_key.startswith("semantic_model."):
            # semantic teacher weights keep their names unchanged
            converted_checkpoint[old_key] = value
        elif old_key.startswith("fc_prior."):
            converted_checkpoint[f"fc.{old_key[len('fc_prior.') :]}"] = value
        elif old_key.startswith("fc_post1."):
            converted_checkpoint[f"fc1.{old_key[len('fc_post1.') :]}"] = value
        elif old_key.startswith("fc_post2."):
            converted_checkpoint[f"fc2.{old_key[len('fc_post2.') :]}"] = value
        elif old_key.startswith("quantizer.vq.layers"):
            new_key = old_key
            for old_sub, new_sub in MAPPING_QUANTIZER.items():
                new_key = new_key.replace(old_sub, new_sub)
            converted_checkpoint[new_key] = value
    return converted_checkpoint
# for reference, see original implementation: https://github.com/zhenye234/xcodec/blob/main/models/soundstream_semantic.py#L24
@torch.no_grad()
def convert_checkpoint(checkpoint_path, config_path, pytorch_dump_folder_path=None, push_to_hub=None):
    """
    Convert an original Xcodec checkpoint + yaml config into the HF format.

    Optionally saves the converted model and feature extractor to
    `pytorch_dump_folder_path` and/or pushes them to `push_to_hub`.
    Requires a CUDA device (weight-norm copying depends on it, per the check below).
    """
    # load config yaml file
    with open(config_path, "r") as f:
        model_config = yaml.safe_load(f)
    # extract the relevant generator parameters
    ratios = model_config["generator"]["config"]["ratios"]
    target_bandwidths = model_config["generator"]["config"]["target_bandwidths"]
    sample_rate = model_config["generator"]["config"]["sample_rate"]
    acoustic_model_config = {
        "encoder_hidden_size": 64,
        "decoder_hidden_size": 1024,
        # NOTE: original DAC uses [2, 4, 8, 8] `downsampling ratios`, namely reverse of `upsampling_ratios`
        # (not sure if intentional by Xcodec but we keep it)
        "downsampling_ratios": ratios,
        "upsampling_ratios": ratios,
        "sampling_rate": sample_rate,
        "hidden_size": model_config["generator"]["config"]["D"],
    }
    # the "semantic_techer" [sic, original key] selects which pretrained SSL config to mirror
    semantic_model = model_config["generator"]["config"]["semantic_techer"]
    if semantic_model == "hubert_base":
        semantic_model_config = AutoConfig.from_pretrained("facebook/hubert-base-ls960")
    elif semantic_model == "wavlm_base_plus":
        semantic_model_config = AutoConfig.from_pretrained("microsoft/wavlm-base-plus")
    elif semantic_model == "hubert_base_general":
        semantic_model_config = AutoConfig.from_pretrained("ZhenYe234/hubert_base_general_audio")
    else:
        raise ValueError(f"Unknown semantic model: {semantic_model}")
    config = XcodecConfig(
        target_bandwidths=target_bandwidths,
        acoustic_model_config=acoustic_model_config,
        semantic_model_config=semantic_model_config,
        sample_rate=sample_rate,
        codebook_size=model_config["generator"]["config"]["bins"],
    )
    # create model
    if not torch.cuda.is_available():
        raise ValueError("Run this script on a machine with a GPU for weight norm layers to be correctly copied.")
    torch_device = "cuda"
    model = XcodecModel(config).to(torch_device)
    logger.info("Loading original checkpoint ...")
    state_dict = safe_load(checkpoint_path)
    # the original checkpoint has weight norm applied
    model.apply_weight_norm()
    logger.info("Converting model ...")
    new_state_dict = convert_old_keys_to_new_keys(state_dict)
    # strict loading guarantees a complete 1:1 mapping between original and converted keys
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=True, assign=True)
    if len(unexpected_keys) != 0:
        raise ValueError(f"Unexpected keys: {unexpected_keys}")
    if len(missing_keys) != 0:
        raise ValueError(f"missing keys found: {missing_keys}")
    model.remove_weight_norm()
    if pytorch_dump_folder_path is not None:
        model.save_pretrained(pytorch_dump_folder_path)
    feature_extractor = DacFeatureExtractor(
        sampling_rate=config.sample_rate,
        hop_length=config.acoustic_model_config.hop_length,
    )
    if pytorch_dump_folder_path is not None:
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print("Pushing to the hub...")
        feature_extractor.push_to_hub(push_to_hub)
        model.push_to_hub(push_to_hub)
"""
Models checkpoints can be downloaded from here:
https://github.com/zhenye234/xcodec?tab=readme-ov-file#available-models
1) `xcodec_hubert_librispeech`:
```
# Download config and checkpoint files
wget https://huggingface.co/ZhenYe234/xcodec/resolve/main/config_hubert.yaml -P /raid/eric/xcodec_original
wget https://huggingface.co/ZhenYe234/xcodec/resolve/main/xcodec_speech_hubert_librispeech.pth -P /raid/eric/xcodec_original
# Run conversion:
python src/transformers/models/xcodec/convert_xcodec_weights_to_hf.py \
--checkpoint_path /raid/eric/xcodec_original/xcodec_speech_hubert_librispeech.pth \
--config_path /raid/eric/xcodec_original/config_hubert.yaml \
--push_to_hub hf-audio/xcodec-hubert-librispeech
```
2) `xcodec_hubert_general_audio`:
```
# Download config and checkpoint files
wget https://huggingface.co/ZhenYe234/xcodec/resolve/main/config_hubert_general.yaml -P /raid/eric/xcodec_original
wget https://huggingface.co/ZhenYe234/xcodec/resolve/main/xcodec_hubert_general_audio.pth -P /raid/eric/xcodec_original
# Run conversion:
python src/transformers/models/xcodec/convert_xcodec_weights_to_hf.py \
--checkpoint_path /raid/eric/xcodec_original/xcodec_hubert_general_audio.pth \
--config_path /raid/eric/xcodec_original/config_hubert_general.yaml \
--push_to_hub hf-audio/xcodec-hubert-general
```
3) `xcodec_hubert_general_audio_more_data` (more balanced dataset):
```
# Download config and checkpoint files
wget https://huggingface.co/ZhenYe234/xcodec/resolve/main/config_hubert_general.yaml -P /raid/eric/xcodec_original
wget https://huggingface.co/ZhenYe234/xcodec/resolve/main/xcodec_hubert_general_audio_v2.pth -P /raid/eric/xcodec_original
# Run conversion:
python src/transformers/models/xcodec/convert_xcodec_weights_to_hf.py \
--checkpoint_path /raid/eric/xcodec_original/xcodec_hubert_general_audio_v2.pth \
--config_path /raid/eric/xcodec_original/config_hubert_general.yaml \
--push_to_hub hf-audio/xcodec-hubert-general-balanced
```
4) `xcodec_wavlm_mls`:
```
# Download config and checkpoint files
wget https://huggingface.co/ZhenYe234/xcodec/resolve/main/config_wavlm.yaml -P /raid/eric/xcodec_original
wget https://huggingface.co/ZhenYe234/xcodec/resolve/main/xcodec_speech_wavlm_mls.pth -P /raid/eric/xcodec_original
# Run conversion:
python src/transformers/models/xcodec/convert_xcodec_weights_to_hf.py \
--checkpoint_path /raid/eric/xcodec_original/xcodec_speech_wavlm_mls.pth \
--config_path /raid/eric/xcodec_original/config_wavlm.yaml \
--push_to_hub hf-audio/xcodec-wavlm-mls
```
5) `xcodec_wavlm_more_data`:
```
# Download config and checkpoint files
wget https://huggingface.co/ZhenYe234/xcodec/resolve/main/config_wavlm.yaml -P /raid/eric/xcodec_original
wget https://huggingface.co/ZhenYe234/xcodec/resolve/main/xcodec_speech_wavlm_more_data.pth -P /raid/eric/xcodec_original
# Run conversion:
python src/transformers/models/xcodec/convert_xcodec_weights_to_hf.py \
--checkpoint_path /raid/eric/xcodec_original/xcodec_speech_wavlm_more_data.pth \
--config_path /raid/eric/xcodec_original/config_wavlm.yaml \
--push_to_hub hf-audio/xcodec-wavlm-more-data
"""
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # `required=True` already rejects a missing value, so no redundant `default=None`
    # is passed for the mandatory arguments (argparse ignores it anyway).
    parser.add_argument("--checkpoint_path", required=True, type=str, help="Path to original checkpoint")
    parser.add_argument(
        "--config_path", required=True, type=str, help="Path to hf config.yaml of model to convert"
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the Hugging Face hub."
    )
    args = parser.parse_args()
    convert_checkpoint(
        args.checkpoint_path,
        args.config_path,
        args.pytorch_dump_folder_path,
        args.push_to_hub,
    )
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/xcodec/convert_xcodec_weights_to_hf.py",
"license": "Apache License 2.0",
"lines": 287,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/xcodec/modeling_xcodec.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Transformers Xcodec model."""
import math
from dataclasses import dataclass
from functools import lru_cache
import torch
import torch.nn as nn
import torch.nn.functional as F
from ... import initialization as init
from ...audio_utils import conv1d_output_length
from ...modeling_utils import PreTrainedAudioTokenizerBase
from ...utils import ModelOutput, auto_docstring
from ..auto import AutoModel
from .configuration_xcodec import XcodecConfig
@dataclass
class XcodecOutput(ModelOutput):
    """
    Container for Xcodec codec outputs (codes and/or reconstructed waveform).

    Args:
        audio_codes (`torch.LongTensor` of shape `(batch_size, num_quantizers, codes_length)`, *optional*):
            Discrete code indices computed using `model.encode`.
        audio_values (`torch.FloatTensor` of shape `(batch_size, channels, num_samples)`, *optional*):
            Decoded audio values obtained using the decoder part of Xcodec.
    """

    audio_codes: torch.LongTensor | None = None
    audio_values: torch.FloatTensor | None = None
@dataclass
class XcodecEncoderOutput(ModelOutput):
    """
    Output of [`XcodecModel.encode`].

    Args:
        audio_codes (`torch.LongTensor` of shape `(batch_size, num_quantizers, codes_length)`, *optional*):
            Discrete code indices computed using `model.encode`.
    """

    audio_codes: torch.LongTensor | None = None
@dataclass
class XcodecDecoderOutput(ModelOutput):
    """
    Output of [`XcodecModel.decode`].

    Args:
        audio_values (`torch.FloatTensor` of shape `(batch_size, channels, num_samples)`, *optional*):
            Decoded audio values obtained using the decoder part of Xcodec.
    """

    audio_values: torch.FloatTensor | None = None
class XcodecResidualUnit(nn.Module):
    """Residual block for SemanticEncoder and SemanticDecoder used in Xcodec.

    Applies ELU -> dilated conv -> ELU -> 1x1 conv and adds the result back to the input.
    """

    def __init__(self, config: XcodecConfig, in_channels: int, out_channels: int, dilation: int):
        super().__init__()
        self.activation = nn.ELU()
        # "same"-style padding for the dilated kernel keeps the time axis unchanged
        same_padding = dilation * ((config.unit_kernel_size - 1) // 2)
        self.conv1 = nn.Conv1d(
            in_channels,
            out_channels,
            config.unit_kernel_size,
            stride=1,
            padding=same_padding,
            dilation=dilation,
            groups=1,
            bias=False,
        )
        self.conv2 = nn.Conv1d(in_channels=out_channels, out_channels=out_channels, kernel_size=1, bias=False)

    def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
        residual = hidden_state
        hidden = self.conv1(self.activation(hidden_state))
        hidden = self.conv2(self.activation(hidden))
        return residual + hidden
class XcodecSemanticEncoderBlock(nn.Module):
    """A stack of dilated residual units followed by a strided downsampling conv."""

    def __init__(self, config: XcodecConfig, in_channels: int, out_channels: int, stride: int):
        super().__init__()
        self.res_units = nn.ModuleList(
            [XcodecResidualUnit(config, in_channels, in_channels, dilation) for dilation in config.block_dilations]
        )
        # special case: a stride of 1 keeps kernel 3 instead of 2 * stride
        kernel_size = 3 if stride == 1 else 2 * stride
        self.conv = nn.Conv1d(
            in_channels,
            out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=(kernel_size - 1) // 2,
            bias=True,
        )

    def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
        for residual_unit in self.res_units:
            hidden_state = residual_unit(hidden_state)
        return self.conv(hidden_state)
class SemanticEncoder(nn.Module):
    """Downsamples semantic-model features through a chain of encoder blocks."""

    def __init__(self, config):
        super().__init__()
        if len(config.strides) != len(config.channel_ratios):
            raise ValueError("Number of strides must match the number of channel_ratios.")
        self.conv = nn.Conv1d(
            config.semantic_hidden_size,
            config.semantic_hidden_size,
            config.kernel_size,
            1,
            config.kernel_size // 2,
            bias=False,
        )
        blocks = []
        current_channels = config.semantic_hidden_size
        for stride, ratio in zip(config.strides, config.channel_ratios):
            next_channels = int(config.semantic_hidden_size * ratio)
            blocks.append(XcodecSemanticEncoderBlock(config, current_channels, next_channels, stride))
            current_channels = next_channels
        self.conv_blocks = nn.ModuleList(blocks)

    def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
        hidden_state = self.conv(hidden_state)
        for encoder_block in self.conv_blocks:
            hidden_state = encoder_block(hidden_state)
        return hidden_state
class SemanticDecoderBlock(nn.Module):
    """An upsampling (or stride-1) conv followed by a stack of dilated residual units."""

    def __init__(self, config: XcodecConfig, in_channels: int, out_channels: int, stride: int):
        super().__init__()
        if stride == 1:
            # no upsampling: plain length-preserving conv
            self.conv = nn.Conv1d(in_channels, out_channels, kernel_size=3, stride=1, padding=1, bias=True)
        else:
            kernel_size = 2 * stride
            self.conv = nn.ConvTranspose1d(
                in_channels,
                out_channels,
                kernel_size,
                stride,
                (stride + 1) // 2,
                # odd strides need one extra output sample to hit the target length
                stride % 2,
                bias=False,
            )
        self.res_units = nn.ModuleList(
            [XcodecResidualUnit(config, out_channels, out_channels, dilation) for dilation in config.block_dilations]
        )

    def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
        hidden_state = self.conv(hidden_state)
        for residual_unit in self.res_units:
            hidden_state = residual_unit(hidden_state)
        return hidden_state
class SemanticDecoder(nn.Module):
    """Mirror of `SemanticEncoder`: upsamples back to the full semantic feature width."""

    def __init__(self, config):
        super().__init__()
        width = config.semantic_hidden_size
        self.conv1 = nn.Conv1d(
            in_channels=width,
            out_channels=int(width * config.channel_ratios[0]),
            kernel_size=config.kernel_size,
            stride=1,
            padding=config.kernel_size // 2,
            bias=False,
        )
        blocks = []
        num_stages = len(config.channel_ratios)
        for stage, stride in enumerate(config.strides):
            stage_in = int(width * config.channel_ratios[stage])
            # the last stage returns to the full semantic width
            if stage < num_stages - 1:
                stage_out = int(width * config.channel_ratios[stage + 1])
            else:
                stage_out = width
            blocks.append(SemanticDecoderBlock(config, stage_in, stage_out, stride))
        self.conv_blocks = nn.ModuleList(blocks)
        self.conv2 = nn.Conv1d(
            width,
            width,
            config.kernel_size,
            stride=1,
            padding=config.kernel_size // 2,
            bias=False,
        )

    def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
        hidden_state = self.conv1(hidden_state)
        for decoder_block in self.conv_blocks:
            hidden_state = decoder_block(hidden_state)
        return self.conv2(hidden_state)
class XcodecEuclideanCodebook(nn.Module):
    """Codebook with Euclidean (L2) nearest-neighbour lookup.

    Adapted from transformers.models.encodec.modeling_encodec.EncodecEuclideanCodebook;
    the buffers mirror the original state so checkpoints load one-to-one.
    """

    def __init__(self, config):
        super().__init__()
        self.codebook_size = config.codebook_size
        initial_embed = torch.zeros(config.codebook_size, config.codebook_dim)
        self.register_buffer("inited", torch.Tensor([True]))
        self.register_buffer("cluster_size", torch.zeros(config.codebook_size))
        self.register_buffer("embed", initial_embed)
        self.register_buffer("embed_avg", initial_embed.clone())

    def quantize(self, hidden_states):
        """Return the index of the nearest code (L2) for each row of `hidden_states`."""
        codebook = self.embed.t()
        # -||x - e||^2 expanded; argmax of the negated distance is the nearest code
        squared_norm = hidden_states.pow(2).sum(1, keepdim=True)
        scores = -(squared_norm - 2 * hidden_states @ codebook + codebook.pow(2).sum(0, keepdim=True))
        return scores.max(dim=-1).indices

    def encode(self, hidden_states):
        """Quantize, flattening every leading dimension and restoring it afterwards."""
        leading_shape = hidden_states.shape[:-1]
        flat_states = hidden_states.reshape(-1, hidden_states.shape[-1])
        return self.quantize(flat_states).view(*leading_shape)

    def decode(self, embed_ind):
        """Look up the code vectors for the given indices."""
        return F.embedding(embed_ind.to(self.embed.device), self.embed)
class XcodecVectorQuantization(nn.Module):
    """
    Vector quantization implementation. Currently supports only euclidean distance.
    """

    def __init__(self, config: XcodecConfig):
        super().__init__()
        self.codebook = XcodecEuclideanCodebook(config)

    def encode(self, hidden_states):
        # the codebook expects channels-last input: (batch, time, dim)
        channels_last = hidden_states.permute(0, 2, 1)
        return self.codebook.encode(channels_last)

    def decode(self, embed_ind):
        # back to (batch, dim, time) after the embedding lookup
        looked_up = self.codebook.decode(embed_ind)
        return looked_up.permute(0, 2, 1)
class XcodecResidualVectorQuantization(nn.Module):
    """
    Residual vector quantization implementation. Follows Algorithm 1 in https://huggingface.co/papers/2107.03312
    """

    def __init__(self, config: XcodecConfig):
        super().__init__()
        self.quantizers = nn.ModuleList([XcodecVectorQuantization(config) for _ in range(config.num_quantizers)])
        self.frame_rate = config.frame_rate
        self.codebook_size = config.codebook_size
        self.num_quantizers = config.num_quantizers

    def get_bandwidth_per_quantizer(self):
        """Bandwidth (kbps) consumed by one quantizer at the model's frame rate."""
        return math.log2(self.codebook_size) * self.frame_rate / 1000

    def get_num_quantizers_for_bandwidth(self, bandwidth=None) -> int:
        """How many quantizers fit in `bandwidth`; all of them when unspecified."""
        if bandwidth is None or bandwidth <= 0.0:
            return self.num_quantizers
        per_quantizer = self.get_bandwidth_per_quantizer()
        return int(max(1, math.floor(bandwidth / per_quantizer)))

    def encode(self, embeddings: torch.Tensor, bandwidth=None) -> torch.Tensor:
        """
        Encode the input tensor into discrete indices using RVQ, with the number of quantizers selected based on the given bandwidth.
        Each quantizer /codebook residually quantizes the input and returns the nearest indices in terms of Euclidian distance.
        """
        active_quantizers = self.get_num_quantizers_for_bandwidth(bandwidth)
        residual = embeddings
        indices_per_level = []
        for quantizer in self.quantizers[:active_quantizers]:
            level_indices = quantizer.encode(residual)
            residual = residual - quantizer.decode(level_indices)
            indices_per_level.append(level_indices)
        return torch.stack(indices_per_level)

    def decode(self, codes: torch.Tensor) -> torch.Tensor:
        """Decode the given codes by summing every level's codebook vectors."""
        quantized_out = torch.tensor(0.0, device=codes.device)
        for level, level_indices in enumerate(codes):
            level_vectors = self.quantizers[level].decode(level_indices)
            quantized_out = quantized_out + level_vectors.to(codes.device)
        return quantized_out
@auto_docstring
class XcodecPreTrainedModel(PreTrainedAudioTokenizerBase):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = XcodecConfig
    base_model_prefix = "xcodec"
    main_input_name = "input_values"
    input_modalities = "audio"
    _no_split_modules = ["XcodecResidualVectorQuantization"]

    @torch.no_grad()
    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, nn.Linear):
            init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                init.zeros_(module.bias)
        elif isinstance(module, (nn.LayerNorm, nn.GroupNorm)):
            init.zeros_(module.bias)
            init.ones_(module.weight)
        elif isinstance(module, nn.Conv1d):
            init.kaiming_normal_(module.weight)
            if module.bias is not None:
                # fan-in based uniform bound, matching PyTorch's default conv bias init
                k = math.sqrt(module.groups / (module.in_channels * module.kernel_size[0]))
                init.uniform_(module.bias, a=-k, b=k)
        elif module.__class__.__name__ == "Snake1d":
            # matched by name: Snake1d lives in the DAC model and is not importable here
            init.ones_(module.alpha)
        elif isinstance(module, nn.ConvTranspose1d):
            module.reset_parameters()
        elif isinstance(module, nn.Embedding):
            init.normal_(module.weight, mean=0.0, std=0.02)
        elif isinstance(module, XcodecModel):
            # The conv1d are not handled correctly, as `self.acoustic_encoder/decoder` are initialized from a PreTrainedModel,
            # but then only the submodules are used (which are not PreTrainedModels...) -> here we reinit them as in DacModel
            for submodule in module.acoustic_encoder.modules():
                if isinstance(submodule, nn.Conv1d):
                    init.trunc_normal_(submodule.weight, std=0.02)
                    init.constant_(submodule.bias, 0)
            for submodule in module.acoustic_decoder.modules():
                if isinstance(submodule, nn.Conv1d):
                    init.trunc_normal_(submodule.weight, std=0.02)
                    init.constant_(submodule.bias, 0)
        elif isinstance(module, XcodecEuclideanCodebook):
            init.copy_(module.inited, torch.Tensor([True]))
            init.zeros_(module.cluster_size)
            init.zeros_(module.embed)
            init.zeros_(module.embed_avg)

    def apply_weight_norm(self):
        """Apply weight norm in the acoustic encoder and decoder because the original checkpoint has weight norm applied."""
        weight_norm = torch.nn.utils.parametrizations.weight_norm
        weight_norm(self.acoustic_encoder.conv1)
        weight_norm(self.acoustic_encoder.conv2)
        for block in self.acoustic_encoder.block:
            weight_norm(block.conv1)
            for res_unit in (block.res_unit1, block.res_unit2, block.res_unit3):
                weight_norm(res_unit.conv1)
                weight_norm(res_unit.conv2)
        weight_norm(self.acoustic_decoder.conv1, name="weight")
        weight_norm(self.acoustic_decoder.conv2, name="weight")
        for block in self.acoustic_decoder.block:
            weight_norm(block.conv_t1, name="weight")
            for res_unit in (block.res_unit1, block.res_unit2, block.res_unit3):
                weight_norm(res_unit.conv1, name="weight")
                weight_norm(res_unit.conv2, name="weight")

    def remove_weight_norm(self):
        """Remove the weight norm from the acoustic encoder and decoder."""
        for module in (self.acoustic_encoder, self.acoustic_decoder):
            for m in module.modules():
                # handle both the legacy hook-based weight norm and the parametrize API
                try:
                    torch.nn.utils.remove_weight_norm(m, name="weight")
                except (ValueError, AttributeError):
                    # module was not weight-normed via the legacy API; ignore
                    pass
                if hasattr(m, "parametrizations") and "weight" in m.parametrizations:
                    torch.nn.utils.parametrize.remove_parametrizations(m, "weight", leave_parametrized=True)

    def _get_conv1d_layers(self, module):
        """
        Recursively fetch all Conv1d layers of `module`, memoized per instance.

        NOTE: this was previously decorated with `functools.lru_cache`, which keeps
        `self` (the whole model) alive in a module-level cache (ruff B019). A plain
        per-instance dict gives the same memoization without the leak.
        """
        # stored via __dict__ so nn.Module's __setattr__ does not treat it as state
        cache = self.__dict__.setdefault("_conv1d_layers_cache", {})
        if module not in cache:

            def _collect(current: nn.Module):
                layers = []
                if isinstance(current, nn.Conv1d):
                    layers.append(current)
                # recursively check all child modules
                for child in current.children():
                    layers.extend(_collect(child))
                return layers

            cache[module] = tuple(_collect(module))
        return cache[module]

    def _get_conv1d_output_lengths(self, input_length, module=None):
        """
        For a given module, compute the output length that would be obtained after all Conv1d layers.
        """
        if module is None:
            module = self
        conv1d_layers = self._get_conv1d_layers(module)
        for layer in conv1d_layers:
            input_length = conv1d_output_length(layer, input_length)
        return input_length
@auto_docstring(custom_intro="""The Xcodec neural audio codec model.""")
class XcodecModel(XcodecPreTrainedModel):
def __init__(self, config):
    """Build the Xcodec codec: DAC-based acoustic path, semantic path, and RVQ."""
    super().__init__(config)
    self.config = config
    # half a hop of padding; used when padding inputs in `_extract_semantic_features`
    # and in `encode` to align frame counts
    self.pad = config.hop_length // 2
    # only the encoder/decoder submodules of the DAC model are kept
    acoustic_model = AutoModel.from_config(config.acoustic_model_config)
    self.acoustic_encoder = acoustic_model.encoder
    self.acoustic_decoder = acoustic_model.decoder
    # align the HF DAC decoder with the original Xcodec variant (output padding, no tanh)
    self._adjust_dac_decoder(self.acoustic_decoder)
    self.encoder_semantic = SemanticEncoder(config)
    self.decoder_semantic = SemanticDecoder(config)
    # SSL teacher kept in eval mode; only queried under no_grad in `_extract_semantic_features`
    self.semantic_model = AutoModel.from_config(config.semantic_model_config).eval()
    # fc fuses the concatenated features; fc1/fc2 project back to each branch's width
    self.fc = nn.Linear(config.hidden_size, config.hidden_size)
    self.fc1 = nn.Linear(config.hidden_size, config.semantic_model_config.hidden_size)
    self.fc2 = nn.Linear(config.hidden_size, config.acoustic_model_config.hidden_size)
    self.quantizer = XcodecResidualVectorQuantization(config)
    # Initialize weights and apply final processing
    self.post_init()
@staticmethod
def _adjust_dac_decoder(decoder: nn.Module):
    r"""
    DAC implemented in Xcodec is slightly different from the HF version: every
    `ConvTranspose1d` gets its output padding set to ``stride % 2``, and the final
    `nn.Tanh` activation (when present) is replaced with an identity.
    """
    for submodule in decoder.modules():
        if not isinstance(submodule, nn.ConvTranspose1d):
            continue
        stride_value = submodule.stride[0] if isinstance(submodule.stride, tuple) else submodule.stride
        submodule.output_padding = (stride_value % 2,)
    if isinstance(getattr(decoder, "tanh", None), nn.Tanh):
        decoder.tanh = nn.Identity()
def _extract_semantic_features(self, input_values: torch.FloatTensor) -> torch.FloatTensor:
    """Run the semantic model on padded mono audio and average its hidden states over layers."""
    mono_waveform = input_values[:, 0, :]
    padded_waveform = F.pad(mono_waveform, (self.pad, self.pad))
    # the semantic teacher is used as a frozen feature extractor
    with torch.no_grad():
        semantic_outputs = self.semantic_model(padded_waveform, output_hidden_states=True)
    layer_stack = torch.stack(semantic_outputs.hidden_states, dim=1)
    return layer_stack.mean(dim=1)
@auto_docstring
def encode(
    self,
    input_values: torch.Tensor,
    bandwidth: float | None = None,
    return_dict: bool | None = None,
) -> torch.Tensor | XcodecEncoderOutput:
    r"""
    input_values (`torch.FloatTensor` of shape `(batch_size, channels, num_samples)`):
        Float values of the input audio waveform.
    bandwidth (`float`, *optional*):
        The target bandwidth in (kbps) supports only values in `config.target_bandwidths`.
        Defaults to the highest available bandwidth (the last entry of `config.target_bandwidths`).
    return_dict (`bool`, *optional*):
        Whether or not to return a [`~utils.ModelOutput`].
    Returns:
        `torch.LongTensor` of shape `(batch_size, num_quantizers, codes_length)` containing the discrete encoded audio codes.
    """
    return_dict = return_dict if return_dict is not None else self.config.return_dict
    channels = input_values.shape[1]
    if channels != 1:
        raise ValueError(f"Audio must be mono, but got {channels}")
    if bandwidth is None:
        # default to the highest configured bandwidth (uses the most quantizers)
        bandwidth = self.config.target_bandwidths[-1]
    elif bandwidth not in self.config.target_bandwidths:
        raise ValueError(
            f"This model doesn't support the bandwidth {bandwidth}. Select one of {self.config.target_bandwidths}."
        )
    # semantic branch: layer-averaged SSL features, then downsampled by the semantic encoder
    e_semantic_input = self._extract_semantic_features(input_values).detach()
    e_semantic = self.encoder_semantic(e_semantic_input.transpose(1, 2))
    # original codebase infer to get the output length, but we can directly infer it
    # from the model and know whether we should pad
    if self._get_conv1d_output_lengths(input_values.shape[2], self.acoustic_encoder) != e_semantic.shape[2]:
        e_acoustic = self.acoustic_encoder(F.pad(input_values, (self.pad, self.pad)))
    else:
        e_acoustic = self.acoustic_encoder(input_values)
    # fuse acoustic + semantic features along the channel axis, then quantize
    embeddings = torch.cat([e_acoustic.to(e_semantic.device), e_semantic], dim=1)
    embeddings = self.fc(embeddings.transpose(1, 2)).transpose(1, 2)
    audio_codes = self.quantizer.encode(embeddings, bandwidth)
    # (num_quantizers, batch, codes_length) -> (batch, num_quantizers, codes_length)
    audio_codes = audio_codes.transpose(0, 1)
    if not return_dict:
        return audio_codes
    return XcodecEncoderOutput(audio_codes)
@auto_docstring
def decode(
    self,
    audio_codes: torch.Tensor,
    return_dict: bool | None = None,
) -> torch.Tensor | XcodecDecoderOutput:
    r"""
    audio_codes (`torch.LongTensor` of shape `(batch_size, num_quantizers, codes_length)`):
        Discrete code indices computed using `model.encode`.
    return_dict (`bool`, *optional*):
        Whether or not to return a [`~utils.ModelOutput`]
    Returns:
        Decoded audio values of shape `(batch_size, channels, num_samples)` obtained using the decoder part of
        Xcodec.
    """
    if return_dict is None:
        return_dict = self.config.return_dict
    # the quantizer expects codes laid out as (num_quantizers, batch, codes_length)
    transposed_codes = audio_codes.transpose(0, 1)
    quantized = self.quantizer.decode(transposed_codes)
    # project the joint embedding back to the acoustic decoder's input width
    acoustic_embedding = self.fc2(quantized.transpose(1, 2)).transpose(1, 2)
    audio_values = self.acoustic_decoder(acoustic_embedding)
    return XcodecDecoderOutput(audio_values) if return_dict else audio_values
    @auto_docstring
    def forward(
        self,
        input_values: torch.Tensor,
        audio_codes: torch.Tensor | None = None,
        bandwidth: float | None = None,
        return_dict: bool | None = None,
    ) -> tuple[torch.Tensor, torch.Tensor] | XcodecOutput:
        r"""
        input_values (`torch.FloatTensor` of shape `(batch_size, channels, num_samples)`):
            The raw float values of the input audio waveform.
        audio_codes (`torch.LongTensor` of shape `(batch_size, num_quantizers, codes_length)`, *optional*):
            Discrete code indices computed using `model.encode`. Computed from `input_values` when not provided.
        bandwidth (`float`, *optional*):
            Target bandwidth in kbps. Must be one of `config.target_bandwidths`. Defaults to the highest available bandwidth.
        return_dict (`bool`, *optional*):
            Whether to return a [`XcodecOutput`] instead of a plain tuple.

        Returns:
            `XcodecOutput` or tuple `(audio_codes, audio_values)`:
            - `audio_codes` of shape `(batch_size, num_quantizers, codes_length)`: the quantized discrete codes.
            - `audio_values` of shape `(batch_size, channels, num_samples)`: the reconstructed audio waveform given the codes.

        Example:
        ```python
        >>> from datasets import Audio, load_dataset
        >>> from transformers import AutoFeatureExtractor, XcodecModel
        >>> model_id = "hf-audio/xcodec-hubert-librispeech"
        >>> model = XcodecModel.from_pretrained(model_id)
        >>> feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        >>> dataset = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        >>> dataset = dataset.cast_column("audio", Audio(sampling_rate=feature_extractor.sampling_rate))
        >>> audio_sample = dataset[0]['audio']['array']
        >>> inputs = feature_extractor(raw_audio=audio_sample, return_tensors="pt")
        >>> outputs = model(**inputs)
        >>> audio_codes = outputs.audio_codes
        >>> audio_values = outputs.audio_values
        ```
        """
        return_dict = return_dict if return_dict is not None else self.config.return_dict
        length = input_values.shape[-1]
        if audio_codes is None:
            audio_codes = self.encode(input_values, bandwidth, return_dict=False)
        # `[0]` picks the audio tensor out of either the tuple or the ModelOutput; the trailing
        # slice trims the decoded waveform back to the original input length.
        audio_values = self.decode(audio_codes, return_dict=return_dict)[0][..., :length]
        if not return_dict:
            return (audio_codes, audio_values)
        return XcodecOutput(audio_codes=audio_codes, audio_values=audio_values)
__all__ = ["XcodecModel", "XcodecPreTrainedModel"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/xcodec/modeling_xcodec.py",
"license": "Apache License 2.0",
"lines": 516,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:tests/models/xcodec/test_modeling_xcodec.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch Xcodec model."""
import inspect
import json
import math
import unittest
from pathlib import Path
import numpy as np
from datasets import Audio, load_dataset
from parameterized import parameterized
from tests.test_configuration_common import ConfigTester
from tests.test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from transformers import AutoFeatureExtractor, XcodecConfig
from transformers.testing_utils import (
is_torch_available,
require_deterministic_for_xpu,
require_torch,
slow,
torch_device,
)
if is_torch_available():
import torch
from transformers import DacConfig, HubertConfig, XcodecModel
@require_torch
class XcodecModelTester:
    """Builds tiny Xcodec configs and random inputs so the common model tests run quickly."""

    def __init__(
        self,
        parent,
        batch_size=4,
        num_channels=1,
        sample_rate=16000,
        codebook_size=1024,
        num_samples=256,
        is_training=False,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.sample_rate = sample_rate
        self.codebook_size = codebook_size
        self.is_training = is_training
        self.num_samples = num_samples
        # Deliberately tiny acoustic (DAC) and semantic (HuBERT) sub-configs to keep tests fast.
        self.acoustic_model_config = DacConfig(
            decoder_hidden_size=8, encoder_hidden_size=8, codebook_size=16, downsampling_ratios=[16, 16]
        )
        self.semantic_model_config = HubertConfig(
            hidden_size=32,
            num_hidden_layers=2,
            num_attention_heads=2,
            intermediate_size=12,
            conv_dim=(4, 4, 4, 4, 4, 4, 4),
        )

    def prepare_config_and_inputs(self):
        """Return a small config plus a random waveform batch."""
        config = self.get_config()
        inputs_dict = {
            "input_values": floats_tensor([self.batch_size, self.num_channels, self.num_samples], scale=1.0)
        }
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        """Alias used by ModelTesterMixin; same as `prepare_config_and_inputs`."""
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def prepare_config_and_inputs_for_model_class(self, model_class):
        """Also add random `audio_codes` sized from the config's hop length and quantizer count."""
        config, inputs_dict = self.prepare_config_and_inputs()
        codes_length = math.ceil(self.num_samples / config.hop_length)
        inputs_dict["audio_codes"] = ids_tensor(
            [self.batch_size, config.num_quantizers, codes_length], config.codebook_size
        )
        return config, inputs_dict

    def get_config(self):
        """Assemble the full XcodecConfig from the tiny sub-configs above."""
        return XcodecConfig(
            sample_rate=self.sample_rate,
            audio_channels=self.num_channels,
            codebook_size=self.codebook_size,
            acoustic_model_config=self.acoustic_model_config,
            semantic_model_config=self.semantic_model_config,
        )

    def create_and_check_model_forward(self, config, inputs_dict):
        """Forward a random waveform and check the reconstructed audio keeps the input shape."""
        model = XcodecModel(config=config).to(torch_device).eval()
        result = model(input_values=inputs_dict["input_values"])
        self.parent.assertEqual(result.audio_values.shape, (self.batch_size, self.num_channels, self.num_samples))
@require_torch
class XcodecModelTest(ModelTesterMixin, unittest.TestCase):
    """Common model-test suite for XcodecModel.

    Attention/hidden-state related tests are skipped because the codec model does not
    expose those outputs.
    """

    all_model_classes = (XcodecModel,) if is_torch_available() else ()
    is_encoder_decoder = True
    test_resize_embeddings = False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        # model does not support returning hidden states
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if "output_attentions" in inputs_dict:
            inputs_dict.pop("output_attentions")
        if "output_hidden_states" in inputs_dict:
            inputs_dict.pop("output_hidden_states")
        return inputs_dict

    def setUp(self):
        self.model_tester = XcodecModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=XcodecConfig, common_properties=[], has_text_modality=False
        )

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model_forward(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_forward(*config_and_inputs)

    def test_forward_signature(self):
        """Ensure `forward` keeps its documented positional argument order."""
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["input_values", "audio_codes", "bandwidth", "return_dict"]
            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)

    def test_gradient_checkpointing_backward_compatibility(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            if not model_class.supports_gradient_checkpointing:
                continue
            # NOTE(review): these sub-config names look copied from another model; XcodecConfig
            # may not define `text_encoder`/`audio_encoder`/`decoder` — confirm.
            config.text_encoder.gradient_checkpointing = True
            config.audio_encoder.gradient_checkpointing = True
            config.decoder.gradient_checkpointing = True
            model = model_class(config)
            self.assertTrue(model.is_gradient_checkpointing)

    @unittest.skip(reason="The XcodecModel does not have `inputs_embeds` logics")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="The XcodecModel does not have `inputs_embeds` logics")
    def test_model_get_set_embeddings(self):
        pass

    @unittest.skip(reason="The XcodecModel does not have the usual `attention` logic")
    def test_retain_grad_hidden_states_attentions(self):
        pass

    @unittest.skip(reason="The XcodecModel does not have the usual `attention` logic")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="The XcodecModel does not have the usual `hidden_states` logic")
    def test_hidden_states_output(self):
        pass

    # Copied from transformers.tests.encodec.test_modeling_encodecEncodecModelTest.test_determinism
    def test_determinism(self):
        """Two forward passes on the same inputs must produce (near-)identical outputs."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def check_determinism(first, second):
            # outputs are not tensors but list (since each sequence don't have the same frame_length)
            out_1 = first.cpu().numpy()
            out_2 = second.cpu().numpy()
            out_1 = out_1[~np.isnan(out_1)]
            out_2 = out_2[~np.isnan(out_2)]
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                first = model(**self._prepare_for_class(inputs_dict, model_class))[0]
                second = model(**self._prepare_for_class(inputs_dict, model_class))[0]
            if isinstance(first, tuple) and isinstance(second, tuple):
                for tensor1, tensor2 in zip(first, second):
                    check_determinism(tensor1, tensor2)
            else:
                check_determinism(first, second)

    # Copied from transformers.tests.encodec.test_modeling_encodecEncodecModelTest.test_model_outputs_equivalence
    def test_model_outputs_equivalence(self):
        """Tuple output (`return_dict=False`) must match the ModelOutput values element-wise."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def set_nan_tensor_to_zero(t):
            # NaNs cannot be compared with allclose; zero them symmetrically on both sides.
            t[t != t] = 0
            return t

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            with torch.no_grad():
                tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs)
                dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs)
                self.assertTrue(isinstance(tuple_output, tuple))
                self.assertTrue(isinstance(dict_output, dict))
                for tuple_value, dict_value in zip(tuple_output, dict_output.values()):
                    self.assertTrue(
                        torch.allclose(
                            set_nan_tensor_to_zero(tuple_value), set_nan_tensor_to_zero(dict_value), atol=1e-5
                        ),
                        msg=(
                            "Tuple and dict output are not equal. Difference:"
                            f" {torch.max(torch.abs(tuple_value - dict_value))}. Tuple has `nan`:"
                            f" {torch.isnan(tuple_value).any()} and `inf`: {torch.isinf(tuple_value)}. Dict has"
                            f" `nan`: {torch.isnan(dict_value).any()} and `inf`: {torch.isinf(dict_value)}."
                        ),
                    )

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)

    @unittest.skip(reason="The XcodecModel does not have support dynamic compile yet")
    def test_sdpa_can_compile_dynamic(self):
        pass
# Copied from transformers.tests.encodec.test_modeling_encodec.normalize
def normalize(arr):
    """Scale *arr* to unit Euclidean (L2) norm."""
    return arr / np.linalg.norm(arr)
# Copied from transformers.tests.encodec.test_modeling_encodec.compute_rmse
def compute_rmse(arr1, arr2):
    """RMSE between two audio tensors after per-array L2 normalization.

    Both tensors are moved to CPU, squeezed, and truncated to the shorter
    last-dimension length before comparison.
    """
    first = arr1.cpu().numpy().squeeze()
    second = arr2.cpu().numpy().squeeze()
    # Truncate to the common prefix so encoder padding does not skew the error.
    common_len = min(arr1.shape[-1], arr2.shape[-1])
    first = normalize(first[..., :common_len])
    second = normalize(second[..., :common_len])
    return np.sqrt(np.mean((first - second) ** 2))
"""
Integration tests for XCodec
Code for reproducing expected outputs can be found here:
https://gist.github.com/ebezzam/cdaf8c223e59e7677b2ea6bc2dc8230b
One reason for higher tolerances is because of different implementation of `Snake1d` within Transformer version DAC
See here: https://github.com/huggingface/transformers/pull/39793#issue-3277407384
"""
RESULTS_PATH = Path(__file__).parent.parent.parent / "fixtures/xcodec/integration_tests.json"
with open(RESULTS_PATH, "r") as f:
raw_data = json.load(f)
# convert dicts into tuples ordered to match test args
EXPECTED_OUTPUTS_JSON = [
(
f"{d['repo_id']}_{d['bandwidth']}",
d["repo_id"],
d["bandwidth"],
d["codes"],
d["decoded"],
d["codec_error"],
d["codec_tol"],
d["dec_tol"],
)
for d in raw_data
]
@slow
@require_torch
class XcodecIntegrationTest(unittest.TestCase):
    """End-to-end checks of pretrained Xcodec checkpoints against stored expected outputs."""

    @parameterized.expand(EXPECTED_OUTPUTS_JSON)
    @require_deterministic_for_xpu
    def test_integration(
        self, test_name, repo_id, bandwidth, exp_codes, exp_decoded, exp_codec_err, codec_tol, dec_tol
    ):
        # load model
        model = XcodecModel.from_pretrained(repo_id).to(torch_device).eval()
        feature_extractor = AutoFeatureExtractor.from_pretrained(repo_id)
        # load audio example
        librispeech_dummy = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        librispeech_dummy = librispeech_dummy.cast_column(
            "audio", Audio(sampling_rate=feature_extractor.sampling_rate)
        )
        audio_array = librispeech_dummy[0]["audio"]["array"]
        inputs = feature_extractor(
            raw_audio=audio_array, sampling_rate=feature_extractor.sampling_rate, return_tensors="pt"
        ).to(torch_device)
        x = inputs["input_values"]
        with torch.no_grad():
            # Codes are integer indices, so they must match the reference exactly.
            ENC_TOL = 0
            audio_codes = model.encode(x, bandwidth=bandwidth, return_dict=False)
            if exp_codes is not None:
                exp_codes = torch.tensor(exp_codes).to(torch_device)
                torch.testing.assert_close(
                    audio_codes[..., : exp_codes.shape[-1]],
                    exp_codes,
                    rtol=ENC_TOL,
                    atol=ENC_TOL,
                )
            # dec_tol = 1e-5 # increased to 1e-4 for passing on 4 kbps
            input_values_dec = model.decode(audio_codes).audio_values
            if exp_decoded is not None:
                exp_decoded = torch.tensor(exp_decoded).to(torch_device)
                torch.testing.assert_close(
                    input_values_dec[..., : exp_decoded.shape[-1]],
                    exp_decoded,
                    rtol=dec_tol,
                    atol=dec_tol,
                )
            # compute codec error
            codec_err = compute_rmse(input_values_dec, x)
            torch.testing.assert_close(codec_err, exp_codec_err, rtol=codec_tol, atol=codec_tol)
            # make sure forward and decode gives same result
            audio_values_enc_dec = model(x, bandwidth=bandwidth).audio_values
            torch.testing.assert_close(input_values_dec, audio_values_enc_dec, rtol=1e-6, atol=1e-6)
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/xcodec/test_modeling_xcodec.py",
"license": "Apache License 2.0",
"lines": 293,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:benchmark/benches/llama.py | # Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
from logging import Logger
from threading import Event, Thread
from time import perf_counter, sleep
# Add the parent directory to Python path to import benchmarks_entrypoint
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import gpustat
import psutil
import psycopg2
from benchmarks_entrypoint import MetricsRecorder
# Optional heavy ML dependencies - only required when actually running the benchmark
try:
    import torch

    from transformers import AutoModelForCausalLM, AutoTokenizer, GenerationConfig, StaticCache

    TRANSFORMERS_AVAILABLE = True
except ImportError:
    # Keep the module importable without torch/transformers installed;
    # run_benchmark checks this flag and bails out with a clear error message.
    TRANSFORMERS_AVAILABLE = False
    torch = None
    AutoModelForCausalLM = None
    AutoTokenizer = None
    GenerationConfig = None
    StaticCache = None

os.environ["HF_XET_HIGH_PERFORMANCE"] = "1"
os.environ["TOKENIZERS_PARALLELISM"] = "1"

# Only set torch precision if torch is available
if TRANSFORMERS_AVAILABLE:
    torch.set_float32_matmul_precision("high")
def collect_metrics(benchmark_id, continue_metric_collection, metrics_recorder):
    """Sample process CPU/memory and GPU utilization until the stop event is set.

    Intended to run on a background thread; every sample is forwarded to
    `metrics_recorder.collect_device_measurements` tagged with `benchmark_id`.
    """
    proc = psutil.Process(os.getpid())
    while not continue_metric_collection.is_set():
        # oneshot() caches process info so the reads below only hit the OS once.
        with proc.oneshot():
            cpu_util = proc.cpu_percent()
            mem_megabytes = proc.memory_info().rss / (1024 * 1024)
            gpu_stats = gpustat.GPUStatCollection.new_query()
            gpu_util = gpu_stats[0]["utilization.gpu"]
            gpu_mem_megabytes = gpu_stats[0]["memory.used"]
            metrics_recorder.collect_device_measurements(
                benchmark_id, cpu_util, mem_megabytes, gpu_util, gpu_mem_megabytes
            )
        sleep(0.01)
def run_benchmark(
    logger: Logger,
    repository: str,
    branch: str,
    commit_id: str,
    commit_msg: str,
    metrics_recorder=None,
    num_tokens_to_generate=100,
):
    """Benchmark Llama-2-7b: eager and compiled forward/generate plus per-token latencies.

    Device metrics are sampled on a background thread for the duration of the run and
    all measurements are pushed through `metrics_recorder`. Requires a CUDA device and
    the optional torch/transformers dependencies; returns early (logging an error) when
    they are missing or the recorder cannot be created.
    """
    # Check if required ML dependencies are available
    if not TRANSFORMERS_AVAILABLE:
        logger.error("Transformers and torch are required to run the LLaMA benchmark. Please install them with:")
        logger.error("pip install torch transformers")
        logger.error("Skipping LLaMA benchmark due to missing dependencies.")
        return

    continue_metric_collection = Event()
    metrics_thread = None
    model_id = "meta-llama/Llama-2-7b-hf"

    # If no metrics_recorder is provided, create one for backward compatibility
    if metrics_recorder is None:
        try:
            metrics_recorder = MetricsRecorder(
                psycopg2.connect("dbname=metrics"), logger, repository, branch, commit_id, commit_msg, True
            )
            should_close_recorder = True
        except Exception as e:
            logger.error(f"Failed to create metrics recorder: {e}")
            return
    else:
        should_close_recorder = False
    try:
        gpu_stats = gpustat.GPUStatCollection.new_query()
        gpu_name = gpu_stats[0]["name"]
        benchmark_id = metrics_recorder.initialise_benchmark({"gpu_name": gpu_name, "model_id": model_id})
        logger.info(f"running benchmark #{benchmark_id} on {gpu_name} for {model_id}")
        metrics_thread = Thread(
            target=collect_metrics,
            args=[benchmark_id, continue_metric_collection, metrics_recorder],
        )
        metrics_thread.start()
        logger.info("started background thread to fetch device metrics")
        os.environ["TOKENIZERS_PARALLELISM"] = "false"  # silence warnings when compiling
        device = "cuda"
        logger.info("downloading weights")
        # This is to avoid counting download in model load time measurement
        model = AutoModelForCausalLM.from_pretrained(model_id, dtype=torch.float16)
        gen_config = GenerationConfig(do_sample=False, top_p=1, temperature=1)
        logger.info("loading model")
        start = perf_counter()
        model = AutoModelForCausalLM.from_pretrained(
            model_id, dtype=torch.float16, generation_config=gen_config
        ).eval()
        model.to(device)
        torch.cuda.synchronize()
        end = perf_counter()
        model_load_time = end - start
        logger.info(f"loaded model in: {model_load_time}s")
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        prompt = "Why dogs are so cute?"
        inputs = tokenizer(prompt, return_tensors="pt").to(device)
        # Specify the max length (including both the prompt and the response)
        # When calling `generate` with `cache_implementation="static" later, this is also used to create a `StaticCache` object
        # with sequence length = `max_length`. The longer the more you will re-use it
        seq_length = inputs["input_ids"].shape[1]
        model.generation_config.max_length = seq_length + num_tokens_to_generate
        batch_size = inputs["input_ids"].shape[0]

        # Copied from the gpt-fast repo
        def multinomial_sample_one_no_sync(probs_sort):  # Does multinomial sampling without a cuda synchronization
            q = torch.empty_like(probs_sort).exponential_(1)
            return torch.argmax(probs_sort / q, dim=-1, keepdim=True).to(dtype=torch.int)

        def logits_to_probs(logits, temperature: float = 1.0, top_k: int | None = None):
            # Temperature-scale then (optionally) top-k mask before softmax.
            logits = logits / max(temperature, 1e-5)
            if top_k is not None:
                v, _ = torch.topk(logits, min(top_k, logits.size(-1)))
                pivot = v.select(-1, -1).unsqueeze(-1)
                logits = torch.where(logits < pivot, -float("Inf"), logits)
            probs = torch.nn.functional.softmax(logits, dim=-1)
            return probs

        def sample(logits, temperature: float = 1.0, top_k: int | None = None):
            # Sample the next token from the last position of the first batch element.
            probs = logits_to_probs(logits[0, -1], temperature, top_k)
            idx_next = multinomial_sample_one_no_sync(probs)
            return idx_next, probs

        # First eager forward pass
        logger.info("running first eager forward pass")
        start = perf_counter()
        _ = model(**inputs)
        torch.cuda.synchronize()
        end = perf_counter()
        first_eager_fwd_pass_time = end - start
        logger.info(f"completed first eager forward pass in: {first_eager_fwd_pass_time}s")

        # Second eager forward pass (should be faster)
        logger.info("running second eager forward pass")
        start = perf_counter()
        _ = model(**inputs)
        torch.cuda.synchronize()
        end = perf_counter()
        second_eager_fwd_pass_time = end - start
        logger.info(f"completed second eager forward pass in: {second_eager_fwd_pass_time}s")

        # First eager generation
        logger.info("running first eager generation")
        start = perf_counter()
        output = model.generate(**inputs)
        torch.cuda.synchronize()
        end = perf_counter()
        first_eager_generate_time = end - start
        logger.info(f"completed first eager generation in: {first_eager_generate_time}s")
        logger.info(f"generated: {tokenizer.batch_decode(output.cpu().tolist())}")

        # Second eager generation (should be faster)
        logger.info("running second eager generation")
        start = perf_counter()
        output = model.generate(**inputs)
        torch.cuda.synchronize()
        end = perf_counter()
        second_eager_generate_time = end - start
        logger.info(f"completed second eager generation in: {second_eager_generate_time}s")
        logger.info(f"generated: {tokenizer.batch_decode(output.cpu().tolist())}")

        logger.info("running generation timing loop")
        input_pos = torch.arange(0, seq_length, device=device)
        # NOTE: from here on `inputs` is the input-ids tensor, not the tokenizer dict.
        inputs = inputs["input_ids"]
        start = perf_counter()
        with torch.nn.attention.sdpa_kernel(torch.nn.attention.SDPBackend.MATH):
            logits = model(inputs, position_ids=input_pos).logits
            next_token, probs = sample(logits, temperature=0.6, top_k=5)
        torch.cuda.synchronize()
        end = perf_counter()
        time_to_first_token = end - start

        input_pos = torch.tensor([seq_length], device=device, dtype=torch.int)
        next_token = next_token.clone()
        start = perf_counter()
        with torch.nn.attention.sdpa_kernel(torch.nn.attention.SDPBackend.MATH):
            logits = model(next_token, position_ids=input_pos).logits
            next_token, probs = sample(logits, temperature=0.6, top_k=5)
        torch.cuda.synchronize()
        end = perf_counter()
        time_to_second_token = end - start

        input_pos = torch.tensor([seq_length + 1], device=device, dtype=torch.int)
        next_token = next_token.clone()
        start = perf_counter()
        with torch.nn.attention.sdpa_kernel(torch.nn.attention.SDPBackend.MATH):
            logits = model(next_token, position_ids=input_pos).logits
            next_token, probs = sample(logits, temperature=0.6, top_k=5)
        torch.cuda.synchronize()
        end = perf_counter()
        time_to_third_token = end - start

        logger.info("running longer generation timing loop")
        total_time = 0
        for i in range(20):
            input_pos = torch.tensor([seq_length + 2 + i], device=device, dtype=torch.int)
            next_token = next_token.clone()
            start = perf_counter()
            with torch.nn.attention.sdpa_kernel(torch.nn.attention.SDPBackend.MATH):
                logits = model(next_token, position_ids=input_pos).logits
                next_token, probs = sample(logits, temperature=0.6, top_k=5)
            torch.cuda.synchronize()
            end = perf_counter()
            total_time += end - start
        mean_time_to_next_token = total_time / 20

        logger.info("running compilation benchmarks")
        # Now compile the model
        model = torch.compile(model, mode="max-autotune", fullgraph=True)
        # StaticCache for generation
        with torch.device(device):
            # NOTE(review): `setup_caches` is a gpt-fast API; transformers models do not
            # define it — confirm this call actually succeeds here.
            model.setup_caches(max_batch_size=batch_size, max_seq_len=seq_length + num_tokens_to_generate)
        input_pos = torch.arange(0, seq_length, device=device)
        # NOTE(review): `inputs` is rebound to a plain input-ids tensor here, yet the
        # `model.generate(**inputs, ...)` calls below unpack it as a mapping — this looks
        # like it would raise a TypeError; confirm against the original intent.
        inputs = tokenizer(prompt, return_tensors="pt").to(device)["input_ids"]

        logger.info("compiling model")
        model = AutoModelForCausalLM.from_pretrained(model_id, dtype=torch.float16, generation_config=gen_config)
        model.to(device)
        model = torch.compile(model, mode="max-autotune", fullgraph=True)

        past_key_values = StaticCache(
            model.config,
            max_batch_size=batch_size,
            device=device,
            dtype=torch.float16,
            max_cache_len=seq_length + 128,
        )
        # 1st call
        start = perf_counter()
        output = model.generate(**inputs, past_key_values=past_key_values)
        end = perf_counter()
        first_compile_generate_time = end - start
        logger.info(f"completed first compile generation in: {first_compile_generate_time}s")
        logger.info(f"generated: {tokenizer.batch_decode(output.cpu().tolist())}")

        past_key_values = StaticCache(
            model.config,
            max_batch_size=batch_size,
            device=device,
            dtype=torch.float16,
            max_cache_len=seq_length + 128,
        )
        # 2nd call
        start = perf_counter()
        output = model.generate(**inputs, past_key_values=past_key_values)
        end = perf_counter()
        second_compile_generate_time = end - start
        logger.info(f"completed second compile generation in: {second_compile_generate_time}s")
        logger.info(f"generated: {tokenizer.batch_decode(output.cpu().tolist())}")

        past_key_values = StaticCache(
            model.config,
            max_batch_size=batch_size,
            device=device,
            dtype=torch.float16,
            max_cache_len=seq_length + 128,
        )
        # 3rd call
        start = perf_counter()
        output = model.generate(**inputs, past_key_values=past_key_values)
        end = perf_counter()
        third_compile_generate_time = end - start
        logger.info(f"completed third compile generation in: {third_compile_generate_time}s")
        logger.info(f"generated: {tokenizer.batch_decode(output.cpu().tolist())}")

        past_key_values = StaticCache(
            model.config,
            max_batch_size=batch_size,
            device=device,
            dtype=torch.float16,
            max_cache_len=seq_length + 128,
        )
        # 4th call
        start = perf_counter()
        output = model.generate(**inputs, past_key_values=past_key_values)
        end = perf_counter()
        fourth_compile_generate_time = end - start
        logger.info(f"completed fourth compile generation in: {fourth_compile_generate_time}s")
        logger.info(f"generated: {tokenizer.batch_decode(output.cpu().tolist())}")

        metrics_recorder.collect_model_measurements(
            benchmark_id,
            {
                "model_load_time": model_load_time,
                "first_eager_forward_pass_time_secs": first_eager_fwd_pass_time,
                "second_eager_forward_pass_time_secs": second_eager_fwd_pass_time,
                "first_eager_generate_time_secs": first_eager_generate_time,
                "second_eager_generate_time_secs": second_eager_generate_time,
                "time_to_first_token_secs": time_to_first_token,
                "time_to_second_token_secs": time_to_second_token,
                "time_to_third_token_secs": time_to_third_token,
                "time_to_next_token_mean_secs": mean_time_to_next_token,
                "first_compile_generate_time_secs": first_compile_generate_time,
                "second_compile_generate_time_secs": second_compile_generate_time,
                "third_compile_generate_time_secs": third_compile_generate_time,
                "fourth_compile_generate_time_secs": fourth_compile_generate_time,
            },
        )
    except Exception as e:
        logger.error(f"Caught exception: {e}")
    # Always stop the metrics thread and (if we own it) the recorder, even on failure.
    continue_metric_collection.set()
    if metrics_thread is not None:
        metrics_thread.join()

    # Only close the recorder if we created it locally
    if should_close_recorder:
        metrics_recorder.close()
| {
"repo_id": "huggingface/transformers",
"file_path": "benchmark/benches/llama.py",
"license": "Apache License 2.0",
"lines": 305,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:utils/add_dates.py | import argparse
import os
import re
import subprocess
from datetime import date, datetime
from urllib.error import HTTPError
from urllib.request import Request, urlopen
from huggingface_hub import paper_info
from transformers import logging
logger = logging.get_logger(__name__)

# Repository layout anchors; the script may be launched from utils/ or the repo root.
ROOT = os.getcwd().split("utils")[0]
DOCS_PATH = os.path.join(ROOT, "docs/source/en/model_doc")
MODELS_PATH = os.path.join(ROOT, "src/transformers/models")
GITHUB_REPO_URL = "https://github.com/huggingface/transformers"
GITHUB_RAW_URL = "https://raw.githubusercontent.com/huggingface/transformers/main"

# Standard license header for model doc pages.
COPYRIGHT_DISCLAIMER = """<!--Copyright 2025 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->"""

# Doc pages whose papers exist on arXiv but are not indexed on hf.co/papers.
ARXIV_PAPERS_NOT_IN_HF_PAPERS = {
    "gemma3n.md": "2506.06644",
    "xmod.md": "2205.06266",
}
def check_file_exists_on_github(file_path: str) -> bool:
    """Check if a file exists on the main branch of the GitHub repository.

    Args:
        file_path: Relative path from repository root (absolute paths under ROOT are converted).

    Returns:
        True if the file exists on GitHub main (or the check could not be performed),
        False only on a confirmed 404.

    Note:
        Network failures and non-404 HTTP errors are treated as "exists" so the script
        can fall back to local git history instead of aborting.
    """
    # Normalize an absolute path under the repo root to a repo-relative one.
    if file_path.startswith(ROOT):
        file_path = file_path[len(ROOT) :].lstrip("/")
    raw_url = f"{GITHUB_RAW_URL}/{file_path}"
    try:
        # HEAD is enough to test existence and avoids downloading the file body.
        head_request = Request(raw_url, method="HEAD")
        head_request.add_header("User-Agent", "transformers-add-dates-script")
        with urlopen(head_request, timeout=10) as response:
            return response.status == 200
    except HTTPError as e:
        # Only a confirmed 404 means "missing"; any other HTTP error assumes presence.
        return e.code != 404
    except Exception:
        # Network/timeout problems: assume the file exists and continue.
        return True
def get_modified_cards() -> list[str]:
    """Get the list of model names from modified files in docs/source/en/model_doc/"""
    current_branch = subprocess.check_output(["git", "branch", "--show-current"], text=True).strip()
    if current_branch == "main":
        # On main branch, only uncommitted changes detected
        diff_output = subprocess.check_output(["git", "diff", "--name-only", "HEAD"], text=True)
    else:
        # On a feature branch, diff against the fork point with main.
        fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
        diff_output = subprocess.check_output(f"git diff --name-only {fork_point_sha}".split()).decode("utf-8")

    model_names = []
    for entry in diff_output.strip().split("\n"):
        # Only markdown files under the model_doc directory are of interest.
        if not (entry.startswith("docs/source/en/model_doc/") and entry.endswith(".md")):
            continue
        if not os.path.exists(os.path.join(ROOT, entry)):
            continue
        model_name = os.path.splitext(os.path.basename(entry))[0]
        if model_name not in ("auto", "timm_wrapper"):
            model_names.append(model_name)
    return model_names
def get_paper_link(model_card: str | None, path: str | None) -> str:
"""Get the first paper link from the model card content."""
if model_card is not None and not model_card.endswith(".md"):
model_card = f"{model_card}.md"
file_path = path or os.path.join(DOCS_PATH, f"{model_card}")
model_card = os.path.basename(file_path)
with open(file_path, "r", encoding="utf-8") as f:
content = f.read()
# Find known paper links
paper_ids = re.findall(r"https://huggingface\.co/papers/\d+\.\d+", content)
paper_ids += re.findall(r"https://arxiv\.org/abs/\d+\.\d+", content)
paper_ids += re.findall(r"https://arxiv\.org/pdf/\d+\.\d+", content)
if len(paper_ids) == 0:
return "No_paper"
return paper_ids[0]
def get_first_commit_date(model_name: str | None) -> str:
    """Get the first commit date of the model's init file or model.md. This date is considered as the date the model was added to HF transformers"""
    # NOTE(review): despite the `str | None` annotation, a None model_name would crash on
    # `.endswith` below — all visible callers pass a string; consider tightening to `str`.
    if model_name.endswith(".md"):
        model_name = f"{model_name[:-3]}"
    # Doc file names may use "-" while source directories use "_"
    model_name_src = model_name
    if "-" in model_name:
        model_name_src = model_name.replace("-", "_")
    file_path = os.path.join(MODELS_PATH, model_name_src, "__init__.py")
    # If the init file is not found (only true for legacy models), the doc's first commit date is used
    if not os.path.exists(file_path):
        file_path = os.path.join(DOCS_PATH, f"{model_name}.md")
    # Check if file exists on GitHub main branch
    file_exists_on_github = check_file_exists_on_github(file_path)
    if not file_exists_on_github:
        # File does not exist on GitHub main branch (new model), use today's date
        final_date = date.today().isoformat()
    else:
        # File exists on GitHub main branch, get the first commit date from local git history
        final_date = subprocess.check_output(
            ["git", "log", "--reverse", "--pretty=format:%ad", "--date=iso", file_path], text=True
        )
    # `--reverse` lists oldest commit first; keep only the YYYY-MM-DD part of its date
    return final_date.strip().split("\n")[0][:10]
def get_release_date(link: str) -> str:
    """Return the paper's publication date (ISO format) for a Hugging Face papers link.

    Fix: the original fell off the end (returning None) both when the paper-info
    fetch failed and for unrecognized links, which made callers interpolate the
    literal string "None" into model cards. All failure paths now return the
    r"{release_date}" placeholder, which downstream code already treats as
    "date unknown" (see the `not in (r"{release_date}", "None")` check in
    `insert_dates`), so the change is backward compatible.

    Args:
        link: A paper URL (huggingface.co/papers, arxiv abs/pdf, or anything else).

    Returns:
        ISO date string on success, otherwise the r"{release_date}" placeholder.
    """
    if link.startswith("https://huggingface.co/papers/"):
        paper_id = link.replace("https://huggingface.co/papers/", "")
        try:
            info = paper_info(paper_id)
            return info.published_at.date().isoformat()
        except Exception as e:
            # Error fetching release date: fall through to the placeholder below
            logger.debug(f"Could not fetch paper info for {paper_id}: {e}")
    # arxiv links (and any unrecognized link) get the manual-fill placeholder
    return r"{release_date}"
def replace_paper_links(file_path: str) -> bool:
    """Replace arxiv links with huggingface links if valid, and replace hf.co with huggingface.co

    Returns True when the file was modified and rewritten, False otherwise.
    """
    with open(file_path, "r", encoding="utf-8") as handle:
        text = handle.read()
    before = text

    # Normalize the short hf.co domain
    text = text.replace("https://hf.co/", "https://huggingface.co/")

    # Collect arxiv paper ids from both abs- and pdf-style links
    paper_ids = re.findall(r"https://arxiv\.org/abs/(\d+\.\d+)", text)
    paper_ids += re.findall(r"https://arxiv\.org/pdf/(\d+\.\d+)", text)

    for paper_id in paper_ids:
        try:
            # Only rewrite links for papers that are mirrored on huggingface
            paper_info(paper_id)
        except Exception:
            # Paper not available on huggingface, keep the arxiv link
            continue
        source = f"https://arxiv.org/abs/{paper_id}"
        if source not in text:
            source = f"https://arxiv.org/pdf/{paper_id}"
        text = text.replace(source, f"https://huggingface.co/papers/{paper_id}")

    # Write back only when something actually changed
    if text == before:
        return False
    with open(file_path, "w", encoding="utf-8") as handle:
        handle.write(text)
    return True
def _normalize_model_card_name(model_card: str) -> str:
"""Ensure model card has .md extension"""
return model_card if model_card.endswith(".md") else f"{model_card}.md"
def _should_skip_model_card(model_card: str) -> bool:
"""Check if model card should be skipped"""
return model_card in ("auto.md", "timm_wrapper.md")
def _read_model_card_content(model_card: str) -> str:
    """Read and return the content of a model card"""
    card_path = os.path.join(DOCS_PATH, model_card)
    with open(card_path, "r", encoding="utf-8") as handle:
        return handle.read()
def _get_dates_pattern_match(content: str):
"""Search for the dates pattern in content and return match object"""
pattern = r"\n\*This model was released on (.*) and added to Hugging Face Transformers on (\d{4}-\d{2}-\d{2})\.\*"
return re.search(pattern, content)
def _dates_differ_significantly(date1: str, date2: str) -> bool:
"""Check if two dates differ by more than 1 day"""
try:
d1 = datetime.strptime(date1, "%Y-%m-%d")
d2 = datetime.strptime(date2, "%Y-%m-%d")
return abs((d1 - d2).days) > 1
except Exception:
return True # If dates can't be parsed, consider them different
def check_missing_dates(model_card_list: list[str]) -> list[str]:
    """Check which model cards are missing release dates and return their names"""
    missing = []
    for card in model_card_list:
        card = _normalize_model_card_name(card)
        if _should_skip_model_card(card):
            continue
        # A card is missing dates when the canonical dates line is absent
        if _get_dates_pattern_match(_read_model_card_content(card)) is None:
            missing.append(card)
    return missing
def check_incorrect_dates(model_card_list: list[str]) -> list[str]:
    """Check which model cards have incorrect HF commit dates and return their names"""
    incorrect = []
    for card in model_card_list:
        card = _normalize_model_card_name(card)
        if _should_skip_model_card(card):
            continue
        match = _get_dates_pattern_match(_read_model_card_content(card))
        if match is None:
            continue
        # Compare the recorded HF-added date against the actual first commit date
        recorded_hf_date = match.group(2)
        actual_hf_date = get_first_commit_date(model_name=card)
        if _dates_differ_significantly(recorded_hf_date, actual_hf_date):
            incorrect.append(card)
    return incorrect
def insert_dates(model_card_list: list[str]) -> None:
    """Insert or update release and commit dates in model cards"""
    for model_card in model_card_list:
        model_card = _normalize_model_card_name(model_card)
        if _should_skip_model_card(model_card):
            continue
        file_path = os.path.join(DOCS_PATH, model_card)
        # First replace arxiv paper links with hf paper link if possible
        replace_paper_links(file_path)
        # Read content and ensure copyright disclaimer exists
        # (re-read after replace_paper_links so we see its edits)
        content = _read_model_card_content(model_card)
        markers = list(re.finditer(r"-->", content))
        if len(markers) == 0:
            # No copyright marker found, adding disclaimer to the top
            content = COPYRIGHT_DISCLAIMER + "\n\n" + content
            with open(file_path, "w", encoding="utf-8") as f:
                f.write(content)
            # Re-scan so markers[0] points into the updated content
            markers = list(re.finditer(r"-->", content))
        # Get dates
        hf_commit_date = get_first_commit_date(model_name=model_card)
        paper_link = get_paper_link(model_card=model_card, path=file_path)
        if paper_link in ("No_paper", "blog"):
            release_date = r"{release_date}"
        else:
            release_date = get_release_date(paper_link)
        match = _get_dates_pattern_match(content)
        # Update or insert the dates line
        if match:
            # Preserve existing release date unless it's a placeholder
            existing_release_date = match.group(1)
            existing_hf_date = match.group(2)
            if existing_release_date not in (r"{release_date}", "None"):
                release_date = existing_release_date
            # Rewrite only when something meaningful changed (avoids needless churn)
            if _dates_differ_significantly(existing_hf_date, hf_commit_date) or existing_release_date != release_date:
                old_line = match.group(0)
                new_line = f"\n*This model was released on {release_date} and added to Hugging Face Transformers on {hf_commit_date}.*"
                content = content.replace(old_line, new_line)
                with open(file_path, "w", encoding="utf-8") as f:
                    f.write(content)
        else:
            # Insert new dates line after copyright marker
            insert_index = markers[0].end()
            date_info = f"\n*This model was released on {release_date} and added to Hugging Face Transformers on {hf_commit_date}.*"
            content = content[:insert_index] + date_info + content[insert_index:]
            with open(file_path, "w", encoding="utf-8") as f:
                f.write(content)
def get_all_model_cards():
    """Get all model cards from the docs path"""
    # Keep every .md stem except the two special non-model docs, sorted for determinism
    return sorted(
        os.path.splitext(filename)[0]
        for filename in os.listdir(DOCS_PATH)
        if filename.endswith(".md") and os.path.splitext(filename)[0] not in ["auto", "timm_wrapper"]
    )
def main(all=False, models=None, check_only=False):
    """Entry point: either verify dates across model cards or insert/fix them."""
    if check_only:
        # Missing dates are checked on every card; incorrect dates only on modified ones
        missing_dates = check_missing_dates(get_all_model_cards())
        incorrect_dates = check_incorrect_dates(get_modified_cards())
        if missing_dates or incorrect_dates:
            problematic_cards = missing_dates + incorrect_dates
            model_names = [card.replace(".md", "") for card in problematic_cards]
            raise ValueError(
                f"Missing or incorrect dates in the following model cards: {' '.join(problematic_cards)}\n"
                f"Run `python utils/add_dates.py --models {' '.join(model_names)}` to fix them."
            )
        return
    # Determine which model cards to process
    if all:
        model_cards = get_all_model_cards()
    elif models:
        model_cards = models
    else:
        model_cards = get_modified_cards()
    if model_cards:
        insert_dates(model_cards)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Add release and commit dates to model cards")
    # The three modes are mutually exclusive: explicit models, all cards, or check-only
    group = parser.add_mutually_exclusive_group(required=False)
    group.add_argument("--models", nargs="+", help="Specify model cards to process (without .md extension)")
    group.add_argument("--all", action="store_true", help="Process all model cards in the docs directory")
    group.add_argument("--check-only", action="store_true", help="Check if the dates are already present")
    args = parser.parse_args()
    try:
        main(args.all, args.models, args.check_only)
    except subprocess.CalledProcessError as e:
        # Git failures (e.g. shallow clone, missing main ref) are treated as non-fatal
        print(
            f"An error occurred while executing git commands but it can be ignored (git issue) most probably local: {e}"
        )
| {
"repo_id": "huggingface/transformers",
"file_path": "utils/add_dates.py",
"license": "Apache License 2.0",
"lines": 300,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
huggingface/transformers:src/transformers/models/tvp/image_processing_tvp_fast.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fast Image processor class for TVP."""
from typing import Optional
import torch
import torchvision.transforms.v2.functional as tvF
from ...image_processing_utils import BatchFeature
from ...image_processing_utils_fast import (
BaseImageProcessorFast,
group_images_by_shape,
reorder_images,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ImageInput,
PILImageResampling,
SizeDict,
make_nested_list_of_images,
)
from ...processing_utils import Unpack
from ...utils import TensorType, auto_docstring
from .image_processing_tvp import TvpImageProcessorKwargs
@auto_docstring
class TvpImageProcessorFast(BaseImageProcessorFast):
    # Default preprocessing configuration for TVP video frames:
    # 448x448 output, ImageNet-standard normalization statistics.
    resample = PILImageResampling.BILINEAR
    image_mean = IMAGENET_STANDARD_MEAN
    image_std = IMAGENET_STANDARD_STD
    size = {"longest_edge": 448}
    default_to_square = False
    crop_size = {"height": 448, "width": 448}
    do_resize = True
    do_center_crop = True
    do_rescale = True
    rescale_factor = 1 / 255
    do_pad = True
    pad_size = {"height": 448, "width": 448}
    constant_values = 0
    pad_mode = "constant"
    do_normalize = True
    # TVP consumes BGR-ordered frames, so channel flipping is enabled by default
    # (see `_flip_channel_order`).
    do_flip_channel_order = True
    valid_kwargs = TvpImageProcessorKwargs

    def __init__(self, **kwargs: Unpack[TvpImageProcessorKwargs]):
        super().__init__(**kwargs)

    @auto_docstring
    def preprocess(
        self,
        videos: ImageInput | list[ImageInput] | list[list[ImageInput]],
        **kwargs: Unpack[TvpImageProcessorKwargs],
    ) -> BatchFeature:
        # Videos go through the shared fast-image pipeline; per-video frame
        # nesting is handled by `_prepare_images_structure` below.
        return super().preprocess(videos, **kwargs)

    def _prepare_images_structure(
        self,
        images: ImageInput,
        **kwargs,
    ) -> ImageInput:
        """
        Prepare the images structure for processing.

        Args:
            images (`ImageInput`):
                The input images to process.

        Returns:
            `ImageInput`: The images with a valid nesting.
        """
        images = self.fetch_images(images)
        # Normalize to a nested list: one list of frames per video.
        return make_nested_list_of_images(images, **kwargs)

    def resize(
        self,
        image: "torch.Tensor",
        size: SizeDict,
        interpolation: Optional["tvF.InterpolationMode"] = None,
        antialias: bool = True,
        **kwargs,
    ) -> "torch.Tensor":
        """
        Resize an image to the specified size.

        Args:
            image (`torch.Tensor`):
                Image to resize.
            size (`SizeDict` or `dict`):
                Size dictionary. If `size` has `longest_edge`, resize the longest edge to that value
                while maintaining aspect ratio. Otherwise, use the base class resize method.
            interpolation (`tvF.InterpolationMode`, *optional*):
                Interpolation method to use.
            antialias (`bool`, *optional*, defaults to `True`):
                Whether to use antialiasing.

        Returns:
            `torch.Tensor`: The resized image.
        """
        interpolation = interpolation if interpolation is not None else tvF.InterpolationMode.BILINEAR
        # Handle longest_edge case (TVP-specific)
        if size.longest_edge:
            # Get current dimensions (assumes channels-first layout, spatial dims last)
            current_height, current_width = image.shape[-2:]
            # Calculate new dimensions maintaining aspect ratio
            if current_height >= current_width:
                ratio = current_width * 1.0 / current_height
                new_height = size.longest_edge
                new_width = int(new_height * ratio)
            else:
                ratio = current_height * 1.0 / current_width
                new_width = size.longest_edge
                new_height = int(new_width * ratio)
            return super().resize(
                image, SizeDict(height=new_height, width=new_width), interpolation=interpolation, antialias=antialias
            )
        # Use base class resize method for other cases
        return super().resize(image, size, interpolation, antialias, **kwargs)

    def _flip_channel_order(self, frames: "torch.Tensor") -> "torch.Tensor":
        """
        Flip channel order from RGB to BGR by reversing the channel axis.

        This matches the slow TVP processor, which emits BGR frames:
            - Channel 0: Blue  (was channel 2)
            - Channel 1: Green (unchanged)
            - Channel 2: Red   (was channel 0)
        """
        # Assuming frames are in channels_first format (..., C, H, W);
        # flipping dim -3 reverses the channel dimension.
        frames = frames.flip(-3)
        return frames

    def _preprocess(
        self,
        images: list[list["torch.Tensor"]],
        do_resize: bool,
        size: SizeDict | dict,
        interpolation: Optional["tvF.InterpolationMode"],
        do_center_crop: bool,
        crop_size: SizeDict | dict,
        do_rescale: bool,
        rescale_factor: float,
        do_pad: bool,
        pad_size: SizeDict,
        constant_values: float | list[float],
        pad_mode: str,
        do_normalize: bool,
        image_mean: float | list[float] | None,
        image_std: float | list[float] | None,
        do_flip_channel_order: bool,
        return_tensors: str | TensorType | None,
        disable_grouping: bool | None,
        **kwargs,
    ) -> BatchFeature:
        """
        Preprocess videos using the fast image processor.

        This method processes each video frame through the same pipeline as the original
        TVP image processor but uses torchvision operations for better performance.
        """
        # Group same-shaped frames across videos so each group can be processed as one batch.
        grouped_images, grouped_images_index = group_images_by_shape(
            images, disable_grouping=disable_grouping, is_nested=True
        )
        processed_images_grouped = {}
        for shape, stacked_frames in grouped_images.items():
            # Resize if needed
            if do_resize:
                stacked_frames = self.resize(stacked_frames, size, interpolation)
            # Center crop if needed
            if do_center_crop:
                stacked_frames = self.center_crop(stacked_frames, crop_size)
            # Rescale and normalize using fused method for consistency
            stacked_frames = self.rescale_and_normalize(
                stacked_frames, do_rescale, rescale_factor, do_normalize, image_mean, image_std
            )
            # Pad if needed
            if do_pad:
                stacked_frames = self.pad(stacked_frames, pad_size, fill_value=constant_values, pad_mode=pad_mode)
                # NOTE(review): pad appears to yield per-frame tensors that need re-stacking
                # into a batch — confirm against the base class `pad` return type.
                stacked_frames = torch.stack(stacked_frames, dim=0)
            # Flip channel order if needed (RGB to BGR)
            if do_flip_channel_order:
                stacked_frames = self._flip_channel_order(stacked_frames)
            processed_images_grouped[shape] = stacked_frames
        # Restore the original per-video ordering of frames.
        processed_images = reorder_images(processed_images_grouped, grouped_images_index, is_nested=True)
        if return_tensors == "pt":
            # Stack frames per video, then stack videos into a single batch tensor.
            processed_images = [torch.stack(images, dim=0) for images in processed_images]
            processed_images = torch.stack(processed_images, dim=0)
        return BatchFeature(data={"pixel_values": processed_images}, tensor_type=return_tensors)
__all__ = ["TvpImageProcessorFast"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/tvp/image_processing_tvp_fast.py",
"license": "Apache License 2.0",
"lines": 189,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/dinov3_convnext/configuration_dinov3_convnext.py | # Copyright 2025 Meta Platforms, Inc. and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ConvNeXT model configuration"""
from ...backbone_utils import BackboneConfigMixin
from ...configuration_utils import PreTrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
class DINOv3ConvNextConfig(BackboneConfigMixin, PreTrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`DINOv3ConvNextModel`]. It is used to instantiate an
    DINOv3ConvNext model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of the DINOv3ConvNext
    [facebook/dinov3-convnext-tiny-pretrain-lvd1689m](https://huggingface.co/facebook/dinov3-convnext-tiny-pretrain-lvd1689m) architecture.

    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.

    Args:
        num_channels (`int`, *optional*, defaults to 3):
            The number of input channels.
        hidden_sizes (`list[int]`, *optional*, defaults to [96, 192, 384, 768]):
            Dimensionality (hidden size) at each stage.
        depths (`list[int]`, *optional*, defaults to [3, 3, 9, 3]):
            The number of layers for each stage.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in each block. If string, `"gelu"`, `"relu"`,
            `"selu"` and `"gelu_new"` are supported.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-06):
            The epsilon used by the layer normalization layers.
        layer_scale_init_value (`float`, *optional*, defaults to 1e-06):
            The initial value for the layer scale.
        drop_path_rate (`float`, *optional*, defaults to 0.0):
            The drop rate for stochastic depth.
        image_size (`int`, *optional*, defaults to 224):
            The size (resolution) of input images.
        out_features (`list[str]`, *optional*):
            If used as backbone, list of features to output. Can be any of `"stem"`, `"stage1"`, `"stage2"`, etc.
            (depending on how many stages the model has). If unset and `out_indices` is set, will default to the
            corresponding stages. If unset and `out_indices` is unset, will default to the last stage. Must be in the
            same order as defined in the `stage_names` attribute.
        out_indices (`list[int]`, *optional*):
            If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how
            many stages the model has). If unset and `out_features` is set, will default to the corresponding stages.
            If unset and `out_features` is unset, will default to the last stage. Must be in the
            same order as defined in the `stage_names` attribute.

    Example:
    ```python
    >>> from transformers import DINOv3ConvNextConfig, DINOv3ConvNextModel

    >>> # Initializing a DINOv3ConvNext (tiny variant) style configuration
    >>> config = DINOv3ConvNextConfig()

    >>> # Initializing a model (with random weights)
    >>> model = DINOv3ConvNextModel(config)

    >>> # Accessing the model config
    >>> config = model.config
    ```"""

    model_type = "dinov3_convnext"

    def __init__(
        self,
        num_channels: int = 3,
        hidden_sizes: list[int] | None = None,
        depths: list[int] | None = None,
        hidden_act: str = "gelu",
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-6,
        layer_scale_init_value: float = 1e-6,
        drop_path_rate: float = 0.0,
        image_size: int = 224,
        out_features: list[str] | None = None,
        out_indices: list[int] | None = None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        # Defaults correspond to the ConvNeXt-Tiny variant
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        # Backbone stage naming: "stem" followed by one entry per depth ("stage1".."stageN")
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self.set_output_features_output_indices(out_indices=out_indices, out_features=out_features)

    @property
    def num_stages(self) -> int:
        # One stage per entry in hidden_sizes
        return len(self.hidden_sizes)
__all__ = ["DINOv3ConvNextConfig"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/dinov3_convnext/configuration_dinov3_convnext.py",
"license": "Apache License 2.0",
"lines": 98,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/dinov3_convnext/convert_dinov3_convnext_to_hf.py | # Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert DINOv3 checkpoints from the original repository.
URL: https://github.com/facebookresearch/dinov3/tree/main
"""
import argparse
import os
import re
from io import BytesIO
import httpx
import torch
from huggingface_hub import HfApi, hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import DINOv3ConvNextConfig, DINOv3ConvNextModel, DINOv3ViTImageProcessorFast
# Hub repos hosting the official checkpoints, keyed by model-size name.
HUB_MODELS = {
    "convnext_tiny": "facebook/dinov3-convnext-tiny-pretrain-lvd1689m",
    "convnext_small": "facebook/dinov3-convnext-small-pretrain-lvd1689m",
    "convnext_base": "facebook/dinov3-convnext-base-pretrain-lvd1689m",
    "convnext_large": "facebook/dinov3-convnext-large-pretrain-lvd1689m",
}
# Original (pre-conversion) checkpoint filenames inside each Hub repo.
HUB_CHECKPOINTS = {
    "convnext_tiny": "dinov3_convnext_tiny_pretrain_lvd1689m-21b726bb.pth",
    "convnext_small": "dinov3_convnext_small_pretrain_lvd1689m-296db49d.pth",
    "convnext_base": "dinov3_convnext_base_pretrain_lvd1689m-801f2ba9.pth",
    "convnext_large": "dinov3_convnext_large_pretrain_lvd1689m-61fa432d.pth",
}

# fmt: off
# Regex renames applied to original state-dict keys to match HF module names.
ORIGINAL_TO_CONVERTED_KEY_MAPPING = {
    r"dwconv": r"depthwise_conv",
    r"pwconv": r"pointwise_conv",
    r"norm": r"layer_norm",
    r"stages.(\d+).(\d+)": r"stages.\1.layers.\2",
    r"downsample_layers.(\d+).(\d+)": r"stages.\1.downsample_layers.\2",
}
# fmt: on
def get_dinov3_config(model_name: str) -> DINOv3ConvNextConfig:
# size of the architecture
if model_name == "convnext_tiny":
return DINOv3ConvNextConfig(
depths=[3, 3, 9, 3],
hidden_sizes=[96, 192, 384, 768],
)
elif model_name == "convnext_small":
return DINOv3ConvNextConfig(
depths=[3, 3, 27, 3],
hidden_sizes=[96, 192, 384, 768],
)
elif model_name == "convnext_base":
return DINOv3ConvNextConfig(
depths=[3, 3, 27, 3],
hidden_sizes=[128, 256, 512, 1024],
)
elif model_name == "convnext_large":
return DINOv3ConvNextConfig(
depths=[3, 3, 27, 3],
hidden_sizes=[192, 384, 768, 1536],
)
else:
raise ValueError("Model not supported")
def prepare_img():
    """Download the standard COCO sanity-check image and return it as an RGB PIL image."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    with httpx.stream("GET", url) as response:
        image = Image.open(BytesIO(response.read())).convert("RGB")
    return image
def get_transform(resize_size: int = 224):
    """Build the reference torchvision pipeline: ToTensor -> square resize -> ImageNet normalization."""
    to_tensor = transforms.ToTensor()
    resize = transforms.Resize((resize_size, resize_size), antialias=True)
    # ImageNet mean/std — must match the HF image processor for the parity check below
    normalize = transforms.Normalize(
        mean=(0.485, 0.456, 0.406),
        std=(0.229, 0.224, 0.225),
    )
    return transforms.Compose([to_tensor, resize, normalize])
def get_image_processor(resize_size: int = 224):
    """Build the HF fast image processor equivalent to the torchvision pipeline in `get_transform`."""
    return DINOv3ViTImageProcessorFast(
        do_resize=True,
        size={"height": resize_size, "width": resize_size},
        resample=2,  # BILINEAR
    )
def convert_old_keys_to_new_keys(state_dict_keys: dict | None = None):
    """
    This function should be applied only once, on the concatenated keys to efficiently rename using
    the key mappings.
    """
    if state_dict_keys is None:
        return {}
    # Join all keys into one text blob so each regex runs once over everything
    joined = "\n".join(state_dict_keys)
    renamed = joined
    for pattern, replacement in ORIGINAL_TO_CONVERTED_KEY_MAPPING.items():
        # A None replacement means "delete the matched text"
        renamed = re.sub(pattern, "" if replacement is None else replacement, renamed)
    return dict(zip(joined.split("\n"), renamed.split("\n")))
@torch.no_grad()
def convert_and_test_dinov3_checkpoint(args):
    """Convert an original DINOv3 ConvNeXt checkpoint to HF format, verify preprocessing
    and forward-pass parity against reference values, save locally, and optionally push to the Hub."""
    # Reference values: first 5 entries of the CLS token / first patch token from the original models.
    expected_outputs = {
        "convnext_tiny_cls": [-6.372119, 1.300791, 2.074303, -0.079975, 0.607205],
        "convnext_tiny_patch": [0.490530, -3.713466, 1.848513, -1.040319, -1.090818],
        "convnext_small_cls": [-0.903914, 1.412183, 0.287465, 0.175296, -2.397940],
        "convnext_small_patch": [-1.081114, 0.637362, 3.748765, 0.170179, 1.445153],
        "convnext_base_cls": [0.155366, -0.378771, -0.735157, -2.818718, 0.015095],
        "convnext_base_patch": [3.039118, 0.778155, -1.961322, -1.607147, -2.411941],
        "convnext_large_cls": [-2.219094, -0.594451, -2.300294, -0.957415, -0.520473],
        "convnext_large_patch": [-1.477349, -0.217038, -3.128137, 0.418962, 0.334949],
    }

    model_name = args.model_name
    config = get_dinov3_config(model_name)
    # print(config)
    model = DINOv3ConvNextModel(config).eval()

    state_dict_path = hf_hub_download(repo_id=HUB_MODELS[model_name], filename=HUB_CHECKPOINTS[model_name])
    # NOTE(review): torch.load with default arguments — relies on torch's current
    # weights_only/map_location defaults; confirm intended for these checkpoints.
    original_state_dict = torch.load(state_dict_path)

    # Rename original keys to HF module names in one pass.
    original_keys = list(original_state_dict.keys())
    new_keys = convert_old_keys_to_new_keys(original_keys)

    converted_state_dict = {}
    for key in original_keys:
        new_key = new_keys[key]
        weight_tensor = original_state_dict[key]
        # norms.3.* are skipped — presumably they have no counterpart in the HF model; TODO confirm.
        if key == "norms.3.weight" or key == "norms.3.bias":
            continue
        converted_state_dict[new_key] = weight_tensor

    # strict=True ensures every remaining key maps onto the HF architecture.
    model.load_state_dict(converted_state_dict, strict=True)
    model = model.eval()

    transform = get_transform()
    image_processor = get_image_processor()
    image = prepare_img()

    # check preprocessing
    original_pixel_values = transform(image).unsqueeze(0)  # add batch dimension
    inputs = image_processor(image, return_tensors="pt")
    torch.testing.assert_close(original_pixel_values, inputs["pixel_values"], atol=1e-6, rtol=1e-6)
    print("Preprocessing looks ok!")

    with torch.inference_mode(), torch.autocast("cuda", dtype=torch.float):
        model_output = model(**inputs)

    last_layer_class_token = model_output.pooler_output
    # Drop the leading CLS token; keep only patch tokens.
    last_layer_patch_tokens = model_output.last_hidden_state[:, 1:]

    actual_outputs = {}
    actual_outputs[f"{model_name}_cls"] = last_layer_class_token[0, :5].tolist()
    actual_outputs[f"{model_name}_patch"] = last_layer_patch_tokens[0, 0, :5].tolist()

    print("Actual: ", [round(x, 6) for x in actual_outputs[f"{model_name}_cls"]])
    print("Expected:", expected_outputs[f"{model_name}_cls"])
    torch.testing.assert_close(
        torch.Tensor(actual_outputs[f"{model_name}_cls"]),
        torch.Tensor(expected_outputs[f"{model_name}_cls"]),
        atol=1e-3,
        rtol=1e-3,
    )

    print("Actual: ", [round(x, 6) for x in actual_outputs[f"{model_name}_patch"]])
    print("Expected:", expected_outputs[f"{model_name}_patch"])
    torch.testing.assert_close(
        torch.Tensor(actual_outputs[f"{model_name}_patch"]),
        torch.Tensor(expected_outputs[f"{model_name}_patch"]),
        atol=1e-3,
        rtol=1e-3,
    )
    print("Forward pass looks ok!")

    # Persist the converted model + processor under <save_dir>/<model_name>.
    save_dir = os.path.join(args.save_dir, model_name)
    os.makedirs(save_dir, exist_ok=True)
    model.save_pretrained(save_dir)
    image_processor.save_pretrained(save_dir)
    print(f"Model saved to {save_dir}")

    if args.push_to_hub:
        api = HfApi()
        repo = HUB_MODELS[model_name]
        api.upload_folder(folder_path=save_dir, repo_id=repo, repo_type="model")
if __name__ == "__main__":
    # CLI entry point: pick a variant, convert + verify it, optionally push to the Hub.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model-name",
        default="convnext_tiny",
        type=str,
        choices=["convnext_tiny", "convnext_small", "convnext_base", "convnext_large"],
        help="Name of the model you'd like to convert.",
    )
    parser.add_argument(
        "--save-dir",
        default="converted_models",
        type=str,
        help="Directory to save the converted model.",
    )
    parser.add_argument(
        "--push-to-hub",
        action="store_true",
        help="Push the converted model to the Hugging Face Hub.",
    )
    args = parser.parse_args()
    convert_and_test_dinov3_checkpoint(args)
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/dinov3_convnext/convert_dinov3_convnext_to_hf.py",
"license": "Apache License 2.0",
"lines": 199,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/dinov3_convnext/modeling_dinov3_convnext.py | # Copyright 2025 Meta Platforms, Inc. and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch ConvNext model."""
import numpy as np
import torch
from torch import nn
from ... import initialization as init
from ...activations import ACT2FN
from ...backbone_utils import BackboneMixin
from ...modeling_outputs import BackboneOutput, BaseModelOutputWithPoolingAndNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import auto_docstring, logging
from ...utils.generic import can_return_tuple
from .configuration_dinov3_convnext import DINOv3ConvNextConfig
logger = logging.get_logger(__name__)
# Copied from transformers.models.beit.modeling_beit.drop_path
def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
    """
    Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).

    Each sample in the batch is independently zeroed with probability `drop_prob`;
    surviving samples are scaled by 1/keep_prob so the expected value is unchanged.
    In eval mode (or with drop_prob == 0) the input is returned untouched.
    """
    if not training or drop_prob == 0.0:
        return input
    keep_prob = 1 - drop_prob
    # One Bernoulli(keep_prob) draw per sample, broadcastable over all trailing dims.
    mask_shape = (input.shape[0],) + (1,) * (input.ndim - 1)
    mask = torch.rand(mask_shape, dtype=input.dtype, device=input.device).add_(keep_prob).floor_()
    return input.div(keep_prob) * mask
# Copied from transformers.models.convnext.modeling_convnext.ConvNextDropPath with ConvNext->DINOv3ConvNext
class DINOv3ConvNextDropPath(nn.Module):
    """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""

    def __init__(self, drop_prob: float | None = None) -> None:
        super().__init__()
        # Per-sample drop probability for the residual branch
        self.drop_prob = drop_prob

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        # Delegates to the functional `drop_path`; self.training gates the stochastic behavior
        return drop_path(hidden_states, self.drop_prob, self.training)

    def extra_repr(self) -> str:
        # Shown in the module repr, e.g. "DINOv3ConvNextDropPath(p=0.1)"
        return f"p={self.drop_prob}"
class DINOv3ConvNextLayerNorm(nn.LayerNorm):
    r"""LayerNorm that supports two data formats: channels_last (default) or channels_first.

    channels_last expects inputs shaped (batch_size, height, width, channels);
    channels_first expects (batch_size, channels, height, width) and is handled by
    permuting to channels_last, normalizing, and permuting back.
    """

    def __init__(self, *args, data_format="channels_last", **kwargs):
        super().__init__(*args, **kwargs)
        if data_format not in ["channels_last", "channels_first"]:
            raise NotImplementedError(f"Unsupported data format: {data_format}")
        self.data_format = data_format

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        """
        Args:
            features: Tensor of shape (batch_size, channels, height, width) OR (batch_size, height, width, channels)
        """
        if self.data_format != "channels_first":
            return super().forward(features)
        # Move channels last, normalize over them, then restore (N, C, H, W)
        return super().forward(features.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
class DINOv3ConvNextLayer(nn.Module):
    """A single ConvNeXt block (the `Block` class in the original implementation).

    Two equivalent formulations exist:
    1) DwConv, LayerNorm (channels_first), Conv, GELU, Conv (all in (N, C, H, W) format)
    2) DwConv, Permute, LayerNorm (channels_last), Linear, GELU, Linear, Permute
    The authors chose (2) as it is slightly faster in PyTorch; this module follows suit.

    Args:
        config ([`DINOv3ConvNextConfig`]):
            Model config.
        channels (`int`):
            Number of input (and output) channels.
        drop_path (`float`):
            Drop path rate. Default: 0.0.
    """

    def __init__(self, config: DINOv3ConvNextConfig, channels: int, drop_path: float = 0.0):
        super().__init__()
        # 7x7 depthwise convolution: one filter per channel (groups == channels).
        self.depthwise_conv = nn.Conv2d(channels, channels, kernel_size=7, padding=3, groups=channels)
        self.layer_norm = DINOv3ConvNextLayerNorm(channels, eps=config.layer_norm_eps)
        # The two pointwise (1x1) convolutions are expressed as Linear layers acting on
        # channels_last data, matching formulation (2) above.
        self.pointwise_conv1 = nn.Linear(channels, 4 * channels)
        self.activation_fn = ACT2FN[config.hidden_act]
        self.pointwise_conv2 = nn.Linear(4 * channels, channels)
        # Learnable per-channel residual scaling (LayerScale).
        self.gamma = nn.Parameter(torch.full((channels,), config.layer_scale_init_value), requires_grad=True)
        self.drop_path = DINOv3ConvNextDropPath(drop_path) if drop_path > 0.0 else nn.Identity()

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        """
        Args:
            features: Tensor of shape (batch_size, channels, height, width)
        """
        residual = features
        hidden = self.depthwise_conv(features)
        hidden = self.layer_norm(hidden.permute(0, 2, 3, 1))  # to channels_last
        hidden = self.pointwise_conv2(self.activation_fn(self.pointwise_conv1(hidden)))
        hidden = (hidden * self.gamma).permute(0, 3, 1, 2)  # back to channels_first
        return residual + self.drop_path(hidden)
class DINOv3ConvNextStage(nn.Module):
    """One ConvNeXt stage: a downsampling step followed by a stack of `DINOv3ConvNextLayer` blocks."""

    def __init__(self, config: DINOv3ConvNextConfig, stage_idx: int):
        super().__init__()
        out_channels = config.hidden_sizes[stage_idx]
        if stage_idx == 0:
            # Stem: a 4x4/stride-4 patchify convolution followed by a channels_first LayerNorm.
            self.downsample_layers = nn.ModuleList(
                [
                    nn.Conv2d(config.num_channels, out_channels, kernel_size=4, stride=4),
                    DINOv3ConvNextLayerNorm(out_channels, eps=config.layer_norm_eps, data_format="channels_first"),
                ]
            )
        else:
            # Later stages: normalize, then halve the spatial resolution with a 2x2/stride-2 conv.
            in_channels = config.hidden_sizes[stage_idx - 1]
            self.downsample_layers = nn.ModuleList(
                [
                    DINOv3ConvNextLayerNorm(in_channels, eps=config.layer_norm_eps, data_format="channels_first"),
                    nn.Conv2d(in_channels, out_channels, kernel_size=2, stride=2),
                ]
            )
        # Drop-path rates grow linearly with depth across the whole network; this stage
        # takes the slice of rates belonging to its own layers.
        total_depth = sum(config.depths)
        all_rates = np.linspace(0, config.drop_path_rate, total_depth).tolist()
        offset = sum(config.depths[:stage_idx])
        self.layers = nn.ModuleList(
            [
                DINOv3ConvNextLayer(config, channels=out_channels, drop_path=all_rates[offset + i])
                for i in range(config.depths[stage_idx])
            ]
        )

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        """Downsample, then run the stage's residual blocks on a (batch, channels, h, w) tensor."""
        for module in self.downsample_layers:
            features = module(features)
        for block in self.layers:
            features = block(features)
        return features
@auto_docstring
class DINOv3ConvNextPreTrainedModel(PreTrainedModel):
    config: DINOv3ConvNextConfig
    base_model_prefix = "dinov3_convnext"
    main_input_name = "pixel_values"
    input_modalities = ("image",)
    _no_split_modules = ["DINOv3ConvNextLayer"]

    @torch.no_grad()
    def _init_weights(self, module):
        """Initialize the weights"""
        super()._init_weights(module)
        # On top of the default initialization, reset each block's LayerScale
        # parameter to its configured starting value.
        if isinstance(module, DINOv3ConvNextLayer) and module.gamma is not None:
            init.constant_(module.gamma, self.config.layer_scale_init_value)
@auto_docstring
class DINOv3ConvNextModel(DINOv3ConvNextPreTrainedModel):
    def __init__(self, config: DINOv3ConvNextConfig):
        super().__init__(config)
        self.config = config
        self.stages = nn.ModuleList([DINOv3ConvNextStage(config, stage_idx) for stage_idx in range(config.num_stages)])
        self.layer_norm = nn.LayerNorm(config.hidden_sizes[-1], eps=config.layer_norm_eps)  # final norm layer
        self.pool = nn.AdaptiveAvgPool2d(1)
        self.post_init()

    @can_return_tuple
    @auto_docstring
    def forward(
        self, pixel_values: torch.FloatTensor, output_hidden_states: bool | None = None, **kwargs
    ) -> BaseModelOutputWithPoolingAndNoAttention:
        hidden_states = pixel_values
        # Fall back to the config only when the argument is unset. Using `or` here would
        # silently override an explicit `output_hidden_states=False` whenever the config
        # default is True (and matches the convention used by DINOv3ConvNextBackbone).
        if output_hidden_states is None:
            output_hidden_states = self.config.output_hidden_states
        all_hidden_states = [hidden_states] if output_hidden_states else []
        for stage in self.stages:
            hidden_states = stage(hidden_states)
            # store intermediate stage outputs
            if output_hidden_states:
                all_hidden_states.append(hidden_states)
        # make global representation, a.k.a [CLS] token
        pooled_output = self.pool(hidden_states)
        # (batch_size, channels, height, width) -> (batch_size, height * width, channels)
        pooled_output = pooled_output.flatten(2).transpose(1, 2)
        hidden_states = hidden_states.flatten(2).transpose(1, 2)
        # concat "cls" and "patch tokens" as (batch_size, 1 + height * width, channels)
        hidden_states = torch.cat([pooled_output, hidden_states], dim=1)
        hidden_states = self.layer_norm(hidden_states)
        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=hidden_states,
            pooler_output=hidden_states[:, 0],
            hidden_states=tuple(all_hidden_states) if output_hidden_states else None,
        )
@auto_docstring
class DINOv3ConvNextBackbone(BackboneMixin, DINOv3ConvNextPreTrainedModel):
    config: DINOv3ConvNextConfig

    def __init__(self, config: DINOv3ConvNextConfig):
        super().__init__(config)
        # Channel counts aligned with self.stage_names: the stem input plus each stage output.
        self.num_features = [config.num_channels] + list(config.hidden_sizes)
        self.stages = nn.ModuleList([DINOv3ConvNextStage(config, stage_idx) for stage_idx in range(config.num_stages)])
        self.post_init()

    def get_input_embeddings(self):
        # The ConvNeXt backbone has no token-embedding layer.
        return None

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        pixel_values: torch.FloatTensor,
        output_hidden_states: bool | None = None,
        **kwargs,
    ) -> BackboneOutput:
        if output_hidden_states is None:
            output_hidden_states = self.config.output_hidden_states
        # Collect the input plus every stage output, all kept in NCHW
        # (batch_size, channels, height, width) format.
        features = pixel_values
        stage_outputs: list[torch.Tensor] = [features]
        for stage in self.stages:
            features = stage(features)
            stage_outputs.append(features)
        # Keep only the stages that were requested as backbone features.
        feature_maps = tuple(
            output for name, output in zip(self.stage_names, stage_outputs) if name in self.out_features
        )
        return BackboneOutput(
            feature_maps=feature_maps,
            hidden_states=tuple(stage_outputs) if output_hidden_states else None,
        )
__all__ = ["DINOv3ConvNextModel", "DINOv3ConvNextPreTrainedModel", "DINOv3ConvNextBackbone"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/dinov3_convnext/modeling_dinov3_convnext.py",
"license": "Apache License 2.0",
"lines": 234,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/dinov3_vit/configuration_dinov3_vit.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DINOv3 model configuration"""
from ...backbone_utils import BackboneConfigMixin
from ...configuration_utils import PreTrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
class DINOv3ViTConfig(BackboneConfigMixin, PreTrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`DINOv3Model`]. It is used to instantiate an
    DINOv3 model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of the DINOv3
    [facebook/dinov3-vits16-pretrain-lvd1689m](https://huggingface.co/facebook/dinov3-vits16-pretrain-lvd1689m) architecture.
    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.
    Args:
        patch_size (`int`, *optional*, defaults to 16):
            The size (resolution) of each patch.
        hidden_size (`int`, *optional*, defaults to 384):
            Dimensionality of the encoder layers and the pooler layer.
        intermediate_size (`int`, *optional*, defaults to 1536):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 6):
            Number of attention heads for each attention layer in the Transformer encoder.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"` and `"gelu_new"` are supported.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon used by the layer normalization layers.
        rope_theta (`float`, *optional*, defaults to 100.0):
            The base period of the RoPE embeddings.
        image_size (`int`, *optional*, defaults to 224):
            The size (resolution) of each image.
        num_channels (`int`, *optional*, defaults to 3):
            The number of input channels.
        query_bias (`bool`, *optional*, defaults to `True`):
            Whether to add a bias to the query projection.
        key_bias (`bool`, *optional*, defaults to `False`):
            Whether to add a bias to the key projection.
        value_bias (`bool`, *optional*, defaults to `True`):
            Whether to add a bias to the value projection.
        proj_bias (`bool`, *optional*, defaults to `True`):
            Whether to add a bias to the output projection.
        mlp_bias (`bool`, *optional*, defaults to `True`):
            Whether to add a bias to the MLP layers.
        layerscale_value (`float`, *optional*, defaults to 1.0):
            Initial value to use for layer scale.
        drop_path_rate (`float`, *optional*, defaults to 0.0):
            Stochastic depth rate per sample (when applied in the main path of residual layers).
        use_gated_mlp (`bool`, *optional*, defaults to `False`):
            Whether to use the SwiGLU feedforward neural network.
        num_register_tokens (`int`, *optional*, defaults to 0):
            The number of register tokens.
        pos_embed_shift (`float`, *optional*):
            Amount to randomly shift position embedding coordinates in [-shift, shift],
            applied only in training mode if not `None`.
        pos_embed_jitter (`float`, *optional*):
            Amount to randomly jitter position embedding coordinates in log-uniform value in [1/jitter, jitter],
            applied only in training mode if not `None`.
        pos_embed_rescale (`float`, *optional*, defaults to 2.0):
            Amount to randomly rescale position embedding coordinates in log-uniform value in [1/rescale, rescale],
            applied only in training mode if not `None`.
        out_features (`list[str]`, *optional*):
            If used as backbone, list of features to output. Can be any of `"stem"`, `"stage1"`, `"stage2"`, etc.
            (depending on how many stages the model has). Will default to the last stage if unset.
        out_indices (`list[int]`, *optional*):
            If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc.
            (depending on how many stages the model has). Will default to the last stage if unset.
        apply_layernorm (`bool`, *optional*, defaults to `True`):
            Whether to apply layer normalization to the feature maps when used as backbone.
        reshape_hidden_states (`bool`, *optional*, defaults to `True`):
            Whether to reshape the hidden states to spatial dimensions when used as backbone.
    Example:
    ```python
    >>> from transformers import DINOv3ViTConfig, DINOv3ViTModel
    >>> # Initializing a DINOv3 ViT-small style configuration
    >>> config = DINOv3ViTConfig()
    >>> # Initializing a model (with random weights) from the config
    >>> model = DINOv3ViTModel(config)
    >>> # Accessing the model config
    >>> config = model.config
    ```"""

    # Identifier used by AutoConfig / AutoModel to resolve this architecture.
    model_type = "dinov3_vit"

    def __init__(
        self,
        patch_size: int = 16,
        hidden_size: int = 384,
        intermediate_size: int = 1536,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 6,
        hidden_act: str = "gelu",
        attention_dropout: float = 0.0,
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-5,
        rope_theta: float = 100.0,
        image_size: int = 224,
        num_channels: int = 3,
        query_bias: bool = True,
        key_bias: bool = False,
        value_bias: bool = True,
        proj_bias: bool = True,
        mlp_bias: bool = True,
        layerscale_value: float = 1.0,
        drop_path_rate: float = 0.0,
        use_gated_mlp: bool = False,
        num_register_tokens: int = 0,
        # train augs
        pos_embed_shift: float | None = None,
        pos_embed_jitter: float | None = None,
        pos_embed_rescale: float | None = 2.0,
        out_features: list[str] | None = None,
        out_indices: list[int] | None = None,
        apply_layernorm: bool = True,
        reshape_hidden_states: bool = True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        # Image / patch geometry.
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        # Transformer encoder dimensions.
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layerscale_value = layerscale_value
        self.drop_path_rate = drop_path_rate
        self.use_gated_mlp = use_gated_mlp
        self.rope_theta = rope_theta
        # Per-projection bias switches for the attention and MLP layers.
        self.query_bias = query_bias
        self.key_bias = key_bias
        self.value_bias = value_bias
        self.proj_bias = proj_bias
        self.mlp_bias = mlp_bias
        self.num_register_tokens = num_register_tokens
        # train augs
        self.pos_embed_shift = pos_embed_shift
        self.pos_embed_jitter = pos_embed_jitter
        self.pos_embed_rescale = pos_embed_rescale
        # Initialize backbone-specific configuration
        self.apply_layernorm = apply_layernorm
        self.reshape_hidden_states = reshape_hidden_states
        # Initialize backbone stage names
        stage_names = ["stem"] + [f"stage{i}" for i in range(1, num_hidden_layers + 1)]
        self.stage_names = stage_names
        # Initialize backbone features/indices (validates out_features/out_indices
        # against stage_names; defaults to the last stage when both are unset).
        self.set_output_features_output_indices(out_indices=out_indices, out_features=out_features)
__all__ = ["DINOv3ViTConfig"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/dinov3_vit/configuration_dinov3_vit.py",
"license": "Apache License 2.0",
"lines": 168,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/dinov3_vit/convert_dinov3_vit_to_hf.py | # Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert DINOv3 checkpoints from the original repository.
URL: https://github.com/facebookresearch/dinov3/tree/main
"""
import argparse
import os
import re
from io import BytesIO
import httpx
import torch
from huggingface_hub import HfApi, hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import DINOv3ViTConfig, DINOv3ViTImageProcessorFast, DINOv3ViTModel
# Short model names -> Hugging Face Hub repository ids hosting the converted weights.
HUB_MODELS = {
    "vits16_lvd1689m": "facebook/dinov3-vits16-pretrain-lvd1689m",
    "vits16plus_lvd1689m": "facebook/dinov3-vits16plus-pretrain-lvd1689m",
    "vitb16_lvd1689m": "facebook/dinov3-vitb16-pretrain-lvd1689m",
    "vitl16_lvd1689m": "facebook/dinov3-vitl16-pretrain-lvd1689m",
    "vitl16_sat493m": "facebook/dinov3-vitl16-pretrain-sat493m",
    "vith16plus_lvd1689m": "facebook/dinov3-vith16plus-pretrain-lvd1689m",
    "vit7b16_lvd1689m": "facebook/dinov3-vit7b16-pretrain-lvd1689m",
    "vit7b16_sat493m": "facebook/dinov3-vit7b16-pretrain-sat493m",
}
# Original (pre-conversion) checkpoint filename stored inside each repo above.
HUB_CHECKPOINTS = {
    "vits16_lvd1689m": "dinov3_vits16_pretrain_lvd1689m-08c60483.pth",
    "vits16plus_lvd1689m": "dinov3_vits16plus_pretrain_lvd1689m-4057cbaa.pth",
    "vitb16_lvd1689m": "dinov3_vitb16_pretrain_lvd1689m-73cec8be.pth",
    "vitl16_lvd1689m": "dinov3_vitl16_pretrain_lvd1689m-8aa4cbdd.pth",
    "vitl16_sat493m": "dinov3_vitl16_pretrain_sat493m-eadcf0ff.pth",
    "vith16plus_lvd1689m": "dinov3_vith16plus_pretrain_lvd1689m-7c1da9a5.pth",
    "vit7b16_lvd1689m": "dinov3_vit7b16_pretrain_lvd1689m-a955f4ea.pth",
    "vit7b16_sat493m": "dinov3_vit7b16_pretrain_sat493m-a6675841.pth",
}
# fmt: off
# Regex substitutions (applied in order by convert_old_keys_to_new_keys) that rename
# original state-dict keys to the HF naming scheme; `\1`-style groups keep layer indices.
ORIGINAL_TO_CONVERTED_KEY_MAPPING = {
    r"cls_token": r"embeddings.cls_token",
    r"mask_token": r"embeddings.mask_token",
    r"storage_tokens": r"embeddings.register_tokens",
    r"patch_embed.proj": r"embeddings.patch_embeddings",
    r"periods": r"inv_freq",
    r"rope_embed": r"rope_embeddings",
    r"blocks.(\d+).attn.proj": r"layer.\1.attention.o_proj",
    r"blocks.(\d+).attn.": r"layer.\1.attention.",
    r"blocks.(\d+).ls(\d+).gamma": r"layer.\1.layer_scale\2.lambda1",
    r"blocks.(\d+).mlp.fc1": r"layer.\1.mlp.up_proj",
    r"blocks.(\d+).mlp.fc2": r"layer.\1.mlp.down_proj",
    r"blocks.(\d+).mlp": r"layer.\1.mlp",
    r"blocks.(\d+).norm": r"layer.\1.norm",
    r"w1": r"gate_proj",
    r"w2": r"up_proj",
    r"w3": r"down_proj",
}
# fmt: on
def convert_old_keys_to_new_keys(state_dict_keys: dict | None = None):
    """
    Map each original checkpoint key to its renamed HF key.

    All keys are joined into one newline-separated string and every regex rename is
    applied to the whole text at once, so this should be applied only once on the
    concatenated keys. Returns `{old_key: new_key}` (empty if no keys were given).
    """
    if state_dict_keys is None:
        return {}
    old_text = "\n".join(state_dict_keys)
    new_text = old_text
    for pattern, replacement in ORIGINAL_TO_CONVERTED_KEY_MAPPING.items():
        # A `None` replacement deletes the matched text (leaves an empty line).
        new_text = re.sub(pattern, replacement if replacement is not None else "", new_text)
    return dict(zip(old_text.split("\n"), new_text.split("\n")))
def split_qkv(state_dict: dict):
    """Split every fused `qkv` projection tensor into separate q/k/v entries (mutates and returns `state_dict`)."""
    fused_keys = [key for key in state_dict if "qkv" in key]
    for fused_key in fused_keys:
        # The fused tensor stacks the three projections along dim 0 in q, k, v order.
        query, key_states, value_states = torch.chunk(state_dict.pop(fused_key), 3, dim=0)
        for proj_name, tensor in (("q_proj", query), ("k_proj", key_states), ("v_proj", value_states)):
            state_dict[fused_key.replace("qkv", proj_name)] = tensor
    return state_dict
def get_dinov3_config(model_name: str) -> DINOv3ViTConfig:
    """
    Return the `DINOv3ViTConfig` describing the architecture of a known checkpoint.

    Raises:
        ValueError: if `model_name` is not one of the supported checkpoints.
    """
    # Architecture hyperparameters per checkpoint family.
    architectures = {
        "vits16_lvd1689m": dict(
            patch_size=16,
            hidden_size=384,
            intermediate_size=1536,
            num_hidden_layers=12,
            num_attention_heads=6,
            proj_bias=True,
            num_register_tokens=4,
            use_gated_mlp=False,
            hidden_act="gelu",
        ),
        "vits16plus_lvd1689m": dict(
            patch_size=16,
            hidden_size=384,
            intermediate_size=1536,
            num_hidden_layers=12,
            num_attention_heads=6,
            num_register_tokens=4,
            use_gated_mlp=True,
            hidden_act="silu",
        ),
        "vitb16_lvd1689m": dict(
            patch_size=16,
            hidden_size=768,
            intermediate_size=3072,
            num_hidden_layers=12,
            num_attention_heads=12,
            proj_bias=True,
            num_register_tokens=4,
            use_gated_mlp=False,
            hidden_act="gelu",
        ),
        "vitl16_lvd1689m": dict(
            patch_size=16,
            hidden_size=1024,
            intermediate_size=4096,
            num_hidden_layers=24,
            num_attention_heads=16,
            num_register_tokens=4,
            use_gated_mlp=False,
            hidden_act="gelu",
        ),
        "vith16plus_lvd1689m": dict(
            patch_size=16,
            hidden_size=1280,
            intermediate_size=5120,
            num_hidden_layers=32,
            num_attention_heads=20,
            num_register_tokens=4,
            use_gated_mlp=True,
            hidden_act="silu",
        ),
        "vit7b16_lvd1689m": dict(
            patch_size=16,
            hidden_size=4096,
            intermediate_size=8192,
            num_hidden_layers=40,
            num_attention_heads=32,
            query_bias=False,
            value_bias=False,
            num_register_tokens=4,
            use_gated_mlp=True,
            hidden_act="silu",
        ),
    }
    # The sat493m checkpoints share the architecture of their lvd1689m counterparts.
    architectures["vitl16_sat493m"] = architectures["vitl16_lvd1689m"]
    architectures["vit7b16_sat493m"] = architectures["vit7b16_lvd1689m"]
    if model_name not in architectures:
        raise ValueError("Model not supported")
    return DINOv3ViTConfig(**architectures[model_name])
def prepare_img():
    """Download the standard COCO validation image (two cats) and return it as an RGB PIL image."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    with httpx.stream("GET", url) as response:
        raw_bytes = response.read()
    return Image.open(BytesIO(raw_bytes)).convert("RGB")
def get_transform(resize_size: int = 224):
    """Reference torchvision preprocessing: to-tensor, square resize, ImageNet normalization."""
    return transforms.Compose(
        [
            transforms.ToTensor(),
            transforms.Resize((resize_size, resize_size), antialias=True),
            transforms.Normalize(
                mean=(0.485, 0.456, 0.406),
                std=(0.229, 0.224, 0.225),
            ),
        ]
    )
def get_image_processor(resize_size: int = 224):
    """HF fast image processor equivalent to `get_transform` (square bilinear resize)."""
    target_size = {"height": resize_size, "width": resize_size}
    return DINOv3ViTImageProcessorFast(
        do_resize=True,
        size=target_size,
        resample=2,  # BILINEAR
    )
@torch.no_grad()
def convert_and_test_dinov3_checkpoint(args):
    """Convert one original DINOv3 checkpoint to the HF format, verify its outputs, and save it.

    Steps: download the original weights, split/rename the state dict, load it strictly,
    compare preprocessing and forward-pass outputs against precomputed reference values,
    then save (and optionally push) the converted model and image processor.
    """
    # Reference values: first 5 entries of the CLS token and of the first patch token,
    # precomputed with the original implementation for each supported checkpoint.
    expected_outputs = {
        "vits16_lvd1689m_cls": [0.463561, -0.415609, 0.408236, -0.126613, -0.286636],
        "vits16_lvd1689m_patch": [-0.038754, -0.250895, -0.016392, -0.455473, 0.571582],
        "vits16plus_lvd1689m_cls": [-0.471349, -1.365778, -0.317983, 0.377219, -0.769085],
        "vits16plus_lvd1689m_patch": [0.144551, -0.388117, -0.393433, -0.157695, -0.600380],
        "vitb16_lvd1689m_cls": [1.034643, -0.180609, -0.341018, -0.066376, -0.011383],
        "vitb16_lvd1689m_patch": [-0.082523, -0.456272, -0.728029, -0.430680, -0.152880],
        "vitl16_lvd1689m_cls": [0.484527, -0.582214, 0.480636, 0.592040, 0.945166],
        "vitl16_lvd1689m_patch": [-0.211367, -0.490863, -0.257131, 0.101763, 0.154511],
        "vith16plus_lvd1689m_cls": [-0.064575, -0.148866, -0.621524, 0.634878, 0.152695],
        "vith16plus_lvd1689m_patch": [-0.093817, 0.287407, -0.050036, 0.428043, 0.094561],
        "vit7b16_lvd1689m_cls": [0.275439, -0.261353, 0.067772, 0.049936, -0.158747],
        "vit7b16_lvd1689m_patch": [0.044442, -0.052542, 0.070777, -0.065111, -0.026546],
        "vitl16_sat493m_cls": [-0.33235, 0.34052, -0.22087, 0.21434, 0.09003],
        "vitl16_sat493m_patch": [0.18488, 0.30309, -0.20689, 0.12848, 0.06207],
        "vit7b16_sat493m_cls": [-0.19779, 0.11819, -0.00581, -0.21055, -0.03971],
        "vit7b16_sat493m_patch": [-0.12423, 0.07879, -0.10057, 0.02835, -0.11727],
    }
    model_name = args.model_name
    config = get_dinov3_config(model_name)
    model = DINOv3ViTModel(config).eval()
    # Fetch the original weights from the Hub (mmap avoids loading everything eagerly).
    state_dict_path = hf_hub_download(repo_id=HUB_MODELS[model_name], filename=HUB_CHECKPOINTS[model_name])
    original_state_dict = torch.load(state_dict_path, mmap=True)
    original_state_dict = split_qkv(original_state_dict)
    original_keys = list(original_state_dict.keys())
    new_keys = convert_old_keys_to_new_keys(original_keys)
    converted_state_dict = {}
    for key in original_keys:
        new_key = new_keys[key]
        weight_tensor = original_state_dict[key]
        # Buffers/params with no HF counterpart are dropped (k_proj.bias exists only as a mask).
        if "bias_mask" in key or "attn.k_proj.bias" in key or "local_cls_norm" in key:
            continue
        if "embeddings.mask_token" in new_key:
            # HF stores the mask token with an extra singleton dimension.
            weight_tensor = weight_tensor.unsqueeze(1)
        if "inv_freq" in new_key:
            # Rotary frequencies are recomputed by the HF model, not loaded.
            continue
        converted_state_dict[new_key] = weight_tensor
    model.load_state_dict(converted_state_dict, strict=True)
    model = model.eval()
    transform = get_transform()
    image_processor = get_image_processor()
    image = prepare_img()
    # check preprocessing
    original_pixel_values = transform(image).unsqueeze(0)  # add batch dimension
    inputs = image_processor(image, return_tensors="pt")
    torch.testing.assert_close(original_pixel_values, inputs["pixel_values"], atol=1e-6, rtol=1e-6)
    print("Preprocessing looks ok!")
    with torch.inference_mode(), torch.autocast("cuda", dtype=torch.float):
        model_output = model(**inputs)
    last_layer_class_token = model_output.pooler_output
    # Skip the CLS token and the register tokens to get patch tokens only.
    last_layer_patch_tokens = model_output.last_hidden_state[:, config.num_register_tokens + 1 :]
    actual_outputs = {}
    actual_outputs[f"{model_name}_cls"] = last_layer_class_token[0, :5].tolist()
    actual_outputs[f"{model_name}_patch"] = last_layer_patch_tokens[0, 0, :5].tolist()
    print("Actual: ", [round(x, 6) for x in actual_outputs[f"{model_name}_cls"]])
    print("Expected:", expected_outputs[f"{model_name}_cls"])
    torch.testing.assert_close(
        torch.Tensor(actual_outputs[f"{model_name}_cls"]),
        torch.Tensor(expected_outputs[f"{model_name}_cls"]),
        atol=1e-3,
        rtol=1e-3,
    )
    torch.testing.assert_close(
        torch.Tensor(actual_outputs[f"{model_name}_patch"]),
        torch.Tensor(expected_outputs[f"{model_name}_patch"]),
        atol=1e-3,
        rtol=1e-3,
    )
    print("Forward pass looks ok!")
    save_dir = os.path.join(args.save_dir, model_name)
    os.makedirs(save_dir, exist_ok=True)
    model.save_pretrained(save_dir)
    image_processor.save_pretrained(save_dir)
    print(f"Model saved to {save_dir}")
    if args.push_to_hub:
        api = HfApi()
        repo = HUB_MODELS[model_name]
        api.upload_folder(folder_path=save_dir, repo_id=repo, repo_type="model")
if __name__ == "__main__":
    # Command-line entry point: parse arguments and run the conversion + verification.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model-name",
        default="vith16plus_lvd1689m",
        type=str,
        choices=[
            "vits16_lvd1689m",
            "vits16plus_lvd1689m",
            "vitb16_lvd1689m",
            "vitl16_lvd1689m",
            "vitl16_sat493m",
            "vith16plus_lvd1689m",
            "vit7b16_lvd1689m",
            "vit7b16_sat493m",
        ],
        help="Name of the model you'd like to convert.",
    )
    parser.add_argument(
        "--save-dir",
        default="converted_models",
        type=str,
        help="Directory to save the converted model.",
    )
    parser.add_argument(
        "--push-to-hub",
        action="store_true",
        help="Push the converted model to the Hugging Face Hub.",
    )
    args = parser.parse_args()
    convert_and_test_dinov3_checkpoint(args)
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/dinov3_vit/convert_dinov3_vit_to_hf.py",
"license": "Apache License 2.0",
"lines": 296,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/dinov3_vit/image_processing_dinov3_vit_fast.py | # Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fast Image processor class for DINOv3."""
from typing import Optional
import torch
import torchvision.transforms.v2.functional as tvF
from transformers.image_processing_base import BatchFeature
from transformers.image_processing_utils_fast import BaseImageProcessorFast, group_images_by_shape, reorder_images
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling, SizeDict
from transformers.utils import (
TensorType,
auto_docstring,
logging,
)
from transformers.utils.import_utils import requires
logger = logging.get_logger(__name__)
@auto_docstring
@requires(backends=("torchvision", "torch"))
class DINOv3ViTImageProcessorFast(BaseImageProcessorFast):
    # Defaults matching the original DINOv3 preprocessing pipeline.
    resample = PILImageResampling.BILINEAR
    image_mean = IMAGENET_DEFAULT_MEAN
    image_std = IMAGENET_DEFAULT_STD
    size = {"height": 224, "width": 224}
    do_resize = True
    do_rescale = True
    do_normalize = True

    # Overridden for DINOv3 to preserve order of transforms
    # rescale -> resize -> normalize
    def _preprocess(
        self,
        images: list["torch.Tensor"],
        do_resize: bool,
        size: SizeDict,
        interpolation: Optional["tvF.InterpolationMode"],
        do_center_crop: bool,
        crop_size: SizeDict,
        do_rescale: bool,
        rescale_factor: float,
        do_normalize: bool,
        image_mean: float | list[float] | None,
        image_std: float | list[float] | None,
        disable_grouping: bool | None,
        return_tensors: str | TensorType | None,
        **kwargs,
    ) -> BatchFeature:
        """Rescale, resize, (optionally) crop, and normalize `images`, batching same-shape images together.

        Unlike the base class, rescaling happens BEFORE resizing so the pixel statistics
        match the original DINOv3 torchvision pipeline.
        """
        # Group images by size for batched resizing
        grouped_images, grouped_images_index = group_images_by_shape(images, disable_grouping=disable_grouping)
        resized_images_grouped = {}
        for shape, stacked_images in grouped_images.items():
            if do_rescale:
                stacked_images = self.rescale(stacked_images, rescale_factor)
            if do_resize:
                # antialias=True matches torchvision's Resize behavior.
                stacked_images = self.resize(
                    image=stacked_images, size=size, interpolation=interpolation, antialias=True
                )
            resized_images_grouped[shape] = stacked_images
        resized_images = reorder_images(resized_images_grouped, grouped_images_index)
        # Group images by size for further processing
        # Needed in case do_resize is False, or resize returns images with different sizes
        grouped_images, grouped_images_index = group_images_by_shape(resized_images, disable_grouping=disable_grouping)
        processed_images_grouped = {}
        for shape, stacked_images in grouped_images.items():
            if do_center_crop:
                stacked_images = self.center_crop(stacked_images, crop_size)
            if do_normalize:
                stacked_images = self.normalize(stacked_images, image_mean, image_std)
            processed_images_grouped[shape] = stacked_images
        processed_images = reorder_images(processed_images_grouped, grouped_images_index)
        return BatchFeature(data={"pixel_values": processed_images}, tensor_type=return_tensors)
__all__ = ["DINOv3ViTImageProcessorFast"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/dinov3_vit/image_processing_dinov3_vit_fast.py",
"license": "Apache License 2.0",
"lines": 81,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/dinov3_vit/modular_dinov3_vit.py | # Copyright 2025 Meta AI and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch DINOv3 model."""
import math
from collections.abc import Callable
import numpy as np
import torch
from torch import nn
from transformers.models.arcee.modeling_arcee import ArceeMLP
from transformers.models.dinov2.modeling_dinov2 import (
Dinov2DropPath,
Dinov2LayerScale,
Dinov2PreTrainedModel,
eager_attention_forward,
)
from transformers.models.llama.modeling_llama import LlamaMLP
from transformers.models.pixtral.modeling_pixtral import PixtralAttention, rotate_half
from ... import initialization as init
from ...backbone_utils import BackboneMixin
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import BackboneOutput, BaseModelOutputWithPooling
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS
from ...processing_utils import Unpack
from ...pytorch_utils import compile_compatible_method_lru_cache
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, logging
from ...utils.generic import maybe_autocast, merge_with_config_defaults
from ...utils.output_capturing import capture_outputs
from .configuration_dinov3_vit import DINOv3ViTConfig
logger = logging.get_logger(__name__)
class DINOv3ViTEmbeddings(nn.Module):
    """
    Builds the token sequence fed to the encoder: a learned CLS token, learned
    register tokens, and patch embeddings produced by a strided convolution.
    """

    def __init__(self, config: DINOv3ViTConfig):
        super().__init__()
        self.config = config
        # Learned prefix tokens prepended to every patch sequence.
        self.cls_token = nn.Parameter(torch.randn(1, 1, config.hidden_size))
        # Token substituted for masked patches during pre-training.
        self.mask_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size))
        self.register_tokens = nn.Parameter(torch.empty(1, config.num_register_tokens, config.hidden_size))
        # Non-overlapping patchification: kernel == stride == patch_size.
        self.patch_embeddings = nn.Conv2d(
            config.num_channels, config.hidden_size, kernel_size=config.patch_size, stride=config.patch_size
        )

    def forward(self, pixel_values: torch.Tensor, bool_masked_pos: torch.Tensor | None = None) -> torch.Tensor:
        """Return (batch, 1 + num_register_tokens + num_patches, hidden_size) embeddings."""
        num_images = pixel_values.shape[0]
        conv_dtype = self.patch_embeddings.weight.dtype
        # (batch, channels, H, W) -> (batch, hidden, H/p, W/p) -> (batch, num_patches, hidden)
        patches = self.patch_embeddings(pixel_values.to(dtype=conv_dtype)).flatten(2).transpose(1, 2)
        if bool_masked_pos is not None:
            # Replace embeddings of masked patches with the (broadcast) mask token.
            patches = torch.where(bool_masked_pos.unsqueeze(-1), self.mask_token.to(patches.dtype), patches)
        # Prepend CLS and register tokens, expanded across the batch.
        prefix = [
            self.cls_token.expand(num_images, -1, -1),
            self.register_tokens.expand(num_images, -1, -1),
        ]
        return torch.cat(prefix + [patches], dim=1)
@compile_compatible_method_lru_cache(maxsize=32)
def get_patches_center_coordinates(
    num_patches_h: int, num_patches_w: int, dtype: torch.dtype, device: torch.device
) -> torch.Tensor:
    """
    Return the (y, x) center coordinate of every image patch, normalized to [-1, +1].

    The center of each patch is exactly halfway between its top-left and
    bottom-right corners, hence the 0.5 offset before normalization.

    Args:
        num_patches_h (int): Number of patches along the vertical (height) axis.
        num_patches_w (int): Number of patches along the horizontal (width) axis.
        dtype (torch.dtype): The desired data type of the returned tensor.
        device (torch.device): Device on which the tensor is created.

    Returns:
        torch.Tensor: A tensor of shape (num_patches_h * num_patches_w, 2); each row
        is a (y, x) patch-center coordinate normalized to [-1, +1].
    """
    # Patch centers in [0, 1): (index + 0.5) / num_patches along each axis.
    centers_y = torch.arange(0.5, num_patches_h, dtype=dtype, device=device) / num_patches_h
    centers_x = torch.arange(0.5, num_patches_w, dtype=dtype, device=device) / num_patches_w
    # (height, width, 2) -> (height * width, 2)
    grid = torch.stack(torch.meshgrid(centers_y, centers_x, indexing="ij"), dim=-1)
    grid = grid.flatten(0, 1)
    # Map [0, 1] -> [-1, +1].
    return 2.0 * grid - 1.0
def augment_patches_center_coordinates(
coords: torch.Tensor,
shift: float | None = None,
jitter: float | None = None,
rescale: float | None = None,
) -> torch.Tensor:
# Shift coords by adding a uniform value in [-shift, shift]
if shift is not None:
shift_hw = torch.empty((1, 2), device=coords.device, dtype=coords.dtype)
shift_hw = shift_hw.uniform_(-shift, shift)
coords = coords + shift_hw
# Jitter coords by multiplying the range [-1, 1] by a log-uniform value in [1/jitter, jitter]
if jitter is not None:
jitter_range = np.log(jitter)
jitter_hw = torch.empty((1, 2), device=coords.device, dtype=coords.dtype)
jitter_hw = jitter_hw.uniform_(-jitter_range, jitter_range).exp()
coords = coords * jitter_hw
# Rescale coords by multiplying the range [-1, 1] by a log-uniform value in [1/rescale, rescale]
if rescale is not None:
rescale_range = np.log(rescale)
rescale_hw = torch.empty(1, device=coords.device, dtype=coords.dtype)
rescale_hw = rescale_hw.uniform_(-rescale_range, rescale_range).exp()
coords = coords * rescale_hw
return coords
class DINOv3ViTRopePositionEmbedding(nn.Module):
    """
    2D rotary position embedding (RoPE) over patch-center coordinates.

    Produces per-image (cos, sin) tables of shape (num_patches, head_dim) that
    `apply_rotary_pos_emb` later applies to patch tokens only (prefix tokens are
    handled separately there).
    """

    # Inverse-frequency ladder, shape (head_dim / 4,); registered non-persistently in __init__.
    inv_freq: torch.Tensor

    def __init__(self, config: DINOv3ViTConfig):
        super().__init__()
        self.config = config
        self.base = config.rope_theta
        self.head_dim = config.hidden_size // config.num_attention_heads
        self.num_patches_h = config.image_size // config.patch_size
        self.num_patches_w = config.image_size // config.patch_size
        # Geometric ladder base ** (-4k / head_dim); note `**` binds tighter than `/`,
        # so this is 1 / (base ** arange(...)).
        inv_freq = 1 / self.base ** torch.arange(0, 1, 4 / self.head_dim, dtype=torch.float32)  # (head_dim / 4,)
        self.register_buffer("inv_freq", inv_freq, persistent=False)

    def forward(self, pixel_values: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
        """Return (cos, sin), each (num_patches, head_dim), cast to `pixel_values`' dtype."""
        _, _, height, width = pixel_values.shape
        num_patches_h = height // self.config.patch_size
        num_patches_w = width // self.config.patch_size
        device = pixel_values.device
        # NOTE(review): `device.type` is always a str, so the isinstance check looks redundant;
        # net effect is mapping "mps" to "cpu" for the autocast guard — confirm intent.
        device_type = device.type if isinstance(device.type, str) and device.type != "mps" else "cpu"
        with maybe_autocast(device_type=device_type, enabled=False):  # Force float32
            # Although we could precompute static patch_coords from image_size and patch_size in the config,
            # the model was trained with random_scale, so it can process images of varying sizes.
            # Therefore, it's better to compute patch_coords dynamically (with lru_cache).
            patch_coords = get_patches_center_coordinates(
                num_patches_h, num_patches_w, dtype=torch.float32, device=device
            )
            # Coordinate augmentation is a train-time-only regularizer.
            if self.training:
                patch_coords = augment_patches_center_coordinates(
                    patch_coords,
                    shift=self.config.pos_embed_shift,
                    jitter=self.config.pos_embed_jitter,
                    rescale=self.config.pos_embed_rescale,
                )
            # (height * width, 2, head_dim / 4) -> (height * width, head_dim / 2) -> (height * width, head_dim)
            angles = 2 * math.pi * patch_coords[:, :, None] * self.inv_freq[None, None, :]
            angles = angles.flatten(1, 2)
            angles = angles.tile(2)
            cos = torch.cos(angles)
            sin = torch.sin(angles)
        dtype = pixel_values.dtype
        return cos.to(dtype=dtype), sin.to(dtype=dtype)
def apply_rotary_pos_emb(
    q: torch.Tensor, k: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor, **kwargs
) -> tuple[torch.Tensor, torch.Tensor]:
    """Applies Rotary Position Embedding to the query and key tensors, but only to the patch tokens,
    ignoring the prefix tokens (cls token and register tokens).

    Args:
        q (`torch.Tensor`): The query tensor (prefix tokens first along dim -2).
        k (`torch.Tensor`): The key tensor, laid out like `q`.
        cos (`torch.Tensor`): The cosine part of the rotary embedding (patch tokens only).
        sin (`torch.Tensor`): The sine part of the rotary embedding (patch tokens only).

    Returns:
        `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
    """
    # The prefix length is whatever precedes the patch tokens covered by sin/cos.
    total_len = q.shape[-2]
    patch_len = sin.shape[-2]
    prefix_len = total_len - patch_len  # cls token + register tokens
    rotated = []
    for tensor in (q, k):
        prefix, patches = tensor.split((prefix_len, patch_len), dim=-2)
        # Rotate patch tokens only; prefix tokens pass through unchanged.
        patches = (patches * cos) + (rotate_half(patches) * sin)
        rotated.append(torch.cat((prefix, patches), dim=-2))
    return rotated[0], rotated[1]
class DINOv3ViTAttention(PixtralAttention):
    """
    Multi-head self-attention with rotary embeddings applied to patch tokens.

    Re-creates the four projections from `PixtralAttention` so that each bias can
    be toggled independently through the config.
    """

    def __init__(self, config: DINOv3ViTConfig):
        super().__init__(config)
        # Override the parent's projections: biases are configured per-projection here.
        self.q_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=config.query_bias)
        self.k_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=config.key_bias)
        self.v_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=config.value_bias)
        self.o_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=config.proj_bias)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: torch.Tensor | None = None,
        position_embeddings: tuple[torch.Tensor, torch.Tensor] | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple[torch.Tensor, torch.Tensor | None]:
        """Input shape: Batch x Time x Channel"""
        batch_size, patches, _ = hidden_states.size()
        # Project, then reshape to (batch, heads, seq, head_dim).
        query_states = self.q_proj(hidden_states)
        key_states = self.k_proj(hidden_states)
        value_states = self.v_proj(hidden_states)
        query_states = query_states.view(batch_size, patches, self.num_heads, self.head_dim).transpose(1, 2)
        key_states = key_states.view(batch_size, patches, self.num_heads, self.head_dim).transpose(1, 2)
        value_states = value_states.view(batch_size, patches, self.num_heads, self.head_dim).transpose(1, 2)
        # RoPE rotates patch tokens only; prefix tokens are left untouched.
        cos, sin = position_embeddings
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
        # Dispatch to the configured attention backend, falling back to eager.
        attention_interface: Callable = ALL_ATTENTION_FUNCTIONS.get_interface(
            self.config._attn_implementation, eager_attention_forward
        )
        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=0.0 if not self.training else self.dropout,  # dropout only while training
            scaling=self.scaling,
            **kwargs,
        )
        # Merge heads back and apply the output projection.
        attn_output = attn_output.reshape(batch_size, patches, -1).contiguous()
        attn_output = self.o_proj(attn_output)
        return attn_output, attn_weights
class DINOv3ViTLayerScale(Dinov2LayerScale):
    """Learnable per-channel scaling of a residual branch; reused unchanged from `Dinov2LayerScale`."""

    pass
class DINOv3ViTDropPath(Dinov2DropPath):
    """Stochastic-depth drop path; reused unchanged from `Dinov2DropPath`."""

    pass
class DINOv3ViTMLP(ArceeMLP):
    """Non-gated transformer MLP; reused unchanged from `ArceeMLP`."""

    pass
class DINOv3ViTGatedMLP(LlamaMLP):
    """Gated MLP variant (selected via `config.use_gated_mlp`); reused unchanged from `LlamaMLP`."""

    pass
class DINOv3ViTLayer(GradientCheckpointingLayer):
    """This corresponds to the Block class in the original implementation."""

    def __init__(self, config: DINOv3ViTConfig):
        super().__init__()
        # Pre-norm attention branch.
        self.norm1 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.attention = DINOv3ViTAttention(config)
        self.layer_scale1 = DINOv3ViTLayerScale(config)
        # Stochastic depth is only active when a positive rate is configured.
        self.drop_path = nn.Identity() if not config.drop_path_rate > 0.0 else DINOv3ViTDropPath(config.drop_path_rate)
        # Pre-norm MLP branch; gated variant is selected via the config flag.
        self.norm2 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        mlp_cls = DINOv3ViTGatedMLP if config.use_gated_mlp else DINOv3ViTMLP
        self.mlp = mlp_cls(config)
        self.layer_scale2 = DINOv3ViTLayerScale(config)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: torch.Tensor | None = None,
        position_embeddings: tuple[torch.Tensor, torch.Tensor] | None = None,
    ) -> torch.Tensor:
        # Attention sub-block with a residual connection.
        attn_input = hidden_states
        attn_out, _ = self.attention(
            self.norm1(attn_input),
            attention_mask=attention_mask,
            position_embeddings=position_embeddings,
        )
        hidden_states = self.drop_path(self.layer_scale1(attn_out)) + attn_input
        # MLP sub-block with a residual connection.
        mlp_input = hidden_states
        mlp_out = self.layer_scale2(self.mlp(self.norm2(mlp_input)))
        return self.drop_path(mlp_out) + mlp_input
@auto_docstring
class DINOv3ViTPreTrainedModel(Dinov2PreTrainedModel):
    # Maps output-capture names to the module classes whose outputs get recorded.
    _can_record_outputs = {
        "hidden_states": DINOv3ViTLayer,
        "attentions": DINOv3ViTAttention,
    }

    @torch.no_grad()
    def _init_weights(self, module) -> None:
        """Initialize the weights"""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            # Truncated normal for weights, zeros for biases.
            init.trunc_normal_(module.weight, mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                init.zeros_(module.bias)
        elif isinstance(module, nn.LayerNorm):
            init.zeros_(module.bias)
            init.ones_(module.weight)
        elif isinstance(module, DINOv3ViTEmbeddings):
            init.trunc_normal_(module.cls_token, mean=0.0, std=self.config.initializer_range)
            # register_tokens is created with torch.empty, so it must be initialized here.
            if module.config.num_register_tokens > 0:
                init.trunc_normal_(module.register_tokens, mean=0.0, std=self.config.initializer_range)
            init.zeros_(module.mask_token)
        elif isinstance(module, DINOv3ViTLayerScale):
            init.constant_(module.lambda1, self.config.layerscale_value)
        elif isinstance(module, DINOv3ViTRopePositionEmbedding):
            # Recompute the same inverse-frequency ladder built in the module's __init__.
            inv_freq = 1 / module.base ** torch.arange(0, 1, 4 / module.head_dim, dtype=torch.float32)
            init.copy_(module.inv_freq, inv_freq)
@auto_docstring
class DINOv3ViTModel(DINOv3ViTPreTrainedModel):
    """Bare DINOv3 ViT encoder: embeddings + RoPE + transformer layers + final LayerNorm."""

    def __init__(self, config: DINOv3ViTConfig):
        super().__init__(config)
        self.config = config
        self.embeddings = DINOv3ViTEmbeddings(config)
        # One shared rotary-embedding module; its (cos, sin) output is reused by every layer.
        self.rope_embeddings = DINOv3ViTRopePositionEmbedding(config)
        self.layer = nn.ModuleList([DINOv3ViTLayer(config) for _ in range(config.num_hidden_layers)])
        self.norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.gradient_checkpointing = False
        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        """Return the patch-embedding convolution (the model's input projection)."""
        return self.embeddings.patch_embeddings

    @merge_with_config_defaults
    @capture_outputs(tie_last_hidden_states=False)
    @auto_docstring
    def forward(
        self,
        pixel_values: torch.Tensor,
        bool_masked_pos: torch.Tensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> BaseModelOutputWithPooling:
        r"""
        bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, sequence_length)`):
            Boolean masked positions. Indicates which patches are masked (1) and which aren't (0). Only relevant for
            pre-training.
        """
        # Match the input dtype to the patch-embedding weights (e.g. under half precision).
        pixel_values = pixel_values.to(self.embeddings.patch_embeddings.weight.dtype)
        hidden_states = self.embeddings(pixel_values, bool_masked_pos=bool_masked_pos)
        # Computed once per image size and shared across all layers.
        position_embeddings = self.rope_embeddings(pixel_values)
        for i, layer_module in enumerate(self.layer):
            hidden_states = layer_module(
                hidden_states,
                position_embeddings=position_embeddings,
            )
        sequence_output = self.norm(hidden_states)
        # Pooled output is the final CLS token (sequence position 0).
        pooled_output = sequence_output[:, 0, :]
        return BaseModelOutputWithPooling(last_hidden_state=sequence_output, pooler_output=pooled_output)
@auto_docstring
class DINOv3ViTBackbone(BackboneMixin, DINOv3ViTPreTrainedModel):
    """DINOv3 ViT exposed through the backbone API: returns per-stage 2D feature maps."""

    def __init__(self, config):
        super().__init__(config)
        self.embeddings = DINOv3ViTEmbeddings(config)
        self.rope_embeddings = DINOv3ViTRopePositionEmbedding(config)
        self.layer = nn.ModuleList([DINOv3ViTLayer(config) for _ in range(config.num_hidden_layers)])
        self.norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.gradient_checkpointing = False
        # ViT keeps a constant width, so every stage (embeddings + each layer) has hidden_size channels.
        self.num_features = [config.hidden_size for _ in range(config.num_hidden_layers + 1)]
        self.post_init()

    def get_input_embeddings(self):
        """Return the patch-embedding convolution (the model's input projection)."""
        return self.embeddings.patch_embeddings

    @merge_with_config_defaults
    @capture_outputs
    @can_return_tuple
    def forward(
        self,
        pixel_values: torch.Tensor,
        output_hidden_states: bool | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> BackboneOutput:
        pixel_values = pixel_values.to(self.embeddings.patch_embeddings.weight.dtype)
        hidden_states = self.embeddings(pixel_values)
        position_embeddings = self.rope_embeddings(pixel_values)
        # Collect one hidden state per stage: after embeddings and after every layer.
        stage_hidden_states: list[torch.Tensor] = [hidden_states]
        for layer_module in self.layer:
            hidden_states = layer_module(hidden_states, position_embeddings=position_embeddings)
            stage_hidden_states.append(hidden_states)
        batch_size, _, image_height, image_width = pixel_values.shape
        patch_size = self.config.patch_size
        num_patches_height = image_height // patch_size
        num_patches_width = image_width // patch_size
        # CLS + register tokens precede the patch tokens and are stripped from feature maps.
        num_prefix = 1 + getattr(self.config, "num_register_tokens", 0)
        feature_maps = []
        sequence_output = None
        last_stage_idx = len(self.stage_names) - 1
        for idx, (stage_name, hidden_state) in enumerate(zip(self.stage_names, stage_hidden_states)):
            if idx == last_stage_idx:
                # The final stage is always normed and doubles as last_hidden_state.
                hidden_state = self.norm(hidden_state)
                sequence_output = hidden_state
            elif self.config.apply_layernorm:
                hidden_state = self.norm(hidden_state)
            if stage_name in self.out_features:
                patch_tokens = hidden_state[:, num_prefix:, :]
                if self.config.reshape_hidden_states:
                    # (batch, h*w, dim) -> (batch, dim, h, w)
                    fmap = (
                        patch_tokens.reshape(batch_size, num_patches_height, num_patches_width, patch_tokens.shape[-1])
                        .permute(0, 3, 1, 2)
                        .contiguous()
                    )
                else:
                    fmap = patch_tokens
                feature_maps.append(fmap)
        output = BackboneOutput(feature_maps=tuple(feature_maps))
        output.last_hidden_state = sequence_output
        return output
__all__ = ["DINOv3ViTModel", "DINOv3ViTPreTrainedModel", "DINOv3ViTBackbone"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/dinov3_vit/modular_dinov3_vit.py",
"license": "Apache License 2.0",
"lines": 387,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:tests/models/dinov3_convnext/test_modeling_dinov3_convnext.py | # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch ConvNext model."""
import unittest
from functools import cached_property
from transformers import DINOv3ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DINOv3ConvNextBackbone, DINOv3ConvNextModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class DINOv3ConvNextModelTester:
    """Builds small DINOv3ConvNext configs/inputs and shared model/backbone checks.

    Fix: the original `create_and_check_backbone` ended with an exact copy-paste
    duplicate of the `out_features=None` verification (re-instantiating the model
    and re-running identical assertions), which added no coverage and doubled the
    test's runtime; the duplicate is removed.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=False,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        num_labels=10,
        initializer_range=0.02,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels); labels is None when use_labels is False."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        """Build a small DINOv3ConvNextConfig from the tester's hyperparameters."""
        return DINOv3ConvNextConfig(
            num_channels=self.num_channels,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            is_decoder=False,
            initializer_range=self.initializer_range,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        """Check the base model's last-hidden-state shape on random inputs."""
        model = DINOv3ConvNextModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                1 + self.image_size // 32 * self.image_size // 32,
                self.hidden_sizes[-1],
            ),
        )

    def create_and_check_backbone(self, config, pixel_values, labels):
        """Check feature-map shapes/channels with explicit out_features and with the default (None)."""
        model = DINOv3ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        # Spatial size after the stride-4 stem and the remaining stride-2 stages.
        expected_size = self.image_size // (4 * (2 ** (len(config.depths) - 1)))
        self.parent.assertListEqual(
            list(result.feature_maps[0].shape), [self.batch_size, model.channels[0], expected_size, expected_size]
        )
        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        # verify backbone works with out_features=None
        config.out_features = None
        model = DINOv3ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(
            list(result.feature_maps[0].shape), [self.batch_size, model.channels[0], expected_size, expected_size]
        )
        # verify channels
        self.parent.assertEqual(len(model.channels), 1)

    def prepare_config_and_inputs_for_common(self):
        """Adapt prepare_config_and_inputs to the (config, inputs_dict) shape used by the mixins."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class DINOv3ConvNextModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as ConvNext does not use input_ids, inputs_embeds,
    attention_mask and seq_length.
    """

    all_model_classes = (DINOv3ConvNextModel,) if is_torch_available() else ()
    pipeline_model_mapping = {"image-feature-extraction": DINOv3ConvNextModel} if is_torch_available() else {}
    # No token embeddings to resize and no attention outputs for this conv architecture.
    test_resize_embeddings = False
    has_attentions = False

    def setUp(self):
        self.model_tester = DINOv3ConvNextModelTester(self)
        self.config_tester = ConfigTester(
            self,
            config_class=DINOv3ConvNextConfig,
            has_text_modality=False,
            hidden_size=37,
            common_properties=["num_channels", "hidden_sizes"],
        )

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DINOv3ConvNext does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="DINOv3ConvNext does not support input and output embeddings")
    def test_model_get_set_embeddings(self):
        pass

    @unittest.skip(reason="DINOv3ConvNext does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_hidden_states_output(self):
        """Hidden states count and spatial sizes match the tester's 4-stage config."""

        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            # 5 = len(depths) + 1 for the default 4-stage tester configuration.
            self.assertEqual(len(hidden_states), 5)
            # DINOv3ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[1].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    @slow
    def test_model_from_pretrained(self):
        model_name = "facebook/dinov3-convnext-tiny-pretrain-lvd1689m"
        model = DINOv3ConvNextModel.from_pretrained(model_name)
        self.assertIsNotNone(model)

    @unittest.skip(reason="DINOv3ConvNext does not retain grads for first hidden state (original pixel_values)")
    def test_retain_grad_hidden_states_attentions(self):
        pass
# We will verify our results on an image of cute cats
def prepare_img():
    """Load the cats fixture image used by the slow integration tests."""
    return Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
@require_torch
@require_vision
class DINOv3ConvNextModelIntegrationTest(unittest.TestCase):
    """End-to-end checks of the released checkpoint against recorded output values."""

    @cached_property
    def default_image_processor(self):
        # Lazily constructed; None when vision dependencies are unavailable.
        return (
            AutoImageProcessor.from_pretrained("facebook/dinov3-convnext-tiny-pretrain-lvd1689m")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_no_head(self):
        model = DINOv3ConvNextModel.from_pretrained("facebook/dinov3-convnext-tiny-pretrain-lvd1689m").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the last hidden states
        _, _, height, width = inputs["pixel_values"].shape
        # presumably stem + num_stages downsamplings of x2 each (x4 in area) — matches 4 ** (num_stages + 1)
        expected_seq_length = (height * width) // 4 ** (model.config.num_stages + 1) + 1  # +1 for the "CLS" token
        expected_shape = torch.Size((1, expected_seq_length, model.config.hidden_sizes[-1]))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        last_layer_cls_token = outputs.pooler_output
        # Reference values recorded from the released checkpoint.
        expected_slice = torch.tensor([-6.3721, 1.3008, 2.0743, -0.0800, 0.6072], device=torch_device)
        torch.testing.assert_close(last_layer_cls_token[0, :5], expected_slice, rtol=1e-4, atol=1e-4)
        last_layer_patch_tokens = outputs.last_hidden_state[:, 1:]
        expected_slice = torch.tensor([0.4905, -3.7135, 1.8485, -1.0403, -1.0908], device=torch_device)
        torch.testing.assert_close(last_layer_patch_tokens[0, 0, :5], expected_slice, rtol=1e-4, atol=1e-4)
@require_torch
class DINOv3ConvNextBackboneTest(unittest.TestCase, BackboneTesterMixin):
    """Runs the shared backbone test suite against DINOv3ConvNextBackbone."""

    all_model_classes = (DINOv3ConvNextBackbone,) if is_torch_available() else ()
    config_class = DINOv3ConvNextConfig
    has_attentions = False

    def setUp(self):
        self.model_tester = DINOv3ConvNextModelTester(self)
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/dinov3_convnext/test_modeling_dinov3_convnext.py",
"license": "Apache License 2.0",
"lines": 237,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:tests/models/dinov3_vit/test_image_processing_dinov3_vit_fast.py | # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torchvision_available
from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs
if is_torchvision_available():
from transformers import DINOv3ViTImageProcessorFast
class DINOv3ViTImageProcessingTester:
    """Holds hyperparameters for the fast DINOv3 ViT image-processor tests and
    produces matching constructor kwargs, inputs, and expected output shapes."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.48145466, 0.4578275, 0.40821073],
        image_std=[0.26862954, 0.26130258, 0.27577711],
        do_convert_rgb=True,
    ):
        super().__init__()
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        # Fall back to the defaults the processor is expected to expose.
        self.size = {"shortest_edge": 20} if size is None else size
        self.do_center_crop = do_center_crop
        self.crop_size = {"height": 18, "width": 18} if crop_size is None else crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb

    def prepare_image_processor_dict(self):
        """Return the kwargs dict used to construct the image processor under test."""
        keys = (
            "do_resize",
            "size",
            "do_center_crop",
            "crop_size",
            "do_normalize",
            "image_mean",
            "image_std",
            "do_convert_rgb",
        )
        return {key: getattr(self, key) for key in keys}

    def expected_output_image_shape(self, images):
        """Expected per-image (C, H, W) after processing: channels x crop size."""
        return self.num_channels, self.crop_size["height"], self.crop_size["width"]

    def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        """Build random test images via the shared `prepare_image_inputs` helper."""
        return prepare_image_inputs(
            batch_size=self.batch_size,
            num_channels=self.num_channels,
            min_resolution=self.min_resolution,
            max_resolution=self.max_resolution,
            equal_resolution=equal_resolution,
            numpify=numpify,
            torchify=torchify,
        )
@require_torch
@require_vision
class DINOv3ViTImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase):
    """Runs the shared image-processing test suite against the fast DINOv3 ViT processor."""

    # Only the fast (torchvision-backed) processor exists for this model.
    image_processing_class = None
    fast_image_processing_class = DINOv3ViTImageProcessorFast if is_torchvision_available() else None
    test_slow_image_processor = False

    def setUp(self):
        super().setUp()
        self.image_processor_tester = DINOv3ViTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        # Constructor kwargs shared by every processor instantiation in this suite.
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        for image_processing_class in self.image_processor_list:
            image_processing = image_processing_class(**self.image_processor_dict)
            self.assertTrue(hasattr(image_processing, "do_resize"))
            self.assertTrue(hasattr(image_processing, "size"))
            self.assertTrue(hasattr(image_processing, "do_center_crop"))
            self.assertTrue(hasattr(image_processing, "center_crop"))
            self.assertTrue(hasattr(image_processing, "do_normalize"))
            self.assertTrue(hasattr(image_processing, "image_mean"))
            self.assertTrue(hasattr(image_processing, "image_std"))
            self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_image_processor_from_dict_with_kwargs(self):
        for image_processing_class in self.image_processor_list:
            image_processor = image_processing_class.from_dict(self.image_processor_dict)
            self.assertEqual(image_processor.size, {"shortest_edge": 20})
            self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
            # Keyword overrides passed to from_dict take precedence over the dict.
            image_processor = image_processing_class.from_dict(
                self.image_processor_dict, size={"height": 42, "width": 42}
            )
            self.assertEqual(image_processor.size, {"height": 42, "width": 42})
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/dinov3_vit/test_image_processing_dinov3_vit_fast.py",
"license": "Apache License 2.0",
"lines": 109,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:tests/models/dinov3_vit/test_modeling_dinov3_vit.py | # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch DINOv3 model."""
import unittest
from functools import cached_property
from transformers import DINOv3ViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import DINOv3ViTBackbone, DINOv3ViTModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class DINOv3ViTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=False,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_register_tokens=2,
        mask_ratio=0.5,
        scope=None,
    ):
        """Store hyperparameters for building tiny DINOv3 ViT configs and inputs."""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_register_tokens = num_register_tokens
        self.scope = scope
        # Token sequence = patch tokens + 1 CLS token + register tokens.
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1 + self.num_register_tokens
        self.mask_ratio = mask_ratio
        # NOTE(review): num_masks is derived from the full seq_length while mask_length
        # covers only the patch tokens — confirm this asymmetry is intended.
        self.num_masks = int(mask_ratio * self.seq_length)
        self.mask_length = num_patches
def prepare_config_and_inputs(self):
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
labels = None
if self.use_labels:
labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
config = self.get_config()
return config, pixel_values, labels
def get_config(self):
return DINOv3ViTConfig(
image_size=self.image_size,
patch_size=self.patch_size,
num_channels=self.num_channels,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
is_decoder=False,
initializer_range=self.initializer_range,
num_register_tokens=self.num_register_tokens,
stage_names=["embeddings"] + [f"stage{i}" for i in range(1, self.num_hidden_layers + 1)],
out_indices=[0, 1],
reshape_hidden_states=True,
)
def create_and_check_backbone(self, config, pixel_values, labels):
config.out_features = ["stage1", "stage2"]
config.reshape_hidden_states = True
model = DINOv3ViTBackbone(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(pixel_values)
self.parent.assertEqual(len(outputs.feature_maps), 2)
for fm in outputs.feature_maps:
b, c, h, w = fm.shape
self.parent.assertEqual(b, self.batch_size)
self.parent.assertEqual(c, self.hidden_size)
self.parent.assertGreater(h, 0)
self.parent.assertGreater(w, 0)
def test_output_hidden_states(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**inputs_dict, output_hidden_states=True)
self.assertIsNotNone(outputs.hidden_states)
expected_num_hidden_states = config.num_hidden_layers + 1
self.assertEqual(len(outputs.hidden_states), expected_num_hidden_states)
for hidden_state in outputs.hidden_states:
expected_shape = (
self.model_tester.batch_size,
self.model_tester.seq_length,
self.model_tester.hidden_size,
)
self.assertEqual(hidden_state.shape, expected_shape)
def create_and_check_model(self, config, pixel_values, labels):
model = DINOv3ViTModel(config=config)
model.to(torch_device)
model.eval()
result = model(pixel_values)
self.parent.assertEqual(
result.last_hidden_state.shape,
(self.batch_size, self.seq_length, self.hidden_size),
)
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
pixel_values,
labels,
) = config_and_inputs
inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class Dinov3ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Common-suite tests for DINOv3. Some tests from test_modeling_common.py are overridden or
    skipped, as Dinov3 does not use input_ids, inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (DINOv3ViTModel, DINOv3ViTBackbone) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "image-feature-extraction": DINOv3ViTModel,
        }
        if is_torch_available()
        else {}
    )
    # Vision-only model: there are no token embeddings to resize, and the common attention-output
    # checks are disabled here.
    test_resize_embeddings = False
    test_attention_outputs = False

    def setUp(self):
        self.model_tester = DINOv3ViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DINOv3ViTConfig, has_text_modality=False, hidden_size=37)

    def test_backbone(self):
        config, pixel_values, labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(config, pixel_values, labels)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="Dinov3 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_get_set_embeddings(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            # The patch-embedding module stands in for the usual token-embedding layer.
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @unittest.skip(reason="Dinov3 does not support feedforward chunking yet")
    def test_feed_forward_chunking(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        # Smoke test: loading the released checkpoint from the Hub must succeed.
        model_name = "facebook/dinov3-vits16-pretrain-lvd1689m"
        model = DINOv3ViTModel.from_pretrained(model_name)
        self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
    """Load the COCO cats fixture image used by the integration tests below."""
    return Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
@require_torch
@require_vision
class DINOv3ViTModelIntegrationTest(unittest.TestCase):
    """Slow integration checks against the released DINOv3 ViT-S/16 checkpoint."""

    @cached_property
    def default_image_processor(self):
        # Returns None when vision deps are missing; @require_vision guards the actual use.
        return (
            AutoImageProcessor.from_pretrained("facebook/dinov3-vits16-pretrain-lvd1689m")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_no_head(self):
        """Forward the fixture image and compare shapes/slices against recorded reference values."""
        model = DINOv3ViTModel.from_pretrained("facebook/dinov3-vits16-pretrain-lvd1689m").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the last hidden states
        # in DINOv3 with Registers, the seq length equals the number of patches + 1 + num_register_tokens (we add 1 for the [CLS] token)
        _, _, height, width = inputs["pixel_values"].shape
        num_patches = (height // model.config.patch_size) * (width // model.config.patch_size)
        expected_seq_length = num_patches + 1 + model.config.num_register_tokens
        expected_shape = torch.Size((1, expected_seq_length, model.config.hidden_size))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        # reference slice for the pooled [CLS] token, recorded from a known-good run
        last_layer_cls_token = outputs.pooler_output
        expected_slice = torch.tensor([0.4637, -0.4160, 0.4086, -0.1265, -0.2865], device=torch_device)
        torch.testing.assert_close(last_layer_cls_token[0, :5], expected_slice, rtol=1e-4, atol=1e-4)
        # patch tokens follow the [CLS] + register tokens in the sequence
        last_layer_patch_tokens = outputs.last_hidden_state[:, model.config.num_register_tokens + 1 :]
        expected_slice = torch.tensor([-0.0386, -0.2509, -0.0161, -0.4556, 0.5716], device=torch_device)
        torch.testing.assert_close(last_layer_patch_tokens[0, 0, :5], expected_slice, rtol=1e-4, atol=1e-4)
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/dinov3_vit/test_modeling_dinov3_vit.py",
"license": "Apache License 2.0",
"lines": 233,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:src/transformers/models/sam2/configuration_sam2.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""SAM2 model configuration"""
from ...configuration_utils import PreTrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING, AutoConfig
logger = logging.get_logger(__name__)
class Sam2HieraDetConfig(PreTrainedConfig):
    r"""
    Configuration class for a [`Sam2HieraDetModel`], the HieraDet image backbone defined in the
    original sam2 repository. Instantiating a configuration with the defaults yields a
    configuration similar to SAM 2.1 Hiera-tiny
    [facebook/sam2.1-hiera-tiny](https://huggingface.co/facebook/sam2.1-hiera-tiny).

    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model
    outputs. Read the documentation from [`PreTrainedConfig`] for more information.

    Args:
        hidden_size (`int`, *optional*, defaults to 96):
            The hidden dimension of the image encoder.
        num_attention_heads (`int`, *optional*, defaults to 1):
            Number of attention heads for each attention layer in the Transformer encoder.
        num_channels (`int`, *optional*, defaults to 3):
            The number of channels in the image.
        image_size (`list[int]`, *optional*, defaults to `[1024, 1024]`):
            The size of the image.
        patch_kernel_size (`list[int]`, *optional*, defaults to `[7, 7]`):
            The kernel size of the patch.
        patch_stride (`list[int]`, *optional*, defaults to `[4, 4]`):
            The stride of the patch.
        patch_padding (`list[int]`, *optional*, defaults to `[3, 3]`):
            The padding of the patch.
        query_stride (`list[int]`, *optional*, defaults to `[2, 2]`):
            The downsample stride between stages.
        window_positional_embedding_background_size (`list[int]`, *optional*, defaults to `[7, 7]`):
            The window size per stage when not using global attention.
        num_query_pool_stages (`int`, *optional*, defaults to 3):
            The number of query pool stages.
        blocks_per_stage (`list[int]`, *optional*, defaults to `[1, 2, 7, 2]`):
            The number of blocks per stage.
        embed_dim_per_stage (`list[int]`, *optional*, defaults to `[96, 192, 384, 768]`):
            The embedding dimension per stage.
        num_attention_heads_per_stage (`list[int]`, *optional*, defaults to `[1, 2, 4, 8]`):
            The number of attention heads per stage.
        window_size_per_stage (`list[int]`, *optional*, defaults to `[8, 4, 14, 7]`):
            The window size per stage.
        global_attention_blocks (`list[int]`, *optional*, defaults to `[5, 7, 9]`):
            The blocks where global attention is used.
        mlp_ratio (`float`, *optional*, defaults to 4.0):
            The ratio of the MLP hidden dimension to the embedding dimension.
        hidden_act (`str`, *optional*, defaults to `"gelu"`):
            The non-linear activation function in the neck.
        layer_norm_eps (`float`, *optional*, defaults to 1e-06):
            The epsilon for the layer normalization.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
    """

    base_config_key = "backbone_config"
    model_type = "sam2_hiera_det_model"

    def __init__(
        self,
        hidden_size=96,
        num_attention_heads=1,
        num_channels=3,
        image_size=None,
        patch_kernel_size=None,
        patch_stride=None,
        patch_padding=None,
        query_stride=None,
        window_positional_embedding_background_size=None,
        num_query_pool_stages=3,
        blocks_per_stage=None,
        embed_dim_per_stage=None,
        num_attention_heads_per_stage=None,
        window_size_per_stage=None,
        global_attention_blocks=None,
        mlp_ratio=4.0,
        hidden_act="gelu",
        layer_norm_eps=1e-6,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        # List-valued arguments default to None in the signature (mutable defaults are unsafe);
        # resolve them to the SAM 2.1 Hiera-tiny values here.
        if image_size is None:
            image_size = [1024, 1024]
        if patch_kernel_size is None:
            patch_kernel_size = [7, 7]
        if patch_stride is None:
            patch_stride = [4, 4]
        if patch_padding is None:
            patch_padding = [3, 3]
        if query_stride is None:
            query_stride = [2, 2]
        if window_positional_embedding_background_size is None:
            window_positional_embedding_background_size = [7, 7]
        if blocks_per_stage is None:
            blocks_per_stage = [1, 2, 7, 2]
        if embed_dim_per_stage is None:
            embed_dim_per_stage = [96, 192, 384, 768]
        if num_attention_heads_per_stage is None:
            num_attention_heads_per_stage = [1, 2, 4, 8]
        if window_size_per_stage is None:
            window_size_per_stage = [8, 4, 14, 7]
        if global_attention_blocks is None:
            global_attention_blocks = [5, 7, 9]

        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_kernel_size = patch_kernel_size
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.query_stride = query_stride
        self.window_positional_embedding_background_size = window_positional_embedding_background_size
        self.num_query_pool_stages = num_query_pool_stages
        self.blocks_per_stage = blocks_per_stage
        self.embed_dim_per_stage = embed_dim_per_stage
        self.num_attention_heads_per_stage = num_attention_heads_per_stage
        self.window_size_per_stage = window_size_per_stage
        self.global_attention_blocks = global_attention_blocks
        self.mlp_ratio = mlp_ratio
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
class Sam2VisionConfig(PreTrainedConfig):
    r"""
    Configuration class for a [`Sam2VisionModel`], i.e. the SAM2 vision encoder (HieraDet backbone
    plus FPN neck). Instantiating a configuration with the defaults yields a configuration similar
    to SAM 2.1 Hiera-tiny
    [facebook/sam2.1-hiera-tiny](https://huggingface.co/facebook/sam2.1-hiera-tiny).

    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model
    outputs. Read the documentation from [`PreTrainedConfig`] for more information.

    Args:
        backbone_config (`Union[dict, "PreTrainedConfig"]`, *optional*, defaults to `Sam2HieraDetConfig()`):
            Configuration for the vision backbone. This is used to instantiate the backbone using
            `AutoModel.from_config`.
        backbone_channel_list (`List[int]`, *optional*, defaults to `[768, 384, 192, 96]`):
            The list of channel dimensions for the backbone.
        backbone_feature_sizes (`List[List[int]]`, *optional*, defaults to `[[256, 256], [128, 128], [64, 64]]`):
            The spatial sizes of the feature maps from the backbone.
        fpn_hidden_size (`int`, *optional*, defaults to 256):
            The hidden dimension of the FPN.
        fpn_kernel_size (`int`, *optional*, defaults to 1):
            The kernel size for the convolutions in the neck.
        fpn_stride (`int`, *optional*, defaults to 1):
            The stride for the convolutions in the neck.
        fpn_padding (`int`, *optional*, defaults to 0):
            The padding for the convolutions in the neck.
        fpn_top_down_levels (`List[int]`, *optional*, defaults to `[2, 3]`):
            The levels for the top-down FPN connections.
        num_feature_levels (`int`, *optional*, defaults to 3):
            The number of feature levels from the FPN to use.
        hidden_act (`str`, *optional*, defaults to `"gelu"`):
            The non-linear activation function in the neck.
        layer_norm_eps (`float`, *optional*, defaults to 1e-06):
            The epsilon for the layer normalization.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
    """

    base_config_key = "vision_config"
    model_type = "sam2_vision_model"
    sub_configs = {
        "backbone_config": AutoConfig,
    }

    def __init__(
        self,
        backbone_config=None,
        backbone_channel_list=None,
        backbone_feature_sizes=None,
        fpn_hidden_size=256,
        fpn_kernel_size=1,
        fpn_stride=1,
        fpn_padding=0,
        fpn_top_down_levels=None,
        num_feature_levels=3,
        hidden_act="gelu",
        layer_norm_eps=1e-6,
        initializer_range=0.02,
        **kwargs,
    ):
        # Resolve mutable defaults.
        if backbone_channel_list is None:
            backbone_channel_list = [768, 384, 192, 96]
        if backbone_feature_sizes is None:
            backbone_feature_sizes = [[256, 256], [128, 128], [64, 64]]
        if fpn_top_down_levels is None:
            fpn_top_down_levels = [2, 3]

        # The backbone may arrive as a plain dict (re-instantiated through the config registry),
        # as an already-built config object (kept as-is), or as None (Hiera-tiny default).
        if isinstance(backbone_config, dict):
            backbone_config["model_type"] = backbone_config.get("model_type", "sam2_hiera_det_model")
            backbone_config = CONFIG_MAPPING[backbone_config["model_type"]](**backbone_config)
        elif backbone_config is None:
            backbone_config = Sam2HieraDetConfig()
        self.backbone_config = backbone_config

        # FPN neck parameters.
        self.backbone_channel_list = backbone_channel_list
        self.backbone_feature_sizes = backbone_feature_sizes
        self.fpn_hidden_size = fpn_hidden_size
        self.fpn_kernel_size = fpn_kernel_size
        self.fpn_stride = fpn_stride
        self.fpn_padding = fpn_padding
        self.fpn_top_down_levels = fpn_top_down_levels
        self.num_feature_levels = num_feature_levels
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        super().__init__(**kwargs)
class Sam2PromptEncoderConfig(PreTrainedConfig):
    r"""
    Configuration class for a [`Sam2PromptEncoder`], the module that encodes the input 2D points
    and bounding boxes into prompt embeddings.

    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model
    outputs. Read the documentation from [`PreTrainedConfig`] for more information.

    Args:
        hidden_size (`int`, *optional*, defaults to 256):
            Dimensionality of the hidden states.
        image_size (`int`, *optional*, defaults to 1024):
            The expected output resolution of the image.
        patch_size (`int`, *optional*, defaults to 16):
            The size (resolution) of each patch.
        mask_input_channels (`int`, *optional*, defaults to 16):
            The number of channels to be fed to the `MaskDecoder` module.
        num_point_embeddings (`int`, *optional*, defaults to 4):
            The number of point embeddings to be used.
        hidden_act (`str`, *optional*, defaults to `"gelu"`):
            The non-linear activation function in the encoder and pooler.
        layer_norm_eps (`float`, *optional*, defaults to 1e-06):
            The epsilon used by the layer normalization layers.
        scale (`float`, *optional*, defaults to 1):
            The scale factor for the prompt encoder.
    """

    base_config_key = "prompt_encoder_config"

    def __init__(
        self,
        hidden_size=256,
        image_size=1024,
        patch_size=16,
        mask_input_channels=16,
        num_point_embeddings=4,
        hidden_act="gelu",
        layer_norm_eps=1e-6,
        scale=1,
        **kwargs,
    ):
        super().__init__(**kwargs)
        # Plain value holder: store every argument on the instance, in signature order.
        for attribute, value in (
            ("hidden_size", hidden_size),
            ("image_size", image_size),
            ("patch_size", patch_size),
            ("mask_input_channels", mask_input_channels),
            ("num_point_embeddings", num_point_embeddings),
            ("hidden_act", hidden_act),
            ("layer_norm_eps", layer_norm_eps),
            ("scale", scale),
        ):
            setattr(self, attribute, value)
class Sam2MaskDecoderConfig(PreTrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`Sam2MaskDecoder`]. It is used to instantiate a SAM2
    mask decoder according to the specified arguments, defining the model architecture.

    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.

    Args:
        hidden_size (`int`, *optional*, defaults to 256):
            Dimensionality of the hidden states.
        hidden_act (`str`, *optional*, defaults to `"gelu"`):
            The non-linear activation function in the SAM2 mask decoder.
        mlp_dim (`int`, *optional*, defaults to 2048):
            The dimension of the MLP in the two-way transformer.
        num_hidden_layers (`int`, *optional*, defaults to 2):
            The number of hidden layers in the two-way transformer.
        num_attention_heads (`int`, *optional*, defaults to 8):
            The number of attention heads in the two-way transformer.
        attention_downsample_rate (`int`, *optional*, defaults to 2):
            The downsample rate for the attention layers.
        num_multimask_outputs (`int`, *optional*, defaults to 3):
            The number of multimask outputs.
        iou_head_depth (`int`, *optional*, defaults to 3):
            The depth of the IoU head.
        iou_head_hidden_dim (`int`, *optional*, defaults to 256):
            The hidden dimension of the IoU head.
        dynamic_multimask_via_stability (`bool`, *optional*, defaults to `True`):
            Whether to use dynamic multimask via stability.
        dynamic_multimask_stability_delta (`float`, *optional*, defaults to 0.05):
            The stability delta for the dynamic multimask.
        dynamic_multimask_stability_thresh (`float`, *optional*, defaults to 0.98):
            The stability threshold for the dynamic multimask.
    """

    base_config_key = "mask_decoder_config"

    def __init__(
        self,
        hidden_size=256,
        hidden_act="gelu",
        mlp_dim=2048,
        num_hidden_layers=2,
        num_attention_heads=8,
        attention_downsample_rate=2,
        num_multimask_outputs=3,
        iou_head_depth=3,
        iou_head_hidden_dim=256,
        dynamic_multimask_via_stability=True,
        dynamic_multimask_stability_delta=0.05,
        dynamic_multimask_stability_thresh=0.98,
        **kwargs,
    ):
        super().__init__(**kwargs)
        # Shared by the decoder heads and the two-way transformer (previously assigned twice).
        self.hidden_size = hidden_size
        self.num_multimask_outputs = num_multimask_outputs
        self.hidden_act = hidden_act
        self.iou_head_depth = iou_head_depth
        self.iou_head_hidden_dim = iou_head_hidden_dim
        self.dynamic_multimask_via_stability = dynamic_multimask_via_stability
        self.dynamic_multimask_stability_delta = dynamic_multimask_stability_delta
        self.dynamic_multimask_stability_thresh = dynamic_multimask_stability_thresh
        # TwoWayTransformer configuration
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_dim = mlp_dim
        self.attention_downsample_rate = attention_downsample_rate
class Sam2Config(PreTrainedConfig):
    r"""
    [`Sam2Config`] is the configuration class to store the configuration of a [`Sam2Model`]. It
    bundles the vision encoder, prompt encoder and mask decoder sub-configurations. Instantiating
    a configuration with the defaults yields a configuration similar to SAM 2.1 Hiera-tiny
    [facebook/sam2.1-hiera-tiny](https://huggingface.co/facebook/sam2.1-hiera-tiny).

    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model
    outputs. Read the documentation from [`PreTrainedConfig`] for more information.

    <Tip>

    SAM2 checkpoints with `model_type="sam2_video"` are compatible with `Sam2Model` since the video
    variant weights are a superset of the image-only model weights. You may see a warning about
    model type mismatch when loading such checkpoints, which can be safely ignored in this case.

    </Tip>

    Args:
        vision_config (Union[`dict`, `Sam2VisionConfig`], *optional*):
            Dictionary of configuration options used to initialize [`Sam2VisionConfig`].
        prompt_encoder_config (Union[`dict`, `Sam2PromptEncoderConfig`], *optional*):
            Dictionary of configuration options used to initialize [`Sam2PromptEncoderConfig`].
        mask_decoder_config (Union[`dict`, `Sam2MaskDecoderConfig`], *optional*):
            Dictionary of configuration options used to initialize [`Sam2MaskDecoderConfig`].
        initializer_range (`float`, *optional*, defaults to 0.02):
            Standard deviation for parameter initialization.

    Example:

    ```python
    >>> from transformers import (
    ...     Sam2VisionConfig,
    ...     Sam2PromptEncoderConfig,
    ...     Sam2MaskDecoderConfig,
    ...     Sam2Model,
    ... )

    >>> # Initializing a Sam2Config with `"facebook/sam2.1_hiera_tiny"` style configuration
    >>> configuration = Sam2Config()

    >>> # Initializing a Sam2Model (with random weights) from the `"facebook/sam2.1_hiera_tiny"` style configuration
    >>> model = Sam2Model(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config

    >>> # We can also initialize a Sam2Config from the individual sub-configurations
    >>> vision_config = Sam2VisionConfig()
    >>> prompt_encoder_config = Sam2PromptEncoderConfig()
    >>> mask_decoder_config = Sam2MaskDecoderConfig()
    >>> config = Sam2Config(vision_config, prompt_encoder_config, mask_decoder_config)
    ```"""

    model_type = "sam2"
    sub_configs = {
        "vision_config": AutoConfig,
        "prompt_encoder_config": Sam2PromptEncoderConfig,
        "mask_decoder_config": Sam2MaskDecoderConfig,
    }

    def __init__(
        self,
        vision_config=None,
        prompt_encoder_config=None,
        mask_decoder_config=None,
        initializer_range=0.02,
        **kwargs,
    ):
        # Vision: a dict is resolved through the config registry, a ready config object is kept.
        if vision_config is None:
            vision_config = {}
        if isinstance(vision_config, dict):
            vision_config["model_type"] = vision_config.get("model_type", "sam2_vision_model")
            vision_config = CONFIG_MAPPING[vision_config["model_type"]](**vision_config)
        self.vision_config = vision_config

        # Prompt encoder and mask decoder: normalize to plain kwargs, then rebuild the sub-config.
        if prompt_encoder_config is None:
            prompt_encoder_config = {}
        if isinstance(prompt_encoder_config, Sam2PromptEncoderConfig):
            prompt_encoder_config = prompt_encoder_config.to_dict()
        self.prompt_encoder_config = Sam2PromptEncoderConfig(**prompt_encoder_config)

        if mask_decoder_config is None:
            mask_decoder_config = {}
        if isinstance(mask_decoder_config, Sam2MaskDecoderConfig):
            mask_decoder_config = mask_decoder_config.to_dict()
        self.mask_decoder_config = Sam2MaskDecoderConfig(**mask_decoder_config)

        self.initializer_range = initializer_range
        super().__init__(**kwargs)
# Public API of this module: the composite config plus each sub-config.
__all__ = [
    "Sam2Config",
    "Sam2HieraDetConfig",
    "Sam2VisionConfig",
    "Sam2PromptEncoderConfig",
    "Sam2MaskDecoderConfig",
]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/sam2/configuration_sam2.py",
"license": "Apache License 2.0",
"lines": 399,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/sam2/convert_sam2_to_hf.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Convert SAM checkpoints from the original repository.
URL: https://github.com/facebookresearch/segment-anything-2.
"""
import argparse
import re
from io import BytesIO
import httpx
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
Sam2Config,
Sam2HieraDetConfig,
Sam2ImageProcessorFast,
Sam2MaskDecoderConfig,
Sam2Model,
Sam2Processor,
Sam2PromptEncoderConfig,
Sam2VisionConfig,
)
def get_config(model_name):
    """Build the HF `Sam2Config` matching an original SAM2 checkpoint name.

    Args:
        model_name: Checkpoint name such as "sam2.1_hiera_tiny". Must contain one of the known
            backbone sizes: "hiera_tiny", "hiera_small", "hiera_base_plus" or "hiera_large".

    Returns:
        A `Sam2Config` with the backbone/vision dimensions of that checkpoint. "sam2.1" names
        additionally enable temporal position encoding for object pointers and the occlusion
        spatial embedding.

    Raises:
        ValueError: If `model_name` does not reference a known backbone size.
    """
    if "hiera_tiny" in model_name:
        hiera_det_config = Sam2HieraDetConfig()
        vision_config = Sam2VisionConfig(backbone_config=hiera_det_config)
    elif "hiera_small" in model_name:
        hiera_det_config = Sam2HieraDetConfig(blocks_per_stage=[1, 2, 11, 2], global_attention_blocks=[7, 10, 13])
        vision_config = Sam2VisionConfig(backbone_config=hiera_det_config)
    elif "hiera_base_plus" in model_name:
        hiera_det_config = Sam2HieraDetConfig(
            hidden_size=112,
            embed_dim_per_stage=[112, 224, 448, 896],
            num_attention_heads_per_stage=[2, 4, 8, 16],
            blocks_per_stage=[2, 3, 16, 3],
            global_attention_blocks=[12, 16, 20],
            window_positional_embedding_background_size=(14, 14),
        )
        vision_config = Sam2VisionConfig(
            backbone_config=hiera_det_config,
            backbone_channel_list=[896, 448, 224, 112],
        )
    elif "hiera_large" in model_name:
        hiera_det_config = Sam2HieraDetConfig(
            hidden_size=144,
            embed_dim_per_stage=[144, 288, 576, 1152],
            num_attention_heads_per_stage=[2, 4, 8, 16],
            blocks_per_stage=[2, 6, 36, 4],
            global_attention_blocks=[23, 33, 43],
            window_positional_embedding_background_size=(7, 7),
            window_size_per_stage=[8, 4, 16, 8],
        )
        vision_config = Sam2VisionConfig(
            backbone_config=hiera_det_config,
            backbone_channel_list=[1152, 576, 288, 144],
        )
    else:
        # Previously an unknown name fell through and crashed later with an
        # UnboundLocalError on `vision_config`; fail fast with a clear message instead.
        raise ValueError(
            f"Unknown model name {model_name!r}; expected it to contain one of "
            "'hiera_tiny', 'hiera_small', 'hiera_base_plus', 'hiera_large'."
        )

    prompt_encoder_config = Sam2PromptEncoderConfig()
    mask_decoder_config = Sam2MaskDecoderConfig()

    # SAM 2.1 checkpoints carry two extra capabilities absent from the original SAM 2 weights.
    is_sam2_1 = "sam2.1" in model_name
    enable_temporal_pos_encoding_for_object_pointers = is_sam2_1
    enable_occlusion_spatial_embedding = is_sam2_1

    config = Sam2Config(
        vision_config=vision_config,
        prompt_encoder_config=prompt_encoder_config,
        mask_decoder_config=mask_decoder_config,
        enable_temporal_pos_encoding_for_object_pointers=enable_temporal_pos_encoding_for_object_pointers,
        enable_occlusion_spatial_embedding=enable_occlusion_spatial_embedding,
    )
    return config
# Plain substring rewrites applied (in insertion order) to every original checkpoint key before
# the regex-based fixups in `replace_keys`. Order matters: a more specific substring must come
# before its prefix (e.g. "image_encoder.neck" before "image_encoder"), otherwise the prefix rule
# would fire first and the specific rule could never match.
KEYS_TO_MODIFY_MAPPING = {
    "iou_prediction_head.layers.0": "iou_prediction_head.proj_in",
    "iou_prediction_head.layers.1": "iou_prediction_head.layers.0",
    "iou_prediction_head.layers.2": "iou_prediction_head.proj_out",
    "mask_decoder.output_upscaling.0": "mask_decoder.upscale_conv1",
    "mask_decoder.output_upscaling.1": "mask_decoder.upscale_layer_norm",
    "mask_decoder.output_upscaling.3": "mask_decoder.upscale_conv2",
    "mask_downscaling.0": "mask_embed.conv1",
    "mask_downscaling.1": "mask_embed.layer_norm1",
    "mask_downscaling.3": "mask_embed.conv2",
    "mask_downscaling.4": "mask_embed.layer_norm2",
    "mask_downscaling.6": "mask_embed.conv3",
    "dwconv": "depthwise_conv",
    "pwconv": "pointwise_conv",
    "fuser": "memory_fuser",
    "point_embeddings": "point_embed",
    "pe_layer.positional_encoding_gaussian_matrix": "shared_embedding.positional_embedding",
    "obj_ptr_tpos_proj": "temporal_positional_encoding_projection_layer",
    "no_obj_embed_spatial": "occlusion_spatial_embedding_parameter",
    "sam_prompt_encoder": "prompt_encoder",
    "sam_mask_decoder": "mask_decoder",
    "maskmem_tpos_enc": "memory_temporal_positional_encoding",
    "gamma": "scale",
    # "image_encoder.neck" must precede the bare "image_encoder" rule below.
    "image_encoder.neck": "vision_encoder.neck",
    "image_encoder": "vision_encoder.backbone",
    "neck.0": "neck.conv1",
    "neck.1": "neck.layer_norm1",
    "neck.2": "neck.conv2",
    "neck.3": "neck.layer_norm2",
    "pix_feat_proj": "feature_projection",
    "patch_embed.proj": "patch_embed.projection",
    "no_mem_embed": "no_memory_embedding",
    "no_mem_pos_enc": "no_memory_positional_encoding",
    "obj_ptr": "object_pointer",
    ".norm": ".layer_norm",
    "trunk.": "",
    "out_proj": "o_proj",
}
def replace_keys(state_dict):
    """Translate original SAM2 checkpoint keys to the HF `Sam2Model` naming scheme.

    Each key goes through two passes: the plain substring rewrites from
    `KEYS_TO_MODIFY_MAPPING`, then regex-based renames for MLP/head layers whose naming
    differs in the HF layout (first layer -> `proj_in`, last layer -> `proj_out`, middle
    layers shifted down). Finally the four point embeddings are stacked into one weight and
    the shared positional embedding is duplicated under a top-level module name.

    Args:
        state_dict: The original checkpoint's state dict.

    Returns:
        A new state dict with HF-compatible keys.
    """
    model_state_dict = {}
    # NOTE(review): the dots in these patterns are unescaped, so they match any character;
    # this works for the key names at hand, but `\.` would be stricter.
    output_hypernetworks_mlps_pattern = r".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"
    output_mask_decoder_mlps_pattern = r"mask_decoder.transformer.layers.(\d+).mlp.layers.(\d+).*"
    output_mask_decoder_score_head_pattern = r"mask_decoder.pred_obj_score_head.layers.(\d+).*"
    output_vision_encoder_mlps_pattern = r"vision_encoder.backbone.blocks.(\d+).mlp.layers.(\d+).*"
    output_vision_encoder_neck_pattern = r"vision_encoder.neck.convs.(\d+).conv"
    output_memory_encoder_projection_pattern = r"memory_encoder.o_proj.*"
    output_object_pointer_proj_pattern = r"object_pointer_proj.layers.(\d+).*"
    for key, value in state_dict.items():
        # First pass: plain substring renames, applied in mapping order.
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)
        # vision_encoder.blocks.0.mlp.layers.1.weight -> vision_encoder.blocks.0.mlp.proj_out.weight
        if re.match(output_vision_encoder_mlps_pattern, key):
            layer_nb = int(re.match(output_vision_encoder_mlps_pattern, key).group(2))
            if layer_nb == 0:
                key = key.replace("layers.0", "proj_in")
            elif layer_nb == 1:
                key = key.replace("layers.1", "proj_out")
        # mask_decoder.transformer.layers.0.mlp.layers.1.weight -> mask_decoder.transformer.layers.1.mlp.proj_out.weight
        if re.match(output_mask_decoder_mlps_pattern, key):
            layer_nb = int(re.match(output_mask_decoder_mlps_pattern, key).group(2))
            if layer_nb == 0:
                key = key.replace("mlp.layers.0", "mlp.proj_in")
            elif layer_nb == 1:
                key = key.replace("mlp.layers.1", "mlp.proj_out")
        # mask_decoder.pred_obj_score_head.layers.1.weight -> mask_decoder.pred_obj_score_head.proj_in.weight
        if re.match(output_mask_decoder_score_head_pattern, key):
            layer_nb = int(re.match(output_mask_decoder_score_head_pattern, key).group(1))
            if layer_nb == 0:
                key = key.replace("layers.0", "proj_in")
            elif layer_nb == 1:
                key = key.replace("layers.1", "layers.0")
            elif layer_nb == 2:
                key = key.replace("layers.2", "proj_out")
        # 3-layer hypernetwork MLPs: ends renamed, middle layer shifted down by one.
        if re.match(output_hypernetworks_mlps_pattern, key):
            layer_nb = int(re.match(output_hypernetworks_mlps_pattern, key).group(2))
            if layer_nb == 0:
                key = key.replace("layers.0", "proj_in")
            elif layer_nb == 1:
                key = key.replace("layers.1", "layers.0")
            elif layer_nb == 2:
                key = key.replace("layers.2", "proj_out")
        # vision_encoder.neck.convs.1.conv.bias -> vision_encoder.neck.convs.1.bias
        if re.match(output_vision_encoder_neck_pattern, key):
            key = key.replace(".conv.", ".")
        # memory_encoder.out_proj.weight -> memory_encoder.projection.weight
        if re.match(output_memory_encoder_projection_pattern, key):
            key = key.replace(".o_proj.", ".projection.")
        # object pointer projection MLP: same 3-layer renaming as above.
        if re.match(output_object_pointer_proj_pattern, key):
            layer_nb = int(re.match(output_object_pointer_proj_pattern, key).group(1))
            if layer_nb == 0:
                key = key.replace("layers.0", "proj_in")
            elif layer_nb == 1:
                key = key.replace("layers.1", "layers.0")
            elif layer_nb == 2:
                key = key.replace("layers.2", "proj_out")
        model_state_dict[key] = value

    # The top-level shared embedding aliases the prompt encoder's positional embedding.
    model_state_dict["shared_image_embedding.positional_embedding"] = model_state_dict[
        "prompt_encoder.shared_embedding.positional_embedding"
    ]
    # Stack the four per-type point embeddings into a single embedding weight.
    model_state_dict["prompt_encoder.point_embed.weight"] = torch.cat(
        [model_state_dict.pop(f"prompt_encoder.point_embed.{i}.weight") for i in range(4)],
        dim=0,
    )
    return model_state_dict
def convert_sam2_checkpoint(model_name, checkpoint_path, pytorch_dump_folder, push_to_hub):
    """Convert an original SAM 2 checkpoint to the Hugging Face format and verify it.

    Loads the original weights, remaps their keys, runs a sanity-check inference on a
    reference image/point prompt, and optionally saves the converted model/processor
    locally and/or pushes them to the Hub.

    Args:
        model_name (`str`): Original model variant name, e.g. `"sam2.1_hiera_tiny"`.
        checkpoint_path (`str`): Path to the original `.pt` checkpoint.
        pytorch_dump_folder (`str`): Output folder for the converted files.
        push_to_hub (`bool`): Whether to push the converted artifacts to the Hub.

    Raises:
        ValueError: If the state dict has missing/unexpected keys, the model name is
            unknown, or the produced IoU scores deviate from the reference values.
    """
    config = get_config(model_name)

    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    state_dict = replace_keys(state_dict)

    image_processor = Sam2ImageProcessorFast()
    processor = Sam2Processor(image_processor=image_processor)
    hf_model = Sam2Model(config)
    hf_model.eval()

    device = "cuda" if torch.cuda.is_available() else "cpu"

    missing_keys, unexpected_keys = hf_model.load_state_dict(state_dict, strict=False)
    hf_model = hf_model.to(device)
    # Keys matching `_keys_to_ignore_on_load_unexpected` are deliberately absent from
    # the HF model, so filter them out before deciding whether the load failed.
    for pattern in Sam2Model._keys_to_ignore_on_load_unexpected:
        unexpected_keys = [k for k in unexpected_keys if re.search(pattern, k) is None]
    if missing_keys or unexpected_keys:
        print("Missing keys:", missing_keys)
        print("Unexpected keys:", unexpected_keys)
        raise ValueError("Missing or unexpected keys in the state dict")

    # Reference image and point prompt used to validate the conversion numerically.
    url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
    with httpx.stream("GET", url) as response:
        raw_image = Image.open(BytesIO(response.read())).convert("RGB")
    input_points = [[[[1000, 600]]]]
    input_labels = [[[1]]]

    inputs = processor(
        images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
    ).to(device)
    with torch.no_grad():
        output = hf_model(**inputs)
    scores = output.iou_scores.squeeze()

    # Reference IoU scores produced by the original implementation for the prompt above.
    expected_iou_scores = {
        "sam2.1_hiera_tiny": [0.0316, 0.9647, 0.1029],
        "sam2.1_hiera_small": [0.9664, 0.1494, 0.0456],
        "sam2.1_hiera_base_plus": [0.0361, 0.9775, 0.1307],
        "sam2.1_hiera_large": [0.9648, 0.0371, 0.1898],
        "sam2_hiera_tiny": [0.0439, 0.9567, 0.1415],
        "sam2_hiera_small": [0.9593, 0.1633, 0.0392],
        "sam2_hiera_base_plus": [0.0423, 0.9815, 0.0897],
        "sam2_hiera_large": [0.9514, 0.0535, 0.1787],
    }
    if model_name not in expected_iou_scores:
        raise ValueError(f"Model {model_name} not supported")
    # BUGFIX: the expected tensors were previously built with a hard-coded `.cuda()`,
    # which crashed on CPU-only hosts even though the code falls back to CPU above.
    # Build them on the same device as `scores` instead, and raise (not assert) so the
    # check survives `python -O`.
    expected = torch.tensor(expected_iou_scores[model_name], device=scores.device)
    if not torch.allclose(scores, expected, atol=1e-2):
        raise ValueError(f"IoU scores {scores.tolist()} do not match expected {expected.tolist()}")

    if pytorch_dump_folder is not None:
        processor.save_pretrained(pytorch_dump_folder)
        hf_model.save_pretrained(pytorch_dump_folder)

    if push_to_hub:
        repo_id = f"danelcsb/{pytorch_dump_folder.split('/')[-1]}"
        processor.push_to_hub(repo_id)
        hf_model.push_to_hub(repo_id)
if __name__ == "__main__":
    # CLI entry point: parse arguments, resolve the checkpoint location, then convert.
    parser = argparse.ArgumentParser()
    model_choices = [
        "sam2.1_hiera_tiny",
        "sam2.1_hiera_small",
        "sam2.1_hiera_base_plus",
        "sam2.1_hiera_large",
        "sam2_hiera_tiny",
        "sam2_hiera_small",
        "sam2_hiera_base_plus",
        "sam2_hiera_large",
    ]
    parser.add_argument(
        "--model_name",
        default="sam2.1_hiera_tiny",
        choices=model_choices,
        type=str,
        help="Name of the original model to convert",
    )
    parser.add_argument(
        "--checkpoint_path",
        type=str,
        required=False,
        help="Path to the original checkpoint",
    )
    parser.add_argument("--pytorch_dump_folder_path", default="", type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub after converting",
    )
    args = parser.parse_args()

    if args.checkpoint_path is not None:
        resolved_checkpoint = args.checkpoint_path
    else:
        # No local checkpoint given: fetch the original weights from the Hub
        # (repo names use dashes where the model names use underscores).
        hub_repo_id = "facebook/" + args.model_name.replace("_", "-")
        resolved_checkpoint = hf_hub_download(hub_repo_id, f"{args.model_name.lower()}.pt")

    convert_sam2_checkpoint(args.model_name, resolved_checkpoint, args.pytorch_dump_folder_path, args.push_to_hub)
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/sam2/convert_sam2_to_hf.py",
"license": "Apache License 2.0",
"lines": 283,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/sam2/modular_sam2.py | # Copyright 2025 The Meta AI Authors and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch SAM 2 model."""
from collections.abc import Callable
from dataclasses import dataclass
from typing import Union
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from ... import initialization as init
from ...activations import ACT2FN
from ...image_processing_utils import BatchFeature, get_size_dict
from ...image_processing_utils_fast import BaseImageProcessorFast
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
SizeDict,
pil_torch_interpolation_mapping,
)
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import BaseModelOutputWithPooling
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import ImagesKwargs, Unpack
from ...utils import ModelOutput, TensorType, auto_docstring, can_return_tuple, logging
from ...utils.generic import (
TransformersKwargs,
is_flash_attention_requested,
merge_with_config_defaults,
)
from ...utils.output_capturing import capture_outputs
from ..auto import AutoModel
from ..maskformer.modeling_maskformer import MaskFormerSinePositionEmbedding
from ..sam.image_processing_sam_fast import SamImageProcessorFast
from ..sam.modeling_sam import (
SamLayerNorm,
SamMaskDecoder,
SamMaskEmbedding,
SamModel,
SamPromptEncoder,
SamTwoWayAttentionBlock,
SamTwoWayTransformer,
eager_attention_forward,
)
from ..vitdet.modeling_vitdet import window_partition, window_unpartition
from .configuration_sam2 import (
Sam2Config,
Sam2HieraDetConfig,
Sam2MaskDecoderConfig,
Sam2PromptEncoderConfig,
Sam2VisionConfig,
)
logger = logging.get_logger(__name__)
class Sam2FastImageProcessorKwargs(ImagesKwargs, total=False):
    r"""
    mask_size (`dict[str, int]`, *optional*):
        The size `{"height": int, "width": int}` to resize the segmentation maps to.
    """

    # `total=False` above makes every key optional in this TypedDict-style kwargs
    # declaration consumed by the fast image processor.
    mask_size: dict[str, int]
@auto_docstring
class Sam2ImageProcessorFast(SamImageProcessorFast):
    # Fast (torch-backed) image processor for SAM 2. Images are resized to a fixed
    # square, rescaled, and normalized with ImageNet statistics; unlike the SAM
    # processor it inherits from, no padding is applied (see "modular artefacts").
    resample = PILImageResampling.BILINEAR
    image_mean = IMAGENET_DEFAULT_MEAN
    image_std = IMAGENET_DEFAULT_STD
    size = {"height": 1024, "width": 1024}
    # Default target size used when resizing segmentation-map labels.
    mask_size = {"height": 256, "width": 256}
    do_resize = True
    do_rescale = True
    do_normalize = True
    do_convert_rgb = True
    valid_kwargs = Sam2FastImageProcessorKwargs
    # modular artefacts
    # (SAM padding attributes that SAM 2 does not use; set to None so they are inert)
    do_pad = None
    pad_size = None
    mask_pad_size = None

    def __init__(self, **kwargs: Unpack[Sam2FastImageProcessorKwargs]):
        # Intentionally bypasses SamImageProcessorFast.__init__ so none of SAM's
        # padding-related setup runs; only the base fast-processor setup is needed.
        BaseImageProcessorFast.__init__(self, **kwargs)

    def _preprocess(
        self,
        images: list["torch.Tensor"],
        return_tensors: str | TensorType | None,
        **kwargs,
    ) -> "torch.Tensor":
        # Delegate to the base fast processor and keep only the pixel values; callers
        # assemble their own BatchFeature (see `_preprocess_image_like_inputs`).
        return BaseImageProcessorFast._preprocess(self, images, return_tensors=return_tensors, **kwargs).pixel_values

    @auto_docstring
    def preprocess(
        self,
        images: ImageInput,
        segmentation_maps: ImageInput | None = None,
        **kwargs: Unpack[Sam2FastImageProcessorKwargs],
    ) -> BatchFeature:
        r"""
        segmentation_maps (`ImageInput`, *optional*):
            The segmentation maps to preprocess.
        """
        return super().preprocess(images, segmentation_maps, **kwargs)

    def _preprocess_image_like_inputs(
        self,
        images: ImageInput,
        segmentation_maps: ImageInput | None,
        do_convert_rgb: bool,
        input_data_format: ChannelDimension,
        device: Union[str, "torch.device"] | None = None,
        **kwargs: Unpack[Sam2FastImageProcessorKwargs],
    ) -> BatchFeature:
        """
        Preprocess image-like inputs.
        """
        images = self._prepare_image_like_inputs(
            images=images, do_convert_rgb=do_convert_rgb, input_data_format=input_data_format, device=device
        )
        # Record pre-resize (height, width) so predicted masks can later be mapped
        # back to the original image resolution (see `post_process_masks`).
        original_sizes = [image.shape[-2:] for image in images]
        images_kwargs = kwargs.copy()
        pixel_values = self._preprocess(images, **images_kwargs)
        data = {
            "pixel_values": pixel_values,
            "original_sizes": original_sizes,
        }
        if segmentation_maps is not None:
            processed_segmentation_maps = self._prepare_image_like_inputs(
                images=segmentation_maps,
                expected_ndims=2,
                do_convert_rgb=False,
                input_data_format=ChannelDimension.FIRST,
            )
            # Labels must not be rescaled or normalized, and are resized with
            # nearest-neighbor interpolation (to `mask_size`) so class ids survive.
            segmentation_maps_kwargs = kwargs.copy()
            segmentation_maps_kwargs.update(
                {
                    "do_normalize": False,
                    "do_rescale": False,
                    "interpolation": pil_torch_interpolation_mapping[PILImageResampling.NEAREST],
                    "size": segmentation_maps_kwargs.pop("mask_size"),
                }
            )
            processed_segmentation_maps = self._preprocess(
                images=processed_segmentation_maps, **segmentation_maps_kwargs
            )
            data["labels"] = processed_segmentation_maps.squeeze(1).to(torch.int64)
        return BatchFeature(data=data, tensor_type=kwargs["return_tensors"])

    def _further_process_kwargs(
        self,
        size: SizeDict | None = None,
        mask_size: SizeDict | None = None,
        default_to_square: bool | None = None,
        image_mean: float | list[float] | None = None,
        image_std: float | list[float] | None = None,
        data_format: ChannelDimension | None = None,
        **kwargs,
    ) -> dict:
        """
        Update kwargs that need further processing before being validated
        Can be overridden by subclasses to customize the processing of kwargs.
        """
        if kwargs is None:
            kwargs = {}
        if size is not None:
            size = SizeDict(**get_size_dict(size=size, default_to_square=default_to_square))
        if mask_size is not None:
            size = SizeDict(**get_size_dict(mask_size, param_name="mask_size")) if False else size  # noqa  (placeholder, see below)
        if isinstance(image_mean, list):
            image_mean = tuple(image_mean)
        if isinstance(image_std, list):
            image_std = tuple(image_std)
        if data_format is None:
            data_format = ChannelDimension.FIRST
        kwargs["size"] = size
        kwargs["mask_size"] = mask_size
        kwargs["image_mean"] = image_mean
        kwargs["image_std"] = image_std
        kwargs["data_format"] = data_format
        # torch resize uses interpolation instead of resample
        # Check if resample is an int before checking if it's an instance of PILImageResampling
        # because if pillow < 9.1.0, resample is an int and PILImageResampling is a module.
        # Checking PILImageResampling will fail with error `TypeError: isinstance() arg 2 must be a type or tuple of types`.
        resample = kwargs.pop("resample")
        kwargs["interpolation"] = (
            pil_torch_interpolation_mapping[resample] if isinstance(resample, (PILImageResampling, int)) else resample
        )
        return kwargs

    def _apply_non_overlapping_constraints(self, pred_masks: torch.Tensor) -> torch.Tensor:
        """
        Apply non-overlapping constraints to the object scores in pred_masks. Here we
        keep only the highest scoring object at each spatial location in pred_masks.
        """
        batch_size = pred_masks.size(0)
        if batch_size == 1:
            return pred_masks
        device = pred_masks.device
        # "max_obj_inds": object index of the object with the highest score at each location
        max_obj_inds = torch.argmax(pred_masks, dim=0, keepdim=True)
        # "batch_obj_inds": object index of each object slice (along dim 0) in `pred_masks`
        batch_obj_inds = torch.arange(batch_size, device=device)[:, None, None, None]
        keep = max_obj_inds == batch_obj_inds
        # suppress overlapping regions' scores below -10.0 so that the foreground regions
        # don't overlap (here sigmoid(-10.0)=4.5398e-05)
        pred_masks = torch.where(keep, pred_masks, torch.clamp(pred_masks, max=-10.0))
        return pred_masks

    def post_process_masks(
        self,
        masks,
        original_sizes,
        mask_threshold=0.0,
        binarize=True,
        max_hole_area=0.0,
        max_sprinkle_area=0.0,
        apply_non_overlapping_constraints=False,
        **kwargs,
    ):
        """
        Remove padding and upscale masks to the original image size.
        Args:
            masks (`Union[torch.Tensor, List[torch.Tensor], np.ndarray, List[np.ndarray]]`):
                Batched masks from the mask_decoder in (batch_size, num_channels, height, width) format.
            original_sizes (`Union[torch.Tensor, List[Tuple[int,int]]]`):
                The original sizes of each image before it was resized to the model's expected input shape, in (height,
                width) format.
            mask_threshold (`float`, *optional*, defaults to 0.0):
                Threshold for binarization and post-processing operations.
            binarize (`bool`, *optional*, defaults to `True`):
                Whether to binarize the masks.
            max_hole_area (`float`, *optional*, defaults to 0.0):
                The maximum area of a hole to fill.
            max_sprinkle_area (`float`, *optional*, defaults to 0.0):
                The maximum area of a sprinkle to fill.
            apply_non_overlapping_constraints (`bool`, *optional*, defaults to `False`):
                Whether to apply non-overlapping constraints to the masks.
        Returns:
            (`torch.Tensor`): Batched masks in batch_size, num_channels, height, width) format, where (height, width)
            is given by original_size.
        """
        if isinstance(original_sizes, (torch.Tensor, np.ndarray)):
            original_sizes = original_sizes.tolist()
        # TODO: add connected components kernel for postprocessing
        # NOTE(review): `max_hole_area` / `max_sprinkle_area` are currently unused
        # (pending the connected-components kernel above).
        output_masks = []
        for i, original_size in enumerate(original_sizes):
            if isinstance(masks[i], np.ndarray):
                masks[i] = torch.from_numpy(masks[i])
            elif not isinstance(masks[i], torch.Tensor):
                raise TypeError("Input masks should be a list of `torch.tensors` or a list of `np.ndarray`")
            # Upscale the low-resolution mask logits back to the original image size.
            interpolated_mask = F.interpolate(masks[i], original_size, mode="bilinear", align_corners=False)
            if apply_non_overlapping_constraints:
                interpolated_mask = self._apply_non_overlapping_constraints(interpolated_mask)
            if binarize:
                interpolated_mask = interpolated_mask > mask_threshold
            output_masks.append(interpolated_mask)
        return output_masks

    def _get_preprocess_shape(self):
        # SAM's longest-side resize logic does not apply: SAM 2 always uses a fixed square size.
        raise NotImplementedError("No _get_preprocess_shape for SAM 2.")

    def resize(self):
        # The inherited fast-processor resize is sufficient; SAM's override is disabled.
        raise NotImplementedError("No need to override resize for SAM 2.")
@dataclass
@auto_docstring(custom_intro="Base class for the vision encoder's outputs.")
class Sam2VisionEncoderOutput(BaseModelOutputWithPooling):
    r"""
    last_hidden_state (`torch.FloatTensor` of shape `(batch_size, height, width, hidden_size)`):
        Sequence of hidden-states at the output of the last layer of the model.
    hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
        Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
        one for the output of each stage) of shape `(batch_size, height, width, hidden_size)`. Hidden-states of the
        model at the output of each stage.
    attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
        Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
        sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
        the self-attention heads.
    fpn_hidden_states (`tuple(torch.FloatTensor)`):
        Tuple of `torch.FloatTensor` (one for each feature level, from high to low resolution) of shape
        `(batch_size, hidden_size, height, width)`. Feature maps from the Feature Pyramid Network neck.
    fpn_position_encoding (`tuple(torch.FloatTensor)`):
        Tuple of `torch.FloatTensor` (one for each feature level, from high to low resolution) of shape
        `(batch_size, hidden_size, height, width)`. Positional encodings corresponding to the `fpn_hidden_states`.
    """

    # Extra fields on top of BaseModelOutputWithPooling: FPN feature maps and their
    # positional encodings, ordered from high to low resolution.
    fpn_hidden_states: torch.FloatTensor | None = None
    fpn_position_encoding: torch.FloatTensor | None = None
@dataclass
@auto_docstring(custom_intro="Base class for the Sam2 model's output.")
class Sam2ImageSegmentationOutput(ModelOutput):
    r"""
    iou_scores (`torch.FloatTensor` of shape `(batch_size, point_batch_size, num_masks)`):
        The Intersection over Union (IoU) scores of the predicted masks.
    pred_masks (`torch.FloatTensor` of shape `(batch_size, point_batch_size, num_masks, height, width)`):
        The predicted low-resolution masks. This is an alias for `low_res_masks`. These masks need to be post-processed
        by the processor to be brought to the original image size.
    object_score_logits (`torch.FloatTensor` of shape `(batch_size, point_batch_size, 1)`):
        Logits for the object score, indicating if an object is present.
    image_embeddings (`tuple(torch.FloatTensor)`):
        The features from the FPN, which are used by the mask decoder. This is a tuple of `torch.FloatTensor` where each
        tensor has shape `(batch_size, channels, height, width)`.
    vision_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True`):
        Tuple of `torch.FloatTensor` (one for the output of each stage) of shape `(batch_size, height, width, hidden_size)`.
        Hidden-states of the vision model at the output of each stage.
    vision_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True`):
        Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`.
        Attentions weights of the vision model.
    mask_decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True`):
        Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`.
        Attentions weights of the mask decoder.
    """

    iou_scores: torch.FloatTensor | None = None
    pred_masks: torch.FloatTensor | None = None
    object_score_logits: torch.FloatTensor | None = None
    # Annotation widened to `| None` to match the `None` default (was non-optional),
    # consistent with the sibling fields below.
    image_embeddings: tuple[torch.FloatTensor, ...] | None = None
    vision_hidden_states: tuple[torch.FloatTensor, ...] | None = None
    vision_attentions: tuple[torch.FloatTensor, ...] | None = None
    mask_decoder_attentions: tuple[torch.FloatTensor, ...] | None = None
class Sam2PatchEmbeddings(nn.Module):
    r"""
    Converts pixel values into patch embeddings via a strided convolution.

    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using
            [`AutoImageProcessor`]. See [`Sam2ImageProcessorFast.__call__`] for details.
    Returns:
        embeddings (`torch.FloatTensor`):
            Patch embeddings depend on image_size, patch_kernel_size, patch_stride and patch_padding
    """

    def __init__(self, config: Sam2HieraDetConfig):
        super().__init__()
        # A single strided convolution implements the patchification step.
        self.projection = nn.Conv2d(
            config.num_channels,
            config.hidden_size,
            kernel_size=config.patch_kernel_size,
            stride=config.patch_stride,
            padding=config.patch_padding,
        )

    def forward(self, pixel_values):
        # Unpacking asserts a 4-D (batch, channels, height, width) input.
        batch_size, channels, height, width = pixel_values.shape
        projected = self.projection(pixel_values.to(self.projection.weight.dtype))
        # (B, C, H', W') -> (B, H', W', C): channels-last layout for the backbone.
        return projected.permute(0, 2, 3, 1)
class Sam2SinePositionEmbedding(MaskFormerSinePositionEmbedding):
    # Modular-transformers alias: identical to MaskFormer's sine position embedding.
    pass
class Sam2VisionNeck(nn.Module):
    """FPN-style neck: projects each backbone stage to a common channel size and fuses
    levels top-down (from low to high resolution), also emitting sine position encodings.
    """

    def __init__(self, config: Sam2VisionConfig):
        super().__init__()
        self.config = config
        self.position_encoding = Sam2SinePositionEmbedding(num_pos_feats=config.fpn_hidden_size // 2, normalize=True)
        # One lateral conv per backbone level, following `backbone_channel_list` order.
        self.convs = nn.ModuleList()
        for in_channels in config.backbone_channel_list:
            self.convs.append(
                nn.Conv2d(
                    in_channels=in_channels,
                    out_channels=config.fpn_hidden_size,
                    kernel_size=config.fpn_kernel_size,
                    stride=config.fpn_stride,
                    padding=config.fpn_padding,
                ),
            )
        # Levels that receive a top-down (upsampled) contribution from the coarser level.
        self.fpn_top_down_levels = config.fpn_top_down_levels

    def forward(self, hidden_states: torch.Tensor) -> tuple[tuple[torch.Tensor, ...], tuple[torch.Tensor, ...]]:
        fpn_hidden_states = ()
        fpn_position_encoding = ()
        # forward in top-down order (from low to high resolution)
        n = len(self.convs) - 1
        for i in range(n, -1, -1):
            lateral_features = hidden_states[i].permute(0, 3, 1, 2)
            # `convs` indexing is reversed relative to `hidden_states` (presumably because
            # `backbone_channel_list` is ordered deepest-first — confirm against config).
            # BUGFIX: take the weight dtype from the conv that is actually applied;
            # previously it was read from `self.convs[i]`, a different module.
            conv = self.convs[n - i]
            lateral_features = conv(lateral_features.to(conv.weight.dtype))
            if i not in self.fpn_top_down_levels or i == n:
                prev_features = lateral_features
            else:
                # Upsample the coarser level (in float32 for the interpolation) and fuse.
                top_down_features = F.interpolate(
                    prev_features.to(dtype=torch.float32),
                    scale_factor=2.0,
                    mode="nearest",
                    align_corners=None,
                    antialias=False,
                ).to(lateral_features.dtype)
                prev_features = lateral_features + top_down_features
            prev_position_encoding = self.position_encoding(
                prev_features.shape, prev_features.device, prev_features.dtype
            ).to(prev_features.dtype)
            fpn_hidden_states += (prev_features,)
            fpn_position_encoding += (prev_position_encoding,)
        return fpn_hidden_states, fpn_position_encoding
def do_pool(x: torch.Tensor, query_stride: int | None = None) -> torch.Tensor:
if query_stride is None:
return x
# (B, H, W, C) -> (B, C, H, W)
x = x.permute(0, 3, 1, 2)
x = nn.functional.max_pool2d(x, kernel_size=query_stride, stride=query_stride, ceil_mode=False)
# (B, C, H', W') -> (B, H', W', C)
x = x.permute(0, 2, 3, 1)
return x
class Sam2MultiScaleAttention(nn.Module):
    """Multi-head self-attention over a (batch, height, width, channels) feature map,
    with optional query pooling that downsamples the output at stage transitions."""

    def __init__(
        self,
        config: Sam2HieraDetConfig,
        dim: int,
        dim_out: int,
        num_attention_heads: int,
        query_stride: tuple[int, int] | None = None,
    ):
        super().__init__()
        self.config = config
        self.dim = dim
        self.dim_out = dim_out
        self.query_stride = query_stride
        self.num_attention_heads = num_attention_heads
        head_dim = dim_out // num_attention_heads
        self.scale = head_dim**-0.5
        # Fused projection producing queries, keys and values in one matmul.
        self.qkv = nn.Linear(dim, dim_out * 3)
        self.proj = nn.Linear(dim_out, dim_out)
        self.is_causal = False

    def forward(self, hidden_states: torch.Tensor, **kwargs) -> torch.Tensor:
        batch_size, height, width, _ = hidden_states.shape
        # qkv with shape (B, H * W, 3, nHead, C)
        qkv = self.qkv(hidden_states).reshape(batch_size, height * width, 3, self.num_attention_heads, -1)
        # q, k, v with shape (B, H * W, nheads, C)
        query, key, value = torch.unbind(qkv, 2)
        # BUGFIX: removed a leftover manual softmax(Q @ K^T) computation here whose
        # result was never used — it wasted O(n^2) compute/memory; the attention
        # interface below performs the actual attention.
        # Q pooling (for downsample at stage changes)
        if self.query_stride:
            query = do_pool(query.reshape(batch_size, height, width, -1), self.query_stride)
            height, width = query.shape[1:3]  # downsampled shape
            query = query.reshape(batch_size, height * width, self.num_attention_heads, -1)
        # transpose query, key, value to (B, nHead, H * W, C)
        query = query.transpose(1, 2)
        key = key.transpose(1, 2)
        value = value.transpose(1, 2)
        attention_interface: Callable = ALL_ATTENTION_FUNCTIONS.get_interface(
            self.config._attn_implementation, eager_attention_forward
        )
        attn_output, _ = attention_interface(
            self,
            query,
            key,
            value,
            attention_mask=None,
            is_causal=self.is_causal,
            scaling=self.scale,
            **kwargs,
        )
        attn_output = attn_output.reshape(batch_size, height, width, -1)
        attn_output = self.proj(attn_output)
        return attn_output
class Sam2FeedForward(nn.Module):
    """Simple MLP: input projection, `num_layers - 2` hidden layers, output projection,
    with the configured activation between layers and an optional final sigmoid."""

    def __init__(
        self,
        input_dim: int,
        hidden_dim: int,
        output_dim: int,
        num_layers: int,
        activation: str = "relu",
        sigmoid_output: bool = False,
    ):
        super().__init__()
        self.num_layers = num_layers
        self.sigmoid_output = sigmoid_output
        self.activation = ACT2FN[activation]
        self.proj_in = nn.Linear(input_dim, hidden_dim)
        self.proj_out = nn.Linear(hidden_dim, output_dim)
        # Hidden stack between the two projections (empty when num_layers == 2).
        self.layers = nn.ModuleList(nn.Linear(hidden_dim, hidden_dim) for _ in range(num_layers - 2))

    def forward(self, hidden_states):
        out = self.activation(self.proj_in(hidden_states))
        for hidden_layer in self.layers:
            out = self.activation(hidden_layer(out))
        out = self.proj_out(out)
        return F.sigmoid(out) if self.sigmoid_output else out
class Sam2MultiScaleBlock(GradientCheckpointingLayer):
    """One Hiera backbone block: (optionally windowed) multi-scale attention followed
    by an MLP, with optional query pooling (spatial downsampling) at stage boundaries.
    """

    def __init__(
        self,
        config: Sam2HieraDetConfig,
        stage_idx: int,
        block_idx: int,
        total_block_idx: int,
    ):
        super().__init__()
        # take embed dim from previous stage if first block of stage
        self.dim = (
            config.embed_dim_per_stage[stage_idx - 1]
            if stage_idx > 0 and block_idx == 0
            else config.embed_dim_per_stage[stage_idx]
        )
        self.dim_out = config.embed_dim_per_stage[stage_idx]
        self.layer_norm1 = nn.LayerNorm(self.dim, eps=config.layer_norm_eps)
        # take window size from previous stage if first block of stage
        self.window_size = (
            config.window_size_per_stage[stage_idx - 1]
            if stage_idx > 0 and block_idx == 0
            else config.window_size_per_stage[stage_idx]
        )
        # Blocks listed in `global_attention_blocks` attend globally (window size 0).
        self.window_size = 0 if total_block_idx in config.global_attention_blocks else self.window_size
        # use query stride for first block of stage if stage is a query pool stage
        self.query_stride = (
            config.query_stride if 0 < stage_idx <= config.num_query_pool_stages and block_idx == 0 else None
        )
        self.attn = Sam2MultiScaleAttention(
            config,
            self.dim,
            self.dim_out,
            num_attention_heads=config.num_attention_heads_per_stage[stage_idx],
            query_stride=self.query_stride,
        )
        self.layer_norm2 = nn.LayerNorm(self.dim_out, eps=config.layer_norm_eps)
        self.mlp = Sam2FeedForward(
            self.dim_out,
            int(self.dim_out * config.mlp_ratio),
            self.dim_out,
            num_layers=2,
            activation=config.hidden_act,
        )
        # Project the residual when the embedding dimension changes at a stage boundary.
        if self.dim != self.dim_out:
            self.proj = nn.Linear(self.dim, self.dim_out)

    def forward(
        self,
        hidden_states: torch.Tensor,
        **kwargs: Unpack[TransformersKwargs],
    ) -> torch.FloatTensor:
        residual = hidden_states  # batch_size, height, width, channel
        hidden_states = self.layer_norm1(hidden_states)
        # Skip connection
        # At a dimension change the residual is projected and pooled to match the
        # attention output's shape.
        if self.dim != self.dim_out:
            residual = do_pool(self.proj(hidden_states), self.query_stride)
        # Window partition
        window_size = self.window_size
        if self.window_size > 0:
            H, W = hidden_states.shape[1], hidden_states.shape[2]
            hidden_states, pad_hw = window_partition(hidden_states, window_size)
        # Window Attention + Q Pooling (if stage change)
        attn_output = self.attn(
            hidden_states=hidden_states,
            **kwargs,
        )
        hidden_states = attn_output
        if self.query_stride:
            # Shapes have changed due to Q pooling
            # Recompute window size and padded extents for the downsampled resolution.
            window_size = self.window_size // self.query_stride[0]
            H, W = residual.shape[1:3]
            pad_h = (window_size - H % window_size) % window_size
            pad_w = (window_size - W % window_size) % window_size
            pad_hw = (H + pad_h, W + pad_w)
        # Reverse window partition
        if self.window_size > 0:
            hidden_states = window_unpartition(hidden_states, window_size, pad_hw, (H, W))
        hidden_states = residual + hidden_states
        layernorm_output = self.layer_norm2(hidden_states)
        hidden_states = hidden_states + self.mlp(layernorm_output)
        return hidden_states
@dataclass
@auto_docstring(
    custom_intro="""
    Hiera model's outputs that also contains a pooling of the last hidden states.
    """
)
class Sam2HieraDetModelOutput(ModelOutput):
    r"""
    last_hidden_state (`torch.FloatTensor` of shape `(batch_size, height, width, hidden_size)`):
        hidden-states at the output of the last layer of the model.
    intermediate_hidden_states (`tuple[torch.FloatTensor]` of shape `(batch_size, height, width, hidden_size)`):
        Sequence of hidden-states at the output of the intermediate layers of the model.
    """

    last_hidden_state: torch.FloatTensor | None = None
    intermediate_hidden_states: tuple[torch.FloatTensor, ...] | None = None
    # Optional per-block records; presumably populated via the `_can_record_outputs`
    # output-capturing mechanism — confirm against `capture_outputs`.
    hidden_states: tuple[torch.FloatTensor, ...] | None = None
    attentions: tuple[torch.FloatTensor, ...] | None = None
@auto_docstring
class Sam2PreTrainedModel(PreTrainedModel):
    # Base class providing weight initialization and pretrained-model loading
    # behavior shared by all SAM 2 models.
    config_class = Sam2Config
    base_model_prefix = "sam2"
    main_input_name = "pixel_values"
    input_modalities = ("image",)
    _supports_sdpa = True
    _supports_flash_attn = True
    _supports_attention_backend = True
    # Checkpoint keys that may appear in original checkpoints but have no counterpart
    # in this model (presumably video/memory-tracking components) — ignored on load.
    _keys_to_ignore_on_load_unexpected = [
        r"^memory_.*",
        r"^mask_downsample.*",
        r"^object_pointer_proj.*",
        r"^temporal_positional_encoding_projection_layer.*",
        "no_memory_positional_encoding",
        "no_object_pointer",
        "occlusion_spatial_embedding_parameter",
    ]

    @torch.no_grad()
    def _init_weights(self, module):
        """Initialize SAM2-specific parameters; everything else defers to the parent."""
        super()._init_weights(module)
        if isinstance(module, Sam2HieraDetModel):
            # Positional embeddings start at zero; pretrained values overwrite them.
            if module.pos_embed is not None:
                init.zeros_(module.pos_embed)
            if module.pos_embed_window is not None:
                init.zeros_(module.pos_embed_window)
        elif isinstance(module, Sam2PositionalEmbedding):
            # Random Gaussian projection scaled by the configured factor.
            init.normal_(module.positional_embedding, std=module.scale)
        elif isinstance(module, Sam2Model):
            if module.no_memory_embedding is not None:
                init.zeros_(module.no_memory_embedding)
class Sam2HieraDetModel(Sam2PreTrainedModel):
    """Hiera vision backbone: patch embedding plus a sequence of multi-scale blocks,
    returning the hidden state at the end of each stage."""

    config_class = Sam2HieraDetConfig
    main_input_name = "pixel_values"
    _can_record_outputs = {
        "hidden_states": Sam2MultiScaleBlock,
        "attentions": Sam2MultiScaleAttention,
    }

    def __init__(self, config: Sam2HieraDetConfig):
        super().__init__(config)
        self.patch_embed = Sam2PatchEmbeddings(config)
        # Windowed positional embedding (https://huggingface.co/papers/2311.05613)
        self.pos_embed = nn.Parameter(
            torch.zeros(1, config.hidden_size, *config.window_positional_embedding_background_size)
        )
        self.pos_embed_window = nn.Parameter(
            torch.zeros(1, config.hidden_size, config.window_size_per_stage[0], config.window_size_per_stage[0])
        )
        # Flat block index of the last block in each stage (where intermediate
        # hidden states are collected in forward()).
        self.stage_ends = (np.cumsum(config.blocks_per_stage) - 1).tolist()
        self.blocks = nn.ModuleList()
        total_block_idx = 0
        for stage_idx, blocks_per_stage in enumerate(config.blocks_per_stage):
            for block_idx in range(blocks_per_stage):
                block = Sam2MultiScaleBlock(
                    config=config, stage_idx=stage_idx, block_idx=block_idx, total_block_idx=total_block_idx
                )
                self.blocks.append(block)
                total_block_idx += 1
        self.post_init()

    def get_input_embeddings(self):
        """Return the patch-embedding module (the model's input embedding)."""
        return self.patch_embed

    def _get_pos_embed(self, hw: tuple[int, int]) -> torch.Tensor:
        """Build a (1, h, w, C) positional embedding: the background embedding is
        bicubically resized to (h, w), then the window embedding is tiled over it."""
        h, w = hw
        window_embed = self.pos_embed_window
        pos_embed = F.interpolate(self.pos_embed, size=(h, w), mode="bicubic")
        pos_embed = pos_embed + window_embed.tile([x // y for x, y in zip(pos_embed.shape, window_embed.shape)])
        pos_embed = pos_embed.permute(0, 2, 3, 1)
        return pos_embed

    @merge_with_config_defaults
    @capture_outputs
    def forward(
        self,
        pixel_values: torch.FloatTensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple | Sam2HieraDetModelOutput:
        """Embed the image, add positional embeddings, and run all blocks, collecting
        the hidden state at the end of every stage."""
        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")
        hidden_states = self.patch_embed(pixel_values)
        hidden_states = hidden_states + self._get_pos_embed(hidden_states.shape[1:3])
        intermediate_hidden_states = ()
        for i, block_module in enumerate(self.blocks):
            hidden_states = block_module(hidden_states, **kwargs)
            if i in self.stage_ends:
                intermediate_hidden_states = intermediate_hidden_states + (hidden_states,)
        return Sam2HieraDetModelOutput(
            last_hidden_state=hidden_states,
            intermediate_hidden_states=intermediate_hidden_states,
        )
@auto_docstring(
    custom_intro="""
    The vision model from Sam without any head or projection on top.
    """
)
class Sam2VisionModel(Sam2PreTrainedModel):
    config_class = Sam2VisionConfig
    main_input_name = "pixel_values"
    _can_record_outputs = {
        "hidden_states": Sam2MultiScaleBlock,
        "attentions": Sam2MultiScaleAttention,
    }

    def __init__(self, config: Sam2VisionConfig):
        super().__init__(config)
        self.config = config
        # Backbone (Hiera) instantiated from its sub-config, followed by the FPN neck.
        self.backbone = AutoModel.from_config(config.backbone_config)
        self.neck = Sam2VisionNeck(config)
        self.num_feature_levels = config.num_feature_levels
        self.post_init()

    def get_input_embeddings(self):
        return self.backbone.get_input_embeddings()

    @can_return_tuple
    def forward(
        self,
        pixel_values: torch.FloatTensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple | Sam2VisionEncoderOutput:
        """Run the backbone and FPN neck, returning multi-level feature maps and
        their position encodings ordered from high to low resolution."""
        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")
        # Forward through backbone
        backbone_output = self.backbone(pixel_values, **kwargs)
        hidden_states = backbone_output.last_hidden_state
        intermediate_hidden_states = backbone_output.intermediate_hidden_states
        fpn_hidden_states, fpn_position_encoding = self.neck(intermediate_hidden_states)
        # Select last `num_feature_levels` feature levels from FPN and reverse order to get features from high to low resolution
        fpn_hidden_states = fpn_hidden_states[-self.num_feature_levels :][::-1]
        fpn_position_encoding = fpn_position_encoding[-self.num_feature_levels :][::-1]
        return Sam2VisionEncoderOutput(
            last_hidden_state=hidden_states,
            fpn_hidden_states=fpn_hidden_states,
            fpn_position_encoding=fpn_position_encoding,
            hidden_states=backbone_output.hidden_states,
            attentions=backbone_output.attentions,
        )
class Sam2PositionalEmbedding(nn.Module):
    """Sine/cosine positional encoding of 2-D point coordinates via a random
    Gaussian projection (registered as a non-trainable buffer)."""

    def __init__(self, config: Sam2PromptEncoderConfig):
        super().__init__()
        self.scale = config.scale
        # Random (2, hidden_size // 2) projection; a buffer so it is saved with the
        # checkpoint but never trained.
        positional_embedding = self.scale * torch.randn((2, config.hidden_size // 2))
        self.register_buffer("positional_embedding", positional_embedding)

    def forward(self, input_coords, input_shape=None):
        """Positionally encode points that are normalized to [0,1]."""
        coordinates = input_coords.clone()
        if input_shape is not None:
            # Normalize pixel coordinates to [0, 1]: x by width, y by height.
            coordinates[:, :, :, 0] = coordinates[:, :, :, 0] / input_shape[1]
            coordinates[:, :, :, 1] = coordinates[:, :, :, 1] / input_shape[0]
        # BUGFIX: `Tensor.to` is out-of-place; previously this cast's result was
        # discarded, so the intended float32 promotion never happened.
        coordinates = coordinates.to(torch.float32)
        # assuming coords are in [0, 1]^2 square and have d_1 x ... x d_n x 2 shape
        coordinates = 2 * coordinates - 1
        coordinates = coordinates.to(self.positional_embedding.dtype)
        coordinates = coordinates @ self.positional_embedding
        coordinates = 2 * np.pi * coordinates
        # outputs d_1 x ... x d_n x channel shape
        return torch.cat([torch.sin(coordinates), torch.cos(coordinates)], dim=-1)
class Sam2MaskEmbedding(SamMaskEmbedding):
    """Embeds dense mask prompts; behavior fully inherited from `SamMaskEmbedding`."""

    pass
class Sam2PromptEncoder(SamPromptEncoder):
    # Encodes point, box and mask prompts into embeddings for the mask decoder.
    def __init__(self, config: Sam2PromptEncoderConfig):
        nn.Module.__init__(self)
        # Positional encoder, reused for points and box corners.
        self.shared_embedding = Sam2PositionalEmbedding(config)
        self.mask_embed = Sam2MaskEmbedding(config)
        # Learned embedding used in place of a mask when no mask prompt is given.
        self.no_mask_embed = nn.Embedding(1, config.hidden_size)
        self.image_embedding_size = (config.image_size // config.patch_size, config.image_size // config.patch_size)
        # Mask prompts are expected at 4x the image-embedding resolution.
        self.mask_input_size = (4 * config.image_size // config.patch_size, 4 * config.image_size // config.patch_size)
        self.input_image_size = config.image_size
        # One embedding row per point label type; rows 2 and 3 are also used for box corners.
        self.point_embed = nn.Embedding(config.num_point_embeddings, config.hidden_size)
        self.hidden_size = config.hidden_size
        # Embedding for "not a point" entries (label -1).
        self.not_a_point_embed = nn.Embedding(1, config.hidden_size)

    def _embed_points(self, points: torch.Tensor, labels: torch.Tensor, pad: bool) -> torch.Tensor:
        """Embeds point prompts."""
        points = points + 0.5  # Shift to center of pixel
        if pad:
            # Append a dummy point with label -1, handled as "not a point" below.
            points = torch.nn.functional.pad(points, (0, 0, 0, 1), mode="constant", value=0)
            labels = torch.nn.functional.pad(labels, (0, 1), mode="constant", value=-1)
        input_shape = (self.input_image_size, self.input_image_size)
        point_embedding = self.shared_embedding(points, input_shape)
        # torch.where and expanding the labels tensor is required by the ONNX export
        point_embedding = torch.where(labels[..., None] == -1, self.not_a_point_embed.weight, point_embedding)
        # This is required for the ONNX export. The dtype, device need to be explicitly
        # specified as otherwise torch.onnx.export interprets as double
        # Label -10 marks padding points produced by the processor: zero out their embedding.
        point_embedding = torch.where(
            labels[..., None] != -10,
            point_embedding,
            torch.zeros_like(point_embedding),
        )
        # Add point embeddings for labels >= 0
        point_embedding = point_embedding + self.point_embed(labels.clamp(min=0)) * (labels >= 0).unsqueeze(-1)
        return point_embedding

    def _embed_boxes(self, boxes: torch.Tensor) -> torch.Tensor:
        """Embeds box prompts."""
        boxes = boxes + 0.5  # Shift to center of pixel
        # View each (x1, y1, x2, y2) box as its two (x, y) corner points.
        coords = boxes.view(*boxes.shape[:2], 2, 2)
        # add padding point for consistency with the original implementation
        coords = torch.nn.functional.pad(coords, (0, 0, 0, 1), mode="constant", value=0)
        corner_embedding = self.shared_embedding(coords, (self.input_image_size, self.input_image_size))
        # Rows 2 and 3 of `point_embed` encode the first (top-left) and second
        # (bottom-right) box corner respectively.
        corner_embedding[:, :, 0, :] += self.point_embed.weight[2]
        corner_embedding[:, :, 1, :] += self.point_embed.weight[3]
        # The appended padding point gets the "not a point" embedding.
        corner_embedding[:, :, 2, :] = self.not_a_point_embed.weight.expand_as(corner_embedding[:, :, 2, :])
        return corner_embedding
class Sam2Attention(nn.Module):
    """
    SAM2's attention layer that allows for downscaling the size of the embedding after projection to queries, keys, and
    values.
    """

    def __init__(self, config, downsample_rate=None):
        super().__init__()
        downsample_rate = config.attention_downsample_rate if downsample_rate is None else downsample_rate
        self.config = config
        self.hidden_size = config.hidden_size
        # Attention runs in a (possibly) reduced internal dimension; `o_proj`
        # maps back to `hidden_size`.
        self.internal_dim = config.hidden_size // downsample_rate
        self.num_attention_heads = config.num_attention_heads
        self.head_dim = self.internal_dim // config.num_attention_heads
        self.scaling = self.head_dim**-0.5
        self.is_causal = False
        self.q_proj = nn.Linear(self.hidden_size, self.internal_dim)
        self.k_proj = nn.Linear(self.hidden_size, self.internal_dim)
        self.v_proj = nn.Linear(self.hidden_size, self.internal_dim)
        self.o_proj = nn.Linear(self.internal_dim, self.hidden_size)

    def forward(
        self,
        query: torch.Tensor,
        key: torch.Tensor,
        value: torch.Tensor,
        attention_similarity: torch.Tensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple[torch.Tensor, torch.Tensor]:
        """Attention over (batch, point_batch, seq, hidden) inputs.

        `attention_similarity`, when given, is passed as the attention mask
        (an additive float bias, used for target-guided attention). Returns
        the attended output and the attention weights.
        """
        # Input projections
        batch_size, point_batch_size = query.shape[:2]
        # Fold the point-batch dimension into the batch for multi-head attention.
        new_shape = (batch_size * point_batch_size, -1, self.num_attention_heads, self.head_dim)
        query = self.q_proj(query).view(*new_shape).transpose(1, 2)
        key = self.k_proj(key).view(*new_shape).transpose(1, 2)
        value = self.v_proj(value).view(*new_shape).transpose(1, 2)
        attention_interface: Callable = ALL_ATTENTION_FUNCTIONS.get_interface(
            self.config._attn_implementation, eager_attention_forward
        )
        if is_flash_attention_requested(self.config) and attention_similarity is not None:
            # Target guided masks are represented as float masks and are incompatible with Flash Attention
            # Fallback to SDPA for this call only so the rest of the model can still benefit from FA
            attention_interface = ALL_ATTENTION_FUNCTIONS["sdpa"]
            logger.warning_once(
                "Falling back to SDPA for target-guided attention because "
                "Flash Attention does not support additive bias masks."
            )
        attn_output, attn_weights = attention_interface(
            self,
            query,
            key,
            value,
            attention_mask=attention_similarity,
            dropout=0.0,
            scaling=self.scaling,
            is_causal=self.is_causal,
            **kwargs,
        )
        # Restore the (batch, point_batch, seq, internal_dim) layout before the output projection.
        attn_output = attn_output.reshape(
            batch_size, point_batch_size, -1, self.num_attention_heads * self.head_dim
        ).contiguous()
        attn_output = self.o_proj(attn_output)
        return attn_output, attn_weights
class Sam2TwoWayAttentionBlock(SamTwoWayAttentionBlock, GradientCheckpointingLayer):
    # Re-instantiates the parent's submodules with the Sam2 variants; the
    # forward logic itself is inherited from `SamTwoWayAttentionBlock`.
    def __init__(self, config: Sam2MaskDecoderConfig, skip_first_layer_pe: bool = False):
        nn.Module.__init__(self)
        # Self-attention over the prompt tokens (no internal downsampling).
        self.self_attn = Sam2Attention(config, downsample_rate=1)
        self.layer_norm1 = nn.LayerNorm(config.hidden_size)
        # Tokens attend to the image embedding (default downsample rate).
        self.cross_attn_token_to_image = Sam2Attention(config)
        self.layer_norm2 = nn.LayerNorm(config.hidden_size)
        self.mlp = Sam2FeedForward(
            config.hidden_size, config.mlp_dim, config.hidden_size, num_layers=config.num_hidden_layers
        )
        self.layer_norm3 = nn.LayerNorm(config.hidden_size)
        self.layer_norm4 = nn.LayerNorm(config.hidden_size)
        # Image embedding attends back to the tokens.
        self.cross_attn_image_to_token = Sam2Attention(config)
        self.skip_first_layer_pe = skip_first_layer_pe
class Sam2TwoWayTransformer(SamTwoWayTransformer):
    """Two-way transformer; behavior fully inherited from `SamTwoWayTransformer`."""

    pass
class Sam2LayerNorm(SamLayerNorm):
    """Layer norm variant; behavior fully inherited from `SamLayerNorm`."""

    pass
class Sam2MaskDecoder(SamMaskDecoder):
    # SAM2 mask decoder: extends the SAM decoder with an object-score token and
    # head, high-resolution skip connections (conv_s0 / conv_s1), and an optional
    # stability-based fallback from the single-mask to the multi-mask outputs.
    def __init__(self, config: Sam2MaskDecoderConfig):
        super().__init__(config)
        del self.iou_prediction_head
        # Replace the parent's IoU head with one whose outputs are sigmoid-bounded.
        self.iou_prediction_head = Sam2FeedForward(
            self.hidden_size,
            config.iou_head_hidden_dim,
            self.num_mask_tokens,
            config.iou_head_depth,
            sigmoid_output=True,
        )
        # 1x1 projections applied to the two high-resolution feature maps that are
        # added back in during mask upscaling.
        self.conv_s0 = nn.Conv2d(config.hidden_size, config.hidden_size // 8, kernel_size=1, stride=1)
        self.conv_s1 = nn.Conv2d(config.hidden_size, config.hidden_size // 4, kernel_size=1, stride=1)
        # Extra token and head predicting whether an object is present at all.
        self.obj_score_token = nn.Embedding(1, self.hidden_size)
        self.pred_obj_score_head = Sam2FeedForward(self.hidden_size, self.hidden_size, 1, 3)
        self.dynamic_multimask_via_stability = config.dynamic_multimask_via_stability
        self.dynamic_multimask_stability_delta = config.dynamic_multimask_stability_delta
        self.dynamic_multimask_stability_thresh = config.dynamic_multimask_stability_thresh

    def _get_stability_scores(self, mask_logits):
        """
        Compute stability scores of the mask logits based on the IoU between upper and
        lower thresholds.
        """
        mask_logits = mask_logits.flatten(-2)
        stability_delta = self.dynamic_multimask_stability_delta
        # IoU between the mask thresholded at +delta and at -delta; defined as 1.0
        # when even the looser threshold yields an empty mask.
        area_i = torch.sum(mask_logits > stability_delta, dim=-1).float()
        area_u = torch.sum(mask_logits > -stability_delta, dim=-1).float()
        stability_scores = torch.where(area_u > 0, area_i / area_u, 1.0)
        return stability_scores

    def _dynamic_multimask_via_stability(self, all_mask_logits, all_iou_scores):
        """
        When outputting a single mask, if the stability score from the current single-mask
        output (based on output token 0) falls below a threshold, we instead select from
        multi-mask outputs (based on output token 1~3) the mask with the highest predicted
        IoU score. This is intended to ensure a valid mask for both clicking and tracking.
        """
        # The best mask from multimask output tokens (1~3)
        multimask_logits = all_mask_logits[:, :, 1:, :, :]
        multimask_iou_scores = all_iou_scores[:, :, 1:]
        best_scores_inds = torch.argmax(multimask_iou_scores, dim=-1)  # [B, P]
        # Expand the argmax indices so they can gather full (H, W) mask maps.
        best_scores_inds_expanded = best_scores_inds.unsqueeze(-1).unsqueeze(-1).unsqueeze(-1)
        best_scores_inds_expanded = best_scores_inds_expanded.expand(
            -1, -1, 1, multimask_logits.size(-2), multimask_logits.size(-1)
        )
        best_multimask_logits = torch.gather(multimask_logits, 2, best_scores_inds_expanded)  # [B, P, 1, H, W]
        best_multimask_iou_scores = torch.gather(multimask_iou_scores, 2, best_scores_inds.unsqueeze(-1))  # [B, P, 1]
        # The mask from singlemask output token 0 and its stability score
        singlemask_logits = all_mask_logits[:, :, 0:1, :, :]
        singlemask_iou_scores = all_iou_scores[:, :, 0:1]
        stability_scores = self._get_stability_scores(singlemask_logits)
        is_stable = stability_scores >= self.dynamic_multimask_stability_thresh
        # Dynamically fall back to best multimask output upon low stability scores.
        mask_logits_out = torch.where(
            is_stable[..., None, None].expand_as(singlemask_logits),
            singlemask_logits,
            best_multimask_logits,
        )
        iou_scores_out = torch.where(
            is_stable.expand_as(singlemask_iou_scores),
            singlemask_iou_scores,
            best_multimask_iou_scores,
        )
        return mask_logits_out, iou_scores_out

    def forward(
        self,
        image_embeddings: torch.Tensor,
        image_positional_embeddings: torch.Tensor,
        sparse_prompt_embeddings: torch.Tensor,
        dense_prompt_embeddings: torch.Tensor,
        multimask_output: bool,
        high_resolution_features: list[torch.Tensor],
        attention_similarity: torch.Tensor | None = None,
        target_embedding: torch.Tensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
        """
        Predict masks given image and prompt embeddings.
        Args:
            image_embeddings (`torch.Tensor`):
                The embeddings from the image encoder.
            image_positional_embeddings (`torch.Tensor`):
                Positional encoding with the shape of image_embeddings.
            sparse_prompt_embeddings (`torch.Tensor`):
                The embeddings of the points and boxes.
            dense_prompt_embeddings (`torch.Tensor`):
                The embeddings of the mask inputs.
            multimask_output (`bool`):
                Whether to return multiple masks or a single mask.
            high_resolution_features (`list[torch.Tensor]`, *optional*):
                The high-resolution features from the vision encoder.
            attention_similarity (`torch.Tensor`, *optional*):
                The attention similarity tensor.
            target_embedding (`torch.Tensor`, *optional*):
                The target embedding.
        """
        batch_size, num_channels, height, width = image_embeddings.shape
        point_batch_size = sparse_prompt_embeddings.shape[1]
        # Concatenate output tokens
        # Token layout: [object score, IoU, mask tokens...]; the index slices
        # further below (0, 1, 2:2+num_mask_tokens) rely on this order.
        output_tokens = torch.cat(
            [
                self.obj_score_token.weight,
                self.iou_token.weight,
                self.mask_tokens.weight,
            ],
            dim=0,
        )
        output_tokens = output_tokens.repeat(batch_size, point_batch_size, 1, 1)
        if sparse_prompt_embeddings.shape[0] != 0:
            tokens = torch.cat((output_tokens, sparse_prompt_embeddings), dim=2)
        else:
            tokens = output_tokens
        point_embeddings = tokens.to(self.iou_token.weight.dtype)
        # Expand per-image data in batch direction to be per-mask
        image_embeddings = image_embeddings + dense_prompt_embeddings
        image_embeddings = image_embeddings.repeat_interleave(point_batch_size, dim=0)
        image_positional_embeddings = image_positional_embeddings.repeat_interleave(point_batch_size, 0)
        # Run the transformer
        point_embeddings, image_embeddings = self.transformer(
            point_embeddings=point_embeddings,
            image_embeddings=image_embeddings,
            image_positional_embeddings=image_positional_embeddings,
            attention_similarity=attention_similarity,
            target_embedding=target_embedding,
            **kwargs,
        )
        iou_token_out = point_embeddings[:, :, 1, :]
        mask_tokens_out = point_embeddings[:, :, 2 : (2 + self.num_mask_tokens), :]
        # Upscale mask embeddings and predict masks using the mask tokens
        image_embeddings = image_embeddings.transpose(2, 3).view(
            batch_size * point_batch_size, num_channels, height, width
        )
        feat_s0, feat_s1 = high_resolution_features
        feat_s0 = feat_s0.repeat_interleave(point_batch_size, dim=0)
        feat_s1 = feat_s1.repeat_interleave(point_batch_size, dim=0)
        # High-resolution skip connections are added at each upscaling stage.
        upscaled_embedding = self.upscale_conv1(image_embeddings) + feat_s1
        upscaled_embedding = self.activation(self.upscale_layer_norm(upscaled_embedding))
        upscaled_embedding = self.activation(self.upscale_conv2(upscaled_embedding) + feat_s0)
        # One hypernetwork MLP per mask token produces the per-mask weights.
        hyper_in_list: list[torch.Tensor] = []
        for i in range(self.num_mask_tokens):
            current_mlp = self.output_hypernetworks_mlps[i]
            hyper_in_list += [current_mlp(mask_tokens_out[:, :, i, :])]
        hyper_in = torch.stack(hyper_in_list, dim=2)
        _, num_channels, height, width = upscaled_embedding.shape
        upscaled_embedding = upscaled_embedding.view(batch_size, point_batch_size, num_channels, height * width)
        masks = (hyper_in @ upscaled_embedding).view(batch_size, point_batch_size, -1, height, width)
        # Generate mask quality predictions
        iou_pred = self.iou_prediction_head(iou_token_out)
        # Object-presence score comes from token 0 (the object score token).
        object_score_logits = self.pred_obj_score_head(point_embeddings[:, :, 0, :])
        # Select the correct mask or masks for output
        if multimask_output:
            mask_slice = slice(1, None)
            masks = masks[:, :, mask_slice, :, :]
            iou_pred = iou_pred[:, :, mask_slice]
        elif self.dynamic_multimask_via_stability and not self.training:
            # Single-mask output with stability fallback (inference only).
            mask_slice = slice(0, 1)
            masks, iou_pred = self._dynamic_multimask_via_stability(masks, iou_pred)
        else:
            mask_slice = slice(0, 1)
            masks = masks[:, :, mask_slice, :, :]
            iou_pred = iou_pred[:, :, mask_slice]
        sam_tokens_out = mask_tokens_out[:, :, mask_slice]  # [b, 3, c] shape
        return masks, iou_pred, sam_tokens_out, object_score_logits
@auto_docstring(
    custom_intro="""
    Segment Anything Model 2 (SAM 2) for generating segmentation masks, given an input image and
    input points and labels, boxes, or masks.
    """
)
class Sam2Model(SamModel):
    _tied_weights_keys = {}

    def __init__(self, config: Sam2Config):
        PreTrainedModel.__init__(self, config)
        # Positional encoder shared with the image-wide positional grid below.
        self.shared_image_embedding = Sam2PositionalEmbedding(config.prompt_encoder_config)
        self.vision_encoder = AutoModel.from_config(config.vision_config)
        self.prompt_encoder = Sam2PromptEncoder(config.prompt_encoder_config)
        # The module using it is not a PreTrainedModel subclass so we need this
        config.mask_decoder_config._attn_implementation = config._attn_implementation
        self.mask_decoder = Sam2MaskDecoder(config.mask_decoder_config)
        self.num_feature_levels = config.vision_config.num_feature_levels
        self.backbone_feature_sizes = config.vision_config.backbone_feature_sizes
        # a single token to indicate no memory embedding from previous frames
        self.hidden_dim = config.vision_config.fpn_hidden_size
        self.no_memory_embedding = torch.nn.Parameter(torch.zeros(1, 1, self.hidden_dim))
        self.post_init()

    def get_image_wide_positional_embeddings(self) -> torch.Tensor:
        """Positional encoding covering the full image-embedding grid, shape (1, channel, height, width)."""
        size = self.prompt_encoder.image_embedding_size
        target_device = self.shared_image_embedding.positional_embedding.device
        target_dtype = self.shared_image_embedding.positional_embedding.dtype
        grid = torch.ones(size, device=target_device, dtype=target_dtype)
        # Cumulative sums over a grid of ones give per-cell (row, col) indices;
        # subtracting 0.5 centers the coordinate on the cell.
        y_embed = grid.cumsum(dim=0) - 0.5
        x_embed = grid.cumsum(dim=1) - 0.5
        y_embed = y_embed / size[0]
        x_embed = x_embed / size[1]
        positional_embedding = self.shared_image_embedding(torch.stack([x_embed, y_embed], dim=-1))
        return positional_embedding.permute(2, 0, 1).unsqueeze(0)  # channel x height x width

    @torch.no_grad()
    def get_image_embeddings(
        self,
        pixel_values: torch.FloatTensor,
        **kwargs: Unpack[TransformersKwargs],
    ) -> list[torch.Tensor]:
        r"""
        Returns the image embeddings by passing the pixel values through the vision encoder.
        Args:
            pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
                Input pixel values
        """
        batch_size = pixel_values.shape[0]
        image_outputs = self.get_image_features(pixel_values, return_dict=True, **kwargs)
        feature_maps = image_outputs.fpn_hidden_states
        # add no memory embedding to the last feature map
        feature_maps[-1] = feature_maps[-1] + self.no_memory_embedding
        # reshape feature maps to the same shape as the backbone feature sizes
        image_embeddings = [
            feat.permute(1, 2, 0).view(batch_size, -1, *feat_size)
            for feat, feat_size in zip(feature_maps, self.backbone_feature_sizes)
        ]
        return image_embeddings

    @can_return_tuple
    @auto_docstring
    def get_image_features(
        self,
        pixel_values: torch.FloatTensor,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple | Sam2VisionEncoderOutput:
        r"""
        pixel_values (`torch.FloatTensor`):
            Input pixel values of shape `(batch_size, num_channels, height, width)`.
        """
        vision_outputs: Sam2VisionEncoderOutput = self.vision_encoder(pixel_values, return_dict=True, **kwargs)
        feature_maps = vision_outputs.fpn_hidden_states
        feature_maps_position_embeddings = vision_outputs.fpn_position_encoding
        # precompute projected level 0 and level 1 features in SAM decoder
        # to avoid running it again on every SAM click
        feature_maps = list(feature_maps)
        feature_maps[0] = self.mask_decoder.conv_s0(feature_maps[0])
        feature_maps[1] = self.mask_decoder.conv_s1(feature_maps[1])
        # flatten NxCxHxW to HWxNxC
        feature_maps = [feature_map.flatten(2).permute(2, 0, 1) for feature_map in feature_maps]
        feature_maps_position_embeddings = [
            feature_map_position_embedding.flatten(2).permute(2, 0, 1)
            for feature_map_position_embedding in feature_maps_position_embeddings
        ]
        vision_outputs.fpn_hidden_states = feature_maps
        vision_outputs.fpn_position_encoding = feature_maps_position_embeddings
        return vision_outputs

    @merge_with_config_defaults
    @capture_outputs
    @auto_docstring
    def forward(
        self,
        pixel_values: torch.FloatTensor | None = None,
        input_points: torch.FloatTensor | None = None,
        input_labels: torch.LongTensor | None = None,
        input_boxes: torch.FloatTensor | None = None,
        input_masks: torch.LongTensor | None = None,
        image_embeddings: torch.FloatTensor | None = None,
        multimask_output: bool = True,
        attention_similarity: torch.FloatTensor | None = None,
        target_embedding: torch.FloatTensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> Sam2ImageSegmentationOutput:
        r"""
        input_points (`torch.FloatTensor` of shape `(batch_size, num_points, 2)`):
            Input 2D spatial points, this is used by the prompt encoder to encode the prompt. Generally yields to much
            better results. The points can be obtained by passing a list of list of list to the processor that will
            create corresponding `torch` tensors of dimension 4. The first dimension is the image batch size, the
            second dimension is the point batch size (i.e. how many segmentation masks do we want the model to predict
            per input point), the third dimension is the number of points per segmentation mask (it is possible to pass
            multiple points for a single mask), and the last dimension is the x (vertical) and y (horizontal)
            coordinates of the point. If a different number of points is passed either for each image, or for each
            mask, the processor will create "PAD" points that will correspond to the (0, 0) coordinate, and the
            computation of the embedding will be skipped for these points using the labels.
        input_labels (`torch.LongTensor` of shape `(batch_size, point_batch_size, num_points)`):
            Input labels for the points, this is used by the prompt encoder to encode the prompt. According to the
            official implementation, there are 3 types of labels
            - `1`: the point is a point that contains the object of interest
            - `0`: the point is a point that does not contain the object of interest
            - `-1`: the point corresponds to the background
            We added the label:
            - `-10`: the point is a padding point, thus should be ignored by the prompt encoder
            The padding labels should be automatically done by the processor.
        input_boxes (`torch.FloatTensor` of shape `(batch_size, num_boxes, 4)`):
            Input boxes for the points, this is used by the prompt encoder to encode the prompt. Generally yields to
            much better generated masks. The boxes can be obtained by passing a list of list of list to the processor,
            that will generate a `torch` tensor, with each dimension corresponding respectively to the image batch
            size, the number of boxes per image and the coordinates of the top left and bottom right point of the box.
            In the order (`x1`, `y1`, `x2`, `y2`):
            - `x1`: the x coordinate of the top left point of the input box
            - `y1`: the y coordinate of the top left point of the input box
            - `x2`: the x coordinate of the bottom right point of the input box
            - `y2`: the y coordinate of the bottom right point of the input box
        input_masks (`torch.FloatTensor` of shape `(batch_size, image_size, image_size)`):
            SAM model also accepts segmentation masks as input. The mask will be embedded by the prompt encoder to
            generate a corresponding embedding, that will be fed later on to the mask decoder. These masks needs to be
            manually fed by the user, and they need to be of shape (`batch_size`, `image_size`, `image_size`).
        image_embeddings (`torch.FloatTensor` of shape `(batch_size, output_channels, window_size, window_size)`):
            Image embeddings, this is used by the mask decoder to generate masks and iou scores. For more memory
            efficient computation, users can first retrieve the image embeddings using the `get_image_embeddings`
            method, and then feed them to the `forward` method instead of feeding the `pixel_values`.
        multimask_output (`bool`, *optional*):
            In the original implementation and paper, the model always outputs 3 masks per image (or per point / per
            bounding box if relevant). However, it is possible to just output a single mask, that corresponds to the
            "best" mask, by specifying `multimask_output=False`.
        attention_similarity (`torch.FloatTensor`, *optional*):
            Attention similarity tensor, to be provided to the mask decoder for target-guided attention in case the
            model is used for personalization as introduced in [PerSAM](https://huggingface.co/papers/2305.03048).
        target_embedding (`torch.FloatTensor`, *optional*):
            Embedding of the target concept, to be provided to the mask decoder for target-semantic prompting in case
            the model is used for personalization as introduced in [PerSAM](https://huggingface.co/papers/2305.03048).
        Example:
        ```python
        >>> from PIL import Image
        >>> import httpx
        >>> from io import BytesIO
        >>> from transformers import AutoModel, AutoProcessor
        >>> model = AutoModel.from_pretrained("danelcsb/sam2.1_hiera_tiny")
        >>> processor = AutoProcessor.from_pretrained("danelcsb/sam2.1_hiera_tiny")
        >>> url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/sam-car.png"
        >>> with httpx.stream("GET", url) as response:
        ...     raw_image = Image.open(BytesIO(response.read())).convert("RGB")
        >>> input_points = [[[400, 650]]]  # 2D location of a window on the car
        >>> inputs = processor(images=raw_image, input_points=input_points, return_tensors="pt")
        >>> # Get segmentation mask
        >>> outputs = model(**inputs)
        >>> # Postprocess masks
        >>> masks = processor.post_process_masks(
        ...     outputs.pred_masks, inputs["original_sizes"]
        ... )
        ```
        """
        # Exactly one of the two image inputs must be supplied (XOR check).
        if not ((pixel_values is None) ^ (image_embeddings is None)):
            raise ValueError("Exactly one of pixel_values or image_embeddings must be provided.")
        if input_points is not None and input_boxes is not None:
            if input_points.shape[1] != input_boxes.shape[1]:
                raise ValueError(
                    f"You should provide as many bounding boxes as input points per box. Got {input_points.shape[1]} and {input_boxes.shape[1]}."
                )
        image_positional_embeddings = self.get_image_wide_positional_embeddings()
        # repeat with batch size
        batch_size = pixel_values.shape[0] if pixel_values is not None else image_embeddings[-1].shape[0]
        image_positional_embeddings = image_positional_embeddings.repeat(batch_size, 1, 1, 1)
        vision_attentions = None
        vision_hidden_states = None
        if pixel_values is not None:
            image_outputs: Sam2VisionEncoderOutput = self.get_image_features(pixel_values, return_dict=True, **kwargs)
            feature_maps = image_outputs.fpn_hidden_states
            vision_hidden_states = image_outputs.hidden_states
            vision_attentions = image_outputs.attentions
            # add no memory embedding to the last feature map
            feature_maps[-1] = feature_maps[-1] + self.no_memory_embedding
            # reshape feature maps to the same shape as the backbone feature sizes
            image_embeddings = [
                feat.permute(1, 2, 0).view(batch_size, -1, *feat_size)
                for feat, feat_size in zip(feature_maps, self.backbone_feature_sizes)
            ]
        if input_points is not None and input_labels is None:
            # Points without labels default to "foreground" (label 1).
            input_labels = torch.ones_like(input_points[:, :, :, 0], dtype=torch.int, device=input_points.device)
        if input_points is None and input_boxes is None:
            # If no points are provide, pad with an empty point (with label -1)
            input_points = torch.zeros(
                batch_size, 1, 1, 2, dtype=image_embeddings[-1].dtype, device=image_embeddings[-1].device
            )
            input_labels = -torch.ones(batch_size, 1, 1, dtype=torch.int32, device=image_embeddings[-1].device)
        if input_masks is not None:
            # If mask_inputs is provided, downsize it into low-res mask input if needed
            # and feed it as a dense mask prompt into the SAM mask encoder
            if input_masks.shape[-2:] != self.prompt_encoder.mask_input_size:
                input_masks = F.interpolate(
                    input_masks.float(),
                    size=self.prompt_encoder.mask_input_size,
                    align_corners=False,
                    mode="bilinear",
                    antialias=True,  # use antialias for downsampling
                ).to(input_masks.dtype)
        sparse_embeddings, dense_embeddings = self.prompt_encoder(
            input_points=input_points,
            input_labels=input_labels,
            input_boxes=input_boxes,
            input_masks=input_masks,
        )
        # Decode masks from the lowest-resolution embedding, passing the two
        # higher-resolution maps as skip connections.
        low_res_multimasks, iou_scores, _, object_score_logits = self.mask_decoder(
            image_embeddings=image_embeddings[-1],
            image_positional_embeddings=image_positional_embeddings,
            sparse_prompt_embeddings=sparse_embeddings,
            dense_prompt_embeddings=dense_embeddings,
            multimask_output=multimask_output,
            high_resolution_features=image_embeddings[:-1],
            attention_similarity=attention_similarity,
            target_embedding=target_embedding,
            **kwargs,
        )
        return Sam2ImageSegmentationOutput(
            iou_scores=iou_scores,
            pred_masks=low_res_multimasks,
            object_score_logits=object_score_logits,
            image_embeddings=image_embeddings,
            vision_hidden_states=vision_hidden_states,
            vision_attentions=vision_attentions,
        )
# Public API of this module.
__all__ = [
    "Sam2Model",
    "Sam2VisionModel",
    "Sam2PreTrainedModel",
    "Sam2ImageProcessorFast",
    "Sam2HieraDetModel",
]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/sam2/modular_sam2.py",
"license": "Apache License 2.0",
"lines": 1255,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/sam2/processing_sam2.py | # Copyright 2025 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Processor class for SAM2.
"""
from copy import deepcopy
import numpy as np
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, auto_docstring, is_torch_available, logging
from ...utils.import_utils import requires
logger = logging.get_logger(__name__)
if is_torch_available():
import torch
@requires(backends=("torch",))
@auto_docstring
class Sam2Processor(ProcessorMixin):
def __init__(self, image_processor, target_size: int | None = None, point_pad_value: int = -10, **kwargs):
r"""
target_size (`int`, *optional*):
The target size (in pixels) for normalizing input points and bounding boxes. If not provided, defaults
to the image processor's size configuration. All input coordinates (points and boxes) are normalized
to this size before being passed to the model. This ensures consistent coordinate representation
regardless of the original image dimensions.
point_pad_value (`int`, *optional*, defaults to -10):
The value used for padding input points when batching sequences of different lengths. This value is
used to mark padded positions and is preserved during coordinate normalization.
"""
super().__init__(image_processor, **kwargs)
self.point_pad_value = point_pad_value
self.target_size = target_size if target_size is not None else self.image_processor.size["height"]
    @auto_docstring
    def __call__(
        self,
        images: ImageInput | None = None,
        segmentation_maps: ImageInput | None = None,
        input_points: list[list[list[list[float]]]] | torch.Tensor | None = None,
        input_labels: list[list[list[int]]] | torch.Tensor | None = None,
        input_boxes: list[list[list[float]]] | torch.Tensor | None = None,
        original_sizes: list[list[float]] | torch.Tensor | None = None,
        return_tensors: str | TensorType | None = None,
        **kwargs,
    ) -> BatchEncoding:
        r"""
        segmentation_maps (`ImageInput`, *optional*):
            The segmentation maps to process.
        input_points (`list[list[list[list[float]]]]`, `torch.Tensor`, *optional*):
            The points to add to the frame.
        input_labels (`list[list[list[int]]]`, `torch.Tensor`, *optional*):
            The labels for the points.
        input_boxes (`list[list[list[float]]]`, `torch.Tensor`, *optional*):
            The bounding boxes to add to the frame.
        original_sizes (`list[list[float]]`, `torch.Tensor`, *optional*):
            The original sizes of the images.
        Returns:
            A [`BatchEncoding`] with the following fields:
            - `pixel_values` (`torch.Tensor`): The processed image(s).
            - `original_sizes` (`list[list[float]]`): The original sizes of the images.
            - `labels` (`torch.Tensor`): The processed segmentation maps (if provided).
            - `input_points` (`torch.Tensor`): The processed points.
            - `input_labels` (`torch.Tensor`): The processed labels.
            - `input_boxes` (`torch.Tensor`): The processed bounding boxes.
        """
        if images is not None:
            encoding_image_processor = self.image_processor(
                images,
                segmentation_maps=segmentation_maps,
                return_tensors=return_tensors,
                **kwargs,
            )
        elif original_sizes is not None:
            # No images given: prompts alone can still be processed when the
            # original image sizes are known.
            if isinstance(original_sizes, torch.Tensor):
                original_sizes = original_sizes.cpu().tolist()
            encoding_image_processor = BatchEncoding({"original_sizes": original_sizes}, tensor_type=return_tensors)
        else:
            raise ValueError("Either images or original_sizes must be provided")
        # pop arguments that are not used in the forward but used nevertheless
        original_sizes = encoding_image_processor["original_sizes"]
        # Check original_sizes is of length 1 or len(images)
        if images is not None and len(original_sizes) != 1 and len(original_sizes) != len(images):
            raise ValueError(
                "original_sizes must be of length 1 or len(images). If you are passing a single image, you must pass a single original_size."
            )
        # Process input points, labels, and boxes if provided
        if input_points is not None or input_labels is not None or input_boxes is not None:
            # Validate and convert inputs to standardized format
            processed_points = self._validate_single_input(
                input_points,
                expected_depth=4,
                input_name="points",
                expected_format="[image level, object level, point level, point coordinates]",
                expected_coord_size=2,
            )
            processed_labels = self._validate_single_input(
                input_labels,
                expected_depth=3,
                input_name="labels",
                expected_format="[image level, object level, point level]",
            )
            processed_boxes = self._validate_single_input(
                input_boxes,
                expected_depth=3,
                input_name="boxes",
                expected_format="[image level, box level, box coordinates]",
                expected_coord_size=4,
            )
            # Get padding requirements for all inputs
            if processed_points is not None:
                points_max_dims = self._get_nested_dimensions(processed_points)[:3]
            if processed_labels is not None:
                labels_max_dims = self._get_nested_dimensions(processed_labels)[:3]
            if processed_boxes is not None:
                boxes_max_dims = self._get_nested_dimensions(processed_boxes)[:2]
            # Ensure points and labels have consistent dimensions
            if processed_points is not None and processed_labels is not None:
                if points_max_dims != labels_max_dims:
                    raise ValueError(
                        "Input points and labels have inconsistent dimensions. Please ensure they have the same dimensions."
                    )
            # Check that boxes don't need padding (model limitation)
            if processed_boxes is not None and len(processed_boxes) >= 2:
                if any(len(img_boxes) < boxes_max_dims[1] for img_boxes in processed_boxes):
                    raise ValueError(
                        "Input boxes have inconsistent dimensions that would require padding, "
                        "but boxes cannot be padded due to model limitations. "
                        "Please ensure all images have the same number of boxes."
                    )
            # Pad and normalize all inputs to final tensor format
            if processed_points is not None:
                # Points get a trailing coordinate dimension of size 2 (x, y).
                padded_points = self._pad_nested_list(processed_points, points_max_dims + [2])
                final_points = torch.tensor(padded_points, dtype=torch.float32)
                self._normalize_tensor_coordinates(final_points, original_sizes, preserve_padding=True)
                encoding_image_processor.update({"input_points": final_points})
            if processed_labels is not None:
                # Labels are integers and carry no coordinates, so no normalization.
                padded_labels = self._pad_nested_list(processed_labels, labels_max_dims)
                final_labels = torch.tensor(padded_labels, dtype=torch.int64)
                encoding_image_processor.update({"input_labels": final_labels})
            if processed_boxes is not None:
                # Boxes were validated above to need no padding.
                final_boxes = torch.tensor(processed_boxes, dtype=torch.float32)
                self._normalize_tensor_coordinates(final_boxes, original_sizes, is_bounding_box=True)
                encoding_image_processor.update({"input_boxes": final_boxes})
        return encoding_image_processor
def _normalize_coordinates(
self, target_size: int, coords: "torch.Tensor", original_size, is_bounding_box=False
) -> "torch.Tensor":
"""
Expects a numpy array of length 2 in the final dimension. Requires the original image size in (H, W) format.
Args:
target_size (`int`):
The target size of the image.
coords (`torch.Tensor`):
The coordinates to be normalized.
original_size (`tuple`):
The original size of the image.
is_bounding_box (`bool`, *optional*, defaults to `False`):
Whether the coordinates are bounding boxes.
"""
old_h, old_w = original_size
new_h, new_w = target_size, target_size
coords = deepcopy(coords).float()
if is_bounding_box:
coords = coords.reshape(-1, 2, 2)
coords[..., 0] = coords[..., 0] * (new_w / old_w)
coords[..., 1] = coords[..., 1] * (new_h / old_h)
if is_bounding_box:
coords = coords.reshape(-1, 4)
return coords
def _convert_to_nested_list(self, data, expected_depth, current_depth=0):
"""
Recursively convert various input formats (tensors, numpy arrays, lists) to nested lists.
Args:
data: Input data in any format
expected_depth: Expected nesting depth
current_depth: Current depth in recursion
Returns:
Nested list representation of the data
"""
if data is None:
return None
# Convert tensor/numpy to list if we're at a leaf level or if it's a multi-dimensional array
if isinstance(data, torch.Tensor): # PyTorch tensor
if current_depth == expected_depth - 2 or len(data.shape) <= 2: # At coordinate level or small tensor
return data.numpy().tolist()
else:
return [self._convert_to_nested_list(item, expected_depth, current_depth + 1) for item in data]
elif isinstance(data, np.ndarray): # NumPy array
if current_depth == expected_depth - 2 or len(data.shape) <= 2: # At coordinate level or small array
return data.tolist()
else:
return [self._convert_to_nested_list(item, expected_depth, current_depth + 1) for item in data]
elif isinstance(data, list):
if current_depth == expected_depth:
# We've reached the expected depth, return as is
return data
else:
# Continue recursion
return [self._convert_to_nested_list(item, expected_depth, current_depth + 1) for item in data]
elif isinstance(data, (int, float)):
return data
else:
raise TypeError(f"Unsupported data type: {type(data)}")
def _get_nested_dimensions(self, nested_list, max_dims=None):
"""
Get the maximum dimensions at each level of nesting.
Args:
nested_list (`list`):
Nested list structure.
max_dims (`list`, *optional*):
Current maximum dimensions (for recursion).
Returns:
`list`: A list of maximum dimensions for each nesting level.
"""
if max_dims is None:
max_dims = []
if not isinstance(nested_list, list):
return max_dims
if len(max_dims) == 0:
max_dims.append(len(nested_list))
else:
max_dims[0] = max(max_dims[0], len(nested_list))
if len(nested_list) > 0:
for item in nested_list:
if isinstance(item, list):
sub_dims = self._get_nested_dimensions(item)
# Merge sub_dims into max_dims
for i, dim in enumerate(sub_dims):
if i + 1 >= len(max_dims):
max_dims.append(dim)
else:
max_dims[i + 1] = max(max_dims[i + 1], dim)
return max_dims
def _pad_nested_list(self, nested_list, target_dims, current_level=0, pad_value=None):
    """
    Recursively pad a nested list so each level matches `target_dims`.

    Note: pads via `list.extend`, so the input list is mutated in place and also returned.

    Args:
        nested_list (`list`):
            Nested list to pad.
        target_dims (`list`):
            Target dimensions for each level.
        current_level (`int`, *optional*, defaults to 0):
            Current nesting level.
        pad_value (`int`, *optional*):
            Value to use for padding; defaults to `self.point_pad_value`.

    Returns:
        `list`: The padded nested list.
    """
    if pad_value is None:
        pad_value = self.point_pad_value
    # Past the deepest target level there is nothing left to pad.
    if current_level >= len(target_dims):
        return nested_list
    # Ensure we have a list
    if not isinstance(nested_list, list):
        nested_list = [nested_list]
    # Pad current level
    current_size = len(nested_list)
    target_size = target_dims[current_level]
    # Pad with appropriate values
    if current_level == len(target_dims) - 1:
        # At the coordinate level, pad with pad_value
        nested_list.extend([pad_value] * (target_size - current_size))
    else:
        # At higher levels, pad with nested structures
        if current_size > 0:
            # Create appropriately sized template
            if current_level < len(target_dims) - 2:
                # For non-coordinate levels, create empty nested structure
                template_dims = target_dims[current_level + 1 :]
                template = self._create_empty_nested_structure(template_dims, pad_value)
            else:
                # For coordinate level, create list of pad_values
                template = [pad_value] * target_dims[current_level + 1]
            # deepcopy each appended template so padded entries never alias the same object.
            nested_list.extend([deepcopy(template) for _ in range(target_size - current_size)])
        else:
            # Create from scratch
            template_dims = target_dims[current_level + 1 :]
            template = self._create_empty_nested_structure(template_dims, pad_value)
            nested_list.extend([deepcopy(template) for _ in range(target_size)])
    # Recursively pad sublists
    if current_level < len(target_dims) - 1:
        for i in range(len(nested_list)):
            if isinstance(nested_list[i], list):
                nested_list[i] = self._pad_nested_list(nested_list[i], target_dims, current_level + 1, pad_value)
    return nested_list
def _create_empty_nested_structure(self, dims, pad_value):
"""
Create an empty nested structure with given dimensions filled with pad_value.
Args:
dims (`list`):
The dimensions of the nested structure.
pad_value (`int`):
The value to fill the structure with.
"""
if len(dims) == 1:
return [pad_value] * dims[0]
else:
return [self._create_empty_nested_structure(dims[1:], pad_value) for _ in range(dims[0])]
def _get_nesting_level(self, input_list):
"""
Get the nesting level of a list structure.
Args:
input_list (`list`):
The list to get the nesting level of.
"""
if isinstance(input_list, list):
if len(input_list) == 0:
return 1
return 1 + self._get_nesting_level(input_list[0])
elif isinstance(input_list, (np.ndarray, torch.Tensor)):
# For arrays/tensors, the nesting level is the number of dimensions
return len(input_list.shape)
return 0
def _validate_single_input(
self,
data: torch.Tensor | np.ndarray | list,
expected_depth: int,
input_name: str,
expected_format: str,
expected_coord_size: int | None = None,
) -> list:
"""
Validate a single input by ensuring proper nesting and raising an error if the input is not valid.
Args:
data (`torch.Tensor`, `np.ndarray`, or `list`):
Input data to process.
expected_depth (`int`):
Expected nesting depth.
input_name (`str`):
Name of the input for error messages.
expected_format (`str`):
The expected format of the input.
expected_coord_size (`int`, *optional*):
Expected coordinate size (2 for points, 4 for boxes, None for labels).
.
"""
if data is None:
return None
# Handle tensors and numpy arrays first
if isinstance(data, (torch.Tensor, np.ndarray)):
# For tensors/arrays, we can directly check the number of dimensions
if data.ndim != expected_depth:
raise ValueError(
f"Input {input_name} must be a tensor/array with {expected_depth} dimensions. The expected nesting format is {expected_format}. Got {data.ndim} dimensions."
)
elif expected_coord_size is not None:
if data.shape[-1] != expected_coord_size:
raise ValueError(
f"Input {input_name} must be a tensor/array with {expected_coord_size} as the last dimension, got {data.shape[-1]}."
)
return self._convert_to_nested_list(data, expected_depth)
# Handle nested lists
if isinstance(data, list):
current_depth = self._get_nesting_level(data)
if current_depth != expected_depth:
raise ValueError(
f"Input {input_name} must be a nested list with {expected_depth} levels. The expected nesting format is {expected_format}. Got {current_depth} levels."
)
return self._convert_to_nested_list(data, expected_depth)
def _normalize_tensor_coordinates(self, tensor, original_sizes, is_bounding_box=False, preserve_padding=False):
    """
    Helper method to normalize coordinates in a tensor across multiple images.

    Note: mutates `tensor` in place (per-image rows are overwritten); nothing is returned.

    Args:
        tensor (`torch.Tensor`):
            Input tensor with coordinates; first dimension indexes images.
        original_sizes (`list`):
            Original (height, width) sizes, one per image.
        is_bounding_box (`bool`, *optional*, defaults to `False`):
            Whether coordinates are bounding boxes.
        preserve_padding (`bool`, *optional*, defaults to `False`):
            Whether to preserve padding values (for points).
    """
    if preserve_padding:
        # For points: a coordinate counts as padding only when every component of the
        # pair equals point_pad_value; those entries are left untouched below.
        mask = tensor != self.point_pad_value
        coord_mask = mask.all(dim=-1, keepdim=True)
    for img_idx in range(len(original_sizes)):
        if img_idx < tensor.shape[0]:
            # NOTE(review): the fallback to original_sizes[0] is unreachable — img_idx
            # always ranges over len(original_sizes).
            original_size = original_sizes[img_idx] if img_idx < len(original_sizes) else original_sizes[0]
            normalized_coords = self._normalize_coordinates(
                self.target_size, tensor[img_idx], original_size, is_bounding_box=is_bounding_box
            )
            if preserve_padding:
                # Only update non-padded values
                img_mask = coord_mask[img_idx]
                tensor[img_idx] = torch.where(
                    img_mask.expand_as(tensor[img_idx]), normalized_coords, tensor[img_idx]
                )
            else:
                tensor[img_idx] = normalized_coords
def post_process_masks(
    self,
    masks,
    original_sizes,
    mask_threshold=0.0,
    binarize=True,
    max_hole_area=0.0,
    max_sprinkle_area=0.0,
    apply_non_overlapping_constraints=False,
    **kwargs,
):
    """
    Remove padding and upscale masks to the original image size.

    This is a thin wrapper: all arguments are forwarded positionally (plus `**kwargs`)
    to `self.image_processor.post_process_masks`.

    Args:
        masks (`Union[List[torch.Tensor], List[np.ndarray]]`):
            Batched masks from the mask_decoder in (batch_size, num_channels, height, width) format.
        original_sizes (`Union[torch.Tensor, List[Tuple[int,int]]]`):
            The original sizes of each image before it was resized to the model's expected input shape, in (height,
            width) format.
        mask_threshold (`float`, *optional*, defaults to 0.0):
            Threshold for binarization and post-processing operations.
        binarize (`bool`, *optional*, defaults to `True`):
            Whether to binarize the masks.
        max_hole_area (`float`, *optional*, defaults to 0.0):
            The maximum area of a hole to fill.
        max_sprinkle_area (`float`, *optional*, defaults to 0.0):
            The maximum area of a sprinkle to fill.
        apply_non_overlapping_constraints (`bool`, *optional*, defaults to `False`):
            Whether to apply non-overlapping constraints to the masks.

    Returns:
        (`torch.Tensor`): Batched masks in batch_size, num_channels, height, width) format, where (height, width)
        is given by original_size.
    """
    return self.image_processor.post_process_masks(
        masks,
        original_sizes,
        mask_threshold,
        binarize,
        max_hole_area,
        max_sprinkle_area,
        apply_non_overlapping_constraints,
        **kwargs,
    )
@property
def model_input_names(self):
    """Inputs the model expects: the image processor's input names plus `original_sizes`."""
    return [*self.image_processor.model_input_names, "original_sizes"]
# Public names exported by this module.
__all__ = ["Sam2Processor"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/sam2/processing_sam2.py",
"license": "Apache License 2.0",
"lines": 448,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/sam2_video/convert_sam2_video_to_hf.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Convert SAM checkpoints from the original repository.
URL: https://github.com/facebookresearch/segment-anything-2.
"""
import argparse
import re
from io import BytesIO
import httpx
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
Sam2HieraDetConfig,
Sam2ImageProcessorFast,
Sam2VideoConfig,
Sam2VideoMaskDecoderConfig,
Sam2VideoModel,
Sam2VideoProcessor,
Sam2VideoPromptEncoderConfig,
Sam2VideoVideoProcessor,
Sam2VisionConfig,
)
def get_config(model_name):
    """
    Build the `Sam2VideoConfig` matching an original SAM2 checkpoint variant.

    Args:
        model_name (`str`): Checkpoint name, e.g. `"sam2.1_hiera_tiny"`; the Hiera size
            substring selects the vision backbone, and a `"sam2.1"` prefix enables the
            SAM 2.1-only features.

    Returns:
        `Sam2VideoConfig`: The assembled configuration.

    Raises:
        ValueError: If `model_name` does not name a known Hiera variant. (Previously an
            unknown name crashed later with an unbound `vision_config` NameError.)
    """
    if "hiera_tiny" in model_name:
        hiera_det_config = Sam2HieraDetConfig()
        vision_config = Sam2VisionConfig(backbone_config=hiera_det_config)
    elif "hiera_small" in model_name:
        hiera_det_config = Sam2HieraDetConfig(blocks_per_stage=[1, 2, 11, 2], global_attention_blocks=[7, 10, 13])
        vision_config = Sam2VisionConfig(backbone_config=hiera_det_config)
    elif "hiera_base_plus" in model_name:
        hiera_det_config = Sam2HieraDetConfig(
            hidden_size=112,
            embed_dim_per_stage=[112, 224, 448, 896],
            num_attention_heads_per_stage=[2, 4, 8, 16],
            blocks_per_stage=[2, 3, 16, 3],
            global_attention_blocks=[12, 16, 20],
            window_positional_embedding_background_size=(14, 14),
        )
        vision_config = Sam2VisionConfig(
            backbone_config=hiera_det_config,
            backbone_channel_list=[896, 448, 224, 112],
        )
    elif "hiera_large" in model_name:
        hiera_det_config = Sam2HieraDetConfig(
            hidden_size=144,
            embed_dim_per_stage=[144, 288, 576, 1152],
            num_attention_heads_per_stage=[2, 4, 8, 16],
            blocks_per_stage=[2, 6, 36, 4],
            global_attention_blocks=[23, 33, 43],
            window_positional_embedding_background_size=(7, 7),
            window_size_per_stage=[8, 4, 16, 8],
        )
        vision_config = Sam2VisionConfig(
            backbone_config=hiera_det_config,
            backbone_channel_list=[1152, 576, 288, 144],
        )
    else:
        raise ValueError(f"Unknown model name: {model_name}. Expected a name containing a known Hiera size.")
    prompt_encoder_config = Sam2VideoPromptEncoderConfig()
    mask_decoder_config = Sam2VideoMaskDecoderConfig()
    # SAM 2.1 checkpoints carry two extra features absent from the original SAM 2 release.
    if "sam2.1" in model_name:
        enable_temporal_pos_encoding_for_object_pointers = True
        enable_occlusion_spatial_embedding = True
    else:
        enable_temporal_pos_encoding_for_object_pointers = False
        enable_occlusion_spatial_embedding = False
    config = Sam2VideoConfig(
        vision_config=vision_config,
        prompt_encoder_config=prompt_encoder_config,
        mask_decoder_config=mask_decoder_config,
        enable_temporal_pos_encoding_for_object_pointers=enable_temporal_pos_encoding_for_object_pointers,
        enable_occlusion_spatial_embedding=enable_occlusion_spatial_embedding,
    )
    return config
# Substring -> replacement pairs applied (in order) to every original checkpoint key.
# Order matters: earlier, more specific substrings must fire before broader ones
# (e.g. "image_encoder.neck" before "image_encoder").
KEYS_TO_MODIFY_MAPPING = {
    "iou_prediction_head.layers.0": "iou_prediction_head.proj_in",
    "iou_prediction_head.layers.1": "iou_prediction_head.layers.0",
    "iou_prediction_head.layers.2": "iou_prediction_head.proj_out",
    "mask_decoder.output_upscaling.0": "mask_decoder.upscale_conv1",
    "mask_decoder.output_upscaling.1": "mask_decoder.upscale_layer_norm",
    "mask_decoder.output_upscaling.3": "mask_decoder.upscale_conv2",
    "mask_downscaling.0": "mask_embed.conv1",
    "mask_downscaling.1": "mask_embed.layer_norm1",
    "mask_downscaling.3": "mask_embed.conv2",
    "mask_downscaling.4": "mask_embed.layer_norm2",
    "mask_downscaling.6": "mask_embed.conv3",
    "dwconv": "depthwise_conv",
    "pwconv": "pointwise_conv",
    "fuser": "memory_fuser",
    "point_embeddings": "point_embed",
    "pe_layer.positional_encoding_gaussian_matrix": "shared_embedding.positional_embedding",
    "obj_ptr_tpos_proj": "temporal_positional_encoding_projection_layer",
    "no_obj_embed_spatial": "occlusion_spatial_embedding_parameter",
    "sam_prompt_encoder": "prompt_encoder",
    "sam_mask_decoder": "mask_decoder",
    "maskmem_tpos_enc": "memory_temporal_positional_encoding",
    "gamma": "scale",
    "image_encoder.neck": "vision_encoder.neck",
    "image_encoder": "vision_encoder.backbone",
    "neck.0": "neck.conv1",
    "neck.1": "neck.layer_norm1",
    "neck.2": "neck.conv2",
    "neck.3": "neck.layer_norm2",
    "pix_feat_proj": "feature_projection",
    "patch_embed.proj": "patch_embed.projection",
    "no_mem_embed": "no_memory_embedding",
    "no_mem_pos_enc": "no_memory_positional_encoding",
    "obj_ptr": "object_pointer",
    ".norm": ".layer_norm",
    "trunk.": "",
    "out_proj": "o_proj",
}


def replace_keys(state_dict, config):
    """
    Rename original SAM2 checkpoint keys to the HF naming scheme.

    First applies the substring mapping above, then a set of regex-based renumberings
    (original sequential `layers.N` indices become `proj_in` / `layers.N-1` / `proj_out`),
    and finally duplicates/merges the prompt-encoder embeddings the HF model expects.

    Args:
        state_dict (`dict`): Original checkpoint state dict.
        config: Model config (unused; kept for interface compatibility).

    Returns:
        `dict`: The renamed state dict.
    """
    model_state_dict = {}
    output_hypernetworks_mlps_pattern = r".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"
    output_mask_decoder_mlps_pattern = r"mask_decoder.transformer.layers.(\d+).mlp.layers.(\d+).*"
    output_mask_decoder_score_head_pattern = r"mask_decoder.pred_obj_score_head.layers.(\d+).*"
    output_vision_encoder_mlps_pattern = r"vision_encoder.backbone.blocks.(\d+).mlp.layers.(\d+).*"
    output_vision_encoder_neck_pattern = r"vision_encoder.neck.convs.(\d+).conv"
    output_memory_encoder_projection_pattern = r"memory_encoder.o_proj.*"
    output_object_pointer_proj_pattern = r"object_pointer_proj.layers.(\d+).*"
    output_memory_encoder_mask_downsampler_pattern = r"memory_encoder.mask_downsampler.encoder.(\d+).*"
    for key, value in state_dict.items():
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)
        # Each pattern is matched once (walrus) instead of twice as before.
        # vision_encoder.blocks.0.mlp.layers.1.weight -> vision_encoder.blocks.0.mlp.proj_out.weight
        if match := re.match(output_vision_encoder_mlps_pattern, key):
            layer_nb = int(match.group(2))
            if layer_nb == 0:
                key = key.replace("layers.0", "proj_in")
            elif layer_nb == 1:
                key = key.replace("layers.1", "proj_out")
        # mask_decoder.transformer.layers.0.mlp.layers.1.weight -> mask_decoder.transformer.layers.1.mlp.proj_out.weight
        if match := re.match(output_mask_decoder_mlps_pattern, key):
            layer_nb = int(match.group(2))
            if layer_nb == 0:
                key = key.replace("mlp.layers.0", "mlp.proj_in")
            elif layer_nb == 1:
                key = key.replace("mlp.layers.1", "mlp.proj_out")
        # mask_decoder.pred_obj_score_head.layers.1.weight -> mask_decoder.pred_obj_score_head.proj_in.weight
        if match := re.match(output_mask_decoder_score_head_pattern, key):
            layer_nb = int(match.group(1))
            if layer_nb == 0:
                key = key.replace("layers.0", "proj_in")
            elif layer_nb == 1:
                key = key.replace("layers.1", "layers.0")
            elif layer_nb == 2:
                key = key.replace("layers.2", "proj_out")
        if match := re.match(output_hypernetworks_mlps_pattern, key):
            layer_nb = int(match.group(2))
            if layer_nb == 0:
                key = key.replace("layers.0", "proj_in")
            elif layer_nb == 1:
                key = key.replace("layers.1", "layers.0")
            elif layer_nb == 2:
                key = key.replace("layers.2", "proj_out")
        # vision_encoder.neck.convs.1.conv.bias -> vision_encoder.neck.convs.1.bias
        if re.match(output_vision_encoder_neck_pattern, key):
            key = key.replace(".conv.", ".")
        # memory_encoder.o_proj.weight -> memory_encoder.projection.weight
        if re.match(output_memory_encoder_projection_pattern, key):
            key = key.replace(".o_proj.", ".projection.")
        if match := re.match(output_object_pointer_proj_pattern, key):
            layer_nb = int(match.group(1))
            if layer_nb == 0:
                key = key.replace("layers.0", "proj_in")
            elif layer_nb == 1:
                key = key.replace("layers.1", "layers.0")
            elif layer_nb == 2:
                key = key.replace("layers.2", "proj_out")
        if match := re.match(output_memory_encoder_mask_downsampler_pattern, key):
            layer_nb = int(match.group(1))
            # The flat 13-layer Sequential becomes (conv, layer_norm, act) triples plus a final conv.
            if layer_nb == 12:
                key = key.replace(f"encoder.{layer_nb}", "final_conv")
            elif layer_nb % 3 == 0:
                key = key.replace(f"encoder.{layer_nb}", f"layers.{layer_nb // 3}.conv")
            elif layer_nb % 3 == 1:
                key = key.replace(f"encoder.{layer_nb}", f"layers.{layer_nb // 3}.layer_norm")
        model_state_dict[key] = value
    # The HF model shares the positional embedding at the top level as well.
    model_state_dict["shared_image_embedding.positional_embedding"] = model_state_dict[
        "prompt_encoder.shared_embedding.positional_embedding"
    ]
    # The four separate point embeddings are concatenated into a single embedding weight.
    model_state_dict["prompt_encoder.point_embed.weight"] = torch.cat(
        [model_state_dict.pop(f"prompt_encoder.point_embed.{i}.weight") for i in range(4)],
        dim=0,
    )
    return model_state_dict
def convert_sam2_checkpoint(model_name, checkpoint_path, pytorch_dump_folder, push_to_hub):
    """
    Convert an original SAM2 video checkpoint to the HF format and sanity-check its outputs.

    Args:
        model_name (`str`): Supported checkpoint name, e.g. `"sam2.1_hiera_tiny"`.
        checkpoint_path (`str`): Path to the original `.pt` checkpoint.
        pytorch_dump_folder (`str`): Folder to save the converted model/processor (skipped if `None`).
        push_to_hub (`bool`): Whether to push the converted artifacts to the Hub.

    Raises:
        ValueError: If `model_name` is not a supported checkpoint.
    """
    # Reference IoU scores per checkpoint for the fixed test image/prompt below.
    expected_iou_scores = {
        "sam2.1_hiera_tiny": [0.0316, 0.9647, 0.1029],
        "sam2.1_hiera_small": [0.9664, 0.1494, 0.0456],
        "sam2.1_hiera_base_plus": [0.0361, 0.9775, 0.1307],
        "sam2.1_hiera_large": [0.9648, 0.0371, 0.1898],
        "sam2_hiera_tiny": [0.0439, 0.9567, 0.1415],
        "sam2_hiera_small": [0.9593, 0.1633, 0.0392],
        "sam2_hiera_base_plus": [0.0423, 0.9815, 0.0897],
        "sam2_hiera_large": [0.9514, 0.0535, 0.1787],
    }
    # Fail fast, before loading anything heavy.
    if model_name not in expected_iou_scores:
        raise ValueError(f"Model {model_name} not supported")
    config = get_config(model_name)
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    state_dict = replace_keys(state_dict, config)
    image_processor = Sam2ImageProcessorFast()
    video_processor = Sam2VideoVideoProcessor()
    processor = Sam2VideoProcessor(image_processor=image_processor, video_processor=video_processor)
    hf_model = Sam2VideoModel(config)
    hf_model.eval()
    device = "cuda" if torch.cuda.is_available() else "cpu"
    missing_keys, unexpected_keys = hf_model.load_state_dict(state_dict, strict=True)
    hf_model = hf_model.to(device)
    print("Missing keys:", missing_keys)
    print("Unexpected keys:", unexpected_keys)
    # Run a single-frame forward pass on a known image with a known point prompt.
    url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
    with httpx.stream("GET", url) as response:
        raw_image = Image.open(BytesIO(response.read())).convert("RGB")
    input_points = [[[[1000, 600]]]]
    input_labels = [[[1]]]
    inputs = processor(
        images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
    ).to(device)
    with torch.no_grad():
        output = hf_model._single_frame_forward(**inputs)
    scores = output.iou_scores.squeeze()
    # Compare on the active device: the previous per-model asserts unconditionally called
    # .cuda() on the reference tensors, crashing on CPU-only hosts even though `device`
    # falls back to "cpu".
    assert torch.allclose(scores, torch.tensor(expected_iou_scores[model_name], device=device), atol=1e-2)
    if pytorch_dump_folder is not None:
        processor.save_pretrained(pytorch_dump_folder)
        hf_model.save_pretrained(pytorch_dump_folder)
    if push_to_hub:
        repo_id = f"yonigozlan/{pytorch_dump_folder.split('/')[-1]}"
        processor.push_to_hub(repo_id)
        hf_model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # All checkpoint variants this script knows how to convert (SAM 2 and SAM 2.1, four Hiera sizes each).
    choices = [
        "sam2.1_hiera_tiny",
        "sam2.1_hiera_small",
        "sam2.1_hiera_base_plus",
        "sam2.1_hiera_large",
        "sam2_hiera_tiny",
        "sam2_hiera_small",
        "sam2_hiera_base_plus",
        "sam2_hiera_large",
    ]
    parser.add_argument(
        "--model_name",
        default="sam2.1_hiera_tiny",
        choices=choices,
        type=str,
        help="Name of the original model to convert",
    )
    parser.add_argument(
        "--checkpoint_path",
        type=str,
        required=False,
        help="Path to the original checkpoint",
    )
    parser.add_argument("--pytorch_dump_folder_path", default="", type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub after converting",
    )
    args = parser.parse_args()
    # Hub repos use dashes (e.g. facebook/sam2.1-hiera-tiny) while the checkpoint file keeps underscores.
    hf_model_name = args.model_name.replace("_", "-")
    # Download the original checkpoint from the Hub unless a local path was given.
    checkpoint_path = (
        hf_hub_download(f"facebook/{hf_model_name}", f"{args.model_name.lower()}.pt")
        if args.checkpoint_path is None
        else args.checkpoint_path
    )
    convert_sam2_checkpoint(args.model_name, checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/sam2_video/convert_sam2_video_to_hf.py",
"license": "Apache License 2.0",
"lines": 289,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/sam2_video/modular_sam2_video.py | # Copyright 2025 The Meta AI Authors and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch SAM 2 model."""
import math
from collections import OrderedDict
from collections.abc import Callable, Iterator
from dataclasses import dataclass
from typing import Any, Union
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from tqdm import tqdm
from ... import initialization as init
from ...activations import ACT2FN
from ...configuration_utils import PreTrainedConfig
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import ProcessorMixin, Unpack
from ...utils import (
ModelOutput,
auto_docstring,
logging,
)
from ...utils.generic import TransformersKwargs
from ...utils.output_capturing import OutputRecorder
from ...video_utils import VideoInput
from ..auto import CONFIG_MAPPING, AutoConfig
from ..sam2.configuration_sam2 import (
Sam2MaskDecoderConfig,
Sam2PromptEncoderConfig,
)
from ..sam2.modeling_sam2 import (
Sam2FeedForward,
Sam2ImageSegmentationOutput,
Sam2LayerNorm,
Sam2Model,
Sam2PositionalEmbedding,
Sam2SinePositionEmbedding,
Sam2TwoWayAttentionBlock,
eager_attention_forward,
)
from ..sam2.processing_sam2 import Sam2Processor
logger = logging.get_logger(__name__)
class Sam2VideoPromptEncoderConfig(Sam2PromptEncoderConfig):
    """Prompt-encoder config for SAM2 video, identical to [`Sam2PromptEncoderConfig`].

    NOTE(review): kept as a distinct empty subclass — presumably so the video model
    exposes its own config class name; confirm against the modular conversion tooling.
    """

    pass
class Sam2VideoMaskDecoderConfig(Sam2MaskDecoderConfig):
    """Mask-decoder config for SAM2 video, identical to [`Sam2MaskDecoderConfig`].

    NOTE(review): kept as a distinct empty subclass — presumably so the video model
    exposes its own config class name; confirm against the modular conversion tooling.
    """

    pass
class Sam2VideoConfig(PreTrainedConfig):
r"""
[`Sam2Config`] is the configuration class to store the configuration of a [`Sam2Model`]. It is used to instantiate a
SAM2 model according to the specified arguments, defining the memory attention, memory encoder, and image encoder
configs. Instantiating a configuration defaults will yield a similar configuration to that of the SAM 2.1 Hiera-tiny
[facebook/sam2.1-hiera-tiny](https://huggingface.co/facebook/sam2.1-hiera-tiny) architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vision_config (Union[`dict`, `Sam2VisionConfig`], *optional*):
Dictionary of configuration options used to initialize [`Sam2VisionConfig`].
prompt_encoder_config (Union[`dict`, `Sam2PromptEncoderConfig`], *optional*):
Dictionary of configuration options used to initialize [`Sam2PromptEncoderConfig`].
mask_decoder_config (Union[`dict`, `Sam2MaskDecoderConfig`], *optional*):
Dictionary of configuration options used to initialize [`Sam2MaskDecoderConfig`].
initializer_range (`float`, *optional*, defaults to 0.02):
Standard deviation for parameter initialization.
num_maskmem (`int`, *optional*, defaults to 7):
The number of memory slots for the mask memory.
image_size (`int`, *optional*, defaults to 1024):
The size of the input images.
sigmoid_scale_for_mem_enc (`float`, *optional*, defaults to 20.0):
Scale factor for the sigmoid function in the memory encoder.
sigmoid_bias_for_mem_enc (`float`, *optional*, defaults to -10.0):
Bias for the sigmoid function in the memory encoder.
enable_occlusion_spatial_embedding (`bool`, *optional*, defaults to `True`):
Whether to enable spatial embedding for occlusions.
multimask_output_in_sam (`bool`, *optional*, defaults to `True`):
Whether to output multiple masks from the SAM head.
multimask_min_pt_num (`int`, *optional*, defaults to 0):
The minimum number of points to trigger multimask output.
multimask_max_pt_num (`int`, *optional*, defaults to 1):
The maximum number of points to trigger multimask output.
multimask_output_for_tracking (`bool`, *optional*, defaults to `True`):
Whether to use multimask output for tracking.
max_object_pointers_in_encoder (`int`, *optional*, defaults to 16):
The maximum number of object pointers in the encoder.
max_cond_frame_num (`int`, *optional*, defaults to -1):
Maximum number of conditioning frames to use in memory attention. Set to -1 to use all conditioning frames.
enable_temporal_pos_encoding_for_object_pointers (`bool`, *optional*, defaults to `True`):
Whether to enable temporal positional encoding for object pointers.
memory_attention_hidden_size (`int`, *optional*, defaults to 256):
Dimensionality of the memory attention hidden states.
memory_attention_num_layers (`int`, *optional*, defaults to 4):
The number of layers in the memory attention module.
memory_attention_num_attention_heads (`int`, *optional*, defaults to 1):
Number of attention heads for each attention layer in the memory attention.
memory_attention_downsample_rate (`int`, *optional*, defaults to 1):
The downsample rate for the attention layers.
memory_attention_feed_forward_hidden_size (`int`, *optional*, defaults to 2048):
The dimension of the feedforward network in the memory attention module.
memory_attention_feed_forward_hidden_act (`str`, *optional*, defaults to `"relu"`):
The non-linear activation function in the feedforward network in the memory attention module.
memory_attention_dropout (`float`, *optional*, defaults to 0.1):
The dropout rate for the memory attention module.
memory_attention_rope_theta (`float`, *optional*, defaults to 10000):
The Rope theta parameter.
memory_attention_rope_feat_sizes (`list[int]`, *optional*, defaults to `[64, 64]`):
The feature sizes for the Rope positional encoding.
memory_attention_rope_dropout (`float`, *optional*, defaults to 0.1):
The dropout rate for the Rope positional encoding.
memory_encoder_hidden_size (`int`, *optional*, defaults to 256):
Dimensionality of the memory encoder hidden states.
memory_encoder_output_channels (`int`, *optional*, defaults to 64):
The number of output channels for the memory encoder.
mask_downsampler_embed_dim (`int`, *optional*, defaults to 256):
The dimension of the mask downsampler embedding.
mask_downsampler_kernel_size (`int`, *optional*, defaults to 3):
The kernel size for the mask downsampler.
mask_downsampler_stride (`int`, *optional*, defaults to 2):
The stride for the mask downsampler.
mask_downsampler_padding (`int`, *optional*, defaults to 1):
The padding for the mask downsampler.
mask_downsampler_total_stride (`int`, *optional*, defaults to 16):
The total stride for the mask downsampler.
mask_downsampler_hidden_act (`str`, *optional*, defaults to `"gelu"`):
The non-linear activation function in the mask downsampler.
memory_fuser_num_layers (`int`, *optional*, defaults to 2):
The number of layers in the memory fuser.
memory_fuser_embed_dim (`int`, *optional*, defaults to 256):
The dimension of the embedding layer in the memory fuser.
memory_fuser_intermediate_dim (`int`, *optional*, defaults to 1024):
The dimension of the intermediate layer in the memory fuser.
memory_fuser_kernel_size (`int`, *optional*, defaults to 7):
The kernel size for the memory fuser.
memory_fuser_padding (`int`, *optional*, defaults to 3):
The padding for the memory fuser.
memory_fuser_layer_scale_init_value (`float`, *optional*, defaults to 1e-06):
The initial value for the layer scale in the memory fuser.
memory_fuser_hidden_act (`str`, *optional*, defaults to `"gelu"`):
The non-linear activation function in the memory fuser.
kwargs (*optional*):
Dictionary of keyword arguments.
Example:
```python
>>> from transformers import (
... Sam2VisionConfig,
... Sam2PromptEncoderConfig,
... Sam2MaskDecoderConfig,
... Sam2Model,
... )
>>> # Initializing a Sam2Config with `"facebook/sam2.1_hiera_tiny"` style configuration
>>> configuration = Sam2config()
>>> # Initializing a Sam2Model (with random weights) from the `"facebook/sam2.1_hiera_tiny"` style configuration
>>> model = Sam2Model(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
>>> # We can also initialize a Sam2Config from a Sam2VisionConfig, Sam2PromptEncoderConfig, and Sam2MaskDecoderConfig
>>> # Initializing SAM2 vision encoder, memory attention, and memory encoder configurations
>>> vision_config = Sam2VisionConfig()
>>> prompt_encoder_config = Sam2PromptEncoderConfig()
>>> mask_decoder_config = Sam2MaskDecoderConfig()
>>> config = Sam2Config(vision_config, prompt_encoder_config, mask_decoder_config)
```"""
model_type = "sam2_video"
sub_configs = {
"vision_config": AutoConfig,
"prompt_encoder_config": Sam2VideoPromptEncoderConfig,
"mask_decoder_config": Sam2VideoMaskDecoderConfig,
}
    def __init__(
        self,
        vision_config=None,
        prompt_encoder_config=None,
        mask_decoder_config=None,
        initializer_range=0.02,
        num_maskmem=7,
        image_size=1024,
        sigmoid_scale_for_mem_enc=20.0,
        sigmoid_bias_for_mem_enc=-10.0,
        enable_occlusion_spatial_embedding=True,
        multimask_output_in_sam=True,
        multimask_min_pt_num=0,
        multimask_max_pt_num=1,
        multimask_output_for_tracking=True,
        max_object_pointers_in_encoder=16,
        max_cond_frame_num=-1,
        enable_temporal_pos_encoding_for_object_pointers=True,
        # memory attention
        memory_attention_hidden_size=256,
        memory_attention_num_layers=4,
        memory_attention_num_attention_heads=1,
        memory_attention_downsample_rate=1,
        memory_attention_feed_forward_hidden_size=2048,
        memory_attention_feed_forward_hidden_act="relu",
        memory_attention_dropout=0.1,
        memory_attention_rope_theta=10000,
        memory_attention_rope_feat_sizes=None,
        memory_attention_rope_dropout=0.1,
        # memory encoder
        memory_encoder_hidden_size=256,
        memory_encoder_output_channels=64,
        mask_downsampler_embed_dim=256,
        mask_downsampler_kernel_size=3,
        mask_downsampler_stride=2,
        mask_downsampler_padding=1,
        mask_downsampler_total_stride=16,
        mask_downsampler_hidden_act="gelu",
        memory_fuser_num_layers=2,
        memory_fuser_embed_dim=256,
        memory_fuser_intermediate_dim=1024,
        memory_fuser_kernel_size=7,
        memory_fuser_padding=3,
        memory_fuser_layer_scale_init_value=1e-6,
        memory_fuser_hidden_act="gelu",
        **kwargs,
    ):
        super().__init__(**kwargs)
        # Missing sub-configs default to empty dicts so each sub-config class falls back to its own defaults.
        vision_config = vision_config if vision_config is not None else {}
        prompt_encoder_config = prompt_encoder_config if prompt_encoder_config is not None else {}
        mask_decoder_config = mask_decoder_config if mask_decoder_config is not None else {}
        # Default RoPE feature-map size for memory attention (a 64x64 spatial grid).
        memory_attention_rope_feat_sizes = (
            [64, 64] if memory_attention_rope_feat_sizes is None else memory_attention_rope_feat_sizes
        )
        if isinstance(vision_config, dict):
            # The vision backbone is resolved through the auto-config mapping; defaults to the SAM2 vision model.
            vision_config["model_type"] = vision_config.get("model_type", "sam2_vision_model")
            vision_config = CONFIG_MAPPING[vision_config["model_type"]](**vision_config)
        # Normalize already-instantiated sub-configs back to dicts; they are re-instantiated below.
        if isinstance(prompt_encoder_config, Sam2VideoPromptEncoderConfig):
            prompt_encoder_config = prompt_encoder_config.to_dict()
        if isinstance(mask_decoder_config, Sam2VideoMaskDecoderConfig):
            mask_decoder_config = mask_decoder_config.to_dict()
        self.vision_config = vision_config
        self.prompt_encoder_config = Sam2VideoPromptEncoderConfig(**prompt_encoder_config)
        self.mask_decoder_config = Sam2VideoMaskDecoderConfig(**mask_decoder_config)
        self.initializer_range = initializer_range
        self.num_maskmem = num_maskmem  # default 1 input frame + 6 previous frames
        self.image_size = image_size
        self.sigmoid_scale_for_mem_enc = sigmoid_scale_for_mem_enc
        self.sigmoid_bias_for_mem_enc = sigmoid_bias_for_mem_enc
        self.multimask_output_in_sam = multimask_output_in_sam
        self.multimask_min_pt_num = multimask_min_pt_num
        self.multimask_max_pt_num = multimask_max_pt_num
        self.multimask_output_for_tracking = multimask_output_for_tracking
        self.max_object_pointers_in_encoder = max_object_pointers_in_encoder
        self.max_cond_frame_num = max_cond_frame_num
        # These two flags are True for sam2.1 checkpoints and False for sam2
        self.enable_occlusion_spatial_embedding = enable_occlusion_spatial_embedding
        self.enable_temporal_pos_encoding_for_object_pointers = enable_temporal_pos_encoding_for_object_pointers
        # memory attention
        self.memory_attention_hidden_size = memory_attention_hidden_size
        self.memory_attention_num_layers = memory_attention_num_layers
        self.memory_attention_num_attention_heads = memory_attention_num_attention_heads
        self.memory_attention_downsample_rate = memory_attention_downsample_rate
        self.memory_attention_feed_forward_hidden_size = memory_attention_feed_forward_hidden_size
        self.memory_attention_feed_forward_hidden_act = memory_attention_feed_forward_hidden_act
        self.memory_attention_dropout = memory_attention_dropout
        self.memory_attention_rope_theta = memory_attention_rope_theta
        self.memory_attention_rope_feat_sizes = memory_attention_rope_feat_sizes
        self.memory_attention_rope_dropout = memory_attention_rope_dropout
        # memory encoder
        self.memory_encoder_hidden_size = memory_encoder_hidden_size
        self.memory_encoder_output_channels = memory_encoder_output_channels
        self.mask_downsampler_embed_dim = mask_downsampler_embed_dim
        self.mask_downsampler_kernel_size = mask_downsampler_kernel_size
        self.mask_downsampler_stride = mask_downsampler_stride
        self.mask_downsampler_padding = mask_downsampler_padding
        self.mask_downsampler_total_stride = mask_downsampler_total_stride
        self.mask_downsampler_hidden_act = mask_downsampler_hidden_act
        self.memory_fuser_num_layers = memory_fuser_num_layers
        self.memory_fuser_embed_dim = memory_fuser_embed_dim
        self.memory_fuser_intermediate_dim = memory_fuser_intermediate_dim
        self.memory_fuser_kernel_size = memory_fuser_kernel_size
        self.memory_fuser_padding = memory_fuser_padding
        self.memory_fuser_layer_scale_init_value = memory_fuser_layer_scale_init_value
        self.memory_fuser_hidden_act = memory_fuser_hidden_act
class Sam2VideoInferenceCache:
"""Cache for vision features and model constants."""
def __init__(
self,
inference_device: torch.device | str = "cpu",
inference_state_device: torch.device | str = "cpu",
max_vision_features_cache_size: int = 1,
):
self.inference_device = inference_device
self.inference_state_device = inference_state_device
self.max_vision_features_cache_size = max_vision_features_cache_size
self._vision_features = {}
def cache_vision_features(self, frame_idx: int, features: dict):
"""Cache vision features with automatic device management."""
cached = {}
if len(self._vision_features) >= self.max_vision_features_cache_size:
# remove the oldest frame
self._vision_features.pop(min(self._vision_features.keys()))
for key, value in features.items():
if isinstance(value, torch.Tensor):
cached[key] = value.to(self.inference_state_device, non_blocking=True)
elif isinstance(value, (list, tuple)) and value and isinstance(value[0], torch.Tensor):
cached[key] = [v.to(self.inference_state_device, non_blocking=True) for v in value]
else:
cached[key] = value
self._vision_features[frame_idx] = cached
def get_vision_features(self, frame_idx: int) -> dict | None:
"""Get cached vision features, automatically moved to inference device."""
if frame_idx not in self._vision_features:
return None
cached = self._vision_features[frame_idx]
moved = {}
for key, value in cached.items():
if isinstance(value, torch.Tensor):
moved[key] = value.to(self.inference_device, non_blocking=True)
elif isinstance(value, (list, tuple)) and value and isinstance(value[0], torch.Tensor):
moved[key] = [v.to(self.inference_device, non_blocking=True) for v in value]
else:
moved[key] = value
return moved
def clear_all(self):
"""Clear all cached data."""
self._vision_features.clear()
class Sam2VideoInferenceSession:
r"""
Manages video inference session parameters, state and cache.
Args:
video (`torch.FloatTensor`, *optional*):
The video to process. No need to provide when streaming.
video_height (`int`, *optional*):
The height of the video.
video_width (`int`, *optional*):
The width of the video.
inference_device (`torch.device`, *optional*, defaults to `"cpu"`):
The device to use for inference.
inference_state_device (`torch.device`, *optional*, defaults to `"cpu"`):
The device to store the inference state on.
video_storage_device (`torch.device`, *optional*, defaults to `"cpu"`):
The device to store the video on.
dtype (`torch.dtype`, *optional*, defaults to `"float32"`):
The dtype to use for the video.
max_vision_features_cache_size (`int`, *optional*, defaults to 1):
The maximum number of vision features to cache.
"""
def __init__(
self,
video: torch.FloatTensor | None = None,
video_height: int | None = None,
video_width: int | None = None,
inference_device: torch.device | str = "cpu",
inference_state_device: torch.device | str = "cpu",
video_storage_device: torch.device | str = "cpu",
dtype: torch.dtype | str = "float32",
max_vision_features_cache_size: int = 1,
):
# store as a dictionary to avoid double memory allocation with torch.cat when adding new frames
self.processed_frames = (
dict(enumerate(video.to(video_storage_device, dtype=dtype))) if video is not None else None
)
self.video_height = video_height
self.video_width = video_width
self.inference_device = inference_device
self.inference_state_device = inference_state_device
self.video_storage_device = video_storage_device
self.dtype = dtype
self.max_vision_features_cache_size = max_vision_features_cache_size
# Cache for computed features
self.cache = Sam2VideoInferenceCache(
inference_device=self.inference_device,
inference_state_device=self.inference_state_device,
max_vision_features_cache_size=self.max_vision_features_cache_size,
)
# Persistent object tracking state
self._obj_id_to_idx = OrderedDict()
self._obj_idx_to_id = OrderedDict()
self.obj_ids = []
# Persistent user inputs
self.point_inputs_per_obj = {}
self.mask_inputs_per_obj = {}
# Persistent model outputs/history
self.output_dict_per_obj = {}
self.frames_tracked_per_obj = {}
# Session state flags
self.obj_with_new_inputs = []
@property
def num_frames(self) -> int | None:
return len(self.processed_frames) if self.processed_frames is not None else None
# Object management
def obj_id_to_idx(self, obj_id: int) -> int:
"""Map object ID to index, creating new entry if needed."""
obj_idx = self._obj_id_to_idx.get(obj_id, None)
if obj_idx is not None:
return obj_idx
obj_idx = len(self._obj_id_to_idx)
self._obj_id_to_idx[obj_id] = obj_idx
self._obj_idx_to_id[obj_idx] = obj_id
self.obj_ids = list(self._obj_id_to_idx)
self.point_inputs_per_obj[obj_idx] = {}
self.mask_inputs_per_obj[obj_idx] = {}
self.output_dict_per_obj[obj_idx] = {
"cond_frame_outputs": {},
"non_cond_frame_outputs": {},
}
self.frames_tracked_per_obj[obj_idx] = {}
return obj_idx
# Video Inference specific functions
def obj_idx_to_id(self, obj_idx: int) -> int:
"""Map model-side object index to client-side object id."""
return self._obj_idx_to_id[obj_idx]
def get_obj_num(self) -> int:
"""Get the total number of unique object ids received so far in this session."""
return len(self._obj_idx_to_id)
# Input management with device handling
def add_point_inputs(self, obj_idx: int, frame_idx: int, inputs: dict):
"""Add point inputs with automatic device placement."""
device_inputs = {}
for key, value in inputs.items():
if isinstance(value, torch.Tensor):
device_inputs[key] = value.to(self.inference_device, non_blocking=False)
else:
device_inputs[key] = value
self.point_inputs_per_obj[obj_idx][frame_idx] = device_inputs
def remove_point_inputs(self, obj_idx: int, frame_idx: int):
"""Remove point inputs."""
self.point_inputs_per_obj[obj_idx].pop(frame_idx, None)
def add_mask_inputs(self, obj_idx: int, frame_idx: int, inputs: torch.Tensor):
"""Add mask inputs with automatic device placement."""
self.mask_inputs_per_obj[obj_idx][frame_idx] = inputs.to(
self.inference_device, dtype=self.dtype, non_blocking=True
)
def remove_mask_inputs(self, obj_idx: int, frame_idx: int):
"""Remove mask inputs."""
self.mask_inputs_per_obj[obj_idx].pop(frame_idx, None)
# Output management with smart device placement
def store_output(
self,
obj_idx: int,
frame_idx: int,
output_key: str | None = None,
output_value: torch.Tensor | dict | None = None,
is_conditioning_frame: bool = True,
):
"""
Store output with smart device management.
If output_key is None, the output is stored as a dictionary.
Args:
obj_idx (int): The index of the object.
frame_idx (int): The index of the frame.
output_key (Optional[str]): The key of the output. If None, the output is stored as a dictionary.
output_value (Optional[Union[torch.Tensor, dict]]): The value of the output.
is_conditioning_frame (bool): Whether the output is for a conditioning frame.
"""
storage_key = "cond_frame_outputs" if is_conditioning_frame else "non_cond_frame_outputs"
if output_key is None and isinstance(output_value, dict):
self.output_dict_per_obj[obj_idx][storage_key][frame_idx] = {}
for key, value in output_value.items():
self.store_output(obj_idx, frame_idx, key, value, is_conditioning_frame)
return
# Device placement: small tensors stay on inference device, large ones go to inference state device
if output_key in ["object_pointer", "object_score_logits"]: # Small tensors
self.output_dict_per_obj[obj_idx][storage_key][frame_idx][output_key] = output_value
elif isinstance(output_value, torch.Tensor): # Large tensors like masks, features
self.output_dict_per_obj[obj_idx][storage_key][frame_idx][output_key] = output_value.to(
self.inference_state_device, non_blocking=True
)
else:
self.output_dict_per_obj[obj_idx][storage_key][frame_idx][output_key] = output_value
def get_output(
self,
obj_idx: int,
frame_idx: int,
output_key: str,
is_conditioning_frame: bool = True,
):
"""
Get output with smart device management.
Args:
obj_idx (int): The index of the object.
frame_idx (int): The index of the frame.
output_key (str): The key of the output.
is_conditioning_frame (bool): Whether the output is for a conditioning frame.
"""
storage_key = "cond_frame_outputs" if is_conditioning_frame else "non_cond_frame_outputs"
out = self.output_dict_per_obj[obj_idx][storage_key].get(frame_idx, None)
# move to inference device if needed
if out is None:
return None
value = out[output_key]
if isinstance(value, torch.Tensor):
value = value.to(self.inference_device, non_blocking=True)
return value
# Video frame management
def add_new_frame(self, pixel_values: torch.Tensor, frame_idx: int | None = None) -> int:
"""Add new frame with automatic device placement."""
pixel_values = pixel_values.to(self.video_storage_device, dtype=self.dtype, non_blocking=True)
if pixel_values.dim() == 4:
pixel_values = pixel_values.squeeze(0)
if frame_idx is None:
frame_idx = len(self.processed_frames) if self.processed_frames is not None else 0
if self.processed_frames is None:
self.processed_frames = {frame_idx: pixel_values}
else:
self.processed_frames[frame_idx] = pixel_values
return frame_idx
def get_frame(self, frame_idx: int) -> torch.Tensor:
"""Get frame from video."""
return self.processed_frames[frame_idx].to(self.inference_device, non_blocking=True)
def reset_tracking_data(self):
"""Reset tracking data but keep cache."""
self._obj_id_to_idx.clear()
self._obj_idx_to_id.clear()
self.obj_ids.clear()
self.point_inputs_per_obj.clear()
self.mask_inputs_per_obj.clear()
self.output_dict_per_obj.clear()
self.frames_tracked_per_obj.clear()
self.obj_with_new_inputs = []
# Note: cache and video data are preserved
def reset_inference_session(self):
"""Reset tracking data and cache."""
self._obj_id_to_idx.clear()
self._obj_idx_to_id.clear()
self.obj_ids.clear()
self.point_inputs_per_obj.clear()
self.mask_inputs_per_obj.clear()
self.output_dict_per_obj.clear()
self.frames_tracked_per_obj.clear()
self.obj_with_new_inputs = []
self.cache.clear_all()
class Sam2VideoProcessor(Sam2Processor):
    def __init__(
        self, image_processor, video_processor, target_size: int | None = None, point_pad_value: int = -10, **kwargs
    ):
        # Call ProcessorMixin.__init__ directly (not Sam2Processor.__init__) so the video
        # processor is registered alongside the image processor.
        ProcessorMixin.__init__(self, image_processor, video_processor, **kwargs)
        self.point_pad_value = point_pad_value
        # Default to the square side length expected by the image processor.
        self.target_size = target_size if target_size is not None else self.image_processor.size["height"]

    def init_video_session(
        self,
        video: VideoInput | None = None,
        inference_device: Union[str, "torch.device"] = "cpu",
        inference_state_device: Union[str, "torch.device"] | None = None,
        processing_device: Union[str, "torch.device"] | None = None,
        video_storage_device: Union[str, "torch.device"] | None = None,
        max_vision_features_cache_size: int = 1,
        dtype: torch.dtype = torch.float32,
    ):
        """
        Initializes a video session for inference.
        If a video is provided (async inference), the video will be processed and stored on the `video_storage_device`.

        Args:
            video (`VideoInput`, *optional*):
                The video to process. No need to provide when streaming.
            inference_device (`str` or `torch.device`, *optional*, defaults to "cpu"):
                The device to use for inference.
            inference_state_device (`str` or `torch.device`, *optional*):
                The device to store the inference state on.
            processing_device (`str` or `torch.device`, *optional*):
                The device to use for video processing.
            video_storage_device (`str` or `torch.device`, *optional*):
                The device to store the processed video frames on.
            max_vision_features_cache_size (`int`, *optional*, defaults to 1):
                The maximum number of vision features to cache.
            dtype (`torch.dtype`, *optional*, defaults to `torch.float32`):
                The torch dtype to use for the whole session.
        """
        # All auxiliary devices default to the inference device.
        video_storage_device = video_storage_device if video_storage_device is not None else inference_device
        inference_state_device = inference_state_device if inference_state_device is not None else inference_device
        processing_device = processing_device if processing_device is not None else inference_device
        pixel_values_video = None
        video_height = None
        video_width = None
        if video is not None:
            processed_video = self.video_processor(videos=video, device=processing_device, return_tensors="pt")
            pixel_values_video = processed_video.pixel_values_videos[0]
            video_height = processed_video.original_sizes[0][0]
            video_width = processed_video.original_sizes[0][1]
        inference_session = Sam2VideoInferenceSession(
            video=pixel_values_video,
            video_height=video_height,
            video_width=video_width,
            inference_device=inference_device,
            video_storage_device=video_storage_device,
            inference_state_device=inference_state_device,
            dtype=dtype,
            max_vision_features_cache_size=max_vision_features_cache_size,
        )
        return inference_session

    def add_inputs_to_inference_session(
        self,
        inference_session: Sam2VideoInferenceSession,
        frame_idx: int,
        obj_ids: list[int] | int,
        input_points: list[list[list[list[float]]]] | torch.Tensor | None = None,
        input_labels: list[list[list[int]]] | torch.Tensor | None = None,
        input_boxes: list[list[list[float]]] | torch.Tensor | None = None,
        input_masks: np.ndarray | torch.Tensor | list[np.ndarray] | list[torch.Tensor] | None = None,
        original_size: tuple[int, int] | None = None,
        clear_old_inputs: bool = True,
    ) -> Sam2VideoInferenceSession:
        """
        Process new points, boxes, or masks for a video frame and add them to the inference session.

        Args:
            inference_session (`Sam2VideoInferenceSession`):
                The inference session for the video.
            frame_idx (`int`):
                The index of the frame to process.
            obj_ids (`list[int]` or `int`):
                The object ID(s) to associate with the points or box.
                These can be any integers and can be reused later on to specify an object.
            input_points (`list[list[list[list[float]]]]`, `torch.Tensor`, *optional*):
                The points to add to the frame.
            input_labels (`list[list[list[int]]]`, `torch.Tensor`, *optional*):
                The labels for the points.
            input_boxes (`list[list[list[float]]]`, `torch.Tensor`, *optional*):
                The bounding boxes to add to the frame.
            input_masks (`np.ndarray`, `torch.Tensor`, `list[np.ndarray]`, or `list[torch.Tensor]`, *optional*):
                The mask(s) to add to the frame.
            original_size (`tuple[int, int]`, *optional*):
                The original size of the video. Provide when streaming.
            clear_old_inputs (`bool`, *optional*, defaults to `True`):
                Whether to clear old inputs for the object.
        """
        if isinstance(obj_ids, int):
            obj_ids = [obj_ids]
        # Validate inputs: points and labels come in pairs, masks are exclusive with points/boxes.
        if (input_points is not None) != (input_labels is not None):
            raise ValueError("points and labels must be provided together")
        if input_points is None and input_boxes is None and input_masks is None:
            raise ValueError("at least one of points, boxes, or masks must be provided as input")
        if input_masks is not None and (input_points is not None or input_boxes is not None):
            raise ValueError("masks cannot be provided together with points or boxes")

        if input_masks is not None:
            return self.process_new_mask_for_video_frame(inference_session, frame_idx, obj_ids, input_masks)
        else:
            return self.process_new_points_or_boxes_for_video_frame(
                inference_session,
                frame_idx,
                obj_ids,
                input_points,
                input_labels,
                input_boxes,
                original_size,
                clear_old_inputs,
            )

    def process_new_points_or_boxes_for_video_frame(
        self,
        inference_session: Sam2VideoInferenceSession,
        frame_idx: int,
        obj_ids: list[int],
        input_points: list[list[list[list[float]]]] | torch.Tensor | None = None,
        input_labels: list[list[list[int]]] | torch.Tensor | None = None,
        input_boxes: list[list[list[float]]] | torch.Tensor | None = None,
        original_size: tuple[int, int] | None = None,
        clear_old_inputs: bool = True,
    ) -> Sam2VideoInferenceSession:
        """
        Process new points or boxes for a video frame and add them to the inference session.

        Args:
            inference_session (`Sam2VideoInferenceSession`):
                The inference session for the video.
            frame_idx (`int`):
                The index of the frame to process.
            obj_ids (`list[int]`):
                The object ID(s) to associate with the points or box.
                These can be any integers and can be reused later on to specify an object.
            input_points (`list[list[list[list[float]]]]`, `torch.Tensor`, *optional*):
                The points to add to the frame.
            input_labels (`list[list[list[int]]]`, `torch.Tensor`, *optional*):
                The labels for the points.
            input_boxes (`list[list[list[float]]]`, `torch.Tensor`, *optional*):
                The bounding boxes to add to the frame.
            original_size (`tuple[int, int]`, *optional*):
                The original size of the video. Provide when streaming.
            clear_old_inputs (`bool`, *optional*, defaults to `True`):
                Whether to clear old inputs for the object.
        """
        if original_size is not None:
            inference_session.video_height = original_size[0]
            inference_session.video_width = original_size[1]
        elif inference_session.video_height is None or inference_session.video_width is None:
            raise ValueError("original_size must be provided when adding points or boxes on a first streamed frame")

        # Rescale the prompts from original video coordinates to the model's input resolution.
        original_sizes = [[inference_session.video_height, inference_session.video_width]]
        encoded_inputs = self(
            input_points=input_points,
            input_labels=input_labels,
            input_boxes=input_boxes,
            original_sizes=original_sizes,
            return_tensors="pt",
        )
        input_points = encoded_inputs.get("input_points", None)
        input_labels = encoded_inputs.get("input_labels", None)
        input_boxes = encoded_inputs.get("input_boxes", None)
        if input_points is not None:
            if input_points.shape[1] != len(obj_ids):
                raise ValueError(
                    f"Number of object ids ({len(obj_ids)}) does not match number of points ({input_points.shape[1]})"
                )
        else:
            # No points provided: use an empty per-object point tensor so box prompts can be concatenated.
            input_points = torch.zeros(1, len(obj_ids), 0, 2, dtype=torch.float32)
        if input_labels is not None:
            if input_labels.shape[1] != len(obj_ids):
                raise ValueError(
                    f"Number of object ids ({len(obj_ids)}) does not match number of labels ({input_labels.shape[1]})"
                )
        else:
            input_labels = torch.zeros(1, len(obj_ids), 0, dtype=torch.int32)
        if input_boxes is not None:
            if input_boxes.shape[1] != len(obj_ids):
                raise ValueError(
                    f"Number of object ids ({len(obj_ids)}) does not match number of boxes ({input_boxes.shape[1]})"
                )

        if input_boxes is not None:
            if not clear_old_inputs:
                raise ValueError(
                    "cannot add box without clearing old points, since "
                    "box prompt must be provided before any point prompt "
                    "(please use clear_old_points=True instead)"
                )
            # Encode each box as two corner points with the special labels 2 (top-left) and 3 (bottom-right),
            # prepended to the point prompts.
            box_coords = input_boxes.reshape(1, -1, 2, 2)
            box_labels = torch.tensor([2, 3], dtype=torch.int32).repeat(1, box_coords.shape[1], 1)
            input_points = torch.cat([box_coords, input_points], dim=2)
            input_labels = torch.cat([box_labels, input_labels], dim=2)

        for idx, obj_id in enumerate(obj_ids):
            obj_idx = inference_session.obj_id_to_idx(obj_id)
            input_points_for_obj = input_points[:, idx, :, :].unsqueeze(1)
            input_labels_for_obj = input_labels[:, idx, :].unsqueeze(1)
            # Handle existing points
            if not clear_old_inputs:
                existing_points = inference_session.point_inputs_per_obj[obj_idx].get(frame_idx, None)
                if existing_points is not None:
                    # Concatenate with existing points
                    input_points_for_obj = torch.cat(
                        [existing_points["point_coords"].to(input_points_for_obj.device), input_points_for_obj], dim=2
                    )
                    input_labels_for_obj = torch.cat(
                        [existing_points["point_labels"].to(input_labels_for_obj.device), input_labels_for_obj], dim=2
                    )
            point_inputs = {
                "point_coords": input_points_for_obj,
                "point_labels": input_labels_for_obj,
            }
            inference_session.add_point_inputs(obj_idx, frame_idx, point_inputs)
            inference_session.remove_mask_inputs(obj_idx, frame_idx)  # Clear any mask inputs

        inference_session.obj_with_new_inputs = obj_ids
        # Return the (mutated) session so this matches add_inputs_to_inference_session's declared return type.
        return inference_session

    def process_new_mask_for_video_frame(
        self,
        inference_session: Sam2VideoInferenceSession,
        frame_idx: int,
        obj_ids: list[int],
        input_masks: np.ndarray | torch.Tensor | list[np.ndarray] | list[torch.Tensor],
    ) -> Sam2VideoInferenceSession:
        """
        Add new mask to a frame and add them to the inference session.

        Args:
            inference_session (`Sam2VideoInferenceSession`):
                The inference session for the video.
            frame_idx (`int`):
                The index of the frame to process.
            obj_ids (`list[int]`):
                The object ID(s) to associate with the mask.
                These can be any integers and can be reused later on to specify an object.
            input_masks (`np.ndarray`, `torch.Tensor`, `list[np.ndarray]`, or `list[torch.Tensor]`):
                The mask(s) to add to the frame.
        """
        if not isinstance(input_masks, list):
            input_masks = [input_masks]
        if len(input_masks) != len(obj_ids):
            raise ValueError(
                f"Number of object ids ({len(obj_ids)}) does not match number of masks ({len(input_masks)})"
            )

        for obj_id, mask in zip(obj_ids, input_masks):
            obj_idx = inference_session.obj_id_to_idx(obj_id)
            device = inference_session.inference_device
            # Process mask
            if not isinstance(mask, torch.Tensor):
                mask = torch.tensor(mask, dtype=torch.bool)
            nb_dim = mask.dim()
            if nb_dim > 4 or nb_dim < 2:
                raise ValueError(f"Mask has an unsupported number of dimensions: {nb_dim}")
            # Pad leading dimensions up to 4D (batch, channel, H, W) as expected by interpolate.
            for i in range(4 - nb_dim):
                mask = mask.unsqueeze(0)
            mask_H, mask_W = mask.shape[-2:]
            mask_inputs_orig = mask.float().to(device)

            # Resize mask if needed
            if mask_H != self.target_size or mask_W != self.target_size:
                mask_inputs = torch.nn.functional.interpolate(
                    mask_inputs_orig,
                    size=(self.target_size, self.target_size),
                    align_corners=False,
                    mode="bilinear",
                    antialias=True,
                )
                # Re-binarize after the (antialiased) bilinear resize.
                mask_inputs = (mask_inputs >= 0.5).float()
            else:
                mask_inputs = mask_inputs_orig

            inference_session.add_mask_inputs(obj_idx, frame_idx, mask_inputs)
            inference_session.remove_point_inputs(obj_idx, frame_idx)  # Clear any point inputs

        inference_session.obj_with_new_inputs = obj_ids
        # Return the (mutated) session so this matches add_inputs_to_inference_session's declared return type.
        return inference_session
class Sam2VideoLayerNorm(Sam2LayerNorm):
    # Behaviorally identical to Sam2LayerNorm; subclassed only to keep the video model's class namespace.
    pass
class Sam2VideoPositionEmbeddingSine(Sam2SinePositionEmbedding):
    # Behaviorally identical to Sam2SinePositionEmbedding; subclassed only for the video model's class namespace.
    pass
class Sam2VideoTwoWayAttentionBlock(Sam2TwoWayAttentionBlock):
    # Behaviorally identical to Sam2TwoWayAttentionBlock; subclassed only for the video model's class namespace.
    pass
class Sam2VideoFeedForward(Sam2FeedForward):
    # Behaviorally identical to Sam2FeedForward; subclassed only for the video model's class namespace.
    pass
class Sam2VideoImageSegmentationOutput(Sam2ImageSegmentationOutput):
    r"""
    iou_scores (`torch.FloatTensor` of shape `(batch_size, point_batch_size, num_masks)`):
        The Intersection over Union (IoU) scores of the predicted masks.
    pred_masks (`torch.FloatTensor` of shape `(batch_size, point_batch_size, num_masks, height, width)`):
        The predicted low-resolution masks. This is an alias for `low_res_masks`. These masks need to be post-processed
        by the processor to be brought to the original image size.
    object_score_logits (`torch.FloatTensor` of shape `(batch_size, point_batch_size, 1)`):
        Logits for the object score, indicating if an object is present.
    image_embeddings (`tuple(torch.FloatTensor)`):
        The features from the FPN, which are used by the mask decoder. This is a tuple of `torch.FloatTensor` where each
        tensor has shape `(batch_size, channels, height, width)`.
    vision_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True`):
        Tuple of `torch.FloatTensor` (one for the output of each stage) of shape `(batch_size, height, width, hidden_size)`.
        Hidden-states of the vision model at the output of each stage.
    vision_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True`):
        Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`.
        Attentions weights of the vision model.
    mask_decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True`):
        Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`.
        Attentions weights of the mask decoder.
    high_res_masks (`torch.FloatTensor` of shape `(batch_size, point_batch_size, num_masks, image_size, image_size)`, *optional*):
        The predicted masks, upscaled to the original image size. Only used for Sam2VideoModel.
    object_pointer (`torch.FloatTensor` of shape `(batch_size, point_batch_size, hidden_size)`, *optional*):
        A tensor representing the object pointer, used for tracking in videos. Only used for Sam2VideoModel.
    """

    # Video-only additions on top of the image segmentation output (documented above).
    high_res_masks: torch.FloatTensor | None = None
    object_pointer: torch.FloatTensor | None = None
@dataclass
@auto_docstring(custom_intro="Base class for the Sam2 model's output.")
class Sam2VideoSegmentationOutput(ModelOutput):
    r"""
    object_ids (`list[int]`, *optional*):
        List of object IDs being tracked in the current frame.
    pred_masks (`torch.FloatTensor` of shape `(batch_size, num_masks, height, width)`):
        The predicted masks stored at the model's resolution.
    object_score_logits (`torch.FloatTensor` of shape `(batch_size,)`, *optional*):
        Logits for the object scores, indicating if objects are present.
    frame_idx (`int`):
        The frame index of the video.
    """

    object_ids: list[int] | None = None
    pred_masks: torch.FloatTensor | None = None
    object_score_logits: torch.FloatTensor | None = None
    frame_idx: int | None = None
@auto_docstring
class Sam2VideoPreTrainedModel(PreTrainedModel):
    config_class = Sam2VideoConfig
    base_model_prefix = "sam2_video"
    main_input_name = "pixel_values"
    input_modalities = "video"
    _supports_sdpa = True
    _supports_flash_attn = True
    _supports_attention_backend = True

    @torch.no_grad()
    def _init_weights(self, module):
        """Initialize the SAM2-video-specific parameters not covered by the generic initialization."""
        super()._init_weights(module)
        if isinstance(module, Sam2VideoModel):
            # Learned "no memory" / "no object" embeddings all start at zero.
            if module.no_memory_positional_encoding is not None:
                init.zeros_(module.no_memory_positional_encoding)
            if module.memory_temporal_positional_encoding is not None:
                init.zeros_(module.memory_temporal_positional_encoding)
            if module.no_object_pointer is not None:
                init.zeros_(module.no_object_pointer)
            if module.occlusion_spatial_embedding_parameter is not None:
                init.zeros_(module.occlusion_spatial_embedding_parameter)
        if isinstance(module, Sam2VideoMemoryFuserCXBlock):
            # Layer-scale starts at zero so each fuser block initially acts as identity (residual only).
            if module.scale is not None:
                init.zeros_(module.scale)
        elif isinstance(module, Sam2VideoVisionRotaryEmbedding):
            # Recompute the fixed rotary angle tables rather than randomly initializing the buffers.
            inv_freq = module.create_inv_freq()
            init.copy_(module.rope_embeddings_cos, inv_freq.cos())
            init.copy_(module.rope_embeddings_sin, inv_freq.sin())
        elif isinstance(module, Sam2VideoPositionalEmbedding):
            init.normal_(module.positional_embedding, std=module.scale)
class Sam2VideoVisionRotaryEmbedding(nn.Module):
    """
    Vision Rotary Position Embedding for SAM2, following transformers library standards.
    Supports 2D (axial) rotary embeddings for spatial dimensions.
    """

    def __init__(self, config: Sam2VideoConfig):
        super().__init__()
        head_count = config.memory_attention_num_attention_heads
        self.dim = config.memory_attention_hidden_size // (config.memory_attention_downsample_rate * head_count)
        # Axial RoPE splits the head dim in half (x/y), and each half is rotated pairwise.
        if self.dim % 4 != 0:
            raise ValueError("Dimension must be divisible by 4 for axial RoPE")
        self.end_x, self.end_y = config.memory_attention_rope_feat_sizes
        self.memory_attention_rope_theta = config.memory_attention_rope_theta
        # directly register the cos and sin embeddings as we have a fixed feature shape
        angle_table = self.create_inv_freq()
        self.register_buffer("rope_embeddings_cos", angle_table.cos(), persistent=False)
        self.register_buffer("rope_embeddings_sin", angle_table.sin(), persistent=False)

    @torch.no_grad()
    def forward(self) -> tuple[torch.Tensor, torch.Tensor]:
        # As the feature map size is fixed, we can just return the pre-computed embeddings.
        return self.rope_embeddings_cos, self.rope_embeddings_sin

    def create_inv_freq(self):
        """Build the (seq_len, dim) table of rotary angles for the fixed end_x x end_y feature grid."""
        exponents = torch.arange(0, self.dim, 4)[: (self.dim // 4)].float() / self.dim
        freqs = 1.0 / (self.memory_attention_rope_theta**exponents)
        # 2D positions of each flattened grid location (row-major over a end_y x end_x grid).
        flat_positions = torch.arange(self.end_x * self.end_y, dtype=torch.long)
        x_positions = flat_positions % self.end_x
        y_positions = torch.div(flat_positions, self.end_x, rounding_mode="floor")
        # Axial angles: first half of the dim encodes x, second half encodes y.
        angles_x = torch.outer(x_positions, freqs).float()
        angles_y = torch.outer(y_positions, freqs).float()
        combined = torch.cat([angles_x, angles_y], dim=-1)
        # Duplicate each angle for the (even, odd) element pair that gets rotated together.
        return combined.repeat_interleave(2, dim=-1)
def rotate_pairwise(x):
    """
    Rotate adjacent channel pairs of the input: (x0, x1) -> (-x1, x0). Different from
    Llama's half-tensor rotation.
    This is an optimized version of the following more explicit implementation:
    ```python
    x_rotated = torch.zeros_like(x, dtype=x.dtype, device=x.device)
    x_rotated[..., ::2] = -x[..., 1::2]
    x_rotated[..., 1::2] = x[..., ::2]
    return x_rotated
    ```
    """
    pairs = x.view(*x.shape[:-1], -1, 2)
    even_channels, odd_channels = pairs.unbind(dim=-1)
    rotated = torch.stack((-odd_channels, even_channels), dim=-1)
    return rotated.flatten(start_dim=-2)
# TODO: Compared to the original implementation this gives ~1e-07 max / ~1e-09 avg diff for
# q_embed and k_embed, most likely because the original uses complex tensors.
def apply_rotary_pos_emb_2d(
    q: torch.Tensor,
    k: torch.Tensor,
    cos: torch.Tensor,
    sin: torch.Tensor,
    num_k_exclude_rope: int = 0,
    repeat_freqs_k: bool = False,
) -> tuple[torch.Tensor, torch.Tensor]:
    """
    Apply rotary position embedding to query and key tensors for vision models.
    Follows the standard transformers library pattern.
    Args:
        q: Query tensor of shape (..., seq_len, head_dim)
        k: Key tensor of shape (..., seq_len, head_dim)
        cos: Cosine position embedding of shape (seq_len, head_dim)
        sin: Sine position embedding of shape (seq_len, head_dim)
        num_k_exclude_rope: Number of trailing key positions left un-rotated
        repeat_freqs_k: Whether to repeat frequencies for keys (for cross-attention)
    Returns:
        Rotated (q, k) tensors
    """
    # Split keys into the part that gets rotated and the trailing excluded part.
    num_rotated_keys = k.shape[-2] - num_k_exclude_rope
    k_rot = k[..., :num_rotated_keys, :]
    k_pass = k[..., num_rotated_keys:, :]

    # Queries: upcast to float32 (matching the original implementation) before rotating.
    q_embed = q.float()
    q_embed = (q_embed * cos) + (rotate_pairwise(q_embed) * sin)

    if k_rot.shape[-2] == 0:
        # Nothing to rotate on the key side (e.g. keys empty due to dropout).
        return q_embed.type_as(q), torch.cat([k_rot, k_pass], dim=-2)

    if repeat_freqs_k and k_rot.shape[-2] != q.shape[-2]:
        # Cross-attention: tile cos/sin along the sequence axis to cover the key length.
        repeat_factor = k_rot.shape[-2] // q.shape[-2]
        cos_k = cos.repeat(1, 1, repeat_factor, 1)
        sin_k = sin.repeat(1, 1, repeat_factor, 1)
    else:
        cos_k, sin_k = cos, sin

    # Keys: same float32 upcast and pairwise rotation as the queries.
    k_embed = k_rot.float()
    k_embed = (k_embed * cos_k) + (rotate_pairwise(k_embed) * sin_k)
    # Re-attach the excluded (un-rotated) keys.
    k_embed = torch.cat([k_embed.type_as(k), k_pass], dim=-2)
    return q_embed.type_as(q), k_embed
class Sam2VideoRoPEAttention(nn.Module):
    """Attention with rotary position encoding."""
    def __init__(
        self,
        config: Sam2VideoConfig,
        kv_in_dim: int | None = None,
        rope_k_repeat=False,
    ):
        super().__init__()
        self.config = config
        self.hidden_size = config.memory_attention_hidden_size
        # Q/K/V are projected to a narrower internal width (downsampled attention).
        self.internal_dim = self.hidden_size // config.memory_attention_downsample_rate
        self.num_attention_heads = config.memory_attention_num_attention_heads
        self.head_dim = self.internal_dim // config.memory_attention_num_attention_heads
        self.scaling = self.head_dim**-0.5
        self.is_causal = False
        # Keys/values may come from features of a different width than the queries.
        self.kv_in_dim = kv_in_dim if kv_in_dim is not None else self.hidden_size
        self.q_proj = nn.Linear(self.hidden_size, self.internal_dim)
        self.k_proj = nn.Linear(self.kv_in_dim, self.internal_dim)
        self.v_proj = nn.Linear(self.kv_in_dim, self.internal_dim)
        self.o_proj = nn.Linear(self.internal_dim, self.hidden_size)
        # When True, RoPE cos/sin tables are tiled to cover longer key sequences
        # (see `apply_rotary_pos_emb_2d`'s `repeat_freqs_k`).
        self.rope_k_repeat = rope_k_repeat
        self.dropout_p = config.memory_attention_rope_dropout
    def forward(
        self,
        query: torch.Tensor,
        key: torch.Tensor,
        value: torch.Tensor,
        position_embeddings: tuple[torch.Tensor, torch.Tensor],
        num_k_exclude_rope: int = 0,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> Tensor:
        """
        Args:
            query/key/value: tensors whose first two dims are (batch, point_batch); they are
                projected and reshaped to multi-head layout below.
            position_embeddings: pre-computed (cos, sin) RoPE tables.
            num_k_exclude_rope: number of trailing key positions left un-rotated.
        Returns:
            Tuple of (attn_output, attn_weights).
        """
        # Input projections
        batch_size, point_batch_size = query.shape[:2]
        # Fold the point batch into the batch dim and split heads: (B*P, seq, H, Dh) -> (B*P, H, seq, Dh)
        new_shape = (batch_size * point_batch_size, -1, self.num_attention_heads, self.head_dim)
        query = self.q_proj(query).view(*new_shape).transpose(1, 2)
        key = self.k_proj(key).view(*new_shape).transpose(1, 2)
        value = self.v_proj(value).view(*new_shape).transpose(1, 2)
        cos, sin = position_embeddings
        # Apply rotary position encoding, excluding some keys if specified
        query, key = apply_rotary_pos_emb_2d(
            query, key, cos, sin, repeat_freqs_k=self.rope_k_repeat, num_k_exclude_rope=num_k_exclude_rope
        )
        # Dispatch to the configured attention backend (eager fallback).
        attention_interface: Callable = ALL_ATTENTION_FUNCTIONS.get_interface(
            self.config._attn_implementation, eager_attention_forward
        )
        attn_output, attn_weights = attention_interface(
            self,
            query,
            key,
            value,
            attention_mask=None,
            dropout=0.0 if not self.training else self.dropout_p,
            scaling=self.scaling,
            is_causal=self.is_causal,
            **kwargs,
        )
        # Merge heads and restore the (batch, point_batch, seq, hidden) layout.
        attn_output = attn_output.reshape(
            batch_size, point_batch_size, -1, self.num_attention_heads * self.head_dim
        ).contiguous()
        attn_output = self.o_proj(attn_output)
        return attn_output, attn_weights
class Sam2VideoMemoryAttentionLayer(nn.Module):
    """
    One memory-attention block: self-attention over the current-frame tokens,
    cross-attention onto the memory features, then a feed-forward network.
    Every stage is pre-normalized and added back through its own dropout (residuals).
    """

    def __init__(self, config: Sam2VideoConfig):
        super().__init__()
        hidden_size = config.memory_attention_hidden_size
        self.self_attn = Sam2VideoRoPEAttention(config)
        self.cross_attn_image = Sam2VideoRoPEAttention(config, kv_in_dim=64, rope_k_repeat=True)
        # Feed-forward sub-layer
        self.linear1 = nn.Linear(hidden_size, config.memory_attention_feed_forward_hidden_size)
        self.dropout = nn.Dropout(config.memory_attention_dropout)
        self.linear2 = nn.Linear(config.memory_attention_feed_forward_hidden_size, hidden_size)
        self.layer_norm1 = nn.LayerNorm(hidden_size)
        self.layer_norm2 = nn.LayerNorm(hidden_size)
        self.layer_norm3 = nn.LayerNorm(hidden_size)
        self.dropout1 = nn.Dropout(config.memory_attention_dropout)
        self.dropout2 = nn.Dropout(config.memory_attention_dropout)
        self.dropout3 = nn.Dropout(config.memory_attention_dropout)
        self.activation = ACT2FN[config.memory_attention_feed_forward_hidden_act]

    def forward(
        self,
        queries: Tensor,
        keys: Tensor,
        key_point_embedding: Tensor,
        rope_position_embeddings: tuple[Tensor, Tensor],
        num_k_exclude_rope: int = 0,
    ) -> torch.Tensor:
        # Stage 1: self-attention among the current-frame tokens.
        normed = self.layer_norm1(queries)
        attn_out, _ = self.self_attn(
            query=normed, key=normed, value=normed, position_embeddings=rope_position_embeddings
        )
        queries = queries + self.dropout1(attn_out)
        # Stage 2: cross-attention from the frame tokens onto the memory bank
        # (keys carry their positional embedding; values do not).
        normed = self.layer_norm2(queries)
        attn_out, _ = self.cross_attn_image(
            query=normed,
            key=keys + key_point_embedding,
            value=keys,
            position_embeddings=rope_position_embeddings,
            num_k_exclude_rope=num_k_exclude_rope,
        )
        queries = queries + self.dropout2(attn_out)
        # Stage 3: position-wise feed-forward network.
        normed = self.layer_norm3(queries)
        ffn_out = self.linear2(self.dropout(self.activation(self.linear1(normed))))
        queries = queries + self.dropout3(ffn_out)
        return queries
class Sam2VideoMemoryAttention(nn.Module):
    """Stack of memory-attention layers that condition current-frame features on memory features."""
    def __init__(self, config: Sam2VideoConfig):
        super().__init__()
        self.layers = nn.ModuleList(
            [Sam2VideoMemoryAttentionLayer(config) for _ in range(config.memory_attention_num_layers)]
        )
        self.layer_norm = nn.LayerNorm(config.memory_attention_hidden_size)
        self.rotary_emb = Sam2VideoVisionRotaryEmbedding(config=config)
    def forward(
        self,
        current_vision_features: torch.Tensor,
        memory: torch.Tensor,
        current_vision_position_embeddings: Tensor | None = None,
        memory_posision_embeddings: Tensor | None = None,
        num_object_pointer_tokens: int = 0,
    ):
        """
        Args:
            current_vision_features (`torch.FloatTensor`):
                The current vision features used for self-attention.
            memory (`torch.FloatTensor`):
                The memory features used for cross-attention.
            current_vision_position_embeddings (`torch.FloatTensor`, *optional*):
                The position embeddings for the current vision features.
            memory_posision_embeddings (`torch.FloatTensor`, *optional*):
                The position embeddings for the memory features. NOTE: the misspelled
                name is kept as-is since renaming would break keyword callers.
            num_object_pointer_tokens (`int`, *optional*, defaults to 0):
                The number of object pointer tokens.
        """
        output = current_vision_features
        if current_vision_position_embeddings is not None:
            # Lightly mix the positional encoding into the features (fixed 0.1 weight).
            output = output + 0.1 * current_vision_position_embeddings
        # Convert to batch first
        output = output.transpose(0, 1)
        memory = memory.transpose(0, 1).unsqueeze(1)
        memory_posision_embeddings = memory_posision_embeddings.transpose(0, 1).unsqueeze(1)
        rope_position_embeddings = self.rotary_emb()
        for layer in self.layers:
            output = layer(
                # add the point-batch dim on the first iteration only (later outputs are already 4D)
                queries=output.unsqueeze(1) if output.ndim == 3 else output,
                keys=memory,
                key_point_embedding=memory_posision_embeddings,
                rope_position_embeddings=rope_position_embeddings,
                num_k_exclude_rope=num_object_pointer_tokens,
            )
        normed_output = self.layer_norm(output)
        # Convert back to seq first
        normed_output = normed_output.transpose(0, 1)
        return normed_output
# Lightly adapted from ConvNext (https://github.com/facebookresearch/ConvNeXt)
class Sam2VideoMemoryFuserCXBlock(GradientCheckpointingLayer):
    """
    ConvNeXt-style residual block: depthwise conv -> LayerNorm -> pointwise MLP,
    with a learnable per-channel scale applied before the residual addition.
    """

    def __init__(self, config: Sam2VideoConfig):
        super().__init__()
        embed_dim = config.memory_fuser_embed_dim
        # Depthwise convolution: one filter per channel (groups == channels).
        self.depthwise_conv = nn.Conv2d(
            embed_dim,
            embed_dim,
            kernel_size=config.memory_fuser_kernel_size,
            padding=config.memory_fuser_padding,
            groups=embed_dim,
        )
        self.layer_norm = Sam2VideoLayerNorm(embed_dim, eps=1e-6, data_format="channels_first")
        self.activation = ACT2FN[config.memory_fuser_hidden_act]
        # Pointwise (1x1) convolutions, implemented as linear layers over the channel dim.
        self.pointwise_conv1 = nn.Linear(embed_dim, config.memory_fuser_intermediate_dim)
        self.pointwise_conv2 = nn.Linear(config.memory_fuser_intermediate_dim, embed_dim)
        # Layer-scale parameter (per-channel scaling of the residual branch).
        self.scale = nn.Parameter(
            config.memory_fuser_layer_scale_init_value * torch.ones(embed_dim),
            requires_grad=True,
        )

    def forward(self, hidden_states):
        residual = hidden_states
        hidden_states = self.depthwise_conv(hidden_states)
        hidden_states = self.layer_norm(hidden_states)
        # Channels-last for the linear layers: (N, C, H, W) -> (N, H, W, C)
        hidden_states = hidden_states.permute(0, 2, 3, 1)
        hidden_states = self.pointwise_conv2(self.activation(self.pointwise_conv1(hidden_states)))
        hidden_states = self.scale * hidden_states
        # Back to channels-first: (N, H, W, C) -> (N, C, H, W)
        hidden_states = hidden_states.permute(0, 3, 1, 2)
        return residual + hidden_states
class Sam2VideoMemoryFuser(nn.Module):
    """Sequential stack of ConvNeXt-style blocks used to fuse memory features."""

    def __init__(self, config: Sam2VideoConfig):
        super().__init__()
        num_layers = config.memory_fuser_num_layers
        self.layers = nn.ModuleList(Sam2VideoMemoryFuserCXBlock(config) for _ in range(num_layers))

    def forward(self, hidden_states):
        # hidden_states: normally (N, C, H, W); each block preserves the shape.
        for block in self.layers:
            hidden_states = block(hidden_states)
        return hidden_states
class Sam2VideoMaskDownSamplerLayer(nn.Module):
    """One conv -> LayerNorm -> activation downsampling stage for mask inputs."""

    def __init__(self, config: Sam2VideoConfig, in_channels: int, out_channels: int):
        super().__init__()
        self.conv = nn.Conv2d(
            in_channels,
            out_channels,
            kernel_size=config.mask_downsampler_kernel_size,
            stride=config.mask_downsampler_stride,
            padding=config.mask_downsampler_padding,
        )
        self.layer_norm = Sam2VideoLayerNorm(out_channels, eps=1e-6, data_format="channels_first")
        self.activation = ACT2FN[config.mask_downsampler_hidden_act]

    def forward(self, x):
        x = self.conv(x)
        x = self.layer_norm(x)
        return self.activation(x)
class Sam2VideoMaskDownSampler(nn.Module):
    """
    Progressively downsample a mask by total_stride, each time by stride.
    Note that LayerNorm is applied per *token*, like in ViT.
    With each downsample (by a factor stride**2), channel capacity increases by the same factor.
    In the end, we linearly project to embed_dim channels.
    """

    def __init__(self, config: Sam2VideoConfig):
        super().__init__()
        stride = config.mask_downsampler_stride
        # Number of stages so that stride**num_layers == total_stride.
        num_layers = int(math.log2(config.mask_downsampler_total_stride) // math.log2(stride))
        self.layers = nn.ModuleList()
        self.activation = ACT2FN[config.mask_downsampler_hidden_act]
        in_channels = out_channels = 1
        for _ in range(num_layers):
            # Channel capacity grows by the spatial reduction factor at each stage.
            out_channels = in_channels * (stride**2)
            self.layers.append(Sam2VideoMaskDownSamplerLayer(config, in_channels, out_channels))
            in_channels = out_channels
        self.final_conv = nn.Conv2d(out_channels, config.mask_downsampler_embed_dim, kernel_size=1)

    def forward(self, x):
        for layer in self.layers:
            x = layer(x)
        return self.final_conv(x)
class Sam2VideoMemoryEncoder(nn.Module):
    """
    Encode a predicted mask together with the frame's vision features into a memory
    feature map plus its sine positional encoding.
    """

    def __init__(self, config: Sam2VideoConfig):
        super().__init__()
        hidden_size = config.memory_encoder_hidden_size
        output_channels = config.memory_encoder_output_channels
        self.mask_downsampler = Sam2VideoMaskDownSampler(config)
        self.feature_projection = nn.Conv2d(hidden_size, hidden_size, kernel_size=1)
        self.memory_fuser = Sam2VideoMemoryFuser(config)
        self.position_encoding = Sam2VideoPositionEmbeddingSine(num_pos_feats=output_channels // 2, normalize=True)
        self.projection = nn.Conv2d(hidden_size, output_channels, kernel_size=1)

    def forward(
        self,
        vision_features: torch.Tensor,
        masks: torch.Tensor,
    ) -> tuple[torch.Tensor, torch.Tensor]:
        # Downsample masks to the spatial resolution of the vision features.
        masks = self.mask_downsampler(masks)
        # Fuse the (projected) vision features with the downsampled masks.
        vision_features = self.feature_projection(vision_features)
        vision_features = self.memory_fuser(vision_features + masks)
        vision_features = self.projection(vision_features)
        # Sine positional encoding for the resulting memory feature map.
        vision_pos_enc = self.position_encoding(vision_features.shape, vision_features.device, vision_features.dtype)
        return vision_features, vision_pos_enc
class Sam2VideoPositionalEmbedding(Sam2PositionalEmbedding):
    # Alias of the Sam2 positional embedding re-declared under the video model's name;
    # behavior is inherited unchanged.
    pass
# A large negative logit used as a placeholder score for missing objects: mask logits are
# overwritten with this value when an object is not present (see `_single_frame_forward`).
NO_OBJ_SCORE = -1024.0
def get_1d_sine_pe(pos_inds, dim, temperature=10000):
    """
    Get 1D sine positional embedding as in the original Transformer paper.
    """
    half_dim = dim // 2
    # One frequency per channel pair, as in "Attention Is All You Need".
    channel_ids = torch.arange(half_dim, dtype=torch.float32, device=pos_inds.device)
    scale = temperature ** (2 * (channel_ids // 2) / half_dim)
    angles = pos_inds.unsqueeze(-1) / scale
    # First half of the output holds sines, second half cosines.
    return torch.cat([angles.sin(), angles.cos()], dim=-1)
@auto_docstring
class Sam2VideoModel(Sam2Model):
input_modalities = ("video", "text")
_keys_to_ignore_on_load_unexpected = []
_can_record_outputs = {"mask_decoder_attentions": OutputRecorder(Sam2VideoTwoWayAttentionBlock, index=2)}
    def __init__(self, config: Sam2VideoConfig):
        super().__init__(config)
        self.config = config
        # For video sequence inference
        self.image_size = config.image_size
        self.memory_attention = Sam2VideoMemoryAttention(config)
        self.memory_encoder = Sam2VideoMemoryEncoder(config)
        # Learned encoding used in place of memory positional encodings when none exist
        # (per its name — usage lives outside this chunk).
        self.no_memory_positional_encoding = torch.nn.Parameter(
            torch.zeros(1, 1, config.vision_config.fpn_hidden_size)
        )
        self.mem_dim = config.memory_encoder_output_channels
        self.num_maskmem = config.num_maskmem  # Number of memories accessible
        # Temporal encoding of the memories
        self.memory_temporal_positional_encoding = torch.nn.Parameter(
            torch.zeros(self.num_maskmem, 1, 1, self.mem_dim)
        )
        # Learned pointer substituted for objects that are not present in a frame.
        # NOTE(review): `self.hidden_dim` is expected to come from the Sam2Model base init.
        self.no_object_pointer = torch.nn.Parameter(torch.zeros(1, self.hidden_dim))
        # A conv layer to downsample the mask prompt to stride 4 (the same stride as
        # low-res SAM mask logits) and to change its scales from 0~1 to SAM logit scale,
        # so that it can be fed into the SAM mask decoder to generate a pointer.
        self.mask_downsample = torch.nn.Conv2d(1, 1, kernel_size=4, stride=4)
        # a feedforward layer on SAM output tokens to turn them into object pointers
        self.object_pointer_proj = Sam2VideoFeedForward(self.hidden_dim, self.hidden_dim, self.hidden_dim, 3)
        if self.config.enable_temporal_pos_encoding_for_object_pointers:
            # a linear projection on temporal positional encoding in object pointers to
            # avoid potential interference with spatial positional encoding
            self.temporal_positional_encoding_projection_layer = torch.nn.Linear(self.hidden_dim, self.mem_dim)
        else:
            self.temporal_positional_encoding_projection_layer = torch.nn.Identity()
        self.occlusion_spatial_embedding_parameter = None  # compatibility with Sam2
        if config.enable_occlusion_spatial_embedding:
            self.occlusion_spatial_embedding_parameter = torch.nn.Parameter(torch.zeros(1, self.mem_dim))
        self.post_init()
@torch.no_grad()
def get_prompt_embeddings(
self,
input_points: torch.FloatTensor | None = None,
input_labels: torch.LongTensor | None = None,
input_boxes: torch.FloatTensor | None = None,
input_masks: torch.LongTensor | None = None,
) -> tuple[torch.Tensor, torch.Tensor]:
r"""
Returns the prompt embeddings by passing the input points, labels, boxes and masks through the prompt encoder.
Args:
input_points (`torch.FloatTensor` of shape `(batch_size, point_batch_size, num_points_per_image, 2)`):
Optional input points for the prompt encoder. The padding of the point is automatically done by the
processor. `point_batch_size` refers to the number of masks that we want the model to predict per
point. The model will output `point_batch_size` times 3 masks in total.
input_labels (`torch.LongTensor` of shape `(batch_size, point_batch_size, num_points_per_image)`):
Optional input labels for the prompt encoder. The padding of the labels is automatically done by the
processor, or can be fed by the user.
input_boxes (`torch.FloatTensor` of shape `(batch_size, num_boxes_per_image, 4)`):
Optional input boxes for the prompt encoder. The padding of the boxes is automatically done by the
processor. users can also pass manually the input boxes.
input_masks (`torch.LongTensor` of shape `(batch_size, image_size, image_size)`):
Optional input masks for the prompt encoder.
"""
prompt_output = self.prompt_encoder(
input_points=input_points,
input_labels=input_labels,
input_boxes=input_boxes,
input_masks=input_masks,
)
return prompt_output
def _prepare_vision_features(
self,
inference_session: Sam2VideoInferenceSession,
frame_idx: int,
batch_size: int,
) -> tuple[torch.Tensor, list[torch.Tensor]]:
"""Prepare vision features for a frame."""
# Check if features are cached
if cached_features := inference_session.cache.get_vision_features(frame_idx):
vision_feats = cached_features["vision_feats"]
vision_pos_embeds = cached_features["vision_pos_embeds"]
else:
# Compute features using image encoder
image_batch = inference_session.get_frame(frame_idx).unsqueeze(0) # Add batch dimension
image_outputs = self.get_image_features(image_batch, return_dict=True)
vision_feats = image_outputs.fpn_hidden_states
vision_pos_embeds = image_outputs.fpn_position_encoding
# Cache features
inference_session.cache.cache_vision_features(
frame_idx, {"vision_feats": vision_feats, "vision_pos_embeds": vision_pos_embeds}
)
# Expand to batch size if needed
if batch_size > 1:
vision_feats = vision_feats.expand(batch_size, -1, -1, -1)
vision_pos_embeds = [pe.expand(batch_size, -1, -1, -1) for pe in vision_pos_embeds]
return vision_feats, vision_pos_embeds
def _single_frame_forward(
self,
pixel_values: torch.FloatTensor | None = None,
input_points: torch.FloatTensor | None = None,
input_labels: torch.LongTensor | None = None,
input_boxes: torch.FloatTensor | None = None,
input_masks: torch.LongTensor | None = None,
image_embeddings: torch.FloatTensor | None = None,
multimask_output: bool = True,
attention_similarity: torch.FloatTensor | None = None,
target_embedding: torch.FloatTensor | None = None,
**kwargs: Unpack[TransformersKwargs],
) -> Sam2VideoImageSegmentationOutput:
"""
input_points (`torch.FloatTensor` of shape `(batch_size, num_points, 2)`):
Input 2D spatial points, this is used by the prompt encoder to encode the prompt. Generally yields to much
better results. The points can be obtained by passing a list of list of list to the processor that will
create corresponding `torch` tensors of dimension 4. The first dimension is the image batch size, the
second dimension is the point batch size (i.e. how many segmentation masks do we want the model to predict
per input point), the third dimension is the number of points per segmentation mask (it is possible to pass
multiple points for a single mask), and the last dimension is the x (vertical) and y (horizontal)
coordinates of the point. If a different number of points is passed either for each image, or for each
mask, the processor will create "PAD" points that will correspond to the (0, 0) coordinate, and the
computation of the embedding will be skipped for these points using the labels.
input_labels (`torch.LongTensor` of shape `(batch_size, point_batch_size, num_points)`):
Input labels for the points, this is used by the prompt encoder to encode the prompt. According to the
official implementation, there are 3 types of labels
- `1`: the point is a point that contains the object of interest
- `0`: the point is a point that does not contain the object of interest
- `-1`: the point corresponds to the background
We added the label:
- `-10`: the point is a padding point, thus should be ignored by the prompt encoder
The padding labels should be automatically done by the processor.
input_boxes (`torch.FloatTensor` of shape `(batch_size, num_boxes, 4)`):
Input boxes for the points, this is used by the prompt encoder to encode the prompt. Generally yields to
much better generated masks. The boxes can be obtained by passing a list of list of list to the processor,
that will generate a `torch` tensor, with each dimension corresponding respectively to the image batch
size, the number of boxes per image and the coordinates of the top left and bottom right point of the box.
In the order (`x1`, `y1`, `x2`, `y2`):
- `x1`: the x coordinate of the top left point of the input box
- `y1`: the y coordinate of the top left point of the input box
- `x2`: the x coordinate of the bottom right point of the input box
- `y2`: the y coordinate of the bottom right point of the input box
input_masks (`torch.FloatTensor` of shape `(batch_size, image_size, image_size)`):
SAM model also accepts segmentation masks as input. The mask will be embedded by the prompt encoder to
generate a corresponding embedding, that will be fed later on to the mask decoder. These masks needs to be
manually fed by the user, and they need to be of shape (`batch_size`, `image_size`, `image_size`).
image_embeddings (`torch.FloatTensor` of shape `(batch_size, output_channels, window_size, window_size)`):
Image embeddings, this is used by the mask decoder to generate masks and iou scores. For more memory
efficient computation, users can first retrieve the image embeddings using the `get_image_embeddings`
method, and then feed them to the `forward` method instead of feeding the `pixel_values`.
multimask_output (`bool`, *optional*):
In the original implementation and paper, the model always outputs 3 masks per image (or per point / per
bounding box if relevant). However, it is possible to just output a single mask, that corresponds to the
"best" mask, by specifying `multimask_output=False`.
attention_similarity (`torch.FloatTensor`, *optional*):
Attention similarity tensor, to be provided to the mask decoder for target-guided attention in case the
model is used for personalization as introduced in [PerSAM](https://huggingface.co/papers/2305.03048).
target_embedding (`torch.FloatTensor`, *optional*):
Embedding of the target concept, to be provided to the mask decoder for target-semantic prompting in case
the model is used for personalization as introduced in [PerSAM](https://huggingface.co/papers/2305.03048).
"""
if not ((pixel_values is None) ^ (image_embeddings is None)):
raise ValueError("Exactly one of pixel_values or image_embeddings must be provided.")
if input_points is not None and input_boxes is not None:
if input_points.shape[1] != input_boxes.shape[1]:
raise ValueError(
f"You should provide as many bounding boxes as input points per box. Got {input_points.shape[1]} and {input_boxes.shape[1]}."
)
elif input_points is not None:
num_objects = input_points.shape[1]
elif input_boxes is not None:
num_objects = input_boxes.shape[1]
elif input_masks is not None:
num_objects = input_masks.shape[1]
else:
num_objects = 1
image_positional_embeddings = self.get_image_wide_positional_embeddings()
# repeat with batch size
batch_size = pixel_values.shape[0] if pixel_values is not None else image_embeddings[-1].shape[0]
image_positional_embeddings = image_positional_embeddings.repeat(batch_size, 1, 1, 1)
vision_attentions = None
vision_hidden_states = None
if pixel_values is not None:
image_outputs = self.get_image_features(pixel_values, return_dict=True, **kwargs)
feature_maps = image_outputs.fpn_hidden_states
vision_hidden_states = image_outputs.hidden_states
vision_attentions = image_outputs.attentions
# add no memory embedding to the last feature map
feature_maps[-1] = feature_maps[-1] + self.no_memory_embedding
# reshape feature maps to the same shape as the backbone feature sizes
image_embeddings = [
feat.permute(1, 2, 0).view(batch_size, -1, *feat_size)
for feat, feat_size in zip(feature_maps, self.backbone_feature_sizes)
]
if input_points is not None and input_labels is None:
input_labels = torch.ones_like(input_points[:, :, :, 0], dtype=torch.int, device=input_points.device)
if input_points is None and input_boxes is None:
# If no points are provide, pad with an empty point (with label -1)
input_points = torch.zeros(
batch_size, 1, 1, 2, dtype=image_embeddings[-1].dtype, device=image_embeddings[-1].device
)
input_labels = -torch.ones(batch_size, 1, 1, dtype=torch.int32, device=image_embeddings[-1].device)
if input_masks is not None:
# If mask_inputs is provided, downsize it into low-res mask input if needed
# and feed it as a dense mask prompt into the SAM mask encoder
if input_masks.shape[-2:] != self.prompt_encoder.mask_input_size:
input_masks = F.interpolate(
input_masks.float(),
size=self.prompt_encoder.mask_input_size,
align_corners=False,
mode="bilinear",
antialias=True, # use antialias for downsampling
).to(input_masks.dtype)
sparse_embeddings, dense_embeddings = self.prompt_encoder(
input_points=input_points,
input_labels=input_labels,
input_boxes=input_boxes,
input_masks=input_masks,
)
low_res_multimasks, iou_scores, sam_output_tokens, object_score_logits = self.mask_decoder(
image_embeddings=image_embeddings[-1],
image_positional_embeddings=image_positional_embeddings,
sparse_prompt_embeddings=sparse_embeddings,
dense_prompt_embeddings=dense_embeddings,
multimask_output=multimask_output,
high_resolution_features=image_embeddings[:-1],
attention_similarity=attention_similarity,
target_embedding=target_embedding,
**kwargs,
)
is_obj_appearing = object_score_logits > 0
# Mask used for spatial memories is always a *hard* choice between obj and no obj,
# consistent with the actual mask prediction
low_res_multimasks = torch.where(
is_obj_appearing[:, None, None],
low_res_multimasks,
NO_OBJ_SCORE,
)
# convert masks from possibly bfloat16 (or float16) to float32
# (older PyTorch versions before 2.1 don't support `interpolate` on bf16)
high_res_multimasks = (
F.interpolate(
low_res_multimasks.squeeze(1).float(),
size=(self.image_size, self.image_size),
mode="bilinear",
align_corners=False,
)
.unsqueeze(1)
.to(low_res_multimasks.dtype)
)
sam_output_token = sam_output_tokens[:, :, 0]
if multimask_output:
# take the best mask prediction (with the highest IoU estimation)
best_iou_inds = torch.argmax(iou_scores, dim=-1)
batch_inds = torch.arange(batch_size, device=high_res_multimasks.device)
object_batch_inds = torch.arange(num_objects, device=high_res_multimasks.device)
low_res_masks = low_res_multimasks[batch_inds, object_batch_inds, best_iou_inds]
high_res_masks = high_res_multimasks[batch_inds, object_batch_inds, best_iou_inds]
if sam_output_tokens.size(2) > 1:
sam_output_token = sam_output_tokens[batch_inds, object_batch_inds, best_iou_inds]
else:
low_res_masks, high_res_masks = low_res_multimasks[:, :, 0], high_res_multimasks[:, :, 0]
# Extract object pointer from the SAM output token (with occlusion handling)
object_pointer = self.object_pointer_proj(sam_output_token)
lambda_is_obj_appearing = is_obj_appearing.to(object_pointer.dtype)
object_pointer = lambda_is_obj_appearing * object_pointer
object_pointer = object_pointer + (1 - lambda_is_obj_appearing) * self.no_object_pointer
return Sam2VideoImageSegmentationOutput(
iou_scores=iou_scores,
pred_masks=low_res_masks,
high_res_masks=high_res_masks,
object_pointer=object_pointer,
object_score_logits=object_score_logits,
image_embeddings=image_embeddings,
vision_hidden_states=vision_hidden_states,
vision_attentions=vision_attentions,
)
    def _use_mask_as_output(
        self,
        backbone_features: torch.Tensor,
        high_res_features: list[torch.Tensor],
        mask_inputs: torch.Tensor,
    ) -> Sam2VideoImageSegmentationOutput:
        """
        Directly turn binary `mask_inputs` into a output mask logits without using SAM.
        (same input and output shapes as in forward above).
        """
        # Use -10/+20 as logits for neg/pos pixels (very close to 0/1 in prob after sigmoid).
        out_scale, out_bias = 20.0, -10.0  # sigmoid(-10.0)=4.5398e-05
        mask_inputs_float = mask_inputs.to(backbone_features[0].dtype)
        # Ensure mask is at self.image_size resolution for consistency
        if mask_inputs_float.shape[-2:] != (self.image_size, self.image_size):
            mask_inputs_float = F.interpolate(
                mask_inputs_float.float(),
                size=(self.image_size, self.image_size),
                align_corners=False,
                mode="bilinear",
                antialias=True,
            ).to(mask_inputs.dtype)
        # Map the {0, 1} mask to {-10, +10} logits.
        high_res_masks = mask_inputs_float * out_scale + out_bias
        low_res_masks = F.interpolate(
            high_res_masks.float(),
            size=self.prompt_encoder.mask_input_size,
            align_corners=False,
            mode="bilinear",
            antialias=True,  # use antialias for downsampling
        ).to(backbone_features[0].dtype)
        # a dummy IoU prediction of all 1's under mask input
        iou_scores = mask_inputs.new_ones(mask_inputs.size(0), 1).to(backbone_features[0].dtype)
        # produce an object pointer using the SAM decoder from the mask input
        object_pointer = self._single_frame_forward(
            input_masks=self.mask_downsample(mask_inputs_float.to(backbone_features[0].dtype)),
            image_embeddings=high_res_features + [backbone_features],
        ).object_pointer
        # In this method, we are treating mask_input as output, e.g. using it directly to create spatial mem;
        # Below, we follow the same design axiom to use mask_input to decide if obj appears or not instead of relying
        # on the object_scores from the SAM decoder.
        is_obj_appearing = torch.any(mask_inputs.flatten(1).float() > 0.0, dim=1)
        is_obj_appearing = is_obj_appearing[..., None]
        lambda_is_obj_appearing = is_obj_appearing.to(backbone_features[0].dtype)
        # Object score is +10 when any positive pixel exists, -10 otherwise.
        object_score_logits = out_scale * lambda_is_obj_appearing + out_bias
        # Blend toward the learned "no object" pointer for absent objects.
        object_pointer = lambda_is_obj_appearing * object_pointer
        object_pointer = object_pointer + (1 - lambda_is_obj_appearing) * self.no_object_pointer
        return Sam2VideoImageSegmentationOutput(
            iou_scores=iou_scores,
            pred_masks=low_res_masks,
            high_res_masks=high_res_masks,
            object_pointer=object_pointer,
            object_score_logits=object_score_logits.unsqueeze(-1),
            image_embeddings=high_res_features + [backbone_features],
        )
def _select_closest_cond_frames(self, frame_idx, cond_frame_outputs, max_cond_frame_num):
"""
Select up to `max_cond_frame_num` conditioning frames from `cond_frame_outputs`
that are temporally closest to the current frame at `frame_idx`. Here, we take
- a) the closest conditioning frame before `frame_idx` (if any);
- b) the closest conditioning frame after `frame_idx` (if any);
- c) any other temporally closest conditioning frames until reaching a total
of `max_cond_frame_num` conditioning frames.
Outputs:
- selected_outputs: selected items (keys & values) from `cond_frame_outputs`.
- unselected_outputs: items (keys & values) not selected in `cond_frame_outputs`.
"""
if max_cond_frame_num == -1 or len(cond_frame_outputs) <= max_cond_frame_num:
selected_outputs = cond_frame_outputs
unselected_outputs = {}
else:
selected_outputs = {}
# the closest conditioning frame before `frame_idx` (if any)
idx_before = max((t for t in cond_frame_outputs if t < frame_idx), default=None)
if idx_before is not None:
selected_outputs[idx_before] = cond_frame_outputs[idx_before]
# the closest conditioning frame after `frame_idx` (if any)
idx_after = min((t for t in cond_frame_outputs if t >= frame_idx), default=None)
if idx_after is not None:
selected_outputs[idx_after] = cond_frame_outputs[idx_after]
# add other temporally closest conditioning frames until reaching a total
# of `max_cond_frame_num` conditioning frames.
num_remain = max_cond_frame_num - len(selected_outputs)
inds_remain = sorted(
(t for t in cond_frame_outputs if t not in selected_outputs),
key=lambda x: abs(x - frame_idx),
)[:num_remain]
selected_outputs.update((t, cond_frame_outputs[t]) for t in inds_remain)
unselected_outputs = {t: v for t, v in cond_frame_outputs.items() if t not in selected_outputs}
return selected_outputs, unselected_outputs
def _gather_memory_frame_outputs(
self,
inference_session: Sam2VideoInferenceSession,
obj_idx: int,
frame_idx: int,
track_in_reverse_time: bool = False,
) -> list[tuple[int, dict]]:
"""
Get memory frames from conditioning and non-conditioning outputs.
Returns:
List of (relative_temporal_offset, output_data) tuples.
"""
temporal_positions_and_previous_outputs = []
# Add conditioning frame outputs (limited by max_cond_frame_num)
conditioning_outputs = inference_session.output_dict_per_obj[obj_idx]["cond_frame_outputs"]
if not conditioning_outputs:
raise ValueError(
"maskmem_features in conditioning outputs cannot be empty when not is_initial_conditioning_frame"
)
conditioning_outputs, unselected_conditioning_outputs = self._select_closest_cond_frames(
frame_idx, conditioning_outputs, max_cond_frame_num=self.config.max_cond_frame_num
)
# Store (temporal_position, output_data) tuples
temporal_positions_and_previous_outputs = [(0, out) for out in conditioning_outputs.values()]
# Add non-conditioning memory frames (up to self.num_maskmem - 1)
# These are typically frames tracked by the model without direct user input.
# Frames are selected with a stride, prioritizing the most recent ones. Here we only support stride = 1 for simplicity.
for relative_temporal_offset in range(self.num_maskmem - 1, 0, -1):
# relative_temporal_offset: how many frames before (or after if reversing) the current frame
if not track_in_reverse_time:
previous_frame_idx = frame_idx - relative_temporal_offset
else:
previous_frame_idx = frame_idx + relative_temporal_offset
# check if the output is already stored without using get_output to avoid unnecessary memory transfers between CPU and GPU
output_data = inference_session.output_dict_per_obj[obj_idx]["non_cond_frame_outputs"].get(
previous_frame_idx, unselected_conditioning_outputs.get(previous_frame_idx, None)
)
temporal_positions_and_previous_outputs.append((relative_temporal_offset, output_data))
return temporal_positions_and_previous_outputs
def _build_memory_attention_inputs(
self,
temporal_positions_and_previous_outputs: list[tuple[int, dict]],
device: torch.device,
) -> tuple[list[torch.Tensor], list[torch.Tensor]]:
"""
Concatenate memory features and positional embeddings from previous frames.
Returns:
Tuple of (memories_to_concatenate, memory_positional_embeddings_to_concatenate).
"""
memories_to_concatenate = []
memory_positional_embeddings_to_concatenate = []
for relative_temporal_offset, prev_output_data in temporal_positions_and_previous_outputs:
if prev_output_data is None:
continue # Skip if no output data for this temporal position (e.g., padding frames)
# Load memory features (potentially from CPU to GPU)
# Features are flattened: (Batch, Channels, H, W) -> (H*W, Batch, Channels)
memory_features = prev_output_data["maskmem_features"].to(device, non_blocking=True)
memories_to_concatenate.append(memory_features)
# Spatial positional encoding (potentially from CPU to GPU)
spatial_memory_pos_embed = prev_output_data["maskmem_pos_enc"].to(device, non_blocking=True)
# Add temporal positional encoding
# self.memory_temporal_positional_encoding shape: (NumMaskMem, 1, 1, MemDim)
combined_memory_pos_embed = (
spatial_memory_pos_embed + self.memory_temporal_positional_encoding[relative_temporal_offset - 1]
)
memory_positional_embeddings_to_concatenate.append(combined_memory_pos_embed)
return memories_to_concatenate, memory_positional_embeddings_to_concatenate
def _get_object_pointers(
self,
inference_session: Sam2VideoInferenceSession,
obj_idx: int,
frame_idx: int,
num_total_frames: int,
device: torch.device,
track_in_reverse_time: bool = False,
streaming: bool = False,
) -> tuple[list[int], list[torch.Tensor], int]:
"""
Get object pointers and their positional embeddings from past frames.
Returns:
Tuple of (temporal_offsets, pointer_tokens, max_object_pointers_to_use).
"""
temporal_position_sign_multiplier = -1 if track_in_reverse_time else 1
# Determine max object pointers to use
if streaming:
max_object_pointers_to_use = self.config.max_object_pointers_in_encoder
else:
max_object_pointers_to_use = min(num_total_frames, self.config.max_object_pointers_in_encoder)
temporal_offsets: list[int] = []
pointer_tokens: list[torch.Tensor] = []
# Add object pointers from selected conditioning frames
# Optionally, only include pointers from past frames during evaluation
conditioning_outputs = inference_session.output_dict_per_obj[obj_idx]["cond_frame_outputs"]
eligible_conditioning_outputs = conditioning_outputs
if not self.training:
eligible_conditioning_outputs = {
temporal_idx: out
for temporal_idx, out in conditioning_outputs.items()
if (temporal_idx >= frame_idx if track_in_reverse_time else temporal_idx <= frame_idx)
}
for temporal_idx, out_data in eligible_conditioning_outputs.items():
temporal_difference = (frame_idx - temporal_idx) * temporal_position_sign_multiplier
temporal_offsets.append(temporal_difference)
pointer_tokens.append(out_data["object_pointer"].to(device))
# Add object pointers from non-conditioning frames (up to max_object_pointers_to_use - 1)
for t_diff_offset in range(1, max_object_pointers_to_use):
ref_frame_idx = frame_idx + t_diff_offset if track_in_reverse_time else frame_idx - t_diff_offset
if ref_frame_idx < 0 or (
not streaming and num_total_frames is not None and ref_frame_idx >= num_total_frames
):
break # Stop if frame index is out of bounds
# check if the output is already stored without using get_output to avoid unnecessary memory transfers between CPU and GPU
out_data = inference_session.output_dict_per_obj[obj_idx]["non_cond_frame_outputs"].get(
ref_frame_idx, None
)
if out_data is not None:
temporal_offsets.append(t_diff_offset)
pointer_tokens.append(out_data["object_pointer"].to(device))
return temporal_offsets, pointer_tokens, max_object_pointers_to_use
def _process_object_pointers(
self,
temporal_offsets: list[int],
pointer_tokens: list[torch.Tensor],
max_object_pointers_to_use: int,
batch_size: int,
num_channels: int,
device: torch.device,
) -> tuple[torch.Tensor, torch.Tensor]:
"""
Process object pointers and compute their positional embeddings.
Returns:
Tuple of (object_pointers, object_pointers_pos_embed).
"""
if not pointer_tokens:
return None, None
# Stack object pointers: List of (Batch, Channels) -> (SeqLen_ptr, Batch, Channels)
object_pointers = torch.stack(pointer_tokens, dim=0)
if self.config.enable_temporal_pos_encoding_for_object_pointers:
max_temporal_diff = float(max_object_pointers_to_use - 1)
# Determine dimensionality for temporal positional encoding of pointers
pointer_tpos_dim = num_channels
# Normalize temporal differences before sine PE calculation
normalized_temporal_diffs = (
torch.tensor(temporal_offsets, device=device, dtype=torch.float32) / max_temporal_diff
)
sine_pe = get_1d_sine_pe(normalized_temporal_diffs, dim=pointer_tpos_dim).to(object_pointers.dtype)
projected_sine_pe = self.temporal_positional_encoding_projection_layer(sine_pe)
object_pointers_pos_embed = projected_sine_pe.unsqueeze(1).expand(-1, batch_size, self.mem_dim)
else:
object_pointers_pos_embed = object_pointers.new_zeros(
len(temporal_offsets), batch_size, self.mem_dim, dtype=object_pointers.dtype
)
if self.mem_dim < num_channels:
# If memory dimension is smaller, reshape/split pointers and repeat positional encoding
num_splits = num_channels // self.mem_dim
object_pointers = object_pointers.reshape(-1, batch_size, num_splits, self.mem_dim)
object_pointers = object_pointers.permute(0, 2, 1, 3).flatten(
0, 1
) # (SeqLen_ptr*num_splits, Batch, MemDim)
object_pointers_pos_embed = object_pointers_pos_embed.repeat_interleave(num_splits, dim=0)
return object_pointers, object_pointers_pos_embed
    def _prepare_memory_conditioned_features(
        self,
        inference_session: Sam2VideoInferenceSession,
        frame_idx: int,
        obj_idx: int,
        is_initial_conditioning_frame: bool,
        current_vision_features: torch.Tensor,
        current_vision_positional_embeddings: torch.Tensor,
        num_total_frames: int,
        track_in_reverse_time: bool = False,
        streaming: bool = False,
    ) -> torch.Tensor:
        """
        Fuse current frame's visual features with memory from previous frames for enhanced object tracking.

        This method conditions the current frame's visual features on temporal memory from previous frames,
        enabling consistent object tracking across video sequences. For initial conditioning frames, it uses
        no-memory embeddings. For subsequent frames, it retrieves and integrates memory features from both
        conditioning frames (user interactions) and non-conditioning frames (tracked results) via cross-attention.

        Args:
            inference_session (`Sam2VideoInferenceSession`):
                The video inference session object.
            frame_idx (`int`):
                Index of the current frame being processed.
            obj_idx (`int`):
                Index of the object being processed.
            is_initial_conditioning_frame (`bool`):
                Whether this is an initial conditioning frame with user inputs (True) or a subsequent
                tracking frame (False).
            current_vision_features (`torch.Tensor`):
                Highest-level vision features of shape `(seq_len, batch_size, channels)`.
            current_vision_positional_embeddings (`torch.Tensor`):
                Positional embedding tensors corresponding to the highest-level vision features.
            num_total_frames (`int`):
                Total number of frames in the video sequence.
            track_in_reverse_time (`bool`, *optional*, defaults to `False`):
                Whether tracking is performed in reverse temporal order.
            streaming (`bool`, *optional*, defaults to `False`):
                Whether this is streaming inference mode.

        Returns:
            `torch.Tensor`: Memory-conditioned feature tensor of shape `(batch_size, channels, height, width)`
            suitable for input to the SAM decoder.
        """
        # Get dimensions from the highest-level (lowest-resolution) feature map
        batch_size = current_vision_features.size(1)
        num_channels = self.hidden_dim
        height, width = self.backbone_feature_sizes[-1]
        device = current_vision_features.device
        # If memory is disabled (e.g., for single image SAM), return current features directly.
        if self.num_maskmem == 0:
            # Permute (SeqLen, Batch, Channels) -> (Batch, Channels, SeqLen) then view as (Batch, Channels, Height, Width)
            # Assuming SeqLen = Height * Width for the last feature map
            current_feature_map = current_vision_features.permute(1, 2, 0).view(
                batch_size, num_channels, height, width
            )
            return current_feature_map
        # Step 1: Handle initial conditioning frames
        if is_initial_conditioning_frame:
            # For initial conditioning frames, no prior memory is used directly in this block.
            # If configured, directly add a learnable "no memory" embedding.
            # current_vision_features has shape (SeqLen, Batch, Channels)
            conditioned_feature_map_flat = current_vision_features + self.no_memory_embedding
            # Reshape to (Batch, Channels, Height, Width)
            conditioned_feature_map = conditioned_feature_map_flat.permute(1, 2, 0).view(
                batch_size, num_channels, height, width
            )
            return conditioned_feature_map
        # Step 2: Get memory frames and concatenate their features
        temporal_positions_and_previous_outputs = self._gather_memory_frame_outputs(
            inference_session, obj_idx, frame_idx, track_in_reverse_time
        )
        memories_to_concatenate, memory_positional_embeddings_to_concatenate = self._build_memory_attention_inputs(
            temporal_positions_and_previous_outputs, device
        )
        # Step 3: Get and process object pointers
        temporal_offsets, pointer_tokens, max_object_pointers_to_use = self._get_object_pointers(
            inference_session, obj_idx, frame_idx, num_total_frames, device, track_in_reverse_time, streaming
        )
        num_object_pointer_tokens = 0
        if pointer_tokens:
            object_pointers, object_pointers_pos_embed = self._process_object_pointers(
                temporal_offsets, pointer_tokens, max_object_pointers_to_use, batch_size, num_channels, device
            )
            if object_pointers is not None:
                # Pointer tokens are appended after the spatial memories; their count is
                # passed to the memory attention below.
                memories_to_concatenate.append(object_pointers)
                memory_positional_embeddings_to_concatenate.append(object_pointers_pos_embed)
                num_object_pointer_tokens = object_pointers.shape[0]
        # Step 4: Concatenate all retrieved memories and their positional embeddings
        # (expected non-empty: Step 2 raises when there are no conditioning outputs).
        combined_memory = torch.cat(memories_to_concatenate, dim=0).to(dtype=inference_session.dtype)
        combined_memory_positional_embeddings = torch.cat(memory_positional_embeddings_to_concatenate, dim=0)
        # Step 5: Forward through the memory attention mechanism
        conditioned_feature_map_flat = self.memory_attention(
            current_vision_features=current_vision_features,
            current_vision_position_embeddings=current_vision_positional_embeddings,
            memory=combined_memory,
            memory_posision_embeddings=combined_memory_positional_embeddings,  # Corrected typo from API
            num_object_pointer_tokens=num_object_pointer_tokens,
        )
        # Reshape from (Batch, H*W, Channels) to (Batch, Channels, Height, Width)
        conditioned_feature_map = (
            conditioned_feature_map_flat.squeeze(1).permute(0, 2, 1).view(batch_size, num_channels, height, width)
        )
        return conditioned_feature_map
def _use_multimask(self, is_init_cond_frame: bool, point_inputs: dict | None) -> bool:
"""Whether to use multimask output in the SAM head."""
num_pts = 0 if point_inputs is None else point_inputs["point_labels"].size(2)
multimask_output = (
self.config.multimask_output_in_sam
and (is_init_cond_frame or self.config.multimask_output_for_tracking)
and (self.config.multimask_min_pt_num <= num_pts <= self.config.multimask_max_pt_num)
)
return multimask_output
    def _run_single_frame_inference(
        self,
        inference_session: Sam2VideoInferenceSession,
        frame_idx: int,
        obj_idx: int,
        batch_size: int,
        is_init_cond_frame: bool,
        point_inputs: dict | None,
        mask_inputs: torch.Tensor | None,
        reverse: bool,
        prev_sam_mask_logits: torch.Tensor | None = None,
        streaming: bool = False,
    ) -> dict[str, Any]:
        """
        Perform a single tracking step for video object segmentation.

        Args:
            inference_session (`Sam2VideoInferenceSession`):
                The video inference session object.
            frame_idx (`int`):
                Index of the current frame.
            obj_idx (`int`):
                Index of the current object.
            batch_size (`int`):
                Batch size of the current frame.
            is_init_cond_frame (`bool`):
                Whether this is an initial conditioning frame with user inputs.
            point_inputs (`dict`, *optional*):
                Point prompt inputs for the current frame.
            mask_inputs (`torch.Tensor`, *optional*):
                Mask prompt inputs for the current frame.
            reverse (`bool`, *optional*, defaults to `False`):
                Whether to track in reverse time order.
            prev_sam_mask_logits (`torch.Tensor`, *optional*):
                Previously predicted SAM mask logits that can be fed with new clicks.
            streaming (`bool`, *optional*, defaults to `False`):
                Whether this is streaming inference.

        Returns:
            `dict`: Dictionary containing the tracking results for the current frame, including:
                - pred_masks: Predicted low-resolution masks.
                - object_pointer: Object pointer for memory.
                - high_res_masks: High-resolution masks for batched memory encoding.
                - object_score_logits: Object score logits (inference only).
        """
        # Retrieve correct image features
        current_vision_feats, current_vision_pos_embeds = self._prepare_vision_features(
            inference_session, frame_idx, batch_size
        )
        # point and mask should not appear as input simultaneously on the same frame
        if point_inputs is not None and mask_inputs is not None:
            raise ValueError(
                "point_inputs and mask_inputs should not appear as input simultaneously on the same frame"
            )
        # High-resolution feature maps for the SAM head, reshape (HW)BC => BCHW
        if len(current_vision_feats) > 1:
            high_res_features = [
                x.permute(1, 2, 0).view(x.size(1), x.size(2), *s)
                for x, s in zip(current_vision_feats[:-1], self.backbone_feature_sizes[:-1])
            ]
        else:
            # NOTE(review): with a single feature level this stays None, and
            # `high_res_features + [pix_feat]` below would raise a TypeError —
            # presumably the backbone always yields multiple levels; confirm.
            high_res_features = None
        if mask_inputs is not None:
            # We directly output the mask input (see it as a GT mask) without using a SAM prompt encoder + mask decoder.
            pix_feat = current_vision_feats[-1].permute(1, 2, 0)
            pix_feat = pix_feat.view(-1, self.hidden_dim, *self.backbone_feature_sizes[-1])
            sam_outputs = self._use_mask_as_output(pix_feat, high_res_features, mask_inputs)
        else:
            # fused the visual feature with previous memory features in the memory bank
            pix_feat = self._prepare_memory_conditioned_features(
                inference_session=inference_session,
                frame_idx=frame_idx,
                obj_idx=obj_idx,
                is_initial_conditioning_frame=is_init_cond_frame,
                current_vision_features=current_vision_feats[-1],
                current_vision_positional_embeddings=current_vision_pos_embeds[-1],
                num_total_frames=inference_session.num_frames,
                track_in_reverse_time=reverse,
                streaming=streaming,
            )
            # apply SAM-style segmentation head
            # here we might feed previously predicted low-res SAM mask logits into the SAM mask decoder,
            # e.g. in demo where such logits come from earlier interaction instead of correction sampling
            # (in this case, any `mask_inputs` shouldn't reach here as they are sent to _use_mask_as_output instead)
            if prev_sam_mask_logits is not None:
                mask_inputs = prev_sam_mask_logits
            multimask_output = self._use_multimask(is_init_cond_frame, point_inputs)
            sam_outputs = self._single_frame_forward(
                pixel_values=None,  # Vision features already computed
                input_points=point_inputs["point_coords"] if point_inputs is not None else None,
                input_labels=point_inputs["point_labels"] if point_inputs is not None else None,
                input_masks=mask_inputs,
                image_embeddings=high_res_features + [pix_feat],
                multimask_output=multimask_output,
            )
        # Memory encoding is now handled in batch by the caller (forward method)
        current_out = {
            "pred_masks": sam_outputs.pred_masks,
            "object_pointer": sam_outputs.object_pointer,
            "high_res_masks": sam_outputs.high_res_masks,  # Needed for batched memory encoding
        }
        if not self.training:
            current_out["object_score_logits"] = sam_outputs.object_score_logits
        return current_out
def _encode_new_memory(
self,
current_vision_feats: torch.Tensor,
pred_masks_high_res: torch.Tensor,
object_score_logits: torch.Tensor,
is_mask_from_pts: bool,
) -> tuple[torch.Tensor, list[torch.Tensor]]:
"""Encode the current image and its prediction into a memory feature."""
batch_size = current_vision_feats.size(1) # batch size on this frame
channels = self.hidden_dim
height, width = self.backbone_feature_sizes[-1] # top-level (lowest-resolution) feature size
mask_input_size_h, mask_input_size_w = self.prompt_encoder.mask_input_size
mask_mem_size_h = mask_input_size_h * 4
mask_mem_size_w = mask_input_size_w * 4
if pred_masks_high_res.shape[2:] != (mask_mem_size_h, mask_mem_size_w):
# downsample the predicted high-res masks into the mask encoder input size
pred_masks_high_res = F.interpolate(
pred_masks_high_res.float(),
size=(mask_mem_size_h, mask_mem_size_w),
align_corners=False,
mode="bilinear",
antialias=True, # use antialias for downsampling
).to(pred_masks_high_res.dtype)
# top-level feature, (HW)BC => BCHW
pix_feat = current_vision_feats.permute(1, 2, 0).view(batch_size, channels, height, width)
if is_mask_from_pts and not self.training:
# binarize the mask logits
mask_for_mem = (pred_masks_high_res > 0).to(pred_masks_high_res.dtype)
else:
# apply sigmoid on the raw mask logits to turn them into range (0, 1)
mask_for_mem = torch.sigmoid(pred_masks_high_res)
# apply scale and bias terms to the sigmoid probabilities
mask_for_mem = mask_for_mem * self.config.sigmoid_scale_for_mem_enc
mask_for_mem = mask_for_mem + self.config.sigmoid_bias_for_mem_enc
maskmem_features, maskmem_pos_enc = self.memory_encoder(
pix_feat,
mask_for_mem,
)
# add a no-object embedding to the spatial memory to indicate that the frame
# is predicted to be occluded (i.e. no object is appearing in the frame)
if self.occlusion_spatial_embedding_parameter is not None:
is_obj_appearing = (object_score_logits > 0).float()
maskmem_features += (1 - is_obj_appearing[..., None]) * self.occlusion_spatial_embedding_parameter[
..., None, None
].expand(*maskmem_features.shape)
# convert to bfloat16 to save memory, and for consistency with the original implementation
maskmem_features = maskmem_features.to(torch.bfloat16).flatten(2).permute(2, 0, 1)
maskmem_pos_enc = maskmem_pos_enc.to(pred_masks_high_res.dtype).flatten(2).permute(2, 0, 1)
return maskmem_features, maskmem_pos_enc
    @torch.inference_mode()
    @auto_docstring(custom_intro="Propagate the objects through a streamed video frame.")
    def forward(
        self,
        inference_session: Sam2VideoInferenceSession,
        frame_idx: int | None = None,
        frame: torch.Tensor | None = None,
        reverse: bool = False,
        run_mem_encoder: bool = True,
        **kwargs,
    ) -> Sam2VideoSegmentationOutput:
        r"""
        inference_session (`Sam2VideoInferenceSession`):
            The video inference session object.
        frame_idx (`int`, *optional*):
            The index of the frame on which to run inference. No need to provide when inferring
            on a new streamed frame.
        frame (`torch.Tensor`, *optional*):
            The frame to process. Provide when streaming.
        reverse (`bool`, *optional*, defaults to `False`):
            Whether to propagate in reverse.
        run_mem_encoder (`bool`, *optional*, defaults to `True`):
            Whether to run the memory encoder on predicted masks. The memory encoder is batched across all objects for efficiency.
        """
        # Streaming mode: register the new frame with the session first.
        if frame is not None:
            frame_idx = inference_session.add_new_frame(frame, frame_idx)
        if frame is not None and inference_session.get_obj_num() == 0:
            raise ValueError("No objects are provided for tracking; please add inputs first.")
        num_objects = inference_session.get_obj_num()
        pred_masks_per_obj = [None] * num_objects
        object_score_logits_per_obj = [None] * num_objects
        # Collect data for batched memory encoding
        objects_needing_memory_encoding = []
        high_res_masks_for_memory = []
        object_score_logits_for_memory = []
        is_mask_from_pts_per_obj = []
        # Note: We avoid batched inference here because per-object inputs (clicks/masks)
        # can differ across objects.
        for obj_idx in range(num_objects):
            obj_id = inference_session.obj_idx_to_id(obj_idx)
            has_new_inputs = obj_id in inference_session.obj_with_new_inputs
            has_cond_output = frame_idx in inference_session.output_dict_per_obj[obj_idx]["cond_frame_outputs"]
            # If this object has no new inputs and this frame already has a
            # conditioning output, reuse the cached masks instead of recomputing.
            if (not has_new_inputs) and has_cond_output:
                pred_masks = inference_session.get_output(obj_idx, frame_idx, "pred_masks", is_conditioning_frame=True)
                object_score_logits = inference_session.get_output(
                    obj_idx, frame_idx, "object_score_logits", is_conditioning_frame=True
                )
                is_init_cond_frame = True
            else:
                # Defaults when there are no new inputs
                is_init_cond_frame = False
                point_inputs = None
                mask_inputs = None
                if has_new_inputs:
                    # A frame never tracked for this object counts as an initial
                    # conditioning frame.
                    is_init_cond_frame = frame_idx not in inference_session.frames_tracked_per_obj[obj_idx]
                    if is_init_cond_frame:
                        # NOTE(review): this also clears `reverse` for the remaining
                        # objects in this loop, not only the current one — confirm intended.
                        reverse = False
                    point_inputs = inference_session.point_inputs_per_obj[obj_idx].get(frame_idx, None)
                    mask_inputs = inference_session.mask_inputs_per_obj[obj_idx].get(frame_idx, None)
                    if point_inputs is not None or mask_inputs is not None:
                        # Inputs are consumed once; drop the "has new inputs" marker.
                        inference_session.obj_with_new_inputs.remove(obj_id)
                current_out = self._run_single_frame_inference(
                    inference_session=inference_session,
                    obj_idx=obj_idx,
                    frame_idx=frame_idx,
                    batch_size=1,  # run on the slice of a single object
                    is_init_cond_frame=is_init_cond_frame,
                    point_inputs=point_inputs,
                    mask_inputs=mask_inputs,
                    reverse=reverse,
                    streaming=frame is not None,
                )
                inference_session.store_output(
                    obj_idx, frame_idx, output_value=current_out, is_conditioning_frame=is_init_cond_frame
                )
                pred_masks = current_out["pred_masks"]
                object_score_logits = current_out["object_score_logits"]
                # Collect data for batched memory encoding
                if run_mem_encoder and self.num_maskmem > 0:
                    objects_needing_memory_encoding.append(obj_idx)
                    high_res_masks_for_memory.append(current_out["high_res_masks"])
                    object_score_logits_for_memory.append(object_score_logits)
                    is_mask_from_pts_per_obj.append(point_inputs is not None or mask_inputs is not None)
            pred_masks_per_obj[obj_idx] = pred_masks
            object_score_logits_per_obj[obj_idx] = object_score_logits.squeeze(-1)
            if not is_init_cond_frame:
                # only for tracked frames, not for initial conditioning frames
                inference_session.frames_tracked_per_obj[obj_idx][frame_idx] = {"reverse": reverse}
        # Batch encode memories for all objects at once
        self._batch_encode_memories(
            inference_session=inference_session,
            frame_idx=frame_idx,
            objects_needing_memory_encoding=objects_needing_memory_encoding,
            high_res_masks_for_memory=high_res_masks_for_memory,
            object_score_logits_for_memory=object_score_logits_for_memory,
            is_mask_from_pts_per_obj=is_mask_from_pts_per_obj,
        )
        # Resize the output mask to the original video resolution (we directly use
        # the mask scores on GPU for output to avoid any CPU conversion in between)
        if len(pred_masks_per_obj) > 1:
            all_pred_masks = torch.cat(pred_masks_per_obj, dim=0)
            all_object_score_logits = torch.cat(object_score_logits_per_obj, dim=0)
        else:
            all_pred_masks = pred_masks_per_obj[0]
            all_object_score_logits = object_score_logits_per_obj[0]
        return Sam2VideoSegmentationOutput(
            object_ids=inference_session.obj_ids.copy(),
            pred_masks=all_pred_masks,
            object_score_logits=all_object_score_logits,
            frame_idx=frame_idx,
        )
def _batch_encode_memories(
self,
inference_session: Sam2VideoInferenceSession,
frame_idx: int,
objects_needing_memory_encoding: list[int],
high_res_masks_for_memory: list[torch.Tensor],
object_score_logits_for_memory: list[torch.Tensor],
is_mask_from_pts_per_obj: list[bool],
):
"""
Batch encode memories for multiple objects at once.
Args:
inference_session: The video inference session object
frame_idx: Index of the current frame
objects_needing_memory_encoding: List of object indices that need memory encoding
high_res_masks_for_memory: List of high-resolution masks for each object
object_score_logits_for_memory: List of object score logits for each object
is_mask_from_pts_per_obj: List of booleans indicating if mask is from points for each object
"""
if not objects_needing_memory_encoding:
return
# Get vision features once for all objects
current_vision_feats, _ = self._prepare_vision_features(inference_session, frame_idx, batch_size=1)
# Stack all high-res masks and object scores
high_res_masks_batched = torch.cat(high_res_masks_for_memory, dim=0)
object_score_logits_batched = torch.cat(object_score_logits_for_memory, dim=0)
# Expand vision features to match batch size
expanded_vision_feats = current_vision_feats[-1].expand(-1, len(objects_needing_memory_encoding), -1)
# Encode all memories in one batch call
maskmem_features_batched, maskmem_pos_enc_batched = self._encode_new_memory(
current_vision_feats=expanded_vision_feats,
pred_masks_high_res=high_res_masks_batched,
object_score_logits=object_score_logits_batched,
is_mask_from_pts=any(is_mask_from_pts_per_obj),
)
# Split and store encoded memories per object
for i, obj_idx in enumerate(objects_needing_memory_encoding):
# Extract per-object memory from batched result
maskmem_features = maskmem_features_batched[:, i : i + 1]
maskmem_pos_enc = maskmem_pos_enc_batched[:, i : i + 1]
# Update the stored output with memory features
output_dict = inference_session.output_dict_per_obj[obj_idx]
# Determine if this was a conditioning frame
storage_key = (
"cond_frame_outputs" if frame_idx in output_dict["cond_frame_outputs"] else "non_cond_frame_outputs"
)
if frame_idx in output_dict[storage_key]:
output_dict[storage_key][frame_idx]["maskmem_features"] = maskmem_features
output_dict[storage_key][frame_idx]["maskmem_pos_enc"] = maskmem_pos_enc
@torch.inference_mode()
@auto_docstring(
custom_intro="""
Propagate the objects through the video frames. Used when initializing an inference session with a whole video.
Yields Sam2VideoSegmentationOutput for each frame.
"""
)
def propagate_in_video_iterator(
self,
inference_session: Sam2VideoInferenceSession,
start_frame_idx: int | None = None,
max_frame_num_to_track: int | None = None,
reverse: bool = False,
show_progress_bar: bool = False,
) -> Iterator[Sam2VideoSegmentationOutput]:
r"""
inference_session (`Sam2VideoInferenceSession`):
The video inference session object.
start_frame_idx (`int`, *optional*):
The starting frame index for propagation.
Need to be provided if `forward` hasn't been called on new inputs yet.
If not provided, the starting frame index will be the earliest frame with input points.
max_frame_num_to_track (`int`, *optional*):
The maximum number of frames to track.
reverse (`bool`, *optional*, defaults to `False`):
Whether to propagate in reverse.
show_progress_bar (`bool`, *optional*, defaults to `False`):
Whether to show a progress bar during propagation.
"""
num_frames = inference_session.num_frames
# set start index, end index, and processing order
if start_frame_idx is None:
# default: start from the earliest frame with input points
frames_with_inputs = [
frame_idx
for obj_output_dict in inference_session.output_dict_per_obj.values()
for frame_idx in obj_output_dict["cond_frame_outputs"]
]
if not frames_with_inputs:
raise ValueError(
"Cannot determine the starting frame index; please specify it manually, or run inference on a frame with inputs first."
)
start_frame_idx = min(frames_with_inputs)
if max_frame_num_to_track is None:
# default: track all the frames in the video
max_frame_num_to_track = num_frames
if reverse:
end_frame_idx = max(start_frame_idx - max_frame_num_to_track, 0)
if start_frame_idx > 0:
processing_order = range(start_frame_idx, end_frame_idx - 1, -1)
else:
processing_order = [] # skip reverse tracking if starting from frame 0
else:
end_frame_idx = min(start_frame_idx + max_frame_num_to_track, num_frames - 1)
processing_order = range(start_frame_idx, end_frame_idx + 1)
for frame_idx in tqdm(processing_order, desc="propagate in video", disable=not show_progress_bar):
sam2_video_output = self(inference_session, frame_idx=frame_idx, reverse=reverse)
yield sam2_video_output
# Public objects exported by this module.
__all__ = [
    "Sam2VideoModel",
    "Sam2VideoInferenceSession",
    "Sam2VideoPreTrainedModel",
    "Sam2VideoMaskDecoderConfig",
    "Sam2VideoPromptEncoderConfig",
    "Sam2VideoProcessor",
    "Sam2VideoConfig",
]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/sam2_video/modular_sam2_video.py",
"license": "Apache License 2.0",
"lines": 2269,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/sam2_video/video_processing_sam2_video.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fast Image processor class for SAM2."""
import numpy as np
import torch
import torch.nn.functional as F
from ...image_processing_utils import BatchFeature
from ...image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling, SizeDict
from ...utils import TensorType
from ...video_processing_utils import BaseVideoProcessor
class Sam2VideoVideoProcessor(BaseVideoProcessor):
    r"""
    Video processor for SAM2 video inputs: frames are resized to a fixed square
    input resolution and normalized with ImageNet statistics, while the original
    sizes are tracked for later mask post-processing.
    """

    resample = PILImageResampling.BILINEAR
    # ImageNet mean/std used for per-channel normalization.
    image_mean = IMAGENET_DEFAULT_MEAN
    image_std = IMAGENET_DEFAULT_STD
    # Frames are resized to this fixed square resolution.
    size = {"height": 1024, "width": 1024}
    do_resize = True
    do_rescale = True
    do_normalize = True
    do_convert_rgb = True
    model_input_names = ["pixel_values"]
def _preprocess(
self,
videos: list["torch.Tensor"],
size: SizeDict,
return_tensors: str | TensorType | None,
**kwargs,
) -> BatchFeature:
original_sizes = [video.shape[-2:] for video in videos]
reshaped_input_sizes = [(size.height, size.width) for _ in range(len(videos))]
batch_feature = super()._preprocess(videos, size=size, return_tensors=return_tensors, **kwargs)
batch_feature = BatchFeature(
data={
"original_sizes": original_sizes,
"reshaped_input_sizes": reshaped_input_sizes,
**batch_feature.data,
},
tensor_type=return_tensors,
)
return batch_feature
def post_process_masks(
self, masks, original_sizes, reshaped_input_sizes, mask_threshold=0.0, binarize=True, pad_size=None
):
"""
Remove padding and upscale masks to the original image size.
Args:
masks (`Union[List[torch.Tensor], List[np.ndarray]]`):
Batched masks from the mask_decoder in (batch_size, num_channels, height, width) format.
original_sizes (`Union[torch.Tensor, List[Tuple[int,int]]]`):
The original sizes of each image before it was resized to the model's expected input shape, in (height,
width) format.
reshaped_input_sizes (`Union[torch.Tensor, List[Tuple[int,int]]]`):
The size of each image as it is fed to the model, in (height, width) format. Used to remove padding.
mask_threshold (`float`, *optional*, defaults to 0.0):
The threshold to use for binarizing the masks.
binarize (`bool`, *optional*, defaults to `True`):
Whether to binarize the masks.
pad_size (`int`, *optional*, defaults to `self.pad_size`):
The target size the images were padded to before being passed to the model. If None, the target size is
assumed to be the processor's `pad_size`.
Returns:
(`torch.Tensor`): Batched masks in batch_size, num_channels, height, width) format, where (height, width)
is given by original_size.
"""
pad_size = self.size if pad_size is None else pad_size
target_image_size = (pad_size["height"], pad_size["width"])
if isinstance(original_sizes, (torch.Tensor, np.ndarray)):
original_sizes = original_sizes.tolist()
if isinstance(reshaped_input_sizes, (torch.Tensor, np.ndarray)):
reshaped_input_sizes = reshaped_input_sizes.tolist()
output_masks = []
for i, original_size in enumerate(original_sizes):
if isinstance(masks[i], np.ndarray):
masks[i] = torch.from_numpy(masks[i])
elif not isinstance(masks[i], torch.Tensor):
raise TypeError("Input masks should be a list of `torch.tensors` or a list of `np.ndarray`")
interpolated_mask = F.interpolate(masks[i], target_image_size, mode="bilinear", align_corners=False)
interpolated_mask = interpolated_mask[..., : reshaped_input_sizes[i][0], : reshaped_input_sizes[i][1]]
interpolated_mask = F.interpolate(interpolated_mask, original_size, mode="bilinear", align_corners=False)
if binarize:
interpolated_mask = interpolated_mask > mask_threshold
output_masks.append(interpolated_mask)
return output_masks
__all__ = ["Sam2VideoVideoProcessor"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/sam2_video/video_processing_sam2_video.py",
"license": "Apache License 2.0",
"lines": 94,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:tests/models/sam2/test_image_processing_sam2.py | # Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from datasets import load_dataset
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torchvision_available
from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available() and is_torchvision_available():
from transformers import Sam2ImageProcessorFast
class Sam2ImageProcessingTester:
    """Provides the configuration dict and synthetic inputs used by the SAM2 image processor tests."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        mask_size=None,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        self.parent = parent
        # Input generation parameters.
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        # Processor parameters; fall back to the default test resolutions when not given.
        self.size = size if size is not None else {"height": 20, "width": 20}
        self.mask_size = mask_size if mask_size is not None else {"height": 12, "width": 12}
        self.do_resize = do_resize
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        """Return the kwargs used to instantiate the image processor under test."""
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "mask_size": self.mask_size,
        }

    def expected_output_image_shape(self, images):
        """Shape of one processed image: (channels, target height, target width)."""
        return (self.num_channels, self.size["height"], self.size["width"])

    def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        """Delegate to the shared helper to build a batch of random test images."""
        return prepare_image_inputs(
            batch_size=self.batch_size,
            num_channels=self.num_channels,
            min_resolution=self.min_resolution,
            max_resolution=self.max_resolution,
            equal_resolution=equal_resolution,
            numpify=numpify,
            torchify=torchify,
        )
# Copied from transformers.tests.models.beit.test_image_processing_beit.prepare_semantic_single_inputs
def prepare_semantic_single_inputs():
    """Load one (image, segmentation map) pair from the ADE20K test fixtures."""
    sample = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")[0]
    return sample["image"], sample["map"]
# Copied from transformers.tests.models.beit.test_image_processing_beit.prepare_semantic_batch_inputs
def prepare_semantic_batch_inputs():
    """Load the first two (image, segmentation map) pairs from the ADE20K test fixtures."""
    ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
    images = list(ds["image"][:2])
    maps = list(ds["map"][:2])
    return images, maps
@require_torch
@require_vision
class SamImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase):
    """Tests for the fast SAM2 image processor (slow variant is not exercised)."""

    fast_image_processing_class = Sam2ImageProcessorFast if is_torchvision_available() else None
    test_slow_image_processor = False

    def setUp(self):
        super().setUp()
        self.image_processor_tester = Sam2ImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        # Every processor variant must expose the full set of SAM2 attributes.
        expected_attributes = (
            "image_mean",
            "image_std",
            "do_normalize",
            "do_resize",
            "size",
            "do_rescale",
            "rescale_factor",
            "mask_size",
        )
        for image_processing_class in self.image_processor_list:
            image_processing = image_processing_class(**self.image_processor_dict)
            for attribute in expected_attributes:
                self.assertTrue(hasattr(image_processing, attribute))

    def test_image_processor_from_dict_with_kwargs(self):
        for image_processing_class in self.image_processor_list:
            image_processing_class = image_processing_class(**self.image_processor_dict)
            image_processor = image_processing_class.from_dict(self.image_processor_dict)
            self.assertEqual(image_processor.size, {"height": 20, "width": 20})
            # An integer `size` kwarg overrides the dict and expands to a square.
            image_processor = image_processing_class.from_dict(self.image_processor_dict, size=42)
            self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def _check_segmentation_encoding(self, encoding, expected_batch):
        """Assert pixel_values/labels shapes, label dtype, and label value range for a batch."""
        tester = self.image_processor_tester
        self.assertEqual(
            encoding["pixel_values"].shape,
            (expected_batch, tester.num_channels, tester.size["height"], tester.size["width"]),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (expected_batch, tester.mask_size["height"], tester.mask_size["width"]),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

    def test_call_segmentation_maps(self):
        for image_processing_class in self.image_processor_list:
            image_processor = image_processing_class(**self.image_processor_dict)
            # Random torch tensors paired with all-zero segmentation maps.
            image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, torchify=True)
            maps = []
            for image in image_inputs:
                self.assertIsInstance(image, torch.Tensor)
                maps.append(torch.zeros(image.shape[-2:]).long())

            # Single tensor input.
            encoding = image_processor(image_inputs[0], maps[0], return_tensors="pt")
            self._check_segmentation_encoding(encoding, 1)

            # Batched tensor input.
            encoding = image_processor(image_inputs, maps, return_tensors="pt")
            self._check_segmentation_encoding(encoding, self.image_processor_tester.batch_size)

            # Single PIL image input.
            image, segmentation_map = prepare_semantic_single_inputs()
            encoding = image_processor(image, segmentation_map, return_tensors="pt")
            self._check_segmentation_encoding(encoding, 1)

            # Batched PIL image input.
            images, segmentation_maps = prepare_semantic_batch_inputs()
            encoding = image_processor(images, segmentation_maps, return_tensors="pt")
            self._check_segmentation_encoding(encoding, 2)
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/sam2/test_image_processing_sam2.py",
"license": "Apache License 2.0",
"lines": 212,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:tests/models/sam2/test_modeling_sam2.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch SAM2 model."""
import gc
import tempfile
import unittest
import requests
from transformers import (
Sam2Config,
Sam2HieraDetConfig,
Sam2MaskDecoderConfig,
Sam2Processor,
Sam2PromptEncoderConfig,
Sam2VisionConfig,
pipeline,
)
from transformers.testing_utils import (
backend_empty_cache,
require_torch,
slow,
torch_device,
)
from transformers.utils import is_torch_available, is_vision_available
from transformers.video_utils import load_video
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import Sam2Model, Sam2Processor, Sam2VisionModel
if is_vision_available():
from PIL import Image
class Sam2VisionModelTester:
    """Builds tiny Sam2 vision configs and random pixel inputs for the vision-encoder tests."""

    def __init__(
        self,
        parent,
        hidden_size=12,
        embed_dim_per_stage=[12, 24, 48, 96],
        num_attention_heads_per_stage=[1, 2, 4, 8],
        num_channels=3,
        image_size=128,
        patch_kernel_size=7,
        patch_stride=4,
        patch_padding=3,
        batch_size=2,
        blocks_per_stage=[1, 2, 7, 2],
        backbone_channel_list=[96, 48, 24, 12],
        backbone_feature_sizes=[[32, 32], [16, 16], [8, 8]],
        fpn_hidden_size=32,
        is_training=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        # Input geometry.
        self.image_size = image_size
        self.num_channels = num_channels
        # Hiera backbone layout.
        self.hidden_size = hidden_size
        self.patch_kernel_size = patch_kernel_size
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.blocks_per_stage = blocks_per_stage
        self.embed_dim_per_stage = embed_dim_per_stage
        self.num_attention_heads_per_stage = num_attention_heads_per_stage
        # FPN neck layout.
        self.backbone_channel_list = backbone_channel_list
        self.backbone_feature_sizes = backbone_feature_sizes
        self.fpn_hidden_size = fpn_hidden_size

    def get_config(self):
        """Build a `Sam2VisionConfig` wrapping a small Hiera backbone config."""
        return Sam2VisionConfig(
            backbone_config=Sam2HieraDetConfig(
                hidden_size=self.hidden_size,
                num_channels=self.num_channels,
                image_size=self.image_size,
                patch_stride=self.patch_stride,
                patch_kernel_size=self.patch_kernel_size,
                patch_padding=self.patch_padding,
                blocks_per_stage=self.blocks_per_stage,
                embed_dim_per_stage=self.embed_dim_per_stage,
                num_attention_heads_per_stage=self.num_attention_heads_per_stage,
            ),
            backbone_channel_list=self.backbone_channel_list,
            backbone_feature_sizes=self.backbone_feature_sizes,
            fpn_hidden_size=self.fpn_hidden_size,
        )

    def prepare_config_and_inputs(self):
        """Return (config, random pixel batch of shape (batch, channels, H, W))."""
        config = self.get_config()
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        return config, pixel_values

    def create_and_check_model(self, config, pixel_values):
        """Run a forward pass and check the last hidden state shape."""
        model = Sam2VisionModel(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        num_stages = len(self.blocks_per_stage)
        # Expected output: image_size // patch_stride // (2 * num_stages) spatially,
        # with hidden_size * 2 * num_stages channels.
        output_size = self.image_size // self.patch_stride // (2 * num_stages)
        output_channels = self.hidden_size * 2 * num_stages
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, output_size, output_size, output_channels)
        )

    def prepare_config_and_inputs_for_common(self):
        """Adapt `prepare_config_and_inputs` to the (config, inputs_dict) form the common mixin expects."""
        config, pixel_values = self.prepare_config_and_inputs()
        return config, {"pixel_values": pixel_values}
@require_torch
class Sam2VisionModelTest(ModelTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as SAM's vision encoder does not use input_ids, inputs_embeds,
    attention_mask and seq_length.
    """
    all_model_classes = (Sam2VisionModel,) if is_torch_available() else ()
    # The vision encoder has no token embeddings, so resize tests do not apply.
    test_resize_embeddings = False
    def setUp(self):
        # has_text_modality=False: the vision config carries no vocab-related attributes.
        self.model_tester = Sam2VisionModelTester(self)
        self.config_tester = ConfigTester(self, config_class=Sam2VisionConfig, has_text_modality=False)
    def test_config(self):
        # Run the common config checks individually (serialization round-trips and init behavior).
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    @unittest.skip(reason="SAM's vision encoder does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    def test_model_get_set_embeddings(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            # Input embeddings must be a module; output embeddings may be absent (None) or a linear head.
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    # Overriding as attention shape depends on window_size
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            # Eager attention so that attention weights are materialized in the outputs.
            model = model_class._from_config(config, attn_implementation="eager")
            config = model.config
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            # One attention map per Hiera block across all stages.
            expected_num_attentions = sum(self.model_tester.blocks_per_stage)
            self.assertEqual(len(attentions), expected_num_attentions)
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            config.backbone_config.output_attentions = True
            # Windowed attention: shape is (num_windows, window_size, window_size, dim)
            # rather than the usual (batch, heads, seq, seq).
            window_size = config.backbone_config.window_size_per_stage[0]
            out_dim = config.backbone_config.hidden_size
            patch_stride = config.backbone_config.patch_stride
            num_windows = (
                self.model_tester.batch_size * (config.backbone_config.image_size // (window_size * patch_stride)) ** 2
            )
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)
            self.assertListEqual(
                list(attentions[0].shape[-4:]),
                [num_windows, window_size, window_size, out_dim],
            )
            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)
            self.assertListEqual(
                list(attentions[0].shape[-4:]),
                [num_windows, window_size, window_size, out_dim],
            )
    # Overriding as attention shape depends on window_size
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class, image_size):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            # One hidden state per Hiera block plus the initial patch embedding.
            expected_num_layers = sum(self.model_tester.blocks_per_stage) + 1
            self.assertEqual(len(hidden_states), expected_num_layers)
            self.assertListEqual(
                list(hidden_states[0].shape[-4:]),
                [
                    self.model_tester.batch_size,
                    self.model_tester.image_size // self.model_tester.patch_stride,
                    self.model_tester.image_size // self.model_tester.patch_stride,
                    self.model_tester.hidden_size,
                ],
            )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        image_size = self.model_tester.image_size
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class, image_size)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            config.backbone_config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class, image_size)
    # Override as the numerical difference is slightly higher than the default threshold
    def test_batching_equivalence(self, atol=5e-4, rtol=5e-4):
        super().test_batching_equivalence(atol=atol, rtol=rtol)
    def test_sdpa_can_compile_dynamic(self):
        self.skipTest(reason="SAM model can't be compiled dynamic yet")
class Sam2PromptEncoderTester:
    """Builds a tiny `Sam2PromptEncoderConfig` plus dummy point prompts for testing."""

    def __init__(
        self,
        hidden_size=32,
        input_image_size=128,
        patch_size=16,
        mask_input_channels=8,
        num_point_embeddings=4,
        hidden_act="gelu",
        batch_size=2,
    ):
        self.hidden_size = hidden_size
        self.input_image_size = input_image_size
        self.patch_size = patch_size
        self.mask_input_channels = mask_input_channels
        self.num_point_embeddings = num_point_embeddings
        self.hidden_act = hidden_act
        # Fix: `prepare_config_and_inputs` reads `self.batch_size`, which was never
        # set in `__init__` and raised AttributeError when called.
        self.batch_size = batch_size

    def get_config(self):
        """Return a `Sam2PromptEncoderConfig` mirroring the tester attributes."""
        return Sam2PromptEncoderConfig(
            image_size=self.input_image_size,
            patch_size=self.patch_size,
            mask_input_channels=self.mask_input_channels,
            hidden_size=self.hidden_size,
            num_point_embeddings=self.num_point_embeddings,
            hidden_act=self.hidden_act,
        )

    def prepare_config_and_inputs(self):
        """Return (config, random point prompts of shape (batch_size, 3, 2))."""
        dummy_points = floats_tensor([self.batch_size, 3, 2])
        config = self.get_config()
        return config, dummy_points
class Sam2MaskDecoderTester:
    """Builds a tiny `Sam2MaskDecoderConfig` plus dummy image embeddings for testing."""

    def __init__(
        self,
        hidden_size=32,
        hidden_act="relu",
        mlp_dim=64,
        num_hidden_layers=2,
        num_attention_heads=4,
        attention_downsample_rate=2,
        num_multimask_outputs=3,
        iou_head_depth=3,
        iou_head_hidden_dim=32,
        batch_size=2,
    ):
        self.hidden_size = hidden_size
        self.hidden_act = hidden_act
        self.mlp_dim = mlp_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.attention_downsample_rate = attention_downsample_rate
        self.num_multimask_outputs = num_multimask_outputs
        self.iou_head_depth = iou_head_depth
        self.iou_head_hidden_dim = iou_head_hidden_dim
        # Fix: `prepare_config_and_inputs` reads `self.batch_size`, which was never
        # set in `__init__` and raised AttributeError when called.
        self.batch_size = batch_size

    def get_config(self):
        """Return a `Sam2MaskDecoderConfig` mirroring the tester attributes."""
        return Sam2MaskDecoderConfig(
            hidden_size=self.hidden_size,
            hidden_act=self.hidden_act,
            mlp_dim=self.mlp_dim,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            attention_downsample_rate=self.attention_downsample_rate,
            num_multimask_outputs=self.num_multimask_outputs,
            iou_head_depth=self.iou_head_depth,
            iou_head_hidden_dim=self.iou_head_hidden_dim,
        )

    def prepare_config_and_inputs(self):
        """Return (config, dict with a random (batch_size, hidden_size) image embedding)."""
        config = self.get_config()
        dummy_inputs = {
            "image_embedding": floats_tensor([self.batch_size, self.hidden_size]),
        }
        return config, dummy_inputs
class Sam2ModelTester:
    """Assembles a miniature `Sam2Config` (vision + prompt encoder + mask decoder + memory
    modules) and random inputs for the full-model tests."""

    def __init__(
        self,
        parent,
        num_channels=3,
        image_size=128,
        hidden_size=12,
        patch_kernel_size=7,
        patch_stride=4,
        patch_padding=3,
        blocks_per_stage=[1, 2, 7, 2],
        embed_dim_per_stage=[12, 24, 48, 96],
        backbone_channel_list=[96, 48, 24, 12],
        backbone_feature_sizes=[[32, 32], [16, 16], [8, 8]],
        fpn_hidden_size=32,
        memory_encoder_hidden_size=32,
        batch_size=2,
        is_training=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.is_training = is_training
        # Hiera backbone geometry.
        self.image_size = image_size
        self.hidden_size = hidden_size
        self.patch_kernel_size = patch_kernel_size
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.blocks_per_stage = blocks_per_stage
        self.embed_dim_per_stage = embed_dim_per_stage
        # FPN neck and memory module sizes.
        self.backbone_channel_list = backbone_channel_list
        self.backbone_feature_sizes = backbone_feature_sizes
        self.fpn_hidden_size = fpn_hidden_size
        self.memory_encoder_hidden_size = memory_encoder_hidden_size
        # Sub-testers supply the prompt-encoder and mask-decoder configs.
        self.prompt_encoder_tester = Sam2PromptEncoderTester()
        self.mask_decoder_tester = Sam2MaskDecoderTester()

    def prepare_config_and_inputs(self):
        """Return (config, random pixel batch of shape (batch, channels, H, W))."""
        config = self.get_config()
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        return config, pixel_values

    def get_config(self):
        """Compose the full `Sam2Config` from its backbone/vision/prompt/decoder pieces."""
        backbone_config = Sam2HieraDetConfig(
            hidden_size=self.hidden_size,
            num_channels=self.num_channels,
            image_size=self.image_size,
            patch_stride=self.patch_stride,
            patch_kernel_size=self.patch_kernel_size,
            patch_padding=self.patch_padding,
            blocks_per_stage=self.blocks_per_stage,
            embed_dim_per_stage=self.embed_dim_per_stage,
        )
        vision_config = Sam2VisionConfig(
            backbone_config=backbone_config,
            backbone_channel_list=self.backbone_channel_list,
            backbone_feature_sizes=self.backbone_feature_sizes,
            fpn_hidden_size=self.fpn_hidden_size,
        )
        return Sam2Config(
            vision_config=vision_config,
            prompt_encoder_config=self.prompt_encoder_tester.get_config(),
            mask_decoder_config=self.mask_decoder_tester.get_config(),
            memory_attention_hidden_size=self.hidden_size,
            memory_encoder_hidden_size=self.memory_encoder_hidden_size,
            image_size=self.image_size,
            mask_downsampler_embed_dim=32,
            memory_fuser_embed_dim=32,
            memory_attention_num_layers=1,
            memory_attention_feed_forward_hidden_size=32,
        )

    def create_and_check_model(self, config, pixel_values):
        """Forward pass with no prompts; check IoU-score and predicted-mask shapes."""
        model = Sam2Model(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        # 3 = num_multimask_outputs from the mask-decoder tester defaults.
        self.parent.assertEqual(result.iou_scores.shape, (self.batch_size, 1, 3))
        self.parent.assertEqual(result.pred_masks.shape[:3], (self.batch_size, 1, 3))

    def prepare_config_and_inputs_for_common(self):
        """Adapt `prepare_config_and_inputs` to the (config, inputs_dict) form the common mixin expects."""
        config, pixel_values = self.prepare_config_and_inputs()
        return config, {"pixel_values": pixel_values}
@require_torch
class Sam2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""
Here we also overwrite some of the tests of test_modeling_common.py, as SAM's vision encoder does not use input_ids, inputs_embeds,
attention_mask and seq_length.
"""
all_model_classes = (Sam2Model,) if is_torch_available() else ()
pipeline_model_mapping = (
{"feature-extraction": Sam2Model, "mask-generation": Sam2Model} if is_torch_available() else {}
)
test_resize_embeddings = False
_is_composite = True
def setUp(self):
self.model_tester = Sam2ModelTester(self)
common_properties = ["initializer_range"]
self.config_tester = ConfigTester(
self, config_class=Sam2Config, has_text_modality=False, common_properties=common_properties
)
def test_config(self):
self.config_tester.run_common_tests()
@unittest.skip(reason="SAM's vision encoder does not use inputs_embeds")
def test_inputs_embeds(self):
pass
def test_model_get_set_embeddings(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
x = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(x, nn.Linear))
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
# Overriding as attention shape depends on window_size
def test_attention_outputs(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
for model_class in self.all_model_classes:
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = False
config.return_dict = True
model = model_class._from_config(config, attn_implementation="eager")
config = model.config
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.vision_attentions
expected_num_attentions = sum(self.model_tester.blocks_per_stage)
self.assertEqual(len(attentions), expected_num_attentions)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
config.mask_decoder_config.output_attentions = True
config.vision_config.output_attentions = True
config.vision_config.backbone_config.output_attentions = True
config.output_attentions = True
model = model_class._from_config(config, attn_implementation="eager")
window_size = config.vision_config.backbone_config.window_size_per_stage[0]
out_dim = self.model_tester.hidden_size
patch_stride = self.model_tester.patch_stride
num_windows = (
self.model_tester.batch_size * (self.model_tester.image_size // (window_size * patch_stride)) ** 2
)
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.vision_attentions
self.assertEqual(len(attentions), expected_num_attentions)
self.assertListEqual(
list(attentions[0].shape[-4:]),
[num_windows, window_size, window_size, out_dim],
)
# Check attention is always last and order is fine
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.vision_attentions
self.assertEqual(len(attentions), expected_num_attentions)
self.assertListEqual(
list(attentions[0].shape[-4:]),
[num_windows, window_size, window_size, out_dim],
)
# Override as Sam2Model has different sub-modules
def test_sdpa_can_dispatch_composite_models(self):
"""
Tests if composite models dispatch correctly on SDPA/eager when requested so when loading the model.
This tests only by looking at layer names, as usually SDPA layers are called "SDPAAttention".
In contrast to the above test, this one checks if the "config._attn_implementation" is a dict after the model
is loaded, because we manually replicate requested attn implementation on each sub-config when loading.
See https://github.com/huggingface/transformers/pull/32238 for more info
The test tries to cover most general cases of composite models, VLMs with vision and text configs. Any model
that has a different set of sub-configs has to overwrite this test.
"""
if not self.has_attentions:
self.skipTest(reason="Model architecture does not support attentions")
if not self._is_composite:
self.skipTest(f"{self.all_model_classes[0].__name__} does not support SDPA")
for model_class in self.all_model_classes:
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
model = model_class(config)
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
model_sdpa = model_class.from_pretrained(tmpdirname, attn_implementation="sdpa")
model_sdpa = model_sdpa.eval().to(torch_device)
vision_encoder_sdpa = getattr(model_sdpa, "vision_encoder")
mask_decoder_sdpa = getattr(model_sdpa, "mask_decoder")
# `None` as it is the requested one which will be assigned to each sub-config
# Sub-model will dispatch to SDPA if it can (checked below that `SDPA` layers are present)
self.assertTrue(mask_decoder_sdpa.config._attn_implementation == "sdpa")
self.assertTrue(vision_encoder_sdpa.config._attn_implementation == "sdpa")
model_eager = model_class.from_pretrained(tmpdirname, attn_implementation="eager")
model_eager = model_eager.eval().to(torch_device)
self.assertTrue(getattr(model_eager, "mask_decoder").config._attn_implementation == "eager")
self.assertTrue(getattr(model_eager, "vision_encoder").config._attn_implementation == "eager")
for name, submodule in model_eager.named_modules():
class_name = submodule.__class__.__name__
if (
class_name.endswith("Attention")
and getattr(submodule, "config", None)
and submodule.config._attn_implementation == "sdpa"
):
raise ValueError("The eager model should not have SDPA attention layers")
# Override as Sam2Model doesn't have hidden states
def flash_attn_inference_equivalence(
    self, attn_implementation: str, padding_side: str, atol: float = 4e-2, rtol: float = 4e-2
):
    r"""
    Tests the equivalence between the eager and flash attention implementations.
    This test is only for inference and runs with `dtype=torch.bfloat16`.

    Overridden from the common test because Sam2 outputs expose
    `vision_hidden_states` rather than `hidden_states`.
    """
    if not self.has_attentions:
        self.skipTest(reason="Model architecture does not support attentions")
    # TODO take a look at this
    # head size needs to be a multiple of 8 but needs more adjustments than our current `_prepare_config_headdim`
    if attn_implementation != "flash_attention_2":
        self.skipTest(
            reason="Model fails for every other FA implementation than FA2 due to dim incompatibilities."
        )
    for model_class in self.all_model_classes:
        if not getattr(model_class, "_supports_flash_attn"):
            self.skipTest(f"{model_class.__name__} does not support Flash Attention")
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        model = model_class(config)
        # Round-trip through save/load so each copy can be instantiated with a
        # different attention implementation from the same weights.
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model_fa = model_class.from_pretrained(
                tmpdirname, dtype=torch.bfloat16, attn_implementation=attn_implementation
            )
            model_fa.to(torch_device)
            model = model_class.from_pretrained(tmpdirname, dtype=torch.bfloat16)
            model.to(torch_device)
            dummy_input = inputs_dict[model.main_input_name][:1]
            if dummy_input.dtype in [torch.float32, torch.float16]:
                dummy_input = dummy_input.to(torch.bfloat16)
            dummy_attention_mask = inputs_dict.get("attention_mask", None)
            if dummy_attention_mask is not None:
                dummy_attention_mask = dummy_attention_mask[:1]
                # Force one padded position on the requested side so the padded
                # token can be excluded from the comparison below.
                if padding_side == "left":
                    dummy_attention_mask[:, 1:] = 1
                    dummy_attention_mask[:, :1] = 0
                else:
                    dummy_attention_mask[:, :-1] = 1
                    dummy_attention_mask[:, -1:] = 0
            # First pass: no attention mask.
            if model.config.is_encoder_decoder:
                decoder_input_ids = inputs_dict.get("decoder_input_ids", dummy_input)[:1]
                outputs = model(dummy_input, decoder_input_ids=decoder_input_ids, output_hidden_states=True)
                outputs_fa = model_fa(dummy_input, decoder_input_ids=decoder_input_ids, output_hidden_states=True)
            else:
                outputs = model(dummy_input, output_hidden_states=True)
                outputs_fa = model_fa(dummy_input, output_hidden_states=True)
            # Compare the last vision hidden state between eager and FA2.
            logits = outputs.vision_hidden_states[-1]
            logits_fa = outputs_fa.vision_hidden_states[-1]
            assert torch.allclose(logits_fa, logits, atol=atol, rtol=rtol)
            # Second pass: with the attention mask applied.
            if model.config.is_encoder_decoder:
                other_inputs = {
                    "decoder_input_ids": decoder_input_ids,
                    "decoder_attention_mask": dummy_attention_mask,
                    "output_hidden_states": True,
                }
                if dummy_attention_mask is not None:
                    other_inputs["attention_mask"] = dummy_attention_mask
                outputs = model(dummy_input, **other_inputs)
                outputs_fa = model_fa(dummy_input, **other_inputs)
            else:
                other_inputs = {
                    "output_hidden_states": True,
                }
                if dummy_attention_mask is not None:
                    other_inputs["attention_mask"] = dummy_attention_mask
                outputs = model(dummy_input, **other_inputs)
                outputs_fa = model_fa(dummy_input, **other_inputs)
            logits = outputs.vision_hidden_states[-1]
            logits_fa = outputs_fa.vision_hidden_states[-1]
            # Skip the padded position when comparing.
            if padding_side == "left":
                assert torch.allclose(logits_fa[1:], logits[1:], atol=atol, rtol=rtol)
                # check with inference + dropout
                model.train()
                _ = model_fa(dummy_input, **other_inputs)
            else:
                assert torch.allclose(logits_fa[:-1], logits[:-1], atol=atol, rtol=rtol)
# Override as difference slightly higher than the threshold
def test_batching_equivalence(self, atol=5e-4, rtol=5e-4):
    # Delegate to the common test with relaxed tolerances.
    super().test_batching_equivalence(atol=atol, rtol=rtol)
@unittest.skip(reason="Sam2Model does not support training")
def test_retain_grad_hidden_states_attentions(self):
    # Intentionally empty: skipped via the decorator above.
    pass
@unittest.skip(reason="Hidden_states is tested in sub modules tests")
def test_hidden_states_output(self):
    # Intentionally empty: skipped via the decorator above.
    pass
@slow
def test_model_from_pretrained(self):
    """Smoke-test that the public tiny SAM2.1 checkpoint loads from the Hub."""
    loaded_model = Sam2Model.from_pretrained("facebook/sam2.1-hiera-tiny")
    self.assertIsNotNone(loaded_model)
def test_sdpa_can_compile_dynamic(self):
    # Runtime skip (rather than decorator) keeps the override explicit.
    self.skipTest(reason="SAM2 model can't be compiled dynamic yet")
def _image_features_get_expected_num_attentions(self, model_tester=None):
if model_tester is None:
model_tester = self.model_tester
return sum(model_tester.blocks_per_stage)
def _image_features_get_expected_num_hidden_states(self, model_tester=None):
if model_tester is None:
model_tester = self.model_tester
return sum(model_tester.blocks_per_stage) + 1
def prepare_image():
    """Download the truck fixture image and return it as an RGB PIL image."""
    url = "https://huggingface.co/datasets/hf-internal-testing/sam2-fixtures/resolve/main/truck.jpg"
    response = requests.get(url, stream=True)
    return Image.open(response.raw).convert("RGB")
def prepare_groceries_image():
    """Download the groceries fixture image and return it as an RGB PIL image."""
    url = "https://huggingface.co/datasets/hf-internal-testing/sam2-fixtures/resolve/main/groceries.jpg"
    response = requests.get(url, stream=True)
    return Image.open(response.raw).convert("RGB")
def prepare_dog_img():
    """Download the dog fixture image and return it as an RGB PIL image."""
    url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/dog-sam.png"
    response = requests.get(url, stream=True)
    return Image.open(response.raw).convert("RGB")
def prepare_video():
    """Download the bedroom fixture video and return its decoded frames."""
    url = "https://huggingface.co/datasets/hf-internal-testing/sam2-fixtures/resolve/main/bedroom.mp4"
    frames, _ = load_video(url)
    return frames
@slow
class Sam2ModelIntegrationTest(unittest.TestCase):
    """Integration tests for `Sam2Model` against the `facebook/sam2.1-hiera-tiny`
    checkpoint, comparing IoU scores and mask logits to pre-recorded golden values."""

    def setUp(self):
        # Load model in float32 and eval mode so golden values are reproducible.
        super().setUp()
        self.model = Sam2Model.from_pretrained("facebook/sam2.1-hiera-tiny").to(torch.float32)
        self.processor = Sam2Processor.from_pretrained("facebook/sam2.1-hiera-tiny")
        self.model.to(torch_device)
        self.model.eval()

    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        backend_empty_cache(torch_device)

    def test_inference_mask_generation_one_point_multimask(self):
        """Single positive point prompt, default multimask output (3 candidates)."""
        raw_image = prepare_image()
        input_points = [[[[500, 375]]]]
        input_labels = [[[1]]]
        inputs = self.processor(
            images=raw_image, input_points=input_points, input_labels=input_labels, return_tensors="pt"
        ).to(torch_device)
        with torch.no_grad():
            outputs = self.model(**inputs)
        self.assertEqual(outputs.iou_scores.shape, (1, 1, 3))
        self.assertEqual(outputs.pred_masks.shape, (1, 1, 3, 256, 256))
        # Sort the 3 candidate masks by IoU score so comparison order is stable.
        sorted_indices = torch.argsort(outputs.iou_scores.squeeze(), descending=True)
        scores = outputs.iou_scores.squeeze()[sorted_indices]
        masks_logits = outputs.pred_masks.squeeze()[sorted_indices][0, :3, :3]
        torch.testing.assert_close(
            scores, torch.tensor([0.9547, 0.4932, 0.0427]).to(torch_device), atol=1e-4, rtol=1e-4
        )
        torch.testing.assert_close(
            masks_logits,
            torch.tensor(
                [[-24.9288, -41.7466, -31.0128], [-34.5113, -31.1054, -36.5913], [-25.2597, -37.5912, -33.4030]]
            ).to(torch_device),
            atol=1e-4,
            rtol=1e-4,
        )

    def test_inference_mask_generation_one_point_no_multimask(self):
        """Single positive point prompt with `multimask_output=False` (1 mask)."""
        raw_image = prepare_image()
        input_points = [[[[500, 375]]]]
        input_labels = [[[1]]]
        inputs = self.processor(
            images=raw_image, input_points=input_points, input_labels=input_labels, return_tensors="pt"
        ).to(torch_device)
        with torch.no_grad():
            outputs = self.model(**inputs, multimask_output=False)
        self.assertEqual(outputs.iou_scores.shape, (1, 1, 1))
        self.assertEqual(outputs.pred_masks.shape, (1, 1, 1, 256, 256))
        scores = outputs.iou_scores.squeeze((0, 1))
        masks_logits = outputs.pred_masks.squeeze((0, 1))[0, :3, :3]
        torch.testing.assert_close(scores, torch.tensor([0.9364]).to(torch_device), atol=1e-4, rtol=1e-4)
        torch.testing.assert_close(
            masks_logits,
            torch.tensor(
                [[-7.0462, -13.3857, -9.6419], [-10.4565, -9.7174, -12.3528], [-7.3704, -12.4391, -10.5539]]
            ).to(torch_device),
            atol=1e-4,
            rtol=1e-4,
        )

    def test_inference_mask_generation_batched_images_multi_points(self):
        """Batch of two images, one prompt group each (1 point vs 2 points)."""
        raw_image1 = prepare_image()
        raw_image2 = prepare_dog_img()
        input_points = [[[[500, 375]]], [[[770, 200], [730, 120]]]]
        input_labels = [[[1]], [[1, 0]]]
        inputs = self.processor(
            images=[raw_image1, raw_image2], input_points=input_points, input_labels=input_labels, return_tensors="pt"
        ).to(torch_device)
        with torch.no_grad():
            outputs = self.model(**inputs)
        self.assertEqual(outputs.iou_scores.shape, (2, 1, 3))
        self.assertEqual(outputs.pred_masks.shape, (2, 1, 3, 256, 256))
        # Sort candidates per image for stable comparisons.
        sorted_indices = torch.argsort(outputs.iou_scores[0].squeeze(), descending=True)
        scores1 = outputs.iou_scores[0].squeeze()[sorted_indices]
        masks_logits1 = outputs.pred_masks[0].squeeze()[sorted_indices][0, :3, :3]
        sorted_indices = torch.argsort(outputs.iou_scores[1].squeeze(), descending=True)
        scores2 = outputs.iou_scores[1].squeeze()[sorted_indices]
        masks_logits2 = outputs.pred_masks[1].squeeze()[sorted_indices][0, :3, :3]
        torch.testing.assert_close(
            scores1, torch.tensor([0.9586, 0.4913, 0.0448]).to(torch_device), atol=1e-4, rtol=1e-4
        )
        torch.testing.assert_close(
            masks_logits1,
            torch.tensor(
                [[-22.2555, -37.9250, -27.8928], [-30.8681, -27.9519, -32.8032], [-22.4133, -33.9966, -29.7111]]
            ).to(torch_device),
            atol=1e-4,
            rtol=1e-4,
        )
        torch.testing.assert_close(
            scores2, torch.tensor([0.9504, 0.8117, 0.7426]).to(torch_device), atol=1e-4, rtol=1e-4
        )
        torch.testing.assert_close(
            masks_logits2,
            torch.tensor(
                [[-13.1182, -17.3217, -14.9651], [-16.2372, -12.7739, -17.6346], [-13.5013, -17.1549, -15.6614]]
            ).to(torch_device),
            atol=1e-4,
            rtol=1e-4,
        )

    def test_inference_mask_generation_batched_images_batched_points_multi_points(self):
        """Batch of two images, two prompt groups per image, single-mask output."""
        raw_image1 = prepare_image()
        raw_image2 = prepare_groceries_image()
        input_points = [[[[500, 375]], [[650, 750]]], [[[400, 300]], [[630, 300], [550, 300]]]]
        input_labels = [[[1], [1]], [[1], [1, 1]]]
        inputs = self.processor(
            images=[raw_image1, raw_image2], input_points=input_points, input_labels=input_labels, return_tensors="pt"
        ).to(torch_device)
        with torch.no_grad():
            outputs = self.model(**inputs, multimask_output=False)
        self.assertEqual(outputs.iou_scores.shape, (2, 2, 1))
        self.assertEqual(outputs.pred_masks.shape, (2, 2, 1, 256, 256))
        torch.testing.assert_close(
            outputs.iou_scores,
            torch.tensor([[[0.9500], [0.9718]], [[0.9568], [0.9114]]]).to(torch_device),
            atol=1e-4,
            rtol=1e-4,
        )
        # Only the top-left 2x2 corner of each mask is pinned.
        torch.testing.assert_close(
            outputs.pred_masks[:, :, :, :2, :2],
            torch.tensor(
                [
                    [[[[-5.8131, -11.3020], [-8.6487, -8.0690]]], [[[-4.7731, -8.7606], [-6.2399, -7.0738]]]],
                    [[[[-13.8661, -19.1254], [-20.2477, -14.1636]]], [[[-8.8229, -10.2760], [-11.3797, -8.7189]]]],
                ]
            ).to(torch_device),
            atol=1e-4,
            rtol=1e-4,
        )

    def test_inference_batched_images_batched_boxes(self):
        """Batch of two images with four bounding-box prompts each."""
        raw_image1 = prepare_image()
        raw_image2 = prepare_groceries_image()
        input_boxes = [
            [[75, 275, 1725, 850], [425, 600, 700, 875], [1375, 550, 1650, 800], [1240, 675, 1400, 750]],
            [[450, 170, 520, 350], [350, 190, 450, 350], [500, 170, 580, 350], [580, 170, 640, 350]],
        ]
        inputs = self.processor(images=[raw_image1, raw_image2], input_boxes=input_boxes, return_tensors="pt").to(
            torch_device
        )
        with torch.no_grad():
            outputs = self.model(**inputs, multimask_output=False)
        self.assertEqual(outputs.iou_scores.shape, (2, 4, 1))
        self.assertEqual(outputs.pred_masks.shape, (2, 4, 1, 256, 256))
        torch.testing.assert_close(
            outputs.iou_scores,
            torch.tensor([[[0.9904], [0.9689], [0.9770], [0.9079]], [[0.9739], [0.9816], [0.9838], [0.9781]]]).to(
                torch_device
            ),
            atol=1e-4,
            rtol=1e-4,
        )
        torch.testing.assert_close(
            outputs.pred_masks[:, :, :, :2, :2],
            torch.tensor(
                [
                    [
                        [[[-11.1540, -18.3994], [-12.4230, -17.4403]]],
                        [[[-19.3144, -29.3947], [-24.6341, -24.1144]]],
                        [[[-24.2983, -37.6470], [-31.6659, -31.0893]]],
                        [[[-25.4313, -44.0231], [-34.0903, -34.7447]]],
                    ],
                    [
                        [[[-22.5539, -30.4633], [-32.8940, -21.6813]]],
                        [[[-23.6637, -31.3489], [-32.5095, -22.4442]]],
                        [[[-25.2987, -30.9999], [-34.6243, -24.1717]]],
                        [[[-26.3150, -30.5313], [-35.0152, -24.0271]]],
                    ],
                ]
            ).to(torch_device),
            atol=1e-4,
            rtol=1e-4,
        )

    def test_inference_mask_generation_from_existing_points_and_mask(self):
        """Re-prompt with extra points while reusing the previous best mask and
        the cached image embeddings from the first forward pass."""
        raw_image = prepare_image()
        input_points = [[[[500, 375]]]]
        input_labels = [[[1]]]
        original_inputs = self.processor(
            images=raw_image, input_points=input_points, input_labels=input_labels, return_tensors="pt"
        ).to(torch_device)
        with torch.no_grad():
            outputs = self.model(**original_inputs)
        # best mask to use as input for new points
        mask_input = outputs.pred_masks[:, :, torch.argmax(outputs.iou_scores)]
        new_input_points = [[[[500, 375], [1125, 625]]]]
        new_input_labels = [[[1, 1]]]
        # No `images=` here: only the new prompts are processed; the image
        # embeddings from the previous pass are reused below.
        inputs = self.processor(
            input_points=new_input_points,
            input_labels=new_input_labels,
            original_sizes=original_inputs["original_sizes"],
            return_tensors="pt",
        ).to(torch_device)
        with torch.no_grad():
            outputs = self.model(
                **inputs,
                input_masks=mask_input,
                image_embeddings=outputs.image_embeddings,
                multimask_output=False,
            )
        self.assertEqual(outputs.iou_scores.shape, (1, 1, 1))
        self.assertEqual(outputs.pred_masks.shape, (1, 1, 1, 256, 256))
        scores = outputs.iou_scores.squeeze((0, 1))
        masks_logits = outputs.pred_masks.squeeze((0, 1))[0, :3, :3]
        torch.testing.assert_close(scores, torch.tensor([0.9738]).to(torch_device), atol=1e-4, rtol=1e-4)
        torch.testing.assert_close(
            masks_logits,
            torch.tensor([[-5.3899, -9.7908, -8.4931], [-5.5144, -8.8731, -8.3000], [-5.5976, -9.9249, -9.0761]]).to(
                torch_device
            ),
            atol=1e-4,
            rtol=1e-4,
        )
        # with negative point
        new_input_points = [[[[500, 375], [1125, 625]]]]
        new_input_labels = [[[1, 0]]]
        inputs = self.processor(
            input_points=new_input_points,
            input_labels=new_input_labels,
            original_sizes=original_inputs["original_sizes"],
            return_tensors="pt",
        ).to(torch_device)
        with torch.no_grad():
            outputs = self.model(
                **inputs,
                input_masks=mask_input,
                image_embeddings=outputs.image_embeddings,
                multimask_output=False,
            )
        self.assertEqual(outputs.iou_scores.shape, (1, 1, 1))
        self.assertEqual(outputs.pred_masks.shape, (1, 1, 1, 256, 256))
        scores = outputs.iou_scores.squeeze((0, 1))
        masks_logits = outputs.pred_masks.squeeze((0, 1))[0, :3, :3]
        torch.testing.assert_close(scores, torch.tensor([0.9719]).to(torch_device), atol=1e-4, rtol=1e-4)
        torch.testing.assert_close(
            masks_logits,
            torch.tensor(
                [[-15.5081, -21.8641, -18.0479], [-17.4401, -17.4754, -23.6469], [-14.3975, -19.4346, -18.5884]]
            ).to(torch_device),
            atol=1e-4,
            rtol=1e-4,
        )

    def test_dummy_pipeline_generation(self):
        """Smoke-test the `mask-generation` pipeline wrapper around the checkpoint."""
        generator = pipeline("mask-generation", model="facebook/sam2.1-hiera-tiny", device=torch_device)
        raw_image = prepare_image()
        _ = generator(raw_image, points_per_batch=64)
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/sam2/test_modeling_sam2.py",
"license": "Apache License 2.0",
"lines": 877,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:tests/models/sam2/test_processor_sam2.py | # Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from transformers.testing_utils import (
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_torch_available, is_vision_available
from ...test_processing_common import ProcessorTesterMixin
if is_vision_available():
from transformers import Sam2Processor
if is_torch_available():
import torch
@require_vision
@require_torchvision
class Sam2ProcessorTest(ProcessorTesterMixin, unittest.TestCase):
    """Tests that `Sam2Processor` matches its image processor component and
    post-processes masks correctly."""

    processor_class = Sam2Processor

    def prepare_image_inputs(self):
        """Return a batched uint8 image tensor of shape (1, 3, 30, 400)."""
        image_inputs = torch.randint(0, 256, size=(1, 3, 30, 400), dtype=torch.uint8)
        return image_inputs

    def prepare_mask_inputs(self):
        """Return a batched uint8 segmentation-map tensor of shape (1, 30, 400)."""
        mask_inputs = torch.randint(0, 256, size=(1, 30, 400), dtype=torch.uint8)
        return mask_inputs

    def test_image_processor_no_masks(self):
        """Processor output must match the bare image processor when no masks are given."""
        image_processor = self.get_component("image_processor")
        processor = self.get_processor()
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input)
        input_processor = processor(images=image_input)
        for key in input_feat_extract.keys():
            if key == "pixel_values":
                # Compare per-image arrays element-wise.
                for input_feat_extract_item, input_processor_item in zip(
                    input_feat_extract[key], input_processor[key]
                ):
                    np.testing.assert_array_equal(input_feat_extract_item, input_processor_item)
            else:
                self.assertEqual(input_feat_extract[key], input_processor[key])
        # Images are resized to the model's 1024x1024 input resolution.
        for image in input_feat_extract.pixel_values:
            self.assertEqual(image.shape, (3, 1024, 1024))
        for original_size in input_feat_extract.original_sizes:
            np.testing.assert_array_equal(original_size, np.array([30, 400]))

    def test_image_processor_with_masks(self):
        """Processor output must match the bare image processor when masks are given."""
        image_processor = self.get_component("image_processor")
        processor = self.get_processor()
        image_input = self.prepare_image_inputs()
        mask_input = self.prepare_mask_inputs()
        input_feat_extract = image_processor(images=image_input, segmentation_maps=mask_input, return_tensors="pt")
        input_processor = processor(images=image_input, segmentation_maps=mask_input, return_tensors="pt")
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
        # Segmentation maps are resized to the 256x256 low-res mask resolution.
        for label in input_feat_extract.labels:
            self.assertEqual(label.shape, (256, 256))

    @require_torch
    def test_post_process_masks(self):
        """`post_process_masks` must upscale to the original size and accept
        list, torch and numpy inputs, rejecting plain Python lists of ints."""
        processor = self.get_processor()
        dummy_masks = [torch.ones((1, 3, 5, 5))]
        original_sizes = [[1764, 2646]]
        masks = processor.post_process_masks(dummy_masks, original_sizes)
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))
        masks = processor.post_process_masks(dummy_masks, torch.tensor(original_sizes))
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))
        # should also work with np
        dummy_masks = [np.ones((1, 3, 5, 5))]
        masks = processor.post_process_masks(dummy_masks, np.array(original_sizes))
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))
        dummy_masks = [[1, 0], [0, 1]]
        with self.assertRaises(TypeError):
            masks = processor.post_process_masks(dummy_masks, np.array(original_sizes))
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/sam2/test_processor_sam2.py",
"license": "Apache License 2.0",
"lines": 89,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:tests/models/sam2_video/test_modeling_sam2_video.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch SAM2 model."""
import gc
import unittest
import requests
from transformers.testing_utils import (
backend_empty_cache,
is_torch_bf16_available_on_device,
is_torch_fp16_available_on_device,
slow,
torch_device,
)
from transformers.utils import is_torch_available, is_vision_available
from transformers.video_utils import load_video
if is_torch_available():
import torch
from transformers import Sam2VideoModel, Sam2VideoProcessor
if is_vision_available():
from PIL import Image
def prepare_image():
    """Download the truck fixture image and return it as an RGB PIL image."""
    url = "https://huggingface.co/datasets/hf-internal-testing/sam2-fixtures/resolve/main/truck.jpg"
    response = requests.get(url, stream=True)
    return Image.open(response.raw).convert("RGB")
def prepare_groceries_image():
    """Download the groceries fixture image and return it as an RGB PIL image."""
    url = "https://huggingface.co/datasets/hf-internal-testing/sam2-fixtures/resolve/main/groceries.jpg"
    response = requests.get(url, stream=True)
    return Image.open(response.raw).convert("RGB")
def prepare_dog_img():
    """Download the dog fixture image and return it as an RGB PIL image."""
    url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/dog-sam.png"
    response = requests.get(url, stream=True)
    return Image.open(response.raw).convert("RGB")
def prepare_video():
    """Download the bedroom fixture video and return its decoded frames."""
    url = "https://huggingface.co/datasets/hf-internal-testing/sam2-fixtures/resolve/main/bedroom.mp4"
    frames, _ = load_video(url)
    return frames
@slow
class Sam2VideoModelIntegrationTest(unittest.TestCase):
def setUp(self):
    """Load the tiny SAM2.1 video checkpoint in float32 eval mode on the test device."""
    super().setUp()
    checkpoint = "facebook/sam2.1-hiera-tiny"
    self.video_model = Sam2VideoModel.from_pretrained(checkpoint).to(torch.float32)
    self.processor = Sam2VideoProcessor.from_pretrained(checkpoint)
    self.video_model.to(torch_device)
    self.video_model.eval()
def tearDown(self):
    """Release references and free accelerator memory between tests."""
    super().tearDown()
    # clean-up as much as possible GPU memory occupied by PyTorch
    gc.collect()
    backend_empty_cache(torch_device)
def test_inference_mask_generation_video_one_point(self):
    """Single positive point on frame 0, then propagation over 2 more frames."""
    raw_video = prepare_video()
    inference_session = self.processor.init_video_session(video=raw_video, inference_device=torch_device)
    ann_frame_idx = 0  # the frame index we interact with
    ann_obj_id = 1  # give a unique id to each object we interact with (it can be any integers)
    self.processor.add_inputs_to_inference_session(
        inference_session=inference_session,
        frame_idx=ann_frame_idx,
        obj_ids=ann_obj_id,
        input_points=[[[[210, 350]]]],
        input_labels=[[[1]]],
    )
    outputs = self.video_model(inference_session=inference_session, frame_idx=ann_frame_idx)
    low_res_masks = outputs.pred_masks
    self.assertEqual(low_res_masks.shape, (1, 1, 256, 256))
    # Upscale the 256x256 logits to the original video resolution.
    video_res_masks = self.processor.post_process_masks([low_res_masks], [raw_video.shape[-3:-1]], binarize=False)[
        0
    ]
    self.assertEqual(video_res_masks.shape, (1, 1, raw_video.shape[-3], raw_video.shape[-2]))
    torch.testing.assert_close(
        video_res_masks[0, 0, :3, :3],
        torch.tensor(
            [[-21.4113, -21.4113, -22.9687], [-23.3090, -23.3090, -24.2606], [-27.5705, -27.5705, -27.1616]]
        ).to(torch_device),
        atol=1e-4,
        rtol=1e-4,
    )
    # test propagate in video frames
    frames = []
    for sam2_video_output in self.video_model.propagate_in_video_iterator(
        inference_session=inference_session,
        max_frame_num_to_track=2,
    ):
        video_res_masks = self.processor.post_process_masks(
            [sam2_video_output.pred_masks], [raw_video.shape[-3:-1]], binarize=False
        )[0]
        frames.append(video_res_masks)
    frames = torch.stack(frames, dim=0)
    self.assertEqual(frames.shape, (3, 1, 1, raw_video.shape[-3], raw_video.shape[-2]))
    torch.testing.assert_close(
        frames[:3, :, :, :2, :2],
        torch.tensor(
            [
                [[[[-21.4113, -21.4113], [-23.3090, -23.3090]]]],
                [[[[-20.1003, -20.1003], [-21.2294, -21.2294]]]],
                [[[[-19.9619, -19.9619], [-21.3060, -21.3060]]]],
            ],
        ).to(torch_device),
        atol=1e-4,
        rtol=1e-4,
    )
def test_inference_mask_generation_video_one_point_propagate_in_video_directly(self):
    """Same prompt as the one-point test, but propagation is started directly
    (without a separate single-frame forward pass first)."""
    raw_video = prepare_video()
    inference_session = self.processor.init_video_session(video=raw_video, inference_device=torch_device)
    ann_frame_idx = 0  # the frame index we interact with
    ann_obj_id = 1  # give a unique id to each object we interact with (it can be any integers)
    self.processor.add_inputs_to_inference_session(
        inference_session=inference_session,
        frame_idx=ann_frame_idx,
        obj_ids=ann_obj_id,
        input_points=[[[[210, 350]]]],
        input_labels=[[[1]]],
    )
    # test propagate in video frames
    frames = []
    for sam2_video_output in self.video_model.propagate_in_video_iterator(
        inference_session=inference_session,
        start_frame_idx=ann_frame_idx,
        max_frame_num_to_track=2,
    ):
        video_res_masks = self.processor.post_process_masks(
            [sam2_video_output.pred_masks], [raw_video.shape[-3:-1]], binarize=False
        )[0]
        frames.append(video_res_masks)
    frames = torch.stack(frames, dim=0)
    self.assertEqual(frames.shape, (3, 1, 1, raw_video.shape[-3], raw_video.shape[-2]))
    # Same golden values as the single-frame-then-propagate variant.
    torch.testing.assert_close(
        frames[:3, :, :, :2, :2],
        torch.tensor(
            [
                [[[[-21.4113, -21.4113], [-23.3090, -23.3090]]]],
                [[[[-20.1003, -20.1003], [-21.2294, -21.2294]]]],
                [[[[-19.9619, -19.9619], [-21.3060, -21.3060]]]],
            ]
        ).to(torch_device),
        atol=1e-4,
        rtol=1e-4,
    )
def test_inference_mask_generation_video_multi_points(self):
    """Two positive points on frame 0 for a single object, then propagation."""
    raw_video = prepare_video()
    inference_session = self.processor.init_video_session(video=raw_video, inference_device=torch_device)
    ann_frame_idx = 0  # the frame index we interact with
    ann_obj_id = 1  # give a unique id to each object we interact with (it can be any integers)
    self.processor.add_inputs_to_inference_session(
        inference_session=inference_session,
        frame_idx=ann_frame_idx,
        obj_ids=ann_obj_id,
        input_points=[[[[210, 350], [250, 220]]]],
        input_labels=[[[1, 1]]],
    )
    outputs = self.video_model(inference_session=inference_session, frame_idx=ann_frame_idx)
    low_res_masks = outputs.pred_masks
    video_res_masks = self.processor.post_process_masks(
        [outputs.pred_masks], [raw_video.shape[-3:-1]], binarize=False
    )[0]
    self.assertEqual(low_res_masks.shape, (1, 1, 256, 256))
    self.assertEqual(video_res_masks.shape, (1, 1, raw_video.shape[-3], raw_video.shape[-2]))
    torch.testing.assert_close(
        video_res_masks[0, 0, :3, :3],
        torch.tensor(
            [[-11.1487, -11.1487, -11.4202], [-11.6522, -11.6522, -11.8057], [-12.7829, -12.7829, -12.6715]]
        ).to(torch_device),
        atol=1e-4,
        rtol=1e-4,
    )
    # test propagate in video frames
    frames = []
    for sam2_video_output in self.video_model.propagate_in_video_iterator(
        inference_session=inference_session,
        start_frame_idx=ann_frame_idx,
        max_frame_num_to_track=2,
    ):
        video_res_masks = self.processor.post_process_masks(
            [sam2_video_output.pred_masks], [raw_video.shape[-3:-1]], binarize=False
        )[0]
        frames.append(video_res_masks)
    frames = torch.stack(frames, dim=0)
    self.assertEqual(frames.shape, (3, 1, 1, raw_video.shape[-3], raw_video.shape[-2]))
    # higher tolerance due to errors propagating from frame to frame
    torch.testing.assert_close(
        frames[:3, :, :, :2, :2],
        torch.tensor(
            [
                [[[[-11.1487, -11.1487], [-11.6522, -11.6522]]]],
                [[[[-15.3821, -15.3821], [-16.0333, -16.0333]]]],
                [[[[-15.4855, -15.4855], [-16.4230, -16.4230]]]],
            ]
        ).to(torch_device),
        atol=1e-2,
        rtol=1e-2,
    )
def test_inference_mask_generation_video_one_bb(self):
    """Single bounding-box prompt on frame 0, then propagation over 2 frames."""
    raw_video = prepare_video()
    inference_session = self.processor.init_video_session(video=raw_video, inference_device=torch_device)
    ann_frame_idx = 0  # the frame index we interact with
    ann_obj_id = 1  # give a unique id to each object we interact with (it can be any integers)
    self.processor.add_inputs_to_inference_session(
        inference_session=inference_session,
        frame_idx=ann_frame_idx,
        obj_ids=ann_obj_id,
        input_boxes=[[[300, 0, 500, 400]]],
    )
    outputs = self.video_model(inference_session=inference_session, frame_idx=ann_frame_idx)
    low_res_masks = outputs.pred_masks
    video_res_masks = self.processor.post_process_masks(
        [outputs.pred_masks], [raw_video.shape[-3:-1]], binarize=False
    )[0]
    self.assertEqual(low_res_masks.shape, (1, 1, 256, 256))
    self.assertEqual(video_res_masks.shape, (1, 1, raw_video.shape[-3], raw_video.shape[-2]))
    torch.testing.assert_close(
        video_res_masks[0, 0, :3, :3],
        torch.tensor(
            [[-13.1427, -13.1427, -13.6418], [-13.7753, -13.7753, -14.1144], [-15.1957, -15.1957, -15.1757]]
        ).to(torch_device),
        atol=1e-4,
        rtol=1e-4,
    )
    # test propagate in video frames
    frames = []
    for sam2_video_output in self.video_model.propagate_in_video_iterator(
        inference_session=inference_session,
        start_frame_idx=ann_frame_idx,
        max_frame_num_to_track=2,
    ):
        video_res_masks = self.processor.post_process_masks(
            [sam2_video_output.pred_masks], [raw_video.shape[-3:-1]], binarize=False
        )[0]
        frames.append(video_res_masks)
    frames = torch.stack(frames, dim=0)
    self.assertEqual(frames.shape, (3, 1, 1, raw_video.shape[-3], raw_video.shape[-2]))
    # higher tolerance due to errors propagating from frame to frame
    torch.testing.assert_close(
        frames[:3, :, :, :2, :2],
        torch.tensor(
            [
                [[[[-13.1427, -13.1427], [-13.7753, -13.7753]]]],
                [[[[-14.9998, -14.9998], [-15.7086, -15.7086]]]],
                [[[[-15.4558, -15.4558], [-16.1649, -16.1649]]]],
            ]
        ).to(torch_device),
        atol=1e-2,
        rtol=1e-2,
    )
def test_inference_mask_generation_video_one_point_one_bb(self):
    """Combined bounding-box and positive-point prompt on frame 0, then propagation."""
    raw_video = prepare_video()
    inference_session = self.processor.init_video_session(video=raw_video, inference_device=torch_device)
    ann_frame_idx = 0  # the frame index we interact with
    ann_obj_id = 1  # give a unique id to each object we interact with (it can be any integers)
    self.processor.add_inputs_to_inference_session(
        inference_session=inference_session,
        frame_idx=ann_frame_idx,
        obj_ids=ann_obj_id,
        input_boxes=[[[300, 0, 500, 400]]],
        input_points=[[[[460, 60]]]],
        input_labels=[[[1]]],
    )
    outputs = self.video_model(inference_session=inference_session, frame_idx=ann_frame_idx)
    low_res_masks = outputs.pred_masks
    video_res_masks = self.processor.post_process_masks(
        [outputs.pred_masks], [raw_video.shape[-3:-1]], binarize=False
    )[0]
    self.assertEqual(low_res_masks.shape, (1, 1, 256, 256))
    self.assertEqual(video_res_masks.shape, (1, 1, raw_video.shape[-3], raw_video.shape[-2]))
    torch.testing.assert_close(
        video_res_masks[0, 0, :3, :3],
        torch.tensor(
            [[-12.3525, -12.3525, -12.8907], [-13.0608, -13.0608, -13.4079], [-14.6511, -14.6511, -14.5694]]
        ).to(torch_device),
        atol=1e-4,
        rtol=1e-4,
    )
    # test propagate in video frames
    frames = []
    for sam2_video_output in self.video_model.propagate_in_video_iterator(
        inference_session=inference_session,
        start_frame_idx=ann_frame_idx,
        max_frame_num_to_track=2,
    ):
        video_res_masks = self.processor.post_process_masks(
            [sam2_video_output.pred_masks], [raw_video.shape[-3:-1]], binarize=False
        )[0]
        frames.append(video_res_masks)
    frames = torch.stack(frames, dim=0)
    self.assertEqual(frames.shape, (3, 1, 1, raw_video.shape[-3], raw_video.shape[-2]))
    # higher tolerance due to errors propagating from frame to frame
    torch.testing.assert_close(
        frames[:3, :, :, :2, :2],
        torch.tensor(
            [
                [[[[-12.3525, -12.3525], [-13.0608, -13.0608]]]],
                [[[[-15.8181, -15.8181], [-16.4163, -16.4163]]]],
                [[[[-15.8900, -15.8900], [-16.5953, -16.5953]]]],
            ]
        ).to(torch_device),
        atol=1e-2,
        rtol=1e-2,
    )
def test_inference_mask_generation_video_multi_objects_multi_points(self):
    """Two objects prompted on frame 0 (3 points for one, 1 for the other),
    then propagation over 2 more frames."""
    raw_video = prepare_video()
    inference_session = self.processor.init_video_session(video=raw_video, inference_device=torch_device)
    ann_frame_idx = 0  # the frame index we interact with
    ann_obj_ids = [2, 3]  # give a unique id to each object we interact with (it can be any integers)
    self.processor.add_inputs_to_inference_session(
        inference_session=inference_session,
        frame_idx=ann_frame_idx,
        obj_ids=ann_obj_ids,
        input_points=[[[[200, 300], [230, 250], [275, 175]], [[400, 150]]]],
        input_labels=[[[1, 1, 0], [1]]],
    )
    outputs = self.video_model(inference_session=inference_session, frame_idx=ann_frame_idx)
    low_res_masks = outputs.pred_masks
    video_res_masks = self.processor.post_process_masks(
        [outputs.pred_masks], [raw_video.shape[-3:-1]], binarize=False
    )[0]
    # First dim is the number of tracked objects (2).
    self.assertEqual(low_res_masks.shape, (2, 1, 256, 256))
    self.assertEqual(video_res_masks.shape, (2, 1, raw_video.shape[-3], raw_video.shape[-2]))
    torch.testing.assert_close(
        video_res_masks[:, 0, :2, :2],  # first object
        torch.tensor(
            [[[-12.6294, -12.6294], [-13.3659, -13.3659]], [[-20.3319, -20.3319], [-22.0491, -22.0491]]]
        ).to(torch_device),
        atol=1e-4,
        rtol=1e-4,
    )
    # test propagate in video frames
    frames = []
    for sam2_video_output in self.video_model.propagate_in_video_iterator(
        inference_session=inference_session,
        start_frame_idx=ann_frame_idx,
        max_frame_num_to_track=2,
    ):
        video_res_masks = self.processor.post_process_masks(
            [sam2_video_output.pred_masks], [raw_video.shape[-3:-1]], binarize=False
        )[0]
        frames.append(video_res_masks)
    frames = torch.stack(frames, dim=0)
    self.assertEqual(frames.shape, (3, 2, 1, raw_video.shape[-3], raw_video.shape[-2]))
    torch.testing.assert_close(
        frames[:3, :, :, :2, :2],
        torch.tensor(
            [
                [[[[-12.6294, -12.6294], [-13.3659, -13.3659]]], [[[-20.3319, -20.3319], [-22.0491, -22.0491]]]],
                [[[[-18.5249, -18.5249], [-19.5830, -19.5830]]], [[[-17.5537, -17.5537], [-19.2259, -19.2259]]]],
                [[[[-14.2722, -14.2722], [-15.4622, -15.4622]]], [[[-18.3185, -18.3185], [-20.0314, -20.0314]]]],
            ]
        ).to(torch_device),
        atol=1e-4,
        rtol=1e-4,
    )
def test_inference_mask_generation_video_batched_bb(self):
raw_video = prepare_video()
inference_session = self.processor.init_video_session(video=raw_video, inference_device=torch_device)
ann_frame_idx = 0 # the frame index we interact with
ann_obj_ids = [2, 3] # give a unique id to each object we interact with (it can be any integers)
self.processor.add_inputs_to_inference_session(
inference_session=inference_session,
frame_idx=ann_frame_idx,
obj_ids=ann_obj_ids,
input_boxes=[[[300, 0, 500, 400], [400, 0, 600, 400]]],
)
frames = []
for sam2_video_output in self.video_model.propagate_in_video_iterator(
inference_session=inference_session,
start_frame_idx=ann_frame_idx,
max_frame_num_to_track=2,
):
video_res_masks = self.processor.post_process_masks(
[sam2_video_output.pred_masks], [raw_video.shape[-3:-1]], binarize=False
)[0]
print(video_res_masks.shape)
frames.append(video_res_masks)
frames = torch.stack(frames, dim=0)
self.assertEqual(frames.shape, (3, 2, 1, raw_video.shape[-3], raw_video.shape[-2]))
print(frames.shape)
print(frames[:3, :, :, :2, :2])
torch.testing.assert_close(
frames[:3, :, :, :2, :2],
torch.tensor(
[
[[[[-13.1427, -13.1427], [-13.7753, -13.7753]]], [[[-8.4576, -8.4576], [-8.7329, -8.7329]]]],
[[[[-14.9998, -14.9998], [-15.7086, -15.7086]]], [[[-9.2998, -9.2998], [-9.8947, -9.8947]]]],
[[[[-15.4558, -15.4558], [-16.1649, -16.1649]]], [[[-10.4880, -10.4880], [-11.2098, -11.2098]]]],
]
).to(torch_device),
atol=1e-4,
rtol=1e-4,
)
    def test_inference_propagate_video_from_mask_input(self):
        """Prompt with a predicted mask (instead of points/boxes) and propagate it through the video.

        First runs a point-prompted prediction to obtain a mask, then feeds that mask
        back in as the prompt and checks the single-frame and propagated outputs.
        """
        raw_video = prepare_video()
        inference_session = self.processor.init_video_session(video=raw_video, inference_device=torch_device)
        ann_frame_idx = 0  # the frame index we interact with
        ann_obj_id = 1  # give a unique id to each object we interact with (it can be any integers)
        # get input_mask
        self.processor.add_inputs_to_inference_session(
            inference_session=inference_session,
            frame_idx=ann_frame_idx,
            obj_ids=ann_obj_id,
            input_points=[[[[210, 350], [250, 220]]]],
            input_labels=[[[1, 1]]],
        )
        sam2_video_output = self.video_model(inference_session=inference_session, frame_idx=ann_frame_idx)
        # set mask as input
        self.processor.add_inputs_to_inference_session(
            inference_session=inference_session,
            frame_idx=ann_frame_idx,
            obj_ids=ann_obj_id,
            input_masks=self.processor.post_process_masks(
                [sam2_video_output.pred_masks], [raw_video.shape[-3:-1]], binarize=False
            )[0],
        )
        sam2_video_output = self.video_model(inference_session=inference_session, frame_idx=ann_frame_idx)
        low_res_masks = sam2_video_output.pred_masks
        self.assertEqual(low_res_masks.shape, (1, 1, 256, 256))
        video_res_masks = self.processor.post_process_masks(
            [sam2_video_output.pred_masks], [raw_video.shape[-3:-1]], binarize=False
        )[0]
        self.assertEqual(video_res_masks.shape, (1, 1, raw_video.shape[-3], raw_video.shape[-2]))
        torch.testing.assert_close(
            video_res_masks[0, 0, :3, :3],
            torch.tensor(
                [[-10.0000, -10.0000, -10.0000], [-10.0000, -10.0000, -10.0000], [-10.0000, -10.0000, -10.0000]]
            ).to(torch_device),
            atol=1e-4,
            rtol=1e-4,
        )
        # test propagate in video frames
        frames = []
        for sam2_video_output in self.video_model.propagate_in_video_iterator(
            inference_session=inference_session,
            start_frame_idx=ann_frame_idx,
            max_frame_num_to_track=2,
        ):
            video_res_masks = self.processor.post_process_masks(
                [sam2_video_output.pred_masks], [raw_video.shape[-3:-1]], binarize=False
            )[0]
            frames.append(video_res_masks)
        frames = torch.stack(frames, dim=0)
        # Expected layout: (num_frames, num_objects, 1, height, width)
        self.assertEqual(frames.shape, (3, 1, 1, raw_video.shape[-3], raw_video.shape[-2]))
        torch.testing.assert_close(
            frames[:3, :, :, :2, :2],
            torch.tensor(
                [
                    [[[[-10.0000, -10.0000], [-10.0000, -10.0000]]]],
                    [[[[-18.4807, -18.4807], [-19.1966, -19.1966]]]],
                    [[[[-20.0512, -20.0512], [-20.9110, -20.9110]]]],
                ],
            ).to(torch_device),
            atol=1e-4,
            rtol=1e-4,
        )
    def test_inference_propagate_on_streamed_video(self):
        """Streaming mode: the session is created without a video and frames are fed one at a time."""
        raw_video = prepare_video()
        inference_session = self.processor.init_video_session(inference_device=torch_device)
        video_res_masks = []
        max_frame_num_to_track = 3
        for frame_idx, frame in enumerate(raw_video):
            if frame_idx >= max_frame_num_to_track:
                break
            inputs = self.processor(images=frame, device=torch_device, return_tensors="pt")
            if frame_idx == 0:
                # Prompt only on the first frame; later frames rely on memory propagation.
                self.processor.add_inputs_to_inference_session(
                    inference_session,
                    frame_idx=0,
                    obj_ids=1,
                    input_points=[[[[210, 350], [250, 220]]]],
                    input_labels=[[[1, 1]]],
                    original_size=inputs.original_sizes[0],
                )
            sam2_video_output = self.video_model(inference_session=inference_session, frame=inputs.pixel_values[0])
            video_res_masks.append(
                self.processor.post_process_masks(
                    [sam2_video_output.pred_masks], inputs.original_sizes, binarize=False
                )[0]
            )
        video_res_masks = torch.stack(video_res_masks, dim=0)
        self.assertEqual(
            video_res_masks.shape, (max_frame_num_to_track, 1, 1, raw_video.shape[-3], raw_video.shape[-2])
        )
        # higher tolerance due to errors propagating from frame to frame
        torch.testing.assert_close(
            video_res_masks[:3, :, :, :2, :2],
            torch.tensor(
                [
                    [[[[-11.1487, -11.1487], [-11.6522, -11.6522]]]],
                    [[[[-15.3821, -15.3821], [-16.0333, -16.0333]]]],
                    [[[[-15.4855, -15.4855], [-16.4230, -16.4230]]]],
                ]
            ).to(torch_device),
            atol=1e-2,
            rtol=1e-2,
        )
    def test_inference_with_different_dtypes(self):
        """Test that inference works correctly for float32, bfloat16, and float16 dtypes."""
        raw_video = prepare_video()
        # Pairs of (dtype, availability predicate); a None predicate means always available.
        dtypes_to_test = [
            (torch.float32, None),  # float32 is always available
            (torch.bfloat16, is_torch_bf16_available_on_device),
            (torch.float16, is_torch_fp16_available_on_device),
        ]
        for dtype, availability_check in dtypes_to_test:
            with self.subTest(dtype=dtype):
                # Skip if dtype is not available on device
                if availability_check is not None and not availability_check(torch_device):
                    self.skipTest(f"{dtype} not supported on {torch_device}")
                # Load model with specific dtype
                video_model = Sam2VideoModel.from_pretrained("facebook/sam2.1-hiera-tiny", torch_dtype=dtype).to(
                    torch_device
                )
                video_model.eval()
                # Initialize inference session
                inference_session = self.processor.init_video_session(
                    video=raw_video, inference_device=torch_device, dtype=dtype
                )
                ann_frame_idx = 0
                ann_obj_id = 1
                # Add inputs
                self.processor.add_inputs_to_inference_session(
                    inference_session=inference_session,
                    frame_idx=ann_frame_idx,
                    obj_ids=ann_obj_id,
                    input_points=[[[[210, 350]]]],
                    input_labels=[[[1]]],
                )
                # Run inference on first frame
                outputs = video_model(inference_session=inference_session, frame_idx=ann_frame_idx)
                low_res_masks = outputs.pred_masks
                # Verify output shape and dtype
                self.assertEqual(low_res_masks.shape, (1, 1, 256, 256))
                self.assertEqual(low_res_masks.dtype, dtype)
                # Post-process masks
                video_res_masks = self.processor.post_process_masks(
                    [low_res_masks], [raw_video.shape[-3:-1]], binarize=False
                )[0]
                self.assertEqual(video_res_masks.shape, (1, 1, raw_video.shape[-3], raw_video.shape[-2]))
                # Test propagation across multiple frames to test memory handling
                frames = []
                max_frame_num_to_track = 2
                for sam2_video_output in video_model.propagate_in_video_iterator(
                    inference_session=inference_session,
                    start_frame_idx=ann_frame_idx,
                    max_frame_num_to_track=max_frame_num_to_track,
                ):
                    video_res_masks = self.processor.post_process_masks(
                        [sam2_video_output.pred_masks], [raw_video.shape[-3:-1]], binarize=False
                    )[0]
                    frames.append(video_res_masks)
                    # Verify dtype is maintained during propagation
                    self.assertEqual(sam2_video_output.pred_masks.dtype, dtype)
                frames = torch.stack(frames, dim=0)
                # Verify we got the expected number of frames (initial frame + max_frame_num_to_track)
                self.assertEqual(
                    frames.shape, (max_frame_num_to_track + 1, 1, 1, raw_video.shape[-3], raw_video.shape[-2])
                )
                # Verify dtype is maintained in stacked frames
                self.assertEqual(frames.dtype, dtype)
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/sam2_video/test_modeling_sam2_video.py",
"license": "Apache License 2.0",
"lines": 565,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:tests/models/sam2_video/test_processor_sam2_video.py | # Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from transformers.testing_utils import (
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_torch_available, is_vision_available
from ...test_processing_common import ProcessorTesterMixin
if is_vision_available():
from transformers import Sam2VideoProcessor
if is_torch_available():
import torch
@require_vision
@require_torchvision
class Sam2VideoProcessorTest(ProcessorTesterMixin, unittest.TestCase):
    """Tests for `Sam2VideoProcessor`: image preprocessing and mask post-processing.

    Fixes: the `prepare_*_inputs` docstrings claimed PIL/numpy outputs while the
    methods actually return uint8 torch tensors; commented-out dead code removed.
    """

    processor_class = Sam2VideoProcessor

    @unittest.skip("Sam2VideoProcessor call take in images only")
    def test_processor_with_multiple_inputs(self):
        pass

    def prepare_image_inputs(self):
        """Return a batched uint8 image tensor of shape (1, 3, 30, 400)."""
        image_inputs = torch.randint(0, 256, size=(1, 3, 30, 400), dtype=torch.uint8)
        return image_inputs

    def prepare_mask_inputs(self):
        """Return a batched uint8 segmentation map tensor of shape (1, 30, 400)."""
        mask_inputs = torch.randint(0, 256, size=(1, 30, 400), dtype=torch.uint8)
        return mask_inputs

    def test_image_processor_no_masks(self):
        """The composite processor must produce the same image outputs as the bare image processor."""
        image_processor = self.get_component("image_processor")
        video_processor = self.get_component("video_processor")
        processor = Sam2VideoProcessor(image_processor=image_processor, video_processor=video_processor)
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input)
        input_processor = processor(images=image_input)
        for key in input_feat_extract.keys():
            if key == "pixel_values":
                for input_feat_extract_item, input_processor_item in zip(
                    input_feat_extract[key], input_processor[key]
                ):
                    np.testing.assert_array_equal(input_feat_extract_item, input_processor_item)
            else:
                self.assertEqual(input_feat_extract[key], input_processor[key])
        for image in input_feat_extract.pixel_values:
            self.assertEqual(image.shape, (3, 1024, 1024))
        for original_size in input_feat_extract.original_sizes:
            np.testing.assert_array_equal(original_size, np.array([30, 400]))

    def test_image_processor_with_masks(self):
        """Segmentation maps must be processed identically by processor and image processor."""
        image_processor = self.get_component("image_processor")
        video_processor = self.get_component("video_processor")
        processor = Sam2VideoProcessor(image_processor=image_processor, video_processor=video_processor)
        image_input = self.prepare_image_inputs()
        mask_input = self.prepare_mask_inputs()
        input_feat_extract = image_processor(images=image_input, segmentation_maps=mask_input, return_tensors="pt")
        input_processor = processor(images=image_input, segmentation_maps=mask_input, return_tensors="pt")
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
        for label in input_feat_extract.labels:
            self.assertEqual(label.shape, (256, 256))

    @require_torch
    def test_post_process_masks(self):
        """`post_process_masks` should upscale to the original size for torch, list and numpy size inputs."""
        image_processor = self.get_component("image_processor")
        video_processor = self.get_component("video_processor")
        processor = Sam2VideoProcessor(image_processor=image_processor, video_processor=video_processor)
        dummy_masks = [torch.ones((1, 3, 5, 5))]
        original_sizes = [[1764, 2646]]
        masks = processor.post_process_masks(dummy_masks, original_sizes)
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))
        masks = processor.post_process_masks(dummy_masks, torch.tensor(original_sizes))
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))
        # should also work with np
        dummy_masks = [np.ones((1, 3, 5, 5))]
        masks = processor.post_process_masks(dummy_masks, np.array(original_sizes))
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))
        # unsupported input types must raise
        dummy_masks = [[1, 0], [0, 1]]
        with self.assertRaises(TypeError):
            masks = processor.post_process_masks(dummy_masks, np.array(original_sizes))
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/sam2_video/test_processor_sam2_video.py",
"license": "Apache License 2.0",
"lines": 96,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:utils/collated_reports.py | # Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import subprocess
from dataclasses import dataclass
from pathlib import Path
# Known short GPU names used to canonicalize verbose device strings (see `simplify_gpu_name`).
DEFAULT_GPU_NAMES = ["mi300", "mi325", "mi355", "h100", "a10"]
def simplify_gpu_name(gpu_name: str, simplified_names: list[str]) -> str:
    """Map a raw GPU name to a canonical short name.

    Returns the single entry of `simplified_names` that occurs as a substring of
    `gpu_name`. If zero or more than one entry matches (ambiguous), the raw name
    is returned unchanged.
    """
    hits = [candidate for candidate in simplified_names if candidate in gpu_name]
    return hits[0] if len(hits) == 1 else gpu_name
def parse_short_summary_line(line: str) -> tuple[str | None, int]:
if line.startswith("PASSED"):
return "passed", 1
if line.startswith("FAILED"):
return "failed", 1
if line.startswith("SKIPPED"):
line = line.split("[", maxsplit=1)[1]
line = line.split("]", maxsplit=1)[0]
return "skipped", int(line)
if line.startswith("ERROR"):
return "error", 1
return None, 0
def validate_path(p: str) -> Path:
    """Return `p` as a :class:`Path`, raising if it is not an existing directory.

    Fix: uses an explicit ``raise`` instead of ``assert`` so the validation
    survives ``python -O`` (assertions are stripped under optimization).
    """
    path = Path(p)
    if not path.is_dir():
        raise NotADirectoryError(f"Path {path} is not a directory")
    return path
def get_gpu_name(gpu_name: str | None) -> str:
    """Resolve a canonical GPU name.

    When a name is given, it is lower-cased and space-normalized; otherwise the
    current CUDA device name is queried via torch (falling back to "unknown" on
    failure). The result is then shortened via `simplify_gpu_name`.
    """
    if gpu_name is not None:
        resolved = gpu_name.replace(" ", "_").lower()
    else:
        try:
            import torch

            resolved = torch.cuda.get_device_name()
        except Exception as exc:
            print(f"Failed to get GPU name with {exc}")
            resolved = "unknown"
    return simplify_gpu_name(resolved, DEFAULT_GPU_NAMES)
def get_commit_hash(commit_hash: str | None) -> str:
# Get commit hash if available
if commit_hash is None:
try:
commit_hash = subprocess.check_output(["git", "rev-parse", "HEAD"]).decode("utf-8").strip()
except Exception as e:
print(f"Failed to get commit hash with {e}")
commit_hash = "unknown"
return commit_hash[:7]
@dataclass
class Args:
    """Validated command-line arguments for the collated-reports script."""

    path: Path  # reports directory (validated to exist by `validate_path`)
    machine_type: str  # "single-gpu" or "multi-gpu"
    gpu_name: str  # simplified GPU name, e.g. "h100" (see `get_gpu_name`)
    commit_hash: str  # short (7-char) commit hash
    job: str | None  # CI job name; required only when uploading reports
    report_repo_id: str | None  # Hub dataset repo id for uploads; optional
def get_arguments(args: argparse.Namespace) -> Args:
    """Normalize raw argparse output into a validated `Args` record."""
    return Args(
        path=validate_path(args.path),
        machine_type=args.machine_type,
        gpu_name=get_gpu_name(args.gpu_name),
        commit_hash=get_commit_hash(args.commit_hash),
        job=args.job,
        report_repo_id=args.report_repo_id,
    )
def upload_collated_report(job: str, report_repo_id: str, filename: str):
    """Upload the collated report file to the Hub dataset `report_repo_id`.

    The report lands under a folder named after the triggering workflow run's
    creation date; non-scheduled runs get an extra ``runs/<number>-<id>``
    subfolder so they do not overwrite the daily reports.

    Fix: the `filename` parameter was accepted but never used — the upload paths
    were hard-coded placeholder strings. The actual report file is now uploaded.
    """
    # Alternatively we can check for the existence of the collated_reports file and upload in notification_service.py
    import os

    from get_previous_daily_ci import get_last_daily_ci_run
    from huggingface_hub import HfApi

    api = HfApi()
    # if it is not a scheduled run, upload the reports to a subfolder under `report_repo_folder`
    report_repo_subfolder = ""
    if os.getenv("GITHUB_EVENT_NAME") != "schedule":
        report_repo_subfolder = f"{os.getenv('GITHUB_RUN_NUMBER')}-{os.getenv('GITHUB_RUN_ID')}"
        report_repo_subfolder = f"runs/{report_repo_subfolder}"
    workflow_run = get_last_daily_ci_run(
        token=os.environ["ACCESS_REPO_INFO_TOKEN"], workflow_run_id=os.getenv("GITHUB_RUN_ID")
    )
    workflow_run_created_time = workflow_run["created_at"]
    report_repo_folder = workflow_run_created_time.split("T")[0]
    if report_repo_subfolder:
        report_repo_folder = f"{report_repo_folder}/{report_repo_subfolder}"
    api.upload_file(
        path_or_fileobj=filename,
        path_in_repo=f"{report_repo_folder}/ci_results_{job}/{filename}",
        repo_id=report_repo_id,
        repo_type="dataset",
        token=os.getenv("TRANSFORMERS_CI_RESULTS_UPLOAD_TOKEN"),
    )
if __name__ == "__main__":
    # CLI entry point: walk per-model report folders, collate pytest short
    # summaries into a single JSON file, and optionally upload it to the Hub.
    parser = argparse.ArgumentParser(description="Post process models test reports.")
    parser.add_argument("--path", "-p", help="Path to the reports folder")
    parser.add_argument(
        "--machine-type", "-m", help="Process single or multi GPU results", choices=["single-gpu", "multi-gpu"]
    )
    parser.add_argument("--gpu-name", "-g", help="GPU name", default=None)
    parser.add_argument("--commit-hash", "-c", help="Commit hash", default=None)
    parser.add_argument("--job", "-j", help="Optional job name required for uploading reports", default=None)
    parser.add_argument(
        "--report-repo-id", "-r", help="Optional report repository ID required for uploading reports", default=None
    )
    args = get_arguments(parser.parse_args())
    # Initialize accumulators for collated report
    # The `None` bucket counts lines that matched no known status prefix.
    total_status_count = {
        "passed": 0,
        "failed": 0,
        "skipped": 0,
        "error": 0,
        None: 0,
    }
    collated_report_buffer = []
    path = args.path
    machine_type = args.machine_type
    gpu_name = args.gpu_name
    commit_hash = args.commit_hash
    job = args.job
    report_repo_id = args.report_repo_id
    # Loop through model directories and create collated reports
    for model_dir in sorted(path.iterdir()):
        # Only consider report folders produced for the requested machine type.
        if not model_dir.name.startswith(machine_type):
            continue
        # Create a new entry for the model
        model_name = model_dir.name.split("models_")[-1].removesuffix("_test_reports")
        report = {"model": model_name, "results": []}
        results = []
        # Read short summary
        with open(model_dir / "summary_short.txt", "r") as f:
            short_summary_lines = f.readlines()
        # Parse short summary (first line skipped — presumably a header; confirm against report format)
        for line in short_summary_lines[1:]:
            status, count = parse_short_summary_line(line)
            total_status_count[status] += count
            if status:
                result = {
                    "status": status,
                    # Everything after the status keyword is the test identifier.
                    "test": line.split(status.upper(), maxsplit=1)[1].strip(),
                    "count": count,
                }
                results.append(result)
        # Add short summaries to report
        report["results"] = results
        collated_report_buffer.append(report)
    filename = f"collated_reports_{machine_type}_{commit_hash}.json"
    # Write collated report
    with open(filename, "w") as f:
        json.dump(
            {
                "gpu_name": gpu_name,
                "machine_type": machine_type,
                "commit_hash": commit_hash,
                "total_status_count": total_status_count,
                "results": collated_report_buffer,
            },
            f,
            indent=2,
        )
    # Upload collated report
    if job and report_repo_id:
        upload_collated_report(job, report_repo_id, filename)
| {
"repo_id": "huggingface/transformers",
"file_path": "utils/collated_reports.py",
"license": "Apache License 2.0",
"lines": 177,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:tests/models/glm4v/test_processor_glm4v.py | # Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import unittest
import numpy as np
from transformers.testing_utils import require_av, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_processing_common import ProcessorTesterMixin, url_to_local_path
if is_vision_available():
from transformers import Glm4vProcessor
if is_torch_available():
import torch
@require_vision
@require_torch
class Glm4vProcessorTest(ProcessorTesterMixin, unittest.TestCase):
    """Processor tests for GLM-4.1V, focused on chat-template handling of images and videos."""

    processor_class = Glm4vProcessor
    model_id = "THUDM/GLM-4.1V-9B-Thinking"

    @classmethod
    def _setup_test_attributes(cls, processor):
        # Cache the image placeholder token used by the mixin's tests.
        cls.image_token = processor.image_token

    @classmethod
    def _setup_from_pretrained(cls, model_id, **kwargs):
        # Use a small patch/image size so tests stay fast.
        return super()._setup_from_pretrained(
            model_id,
            do_sample_frames=False,
            patch_size=4,
            size={"shortest_edge": 12 * 12, "longest_edge": 18 * 18},
            **kwargs,
        )

    @require_torch
    @require_av
    def _test_apply_chat_template(
        self,
        modality: str,
        batch_size: int,
        return_tensors: str,
        input_name: str,
        processor_name: str,
        input_data: list[str],
    ):
        """Shared driver for chat-template tests: checks templating, tokenization parity,
        kwargs forwarding, `return_dict` outputs, and modality-specific inputs."""
        processor = self.get_processor()
        if processor.chat_template is None:
            self.skipTest("Processor has no chat template")
        if processor_name not in self.processor_class.get_attributes():
            self.skipTest(f"{processor_name} attribute not present in {self.processor_class}")
        batch_messages = [
            [
                {
                    "role": "user",
                    "content": [{"type": "text", "text": "Describe this."}],
                },
            ]
        ] * batch_size
        # Test that jinja can be applied
        formatted_prompt = processor.apply_chat_template(batch_messages, add_generation_prompt=True, tokenize=False)
        self.assertEqual(len(formatted_prompt), batch_size)
        # Test that tokenizing with template and directly with `self.tokenizer` gives same output
        formatted_prompt_tokenized = processor.apply_chat_template(
            batch_messages, add_generation_prompt=True, tokenize=True, return_tensors=return_tensors
        )
        # Avoid double-adding BOS when the template already emitted it.
        add_special_tokens = True
        if processor.tokenizer.bos_token is not None and formatted_prompt[0].startswith(processor.tokenizer.bos_token):
            add_special_tokens = False
        tok_output = processor.tokenizer(
            formatted_prompt, return_tensors=return_tensors, add_special_tokens=add_special_tokens
        )
        expected_output = tok_output.input_ids
        self.assertListEqual(expected_output.tolist(), formatted_prompt_tokenized.tolist())
        # Test that kwargs passed to processor's `__call__` are actually used
        tokenized_prompt_100 = processor.apply_chat_template(
            batch_messages,
            add_generation_prompt=True,
            tokenize=True,
            padding="max_length",
            truncation=True,
            return_tensors=return_tensors,
            max_length=100,
        )
        self.assertEqual(len(tokenized_prompt_100[0]), 100)
        # Test that `return_dict=True` returns text related inputs in the dict
        out_dict_text = processor.apply_chat_template(
            batch_messages,
            add_generation_prompt=True,
            tokenize=True,
            return_dict=True,
            return_tensors=return_tensors,
        )
        self.assertTrue(all(key in out_dict_text for key in ["input_ids", "attention_mask"]))
        self.assertEqual(len(out_dict_text["input_ids"]), batch_size)
        self.assertEqual(len(out_dict_text["attention_mask"]), batch_size)
        # Test that with modality URLs and `return_dict=True`, we get modality inputs in the dict
        for idx, url in enumerate(input_data[:batch_size]):
            batch_messages[idx][0]["content"] = [batch_messages[idx][0]["content"][0], {"type": modality, "url": url}]
        out_dict = processor.apply_chat_template(
            batch_messages,
            add_generation_prompt=True,
            tokenize=True,
            return_dict=True,
            return_tensors=return_tensors,
            fps=2
            if isinstance(input_data[0], str)
            else None,  # by default no more than 2 frames per second, otherwise too slow
            do_sample_frames=bool(isinstance(input_data[0], str)),  # don't sample frames if decoded video is used
        )
        input_name = getattr(self, input_name)
        self.assertTrue(input_name in out_dict)
        self.assertEqual(len(out_dict["input_ids"]), batch_size)
        self.assertEqual(len(out_dict["attention_mask"]), batch_size)
        if modality == "video":
            # qwen pixels don't scale with bs same way as other models, calculate expected video token count based on video_grid_thw
            expected_video_token_count = 0
            for thw in out_dict["video_grid_thw"]:
                expected_video_token_count += thw[0] * thw[1] * thw[2]
            mm_len = expected_video_token_count
        else:
            mm_len = batch_size * 4
        self.assertEqual(len(out_dict[input_name]), mm_len)
        return_tensor_to_type = {"pt": torch.Tensor, "np": np.ndarray, None: list}
        for k in out_dict:
            self.assertIsInstance(out_dict[k], return_tensor_to_type[return_tensors])

    @require_av
    def test_apply_chat_template_video_frame_sampling(self):
        """Check the different frame-sampling modes: `fps`, whole video, and pre-sampled frame lists."""
        processor = self.get_processor()
        if processor.chat_template is None:
            self.skipTest("Processor has no chat template")
        # Skip if the processor's __call__ does not accept videos.
        signature = inspect.signature(processor.__call__)
        if "videos" not in {*signature.parameters.keys()} or (
            signature.parameters.get("videos") is not None
            and signature.parameters["videos"].annotation == inspect._empty
        ):
            self.skipTest("Processor doesn't accept videos at input")
        messages = [
            [
                {
                    "role": "user",
                    "content": [
                        {"type": "video"},
                        {"type": "text", "text": "What is shown in this video?"},
                    ],
                },
            ]
        ]
        formatted_prompt = processor.apply_chat_template(messages, add_generation_prompt=True, tokenize=False)
        self.assertEqual(len(formatted_prompt), 1)
        formatted_prompt_tokenized = processor.apply_chat_template(messages, add_generation_prompt=True, tokenize=True)
        expected_output = processor.tokenizer(formatted_prompt, return_tensors=None).input_ids
        self.assertListEqual(expected_output, formatted_prompt_tokenized)
        out_dict = processor.apply_chat_template(messages, add_generation_prompt=True, tokenize=True, return_dict=True)
        self.assertListEqual(list(out_dict.keys()), ["input_ids", "attention_mask", "mm_token_type_ids"])
        # Add video URL for return dict and load with `num_frames` arg
        messages[0][0]["content"][0] = {
            "type": "video",
            "url": url_to_local_path(
                "https://huggingface.co/datasets/raushan-testing-hf/videos-test/resolve/main/tiny_video.mp4"
            ),
        }
        # Load with `video_fps` arg
        video_fps = 10
        out_dict_with_video = processor.apply_chat_template(
            messages,
            add_generation_prompt=True,
            tokenize=True,
            return_dict=True,
            fps=video_fps,
        )
        self.assertTrue(self.videos_input_name in out_dict_with_video)
        self.assertEqual(len(out_dict_with_video[self.videos_input_name]), 8)
        # Load the whole video
        out_dict_with_video = processor.apply_chat_template(
            messages,
            add_generation_prompt=True,
            tokenize=True,
            return_dict=True,
            do_sample_frames=False,
        )
        self.assertTrue(self.videos_input_name in out_dict_with_video)
        self.assertEqual(len(out_dict_with_video[self.videos_input_name]), 24)
        # Load video as a list of frames (i.e. images). NOTE: each frame should have same size
        # because we assume they come from one video
        messages[0][0]["content"][0] = {
            "type": "video",
            "url": [
                url_to_local_path(
                    "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
                ),
                url_to_local_path(
                    "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
                ),
            ],
        }
        out_dict_with_video = processor.apply_chat_template(
            messages,
            add_generation_prompt=True,
            tokenize=True,
            return_dict=True,
            do_sample_frames=False,
        )
        self.assertTrue(self.videos_input_name in out_dict_with_video)
        self.assertEqual(len(out_dict_with_video[self.videos_input_name]), 4)
        # When the inputs are frame URLs/paths we expect that those are already
        # sampled and will raise an error is asked to sample again.
        with self.assertRaisesRegex(
            ValueError, "Sampling frames from a list of images is not supported! Set `do_sample_frames=False`"
        ):
            out_dict_with_video = processor.apply_chat_template(
                messages,
                add_generation_prompt=True,
                tokenize=True,
                return_dict=True,
                do_sample_frames=True,
            )

    # def test_model_input_names(self):
    #     processor = self.get_processor()
    #     text = self.prepare_text_inputs(modalities=["image", "video"])
    #     image_input = self.prepare_image_inputs()
    #     video_inputs = self.prepare_video_inputs()
    #     inputs_dict = {"text": text, "images": image_input, "videos": video_inputs}
    #     inputs = processor(**inputs_dict, return_tensors="pt", do_sample_frames=False)
    #     self.assertSetEqual(set(inputs.keys()), set(processor.model_input_names))
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/glm4v/test_processor_glm4v.py",
"license": "Apache License 2.0",
"lines": 230,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:src/transformers/models/textnet/image_processing_textnet_fast.py | # Copyright 2025 the Fast authors and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fast Image processor class for TextNet."""
from typing import Optional
import torch
import torchvision.transforms.v2.functional as tvF
from ...image_processing_utils import BatchFeature
from ...image_processing_utils_fast import BaseImageProcessorFast
from ...image_transforms import (
get_resize_output_image_size,
group_images_by_shape,
reorder_images,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
SizeDict,
)
from ...processing_utils import Unpack
from ...utils import (
TensorType,
auto_docstring,
)
from .image_processing_textnet import TextNetImageProcessorKwargs
@auto_docstring
class TextNetImageProcessorFast(BaseImageProcessorFast):
    # Default preprocessing configuration for TextNet (overridable per-call via kwargs).
    resample = PILImageResampling.BILINEAR
    image_mean = IMAGENET_DEFAULT_MEAN
    image_std = IMAGENET_DEFAULT_STD
    size = {"shortest_edge": 640}
    default_to_square = False
    crop_size = {"height": 224, "width": 224}
    do_resize = True
    do_center_crop = False
    do_rescale = True
    do_normalize = True
    do_convert_rgb = True
    size_divisor = 32
    valid_kwargs = TextNetImageProcessorKwargs

    def __init__(self, **kwargs: Unpack[TextNetImageProcessorKwargs]) -> None:
        super().__init__(**kwargs)

    @auto_docstring
    def preprocess(self, images: ImageInput, **kwargs: Unpack[TextNetImageProcessorKwargs]) -> BatchFeature:
        return super().preprocess(images, **kwargs)

    def resize(
        self,
        image: "torch.Tensor",
        size: SizeDict,
        interpolation: Optional["tvF.InterpolationMode"] = None,
        antialias: bool = True,
        size_divisor: int = 32,
        **kwargs,
    ) -> "torch.Tensor":
        """Resize so the shortest edge matches `size.shortest_edge`, then round both
        height and width up to the next multiple of `size_divisor`.

        Raises:
            ValueError: if `size` does not provide a `shortest_edge`.
        """
        if size.shortest_edge:
            new_size = get_resize_output_image_size(
                image,
                size=size.shortest_edge,
                default_to_square=False,
                input_data_format=ChannelDimension.FIRST,
            )
        else:
            raise ValueError(f"Size must contain 'shortest_edge' key. Got {size}.")
        # ensure height and width are divisible by size_divisor
        height, width = new_size
        if height % size_divisor != 0:
            height += size_divisor - (height % size_divisor)
        if width % size_divisor != 0:
            width += size_divisor - (width % size_divisor)
        return super().resize(
            image, SizeDict(height=height, width=width), interpolation=interpolation, antialias=antialias
        )

    def _preprocess(
        self,
        images: list["torch.Tensor"],
        do_resize: bool,
        size: SizeDict,
        size_divisor: int,
        interpolation: Optional["tvF.InterpolationMode"],
        do_center_crop: bool,
        crop_size: SizeDict,
        do_rescale: bool,
        rescale_factor: float,
        do_normalize: bool,
        image_mean: float | list[float] | None,
        image_std: float | list[float] | None,
        disable_grouping: bool | None,
        return_tensors: str | TensorType | None,
        **kwargs,
    ) -> BatchFeature:
        """Apply resize, optional center crop, and fused rescale+normalize, batching
        same-shaped images together for efficiency."""
        # Group images by size for batched resizing
        grouped_images, grouped_images_index = group_images_by_shape(images, disable_grouping=disable_grouping)
        resized_images_grouped = {}
        for shape, stacked_images in grouped_images.items():
            if do_resize:
                stacked_images = self.resize(
                    image=stacked_images, size=size, interpolation=interpolation, size_divisor=size_divisor
                )
            resized_images_grouped[shape] = stacked_images
        resized_images = reorder_images(resized_images_grouped, grouped_images_index)
        # Group images by size for further processing
        # Needed in case do_resize is False, or resize returns images with different sizes
        grouped_images, grouped_images_index = group_images_by_shape(resized_images, disable_grouping=disable_grouping)
        processed_images_grouped = {}
        for shape, stacked_images in grouped_images.items():
            if do_center_crop:
                stacked_images = self.center_crop(stacked_images, crop_size)
            # Fused rescale and normalize
            stacked_images = self.rescale_and_normalize(
                stacked_images, do_rescale, rescale_factor, do_normalize, image_mean, image_std
            )
            processed_images_grouped[shape] = stacked_images
        processed_images = reorder_images(processed_images_grouped, grouped_images_index)
        return BatchFeature(data={"pixel_values": processed_images}, tensor_type=return_tensors)
# Public API of this module, re-exported by the models package.
__all__ = ["TextNetImageProcessorFast"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/textnet/image_processing_textnet_fast.py",
"license": "Apache License 2.0",
"lines": 128,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/glm4v_moe/modular_glm4v_moe.py | # Copyright 2025 The ZhipuAI Inc. team and HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections.abc import Callable
import torch
import torch.nn as nn
from ... import initialization as init
from ...cache_utils import Cache, DynamicCache
from ...configuration_utils import PreTrainedConfig
from ...masking_utils import create_causal_mask
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from ...modeling_outputs import MoeModelOutputWithPast
from ...modeling_rope_utils import RopeParameters
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS
from ...processing_utils import Unpack
from ...utils import TransformersKwargs, auto_docstring, logging
from ...utils.generic import can_return_tuple
from ..deepseek_v3.modeling_deepseek_v3 import DeepseekV3NaiveMoe
from ..glm4.modeling_glm4 import Glm4Attention
from ..glm4_moe.configuration_glm4_moe import Glm4MoeConfig
from ..glm4_moe.modeling_glm4_moe import (
Glm4MoeDecoderLayer,
Glm4MoeMLP,
Glm4MoeMoE,
Glm4MoePreTrainedModel,
Glm4MoeTopkRouter,
eager_attention_forward,
)
from ..glm4v.configuration_glm4v import Glm4vConfig
from ..glm4v.modeling_glm4v import (
Glm4vForConditionalGeneration,
Glm4vTextModel,
Glm4vVisionModel,
Glm4vVisionRotaryEmbedding,
)
from ..gpt_neox.modeling_gpt_neox import apply_rotary_pos_emb
from ..qwen3_vl_moe.modeling_qwen3_vl_moe import (
Qwen3VLMoeCausalLMOutputWithPast,
Qwen3VLMoeModelOutputWithPast,
load_balancing_loss_func,
)
logger = logging.get_logger(__name__)
class Glm4vMoeTextConfig(Glm4MoeConfig):
    r"""
    This is the configuration class to store the configuration of a [`Glm4vMoeTextModel`]. It is used to instantiate a
    GLM-4.5V model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of
    GLM-4.5V [zai-org/GLM-4.5V](https://huggingface.co/zai-org/GLM-4.5V).
    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.
    Args:
        vocab_size (`int`, *optional*, defaults to 151424):
            Vocabulary size of the Glm4vMoe model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`Glm4vMoeModel`]
        hidden_size (`int`, *optional*, defaults to 4096):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 10944):
            Dimension of the MLP representations.
        num_hidden_layers (`int`, *optional*, defaults to 46):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 96):
            Number of attention heads for each attention layer in the Transformer encoder.
        num_key_value_heads (`int`, *optional*, defaults to 8):
            This is the number of key_value heads that should be used to implement Grouped Query Attention. If
            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
            `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
            by meanpooling all the original heads within that group. For more details checkout [this
            paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to `8`.
        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the decoder.
        max_position_embeddings (`int`, *optional*, defaults to 65536):
            The maximum sequence length that this model might ever be used with.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        rms_norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon used by the rms normalization layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        rope_parameters (`RopeParameters`, *optional*):
            Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain
            a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE
            with longer `max_position_embeddings`.
        attention_bias (`bool`, *optional*, defaults to `True`):
            Whether to use a bias in the query, key, value and output projection layers during self-attention.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        moe_intermediate_size (`int`, *optional*, defaults to 1408):
            Intermediate size of the routed expert.
        num_experts_per_tok (`int`, *optional*, defaults to 8):
            number of experts per token.
        n_shared_experts (`int`, *optional*, defaults to 1):
            Number of shared experts.
        n_routed_experts (`int`, *optional*, defaults to 128):
            Number of routed experts.
        routed_scaling_factor (`float`, *optional*, defaults to 1.0):
            Scaling factor or routed experts.
        n_group (`int`, *optional*, defaults to 1):
            Number of groups for routed experts.
        topk_group (`int`, *optional*, defaults to 1):
            Number of selected groups for each token(for each token, ensuring the selected experts is only within `topk_group` groups).
        first_k_dense_replace (`int`, *optional*, defaults to 1):
            Number of dense layers in shallow layers(embed->dense->dense->...->dense->moe->moe...->lm_head).
                                                            \--k dense layers--/
        norm_topk_prob (`bool`, *optional*, defaults to `True`):
            Whether to normalize the topk probabilities.
        pad_token_id (`int`, *optional*):
            Padding token id.
        eos_token_id (`int`, *optional*):
            End of stream token id.
        bos_token_id (`int`, *optional*):
            Beginning of stream token id.
        router_aux_loss_coef (`float`, *optional*, defaults to 0.0001):
            The aux loss factor for the loss.
    ```python
    >>> from transformers import Glm4vMoeTextModel, Glm4vMoeTextConfig
    >>> # Initializing a GLM-4.5V style configuration
    >>> configuration = Glm4vMoeTextConfig()
    >>> # Initializing a model from the GLM-4.5V style configuration
    >>> model = Glm4vMoeTextModel(configuration)
    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "glm4v_moe_text"
    base_config_key = "text_config"
    keys_to_ignore_at_inference = ["past_key_values"]
    # Default tensor parallel plan for base model `Glm4vMoe`
    base_model_tp_plan = {
        "layers.*.self_attn.q_proj": "colwise",
        "layers.*.self_attn.k_proj": "colwise",
        "layers.*.self_attn.v_proj": "colwise",
        "layers.*.self_attn.o_proj": "rowwise",
        "layers.*.mlp.gate_proj": "colwise",
        "layers.*.mlp.up_proj": "colwise",
        "layers.*.mlp.down_proj": "rowwise",
    }
    base_model_pp_plan = {
        "embed_tokens": (["input_ids"], ["inputs_embeds"]),
        "layers": (["hidden_states", "attention_mask"], ["hidden_states"]),
        "norm": (["hidden_states"], ["hidden_states"]),
    }

    def __init__(
        self,
        vocab_size: int | None = 151424,
        hidden_size: int | None = 4096,
        intermediate_size: int | None = 10944,
        num_hidden_layers: int | None = 46,
        num_attention_heads: int | None = 96,
        num_key_value_heads: int | None = 8,
        hidden_act: str | None = "silu",
        max_position_embeddings: int | None = 65536,
        initializer_range: float | None = 0.02,
        rms_norm_eps: float | None = 1e-5,
        use_cache: bool | None = True,
        rope_parameters: RopeParameters | dict[str, RopeParameters] | None = None,
        attention_bias: bool | None = True,
        attention_dropout: float | None = 0.0,
        moe_intermediate_size: int | None = 1408,
        num_experts_per_tok: int | None = 8,
        n_shared_experts: int | None = 1,
        n_routed_experts: int | None = 128,
        routed_scaling_factor: float | None = 1.0,
        n_group: int | None = 1,
        topk_group: int | None = 1,
        first_k_dense_replace: int | None = 1,
        norm_topk_prob: bool | None = True,
        pad_token_id: int | None = None,
        eos_token_id: int | None = None,
        bos_token_id: int | None = None,
        router_aux_loss_coef: float | None = 0.0001,
        **kwargs,
    ):
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id
        self.bos_token_id = bos_token_id
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.attention_bias = attention_bias
        self.attention_dropout = attention_dropout
        self.rope_parameters = rope_parameters
        kwargs.setdefault("partial_rotary_factor", 0.5)  # assign default for BC
        # MoE arguments
        self.moe_intermediate_size = moe_intermediate_size
        self.num_experts_per_tok = num_experts_per_tok
        self.n_group = n_group
        self.topk_group = topk_group
        self.n_shared_experts = n_shared_experts
        self.n_routed_experts = n_routed_experts
        self.routed_scaling_factor = routed_scaling_factor
        self.first_k_dense_replace = first_k_dense_replace
        self.norm_topk_prob = norm_topk_prob
        self.router_aux_loss_coef = router_aux_loss_coef
        # Deliberately bypass Glm4MoeConfig.__init__ (its signature/defaults differ)
        # and initialize the base config directly; `mrope_section` is excluded from
        # RoPE parameter validation.
        PreTrainedConfig.__init__(self, ignore_keys_at_rope_validation={"mrope_section"}, **kwargs)
class Glm4vMoeConfig(Glm4vConfig):
    r"""
    This is the configuration class to store the configuration of a [`Glm4vMoeModel`]. It is used to instantiate a
    GLM-4.5V model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of
    GLM-4.5V [zai-org/GLM-4.5V](https://huggingface.co/zai-org/GLM-4.5V).
    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.
    Args:
        text_config (`Union[PreTrainedConfig, dict]`, *optional*, defaults to `Glm4vMoeTextConfig`):
            The config object or dictionary of the text backbone.
        vision_config (`Union[PreTrainedConfig, dict]`, *optional*, defaults to `Glm4vMoeVisionConfig`):
            The config object or dictionary of the vision backbone.
        image_token_id (`int`, *optional*, defaults to 151363):
            The image token index to encode the image prompt.
        video_token_id (`int`, *optional*, defaults to 151364):
            The video token index to encode the image prompt.
        image_start_token_id (`int`, *optional*, defaults to 151339):
            The image start token index to encode the start of image.
        image_end_token_id (`int`, *optional*, defaults to 151340):
            The image end token index to encode the end of image.
        video_start_token_id (`int`, *optional*, defaults to 151341):
            The video start token index to encode the start of video.
        video_end_token_id (`int`, *optional*, defaults to 151342):
            The video end token index to encode the end of video.
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether the model's input and output word embeddings should be tied.
    ```python
    >>> from transformers import Glm4vMoeForConditionalGeneration, Glm4vMoeConfig
    >>> # Initializing a GLM-4.5V style configuration
    >>> configuration = Glm4vMoeConfig()
    >>> # Initializing a model from the GLM-4.5V style configuration
    >>> model = Glm4vMoeForConditionalGeneration(configuration)
    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        image_token_id=151363,
        video_token_id=151364,
        image_start_token_id=151339,
        image_end_token_id=151340,
        video_start_token_id=151341,
        video_end_token_id=151342,
        tie_word_embeddings=False,
        **kwargs,
    ):
        # NOTE(review): none of the explicit arguments are forwarded here. In a
        # `modular_*.py` file the modular converter presumably expands this
        # `super().__init__()` into the parent's full body in the generated
        # configuration file — confirm the generated `configuration_glm4v_moe.py`
        # actually assigns all of these parameters.
        super().__init__()
class Glm4vMoeTextAttention(Glm4Attention):
    """Self-attention for the GLM-4.5V text backbone.

    Reuses `Glm4Attention` but applies the GPT-NeoX rotary embedding helper
    (see the `apply_rotary_pos_emb` import) and records `rope_parameters`.
    """

    def __init__(self, config: Glm4vMoeTextConfig, layer_idx: int | None = None):
        super().__init__(config, layer_idx)
        self.rope_parameters = config.rope_parameters

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: tuple[torch.Tensor, torch.Tensor],
        attention_mask: torch.Tensor | None,
        past_key_values: Cache | None = None,
        cache_position: torch.LongTensor | None = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> tuple[torch.Tensor, torch.Tensor | None, tuple[torch.Tensor] | None]:
        """Project q/k/v, apply RoPE, update the KV cache, and run the configured attention kernel."""
        batch_dims = hidden_states.shape[:-1]
        # Layout after view+transpose: (batch, num_heads, seq_len, head_dim).
        per_head_shape = (*batch_dims, -1, self.head_dim)

        queries = self.q_proj(hidden_states).view(per_head_shape).transpose(1, 2)
        keys = self.k_proj(hidden_states).view(per_head_shape).transpose(1, 2)
        values = self.v_proj(hidden_states).view(per_head_shape).transpose(1, 2)

        cos, sin = position_embeddings
        queries, keys = apply_rotary_pos_emb(queries, keys, cos, sin)

        if past_key_values is not None:
            # sin and cos are specific to RoPE models; position_ids needed for the static cache
            keys, values = past_key_values.update(
                keys, values, self.layer_idx, {"sin": sin, "cos": cos, "cache_position": cache_position}
            )

        # Dispatch to eager / SDPA / flash-attention according to the config.
        attention_interface: Callable = ALL_ATTENTION_FUNCTIONS.get_interface(
            self.config._attn_implementation, eager_attention_forward
        )
        attn_output, attn_weights = attention_interface(
            self,
            queries,
            keys,
            values,
            attention_mask,
            dropout=self.attention_dropout if self.training else 0.0,
            scaling=self.scaling,
            **kwargs,
        )

        # Merge heads back into the hidden dimension and project out.
        attn_output = attn_output.reshape(*batch_dims, -1).contiguous()
        return self.o_proj(attn_output), attn_weights
class Glm4vMoeTextTopkRouter(Glm4MoeTopkRouter, nn.Module):
    """Top-k expert router, inherited unchanged from `Glm4MoeTopkRouter`.

    `nn.Module` appears explicitly in the bases — presumably a hint for the
    modular converter's MRO resolution; confirm against the generated file.
    """

    def __init__(self, config: Glm4vMoeTextConfig):
        super().__init__(config)
class Glm4vMoeTextNaiveMoe(DeepseekV3NaiveMoe):
    """Expert container reused verbatim from `DeepseekV3NaiveMoe`."""

    pass
class Glm4vMoeTextMoE(Glm4MoeMoE):
    """MoE feed-forward block wiring the Glm4vMoe-specific experts, router and shared-expert MLP into `Glm4MoeMoE`."""

    def __init__(self, config: Glm4vMoeTextConfig):
        super().__init__(config)
        self.config = config
        self.experts = Glm4vMoeTextNaiveMoe(config)
        self.gate = Glm4vMoeTextTopkRouter(config)
        # The shared experts are fused into one MLP whose intermediate size is
        # scaled by the number of shared experts.
        self.shared_experts = Glm4vMoeTextMLP(
            config=config, intermediate_size=config.moe_intermediate_size * config.n_shared_experts
        )
class Glm4vMoeTextMLP(Glm4MoeMLP):
    """Dense MLP reused verbatim from `Glm4MoeMLP` (also used for the shared experts above)."""

    pass
class Glm4vMoeTextDecoderLayer(Glm4MoeDecoderLayer):
    """Decoder layer inherited unchanged from `Glm4MoeDecoderLayer`."""

    def __init__(self, config: Glm4vMoeTextConfig, layer_idx: int):
        super().__init__(config, layer_idx)
class Glm4vMoePreTrainedModel(Glm4MoePreTrainedModel):
    """Base pretrained model: shared config class, sharding and output-recording metadata for all Glm4vMoe models."""

    config: Glm4vMoeConfig
    base_model_prefix = "model"
    input_modalities = ("text", "image", "video")
    # Modules that must not be split across devices by `device_map="auto"`.
    _no_split_modules = ["Glm4vMoeTextDecoderLayer", "Glm4vMoeVisionBlock"]
    _skip_keys_device_placement = "past_key_values"
    # Maps output names to the module types whose outputs are collected.
    _can_record_outputs = {
        "hidden_states": Glm4vMoeTextDecoderLayer,
        "attentions": Glm4vMoeTextAttention,
        "router_logits": Glm4vMoeTextTopkRouter,
    }

    def _init_weights(self, module):
        """Default weight init, plus recomputing the vision rotary-embedding inverse frequencies."""
        super()._init_weights(module)
        if isinstance(module, Glm4vMoeVisionRotaryEmbedding):
            # Standard RoPE frequency table: theta^(-2i/dim) for i in [0, dim/2).
            inv_freq = 1.0 / (module.theta ** (torch.arange(0, module.dim, 2, dtype=torch.float) / module.dim))
            init.copy_(module.inv_freq, inv_freq)
class Glm4vMoeCausalLMOutputWithPast(Qwen3VLMoeCausalLMOutputWithPast):
    """Causal-LM output container reused verbatim from Qwen3-VL-MoE."""

    pass
class Glm4vMoeVisionRotaryEmbedding(Glm4vVisionRotaryEmbedding):
    """Vision rotary embedding reused verbatim from Glm4v (see `_init_weights` for the inv_freq initialization)."""

    pass
@auto_docstring
class Glm4vMoeVisionModel(Glm4vVisionModel):
    """Vision tower reused verbatim from `Glm4vVisionModel`."""

    pass
@auto_docstring
class Glm4vMoeTextModel(Glm4vTextModel):
    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: Cache | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        use_cache: bool | None = None,
        cache_position: torch.LongTensor | None = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> tuple | MoeModelOutputWithPast:
        """Run the text decoder stack.

        Accepts exactly one of `input_ids` / `inputs_embeds`, builds the causal
        mask and the 3D (temporal/height/width) rotary position ids, then applies
        every decoder layer followed by the final norm.
        """
        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
        # torch.jit.trace() doesn't support cache objects in the output
        if use_cache and past_key_values is None and not torch.jit.is_tracing():
            past_key_values = DynamicCache(config=self.config)
        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)
        if cache_position is None:
            # Positions of the new tokens, offset by what is already cached.
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position = torch.arange(
                past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
            )
        # the hard coded `3` is for temporal, height and width.
        if position_ids is None:
            position_ids = cache_position.view(1, 1, -1).expand(3, inputs_embeds.shape[0], -1)
        elif position_ids.ndim == 2:
            position_ids = position_ids[None, ...].expand(3, position_ids.shape[0], -1)
        # NOTE: we need to pass text position ids for packing. Qwen2-VL uses 3D positions
        # where each dim indicates visual spatial positions for temporal/height/width grids.
        # There are two scenarios when FA2-like packed masking might be activated.
        # 1. User specifically passed packed `position_ids` and no attention mask.
        # In this case we expect the user to create correct position ids for all 3 grids
        # and prepend text-only position ids to it. The final tensor will be [4, bs, seq-len]
        # 2. User runs forward with no attention mask and no position ids. In this case, position ids
        # are prepared by the model (`get_rope_index`) as `[4, bs, seq-len]` tensor. Text-only positions are
        # prepended by us when creating positions so that the mask is constructed correctly. NOTE: failing to pass
        # text-only positions will cause incorrect mask construction, do not change `prepare_input_for_generation`
        if position_ids.ndim == 3 and position_ids.shape[0] == 4:
            text_position_ids = position_ids[0]
            position_ids = position_ids[1:]
        else:
            # If inputs are not packed (usual 3D positions), do not prepare mask from position_ids
            text_position_ids = None
        mask_kwargs = {
            "config": self.config,
            "inputs_embeds": inputs_embeds,
            "attention_mask": attention_mask,
            "cache_position": cache_position,
            "past_key_values": past_key_values,
            "position_ids": text_position_ids,
        }
        # Create the masks
        causal_mask = create_causal_mask(**mask_kwargs)
        hidden_states = inputs_embeds
        # create position embeddings to be shared across the decoder layers
        position_embeddings = self.rotary_emb(hidden_states, position_ids)
        for i, decoder_layer in enumerate(self.layers[: self.config.num_hidden_layers]):
            layer_outputs = decoder_layer(
                hidden_states,
                position_embeddings=position_embeddings,
                attention_mask=causal_mask,
                position_ids=position_ids,
                past_key_values=past_key_values,
                cache_position=cache_position,
                **kwargs,
            )
            hidden_states = layer_outputs
        hidden_states = self.norm(hidden_states)
        return MoeModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values,
        )
class Glm4vMoeModelOutputWithPast(Qwen3VLMoeModelOutputWithPast):
    """Base-model output container reused verbatim from Qwen3-VL-MoE."""

    pass
class Glm4vMoeForConditionalGeneration(Glm4vForConditionalGeneration):
    def __init__(self, config):
        super().__init__(config)
        # Cached for the auxiliary load-balancing loss computation in `forward`.
        # NOTE(review): reads `num_local_experts`, while `Glm4vMoeTextConfig`
        # defines `n_routed_experts` — presumably resolved via a config attribute
        # map; confirm against the generated configuration file.
        self.num_experts = config.text_config.num_local_experts
        self.num_experts_per_tok = config.text_config.num_experts_per_tok

    @auto_docstring
    @can_return_tuple
    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: Cache | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        labels: torch.LongTensor | None = None,
        pixel_values: torch.Tensor | None = None,
        pixel_values_videos: torch.FloatTensor | None = None,
        image_grid_thw: torch.LongTensor | None = None,
        video_grid_thw: torch.LongTensor | None = None,
        mm_token_type_ids: torch.IntTensor | None = None,
        cache_position: torch.LongTensor | None = None,
        logits_to_keep: int | torch.Tensor = 0,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple | Glm4vMoeCausalLMOutputWithPast:
        # Multimodal backbone forward (vision features are merged inside `self.model`).
        outputs = self.model(
            input_ids=input_ids,
            pixel_values=pixel_values,
            pixel_values_videos=pixel_values_videos,
            image_grid_thw=image_grid_thw,
            video_grid_thw=video_grid_thw,
            position_ids=position_ids,
            attention_mask=attention_mask,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            cache_position=cache_position,
            mm_token_type_ids=mm_token_type_ids,
            **kwargs,
        )
        hidden_states = outputs[0]
        # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
        slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
        logits = self.lm_head(hidden_states[:, slice_indices, :])
        loss = None
        if labels is not None:
            loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.text_config.vocab_size)
        # Optional MoE load-balancing auxiliary loss, added to the LM loss when
        # labels are provided.
        aux_loss = None
        if kwargs.get("output_router_logits", False):
            aux_loss = load_balancing_loss_func(
                outputs.router_logits,
                self.num_experts,
                self.num_experts_per_tok,
                attention_mask,
            )
            if labels is not None:
                loss += self.config.text_config.router_aux_loss_coef * aux_loss.to(
                    loss.device
                )  # make sure to reside in the same device
        return Glm4vMoeCausalLMOutputWithPast(
            loss=loss,
            aux_loss=aux_loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            rope_deltas=outputs.rope_deltas,
            router_logits=outputs.router_logits,
        )
# Public API of this modular file. The `# noqa: F822` entries are names not
# defined here — presumably emitted by the modular converter into the
# generated modeling/configuration files.
__all__ = [
    "Glm4vMoeConfig",
    "Glm4vMoeVisionConfig",  # noqa: F822
    "Glm4vMoeTextConfig",
    "Glm4vMoeForConditionalGeneration",
    "Glm4vMoeModel",  # noqa: F822
    "Glm4vMoePreTrainedModel",
    "Glm4vMoeTextModel",
    "Glm4vMoeVisionModel",
]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/glm4v_moe/modular_glm4v_moe.py",
"license": "Apache License 2.0",
"lines": 502,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:tests/models/glm4v_moe/test_modeling_glm4v_moe.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch GLM-4.5V model."""
import copy
import unittest
from transformers import (
AutoProcessor,
Glm4vMoeConfig,
Glm4vMoeForConditionalGeneration,
Glm4vMoeModel,
is_torch_available,
)
from transformers.testing_utils import (
cleanup,
require_flash_attn,
require_torch,
require_torch_accelerator,
run_first,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import (
ModelTesterMixin,
floats_tensor,
ids_tensor,
)
if is_torch_available():
import torch
class Glm4vMoeVisionText2TextModelTester:
    """Builds a tiny Glm4vMoe configuration and synthetic multimodal inputs for the unit tests below."""

    def __init__(
        self,
        parent,
        batch_size=3,
        seq_length=7,
        num_channels=3,
        ignore_index=-100,
        image_size=112,
        video_start_token_id=3,
        video_end_token_id=4,
        image_start_token_id=5,
        image_end_token_id=6,
        image_token_id=7,
        video_token_id=8,
        is_training=True,
        text_config={
            "vocab_size": 99,
            "hidden_size": 16,
            "intermediate_size": 22,
            "num_hidden_layers": 2,
            "num_attention_heads": 2,
            "num_key_value_heads": 1,
            "output_channels": 64,
            "hidden_act": "silu",
            "max_position_embeddings": 512,
            "rope_parameters": {"type": "default", "mrope_section": [1, 1]},
            "rope_theta": 10000,
            "tie_word_embeddings": True,
            "bos_token_id": 0,
            "eos_token_id": 0,
            "pad_token_id": 0,
            "n_routed_experts": 8,
            "n_shared_experts": 1,
            "n_group": 1,
            "topk_group": 1,
            "num_experts_per_tok": 8,
        },
        vision_config={
            "depth": 2,
            "hidden_act": "silu",
            "hidden_size": 48,
            "out_hidden_size": 16,
            "intermediate_size": 22,
            "patch_size": 14,
            "spatial_merge_size": 1,
            "temporal_patch_size": 2,
        },
    ):
        self.parent = parent
        self.ignore_index = ignore_index
        self.bos_token_id = text_config["bos_token_id"]
        self.eos_token_id = text_config["eos_token_id"]
        self.pad_token_id = text_config["pad_token_id"]
        self.video_start_token_id = video_start_token_id
        self.video_end_token_id = video_end_token_id
        self.image_start_token_id = image_start_token_id
        self.image_end_token_id = image_end_token_id
        self.image_token_id = image_token_id
        self.video_token_id = video_token_id
        self.text_config = text_config
        self.vision_config = vision_config
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.is_training = is_training
        self.hidden_size = text_config["hidden_size"]
        self.num_hidden_layers = text_config["num_hidden_layers"]
        self.num_attention_heads = text_config["num_attention_heads"]
        self.vocab_size = text_config["vocab_size"]
        # Every sample carries a fixed-size image span; the text positions come after it.
        self.num_image_tokens = 64
        self.seq_length = seq_length + self.num_image_tokens
        self.n_routed_experts = text_config["n_routed_experts"]
        self.n_shared_experts = text_config["n_shared_experts"]
        self.num_experts_per_tok = text_config["num_experts_per_tok"]
        self.n_group = text_config["n_group"]
        self.topk_group = text_config["topk_group"]

    def get_config(self):
        """Return a Glm4vMoeConfig assembled from the tester's tiny sub-configs and token ids."""
        return Glm4vMoeConfig(
            text_config=self.text_config,
            vision_config=self.vision_config,
            image_token_id=self.image_token_id,
            video_token_id=self.video_token_id,
            video_start_token_id=self.video_start_token_id,
            video_end_token_id=self.video_end_token_id,
            image_start_token_id=self.image_start_token_id,
            image_end_token_id=self.image_end_token_id,
        )

    def prepare_config_and_inputs(self):
        """Return the config plus random flattened patch pixel values (rows = patches, cols = patch features)."""
        config = self.get_config()
        patch_size = config.vision_config.patch_size
        temporal_patch_size = config.vision_config.temporal_patch_size
        pixel_values = floats_tensor(
            [
                self.batch_size * (self.image_size**2) // (patch_size**2),
                self.num_channels * (patch_size**2) * temporal_patch_size,
            ]
        )
        return config, pixel_values

    def prepare_config_and_inputs_for_common(self):
        """Build the full input dict: token ids with an embedded image span, mask, grid sizes, and type ids."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)

        # Scrub any randomly-drawn special tokens so the image span below is the only one.
        input_ids[input_ids == self.video_token_id] = self.pad_token_id
        input_ids[input_ids == self.image_token_id] = self.pad_token_id
        input_ids[input_ids == self.video_start_token_id] = self.pad_token_id
        input_ids[input_ids == self.image_start_token_id] = self.pad_token_id
        input_ids[input_ids == self.video_end_token_id] = self.pad_token_id
        input_ids[input_ids == self.image_end_token_id] = self.pad_token_id
        # Lay out: <image_start> <image_token> x num_image_tokens <image_end> ...text...
        input_ids[:, 0] = self.image_start_token_id
        input_ids[:, 1 : 1 + self.num_image_tokens] = self.image_token_id
        input_ids[:, 1 + self.num_image_tokens] = self.image_end_token_id
        patch_size = config.vision_config.patch_size
        patches_per_side = self.image_size // patch_size

        # mm_token_type_ids: 1 on image-token positions, 0 elsewhere.
        mm_token_type_ids = torch.zeros_like(input_ids)
        mm_token_type_ids[:, 1 : 1 + self.num_image_tokens] = 1

        inputs_dict = {
            "pixel_values": pixel_values,
            "image_grid_thw": torch.tensor(
                [[1, patches_per_side, patches_per_side]] * self.batch_size, device=torch_device
            ),
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "mm_token_type_ids": mm_token_type_ids,
        }
        return config, inputs_dict
@require_torch
class Glm4vMoeModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
    """Common model/generation test suite specialized for Glm4vMoe."""

    all_model_classes = (Glm4vMoeModel, Glm4vMoeForConditionalGeneration) if is_torch_available() else ()
    model_split_percents = [0.7, 0.9]  # model too big to split at 0.5
    _is_composite = True

    def setUp(self):
        self.model_tester = Glm4vMoeVisionText2TextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=Glm4vMoeConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    # Glm4vMoe has images shaped as (bs*patch_len, dim) so we can't slice to batches in generate
    def prepare_config_and_inputs_for_generate(self, batch_size=2):
        """Like the generic helper, but slices `pixel_values` by patch count instead of batch dimension."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        # We don't want a few model inputs in our model input dictionary for generation tests
        input_keys_to_ignore = [
            # we don't want to mask attention heads
            # we don't want encoder-decoder models to start from filled decoder ids
            "decoder_input_ids",
            "decoder_attention_mask",
            # we'll set cache use in each test differently
            "use_cache",
            # Ignore labels if it is in the input dict
            "labels",
            # model-specific exceptions should overload/overwrite this function
        ]
        # The diff from the general `prepare_config_and_inputs_for_generate` lies here:
        # pixel_values rows are patches, so the slice length is batch_size * patches-per-image.
        patch_size = config.vision_config.patch_size
        filtered_image_length = batch_size * (self.model_tester.image_size**2) // (patch_size**2)
        filtered_inputs_dict = {
            k: v[:batch_size, ...] if isinstance(v, torch.Tensor) else v
            for k, v in inputs_dict.items()
            if k not in input_keys_to_ignore
        }
        filtered_inputs_dict["pixel_values"] = inputs_dict["pixel_values"][:filtered_image_length]
        # It is important to set `eos_token_id` to `None` to avoid early stopping (would break for length-based checks)
        text_gen_config = config.get_text_config(decoder=True)
        if text_gen_config.eos_token_id is not None and text_gen_config.pad_token_id is None:
            text_gen_config.pad_token_id = (
                text_gen_config.eos_token_id
                if isinstance(text_gen_config.eos_token_id, int)
                else text_gen_config.eos_token_id[0]
            )
        text_gen_config.eos_token_id = None
        text_gen_config.forced_eos_token_id = None
        return config, filtered_inputs_dict

    @unittest.skip(reason="No available kernels - not supported")
    def test_sdpa_can_dispatch_on_flash(self):
        pass

    @unittest.skip(reason="Size mismatch")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("GLM4's moe is not compatible `token_indices, weight_indices = torch.where(mask)`.")
    def test_generate_compilation_all_outputs(self):
        pass

    @unittest.skip("Error with compilation")
    def test_generate_from_inputs_embeds_with_static_cache(self):
        pass

    def test_inputs_embeds(self):
        """Forward must work from `inputs_embeds` alone (image inputs removed)."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))
            input_ids = inputs["input_ids"]
            del inputs["input_ids"]
            del inputs["pixel_values"]
            del inputs["image_grid_thw"]
            wte = model.get_input_embeddings()
            inputs["inputs_embeds"] = wte(input_ids)
            with torch.no_grad():
                model(**inputs)[0]

    def test_inputs_embeds_matches_input_ids(self):
        """Passing `inputs_embeds` must produce the same output as the equivalent `input_ids`."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            inputs = self._prepare_for_class(inputs_dict, model_class)
            input_ids = inputs["input_ids"]
            del inputs["input_ids"]
            del inputs["pixel_values"]
            del inputs["image_grid_thw"]
            inputs_embeds = model.get_input_embeddings()(input_ids)
            with torch.no_grad():
                out_ids = model(input_ids=input_ids, **inputs)[0]
                out_embeds = model(inputs_embeds=inputs_embeds, **inputs)[0]
            torch.testing.assert_close(out_embeds, out_ids)
@require_torch
@slow
class Glm4vMoeIntegrationTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.model = None
@classmethod
def get_model(cls):
if cls.model is None:
cls.model = Glm4vMoeForConditionalGeneration.from_pretrained(
"zai-org/GLM-4.5V", dtype="auto", device_map="auto"
)
return cls.model
@classmethod
def tearDownClass(cls):
if hasattr(cls, "model"):
del cls.model
cleanup(torch_device, gc_collect=True)
def setUp(self):
cleanup(torch_device, gc_collect=True)
self.processor = AutoProcessor.from_pretrained(
"zai-org/GLM-4.5V", size={"shortest_edge": 10800, "longest_edge": 10800}
)
self.message = [
{
"role": "user",
"content": [
{
"type": "image",
"url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg",
},
{"type": "text", "text": "What kind of dog is this?"},
],
}
]
self.message2 = [
{
"role": "user",
"content": [
{
"type": "image",
"url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/coco_sample.png",
},
{"type": "text", "text": "What kind of dog is this?"},
],
}
]
self.message_wo_image = [
{"role": "user", "content": [{"type": "text", "text": "Who are you?"}]},
]
question = "Describe this video."
video_url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_videos/resolve/main/tennis.mp4"
self.video_messages = [
{
"role": "user",
"content": [
{
"type": "video",
"video": video_url,
},
{"type": "text", "text": question},
],
}
]
def tearDown(self):
cleanup(torch_device, gc_collect=True)
def test_small_model_integration_test(self):
inputs = self.processor.apply_chat_template(
self.message, tokenize=True, add_generation_prompt=True, return_dict=True, return_tensors="pt"
)
expected_input_ids = [151331, 151333, 151336, 198, 151339, 151363, 151363, 151363, 151363, 151363, 151363, 151340, 3838, 3093, 315, 5562, 374] # fmt: skip
assert expected_input_ids == inputs.input_ids[0].tolist()[:17]
expected_pixel_slice = torch.tensor(
[
[-0.1134, -0.4492, -0.8580],
[-0.6244, -1.1645, -0.7120],
[-0.3324, -0.7996, -0.7120],
[0.2077, 0.2223, 0.4121],
[0.4413, 0.1931, 0.4559],
[0.5873, 0.3099, 0.4851],
],
dtype=torch.float32,
device="cpu",
)
torch.testing.assert_close(expected_pixel_slice, inputs.pixel_values[:6, :3], atol=1e-4, rtol=1e-4)
def test_small_model_integration_test_batch(self):
model = self.get_model()
batch_messages = [self.message, self.message2, self.message_wo_image]
inputs = self.processor.apply_chat_template(
batch_messages,
tokenize=True,
add_generation_prompt=True,
return_dict=True,
return_tensors="pt",
padding=True,
).to(torch_device)
# it should not matter whether two images are the same size or not
output = model.generate(**inputs, max_new_tokens=10)
EXPECTED_DECODED_TEXT = [
"\nWhat kind of dog is this?\n<think>Got it, let's try to figure out",
"\nWhat kind of dog is this?\n<think>Got it, let's see. The user",
'\nWho are you?\n<think>The user is asking "Who are you?"'
] # fmt: skip
decoded = self.processor.batch_decode(output, skip_special_tokens=True)
decoded = [x.replace("<|image|>", "") for x in decoded]
self.assertEqual(
decoded,
EXPECTED_DECODED_TEXT,
)
def test_small_model_integration_test_with_video(self):
processor = AutoProcessor.from_pretrained("zai-org/GLM-4.5V", max_image_size={"longest_edge": 50176})
model = self.get_model()
batch_messages = [self.video_messages]
inputs = processor.apply_chat_template(
batch_messages,
tokenize=True,
add_generation_prompt=True,
return_dict=True,
return_tensors="pt",
padding=True,
).to(torch_device)
output = model.generate(**inputs, max_new_tokens=3)
EXPECTED_DECODED_TEXT = ["\n012345Describe this video.\n<think>Got it"] # fmt: skip
decoded = processor.batch_decode(output, skip_special_tokens=True)
decoded = [x.replace("<|image|>", "") for x in decoded]
self.assertEqual(
decoded,
EXPECTED_DECODED_TEXT,
)
@run_first
@require_flash_attn
@require_torch_accelerator
def test_small_model_integration_test_batch_flashatt2(self):
model = Glm4vMoeForConditionalGeneration.from_pretrained(
"zai-org/GLM-4.5V",
dtype=torch.bfloat16,
attn_implementation="flash_attention_2",
device_map="auto",
)
batch_messages = [self.message, self.message2, self.message_wo_image]
inputs = self.processor.apply_chat_template(
batch_messages,
tokenize=True,
add_generation_prompt=True,
return_dict=True,
return_tensors="pt",
padding=True,
).to(torch_device)
# it should not matter whether two images are the same size or not
output = model.generate(**inputs, max_new_tokens=3)
EXPECTED_DECODED_TEXT = [
"\nWhat kind of dog is this?\n<think>Got it",
"\nWhat kind of dog is this?\n<think>Got it",
"\nWho are you?\n<think>The user",
] # fmt: skip
decoded = self.processor.batch_decode(output, skip_special_tokens=True)
decoded = [x.replace("<|image|>", "") for x in decoded]
self.assertEqual(
decoded,
EXPECTED_DECODED_TEXT,
)
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/glm4v_moe/test_modeling_glm4v_moe.py",
"license": "Apache License 2.0",
"lines": 417,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:tests/test_executorch.py | # Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import torch
from transformers import AutoModelForCausalLM, set_seed
from transformers.generation.configuration_utils import GenerationConfig
from transformers.integrations.executorch import (
TorchExportableModuleForDecoderOnlyLM,
TorchExportableModuleWithHybridCache,
TorchExportableModuleWithStaticCache,
)
from transformers.testing_utils import require_torch
@require_torch
class ExecutorchTest(unittest.TestCase):
def setUp(self):
set_seed(42)
self.model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-LlamaForCausalLM")
self.model.eval()
# Create generation config with static cache for the model
self.model.generation_config = GenerationConfig(
use_cache=True,
cache_implementation="static",
cache_config={"batch_size": 1, "max_cache_len": 32, "device": "cpu"},
)
self.input_ids = torch.tensor([[1, 2, 3]], dtype=torch.long)
self.inputs_embeds = torch.randn(1, 3, self.model.config.hidden_size)
self.cache_position = torch.arange(3, dtype=torch.long)
def test_static_cache_module_forward(self):
"""Test TorchExportableModuleWithStaticCache forward with both input types"""
generation_config = GenerationConfig(
use_cache=True,
cache_implementation="static",
cache_config={"batch_size": 1, "max_cache_len": 32, "device": "cpu"},
)
# Set generation config on model
self.model.generation_config = generation_config
module = TorchExportableModuleWithStaticCache(self.model)
# Test with input_ids
eager_output_ids = self.model(input_ids=self.input_ids, use_cache=False).logits
wrapped_output_ids = module.forward(input_ids=self.input_ids, cache_position=self.cache_position)
torch.testing.assert_close(eager_output_ids, wrapped_output_ids, atol=1e-4, rtol=1e-4)
# Test with inputs_embeds
eager_output_embeds = self.model(inputs_embeds=self.inputs_embeds, use_cache=False).logits
wrapped_output_embeds = module.forward(inputs_embeds=self.inputs_embeds, cache_position=self.cache_position)
torch.testing.assert_close(eager_output_embeds, wrapped_output_embeds, atol=1e-4, rtol=1e-4)
def test_hybrid_cache_module_forward(self):
"""Test TorchExportableModuleWithHybridCache forward with both input types"""
config = self.model.config
config.sliding_window = 16
config.layer_types = ["full_attention"] * config.num_hidden_layers
generation_config = GenerationConfig(
use_cache=True,
cache_implementation="hybrid",
cache_config={"batch_size": 1, "max_cache_len": 32, "device": "cpu"},
)
# Set generation config on model
self.model.generation_config = generation_config
module = TorchExportableModuleWithHybridCache(self.model)
# Test with input_ids
eager_output_ids = self.model(input_ids=self.input_ids, use_cache=False).logits
wrapped_output_ids = module.forward(input_ids=self.input_ids, cache_position=self.cache_position)
torch.testing.assert_close(eager_output_ids, wrapped_output_ids, atol=1e-4, rtol=1e-4)
# Test with inputs_embeds
eager_output_embeds = self.model(inputs_embeds=self.inputs_embeds, use_cache=False).logits
wrapped_output_embeds = module.forward(inputs_embeds=self.inputs_embeds, cache_position=self.cache_position)
torch.testing.assert_close(eager_output_embeds, wrapped_output_embeds, atol=1e-4, rtol=1e-4)
def test_decoder_only_lm_export_validation(self):
"""Test TorchExportableModuleForDecoderOnlyLM export validation"""
module = TorchExportableModuleForDecoderOnlyLM(self.model)
# Should fail with both input_ids and inputs_embeds
with self.assertRaises(ValueError):
module.export(input_ids=self.input_ids, inputs_embeds=self.inputs_embeds)
# Should fail with neither
with self.assertRaises(ValueError):
module.export()
def test_decoder_only_lm_export(self):
"""Test TorchExportableModuleForDecoderOnlyLM export with both input types"""
module = TorchExportableModuleForDecoderOnlyLM(self.model)
# Test export with input_ids
exported_program_ids = module.export(input_ids=self.input_ids, cache_position=self.cache_position)
eager_output_ids = self.model(input_ids=self.input_ids, use_cache=False).logits
exported_output_ids = exported_program_ids.module()(
input_ids=self.input_ids, cache_position=self.cache_position
)
torch.testing.assert_close(eager_output_ids, exported_output_ids, atol=1e-4, rtol=1e-4)
# Test export with inputs_embeds
exported_program_embeds = module.export(inputs_embeds=self.inputs_embeds, cache_position=self.cache_position)
eager_output_embeds = self.model(inputs_embeds=self.inputs_embeds, use_cache=False).logits
exported_output_embeds = exported_program_embeds.module()(
inputs_embeds=self.inputs_embeds, cache_position=self.cache_position
)
torch.testing.assert_close(eager_output_embeds, exported_output_embeds, atol=1e-4, rtol=1e-4)
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/test_executorch.py",
"license": "Apache License 2.0",
"lines": 103,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:src/transformers/integrations/mxfp4.py | # Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..utils import is_torch_available, logging
if is_torch_available():
import torch
from torch import nn
from contextlib import contextmanager
from ..core_model_loading import ConversionOps
from ..quantizers.quantizers_utils import get_module_from_name, should_convert_module
logger = logging.get_logger(__name__)
FP4_VALUES = [
+0.0,
+0.5,
+1.0,
+1.5,
+2.0,
+3.0,
+4.0,
+6.0,
-0.0,
-0.5,
-1.0,
-1.5,
-2.0,
-3.0,
-4.0,
-6.0,
]
@contextmanager
def on_device(dev):
if is_torch_available():
import torch
if isinstance(dev, torch.Tensor):
dev = dev.device
elif isinstance(dev, str):
dev = torch.device(dev)
dev_type = getattr(dev, "type", None)
if dev_type == "cuda":
with torch.cuda.device(dev):
yield
return
if dev_type == "xpu" and hasattr(torch, "xpu"):
with torch.xpu.device(dev):
yield
return
# other: CPU
yield
class Mxfp4Quantize(ConversionOps):
def __init__(self, hf_quantizer):
self.hf_quantizer = hf_quantizer
def convert(
self,
input_dict: dict[str, torch.Tensor],
model: torch.nn.Module | None = None,
missing_keys: list[str] | None = None,
full_layer_name: str | None = None,
**kwargs,
) -> dict[str, torch.Tensor]:
_, value = tuple(input_dict.items())[0]
value = value[0] if isinstance(value, list) else value
module, _ = get_module_from_name(model, full_layer_name)
with torch.device(value.device):
if isinstance(module, Mxfp4GptOssExperts):
triton_weight_tensor, weight_scale = quantize_to_mxfp4(value.transpose(-1, -2), triton_kernels_hub)
PrecisionConfig, FlexCtx, InFlexData = (
triton_kernels_hub.matmul_ogs.PrecisionConfig,
triton_kernels_hub.matmul_ogs.FlexCtx,
triton_kernels_hub.matmul_ogs.InFlexData,
)
triton_weight_tensor, weight_scale = swizzle_mxfp4(
triton_weight_tensor, weight_scale, triton_kernels_hub
)
proj = "gate_up_proj" if "gate_up_proj" in full_layer_name else "down_proj"
if proj in module._parameters:
# Remove the nn.Parameter registration so we can attach the Triton tensor
del module._parameters[proj]
setattr(module, proj, triton_weight_tensor)
setattr(
module,
f"{proj}_precision_config",
PrecisionConfig(weight_scale=weight_scale, flex_ctx=FlexCtx(rhs_data=InFlexData())),
)
missing_keys.discard(f"{full_layer_name}")
module._is_hf_initialized = True
return {}
class Mxfp4Dequantize(ConversionOps):
def __init__(self, hf_quantizer):
self.hf_quantizer = hf_quantizer
def convert(
self,
input_dict: dict[str, torch.Tensor],
model: torch.nn.Module | None = None,
full_layer_name: str | None = None,
missing_keys: list[str] | None = None,
**kwargs,
) -> dict[str, torch.Tensor]:
param_data = {}
proj = "gate_up_proj" if "gate_up_proj" in full_layer_name else "down_proj"
if f"{proj}_blocks" in input_dict.keys():
if isinstance(input_dict[f"{proj}_blocks"], list):
param_data[f"{proj}_blocks"] = input_dict[f"{proj}_blocks"][0]
else:
param_data[f"{proj}_blocks"] = input_dict[f"{proj}_blocks"]
if f"{proj}_scales" in input_dict.keys():
if isinstance(input_dict[f"{proj}_scales"], list):
param_data[f"{proj}_scales"] = input_dict[f"{proj}_scales"][0]
else:
param_data[f"{proj}_scales"] = input_dict[f"{proj}_scales"]
# Here we are dequantizing the weights
dequantized = dequantize_convertops(param_data[f"{proj}_blocks"], param_data[f"{proj}_scales"])
return {full_layer_name: dequantized}
class Mxfp4Deserialize(ConversionOps):
def __init__(self, hf_quantizer):
self.hf_quantizer = hf_quantizer
def convert(
self,
input_dict: dict[str, torch.Tensor],
model: torch.nn.Module | None = None,
full_layer_name: str | None = None,
missing_keys: list[str] | None = None,
**kwargs,
) -> dict[str, torch.Tensor]:
param_data = {}
proj = "gate_up_proj" if "gate_up_proj" in full_layer_name else "down_proj"
if f"{proj}_blocks" in input_dict.keys():
if isinstance(input_dict[f"{proj}_blocks"], list):
param_data[f"{proj}_blocks"] = input_dict[f"{proj}_blocks"][0]
else:
param_data[f"{proj}_blocks"] = input_dict[f"{proj}_blocks"]
if f"{proj}_scales" in input_dict.keys():
if isinstance(input_dict[f"{proj}_scales"], list):
param_data[f"{proj}_scales"] = input_dict[f"{proj}_scales"][0]
else:
param_data[f"{proj}_scales"] = input_dict[f"{proj}_scales"]
# Eagerly set tensors on the module and perform swizzle
module, _ = get_module_from_name(model, full_layer_name)
swizzle_mxfp4_convertops(
param_data[f"{proj}_blocks"],
param_data[f"{proj}_scales"],
module,
proj,
param_data[f"{proj}_blocks"].device,
triton_kernels_hub,
)
missing_keys.discard(f"{full_layer_name}")
module._is_hf_initialized = True
# We return an empty mapping since the module was updated in-place. This prevents
# the loader from trying to materialize the original meta-parameter names again.
# We don't use set_param_for_module since it expects mainly a torch.nn.Parameter or a safetensors pointer
return {}
@property
def reverse_op(self) -> ConversionOps:
return Mxfp4ReverseDeserialize(self.hf_quantizer)
class Mxfp4ReverseDeserialize(ConversionOps):
def __init__(self, hf_quantizer):
self.hf_quantizer = hf_quantizer
def convert(
self,
input_dict: dict[str, torch.Tensor],
model: torch.nn.Module | None = None,
full_layer_name: str | None = None,
missing_keys: list[str] | None = None,
**kwargs,
) -> dict[str, torch.Tensor]:
num_local_experts = getattr(model.config, "num_local_experts", 32)
hidden_size = getattr(model.config, "hidden_size", 2880)
proj = "gate_up_proj" if "gate_up_proj" in full_layer_name else "down_proj"
name = full_layer_name.rsplit("_", 1)[0]
module, _ = get_module_from_name(model, full_layer_name)
state_dict = {}
if isinstance(module, Mxfp4GptOssExperts):
if "bias" in full_layer_name:
name = full_layer_name.replace("_blocks", "")
state_dict[name] = getattr(module, proj + "_bias")
return state_dict
if "gate_up_proj" in full_layer_name:
state_dict[f"{name}_blocks"] = (
module.gate_up_proj.storage.layout.unswizzle_data(module.gate_up_proj.storage.data)
.transpose(-1, -2)
.reshape(num_local_experts, -1, 90, 16)
)
state_dict[f"{name}_scales"] = (
module.gate_up_proj_precision_config.weight_scale.storage.layout.unswizzle_data(
module.gate_up_proj_precision_config.weight_scale.storage.data
).transpose(-1, -2)
)
else:
state_dict[f"{name}_blocks"] = (
module.down_proj.storage.layout.unswizzle_data(module.down_proj.storage.data)
.transpose(-1, -2)
.reshape(num_local_experts, hidden_size, 90, -1)
)
state_dict[f"{name}_scales"] = (
module.down_proj_precision_config.weight_scale.storage.layout.unswizzle_data(
module.down_proj_precision_config.weight_scale.storage.data
).transpose(-1, -2)
)
return state_dict
# Copied from GPT_OSS repo and vllm
def quantize_to_mxfp4(w, triton_kernels_hub):
downcast_to_mxfp_torch = triton_kernels_hub.numerics_details.mxfp.downcast_to_mxfp_torch
w, w_scale = downcast_to_mxfp_torch(w.to(torch.bfloat16), torch.uint8, axis=1)
return w, w_scale
def swizzle_mxfp4(w, w_scale, triton_kernels_hub):
"""
Changes the layout of the tensors depending on the hardware
"""
FP4, convert_layout, wrap_torch_tensor = (
triton_kernels_hub.tensor.FP4,
triton_kernels_hub.tensor.convert_layout,
triton_kernels_hub.tensor.wrap_torch_tensor,
)
layout = triton_kernels_hub.tensor_details.layout
StridedLayout = triton_kernels_hub.tensor_details.layout.StridedLayout
value_layout, value_layout_opts = layout.make_default_matmul_mxfp4_w_layout(mx_axis=1)
w = convert_layout(wrap_torch_tensor(w, dtype=FP4), value_layout, **value_layout_opts)
w_scale = convert_layout(wrap_torch_tensor(w_scale), StridedLayout)
return w, w_scale
# Mostly copied from GPT_OSS repo
# TODO: Add absolute link when the repo is public
def _convert_moe_packed_tensors(
blocks,
scales,
*,
dtype: torch.dtype = torch.bfloat16,
rows_per_chunk: int = 32768 * 1024, # TODO these values are not here by mistake ;)
) -> torch.Tensor:
"""
Convert the mxfp4 weights again, dequantizing and makes them compatible with the forward
pass of GPT_OSS.
"""
import math
blocks = blocks.to(torch.uint8)
scales = scales.to(torch.int32) - 127 # TODO that's because 128=2**7
assert blocks.shape[:-1] == scales.shape, f"{blocks.shape[:-1]=} does not match {scales.shape=}"
lut = torch.tensor(FP4_VALUES, dtype=dtype, device=blocks.device)
*prefix_shape, G, B = blocks.shape
rows_total = math.prod(prefix_shape) * G
blocks = blocks.reshape(rows_total, B)
scales = scales.reshape(rows_total, 1)
out = torch.empty(rows_total, B * 2, dtype=dtype, device=blocks.device)
for r0 in range(0, rows_total, rows_per_chunk):
r1 = min(r0 + rows_per_chunk, rows_total)
blk = blocks[r0:r1]
exp = scales[r0:r1]
sub = out[r0:r1]
# This vector is only used to index into `lut`, but is hugeee in GPU memory so we delete it immediately
idx_lo = (blk & 0x0F).to(torch.int)
sub[:, 0::2] = lut[idx_lo]
del idx_lo
# This vector is only used to index into `lut`, but is hugeee in GPU memory so we delete it immediately
idx_hi = (blk >> 4).to(torch.int)
sub[:, 1::2] = lut[idx_hi]
del idx_hi
# Perform op
torch.ldexp(sub, exp, out=sub)
del blk, exp, sub
out = out.reshape(*prefix_shape, G, B * 2).view(*prefix_shape, G * B * 2)
return out.transpose(1, 2).contiguous()
def convert_moe_packed_tensors(
blocks,
scales,
*,
dtype: torch.dtype = torch.bfloat16,
rows_per_chunk: int = 32768 * 1024, # TODO these values are not here by mistake ;)
) -> torch.Tensor:
"""
Convert the mxfp4 weights again, dequantizing and makes them compatible with the forward
pass of GPT_OSS.
"""
# Since the intermediate ops requite A LOT of memory, in very constrained device_map="auto" settings
# it may OOM, hence this wrapper and move back to cpu if needed
# torch statistics are not accurate enough to estimate if we will have enough memory due to fragmentation and
# in-place operation on non-contiguous tensors (may sometimes require more temporary copies)
try:
return _convert_moe_packed_tensors(blocks, scales, dtype=dtype, rows_per_chunk=rows_per_chunk)
# In the case of OOM due to very tight device_map, we convert and return on cpu - it will then be put back on correct
# devide with the accelerate dispatch (doing it right away may still lead to OOM, but more memory is available later)
except torch.OutOfMemoryError:
blocks = blocks.to("cpu")
scales = scales.to("cpu")
return _convert_moe_packed_tensors(blocks, scales, dtype=dtype, rows_per_chunk=rows_per_chunk)
class Mxfp4GptOssExperts(nn.Module):
def __init__(self, config):
super().__init__()
self.num_experts = config.num_local_experts
self.intermediate_size = config.intermediate_size
self.hidden_size = config.hidden_size
self.gate_up_proj = nn.Parameter(
torch.zeros(self.num_experts, 2 * self.intermediate_size, self.hidden_size // 32, 16, dtype=torch.uint8),
requires_grad=False,
)
self.gate_up_proj_bias = nn.Parameter(
torch.zeros(self.num_experts, 2 * self.intermediate_size, dtype=torch.float32), requires_grad=False
)
self.down_proj = nn.Parameter(
torch.zeros((self.num_experts, self.hidden_size, self.intermediate_size // 32, 16), dtype=torch.uint8),
requires_grad=False,
)
self.down_proj_bias = nn.Parameter(
torch.zeros(self.num_experts, self.hidden_size, dtype=torch.float32), requires_grad=False
)
self.alpha = 1.702
self.limit = getattr(config, "swiglu_limit", 7.0)
self.gate_up_proj_precision_config = None
self.down_proj_precision_config = None
self.limit = getattr(config, "swiglu_limit", 7.0)
def forward(self, hidden_states: torch.Tensor, routing_data, gather_idx, scatter_idx) -> torch.Tensor:
FnSpecs, FusedActivation, matmul_ogs = (
triton_kernels_hub.matmul_ogs.FnSpecs,
triton_kernels_hub.matmul_ogs.FusedActivation,
triton_kernels_hub.matmul_ogs.matmul_ogs,
)
swiglu_fn = triton_kernels_hub.swiglu.swiglu_fn
with on_device(hidden_states.device):
act = FusedActivation(FnSpecs("swiglu", swiglu_fn, ("alpha", "limit")), (self.alpha, self.limit), 2)
intermediate_cache1 = matmul_ogs(
hidden_states,
self.gate_up_proj,
self.gate_up_proj_bias.to(torch.float32),
routing_data,
gather_indx=gather_idx,
precision_config=self.gate_up_proj_precision_config,
gammas=None,
fused_activation=act,
)
intermediate_cache3 = matmul_ogs(
intermediate_cache1,
self.down_proj,
self.down_proj_bias.to(torch.float32),
routing_data,
scatter_indx=scatter_idx,
precision_config=self.down_proj_precision_config,
gammas=routing_data.gate_scal,
)
return intermediate_cache3
# Adapted from GPT_OSS repo
# TODO: Add absolute link when the repo is public
def routing_torch_dist(
logits,
n_expts_act,
):
import os
GatherIndx, RoutingData, ScatterIndx, compute_expt_data_torch = (
triton_kernels_hub.routing.GatherIndx,
triton_kernels_hub.routing.RoutingData,
triton_kernels_hub.routing.ScatterIndx,
triton_kernels_hub.routing.compute_expt_data_torch,
)
with on_device(logits.device):
world_size = torch.distributed.get_world_size()
rank = int(os.environ.get("LOCAL_RANK", "0"))
replace_value = -1
n_tokens = logits.shape[0]
n_expts_tot = logits.shape[1]
n_local_experts = n_expts_tot // world_size
local_expert_start = rank * n_local_experts
local_expert_end = (rank + 1) * n_local_experts
n_gates_pad = n_tokens * n_expts_act
def topk(vals, k):
tk_indx = torch.argsort(-vals, dim=1, stable=True)[:, :k]
tk_indx = tk_indx.long()
tk_val = torch.take_along_dim(vals, tk_indx, dim=1)
return tk_val, tk_indx.int()
expt_scal, expt_indx = topk(logits, n_expts_act)
expt_scal = torch.softmax(expt_scal, dim=-1)
expt_indx, sort_indices = torch.sort(expt_indx, dim=1)
expt_scal = torch.gather(expt_scal, 1, sort_indices)
# Flatten and mask for local experts
expt_scal = expt_scal.reshape(-1)
hist = torch.histc(expt_indx, bins=n_expts_tot, max=n_expts_tot - 1)[local_expert_start:local_expert_end]
expt_indx = expt_indx.view(-1).to(torch.int32)
# we use a large value to replace the indices that are not in the local expert range
var = 1000
expt_indx = torch.where(expt_indx < local_expert_start, var, expt_indx)
topk_indx = torch.argsort(expt_indx, stable=True).to(torch.int32)
gate_indx = torch.argsort(topk_indx).to(torch.int32)
expt_indx = torch.where(expt_indx < local_expert_end, expt_indx, replace_value)
expt_indx = torch.where(local_expert_start <= expt_indx, expt_indx, replace_value)
gate_indx = torch.where(expt_indx == replace_value, replace_value, gate_indx)
gate_scal = expt_scal[topk_indx]
topk_indx = torch.where(gate_indx[topk_indx] == replace_value, replace_value, topk_indx)
# # Routing metadata for local expert computation
gather_indx = GatherIndx(src_indx=topk_indx.int(), dst_indx=gate_indx.int())
scatter_indx = ScatterIndx(src_indx=gate_indx.int(), dst_indx=topk_indx.int())
expt_data = compute_expt_data_torch(hist, n_local_experts, n_gates_pad)
hit_experts = n_expts_act
return RoutingData(gate_scal, hist, n_local_experts, hit_experts, expt_data), gather_indx, scatter_indx
def mlp_forward(self, hidden_states):
import torch.distributed as dist
if dist.is_available() and dist.is_initialized() and hasattr(self, "_is_hooked"):
routing = routing_torch_dist
else:
routing = triton_kernels_hub.routing.routing
batch_size = hidden_states.shape[0]
hidden_states = hidden_states.reshape(-1, self.router.hidden_dim)
router_logits = nn.functional.linear(hidden_states, self.router.weight, self.router.bias)
with on_device(router_logits.device):
routing_data, gather_idx, scatter_idx = routing(router_logits, self.router.top_k)
routed_out = self.experts(hidden_states, routing_data, gather_idx, scatter_idx=scatter_idx)
routed_out = routed_out.reshape(batch_size, -1, self.router.hidden_dim)
return routed_out, router_logits
def dequantize(module, param_name, param_value, target_device, dq_param_name, **kwargs):
from ..integrations.tensor_parallel import shard_and_distribute_module
model = kwargs.get("model")
empty_param = kwargs.get("empty_param")
casting_dtype = kwargs.get("casting_dtype")
to_contiguous = kwargs.get("to_contiguous")
rank = kwargs.get("rank")
device_mesh = kwargs.get("device_mesh")
for proj in ["gate_up_proj", "down_proj"]:
if proj in param_name:
if device_mesh is not None:
param_value = shard_and_distribute_module(
model,
param_value,
empty_param,
dq_param_name,
casting_dtype,
to_contiguous,
rank,
device_mesh,
)
blocks_attr = f"{proj}_blocks"
scales_attr = f"{proj}_scales"
setattr(module, param_name.rsplit(".", 1)[1], param_value)
if hasattr(module, blocks_attr) and hasattr(module, scales_attr):
dequantized = convert_moe_packed_tensors(getattr(module, blocks_attr), getattr(module, scales_attr))
setattr(module, proj, torch.nn.Parameter(dequantized.to(target_device)))
delattr(module, blocks_attr)
delattr(module, scales_attr)
def dequantize_convertops(blocks, scales):
dequantized = convert_moe_packed_tensors(blocks, scales)
return torch.nn.Parameter(dequantized)
def load_and_swizzle_mxfp4(module, param_name, param_value, target_device, triton_kernels_hub, **kwargs):
"""
This transforms the weights obtained using `convert_gpt_oss.py` to load them into `Mxfp4GptOssExperts`.
"""
PrecisionConfig, FlexCtx, InFlexData = (
triton_kernels_hub.matmul_ogs.PrecisionConfig,
triton_kernels_hub.matmul_ogs.FlexCtx,
triton_kernels_hub.matmul_ogs.InFlexData,
)
from ..integrations.tensor_parallel import shard_and_distribute_module
model = kwargs.get("model")
empty_param = kwargs.get("empty_param")
casting_dtype = kwargs.get("casting_dtype")
to_contiguous = kwargs.get("to_contiguous")
rank = kwargs.get("rank")
device_mesh = kwargs.get("device_mesh")
if "blocks" in param_name:
proj = param_name.split(".")[-1].split("_blocks")[0]
if "scales" in param_name:
proj = param_name.split(".")[-1].split("_scales")[0]
if device_mesh is not None:
shard_and_distribute_module(
model, param_value, empty_param, param_name, casting_dtype, to_contiguous, rank, device_mesh
)
else:
setattr(module, param_name.rsplit(".", 1)[1], torch.nn.Parameter(param_value, requires_grad=False))
blocks_attr = f"{proj}_blocks"
scales_attr = f"{proj}_scales"
blocks = getattr(module, blocks_attr) # at this point values were loaded from ckpt
scales = getattr(module, scales_attr)
# Check if both blocks and scales both not on meta device
if blocks.device.type != "meta" and scales.device.type != "meta":
local_experts = blocks.size(0)
if proj == "gate_up_proj":
blocks = blocks.reshape(local_experts, module.intermediate_size * 2, -1)
else:
blocks = blocks.reshape(local_experts, -1, module.intermediate_size // 2)
if (
getattr(target_device, "type", target_device) == "cpu"
and hasattr(torch, "accelerator")
and torch.accelerator.current_accelerator() is not None
):
target_device = torch.accelerator.current_accelerator().type
blocks = blocks.to(target_device).contiguous()
scales = scales.to(target_device).contiguous()
with on_device(target_device):
triton_weight_tensor, weight_scale = swizzle_mxfp4(
blocks.transpose(-2, -1), scales.transpose(-2, -1), triton_kernels_hub
)
# need to overwrite the shapes for the kernels
if proj == "gate_up_proj":
triton_weight_tensor.shape = torch.Size([local_experts, module.hidden_size, module.intermediate_size * 2])
else:
triton_weight_tensor.shape = torch.Size([local_experts, module.intermediate_size, module.hidden_size])
# triton_weight_tensor is what needs to be passed in oai kernels. It stores the data, the shapes and any more objects. It is like a subtensor
setattr(module, proj, triton_weight_tensor)
setattr(
module,
f"{proj}_precision_config",
PrecisionConfig(weight_scale=weight_scale, flex_ctx=FlexCtx(rhs_data=InFlexData())),
)
# delete blocks and scales
delattr(module, scales_attr)
delattr(module, blocks_attr)
del blocks
def swizzle_mxfp4_convertops(blocks, scales, module, proj, target_device, triton_kernels_hub):
    """
    This transforms the weights obtained using `convert_gpt_oss.py` to load them into `Mxfp4GptOssExperts`.

    Args:
        blocks: Packed mxfp4 weight blocks; dim 0 is the number of local experts.
        scales: Matching mxfp4 scales for `blocks`.
        module: The experts module; provides `intermediate_size` / `hidden_size` and receives
            the swizzled tensor and its precision config as attributes.
        proj: Either `"gate_up_proj"` or `"down_proj"` (any non-gate value takes the down branch).
        target_device: Device (or device-type string) to move the tensors to before swizzling.
        triton_kernels_hub: Loaded triton kernels hub module exposing `matmul_ogs`.
    """
    PrecisionConfig, FlexCtx, InFlexData = (
        triton_kernels_hub.matmul_ogs.PrecisionConfig,
        triton_kernels_hub.matmul_ogs.FlexCtx,
        triton_kernels_hub.matmul_ogs.InFlexData,
    )
    local_experts = blocks.size(0)
    # If the target is CPU but an accelerator is available, prefer the accelerator
    # (swizzle_mxfp4 runs there). `getattr` handles both torch.device and plain strings.
    if (
        getattr(target_device, "type", target_device) == "cpu"
        and hasattr(torch, "accelerator")
        and torch.accelerator.current_accelerator() is not None
    ):
        target_device = torch.accelerator.current_accelerator().type
    blocks = blocks.to(target_device).contiguous()
    scales = scales.to(target_device).contiguous()
    # Reshape per projection: gate_up packs 2x intermediate rows; down packs
    # intermediate columns at half width (two fp4 values per byte).
    if proj == "gate_up_proj":
        blocks = blocks.reshape(local_experts, module.intermediate_size * 2, -1)
    else:
        blocks = blocks.reshape(local_experts, -1, module.intermediate_size // 2)
    with on_device(target_device):
        triton_weight_tensor, weight_scale = swizzle_mxfp4(
            blocks.transpose(-2, -1), scales.transpose(-2, -1), triton_kernels_hub
        )
    # need to overwrite the shapes for the kernels
    if proj == "gate_up_proj":
        triton_weight_tensor.shape = torch.Size([local_experts, module.hidden_size, module.intermediate_size * 2])
    else:
        triton_weight_tensor.shape = torch.Size([local_experts, module.intermediate_size, module.hidden_size])
    # triton_weight_tensor is what needs to be passed in oai kernels. It stores the data, the shapes and any more objects. It's like a subtensor
    # Since the Experts module registers gate_up_proj and down_proj as nn.Parameters, we need to remove them so we can attach the Triton tensor
    if proj in module._parameters:
        # Remove the nn.Parameter registration so we can attach the Triton tensor
        del module._parameters[proj]
    setattr(module, proj, triton_weight_tensor)
    setattr(
        module,
        f"{proj}_precision_config",
        PrecisionConfig(weight_scale=weight_scale, flex_ctx=FlexCtx(rhs_data=InFlexData())),
    )
def replace_with_mxfp4_linear(model, quantization_config=None, modules_to_not_convert: list[str] | None = None):
    """
    Public method that replaces the expert layers of the given model with mxfp4 quantized layers.

    Args:
        model (`torch.nn.Module`):
            The model to convert, can be any `torch.nn.Module` instance.
        quantization_config (`Mxfp4Config`, defaults to `None`):
            The quantization config object that contains the quantization parameters.
        modules_to_not_convert (`list`, *optional*, defaults to `None`):
            A list of modules to not convert. If a module name is in the list (e.g. `lm_head`), it will not be
            converted.
    """
    # Nothing to replace when the config asks for dequantized weights.
    if quantization_config.dequantize:
        return model

    from .hub_kernels import get_kernel

    # The swizzle helpers in this module read the hub from a module-level global.
    global triton_kernels_hub
    triton_kernels_hub = get_kernel("kernels-community/gpt-oss-triton-kernels")

    replaced_any = False
    for name, submodule in model.named_modules():
        if not should_convert_module(name, modules_to_not_convert):
            continue
        cls_name = submodule.__class__.__name__
        if cls_name == "GptOssExperts" and not quantization_config.dequantize:
            # Swap the experts for their mxfp4 counterpart; built on "meta" so no
            # real memory is allocated until weights are loaded.
            with torch.device("meta"):
                model.set_submodule(name, Mxfp4GptOssExperts(model.config))
            replaced_any = True
        if cls_name == "GptOssMLP" and not quantization_config.dequantize:
            from types import MethodType

            # Rebind the MLP forward so it routes through the quantized experts.
            submodule.forward = MethodType(mlp_forward, submodule)
    if not replaced_any:
        logger.warning(
            "You are loading your model using mixed-precision FP4 quantization but no linear modules were found in your model."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug."
        )
    return model
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/integrations/mxfp4.py",
"license": "Apache License 2.0",
"lines": 587,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/gpt_oss/configuration_gpt_oss.py | # Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""openai model configuration"""
from ...configuration_utils import PreTrainedConfig, layer_type_validation
from ...modeling_rope_utils import RopeParameters
class GptOssConfig(PreTrainedConfig):
    r"""
    This is the configuration class to store the configuration of a GPT-OSS model. It is used to instantiate a
    GPT-OSS model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a configuration similar to that of
    [openai/gpt-oss-120b](https://huggingface.co/openai/gpt-oss-120b) (36 layers, 128 local experts).

    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.
    """

    model_type = "gpt_oss"
    default_theta = 150000.0
    base_model_pp_plan = {
        "embed_tokens": (["input_ids"], ["inputs_embeds"]),
        "layers": (["hidden_states", "attention_mask"], ["hidden_states"]),
        "norm": (["hidden_states"], ["hidden_states"]),
    }
    base_model_tp_plan = {
        "layers.*.self_attn.q_proj": "colwise",
        "layers.*.self_attn.k_proj": "colwise",
        "layers.*.self_attn.v_proj": "colwise",
        "layers.*.self_attn.o_proj": "rowwise",
        "layers.*.self_attn.sinks": "colwise",
        "layers.*.mlp.router": "ep_router",
        "layers.*.mlp.experts.gate_up_proj": "grouped_gemm",
        "layers.*.mlp.experts.gate_up_proj_bias": "grouped_gemm",
        "layers.*.mlp.experts.down_proj": "grouped_gemm",
        "layers.*.mlp.experts.down_proj_bias": "grouped_gemm",
        "layers.*.mlp.experts": "moe_tp_experts",
    }

    def __init__(
        self,
        num_hidden_layers: int | None = 36,
        num_local_experts: int | None = 128,
        vocab_size: int | None = 201088,
        hidden_size: int | None = 2880,
        intermediate_size: int | None = 2880,
        head_dim: int | None = 64,
        num_attention_heads: int | None = 64,
        num_key_value_heads: int | None = 8,
        sliding_window: int | None = 128,
        tie_word_embeddings: bool | None = False,
        hidden_act: str | None = "silu",
        initializer_range: float | None = 0.02,
        max_position_embeddings: int | None = 131072,
        rms_norm_eps: float | None = 1e-5,
        rope_parameters: RopeParameters | None = None,
        attention_dropout: float | None = 0.0,
        num_experts_per_tok: int | None = 4,
        router_aux_loss_coef: float | None = 0.9,
        output_router_logits: bool | None = False,
        use_cache: bool | None = True,
        layer_types: list[str] | None = None,
        pad_token_id: int | None = None,
        bos_token_id: int | None = None,
        eos_token_id: int | None = None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_local_experts = num_local_experts
        self.sliding_window = sliding_window
        self.num_experts_per_tok = num_experts_per_tok
        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.attention_dropout = attention_dropout
        self.head_dim = head_dim if head_dim is not None else self.hidden_size // self.num_attention_heads
        self.layer_types = layer_types
        if self.layer_types is None:
            # Alternating pattern: odd layers (0-based even index) use sliding attention.
            self.layer_types = [
                "sliding_attention" if bool((i + 1) % 2) else "full_attention" for i in range(self.num_hidden_layers)
            ]
        layer_type_validation(self.layer_types, self.num_hidden_layers)
        self.attention_bias = True
        self.max_position_embeddings = max_position_embeddings
        self.router_aux_loss_coef = router_aux_loss_coef
        self.output_router_logits = output_router_logits
        self.use_cache = use_cache
        if rope_parameters is None:
            # Build the default YaRN parameters per instance: a dict literal in the
            # signature would be shared (and mutable) across every config object.
            rope_parameters = {
                "rope_type": "yarn",
                "factor": 32.0,
                "beta_fast": 32.0,
                "beta_slow": 1.0,
                "truncate": False,
                "original_max_position_embeddings": 4096,
            }
        self.rope_parameters = rope_parameters
        self.tie_word_embeddings = tie_word_embeddings
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(**kwargs)
__all__ = ["GptOssConfig"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/gpt_oss/configuration_gpt_oss.py",
"license": "Apache License 2.0",
"lines": 111,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/gpt_oss/convert_gpt_oss_weights_to_hf.py | # Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import gc
import json
import os
from pathlib import Path
import regex as re
import tiktoken
import torch
from safetensors.torch import load_file as safe_load
from transformers import (
GenerationConfig,
GptOssConfig,
GptOssForCausalLM,
PreTrainedTokenizerFast,
)
from transformers.convert_slow_tokenizer import TikTokenConverter
# fmt: off
# If a weight needs to be split in two or more keys, use `|` to indicate it. ex:
# r"layers.(\d+).attention.wqkv.weight": r"layers.\1.self_attn.q|k|v|_proj.weight"
ORIGINAL_TO_CONVERTED_KEY_MAPPING = {
r"norm.weight": r"norm.weight",
r"\nnorm.scale": r"\nnorm.weight",
r"unembedding.weight": r"lm_head.weight",
r"embedding": r"embed_tokens",
# special key, wqkv needs to be split afterwards
r"block.(\d+).attn.qkv": r"layers.\1.self_attn.qkv_proj",
r"block.(\d+).attn.out": r"layers.\1.self_attn.o_proj",
r"block.(\d+).attn.sinks": r"layers.\1.self_attn.sinks",
r"block.(\d+).attn.norm.scale": r"layers.\1.input_layernorm.weight",
r"block.(\d+).mlp.mlp1_weight": r"layers.\1.mlp.experts.gate_up_proj",
r"block.(\d+).mlp.mlp1_bias": r"layers.\1.mlp.experts.gate_up_proj_bias",
r"block.(\d+).mlp.mlp2_weight": r"layers.\1.mlp.experts.down_proj",
r"block.(\d+).mlp.mlp2_bias": r"layers.\1.mlp.experts.down_proj_bias",
r"block.(\d+).mlp.norm.scale": r"layers.\1.post_attention_layernorm.weight",
r"block.(\d+).mlp.gate": r"layers.\1.mlp.router",
}
# fmt: on
def convert_old_keys_to_new_keys(state_dict_keys: dict | None = None):
"""
This function should be applied only once, on the concatenated keys to efficiently rename using
the key mappings.
"""
output_dict = {}
if state_dict_keys is not None:
old_text = "\n".join(state_dict_keys)
new_text = old_text
for pattern, replacement in ORIGINAL_TO_CONVERTED_KEY_MAPPING.items():
if replacement is None:
new_text = re.sub(pattern, "", new_text) # an empty line
continue
new_text = re.sub(pattern, replacement, new_text)
output_dict = dict(zip(old_text.split("\n"), new_text.split("\n")))
return output_dict
# The 16 values representable by a 4-bit mxfp4 code (low nibble first in each packed byte).
FP4_VALUES = [
    +0.0,
    +0.5,
    +1.0,
    +1.5,
    +2.0,
    +3.0,
    +4.0,
    +6.0,
    -0.0,
    -0.5,
    -1.0,
    -1.5,
    -2.0,
    -3.0,
    -4.0,
    -6.0,
]


def convert_moe_packed_tensors(
    blocks,
    scales,
    *,
    dtype: torch.dtype = torch.bfloat16,
    rows_per_chunk: int = 32768 * 1024,
) -> torch.Tensor:
    """
    Unpack mxfp4-quantized MoE weights into a dense tensor of `dtype`.

    Each byte of `blocks` holds two 4-bit codes (low nibble first) indexing into
    `FP4_VALUES`; `scales` holds one biased exponent per group of `B` bytes. Decoded
    values are `FP4_VALUES[code] * 2**(scale - 127)`.

    Args:
        blocks: uint8 tensor of shape `(*prefix, G, B)` with the packed codes.
        scales: uint8 tensor of shape `(*prefix, G)` with the biased exponents
            (must equal `blocks.shape[:-1]`).
        dtype: dtype of the returned tensor.
        rows_per_chunk: how many groups to decode per iteration, bounding peak memory.

    Returns:
        The unpacked tensor in `dtype`, with its last two logical dimensions swapped
        (assumes `prefix` has two dims — experts x rows — TODO confirm against callers).
    """
    import math

    scales = scales.to(torch.int32) - 127  # remove the exponent bias
    assert blocks.shape[:-1] == scales.shape, f"{blocks.shape=} does not match {scales.shape=}"
    lut = torch.tensor(FP4_VALUES, dtype=dtype, device=blocks.device)
    *prefix_shape, G, B = blocks.shape
    rows_total = math.prod(prefix_shape) * G
    blocks = blocks.reshape(rows_total, B)
    scales = scales.reshape(rows_total, 1)
    out = torch.empty(rows_total, B * 2, dtype=dtype, device=blocks.device)
    # Decode in chunks so huge checkpoints do not allocate the full index tensors at once.
    for r0 in range(0, rows_total, rows_per_chunk):
        r1 = min(r0 + rows_per_chunk, rows_total)
        blk = blocks[r0:r1]
        exp = scales[r0:r1]
        # nibble indices -> int64
        idx_lo = (blk & 0x0F).to(torch.long)
        idx_hi = (blk >> 4).to(torch.long)
        sub = out[r0:r1]
        sub[:, 0::2] = lut[idx_lo]
        sub[:, 1::2] = lut[idx_hi]
        torch.ldexp(sub, exp, out=sub)  # apply 2**exp group scaling in place
        del idx_lo, idx_hi, blk, exp
    out = out.reshape(*prefix_shape, G, B * 2).view(*prefix_shape, G * B * 2)
    # Bug fix: a stray `.to(torch.float8_e5m2)` cast here contradicted the `dtype`
    # argument and handed fp8 tensors to the caller, which expects the unpacked
    # weights in bfloat16. The result must stay in the requested `dtype`.
    out = out.permute(0, 2, 1).contiguous()
    return out
def write_model(
    model_path,
    input_base_path,
    mxfp4=False,
):
    """
    Convert an original GPT-OSS checkpoint into a Hugging Face checkpoint.

    Args:
        model_path: Output directory (created if missing).
        input_base_path: Directory holding the original `config.json` and `.safetensors` shards.
        mxfp4: If True, keep the packed mxfp4 blocks/scales and save them together with a
            quantization config; otherwise unpack the expert weights to bfloat16 and save a
            regular checkpoint.

    Raises:
        ValueError: if the original config declares no expert count, or a gate_up/down key
            is neither a `blocks` nor a `scales` tensor.
    """
    os.makedirs(model_path, exist_ok=True)
    eos_token_id = 200002
    pad_token_id = 199999
    original_config = json.loads((Path(input_base_path) / "config.json").read_text())
    # GPT OSS Models are distributed with either num_experts or num_local_experts depending whether the original subfolder
    # or the root folder is used.
    num_local_experts = original_config.get("num_experts") or original_config.get("num_local_experts")
    if num_local_experts is None:
        raise ValueError("num_local_experts or num_experts must be specified in the config.")
    # Handle both old and new config formats for rope_parameters
    if "rope_parameters" in original_config:
        # New format: rope_parameters already exists as a dict
        rope_parameters = original_config.pop("rope_parameters")
        # Ensure rope_type is set
        if "rope_type" not in rope_parameters:
            rope_parameters["rope_type"] = "yarn"
    else:
        # Old format: construct rope_parameters from individual keys with defaults matching GptOssConfig
        rope_parameters = {
            "factor": float(original_config.pop("rope_parameters_factor", 32.0)),
            "beta_fast": float(original_config.pop("rope_ntk_beta", 32.0)),
            "beta_slow": float(original_config.pop("rope_ntk_alpha", 1.0)),
            "rope_type": "yarn",
            "truncate": False,
            "original_max_position_embeddings": 4096,
        }
    config = GptOssConfig(
        num_local_experts=num_local_experts,
        rope_parameters=rope_parameters,
        eos_token_id=eos_token_id,
        pad_token_id=pad_token_id,
        **original_config,
    )
    print(f"Fetching all parameters from the checkpoint at {input_base_path}...")
    final_ = {}
    for file in list(os.listdir(input_base_path)):
        if file.endswith(".safetensors"):
            final_.update(safe_load(os.path.join(input_base_path, file)))
    print("Converting ..")
    all_keys = final_.keys()
    new_keys = convert_old_keys_to_new_keys(all_keys)
    state_dict = {}
    for key in all_keys:
        # Post-process the current_parameter.
        new_key = new_keys.get(key, key)
        if "lm_head" not in new_key:
            new_key = "model." + new_key
        print(f"Processing key: {key} -> {new_key}")
        if re.search("qkv_proj", new_key):
            # Fused QKV: slice the rows into separate q/k/v projections.
            q_len = config.head_dim * config.num_attention_heads
            k_len = config.head_dim * config.num_key_value_heads
            q, k, v = (
                final_[key][:q_len, ...],
                final_[key][q_len : k_len + q_len, ...],
                final_[key][k_len + q_len :, ...],
            )
            q_key = re.sub(r"qkv_proj", "q_proj", new_key)
            k_key = re.sub(r"qkv_proj", "k_proj", new_key)
            v_key = re.sub(r"qkv_proj", "v_proj", new_key)
            state_dict[q_key] = q.contiguous().to(torch.bfloat16)
            state_dict[k_key] = k.contiguous().to(torch.bfloat16)
            state_dict[v_key] = v.contiguous().to(torch.bfloat16)
        elif re.search("gate_up_proj|down_proj", new_key) and "bias" not in new_key:
            if not mxfp4:
                if "scales" in new_key:
                    # scales are consumed together with the matching blocks below
                    continue
                elif "blocks" in new_key:
                    # deal with packed weights
                    blocks = final_[key]
                    scales = final_[key.replace("blocks", "scales")]
                    new_key = new_key.replace(".blocks", "")
                    unpacked_tensors = convert_moe_packed_tensors(blocks, scales, dtype=torch.bfloat16)
                    state_dict[new_key] = unpacked_tensors
                else:
                    # Bug fix: `raise (f"...")` raised a plain string (TypeError); raise a proper exception.
                    raise ValueError(f"Unidentified {key}, please double check the state dict")
            else:
                if "scales" in new_key:
                    new_key = new_key.replace(".scales", "_scales")
                    state_dict[new_key] = final_[key].contiguous()
                elif "blocks" in new_key:
                    new_key = new_key.replace(".blocks", "_blocks")
                    state_dict[new_key] = final_[key].contiguous()
                else:
                    # Bug fix: same as above — raise a real exception, not a string.
                    raise ValueError(f"Unidentified {key}, please double check the state dict")
        else:
            weight = final_[key]
            if not re.search("norm", new_key):
                weight = weight.to(torch.bfloat16)  # norms are the only ones in float32
            state_dict[new_key] = weight
    del final_
    gc.collect()
    if not mxfp4:
        print("Loading the checkpoint in a GptOss model for unpacked format")
        with torch.device("meta"):
            model = GptOssForCausalLM(config)
        model.load_state_dict(state_dict, strict=True, assign=True)
        print("Checkpoint loaded successfully.")
        del config._name_or_path
        print("Saving the model")
        model.save_pretrained(model_path)
        del state_dict, model
    else:
        print("Saving the checkpoint in mxfp4 format")
        config.quantization_config = {
            "quant_method": "mxfp4",
            "modules_to_not_convert": [
                "model.layers.*.self_attn",
                "model.layers.*.mlp.router",
                "model.embed_tokens",
                "lm_head",
            ],
        }
        # required as we don't save the model with save_pretrained
        config.architectures = ["GptOssForCausalLM"]
        config.save_pretrained(model_path)
        save_sharded_model(state_dict, model_path)
        del state_dict
        gc.collect()
        print("Reloading the model to check if it's saved correctly.")
        GptOssForCausalLM.from_pretrained(model_path, dtype=torch.bfloat16, device_map="auto")
        print("Model reloaded successfully.")
    # generation config
    print("Saving generation config...")
    generation_config = GenerationConfig(
        bos_token_id=199998,  # <|startoftext|>
        do_sample=True,
        eos_token_id=[200002, 199999],  # <|return|>, <|endoftext|>
        pad_token_id=199999,  # <|endoftext|>
        temperature=1.0,
        top_p=1.0,
    )
    generation_config.save_pretrained(model_path)
def save_sharded_model(state_dict, model_path):
    """
    Save `state_dict` to `model_path` as multiple safetensors shards, plus a
    `model.safetensors.index.json` produced via `create_safetensors_index`.

    Shards are filled greedily in key order and flushed whenever the next tensor
    would push the running shard past `max_shard_size`.
    """
    from safetensors.torch import save_file

    max_shard_size = 4800000000  # 4.8 GB
    os.makedirs(model_path, exist_ok=True)
    shard_size_counter = 0
    shard_id = 0
    shard_state_dict = {}
    total_sharded_dict = {}
    safetensors_index = {}
    safetensors_index["metadata"] = {"total_size": 0}
    safetensors_index["weight_map"] = {}
    for key in state_dict.keys():
        size = state_dict[key].numel() * state_dict[key].element_size()
        if shard_size_counter + size > max_shard_size:
            # Current shard is full: stash it and start a new one.
            total_sharded_dict[shard_id] = shard_state_dict
            shard_id += 1
            shard_size_counter = 0
            shard_state_dict = {}
        shard_state_dict[key] = state_dict[key]
        shard_size_counter += size
        safetensors_index["metadata"]["total_size"] += size
        safetensors_index["weight_map"][key] = shard_id
    # Flush the last (possibly partial) shard.
    total_sharded_dict[shard_id] = shard_state_dict
    # NOTE(review): `num_shards` is the highest shard id (count - 1), so files are
    # named 0-indexed `model-00000-of-0000(N-1)`, unlike the usual 1-indexed HF
    # convention. It is internally consistent with `create_safetensors_index`.
    num_shards = len(total_sharded_dict) - 1
    for shard_id, shard_state_dict in total_sharded_dict.items():
        save_file(shard_state_dict, os.path.join(model_path, f"model-{shard_id:05d}-of-{num_shards:05d}.safetensors"))
    create_safetensors_index(safetensors_index, num_shards, model_path)
def create_safetensors_index(safetensors_index, num_shards, model_path):
    """
    Rewrite the integer shard ids in `safetensors_index["weight_map"]` as shard
    filenames (mutating the dict in place), then dump the index as JSON into
    `model.safetensors.index.json` under `model_path`.
    """
    weight_map = safetensors_index["weight_map"]
    for weight_name, shard_id in list(weight_map.items()):
        weight_map[weight_name] = f"model-{shard_id:05d}-of-{num_shards:05d}.safetensors"
    index_path = os.path.join(model_path, "model.safetensors.index.json")
    with open(index_path, "w") as f:
        json.dump(safetensors_index, f)
def bytes_to_unicode():
    """
    Returns list of utf-8 byte and a mapping to unicode strings. We specifically avoids mapping to whitespace/control
    characters the bpe code barfs on.
    The reversible bpe codes work on unicode strings. This means you need a large # of unicode characters in your vocab
    if you want to avoid UNKs. When you're at something like a 10B token dataset you end up needing around 5K for
    decent coverage. This is a significant percentage of your normal, say, 32K bpe vocab. To avoid that, we want lookup
    tables between utf-8 bytes and unicode strings.
    """
    # Printable byte values that map to themselves.
    printable = (
        list(range(ord("!"), ord("~") + 1))
        + list(range(ord("¡"), ord("¬") + 1))
        + list(range(ord("®"), ord("ÿ") + 1))
    )
    codepoints = {byte: byte for byte in printable}
    # Every remaining byte is pushed above the byte range, starting at 256,
    # so the mapping stays injective and avoids control/whitespace chars.
    offset = 0
    for byte in range(2**8):
        if byte not in codepoints:
            codepoints[byte] = 2**8 + offset
            offset += 1
    return {byte: chr(code) for byte, code in codepoints.items()}
class GptOssConverter(TikTokenConverter):
    """Build a `PreTrainedTokenizerFast` for GPT-OSS from a tiktoken encoding,
    including the Harmony special tokens."""

    def extract_vocab_merges_from_model(self, tiktoken_url: str):
        """
        Reconstruct a BPE vocab and rank-ordered merge list from a tiktoken encoding.

        Args:
            tiktoken_url: Name of the tiktoken encoding to load (passed to
                `tiktoken.get_encoding`).

        Returns:
            `(vocab, merges)` where `vocab` maps token strings (bytes remapped via
            `bytes_to_unicode`) to ranks, and `merges` is a list of string pairs
            sorted by rank.
        """
        tokenizer = tiktoken.get_encoding(tiktoken_url)
        self.pattern = tokenizer._pat_str
        bpe_ranks = tokenizer._mergeable_ranks
        byte_encoder = bytes_to_unicode()

        def token_bytes_to_string(b):
            # latin-1 decodes each byte to one char, then map through byte_encoder.
            return "".join([byte_encoder[ord(char)] for char in b.decode("latin-1")])

        merges = []
        vocab = {}
        for token, rank in bpe_ranks.items():
            vocab[token_bytes_to_string(token)] = rank
            # Single-byte tokens cannot result from a merge.
            if len(token) == 1:
                continue
            local = []
            # A (left, right) split is a valid merge only if both halves and the
            # whole token are themselves ranked tokens.
            for index in range(1, len(token)):
                piece_l, piece_r = token[:index], token[index:]
                if piece_l in bpe_ranks and piece_r in bpe_ranks and (piece_l + piece_r) in bpe_ranks:
                    local.append((piece_l, piece_r, rank))
            local = sorted(local, key=lambda x: (bpe_ranks[x[0]], bpe_ranks[x[1]]), reverse=False)
            merges.extend(local)
        merges = sorted(merges, key=lambda val: val[2], reverse=False)
        merges = [(token_bytes_to_string(val[0]), token_bytes_to_string(val[1])) for val in merges]
        return vocab, merges

    def __init__(
        self,
        vocab_file,
        model_max_length: int,
        chat_template: str | None = None,
        **kwargs,
    ):
        """
        Args:
            vocab_file: tiktoken encoding name (resolved by `tiktoken.get_encoding`).
            model_max_length: Maximum sequence length stored on the tokenizer.
            chat_template: Optional chat template; when set, `<|return|>` becomes the EOS token.
        """
        super().__init__(vocab_file, pattern=None)
        # TODO 1st download the vocabfile!!!
        tokenizer = tiktoken.get_encoding(vocab_file)
        # Placeholder; replaced below with the sorted token-string list.
        self.additional_special_tokens = {}
        # Complete list of Harmony special tokens as per o200k_harmony spec
        special_tokens_map = {
            "<|startoftext|>": 199998,
            "<|endoftext|>": 199999,
            "<|return|>": 200002,
            "<|constrain|>": 200003,
            "<|channel|>": 200005,
            "<|start|>": 200006,
            "<|end|>": 200007,
            "<|message|>": 200008,
            "<|call|>": 200012,
            "<|endofprompt|>": 200018,
        }
        # Add the remaining reserved slots while skipping IDs already present above.
        used_ids = set(special_tokens_map.values())
        for k in range(199999, 200018):
            if k in used_ids:
                continue
            special_tokens_map.setdefault(f"<|reserved_{k}|>", k)
        # Keep only token strings (sorted by ID) for TikTokenConverter.
        self.additional_special_tokens = [tok for tok, _ in sorted(special_tokens_map.items(), key=lambda x: x[1])]
        # Replaces the raw tiktoken object with the converted fast-tokenizer backend.
        tokenizer = self.converted()
        if chat_template is not None:
            kwargs["chat_template"] = chat_template
        self.tokenizer = PreTrainedTokenizerFast(
            tokenizer_object=tokenizer,
            bos_token="<|startoftext|>",
            eos_token="<|return|>" if chat_template else "<|endoftext|>",
            pad_token="<|endoftext|>",
            model_input_names=["input_ids", "attention_mask"],
            model_max_length=model_max_length,
            **kwargs,
        )
def write_tokenizer(tokenizer_path: str, save_dir: str):
# Updated Harmony chat template
chat_template = """{#-
In addition to the normal inputs of `messages` and `tools`, this template also accepts the
following kwargs:
- "builtin_tools": A list, can contain "browser" and/or "python".
- "model_identity": A string that optionally describes the model identity.
- "reasoning_effort": A string that describes the reasoning effort, defaults to "medium".
#}
{#- Tool Definition Rendering ============================================== #}
{%- macro render_typescript_type(param_spec, required_params, is_nullable=false) -%}
{%- if param_spec.type == "array" -%}
{%- if param_spec['items'] -%}
{%- if param_spec['items']['type'] == "string" -%}
{{- "string[]" }}
{%- elif param_spec['items']['type'] == "number" -%}
{{- "number[]" }}
{%- elif param_spec['items']['type'] == "integer" -%}
{{- "number[]" }}
{%- elif param_spec['items']['type'] == "boolean" -%}
{{- "boolean[]" }}
{%- else -%}
{%- set inner_type = render_typescript_type(param_spec['items'], required_params) -%}
{%- if inner_type == "object | object" or inner_type|length > 50 -%}
{{- "any[]" }}
{%- else -%}
{{- inner_type + "[]" }}
{%- endif -%}
{%- endif -%}
{%- if param_spec.nullable -%}
{{- " | null" }}
{%- endif -%}
{%- else -%}
{{- "any[]" }}
{%- if param_spec.nullable -%}
{{- " | null" }}
{%- endif -%}
{%- endif -%}
{%- elif param_spec.type is defined and param_spec.type is iterable and param_spec.type is not string and param_spec.type is not mapping and param_spec.type[0] is defined -%}
{#- Handle array of types like ["object", "object"] from Union[dict, list] #}
{%- if param_spec.type | length > 1 -%}
{{- param_spec.type | join(" | ") }}
{%- else -%}
{{- param_spec.type[0] }}
{%- endif -%}
{%- elif param_spec.oneOf -%}
{#- Handle oneOf schemas - check for complex unions and fallback to any #}
{%- set has_object_variants = false -%}
{%- for variant in param_spec.oneOf -%}
{%- if variant.type == "object" -%}
{%- set has_object_variants = true -%}
{%- endif -%}
{%- endfor -%}
{%- if has_object_variants and param_spec.oneOf|length > 1 -%}
{{- "any" }}
{%- else -%}
{%- for variant in param_spec.oneOf -%}
{{- render_typescript_type(variant, required_params) -}}
{%- if variant.description %}
{{- "// " + variant.description }}
{%- endif -%}
{%- if variant.default is defined %}
{{ "// default: " + variant.default|tojson }}
{%- endif -%}
{%- if not loop.last %}
{{- " | " }}
{% endif -%}
{%- endfor -%}
{%- endif -%}
{%- elif param_spec.type == "string" -%}
{%- if param_spec.enum -%}
{{- '"' + param_spec.enum|join('" | "') + '"' -}}
{%- else -%}
{{- "string" }}
{%- if param_spec.nullable %}
{{- " | null" }}
{%- endif -%}
{%- endif -%}
{%- elif param_spec.type == "number" -%}
{{- "number" }}
{%- elif param_spec.type == "integer" -%}
{{- "number" }}
{%- elif param_spec.type == "boolean" -%}
{{- "boolean" }}
{%- elif param_spec.type == "object" -%}
{%- if param_spec.properties -%}
{{- "{\n" }}
{%- for prop_name, prop_spec in param_spec.properties.items() -%}
{{- prop_name -}}
{%- if prop_name not in (param_spec.required or []) -%}
{{- "?" }}
{%- endif -%}
{{- ": " }}
{{ render_typescript_type(prop_spec, param_spec.required or []) }}
{%- if not loop.last -%}
{{-", " }}
{%- endif -%}
{%- endfor -%}
{{- "}" }}
{%- else -%}
{{- "object" }}
{%- endif -%}
{%- else -%}
{{- "any" }}
{%- endif -%}
{%- endmacro -%}
{%- macro render_tool_namespace(namespace_name, tools) -%}
{{- "## " + namespace_name + "\n\n" }}
{{- "namespace " + namespace_name + " {\n\n" }}
{%- for tool in tools %}
{%- set tool = tool.function %}
{{- "// " + tool.description + "\n" }}
{{- "type "+ tool.name + " = " }}
{%- if tool.parameters and tool.parameters.properties %}
{{- "(_: {\n" }}
{%- for param_name, param_spec in tool.parameters.properties.items() %}
{%- if param_spec.description %}
{{- "// " + param_spec.description + "\n" }}
{%- endif %}
{{- param_name }}
{%- if param_name not in (tool.parameters.required or []) -%}
{{- "?" }}
{%- endif -%}
{{- ": " }}
{{- render_typescript_type(param_spec, tool.parameters.required or []) }}
{%- if param_spec.default is defined -%}
{%- if param_spec.enum %}
{{- ", // default: " + param_spec.default }}
{%- elif param_spec.oneOf %}
{{- "// default: " + param_spec.default }}
{%- else %}
{{- ", // default: " + param_spec.default|tojson }}
{%- endif -%}
{%- endif -%}
{%- if not loop.last %}
{{- ",\n" }}
{%- else %}
{{- ",\n" }}
{%- endif -%}
{%- endfor %}
{{- "}) => any;\n\n" }}
{%- else -%}
{{- "() => any;\n\n" }}
{%- endif -%}
{%- endfor %}
{{- "} // namespace " + namespace_name }}
{%- endmacro -%}
{%- macro render_builtin_tools(browser_tool, python_tool) -%}
{%- if browser_tool %}
{{- "## browser\n\n" }}
{{- "// Tool for browsing.\n" }}
{{- "// The `cursor` appears in brackets before each browsing display: `[{cursor}]`.\n" }}
{{- "// Cite information from the tool using the following format:\n" }}
{{- "// `【{cursor}†L{line_start}(-L{line_end})?】`, for example: `【6†L9-L11】` or `【8†L3】`.\n" }}
{{- "// Do not quote more than 10 words directly from the tool output.\n" }}
{{- "// sources=web (default: web)\n" }}
{{- "namespace browser {\n\n" }}
{{- "// Searches for information related to `query` and displays `topn` results.\n" }}
{{- "type search = (_: {\n" }}
{{- "query: string,\n" }}
{{- "topn?: number, // default: 10\n" }}
{{- "source?: string,\n" }}
{{- "}) => any;\n\n" }}
{{- "// Opens the link `id` from the page indicated by `cursor` starting at line number `loc`, showing `num_lines` lines.\n" }}
{{- "// Valid link ids are displayed with the formatting: `【{id}†.*】`.\n" }}
{{- "// If `cursor` is not provided, the most recent page is implied.\n" }}
{{- "// If `id` is a string, it is treated as a fully qualified URL associated with `source`.\n" }}
{{- "// If `loc` is not provided, the viewport will be positioned at the beginning of the document or centered on the most relevant passage, if available.\n" }}
{{- "// Use this function without `id` to scroll to a new location of an opened page.\n" }}
{{- "type open = (_: {\n" }}
{{- "id?: number | string, // default: -1\n" }}
{{- "cursor?: number, // default: -1\n" }}
{{- "loc?: number, // default: -1\n" }}
{{- "num_lines?: number, // default: -1\n" }}
{{- "view_source?: boolean, // default: false\n" }}
{{- "source?: string,\n" }}
{{- "}) => any;\n\n" }}
{{- "// Finds exact matches of `pattern` in the current page, or the page given by `cursor`.\n" }}
{{- "type find = (_: {\n" }}
{{- "pattern: string,\n" }}
{{- "cursor?: number, // default: -1\n" }}
{{- "}) => any;\n\n" }}
{{- "} // namespace browser\n\n" }}
{%- endif -%}
{%- if python_tool %}
{{- "## python\n\n" }}
{{- "Use this tool to execute Python code in your chain of thought. The code will not be shown to the user. This tool should be used for internal reasoning, but not for code that is intended to be visible to the user (e.g. when creating plots, tables, or files).\n\n" }}
{{- "When you send a message containing Python code to python, it will be executed in a stateful Jupyter notebook environment. python will respond with the output of the execution or time out after 120.0 seconds. The drive at '/mnt/data' can be used to save and persist user files. Internet access for this session is UNKNOWN. Depends on the cluster.\n\n" }}
{%- endif -%}
{%- endmacro -%}
{#- System Message Construction ============================================ #}
{%- macro build_system_message() -%}
{%- if model_identity is not defined %}
{%- set model_identity = "You are ChatGPT, a large language model trained by OpenAI." %}
{%- endif %}
{{- model_identity + "\n" }}
{{- "Knowledge cutoff: 2024-06\n" }}
{{- "Current date: " + strftime_now("%Y-%m-%d") + "\n\n" }}
{%- if reasoning_effort is not defined %}
{%- set reasoning_effort = "medium" %}
{%- endif %}
{{- "Reasoning: " + reasoning_effort + "\n\n" }}
{%- if builtin_tools %}
{{- "# Tools\n\n" }}
{%- set available_builtin_tools = namespace(browser=false, python=false) %}
{%- for tool in builtin_tools %}
{%- if tool == "browser" %}
{%- set available_builtin_tools.browser = true %}
{%- elif tool == "python" %}
{%- set available_builtin_tools.python = true %}
{%- endif %}
{%- endfor %}
{{- render_builtin_tools(available_builtin_tools.browser, available_builtin_tools.python) }}
{%- endif -%}
{{- "# Valid channels: analysis, commentary, final. Channel must be included for every message." }}
{%- if tools -%}
{{- "\nCalls to these tools must go to the commentary channel: 'functions'." }}
{%- endif -%}
{%- endmacro -%}
{#- Main Template Logic ================================================= #}
{#- Set defaults #}
{#- Render system message #}
{{- "<|start|>system<|message|>" }}
{{- build_system_message() }}
{{- "<|end|>" }}
{#- Extract developer message #}
{%- if messages[0].role == "developer" or messages[0].role == "system" %}
{%- set developer_message = messages[0].content %}
{%- set loop_messages = messages[1:] %}
{%- else %}
{%- set developer_message = "" %}
{%- set loop_messages = messages %}
{%- endif %}
{#- Render developer message #}
{%- if developer_message or tools %}
{{- "<|start|>developer<|message|>" }}
{%- if developer_message %}
{{- "# Instructions\n\n" }}
{{- developer_message }}
{%- endif %}
{%- if tools -%}
{{- "\n\n" }}
{{- "# Tools\n\n" }}
{{- render_tool_namespace("functions", tools) }}
{%- endif -%}
{{- "<|end|>" }}
{%- endif %}
{#- Render messages #}
{%- set last_tool_call = namespace(name=none) %}
{%- for message in loop_messages -%}
{#- At this point only assistant/user/tool messages should remain #}
{%- if message.role == 'assistant' -%}
{#- Checks to ensure the messages are being passed in the format we expect #}
{%- if "content" in message %}
{%- if "<|channel|>analysis<|message|>" in message.content or "<|channel|>final<|message|>" in message.content %}
{{- raise_exception("You have passed a message containing <|channel|> tags in the content field. Instead of doing this, you should pass analysis messages (the string between '<|message|>' and '<|end|>') in the 'thinking' field, and final messages (the string between '<|message|>' and '<|end|>') in the 'content' field.") }}
{%- endif %}
{%- endif %}
{%- if "thinking" in message %}
{%- if "<|channel|>analysis<|message|>" in message.thinking or "<|channel|>final<|message|>" in message.thinking %}
{{- raise_exception("You have passed a message containing <|channel|> tags in the thinking field. Instead of doing this, you should pass analysis messages (the string between '<|message|>' and '<|end|>') in the 'thinking' field, and final messages (the string between '<|message|>' and '<|end|>') in the 'content' field.") }}
{%- endif %}
{%- endif %}
{%- if "tool_calls" in message %}
{#- We need very careful handling here - we want to drop the tool call analysis message if the model #}
{#- has output a later <|final|> message, but otherwise we want to retain it. This is the only case #}
{#- when we render CoT/analysis messages in inference. #}
{%- set future_final_message = namespace(found=false) %}
{%- for future_message in loop_messages[loop.index:] %}
{%- if future_message.role == 'assistant' and "tool_calls" not in future_message %}
{%- set future_final_message.found = true %}
{%- endif %}
{%- endfor %}
{#- We assume max 1 tool call per message, and so we infer the tool call name #}
{#- in "tool" messages from the most recent assistant tool call name #}
{%- set tool_call = message.tool_calls[0] %}
{%- if tool_call.function %}
{%- set tool_call = tool_call.function %}
{%- endif %}
{%- if message.content and message.thinking %}
{{- raise_exception("Cannot pass both content and thinking in an assistant message with tool calls! Put the analysis message in one or the other, but not both.") }}
{%- elif message.content and not future_final_message.found %}
{{- "<|start|>assistant<|channel|>analysis<|message|>" + message.content + "<|end|>" }}
{%- elif message.thinking and not future_final_message.found %}
{{- "<|start|>assistant<|channel|>analysis<|message|>" + message.thinking + "<|end|>" }}
{%- endif %}
{{- "<|start|>assistant to=" }}
{{- "functions." + tool_call.name + "<|channel|>commentary " }}
{{- (tool_call.content_type if tool_call.content_type is defined else "json") + "<|message|>" }}
{{- tool_call.arguments|tojson }}
{{- "<|call|>" }}
{%- set last_tool_call.name = tool_call.name %}
{%- elif loop.last and not add_generation_prompt %}
{#- Only render the CoT if the final turn is an assistant turn and add_generation_prompt is false #}
{#- This is a situation that should only occur in training, never in inference. #}
{%- if "thinking" in message %}
{{- "<|start|>assistant<|channel|>analysis<|message|>" + message.thinking + "<|end|>" }}
{%- endif %}
{#- <|return|> indicates the end of generation, but <|end|> does not #}
{#- <|return|> should never be an input to the model, but we include it as the final token #}
{#- when training, so the model learns to emit it. #}
{{- "<|start|>assistant<|channel|>final<|message|>" + message.content + "<|return|>" }}
{%- else %}
{#- CoT is dropped during all previous turns, so we never render it for inference #}
{{- "<|start|>assistant<|channel|>final<|message|>" + message.content + "<|end|>" }}
{%- set last_tool_call.name = none %}
{%- endif %}
{%- elif message.role == 'tool' -%}
{%- if last_tool_call.name is none %}
{{- raise_exception("Message has tool role, but there was no previous assistant message with a tool call!") }}
{%- endif %}
{{- "<|start|>functions." + last_tool_call.name }}
{{- " to=assistant<|channel|>commentary<|message|>" + message.content|tojson + "<|end|>" }}
{%- elif message.role == 'user' -%}
{{- "<|start|>user<|message|>" + message.content + "<|end|>" }}
{%- endif -%}
{%- endfor -%}
{#- Generation prompt #}
{%- if add_generation_prompt -%}
<|start|>assistant
{%- endif -%}"""
converter = GptOssConverter(
vocab_file=tokenizer_path,
model_max_length=None,
chat_template=chat_template,
)
tokenizer = converter.tokenizer
tokenizer.save_pretrained(save_dir)
print("Saving chat template...")
chat_template_path = os.path.join(save_dir, "chat_template.jinja")
with open(chat_template_path, "w", encoding="utf-8") as f:
f.write(chat_template)
def main():
    """CLI entry point: convert GPT-OSS weights and the tokenizer to HF format."""
    parser = argparse.ArgumentParser()
    # Both paths are mandatory: without them the conversion helpers would fail
    # deep inside with a confusing `None` path error, so fail fast at parse time.
    parser.add_argument(
        "--input_dir",
        required=True,
        help="Location of `./original` subfolder of the GPT OSS model repo.",
    )
    parser.add_argument(
        "--output_dir",
        required=True,
        help="Location to write the converted HF model and tokenizer",
    )
    # Only specify this if you want to use the model with mxfp4 quantization
    # It means the model will be unpacked, and quantized using mxfp4 during inference if all the triton requirements are satisfied (triton >= 3.4.0)
    # Else we have a fallback to the full precision model (bfloat16)
    # If not specified, the model will be unpacked during conversion, and will be in fp8/bfloat16 during inference
    # Note: mxfp4 should bring an important speedup in inference time with blackwell gpus
    parser.add_argument(
        "--mxfp4",
        action="store_true",
        help="Whether to use the original model with mxfp4 quantization or default to the full precision model.",
    )
    args = parser.parse_args()
    write_model(
        model_path=args.output_dir,
        input_base_path=args.input_dir,
        mxfp4=args.mxfp4,
    )
    write_tokenizer(
        tokenizer_path="o200k_base",
        save_dir=args.output_dir,
    )


if __name__ == "__main__":
    main()
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/gpt_oss/convert_gpt_oss_weights_to_hf.py",
"license": "Apache License 2.0",
"lines": 741,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/gpt_oss/modular_gpt_oss.py | # Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections.abc import Callable
import torch
from torch import nn
from torch.nn import functional as F
from ... import initialization as init
from ...cache_utils import Cache, DynamicCache
from ...integrations import use_experts_implementation, use_kernel_forward_from_hub
from ...masking_utils import create_causal_mask, create_sliding_window_causal_mask
from ...modeling_outputs import (
MoeModelOutputWithPast,
)
from ...modeling_rope_utils import dynamic_rope_update
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...utils import (
TransformersKwargs,
auto_docstring,
logging,
)
from ...utils.generic import maybe_autocast, merge_with_config_defaults
from ...utils.output_capturing import OutputRecorder, capture_outputs
from ..llama.modeling_llama import (
LlamaDecoderLayer,
LlamaPreTrainedModel,
LlamaRMSNorm,
repeat_kv,
)
from ..mixtral.modeling_mixtral import (
MixtralForCausalLM,
MixtralForSequenceClassification,
MixtralForTokenClassification,
MixtralModel,
)
from ..qwen2.modeling_qwen2 import Qwen2Attention, Qwen2RotaryEmbedding
from .configuration_gpt_oss import GptOssConfig
logger = logging.get_logger(__name__)
class GptOssRMSNorm(LlamaRMSNorm):
    """RMSNorm variant that applies the learned scale while still in float32.

    Llama casts the normalized activations back to the input dtype *before*
    multiplying by the weight; here the multiplication happens in float32 and
    the result is cast once at the end.
    """

    def forward(self, hidden_states):
        orig_dtype = hidden_states.dtype
        x = hidden_states.to(torch.float32)
        mean_square = x.pow(2).mean(-1, keepdim=True)
        x = x * torch.rsqrt(mean_square + self.variance_epsilon)
        # Scale in float32, then cast back — this ordering is the diff with Llama.
        return (self.weight * x).to(orig_dtype)
@use_experts_implementation(is_transposed=True, has_bias=True)
class GptOssExperts(nn.Module):
    """All local experts fused into stacked 3-D weight tensors.

    Instead of one `nn.Linear` pair per expert, every expert's gate/up and
    down projections live in a single parameter per projection, indexed by
    expert id on dim 0.
    """

    def __init__(self, config):
        super().__init__()
        self.intermediate_size = config.intermediate_size
        self.num_experts = config.num_local_experts
        self.hidden_size = config.hidden_size
        # (num_experts, hidden, 2*intermediate): gate and up are interleaved on the last dim.
        self.gate_up_proj = nn.Parameter(torch.empty(self.num_experts, self.hidden_size, 2 * self.intermediate_size))
        self.gate_up_proj_bias = nn.Parameter(torch.empty(self.num_experts, 2 * self.intermediate_size))
        self.down_proj = nn.Parameter(torch.empty((self.num_experts, self.intermediate_size, self.hidden_size)))
        self.down_proj_bias = nn.Parameter(torch.empty(self.num_experts, self.hidden_size))
        self.alpha = 1.702  # sigmoid slope of the gated activation below
        self.limit = 7.0  # clamp bound for the gate/up activations

    def _apply_gate(self, gate_up: torch.Tensor) -> torch.Tensor:
        """Clamped GLU: `gate * sigmoid(alpha * gate)` scaled by `(up + 1)`."""
        # Even last-dim indices hold the gate, odd indices hold the up projection.
        gate, up = gate_up[..., ::2], gate_up[..., 1::2]
        gate = gate.clamp(min=None, max=self.limit)
        up = up.clamp(min=-self.limit, max=self.limit)
        glu = gate * torch.sigmoid(gate * self.alpha)
        gated_output = (up + 1) * glu
        return gated_output

    def forward(self, hidden_states: torch.Tensor, router_indices=None, routing_weights=None) -> torch.Tensor:
        """Dispatch flattened tokens to their routed experts and accumulate weighted outputs.

        Args:
            hidden_states: flattened token activations — assumed (num_tokens, hidden_size); TODO confirm.
            router_indices: (num_tokens, top_k) expert ids chosen by the router.
            routing_weights: (num_tokens, top_k) normalized routing scores.
        """
        next_states = torch.zeros_like(hidden_states, dtype=hidden_states.dtype, device=hidden_states.device)
        with torch.no_grad():
            # One-hot over routed expert ids; the routing bookkeeping needs no gradient.
            expert_mask = torch.nn.functional.one_hot(
                router_indices, num_classes=self.num_experts
            )  # (num_tokens, top_k, num_experts) -> permuted to (num_experts, top_k, num_tokens)
            expert_mask = expert_mask.permute(2, 1, 0)
            # we sum on the top_k and on the sequence length to get which experts
            # are hit this time around
            expert_hit = torch.greater(expert_mask.sum(dim=(-1, -2)), 0).nonzero()
        for expert_idx in expert_hit:
            # expert_idx only have 1 element, so we can use scale for fast indexing
            expert_idx = expert_idx[0]
            # NOTE(review): with num_classes=self.num_experts this index can never equal
            # num_experts — looks like a leftover guard from a padded-index variant; confirm.
            if expert_idx == self.num_experts:
                continue
            top_k_pos, token_idx = torch.where(expert_mask[expert_idx])
            current_state = hidden_states[token_idx]
            gate_up = current_state @ self.gate_up_proj[expert_idx] + self.gate_up_proj_bias[expert_idx]
            gated_output = self._apply_gate(gate_up)
            out = gated_output @ self.down_proj[expert_idx] + self.down_proj_bias[expert_idx]
            # Scale each token's expert output by its routing weight before accumulating.
            weighted_output = out * routing_weights[token_idx, top_k_pos, None]
            next_states.index_add_(0, token_idx, weighted_output.to(hidden_states.dtype))
        return next_states
class GptOssTopKRouter(nn.Module):
    """Linear router that scores every token against each expert and keeps the top-k."""

    def __init__(self, config):
        super().__init__()
        self.top_k = config.num_experts_per_tok
        self.num_experts = config.num_local_experts
        self.hidden_dim = config.hidden_size
        self.weight = nn.Parameter(torch.zeros(self.num_experts, self.hidden_dim))
        self.bias = nn.Parameter(torch.zeros(self.num_experts))

    def forward(self, hidden_states):
        """Return `(router_logits, top-k scores softmaxed over k, top-k expert indices)`."""
        logits = F.linear(hidden_states, self.weight, self.bias)  # (num_tokens, num_experts)
        top_values, top_indices = logits.topk(self.top_k, dim=-1)  # (num_tokens, top_k)
        # Normalize only over the selected experts, keeping the logits' dtype.
        scores = F.softmax(top_values, dim=1, dtype=top_values.dtype)
        return logits, scores, top_indices
@use_kernel_forward_from_hub("MegaBlocksMoeMLP")
class GptOssMLP(nn.Module):
    """Sparse MoE block: route flattened tokens, then dispatch them to the experts."""

    def __init__(self, config):
        super().__init__()
        self.router = GptOssTopKRouter(config)
        self.experts = GptOssExperts(config)

    def forward(self, hidden_states):
        """Return `(mixed_hidden_states, router_scores)` for a (batch, seq, dim) input."""
        bsz, seq_len, dim = hidden_states.shape
        flat_tokens = hidden_states.reshape(-1, dim)
        _, scores, indices = self.router(flat_tokens)
        mixed = self.experts(flat_tokens, indices, scores)
        return mixed.reshape(bsz, seq_len, dim), scores
class GptOssRotaryEmbedding(Qwen2RotaryEmbedding):
    """Rotary embedding emitting cos/sin tables of size head_dim // 2.

    Unlike Llama-style rotary embeddings there is no `torch.cat((freqs, freqs))`:
    `emb` is the raw frequency table, so the downstream rotation splits the head
    dimension in half (see `_apply_rotary_emb`).
    """

    @torch.no_grad()
    @dynamic_rope_update  # power user: used with advanced RoPE types (e.g. dynamic rope)
    def forward(self, x, position_ids):
        # inv_freq: (dim/2,) -> (batch, dim/2, 1) so it can matmul against positions.
        inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
        position_ids_expanded = position_ids[:, None, :].float()
        # "mps" is special-cased to "cpu" — presumably an autocast workaround; confirm.
        device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
        with maybe_autocast(device_type=device_type, enabled=False):  # Force float32
            freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
            emb = freqs  # no frequency-table duplication — main diff with Llama
            cos = emb.cos() * self.attention_scaling
            sin = emb.sin() * self.attention_scaling
        return cos.to(x.dtype), sin.to(x.dtype)
def _apply_rotary_emb(
    x: torch.Tensor,
    cos: torch.Tensor,
    sin: torch.Tensor,
) -> torch.Tensor:
    """Rotate the two halves of the last dimension of `x` by the (cos, sin) tables."""
    lower, upper = torch.chunk(x, 2, dim=-1)
    return torch.cat((lower * cos - upper * sin, upper * cos + lower * sin), dim=-1)


def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
    """Apply rotary position embeddings to query and key states.

    `unsqueeze_dim` inserts a broadcast axis (the head axis by default) into the
    cos/sin tables; `position_ids` is unused and kept for API compatibility.
    """
    cos, sin = cos.unsqueeze(unsqueeze_dim), sin.unsqueeze(unsqueeze_dim)
    return _apply_rotary_emb(q, cos, sin), _apply_rotary_emb(k, cos, sin)
def eager_attention_forward(
    module: nn.Module,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attention_mask: torch.Tensor | None,
    scaling: float,
    dropout: float = 0.0,
    **kwargs,
):
    """Eager attention with an extra per-head "sink" column.

    `module.sinks` adds one learnable logit per head to every query's softmax,
    letting heads park probability mass on a column that contributes no value
    output (the column is sliced off after the softmax).

    Returns:
        `(attn_output, attn_weights)` with `attn_output` transposed to
        (batch, seq, heads, head_dim) for the caller's reshape.
    """
    # Repeat key/value heads across query groups before the matmul.
    key_states = repeat_kv(key, module.num_key_value_groups)
    value_states = repeat_kv(value, module.num_key_value_groups)
    attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
    if attention_mask is not None:
        attn_weights = attn_weights + attention_mask
    # Broadcast the per-head sink logits to (batch, heads, q_len, 1) and append them.
    sinks = module.sinks.reshape(1, -1, 1, 1).expand(query.shape[0], -1, query.shape[-2], -1)
    combined_logits = torch.cat([attn_weights, sinks], dim=-1)
    # This was not in the original implementation and slightly affect results; it prevents overflow in BF16/FP16
    # when training with bsz>1 we clamp max values.
    combined_logits = combined_logits - combined_logits.max(dim=-1, keepdim=True).values
    probs = F.softmax(combined_logits, dim=-1, dtype=combined_logits.dtype)
    scores = probs[..., :-1]  # we drop the sink here
    attn_weights = nn.functional.dropout(scores, p=dropout, training=module.training).to(value_states.dtype)
    attn_output = torch.matmul(attn_weights, value_states)
    attn_output = attn_output.transpose(1, 2).contiguous()
    return attn_output, attn_weights
class GptOssAttention(Qwen2Attention):
    """Qwen2-style attention extended with learned per-head "sink" logits.

    The sinks are passed to the attention backend as `s_aux` (and consumed
    directly by `eager_attention_forward` on the eager path).
    """

    def __init__(self, config: GptOssConfig, layer_idx: int):
        super().__init__(config, layer_idx)
        # Projections are (re)defined here so bias usage follows `config.attention_bias`.
        self.q_proj = nn.Linear(
            config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias
        )
        self.k_proj = nn.Linear(
            config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
        )
        self.v_proj = nn.Linear(
            config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
        )
        self.o_proj = nn.Linear(
            config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.attention_bias
        )
        # One learnable sink logit per attention head.
        self.sinks = nn.Parameter(torch.empty(config.num_attention_heads))

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: tuple[torch.Tensor, torch.Tensor],
        attention_mask: torch.Tensor | None,
        past_key_values: Cache | None = None,
        cache_position: torch.LongTensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple[torch.Tensor, torch.Tensor]:
        input_shape = hidden_states.shape[:-1]
        hidden_shape = (*input_shape, -1, self.head_dim)
        # Project then reshape to (batch, heads, seq, head_dim).
        query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        cos, sin = position_embeddings
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
        if past_key_values is not None:
            # Append the new key/value states to this layer's cache slot.
            cache_kwargs = {"cache_position": cache_position}
            key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
        # Resolve the configured attention backend, falling back to the eager path above.
        attention_interface: Callable = ALL_ATTENTION_FUNCTIONS.get_interface(
            self.config._attn_implementation, eager_attention_forward
        )
        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=0.0 if not self.training else self.attention_dropout,
            scaling=self.scaling,
            sliding_window=self.sliding_window,
            s_aux=self.sinks,  # diff with Llama
            **kwargs,
        )
        attn_output = attn_output.reshape(*input_shape, -1).contiguous()
        attn_output = self.o_proj(attn_output)
        return attn_output, attn_weights
class GptOssDecoderLayer(LlamaDecoderLayer):
    """Transformer layer: pre-norm self-attention followed by a pre-norm MoE MLP."""

    def __init__(self, config: GptOssConfig, layer_idx: int):
        super().__init__(config, layer_idx)
        self.hidden_size = config.hidden_size
        self.self_attn = GptOssAttention(config=config, layer_idx=layer_idx)
        self.mlp = GptOssMLP(config)
        self.input_layernorm = GptOssRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.post_attention_layernorm = GptOssRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        # Full vs. sliding attention is decided per layer by the config.
        self.attention_type = config.layer_types[layer_idx]

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: Cache | None = None,
        use_cache: bool | None = False,
        cache_position: torch.LongTensor | None = None,
        position_embeddings: tuple[torch.Tensor, torch.Tensor] | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> torch.Tensor:
        # Attention sub-block with residual connection.
        skip = hidden_states
        attn_out, _ = self.self_attn(
            hidden_states=self.input_layernorm(hidden_states),
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            use_cache=use_cache,
            cache_position=cache_position,
            position_embeddings=position_embeddings,
            **kwargs,
        )
        hidden_states = skip + attn_out
        # MoE sub-block with residual connection (router scores are discarded here).
        skip = hidden_states
        mlp_out, _ = self.mlp(self.post_attention_layernorm(hidden_states))
        return skip + mlp_out
class GptOssPreTrainedModel(LlamaPreTrainedModel):
    # Norm layers are kept in fp32 when the rest of the model runs in half precision.
    _keep_in_fp32_modules = ["post_attention_layernorm", "input_layernorm", "norm"]
    # NOTE(review): SDPA is disabled — presumably because it cannot express the extra
    # sink column used in eager_attention_forward; confirm.
    _supports_sdpa = False
    _compatible_flash_implementations = ["kernels-community/vllm-flash-attn3"]
    # Hooks used by the output-capturing machinery to record intermediate tensors.
    _can_record_outputs = {
        "router_logits": OutputRecorder(GptOssTopKRouter, index=0),
        "hidden_states": GptOssDecoderLayer,
        "attentions": GptOssAttention,
    }

    @torch.no_grad()
    def _init_weights(self, module):
        """Run the generic init, then initialize GPT-OSS specific parameters."""
        PreTrainedModel._init_weights(self, module)
        std = self.config.initializer_range
        if isinstance(module, GptOssExperts):
            # Fused expert weights: normal init for weights, zeros for biases.
            init.normal_(module.gate_up_proj, mean=0.0, std=std)
            init.zeros_(module.gate_up_proj_bias)
            init.normal_(module.down_proj, mean=0.0, std=std)
            init.zeros_(module.down_proj_bias)
        elif isinstance(module, GptOssAttention):
            init.normal_(module.sinks, mean=0.0, std=std)
        elif isinstance(module, GptOssTopKRouter):
            init.normal_(module.weight, mean=0.0, std=std)
            init.normal_(module.bias, mean=0.0, std=std)
class GptOssModel(MixtralModel):
    """Bare GPT-OSS decoder stack (embedding -> decoder layers -> final norm)."""

    _no_split_modules = ["GptOssDecoderLayer"]

    @merge_with_config_defaults
    @capture_outputs
    @auto_docstring
    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: Cache | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        use_cache: bool | None = None,
        cache_position: torch.LongTensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> MoeModelOutputWithPast:
        # Exactly one of input_ids / inputs_embeds must be provided.
        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
        if use_cache and past_key_values is None:
            past_key_values = DynamicCache(config=self.config)
        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)
        if cache_position is None:
            # New positions continue from whatever is already in the cache.
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position = torch.arange(
                past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
            )
        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)
        # It may already have been prepared by e.g. `generate`
        if not isinstance(causal_mask_mapping := attention_mask, dict):
            mask_kwargs = {
                "config": self.config,
                "inputs_embeds": inputs_embeds,
                "attention_mask": attention_mask,
                "cache_position": cache_position,
                "past_key_values": past_key_values,
            }
            # Build both mask flavors once; each layer picks by its attention_type.
            causal_mask_mapping = {
                "full_attention": create_causal_mask(**mask_kwargs),
                "sliding_attention": create_sliding_window_causal_mask(**mask_kwargs),
            }
        hidden_states = inputs_embeds
        # The cos/sin tables are computed once and shared by every layer.
        position_embeddings = self.rotary_emb(hidden_states, position_ids)
        for decoder_layer in self.layers:
            hidden_states = decoder_layer(
                hidden_states,
                attention_mask=causal_mask_mapping[decoder_layer.attention_type],
                position_embeddings=position_embeddings,
                position_ids=position_ids,
                past_key_values=past_key_values,
                use_cache=use_cache,
                cache_position=cache_position,
                **kwargs,
            )
        hidden_states = self.norm(hidden_states)
        return MoeModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values,
        )
class GptOssForCausalLM(MixtralForCausalLM):
    # Mixtral's causal-LM head works unchanged on the GPT-OSS backbone.
    pass


class GptOssForSequenceClassification(MixtralForSequenceClassification):
    # Sequence-classification head inherited as-is from Mixtral.
    pass


class GptOssForTokenClassification(MixtralForTokenClassification):
    # Token-classification head inherited as-is from Mixtral.
    pass
# Explicit public API of this module.
__all__ = [
    "GptOssForCausalLM",
    "GptOssForSequenceClassification",
    "GptOssForTokenClassification",
    "GptOssModel",
    "GptOssPreTrainedModel",
]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/gpt_oss/modular_gpt_oss.py",
"license": "Apache License 2.0",
"lines": 369,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/quantizers/quantizer_mxfp4.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from .base import HfQuantizer
if TYPE_CHECKING:
from ..modeling_utils import PreTrainedModel
from ..utils import (
is_accelerate_available,
is_kernels_available,
is_torch_available,
is_triton_available,
logging,
)
from .quantizers_utils import get_module_from_name
if is_torch_available():
import torch
from ..core_model_loading import WeightConverter
logger = logging.get_logger(__name__)
# NOTE(review): this module-level cache appears unused — Mxfp4HfQuantizer caches the
# kernel module on the instance (self.triton_kernels_hub) instead; confirm before removing.
triton_kernels_hub = None
class Mxfp4HfQuantizer(HfQuantizer):
    """
    MXFP4 quantization, backed by the `kernels-community/gpt-oss-triton-kernels` Triton kernels.
    """

    requires_calibration = False

    def __init__(self, quantization_config, **kwargs):
        super().__init__(quantization_config, **kwargs)
        # Lazily populated handle to the triton kernels hub module (see _lazy_import_kernels).
        self.triton_kernels_hub = None

    def _lazy_import_kernels(self):
        """Lazy import and initialize kernels only when needed"""
        if self.triton_kernels_hub is None:
            try:
                from ..integrations.hub_kernels import get_kernel

                self.triton_kernels_hub = get_kernel("kernels-community/gpt-oss-triton-kernels")
            except ImportError:
                raise ImportError("kernels package is required for MXFP4 quantization")
        return self.triton_kernels_hub

    def validate_environment(self, *args, **kwargs):
        """Check torch/accelerate/triton availability and device support.

        Pre-quantized checkpoints on unsupported setups degrade to a bf16
        dequantization (with a warning); quantizing on the fly raises instead.
        """
        if not is_torch_available():
            raise ImportError(
                "Using mxfp4 quantization requires torch"
                "Please install the latest version of torch ( pip install --upgrade torch )"
            )
        if self.quantization_config.dequantize:
            return
        if not is_accelerate_available():
            raise ImportError("Using mxfp4 requires Accelerate: `pip install accelerate`")
        device = torch.accelerator.current_accelerator() or torch.device("cpu")
        if device.type not in ["cuda", "xpu", "cpu"]:
            if self.pre_quantized:
                logger.warning_once(
                    f"Using MXFP4 quantized models requires model on cuda/xpu/cpu, but found {device}, we will default to dequantizing the model to bf16. To use mxfp4, please disable the current accelerator."
                )
                self.quantization_config.dequantize = True
                return
            else:
                raise RuntimeError(
                    f"Quantizing a model using MXFP4 requires model on cuda/xpu/cpu, but found {device}. To use mxfp4, please disable the current accelerator."
                )
        # Per-backend support matrix: XPU/CPU need triton >= 3.5.0; CUDA needs
        # triton >= 3.4.0 plus compute capability >= 7.5.
        if torch.xpu.is_available():
            is_device_supported_mxfp4 = True
            kernels_available = is_triton_available("3.5.0") and is_kernels_available()
        elif torch.cuda.is_available():
            compute_capability = torch.cuda.get_device_capability()
            is_device_supported_mxfp4 = compute_capability >= (7, 5)
            kernels_available = is_triton_available("3.4.0") and is_kernels_available()
        elif device.type == "cpu":
            is_device_supported_mxfp4 = True
            kernels_available = is_triton_available("3.5.0") and is_kernels_available()
        else:
            is_device_supported_mxfp4 = False
            kernels_available = False
        if self.pre_quantized:
            # On unsupported GPUs or without kernels, we will dequantize the model to bf16
            if not is_device_supported_mxfp4:
                logger.warning_once(
                    "MXFP4 quantization is only supported on GPUs with compute capability >= 7.5 (e.g T4, A100, L4, H100, or B200) or XPUs (e.g Intel® Data Center GPU Max Series) "
                    "We will default to dequantizing the model to bf16."
                )
                self.quantization_config.dequantize = True
                return
            if not kernels_available:
                logger.warning_once(
                    "MXFP4 quantization requires Triton and kernels installed: CUDA requires Triton >= 3.4.0, XPU requires Triton >= 3.5.0, we will default to dequantizing the model to bf16"
                )
                self.quantization_config.dequantize = True
                return
        elif not is_device_supported_mxfp4:
            # we can't quantize the model in this case so we raise an error
            raise ValueError(
                "MXFP4 quantization is only supported on GPUs with compute capability >= 7.5 (e.g T4, A100, L4, H100, or B200) or XPUs (e.g Intel® Data Center GPU Max Series) or CPU"
            )
        elif not kernels_available:
            # we can't quantize the model in this case so we raise an error
            raise ValueError(
                "MXFP4 quantization requires Triton and kernels installed: CUDA requires Triton >= 3.4.0, XPU/CPU requires Triton >= 3.5.0"
            )
        if not self.pre_quantized:
            # Quantizing on the fly strictly requires the triton kernels; load them now.
            self._lazy_import_kernels()
        device_map = kwargs.get("device_map")
        if device_map is not None and isinstance(device_map, dict):
            if not self.pre_quantized and "disk" in device_map.values():
                raise ValueError(
                    "You are attempting to load an FP4 model with a device_map that contains a disk device."
                    "This is not supported when the model is quantized on the fly. "
                    "Please use a quantized checkpoint or remove the disk device from the device_map."
                )

    def param_needs_quantization(self, model: "PreTrainedModel", param_name: str, **kwargs) -> bool:
        """Only Mxfp4GptOssExperts weights are quantized; their biases stay as-is."""
        from ..integrations import Mxfp4GptOssExperts

        module, tensor_name = get_module_from_name(model, param_name)
        if isinstance(module, Mxfp4GptOssExperts):
            if tensor_name in ["down_proj_bias", "gate_up_proj_bias"]:
                return False
            return True
        return False

    def _process_model_after_weight_loading(self, model: "PreTrainedModel", **kwargs):
        # clean cache due to triton ops
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
        elif torch.xpu.is_available():
            torch.xpu.empty_cache()

    def _process_model_before_weight_loading(
        self,
        model: "PreTrainedModel",
        use_kernels: bool = False,
        **kwargs,
    ):
        """Swap eligible modules for MXFP4 counterparts, or arrange bf16 dequantization."""
        from ..integrations import replace_with_mxfp4_linear

        # if we are using kernels, we can't use the quantized model, since the forward pass is different and needs special handling
        # only CPU kernels can work with pre-quantized models
        device = torch.accelerator.current_accelerator() or torch.device("cpu")
        if use_kernels and device.type not in ["cpu"]:
            logger.warning_once(
                "You are using full precision kernels, we will dequantize the model to bf16. "
                "To use the quantized model with quantization kernels, please set use_kernels=False"
            )
            self.quantization_config.dequantize = True
        if not use_kernels and device.type in ["cpu"]:
            logger.warning_once(
                "MXFP4 inference on CPU requires use_kernels=True, but use_kernels is disabled. "
                "We will dequantize the model to bf16. To run MXFP4 natively on CPU, please set use_kernels=True."
            )
            self.quantization_config.dequantize = True
        self.modules_to_not_convert = self.get_modules_to_not_convert(
            model, self.quantization_config.modules_to_not_convert, model._keep_in_fp32_modules
        )
        model = replace_with_mxfp4_linear(
            model, modules_to_not_convert=self.modules_to_not_convert, quantization_config=self.quantization_config
        )

    def update_tp_plan(self, config):
        """Route the expert block/scale tensors through grouped GEMM in the TP plan."""
        if "GptOssConfig" in config.__class__.__name__:
            if getattr(config, "base_model_tp_plan", None) is not None:
                config.base_model_tp_plan.update(
                    {
                        "layers.*.mlp.experts.gate_up_proj_blocks": "grouped_gemm",
                        "layers.*.mlp.experts.gate_up_proj_scales": "grouped_gemm",
                        "layers.*.mlp.experts.down_proj_blocks": "grouped_gemm",
                        "layers.*.mlp.experts.down_proj_scales": "grouped_gemm",
                    }
                )
        return config

    def update_ep_plan(self, config):
        """Same as `update_tp_plan`, but for the expert-parallel plan."""
        if "GptOssConfig" in config.__class__.__name__:
            if getattr(config, "base_model_ep_plan", None) is not None:
                config.base_model_ep_plan.update(
                    {
                        "layers.*.mlp.experts.gate_up_proj_blocks": "grouped_gemm",
                        "layers.*.mlp.experts.gate_up_proj_scales": "grouped_gemm",
                        "layers.*.mlp.experts.down_proj_blocks": "grouped_gemm",
                        "layers.*.mlp.experts.down_proj_scales": "grouped_gemm",
                    }
                )
        return config

    def get_state_dict_and_metadata(self, model):
        """Serialize triton-layout expert weights back to `*_blocks` / `*_scales` tensors."""
        from ..integrations import Mxfp4GptOssExperts

        state_dict = model.state_dict()
        num_local_experts = getattr(model.config, "num_local_experts", 32)
        hidden_size = getattr(model.config, "hidden_size", 2880)
        for name, module in model.named_modules():
            if not (
                isinstance(module, Mxfp4GptOssExperts)
                and hasattr(module, "gate_up_proj")
                and hasattr(module, "down_proj")
            ):
                continue
            for proj in ("gate_up_proj", "down_proj"):
                triton_tensor = getattr(module, proj)
                precision_config = getattr(module, f"{proj}_precision_config")
                # Undo the kernel's swizzled memory layout before exporting.
                blocks = triton_tensor.storage.layout.unswizzle_data(triton_tensor.storage.data).transpose(-1, -2)
                # NOTE(review): the hard-coded 90 looks tied to the checkpoint's MXFP4
                # block packing — confirm against the serialization format.
                if proj == "gate_up_proj":
                    blocks = blocks.reshape(num_local_experts, -1, 90, 16)
                else:
                    blocks = blocks.reshape(num_local_experts, hidden_size, 90, -1)
                scales = precision_config.weight_scale.storage.layout.unswizzle_data(
                    precision_config.weight_scale.storage.data
                ).transpose(-1, -2)
                state_dict[f"{name}.{proj}_blocks"] = blocks
                state_dict[f"{name}.{proj}_scales"] = scales
        metadata = {}
        return state_dict, metadata

    def is_serializable(self):
        return True

    @property
    def is_trainable(self) -> bool:
        # Training through the MXFP4 path is unsupported; users must dequantize first.
        logger.warning_once(
            "MXFP4 quantization don't support training, please consider dequantizing the model first by passing quantization_config=Mxfp4Config(dequantize=True) to .from_pretrained()"
        )
        return False

    def get_quantize_ops(self):
        from ..integrations.mxfp4 import Mxfp4Quantize

        return Mxfp4Quantize(self)

    def get_weight_conversions(self):
        """Weight converters: dequantize blocks/scales to bf16, or deserialize them into the triton layout."""
        from ..integrations.mxfp4 import Mxfp4Dequantize, Mxfp4Deserialize

        if self.pre_quantized and self.quantization_config.dequantize:
            return [
                WeightConverter(
                    source_patterns=["down_proj_blocks", "down_proj_scales"],
                    target_patterns=r"down_proj$",
                    operations=[Mxfp4Dequantize(self)],
                ),
                WeightConverter(
                    source_patterns=["gate_up_proj_blocks", "gate_up_proj_scales"],
                    target_patterns=["gate_up_proj$"],
                    operations=[Mxfp4Dequantize(self)],
                ),
            ]
        return [
            WeightConverter(
                source_patterns=["gate_up_proj_blocks", "gate_up_proj_scales"],
                target_patterns=r"gate_up_proj$",
                operations=[Mxfp4Deserialize(self)],
            ),
            WeightConverter(
                source_patterns=["down_proj_blocks", "down_proj_scales"],
                target_patterns=r"down_proj$",
                operations=[Mxfp4Deserialize(self)],
            ),
        ]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/quantizers/quantizer_mxfp4.py",
"license": "Apache License 2.0",
"lines": 248,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:tests/models/gpt_oss/test_modeling_gpt_oss.py | # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch GptOss model."""
import difflib
import inspect
import json
import os
import subprocess
import tempfile
import unittest
from pathlib import Path
import pytest
from parameterized import parameterized
from transformers import (
AutoModelForCausalLM,
AutoTokenizer,
is_torch_available,
)
from transformers.testing_utils import (
cleanup,
require_deterministic_for_xpu,
require_kernels,
require_torch,
require_torch_gpu,
slow,
torch_device,
)
from ...causal_lm_tester import CausalLMModelTest, CausalLMModelTester
# Torch-only imports: GptOssModel is only importable when torch is installed.
if is_torch_available():
    import torch

    from transformers import (
        GptOssModel,
    )

# Number of visible accelerator devices; used as --nproc_per_node for the
# torchrun-based distributed integration tests below.
# NOTE(review): this probe references `torch` directly — assumes torch is
# importable when this module is collected; confirm the probe is (or should
# be) nested under the `is_torch_available()` guard in the original file.
if torch.cuda.is_available():
    NUM_GPUS = torch.cuda.device_count()
elif hasattr(torch, "xpu") and torch.xpu.is_available():
    NUM_GPUS = torch.xpu.device_count()
else:
    NUM_GPUS = 0
class GptOssModelTester(CausalLMModelTester):
    """Tester wiring GptOss into the shared causal-LM test harness."""

    # Only resolvable when torch is installed (GptOssModel import is guarded above).
    if is_torch_available():
        base_model_class = GptOssModel
@require_torch
class GptOssModelTest(CausalLMModelTest, unittest.TestCase):
    """Common model-test suite for GptOss, driven by `GptOssModelTester`."""

    _is_stateful = True
    model_split_percents = [0.5, 0.6]
    model_tester_class = GptOssModelTester

    @require_kernels
    @pytest.mark.flash_attn_test
    @require_torch_gpu
    def test_default_flash_implementation_auto_correction(self):
        """
        Tests that setting attn_implementation="flash_attention_2" during model initialization
        automatically corrects to the model's `_compatible_flash_implementations`.
        """
        from kernels import get_kernel

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        expected_kernel = "kernels-community/vllm-flash-attn3"
        flash = get_kernel(expected_kernel)
        if flash is None:
            self.skipTest(f"{expected_kernel} is not available, skipping auto-correction test.")
        # Option 1: Auto correction on setting config on init
        config._attn_implementation = "flash_attention_2"
        tmp_model = GptOssModel(config).to(device=torch_device, dtype=torch.bfloat16)
        self.assertEqual(tmp_model.config._attn_implementation, expected_kernel)
        # Option 2: Auto correction on load time
        with tempfile.TemporaryDirectory() as tmp_dir_name:
            tmp_model.save_pretrained(tmp_dir_name)
            model = GptOssModel.from_pretrained(tmp_dir_name, attn_implementation="flash_attention_2").to(
                device=torch_device
            )
            self.assertEqual(model.config._attn_implementation, expected_kernel)
        # Option 3: Auto correction on `set_attn_implementation`
        model.set_attn_implementation("eager")
        self.assertEqual(model.config._attn_implementation, "eager")
        model.set_attn_implementation("flash_attention_2")
        self.assertEqual(model.config._attn_implementation, expected_kernel)
        # Verify model still works
        with torch.no_grad():
            output = model(**inputs_dict)
        self.assertIsNotNone(output)

    @unittest.skip("GptOss's forcefully disables sdpa due to Sink")
    def test_sdpa_can_dispatch_non_composite_models(self):
        pass

    @unittest.skip("GptOss's eager attn/sdpa attn outputs are expected to be different")
    def test_eager_matches_sdpa_generate(self):
        pass

    @unittest.skip("GptOss eager/FA2 attention outputs are expected to be different")
    def test_flash_attn_2_equivalence(self):
        pass

    @unittest.skip("Most probably because of the MOE, the moe and router does not ignore padding tokens")
    def test_eager_padding_matches_padding_free_with_position_ids(self):
        pass

    @unittest.skip("GptOss does not support flex officially")
    def test_flex_attention_with_grads(self):
        pass

    @unittest.skipIf(torch_device == "cpu", "GptOss does not support flex officially")
    def test_generate_compile_model_forward_fullgraph(self):
        # Delegates to the shared implementation; only the CPU skip is GptOss-specific.
        return super().test_generate_compile_model_forward_fullgraph()

    def test_reverse_loading_mapping(self, check_keys_were_modified=False):
        # Override only to default `check_keys_were_modified` to False for GptOss.
        super().test_reverse_loading_mapping(check_keys_were_modified)
# Expected generations for the integration matrix, keyed by configuration string.
RESULTS_PATH = Path(__file__).parent.parent.parent / "fixtures/gpt_oss/integration_tests.json"


# ------------------------
# Worker function for distributed torchrun
# ------------------------
def distributed_worker(quantized, model_size, kernels, attn_impl, mode):
    """This is the function that will be executed by torchrun workers.

    Loads gpt-oss with `tp_plan="auto"`, greedy-generates 20 tokens for two
    fixed prompts, and (on rank 0 only) compares the decoded text against the
    expected results stored in RESULTS_PATH. Imports are local because this
    function's source is extracted with `inspect.getsource` and written into a
    standalone worker script.
    """
    import os

    from transformers import AutoModelForCausalLM, AutoTokenizer
    from transformers.testing_utils import torch_device

    def generate_config_key(quantized, model, kernels, attn_impl, mode):
        """Generate a key for the restructured integration test results."""
        return f"device={torch_device}|quantized={str(quantized).lower()}|model={model}|kernels={str(kernels).lower()}|attn_impl={attn_impl}|mode={mode}"

    input_text = [
        "Roses are red, violets",
        "How are you? Tell me the name of the president of",
    ]
    # Convert args: they arrive as strings because the worker is invoked from a
    # generated script with string-formatted arguments.
    quantized = quantized.lower() == "true"
    kernels = kernels.lower() == "true"
    # Distributed model loading
    model_id = f"openai/gpt-oss-{model_size}"
    model = AutoModelForCausalLM.from_pretrained(
        model_id,
        dtype="auto",
        tp_plan="auto",  # distributed inference
        use_kernels=kernels,
    ).to(torch_device)
    model.set_attn_implementation(attn_impl)
    tokenizer = AutoTokenizer.from_pretrained(model_id, padding_side="left")
    # Inference (greedy, so output should be deterministic per configuration)
    inputs = tokenizer(input_text, return_tensors="pt", padding=True).to(torch_device)
    output = model.generate(**inputs, max_new_tokens=20, do_sample=False)
    output_texts = tokenizer.batch_decode(output, skip_special_tokens=False)
    # Only rank 0 writes results and validates against expected outputs
    if int(os.environ.get("RANK", "0")) == 0:
        # Generate key to look up expected outputs
        key = generate_config_key(quantized, model_size, kernels, attn_impl, mode)
        # Load expected outputs from restructured JSON
        if os.path.exists(RESULTS_PATH):
            with open(RESULTS_PATH, "r") as f:
                expected_results = json.load(f)
            # Check if we have expected results for this configuration
            if key in expected_results:
                expected_outputs = expected_results[key]
                # Compare actual outputs with expected outputs
                assert len(output_texts) == len(expected_outputs), f"Output length mismatch for {key}"
                for i, (actual, expected) in enumerate(zip(output_texts, expected_outputs)):
                    actual_stripped = actual.strip()
                    expected_stripped = expected.strip()
                    # Make lengths match by taking minimum length to be resilient to generation differences
                    min_length = min(len(actual_stripped), len(expected_stripped))
                    actual_truncated = actual_stripped[:min_length]
                    expected_truncated = expected_stripped[:min_length]
                    if actual_truncated != expected_truncated:
                        diff = "\n".join(
                            difflib.unified_diff(
                                expected_truncated.splitlines(keepends=True),
                                actual_truncated.splitlines(keepends=True),
                                fromfile=f"expected[{i}]",
                                tofile=f"actual[{i}]",
                                lineterm="",
                            )
                        )
                        raise AssertionError(
                            f"Output mismatch at index {i} for {key}:\n"
                            f"Expected: '{expected_stripped}'\n"
                            f"Actual: '{actual_stripped}'\n"
                            f"Diff (truncated to min length {min_length}):\n{diff}"
                        )
                print(f"✓ Outputs match expected results for {key}")
            else:
                print(f"Warning: No expected results found for configuration: {key}")
        else:
            print(f"Warning: Results file {RESULTS_PATH} not found")
@slow
class GptOssIntegrationTest(unittest.TestCase):
    """Integration tests exercising real gpt-oss checkpoints (20b and 120b)."""

    input_text = [
        "Roses are red, violets",
        "How are you? Tell me the name of the president of",
    ]

    @staticmethod
    def generate_config_key(quantized, model, kernels, attn_impl, mode):
        """Generate a key for the restructured integration test results."""
        return f"device={torch_device}|quantized={str(quantized).lower()}|model={model}|kernels={str(kernels).lower()}|attn_impl={attn_impl}|mode={mode}"

    def setUp(self):
        cleanup(torch_device, gc_collect=True)

    def tearDown(self):
        cleanup(torch_device, gc_collect=True)

    # ------------------------
    # Non-distributed inference
    # ------------------------
    def load_and_forward(self, model_id, attn_implementation, input_text, mode="eval", **pretrained_kwargs):
        """Load `model_id`, greedy-generate 20 tokens for `input_text`, return decoded strings."""
        if torch_device == "cpu":
            if attn_implementation == "kernels-community/vllm-flash-attn3":
                self.skipTest("vllm-flash-attn3 is not supported on CPU.")
            if pretrained_kwargs.get("kernels", False) and mode == "train":
                self.skipTest("CPU kernels only support inference.")
        model = AutoModelForCausalLM.from_pretrained(
            model_id,
            dtype=torch.bfloat16,
            device_map="auto",
            attn_implementation=attn_implementation,
            **pretrained_kwargs,
        )
        # Set the correct mode
        if mode == "train":
            model.train()
        else:
            model.eval()
        tokenizer = AutoTokenizer.from_pretrained(model_id, padding_side="left")
        inputs = tokenizer(input_text, return_tensors="pt", padding=True).to(model.device)
        output = model.generate(**inputs, max_new_tokens=20, do_sample=False)
        output_text = tokenizer.batch_decode(output, skip_special_tokens=True)
        return output_text

    # ------------------------
    # Distributed inference using inspect
    # ------------------------
    @staticmethod
    def run_distributed_test(quantized, model, kernels, attn_impl, mode):
        """Launch torchrun using a temporary worker file generated from inspect.getsource()."""
        import textwrap

        # Extract worker function source dynamically
        worker_src = inspect.getsource(distributed_worker)
        # Create a temp file that calls the worker
        script_code = f"""
import sys
import json
RESULTS_PATH = "{RESULTS_PATH}"
{worker_src}
if __name__ == "__main__":
    distributed_worker("{quantized}", "{model}", "{kernels}", "{attn_impl}", "{mode}")
"""
        # Dedent for proper formatting
        script_code = textwrap.dedent(script_code)
        # Write to temp file; delete=False so torchrun can open it by path.
        with tempfile.NamedTemporaryFile("w", suffix="_worker.py", delete=False) as tmp:
            tmp.write(script_code)
            tmp_path = tmp.name
        # Launch torchrun across all visible devices
        cmd = [
            "torchrun",
            f"--nproc_per_node={NUM_GPUS}",
            tmp_path,
        ]
        subprocess.run(cmd, check=True)
        # Cleanup
        os.remove(tmp_path)

    # ------------------------
    # Shared parameterization
    # (quantized, model size, use_kernels, attn_impl, mode)
    # ------------------------
    PARAMETERS = [
        (False, "20b", False, "eager", "eval"),
        (False, "20b", False, "eager", "train"),
        (False, "20b", False, "kernels-community/vllm-flash-attn3", "eval"),
        (False, "20b", False, "kernels-community/vllm-flash-attn3", "train"),
        (False, "20b", True, "eager", "eval"),
        (False, "20b", True, "eager", "train"),
        (False, "20b", True, "kernels-community/vllm-flash-attn3", "eval"),
        (False, "20b", True, "kernels-community/vllm-flash-attn3", "train"),
        (True, "20b", False, "eager", "eval"),
        (True, "20b", False, "eager", "train"),
        (True, "20b", False, "kernels-community/vllm-flash-attn3", "eval"),
        (True, "20b", False, "kernels-community/vllm-flash-attn3", "train"),
        (True, "20b", True, "eager", "eval"),
        (True, "20b", True, "eager", "train"),
        (True, "20b", True, "kernels-community/vllm-flash-attn3", "eval"),
        (True, "20b", True, "kernels-community/vllm-flash-attn3", "train"),
        (False, "120b", False, "eager", "eval"),
        (False, "120b", False, "eager", "train"),
        (False, "120b", False, "kernels-community/vllm-flash-attn3", "eval"),
        (False, "120b", False, "kernels-community/vllm-flash-attn3", "train"),
        (False, "120b", True, "eager", "eval"),
        (False, "120b", True, "eager", "train"),
        (False, "120b", True, "kernels-community/vllm-flash-attn3", "eval"),
        (False, "120b", True, "kernels-community/vllm-flash-attn3", "train"),
        (True, "120b", False, "eager", "eval"),
        (True, "120b", False, "eager", "train"),
        (True, "120b", False, "kernels-community/vllm-flash-attn3", "eval"),
        (True, "120b", False, "kernels-community/vllm-flash-attn3", "train"),
        (True, "120b", True, "eager", "eval"),
        (True, "120b", True, "eager", "train"),
        (True, "120b", True, "kernels-community/vllm-flash-attn3", "eval"),
        (True, "120b", True, "kernels-community/vllm-flash-attn3", "train"),
    ]

    # ------------------------
    # Non-distributed test
    # ------------------------
    @parameterized.expand(PARAMETERS)
    @require_deterministic_for_xpu
    def test_model_outputs(self, quantized, model, kernels, attn_impl, mode):
        """Generate with one matrix configuration and compare against stored expectations."""
        if torch_device == "cpu":
            if attn_impl == "kernels-community/vllm-flash-attn3":
                self.skipTest("vllm-flash-attn3 is not supported on CPU.")
            if kernels and mode == "train":
                self.skipTest("CPU kernels only support inference.")
        if torch_device == "xpu" and attn_impl == "kernels-community/vllm-flash-attn3":
            self.skipTest("flash attention 3 is not supported on XPU yet.")
        model_id = f"openai/gpt-oss-{model}"
        output_texts = self.load_and_forward(
            model_id,
            attn_impl,
            self.input_text,
            mode=mode,
            use_kernels=kernels,
        )
        # Generate key to look up expected outputs
        key = self.generate_config_key(quantized, model, kernels, attn_impl, mode)
        # Load expected outputs from restructured JSON
        if os.path.exists(RESULTS_PATH):
            with open(RESULTS_PATH, "r") as f:
                expected_results = json.load(f)
            # Check if we have expected results for this configuration
            if key in expected_results:
                expected_outputs = expected_results[key]
                # Compare actual outputs with expected outputs
                self.assertEqual(len(output_texts), len(expected_outputs), f"Output length mismatch for {key}")
                for i, (actual, expected) in enumerate(zip(output_texts, expected_outputs)):
                    actual_stripped = actual.strip()
                    expected_stripped = expected.strip()
                    # Make lengths match by taking minimum length to be resilient to generation differences
                    min_length = min(len(actual_stripped), len(expected_stripped))
                    actual_truncated = actual_stripped[:min_length]
                    expected_truncated = expected_stripped[:min_length]
                    if actual_truncated != expected_truncated:
                        diff = "\n".join(
                            difflib.unified_diff(
                                expected_truncated.splitlines(keepends=True),
                                actual_truncated.splitlines(keepends=True),
                                fromfile=f"expected[{i}]",
                                tofile=f"actual[{i}]",
                                lineterm="",
                            )
                        )
                        self.fail(
                            f"Output mismatch at index {i} for {key}:\n"
                            f"Expected: '{expected_stripped}'\n"
                            f"Actual: '{actual_stripped}'\n"
                            f"Diff (truncated to min length {min_length}):\n{diff}"
                        )
            else:
                # If no expected results exist, this is a new configuration
                # We could optionally add it to the results file here
                print(f"Warning: No expected results found for configuration: {key}")
        # Basic sanity checks that always run, even without stored expectations.
        self.assertIsInstance(output_texts, list)
        self.assertTrue(all(isinstance(x, str) for x in output_texts))

    # ------------------------
    # Distributed test
    # ------------------------
    @parameterized.expand(PARAMETERS)
    def test_model_outputs_distributed(self, quantized, model, kernels, attn_impl, mode):
        """Same matrix as above but run under torchrun with tensor parallelism."""
        if torch_device == "cpu":
            self.skipTest("Skip TP on CPU until verified.")
        if torch_device == "xpu" and attn_impl == "kernels-community/vllm-flash-attn3":
            self.skipTest("flash attention 3 is not supported on XPU yet.")
        self.run_distributed_test(quantized, model, kernels, attn_impl, mode)

    # ------------------------
    # Training test
    # ------------------------
    @parameterized.expand(PARAMETERS)
    def test_training_step(self, quantized, model, kernels, attn_impl, mode):
        """One forward/backward pass; checks every trainable param gets a nonzero grad."""
        if torch_device == "cpu":
            if attn_impl == "kernels-community/vllm-flash-attn3":
                self.skipTest("vllm-flash-attn3 is not supported on CPU.")
            if kernels and mode == "train":
                self.skipTest("CPU kernels only support inference.")
        if mode != "train":
            self.skipTest("This test is only for training mode.")
        if quantized:
            self.skipTest("Training test for quantized models is not supported.")
        model_id = f"openai/gpt-oss-{model}"
        model_obj = AutoModelForCausalLM.from_pretrained(
            model_id,
            dtype=torch.bfloat16,
            device_map="auto",
            attn_implementation=attn_impl,
            use_kernels=kernels,
        )
        model_obj.train()
        tokenizer = AutoTokenizer.from_pretrained(model_id, padding_side="left")
        if tokenizer.pad_token is None:
            tokenizer.pad_token = tokenizer.eos_token
        inputs = tokenizer(self.input_text, return_tensors="pt", padding=True).to(model_obj.device)
        # Standard causal-LM objective: labels are the inputs themselves.
        inputs["labels"] = inputs["input_ids"].clone()
        outputs = model_obj(**inputs)
        loss = outputs.loss
        self.assertIsNotNone(loss)
        loss.backward()
        # Check that gradients were computed for all parameters that have a grad field
        for name, param in model_obj.named_parameters():
            if param.requires_grad:
                self.assertIsNotNone(param.grad, f"Parameter '{name}' did not receive a gradient.")
                # Check that gradients are not all zero
                self.assertTrue(
                    torch.sum(torch.abs(param.grad)).item() > 0, f"Gradient for parameter '{name}' is all zeros."
                )

    def test_model_matches_original_20b(self):
        """Token-by-token greedy decode vs. reference logprobs from the original 20b release."""
        input_text = "Roses are red, violets"
        original_output = "Roses are red, violets are blue, I love you, and I love you too."
        # Reference per-step logprobs of the greedily-selected token.
        original_logprobs = torch.tensor(
            [
                -0.037353515625,
                -0.08154296875,
                -1.21875,
                -1.953125,
                -2.234375,
                -0.96875,
                -1.546875,
                -1.640625,
                -0.93359375,
                -1.609375,
                -1.625,
                -0.85546875,
                -1.7265625,
                -0.7421875,
                -2.078125,
                -0.006561279296875,
                -0.10498046875,
                -0.1767578125,
                -0.1240234375,
                -0.099609375,
            ]
        )
        model_id = "openai/gpt-oss-20b"
        model = AutoModelForCausalLM.from_pretrained(
            model_id,
            dtype=torch.bfloat16,
            device_map="auto",
            attn_implementation="eager",
        )
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        tokens = tokenizer(input_text)["input_ids"]
        num_generated_tokens = 0
        with torch.no_grad():
            # Greedy decode 12 steps, re-running the full prefix each step.
            for i in range(12):
                tensors = torch.as_tensor(tokens, dtype=torch.int32, device=model.device).unsqueeze(0)
                logits = model(tensors).logits[0]
                predicted_token = torch.argmax(logits[-1, :], dim=-1).item()
                logprobs = torch.log_softmax(logits[-1, :], dim=-1)
                selected_logprobs = logprobs[predicted_token]
                tokens.append(predicted_token)
                num_generated_tokens += 1
                decoded_token = tokenizer.decode([predicted_token])
                logprob_differences = selected_logprobs - original_logprobs[i]
                print(
                    f"Generated token: {repr(decoded_token)}, logprob: {selected_logprobs}, logprob differences: {logprob_differences}"
                )
                # Loose tolerance: bfloat16 + kernel differences shift logprobs slightly.
                torch.testing.assert_close(
                    selected_logprobs.cpu().to(original_logprobs.dtype), original_logprobs[i], atol=1e-1, rtol=1e-1
                )
        decoded_string = tokenizer.decode(tokens)
        self.assertTrue(original_output.startswith(decoded_string))

    def test_model_matches_original_120b(self):
        """Same reference-logprob comparison as the 20b test, for the 120b checkpoint."""
        input_text = "Roses are red, violets"
        original_output = """Roses are red, violets are blue,
I am a language model, not a human being"""
        original_logprobs = torch.tensor(
            [
                -0.90234375,
                -0.66015625,
                -1.546875,
                -2.703125,
                -2.078125,
                -1.21875,
                -2.484375,
                -0.031982421875,
                -0.84765625,
                -1.890625,
                -0.1923828125,
                -2.046875,
                -1.65625,
                -1.3515625,
                -1.1640625,
                -0.3671875,
                -1.9921875,
                -1.5390625,
                -1.46875,
                -0.85546875,
            ]
        )
        model_id = "openai/gpt-oss-120b"
        model = AutoModelForCausalLM.from_pretrained(
            model_id,
            dtype=torch.bfloat16,
            device_map="auto",
            attn_implementation="eager",
        )
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        tokens = tokenizer(input_text)["input_ids"]
        num_generated_tokens = 0
        with torch.no_grad():
            # Greedy decode 12 steps, re-running the full prefix each step.
            for i in range(12):
                tensors = torch.as_tensor(tokens, dtype=torch.int32, device=model.device).unsqueeze(0)
                logits = model(tensors).logits[0]
                predicted_token = torch.argmax(logits[-1, :], dim=-1).item()
                logprobs = torch.log_softmax(logits[-1, :], dim=-1)
                selected_logprobs = logprobs[predicted_token]
                tokens.append(predicted_token)
                num_generated_tokens += 1
                decoded_token = tokenizer.decode([predicted_token])
                logprob_differences = selected_logprobs - original_logprobs[i]
                print(
                    f"Generated token: {repr(decoded_token)}, logprob: {selected_logprobs}, logprob differences: {logprob_differences}"
                )
                torch.testing.assert_close(
                    selected_logprobs.cpu().to(original_logprobs.dtype), original_logprobs[i], atol=1e-1, rtol=1e-1
                )
        decoded_string = tokenizer.decode(tokens)
        self.assertTrue(original_output.startswith(decoded_string))
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/gpt_oss/test_modeling_gpt_oss.py",
"license": "Apache License 2.0",
"lines": 520,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:tests/quantization/mxfp4/test_mxfp4.py | # Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import tempfile
import unittest
from contextlib import ExitStack, contextmanager
from unittest.mock import patch
from transformers import AutoTokenizer, GptOssForCausalLM, Mxfp4Config
from transformers.testing_utils import (
require_kernels,
require_torch,
require_torch_gpu,
require_torch_large_accelerator,
require_triton,
slow,
torch_device,
)
from transformers.utils import (
is_torch_available,
)
if is_torch_available():
import torch
if torch.cuda.is_available():
REQUIRE_TRITON_MXFP4 = require_triton(min_version="3.4.0")
elif hasattr(torch, "xpu") and torch.xpu.is_available():
REQUIRE_TRITON_MXFP4 = require_triton(min_version="3.5.0")
elif torch_device == "cpu":
REQUIRE_TRITON_MXFP4 = require_triton(min_version="3.5.0")
else:
REQUIRE_TRITON_MXFP4 = unittest.skip("test requires CUDA or XPU")
def _empty_accelerator_cache():
if torch.cuda.is_available():
torch.cuda.empty_cache()
elif hasattr(torch, "xpu") and torch.xpu.is_available():
torch.xpu.empty_cache()
@contextmanager
def _patch_no_accelerator():
    """Temporarily make torch report that no accelerator (CUDA/XPU) is present."""
    with ExitStack() as patches:
        patches.enter_context(patch("torch.cuda.is_available", return_value=False))
        # Only patch XPU when this torch build exposes the backend at all.
        if hasattr(torch, "xpu"):
            patches.enter_context(patch("torch.xpu.is_available", return_value=False))
        patches.enter_context(patch("torch.accelerator.current_accelerator", return_value=None))
        yield
class Mxfp4ConfigTest(unittest.TestCase):
    """Unit tests for the `Mxfp4Config` quantization configuration object."""

    def test_basic_config_creation(self):
        """Test basic configuration creation with default values"""
        config = Mxfp4Config()
        self.assertEqual(config.quant_method.value, "mxfp4")
        self.assertIsNone(config.modules_to_not_convert)
        self.assertFalse(config.dequantize)

    def test_config_with_modules_to_not_convert(self):
        """Test configuration with modules to not convert"""
        modules = ["model.layers.*.self_attn", "lm_head"]
        config = Mxfp4Config(modules_to_not_convert=modules)
        self.assertEqual(config.modules_to_not_convert, modules)

    def test_config_with_dequantize(self):
        """Test configuration with dequantize enabled"""
        config = Mxfp4Config(dequantize=True)
        self.assertTrue(config.dequantize)

    def test_get_loading_attributes(self):
        """Test get_loading_attributes method"""
        config = Mxfp4Config(dequantize=True)
        attrs = config.get_loading_attributes()
        self.assertEqual(attrs["dequantize"], True)

    def test_to_dict(self):
        """Test configuration serialization to dict"""
        config = Mxfp4Config(modules_to_not_convert=["lm_head"], dequantize=True)
        config_dict = config.to_dict()
        self.assertEqual(config_dict["quant_method"], "mxfp4")
        self.assertEqual(config_dict["modules_to_not_convert"], ["lm_head"])
        # we don't keep dequantize in config_dict
        self.assertTrue("dequantize" not in config_dict)

    def test_from_dict(self):
        """Test configuration creation from dict"""
        config_dict = {"quant_method": "mxfp4", "modules_to_not_convert": ["lm_head"], "dequantize": True}
        config = Mxfp4Config.from_dict(config_dict)
        self.assertEqual(config.modules_to_not_convert, ["lm_head"])
        self.assertTrue(config.dequantize)
class Mxfp4QuantizerTest(unittest.TestCase):
    """Test the Mxfp4HfQuantizer class"""

    def setUp(self):
        # Start each test with freed accelerator memory.
        gc.collect()
        _empty_accelerator_cache()

    def test_quantizer_validation_no_torch(self):
        """Test quantizer validation when torch is not available"""
        with patch("transformers.quantizers.quantizer_mxfp4.is_torch_available", return_value=False):
            from transformers.quantizers.quantizer_mxfp4 import Mxfp4HfQuantizer

            config = Mxfp4Config()
            quantizer = Mxfp4HfQuantizer(config)
            with self.assertRaises(ImportError):
                quantizer.validate_environment()

    def test_quantizer_validation_no_accelerator(self):
        """Test quantizer validation when CUDA/XPU is not available"""
        with _patch_no_accelerator():
            from transformers.quantizers.quantizer_mxfp4 import Mxfp4HfQuantizer

            config = Mxfp4Config()
            quantizer = Mxfp4HfQuantizer(config)
            quantizer.pre_quantized = False
            # CPU already supported MXFP4
            quantizer.validate_environment()

    @require_torch_gpu
    def test_quantizer_validation_low_compute_capability(self):
        """Test quantizer validation with CUDA low compute capability"""
        with patch("torch.cuda.get_device_capability", return_value=(7, 0)):
            from transformers.quantizers.quantizer_mxfp4 import Mxfp4HfQuantizer

            config = Mxfp4Config()
            quantizer = Mxfp4HfQuantizer(config)
            quantizer.pre_quantized = False
            with self.assertRaises(ValueError):
                quantizer.validate_environment()

    @require_torch_gpu
    def test_quantizer_validation_low_compute_capability_with_prequantized(self):
        """Test quantizer validation with CUDA low compute capability"""
        with patch("torch.cuda.get_device_capability", return_value=(7, 0)):
            from transformers.quantizers.quantizer_mxfp4 import Mxfp4HfQuantizer

            config = Mxfp4Config()
            quantizer = Mxfp4HfQuantizer(config)
            # Should automatically set dequantize=True and warn
            quantizer.validate_environment()
            self.assertTrue(quantizer.quantization_config.dequantize)

    @require_torch_gpu
    def test_quantizer_validation_low_compute_capability_with_dequantize(self):
        """Test quantizer validation with CUDA low compute capability but dequantize enabled"""
        with patch("torch.cuda.get_device_capability", return_value=(7, 0)):
            from transformers.quantizers.quantizer_mxfp4 import Mxfp4HfQuantizer

            config = Mxfp4Config(dequantize=True)
            quantizer = Mxfp4HfQuantizer(config)
            # Should not raise error with dequantize=True
            try:
                quantizer.validate_environment()
            except ValueError as e:
                if "compute capability" in str(e):
                    self.fail("Should not raise compute capability error when dequantize=True")

    def test_quantizer_validation_order_dequantize_before_accelerator_check(self):
        """Test that dequantize check happens before CUDA/XPU availability check"""
        # Mock torch.cuda.is_available
        with _patch_no_accelerator():
            from transformers.quantizers.quantizer_mxfp4 import Mxfp4HfQuantizer

            # Test with dequantize=True - should pass even without CUDA/XPU and accelerate
            config = Mxfp4Config(dequantize=True)
            quantizer = Mxfp4HfQuantizer(config)
            # This should not raise any error because dequantize check comes first
            quantizer.validate_environment()
            # Test with dequantize=False - should still fail due to missing CUDA/XPU
            config = Mxfp4Config(dequantize=False)
            quantizer = Mxfp4HfQuantizer(config)
            quantizer.pre_quantized = False
            # CPU already supported MXFP4
            quantizer.validate_environment()

    def test_quantizer_validation_missing_triton(self):
        """Test quantizer validation when triton is not available"""
        with (
            patch("transformers.quantizers.quantizer_mxfp4.is_triton_available", return_value=False),
            patch("transformers.quantizers.quantizer_mxfp4.is_kernels_available", return_value=False),
        ):
            from transformers.quantizers.quantizer_mxfp4 import Mxfp4HfQuantizer

            config = Mxfp4Config()
            quantizer = Mxfp4HfQuantizer(config)
            quantizer.pre_quantized = False
            with self.assertRaises(ValueError):
                quantizer.validate_environment()

    def test_quantizer_validation_missing_triton_pre_quantized_no_dequantize(self):
        """Test quantizer validation when triton is not available but model is pre-quantized and dequantize is False"""
        with (
            patch("transformers.quantizers.quantizer_mxfp4.is_triton_available", return_value=False),
            patch("transformers.quantizers.quantizer_mxfp4.is_kernels_available", return_value=False),
        ):
            from transformers.quantizers.quantizer_mxfp4 import Mxfp4HfQuantizer

            config = Mxfp4Config()
            quantizer = Mxfp4HfQuantizer(config)
            quantizer.pre_quantized = True
            # Should automatically set dequantize=True and warn
            quantizer.validate_environment()
            self.assertTrue(quantizer.quantization_config.dequantize)

    def test_is_trainable(self):
        """Test trainability"""
        from transformers.quantizers.quantizer_mxfp4 import Mxfp4HfQuantizer

        config = Mxfp4Config()
        quantizer = Mxfp4HfQuantizer(config)
        # MXFP4 is not trainable
        self.assertFalse(quantizer.is_trainable)
class Mxfp4IntegrationTest(unittest.TestCase):
    """Test mxfp4 integration functions"""

    def test_should_convert_module(self):
        """Test module conversion decision logic"""
        from transformers.quantizers.quantizers_utils import should_convert_module

        # Should convert by default
        self.assertTrue(should_convert_module("model", None))
        self.assertTrue(should_convert_module("model", []))
        # Should not convert if in exclusion list
        patterns = ["model.layers.*.self_attn", "lm_head"]
        self.assertFalse(should_convert_module("lm_head", patterns))
        self.assertTrue(should_convert_module("experts", patterns))

    @require_torch
    def test_convert_moe_packed_tensors(self):
        """Test unpacking of quantized tensors"""
        from transformers.integrations.mxfp4 import convert_moe_packed_tensors

        # Create dummy packed tensors (values are arbitrary; only shapes/dtypes matter here)
        blocks = torch.randint(0, 255, (2, 4, 8, 16), dtype=torch.uint8)
        scales = torch.randint(100, 150, (2, 4, 8), dtype=torch.uint8)
        result = convert_moe_packed_tensors(blocks, scales, dtype=torch.bfloat16)
        # Each packed uint8 holds two 4-bit values, hence the doubled last dims.
        self.assertEqual(result.shape, (2, 8 * 16 * 2, 4))
        self.assertEqual(result.dtype, torch.bfloat16)

    @REQUIRE_TRITON_MXFP4
    @require_kernels
    @require_torch
    def test_quantize_to_mxfp4(self):
        """Test quantization function"""
        from transformers.integrations.mxfp4 import quantize_to_mxfp4
        from transformers.quantizers.quantizer_mxfp4 import Mxfp4HfQuantizer

        config = Mxfp4Config()
        quantizer = Mxfp4HfQuantizer(config)
        # Create dummy weight tensor
        device = torch_device
        w = torch.randn(32, 64, 128, dtype=torch.bfloat16, device=torch.device(device))
        quantized_w, w_scale = quantize_to_mxfp4(w, quantizer._lazy_import_kernels())
        # Check that shapes are reasonable
        self.assertEqual(quantized_w.dtype, torch.uint8)
@require_torch
@require_torch_large_accelerator
@REQUIRE_TRITON_MXFP4
@require_kernels
@slow
class Mxfp4ModelTest(unittest.TestCase):
"""Test mxfp4 with actual models (requires specific model and hardware)"""
# These should be paths to real OpenAI MoE models for proper testing
model_name = "openai/gpt-oss-20b"
input_text = "Once upon a time"
# Expected outputs for generation tests
EXPECTED_OUTPUTS = set()
EXPECTED_OUTPUTS.add("Once upon a time, in a small town, there lived a young")
def setUp(self):
    # Free host and accelerator memory before each heavyweight model test.
    gc.collect()
    _empty_accelerator_cache()
def tearDown(self):
    # Release the just-tested model's memory so subsequent tests can load theirs.
    gc.collect()
    _empty_accelerator_cache()
def check_inference_correctness_quantized(self, model, tokenizer):
    """Greedy-generate 10 tokens from `self.input_text` and assert the decode is a known-good output."""
    # Check that inference pass works on the model
    encoded_input = tokenizer(self.input_text, return_tensors="pt").to(model.device)
    # Set pad token if not set
    if tokenizer.pad_token is None:
        tokenizer.pad_token = tokenizer.eos_token
    with torch.no_grad():
        output_sequences = model.generate(
            **encoded_input,
            max_new_tokens=10,
            do_sample=False,
            pad_token_id=tokenizer.eos_token_id,
            use_cache=False,
        )
    generated_text = tokenizer.decode(output_sequences[0], skip_special_tokens=True)
    # Greedy decoding, so the text must match one of the recorded expected outputs.
    self.assertIn(generated_text, self.EXPECTED_OUTPUTS)
def test_gpt_oss_model_loading_quantized_with_device_map(self):
    """Test loading OpenAI MoE model with mxfp4 quantization and device_map"""
    # No explicit quantization_config: the checkpoint is pre-quantized mxfp4.
    model = GptOssForCausalLM.from_pretrained(
        self.model_name,
        dtype=torch.bfloat16,
        device_map="auto",
    )
    tokenizer = AutoTokenizer.from_pretrained(self.model_name)
    self.check_inference_correctness_quantized(model, tokenizer)
def test_gpt_oss_model_loading_dequantized_with_device_map(self):
    """Test loading OpenAI MoE model with mxfp4 dequantization and device_map"""
    quantization_config = Mxfp4Config(dequantize=True)
    # Test that config is properly set up
    self.assertTrue(quantization_config.dequantize)
    model = GptOssForCausalLM.from_pretrained(
        self.model_name,
        quantization_config=quantization_config,
        dtype=torch.bfloat16,
        device_map="auto",
    )
    tokenizer = AutoTokenizer.from_pretrained(self.model_name)
    self.check_inference_correctness_quantized(model, tokenizer)
def test_model_device_map_validation(self):
    """Test device map validation"""
    from transformers.quantizers.quantizer_mxfp4 import Mxfp4HfQuantizer

    config = Mxfp4Config()
    quantizer = Mxfp4HfQuantizer(config)
    quantizer.pre_quantized = False
    # Test with CPU in device map (CPU already support mxfp4) — must not raise.
    quantizer.validate_environment(device_map={"": "cpu"})
def test_memory_footprint_comparison(self):
    """Test memory footprint differences between quantized and unquantized models"""
    # Expected: quantized < dequantized < unquantized memory usage
    # NOTE(review): only quantized vs dequantized is actually asserted below;
    # no unquantized model is loaded in this test.
    quantization_config = Mxfp4Config(dequantize=True)
    quantized_model = GptOssForCausalLM.from_pretrained(
        self.model_name,
        dtype=torch.bfloat16,
        device_map="auto",
    )
    dequantized_model = GptOssForCausalLM.from_pretrained(
        self.model_name,
        dtype=torch.bfloat16,
        device_map="auto",
        quantization_config=quantization_config,
    )
    quantized_mem = quantized_model.get_memory_footprint()
    dequantized_mem = dequantized_model.get_memory_footprint()
    self.assertLess(quantized_mem, dequantized_mem)
def test_save_mxfp4(self):
    """Test saving quantized OpenAI MoE model with device_map.

    Saves the pre-quantized checkpoint, then reloads it twice: once keeping the
    mxfp4 quantization, once dequantizing to bf16, verifying generation both times.
    """
    # Use `dtype=` (not the deprecated `torch_dtype=`) for consistency with the
    # other tests in this class.
    model = GptOssForCausalLM.from_pretrained(
        self.model_name,
        dtype=torch.bfloat16,
        device_map="auto",
    )
    tokenizer = AutoTokenizer.from_pretrained(self.model_name)
    with tempfile.TemporaryDirectory() as tmp:
        # Save the model in mxfp4 format
        model.save_pretrained(tmp)
        _empty_accelerator_cache()
        gc.collect()
        # test quantized model (the saved checkpoint carries its mxfp4 config)
        loaded_model = GptOssForCausalLM.from_pretrained(
            tmp,
            dtype=torch.bfloat16,
            device_map="auto",
        )
        self.check_inference_correctness_quantized(loaded_model, tokenizer)
        # test dequantized model
        loaded_model = GptOssForCausalLM.from_pretrained(
            tmp,
            quantization_config=Mxfp4Config(dequantize=True),
            dtype=torch.bfloat16,
            device_map="auto",
        )
        self.check_inference_correctness_quantized(loaded_model, tokenizer)
def test_save_mxfp4_non_quantized(self):
    """Test saving dequantized OpenAI MoE model with mxfp4 quantization and device_map.

    Quantizes a bf16 checkpoint on the fly, saves it, then reloads it both as
    quantized and as dequantized, verifying generation each time.
    """
    non_quantized_model_name = "hf-internal-testing/gpt-oss-20b-bf16"
    tokenizer = AutoTokenizer.from_pretrained(non_quantized_model_name)
    loaded_model = GptOssForCausalLM.from_pretrained(
        non_quantized_model_name,
        quantization_config=Mxfp4Config(),
        dtype=torch.bfloat16,
        device_map="auto",
    )
    # save the quantized model
    with tempfile.TemporaryDirectory() as tmp:
        loaded_model.save_pretrained(tmp)
        _empty_accelerator_cache()
        gc.collect()
        # load it back to check that everything works as expected
        loaded_model = GptOssForCausalLM.from_pretrained(
            tmp,
            dtype=torch.bfloat16,
            device_map="auto",
        )
        self.check_inference_correctness_quantized(loaded_model, tokenizer)
        # The flag is `dequantize` — it was previously misspelled `dequantized`,
        # which was silently ignored, so the dequantized path was never exercised.
        loaded_model = GptOssForCausalLM.from_pretrained(
            tmp,
            quantization_config=Mxfp4Config(dequantize=True),
            dtype=torch.bfloat16,
            device_map="auto",
        )
        self.check_inference_correctness_quantized(loaded_model, tokenizer)
def test_compute_module_sizes(self):
    r"""
    Test if we compute the right module sizes needed to generate the device map.
    Also test if we get the right values for `total_byte_count` in `caching_allocator_warmup`.
    """
    from transformers import AutoConfig, AutoModelForCausalLM
    from transformers.integrations import Mxfp4GptOssExperts
    from transformers.integrations.accelerate import compute_module_sizes
    from transformers.modeling_utils import expand_device_map, get_total_byte_count
    from transformers.quantizers import AutoHfQuantizer

    # We need to preprocess the model like that because device_map calculation happens before we
    # load the weights inside the model. For normal weights it's fine, but for quantized weights
    # the tensors' dtype might change during loading.
    with torch.device("meta"):
        # Meta-device instantiation: only shapes/dtypes are materialized, no real allocation.
        config = AutoConfig.from_pretrained(self.model_name)
        model = AutoModelForCausalLM.from_config(config, dtype=torch.bfloat16)
    # Reference per-module sizes for the unquantized (bf16) model.
    model_size, _ = compute_module_sizes(model, only_modules=False)
    expected_keys = [name for name, _ in model.named_parameters()] + [
        name for name, _ in model.named_buffers()
    ]
    expanded_device_map = expand_device_map({"": torch_device}, expected_keys)
    total_byte_count = list(get_total_byte_count(model, expanded_device_map).values())[0]

    # testing prequantized = False should be enough, the shape should be the same whether it is pre-quantized or not
    hf_quantizer = AutoHfQuantizer.from_config(Mxfp4Config(), pre_quantized=False)
    hf_quantizer.preprocess_model(model=model, config=model.config)
    # Recompute sizes after the quantizer has swapped in the mxfp4 expert modules.
    quantized_model_size, _ = compute_module_sizes(model, hf_quantizer, only_modules=False)
    expected_keys = [name for name, _ in model.named_parameters()] + [
        name for name, _ in model.named_buffers()
    ]
    expanded_device_map = expand_device_map({"": torch_device}, expected_keys)
    quantized_total_byte_count = list(get_total_byte_count(model, expanded_device_map, hf_quantizer).values())[
        0
    ]

    for name, module in model.named_modules():
        if isinstance(module, Mxfp4GptOssExperts):
            # from 16 bits to 4 bits
            assert int(model_size[f"{name}.gate_up_proj"] // 4) == int(
                quantized_model_size[f"{name}.gate_up_proj"]
            )
            assert int(model_size[f"{name}.down_proj"] // 4) == int(quantized_model_size[f"{name}.down_proj"])
    # check that we get the same value, as we use `compute_module_sizes` in `get_total_byte_count`
    assert total_byte_count == model_size[""]
    assert quantized_total_byte_count == quantized_model_size[""]
    # we should at least have 3 times memory reduction in total for this model
    assert model_size[""] > quantized_model_size[""] * 3
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/quantization/mxfp4/test_mxfp4.py",
"license": "Apache License 2.0",
"lines": 413,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:src/transformers/models/janus/image_processing_janus_fast.py | # Copyright 2025 Deepseek AI and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
import torch
import torchvision.transforms.v2.functional as tvF
from ...image_processing_utils import BatchFeature
from ...image_processing_utils_fast import (
BaseImageProcessorFast,
group_images_by_shape,
reorder_images,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ImageInput,
PILImageResampling,
SizeDict,
)
from ...processing_utils import Unpack
from ...utils import (
TensorType,
auto_docstring,
)
from .image_processing_janus import JanusImageProcessorKwargs
@auto_docstring
class JanusImageProcessorFast(BaseImageProcessorFast):
    # Defaults: CLIP-style normalization, 384x384 square output, bicubic resampling.
    resample = PILImageResampling.BICUBIC
    image_mean = OPENAI_CLIP_MEAN
    image_std = OPENAI_CLIP_STD
    size = {"height": 384, "width": 384}
    min_size = 14  # lower bound on either edge after the aspect-preserving resize
    do_resize = True
    do_rescale = True
    do_normalize = True
    do_pad = True
    valid_kwargs = JanusImageProcessorKwargs

    def __init__(self, **kwargs: Unpack[JanusImageProcessorKwargs]):
        super().__init__(**kwargs)
        # Padding color: the image mean scaled to 0-255 so padded areas map close to
        # zero after normalization; mid-gray when no custom mean is supplied.
        if kwargs.get("image_mean") is None:
            background_color = (127, 127, 127)
        else:
            background_color = tuple(int(x * 255) for x in kwargs.get("image_mean"))
        self.background_color = tuple(background_color)

    def resize(
        self,
        image: "torch.Tensor",
        size: SizeDict,
        min_size: int,
        interpolation: Optional["tvF.InterpolationMode"] = None,
        antialias: bool = True,
        **kwargs,
    ) -> "torch.Tensor":
        """Resize so the longest edge equals `size` (which must be square), preserving
        aspect ratio. Each output edge is clamped to at least `min_size`."""
        if size.height is None or size.width is None or size.height != size.width:
            raise ValueError(
                f"Output height and width must be the same. Got height={size['height']} and width={size['width']}"
            )
        size = size.height

        height, width = image.shape[-2:]
        max_size = max(height, width)
        delta = size / max_size
        # Largest side becomes `size` and the other side is scaled according to the aspect ratio.
        output_size_nonpadded = SizeDict(
            height=max(round(height * delta), min_size),
            width=max(round(width * delta), min_size),
        )
        return super().resize(image, size=output_size_nonpadded, interpolation=interpolation, antialias=antialias)

    def pad_to_square(
        self,
        images: "torch.Tensor",
        background_color: int | tuple[int, int, int] = 0,
    ) -> "torch.Tensor":
        """
        Pads an image to a square based on the longest edge.

        Args:
            images (`torch.Tensor`):
                The images to pad.
            background_color (`int` or `tuple[int, int, int]`, *optional*, defaults to 0):
                The color to use for the padding. Can be an integer for single channel or a
                tuple of integers representing for multi-channel images. If passed as integer
                in multi-channel mode, it will default to `0` in subsequent channels.

        Returns:
            `torch.Tensor`: The padded images.
        """
        height, width = images.shape[-2:]
        num_channels = images.shape[1]
        batch_size = images.shape[0]

        # Already square: nothing to do.
        if height == width:
            return images

        max_dim = max(height, width)

        # Ensure background_color is the correct shape
        if isinstance(background_color, int):
            background_color = [background_color]
        elif len(background_color) != num_channels:
            # Note: the check is exact-match; the previous message said "no more than",
            # which did not match the condition.
            raise ValueError(
                f"background_color must have exactly {num_channels} elements to match the number of channels"
            )

        padded_images = torch.zeros(
            (batch_size, num_channels, max_dim, max_dim), dtype=images.dtype, device=images.device
        )
        # Fill each provided channel with its background value (missing channels stay 0).
        for i, color in enumerate(background_color):
            padded_images[:, i, :, :] = color

        # Center the original image along the shorter dimension.
        if width > height:
            start = (max_dim - height) // 2
            padded_images[:, :, start : start + height, :] = images
        else:
            start = (max_dim - width) // 2
            padded_images[:, :, :, start : start + width] = images
        return padded_images

    def _preprocess(
        self,
        images: list["torch.Tensor"],
        do_resize: bool,
        size: SizeDict,
        min_size: int,
        interpolation: Optional["tvF.InterpolationMode"],
        do_rescale: bool,
        rescale_factor: float,
        do_normalize: bool,
        image_mean: float | list[float] | None,
        image_std: float | list[float] | None,
        disable_grouping: bool | None,
        return_tensors: str | TensorType | None,
        do_pad: bool = True,
        **kwargs,
    ) -> BatchFeature:
        """Resize, pad to square, then rescale/normalize, batching images of equal shape."""
        # Group images by size for batched resizing
        grouped_images, grouped_images_index = group_images_by_shape(images, disable_grouping=disable_grouping)
        resized_images_grouped = {}
        for shape, stacked_images in grouped_images.items():
            if do_resize:
                stacked_images = self.resize(
                    image=stacked_images, size=size, min_size=min_size, interpolation=interpolation
                )
            resized_images_grouped[shape] = stacked_images
        resized_images = reorder_images(resized_images_grouped, grouped_images_index)

        # Group images by size for further processing
        # Needed in case do_resize is False, or resize returns images with different sizes
        grouped_images, grouped_images_index = group_images_by_shape(resized_images, disable_grouping=disable_grouping)
        processed_images_grouped = {}
        for shape, stacked_images in grouped_images.items():
            if do_pad:
                stacked_images = self.pad_to_square(stacked_images, background_color=self.background_color)
            # Fused rescale and normalize
            stacked_images = self.rescale_and_normalize(
                stacked_images, do_rescale, rescale_factor, do_normalize, image_mean, image_std
            )
            processed_images_grouped[shape] = stacked_images
        processed_images = reorder_images(processed_images_grouped, grouped_images_index)
        return BatchFeature(data={"pixel_values": processed_images}, tensor_type=return_tensors)

    def postprocess(
        self,
        images: ImageInput,
        do_rescale: bool | None = None,
        rescale_factor: float | None = None,
        do_normalize: bool | None = None,
        image_mean: list[float] | None = None,
        image_std: list[float] | None = None,
        return_tensors: str | None = None,
    ) -> "torch.Tensor":
        """Invert preprocessing: de-normalize and re-scale pixel values back towards
        the 0-255 range, optionally converting to uint8 tensors or PIL images."""
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = 1.0 / self.rescale_factor if rescale_factor is None else rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        # Fold the inverse normalization (x * std + mean, then rescale) into a single
        # normalize call by transforming the mean/std parameters.
        image_mean = tuple(-rescale_factor * mean / std for mean, std in zip(image_mean, image_std))
        image_std = tuple(1 / std for std in image_std)

        images = self.preprocess(
            images,
            do_rescale=do_rescale,
            rescale_factor=rescale_factor,
            do_normalize=do_normalize,
            image_mean=image_mean,
            image_std=image_std,
            do_resize=False,
            do_pad=False,
            return_tensors=return_tensors,
        ).pixel_values

        if do_rescale:
            images = [image.clip(0, 255).to(torch.uint8) for image in images]
        if do_normalize and do_rescale and return_tensors == "PIL.Image.Image":
            images = [tvF.to_pil_image(image) for image in images]
        return_tensors = return_tensors if return_tensors != "PIL.Image.Image" else None
        images = torch.stack(images, dim=0) if return_tensors == "pt" else images
        return BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
__all__ = ["JanusImageProcessorFast"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/janus/image_processing_janus_fast.py",
"license": "Apache License 2.0",
"lines": 196,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/mm_grounding_dino/convert_mm_grounding_dino_to_hf.py | # Copyright 2025 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import re
from io import BytesIO
import httpx
import torch
from PIL import Image
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.grounding_dino.image_processing_grounding_dino import GroundingDinoImageProcessor
from transformers.models.grounding_dino.processing_grounding_dino import GroundingDinoProcessor
from transformers.models.mm_grounding_dino.configuration_mm_grounding_dino import MMGroundingDinoConfig
from transformers.models.mm_grounding_dino.modeling_mm_grounding_dino import MMGroundingDinoForObjectDetection
from transformers.models.swin.configuration_swin import SwinConfig
# Maps each supported variant name to the URL of its original (mmdetection / LLMDet) checkpoint.
MODEL_NAME_TO_CHECKPOINT_URL_MAPPING = {
    "mm_grounding_dino_tiny_o365v1_goldg": "https://download.openmmlab.com/mmdetection/v3.0/mm_grounding_dino/grounding_dino_swin-t_pretrain_obj365_goldg/grounding_dino_swin-t_pretrain_obj365_goldg_20231122_132602-4ea751ce.pth",
    "mm_grounding_dino_tiny_o365v1_goldg_grit": "https://download.openmmlab.com/mmdetection/v3.0/mm_grounding_dino/grounding_dino_swin-t_pretrain_obj365_goldg_grit9m/grounding_dino_swin-t_pretrain_obj365_goldg_grit9m_20231128_200818-169cc352.pth",
    "mm_grounding_dino_tiny_o365v1_goldg_v3det": "https://download.openmmlab.com/mmdetection/v3.0/mm_grounding_dino/grounding_dino_swin-t_pretrain_obj365_goldg_v3det/grounding_dino_swin-t_pretrain_obj365_goldg_v3det_20231218_095741-e316e297.pth",
    "mm_grounding_dino_tiny_o365v1_goldg_grit_v3det": "https://download.openmmlab.com/mmdetection/v3.0/mm_grounding_dino/grounding_dino_swin-t_pretrain_obj365_goldg_grit9m_v3det/grounding_dino_swin-t_pretrain_obj365_goldg_grit9m_v3det_20231204_095047-b448804b.pth",
    "mm_grounding_dino_base_o365v1_goldg_v3det": "https://download.openmmlab.com/mmdetection/v3.0/mm_grounding_dino/grounding_dino_swin-b_pretrain_obj365_goldg_v3det/grounding_dino_swin-b_pretrain_obj365_goldg_v3de-f83eef00.pth",
    "mm_grounding_dino_base_all": "https://download.openmmlab.com/mmdetection/v3.0/mm_grounding_dino/grounding_dino_swin-b_pretrain_all/grounding_dino_swin-b_pretrain_all-f9818a7c.pth",
    "mm_grounding_dino_large_o365v2_oiv6_goldg": "https://download.openmmlab.com/mmdetection/v3.0/mm_grounding_dino/grounding_dino_swin-l_pretrain_obj365_goldg/grounding_dino_swin-l_pretrain_obj365_goldg-34dcdc53.pth",
    "mm_grounding_dino_large_all": "https://download.openmmlab.com/mmdetection/v3.0/mm_grounding_dino/grounding_dino_swin-l_pretrain_all/grounding_dino_swin-l_pretrain_all-56d69e78.pth",
    "llmdet_tiny": "https://huggingface.co/fushh7/LLMDet/resolve/main/tiny.pth?download=true",
    "llmdet_base": "https://huggingface.co/fushh7/LLMDet/resolve/main/base.pth?download=true",
    "llmdet_large": "https://huggingface.co/fushh7/LLMDet/resolve/main/large.pth?download=true",
}
# Reference post-processed detections (scores and normalized cxcywh-style boxes) per variant,
# produced on the test image from `prepare_test_inputs` and compared against in
# `convert_mm_grounding_dino_checkpoint` when `verify_outputs` is set.
MODEL_NAME_TO_EXPECTED_OUTPUT_MAPPING = {
    "mm_grounding_dino_tiny_o365v1_goldg": {
        "scores": torch.tensor([0.7722, 0.7584, 0.7984, 0.7163]),
        "boxes": torch.tensor(
            [
                [0.5212, 0.1594, 0.5792, 0.3895],
                [0.5424, 0.0513, 0.9996, 0.7757],
                [0.0629, 0.1526, 0.2746, 0.2447],
                [0.0091, 0.1127, 0.4945, 0.9911],
            ]
        ),
    },
    "mm_grounding_dino_tiny_o365v1_goldg_grit": {
        "scores": torch.tensor([0.7865, 0.7180, 0.7665, 0.8177]),
        "boxes": torch.tensor(
            [
                [0.0084, 0.1129, 0.4940, 0.9895],
                [0.5214, 0.1597, 0.5786, 0.3875],
                [0.5413, 0.0507, 0.9998, 0.7768],
                [0.0631, 0.1527, 0.2740, 0.2449],
            ]
        ),
    },
    "mm_grounding_dino_tiny_o365v1_goldg_v3det": {
        "scores": torch.tensor([0.5690, 0.5553, 0.6075, 0.5775]),
        "boxes": torch.tensor(
            [
                [0.5393, 0.0502, 0.9989, 0.7763],
                [0.0090, 0.1125, 0.4950, 0.9895],
                [0.5207, 0.1589, 0.5794, 0.3889],
                [0.0625, 0.1519, 0.2750, 0.2446],
            ]
        ),
    },
    "mm_grounding_dino_tiny_o365v1_goldg_grit_v3det": {
        "scores": torch.tensor([0.8381, 0.8204, 0.7970, 0.7175]),
        "boxes": torch.tensor(
            [
                [0.0099, 0.1129, 0.4942, 0.9903],
                [0.5413, 0.0506, 0.9998, 0.7753],
                [0.0626, 0.1527, 0.2744, 0.2443],
                [0.5211, 0.1596, 0.5790, 0.3890],
            ]
        ),
    },
    "mm_grounding_dino_base_o365v1_goldg_v3det": {
        "scores": torch.tensor([0.8418, 0.8364, 0.8342, 0.7885]),
        "boxes": torch.tensor(
            [
                [0.5427, 0.0502, 0.9996, 0.7770],
                [0.0628, 0.1529, 0.2747, 0.2448],
                [0.0085, 0.1132, 0.4947, 0.9898],
                [0.5208, 0.1597, 0.5787, 0.3910],
            ]
        ),
    },
    "mm_grounding_dino_base_all": {
        "scores": torch.tensor([0.4713]),
        "boxes": torch.tensor([[0.5423, 0.0507, 0.9998, 0.7761]]),
    },
    "mm_grounding_dino_large_o365v2_oiv6_goldg": {
        "scores": torch.tensor([0.7824, 0.8275, 0.7715, 0.8211]),
        "boxes": torch.tensor(
            [
                [0.0082, 0.1133, 0.4945, 0.9889],
                [0.5410, 0.0508, 0.9998, 0.7771],
                [0.0632, 0.1526, 0.2740, 0.2439],
                [0.5205, 0.1599, 0.5787, 0.3906],
            ]
        ),
    },
    "mm_grounding_dino_large_all": {
        "scores": torch.tensor([0.7373, 0.6208, 0.6913, 0.4523]),
        "boxes": torch.tensor(
            [
                [0.5424, 0.0509, 0.9997, 0.7765],
                [0.0632, 0.1529, 0.2744, 0.2447],
                [0.0121, 0.1125, 0.4947, 0.9884],
                [0.5206, 0.1597, 0.5789, 0.3933],
            ]
        ),
    },
    "llmdet_tiny": {
        "scores": torch.tensor([0.7262, 0.7552, 0.7656, 0.8207]),
        "boxes": torch.tensor(
            [
                [0.0114, 0.1132, 0.4947, 0.9854],
                [0.5387, 0.0513, 0.9992, 0.7765],
                [0.5212, 0.1605, 0.5788, 0.3890],
                [0.0634, 0.1536, 0.2743, 0.2440],
            ]
        ),
    },
    "llmdet_base": {
        "scores": torch.tensor([0.8646, 0.7567, 0.6978, 0.8084]),
        "boxes": torch.tensor(
            [
                [0.0632, 0.1529, 0.2745, 0.2438],
                [0.5420, 0.0512, 0.9989, 0.7774],
                [0.0110, 0.1134, 0.4950, 0.9875],
                [0.5209, 0.1602, 0.5789, 0.3908],
            ]
        ),
    },
    "llmdet_large": {
        "scores": torch.tensor([0.7107, 0.8626, 0.7458, 0.8166]),
        "boxes": torch.tensor(
            [
                [0.0147, 0.1128, 0.4957, 0.9858],
                [0.0634, 0.1528, 0.2744, 0.2447],
                [0.5414, 0.0511, 0.9997, 0.7776],
                [0.5209, 0.1602, 0.5792, 0.3916],
            ]
        ),
    },
}
# fmt: off
# Regex rename rules mapping original (mmdet) state-dict key patterns to HF key patterns.
# Applied sequentially over the newline-joined key list by `convert_old_keys_to_new_keys`,
# so patterns must not produce text that a later pattern would re-match unintentionally.
ORIGINAL_TO_CONVERTED_KEY_MAPPING = {
    # vision backbone
    r"backbone.patch_embed.projection.(weight|bias)": r"model.backbone.conv_encoder.model.embeddings.patch_embeddings.projection.\1",
    r"backbone.patch_embed.norm.(weight|bias)": r"model.backbone.conv_encoder.model.embeddings.norm.\1",
    r"backbone.stages.(\d+).blocks.(\d+).attn.w_msa.(relative_position_bias_table|relative_position_index)": r"model.backbone.conv_encoder.model.encoder.layers.\1.blocks.\2.attention.self.\3",
    r"backbone.stages.(\d+).blocks.(\d+).norm1.(weight|bias)": r"model.backbone.conv_encoder.model.encoder.layers.\1.blocks.\2.layernorm_before.\3",
    r"backbone.stages.(\d+).blocks.(\d+).attn.w_msa.(query|key|value).(weight|bias)": r"model.backbone.conv_encoder.model.encoder.layers.\1.blocks.\2.attention.self.\3.\4",
    r"backbone.stages.(\d+).blocks.(\d+).attn.w_msa.proj.(weight|bias)": r"model.backbone.conv_encoder.model.encoder.layers.\1.blocks.\2.attention.output.dense.\3",
    r"backbone.stages.(\d+).blocks.(\d+).norm2.(weight|bias)": r"model.backbone.conv_encoder.model.encoder.layers.\1.blocks.\2.layernorm_after.\3",
    r"backbone.stages.(\d+).blocks.(\d+).ffn.layers.0.0.(weight|bias)": r"model.backbone.conv_encoder.model.encoder.layers.\1.blocks.\2.intermediate.dense.\3",
    r"backbone.stages.(\d+).blocks.(\d+).ffn.layers.1.(weight|bias)": r"model.backbone.conv_encoder.model.encoder.layers.\1.blocks.\2.output.dense.\3",
    r"backbone.stages.(\d+).downsample.reduction.weight": r"model.backbone.conv_encoder.model.encoder.layers.\1.downsample.reduction.weight",
    r"backbone.stages.(\d+).downsample.norm.(weight|bias)": r"model.backbone.conv_encoder.model.encoder.layers.\1.downsample.norm.\2",
    r"backbone.norms.(\d+).(weight|bias)": r"model.backbone.conv_encoder.model.hidden_states_norms.stage\1.\2",
    r"neck.convs.(\d+).conv.(weight|bias)": r"model.input_proj_vision.\1.0.\2",
    r"neck.convs.(\d+).gn.(weight|bias)": r"model.input_proj_vision.\1.1.\2",
    r"neck.extra_convs.(\d+).conv.(weight|bias)": r"model.input_proj_vision.\1.0.\2",
    r"neck.extra_convs.(\d+).gn.(weight|bias)": r"model.input_proj_vision.\1.1.\2",
    # text backbone
    r"language_model.language_backbone.body.model.(.*)": r"model.text_backbone.\1",
    r"text_feat_map.(weight|bias)": r"model.text_projection.\1",
    # encoder
    r"encoder.fusion_layers.(\d+).gamma_v": r"model.encoder.layers.\1.fusion_layer.vision_param",
    r"encoder.fusion_layers.(\d+).gamma_l": r"model.encoder.layers.\1.fusion_layer.text_param",
    r"encoder.fusion_layers.(\d+).layer_norm_v.(weight|bias)": r"model.encoder.layers.\1.fusion_layer.layer_norm_vision.\2",
    r"encoder.fusion_layers.(\d+).attn.v_proj.(weight|bias)": r"model.encoder.layers.\1.fusion_layer.attn.vision_proj.\2",
    r"encoder.fusion_layers.(\d+).attn.values_v_proj.(weight|bias)": r"model.encoder.layers.\1.fusion_layer.attn.values_vision_proj.\2",
    r"encoder.fusion_layers.(\d+).attn.out_v_proj.(weight|bias)": r"model.encoder.layers.\1.fusion_layer.attn.out_vision_proj.\2",
    r"encoder.fusion_layers.(\d+).layer_norm_l.(weight|bias)": r"model.encoder.layers.\1.fusion_layer.layer_norm_text.\2",
    r"encoder.fusion_layers.(\d+).attn.l_proj.(weight|bias)": r"model.encoder.layers.\1.fusion_layer.attn.text_proj.\2",
    r"encoder.fusion_layers.(\d+).attn.values_l_proj.(weight|bias)": r"model.encoder.layers.\1.fusion_layer.attn.values_text_proj.\2",
    r"encoder.fusion_layers.(\d+).attn.out_l_proj.(weight|bias)": r"model.encoder.layers.\1.fusion_layer.attn.out_text_proj.\2",
    r"encoder.layers.(\d+).self_attn.(sampling_offsets|attention_weights|value_proj|output_proj).(weight|bias)": r"model.encoder.layers.\1.deformable_layer.self_attn.\2.\3",
    r"encoder.layers.(\d+).norms.0.(weight|bias)": r"model.encoder.layers.\1.deformable_layer.self_attn_layer_norm.\2",
    r"encoder.layers.(\d+).ffn.layers.0.0.(weight|bias)": r"model.encoder.layers.\1.deformable_layer.fc1.\2",
    r"encoder.layers.(\d+).ffn.layers.1.(weight|bias)": r"model.encoder.layers.\1.deformable_layer.fc2.\2",
    r"encoder.layers.(\d+).norms.1.(weight|bias)": r"model.encoder.layers.\1.deformable_layer.final_layer_norm.\2",
    r"encoder.text_layers.(\d+).self_attn.attn.(query|key|value)_proj_(weight|bias)": r"model.encoder.layers.\1.text_enhancer_layer.self_attn.\2.\3",
    r"encoder.text_layers.(\d+).self_attn.attn.out_proj.(weight|bias)": r"model.encoder.layers.\1.text_enhancer_layer.self_attn.out_proj.\2",
    r"encoder.text_layers.(\d+).norms.0.(weight|bias)": r"model.encoder.layers.\1.text_enhancer_layer.layer_norm_before.\2",
    r"encoder.text_layers.(\d+).ffn.layers.0.0.(weight|bias)": r"model.encoder.layers.\1.text_enhancer_layer.fc1.\2",
    r"encoder.text_layers.(\d+).ffn.layers.1.(weight|bias)": r"model.encoder.layers.\1.text_enhancer_layer.fc2.\2",
    r"encoder.text_layers.(\d+).norms.1.(weight|bias)": r"model.encoder.layers.\1.text_enhancer_layer.layer_norm_after.\2",
    r"encoder.bbox_head.cls_branch.bias": r"model.encoder_output_class_embed.bias",
    r"encoder.bbox_head.reg_branch.0.(weight|bias)": r"model.encoder_output_bbox_embed.layers.0.\1",
    r"encoder.bbox_head.reg_branch.2.(weight|bias)": r"model.encoder_output_bbox_embed.layers.1.\1",
    r"encoder.bbox_head.reg_branch.4.(weight|bias)": r"model.encoder_output_bbox_embed.layers.2.\1",
    # decoder
    r"decoder.norm.(weight|bias)": r"model.decoder.layer_norm.\1",
    r"decoder.ref_point_head.layers.(\d+).(weight|bias)": r"model.decoder.reference_points_head.layers.\1.\2",
    r"decoder.layers.(\d+).self_attn.attn.(query|key|value)_proj_(weight|bias)": r"model.decoder.layers.\1.self_attn.\2.\3",
    r"decoder.layers.(\d+).self_attn.attn.out_proj.(weight|bias)": r"model.decoder.layers.\1.self_attn.out_proj.\2",
    r"decoder.layers.(\d+).norms.0.(weight|bias)": r"model.decoder.layers.\1.self_attn_layer_norm.\2",
    r"decoder.layers.(\d+).cross_attn_text.attn.(query|key|value)_proj_(weight|bias)": r"model.decoder.layers.\1.encoder_attn_text.\2.\3",
    r"decoder.layers.(\d+).cross_attn_text.attn.out_proj.(weight|bias)": r"model.decoder.layers.\1.encoder_attn_text.out_proj.\2",
    r"decoder.layers.(\d+).norms.1.(weight|bias)": r"model.decoder.layers.\1.encoder_attn_text_layer_norm.\2",
    r"decoder.layers.(\d+).cross_attn.(sampling_offsets|attention_weights|value_proj|output_proj).(weight|bias)": r"model.decoder.layers.\1.encoder_attn.\2.\3",
    r"decoder.layers.(\d+).norms.2.(weight|bias)": r"model.decoder.layers.\1.encoder_attn_layer_norm.\2",
    r"decoder.layers.(\d+).ffn.layers.0.0.(weight|bias)": r"model.decoder.layers.\1.fc1.\2",
    r"decoder.layers.(\d+).ffn.layers.1.(weight|bias)": r"model.decoder.layers.\1.fc2.\2",
    r"decoder.layers.(\d+).norms.3.(weight|bias)": r"model.decoder.layers.\1.final_layer_norm.\2",
    r"decoder.bbox_head.cls_branches.(\d+).bias": r"model.decoder.class_embed.\1.bias",
    r"decoder.bbox_head.reg_branches.(\d+).0.(weight|bias)": r"model.decoder.bbox_embed.\1.layers.0.\2",
    r"decoder.bbox_head.reg_branches.(\d+).2.(weight|bias)": r"model.decoder.bbox_embed.\1.layers.1.\2",
    r"decoder.bbox_head.reg_branches.(\d+).4.(weight|bias)": r"model.decoder.bbox_embed.\1.layers.2.\2",
    # other
    r"level_embed": r"model.level_embed",
    r"query_embedding.weight": r"model.query_position_embeddings.weight",
    r"memory_trans_fc.(weight|bias)": r"model.enc_output.\1",
    r"memory_trans_norm.(weight|bias)": r"model.enc_output_norm.\1",
    r"bbox_head.cls_branches.(\d+).bias": r"class_embed.\1.bias",
    r"bbox_head.reg_branches.(\d+).0.(weight|bias)": r"bbox_embed.\1.layers.0.\2",
    r"bbox_head.reg_branches.(\d+).2.(weight|bias)": r"bbox_embed.\1.layers.1.\2",
    r"bbox_head.reg_branches.(\d+).4.(weight|bias)": r"bbox_embed.\1.layers.2.\2",
}
# fmt: on
def get_mm_grounding_dino_config(model_name: str) -> MMGroundingDinoConfig:
    """Build the MMGroundingDino config whose Swin backbone matches the size tag in `model_name`."""
    # variant -> (image_size, window_size, embed_dim, depths, num_heads, out_features, num_feature_levels)
    swin_variants = {
        "tiny": (224, 7, 96, (2, 2, 6, 2), (3, 6, 12, 24), ["stage2", "stage3", "stage4"], 4),
        "base": (384, 12, 128, (2, 2, 18, 2), (4, 8, 16, 32), ["stage2", "stage3", "stage4"], 4),
        "large": (384, 12, 192, (2, 2, 18, 2), (6, 12, 24, 48), ["stage1", "stage2", "stage3", "stage4"], 5),
    }
    for variant, settings in swin_variants.items():
        if variant in model_name:
            image_size, window_size, embed_dim, depths, num_heads, out_features, num_feature_levels = settings
            break
    else:
        raise ValueError(
            f"Model name: {model_name} is not supported. Only `tiny`, `base` and `large` models are currently supported."
        )

    backbone_config = SwinConfig(
        image_size=image_size,
        window_size=window_size,
        embed_dim=embed_dim,
        depths=depths,
        num_heads=num_heads,
        out_features=out_features,
    )
    return MMGroundingDinoConfig(
        backbone_config=backbone_config,
        num_feature_levels=num_feature_levels,
    )
def get_mm_grounding_dino_processor() -> GroundingDinoProcessor:
    """Create a GroundingDino processor pairing the default image processor with a BERT-uncased tokenizer."""
    return GroundingDinoProcessor(
        GroundingDinoImageProcessor(),
        BertTokenizer.from_pretrained("bert-base-uncased"),
    )
# Copied from: https://github.com/iSEE-Laboratory/LLMDet/blob/96ec8c82a9d97b170db759e043afd5b81445d0f1/hf_model/mmdet2groundingdino_swint.py#L8C1-L13C13
def correct_unfold_reduction_order(x: torch.Tensor) -> torch.Tensor:
    """Reorder the input columns of a Swin patch-merging reduction weight.

    Groups columns in fours, swaps the middle two positions of each group
    ([0, 1, 2, 3] -> [0, 2, 1, 3]), and restores the original 2D shape.
    """
    rows, cols = x.shape
    grouped = x.reshape(rows, cols // 4, 4).transpose(1, 2)
    reordered = grouped[:, [0, 2, 1, 3], :]
    return reordered.reshape(rows, cols)
# Copied from: https://github.com/iSEE-Laboratory/LLMDet/blob/96ec8c82a9d97b170db759e043afd5b81445d0f1/hf_model/mmdet2groundingdino_swint.py#L15C1-L20C13
def correct_unfold_norm_order(x: torch.Tensor) -> torch.Tensor:
    """Reorder a Swin patch-merging norm vector to match the HF unfold layout.

    1D counterpart of `correct_unfold_reduction_order`: interleaves the vector
    in groups of four with the middle two swapped.
    """
    length = x.shape[0]
    grouped = x.reshape(length // 4, 4).transpose(0, 1)
    reordered = grouped[[0, 2, 1, 3], :]
    return reordered.reshape(length)
def preprocess_old_state(state_dict: dict, config: MMGroundingDinoConfig) -> dict:
    """
    Preprocesses old state dict to enable 1-1 mapping:
    - split qkv projections in Swin backbone
    - reorder reduction and norm parameters in Swin backbone
    - shift output norm indices in Swin backbone
    - shift output proj indices in neck
    - split q,k,v projections in text self and cross attentions in encoder and decoder
    - duplicate detection head parameters for decoder and encoder
    """
    new_state_dict = state_dict.copy()
    # Iterate over the original key set so that mutating `new_state_dict` is safe.
    for k in state_dict:
        if k.startswith("backbone"):
            if "downsample.reduction" in k:
                new_state_dict[k] = correct_unfold_reduction_order(new_state_dict.pop(k))
            elif "downsample.norm" in k:
                new_state_dict[k] = correct_unfold_norm_order(new_state_dict.pop(k))
            elif "w_msa.qkv" in k:
                # Split fused qkv into equal query/key/value thirds.
                q_param, k_param, v_param = new_state_dict.pop(k).chunk(3)
                new_state_dict[k.replace("qkv", "query")] = q_param
                new_state_dict[k.replace("qkv", "key")] = k_param
                new_state_dict[k.replace("qkv", "value")] = v_param
            elif "backbone.norm" in k:
                # backbone.norm{i} -> backbone.norms.{i+1}: output norm indices are shifted by one.
                match = re.match(r"backbone.norm(\d+).(weight|bias)", k)
                new_state_dict[f"backbone.norms.{int(match.group(1)) + 1}.{match.group(2)}"] = new_state_dict.pop(k)
        elif k.startswith("neck.extra_convs"):
            # Extra convs are appended after the regular neck convs, so shift their indices.
            num_normal_convs = len(config.backbone_config.out_indices)
            if "gn" in k:
                match = re.match(r"neck.extra_convs.(\d+).gn.(weight|bias)", k)
                new_state_dict[f"neck.extra_convs.{num_normal_convs + int(match.group(1))}.gn.{match.group(2)}"] = (
                    new_state_dict.pop(k)
                )
            elif "conv" in k:
                match = re.match(r"neck.extra_convs.(\d+).conv.(weight|bias)", k)
                new_state_dict[f"neck.extra_convs.{num_normal_convs + int(match.group(1))}.conv.{match.group(2)}"] = (
                    new_state_dict.pop(k)
                )
        elif k.startswith("encoder"):
            if "self_attn.attn.in_proj" in k:
                # Split fused in_proj into query/key/value thirds.
                q_param, k_param, v_param = new_state_dict.pop(k).chunk(3)
                new_state_dict[k.replace("in", "query")] = q_param
                new_state_dict[k.replace("in", "key")] = k_param
                new_state_dict[k.replace("in", "value")] = v_param
        elif k.startswith("decoder"):
            if "self_attn.attn.in_proj" in k or "cross_attn_text.attn.in_proj" in k:
                q_param, k_param, v_param = new_state_dict.pop(k).chunk(3)
                new_state_dict[k.replace("in", "query")] = q_param
                new_state_dict[k.replace("in", "key")] = k_param
                new_state_dict[k.replace("in", "value")] = v_param
        elif k.startswith("bbox_head"):
            # The shared head holds both per-decoder-layer branches (first `decoder_layers`
            # entries) and the extra encoder-output branch (the remainder).
            num_decoder_layers = config.decoder_layers
            match = re.match(r"bbox_head.(cls|reg)_branches.(\d+).(.*)", k)
            cls_or_reg = match.group(1)
            layer_idx = int(match.group(2))
            suffix = match.group(3)
            if layer_idx < num_decoder_layers:
                new_key = f"decoder.bbox_head.{cls_or_reg}_branches.{layer_idx}.{suffix}"
                new_state_dict[new_key] = new_state_dict[k]  # copy
            else:
                new_key = f"encoder.bbox_head.{cls_or_reg}_branch.{suffix}"
                new_state_dict[new_key] = new_state_dict.pop(k)  # move
        # remove unused params
        if (
            k == "dn_query_generator.label_embedding.weight"
            or k == "language_model.language_backbone.body.model.embeddings.position_ids"
            or k == "image_seperate.weight"
            or k.startswith("lmm")
            or k.startswith("connector")
            or k.startswith("region_connector")
            or k.startswith("ref_point_head")
        ):
            new_state_dict.pop(k)
    return new_state_dict
# Copied from transformers/models/siglip2/convert_siglip2_to_hf.py
def convert_old_keys_to_new_keys(state_dict_keys: list) -> dict:
    """
    This function should be applied only once, on the concatenated keys to efficiently rename using
    the key mappings.
    """
    if state_dict_keys is None:
        return {}
    # Join all keys into a single newline-separated blob so each regex is applied
    # once over the whole text instead of once per key.
    joined_old_keys = "\n".join(state_dict_keys)
    joined_new_keys = joined_old_keys
    for pattern, replacement in ORIGINAL_TO_CONVERTED_KEY_MAPPING.items():
        # A `None` replacement maps the matched key to an empty line (drops it).
        joined_new_keys = re.sub(pattern, replacement if replacement is not None else "", joined_new_keys)
    return dict(zip(joined_old_keys.split("\n"), joined_new_keys.split("\n")))
def convert_mm_to_hf_state(original_state: dict, hf_cfg: MMGroundingDinoConfig) -> dict:
    """Rename every tensor of the original mm-detection state dict to its HF key."""
    original_state = preprocess_old_state(original_state, hf_cfg)
    key_map = convert_old_keys_to_new_keys(list(original_state.keys()))
    # Pop as we go so tensors are moved into the new dict rather than duplicated.
    return {key_map[old_key]: original_state.pop(old_key) for old_key in list(original_state.keys())}
def prepare_test_inputs():
    """Download a sample COCO image and pair it with label prompts for output verification."""
    image_url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    with httpx.stream("GET", image_url) as response:
        image = Image.open(BytesIO(response.read()))
    labels = [["cat", "remote"]]
    return image, labels
@torch.no_grad()
def convert_mm_grounding_dino_checkpoint(
    model_name: str,
    verify_outputs: bool,
    push_to_hub: bool,
    hub_user_name: str,
) -> tuple[MMGroundingDinoConfig, dict]:
    """Convert an original MM Grounding DINO checkpoint to the HF format.

    Args:
        model_name: Key into `MODEL_NAME_TO_CHECKPOINT_URL_MAPPING` selecting the checkpoint.
        verify_outputs: If True, run a sample inference and compare against known-good outputs.
        push_to_hub: If True, upload the converted model and processor to the hub.
        hub_user_name: Hub namespace to push under (only used when `push_to_hub` is True).

    Returns:
        The HF config and the converted state dict.
    """
    # Load original state
    checkpoint_url = MODEL_NAME_TO_CHECKPOINT_URL_MAPPING[model_name]
    print(f"Loading checkpoint from: {checkpoint_url}")
    ckpt = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    mm_state = ckpt["state_dict"]
    # Create hf model and processor
    print("Creating model...")
    hf_cfg = get_mm_grounding_dino_config(model_name)
    hf_state = convert_mm_to_hf_state(mm_state, hf_cfg)
    hf_model = MMGroundingDinoForObjectDetection(hf_cfg).eval()
    hf_model.load_state_dict(hf_state)
    hf_processor = get_mm_grounding_dino_processor()
    # Verify outputs if needed
    if verify_outputs:
        print("Running inference to verify outputs...")
        image, text = prepare_test_inputs()
        model_inputs = hf_processor(images=image, text=text, return_tensors="pt")
        model_outputs = hf_model(**model_inputs)
        results = hf_processor.post_process_grounded_object_detection(
            model_outputs,
            model_inputs.input_ids,
            box_threshold=0.4,
            text_threshold=0.3,
        )
        # Single test image, so only one result entry.
        result = results[0]
        print(result)
        expected = MODEL_NAME_TO_EXPECTED_OUTPUT_MAPPING[model_name]
        for key in expected:
            torch.testing.assert_close(result[key], expected[key], atol=1e-3, rtol=1e-3)
        print("Outputs match.")
    # Push to hub if needed
    if push_to_hub:
        print("Pushing to hub...")
        hub_url = f"{hub_user_name}/{model_name}"
        hf_model.push_to_hub(hub_url)
        hf_processor.push_to_hub(hub_url)
        print(f"Pushed to huggingface hub at: {hub_url}.")
    return hf_cfg, hf_state
def parse_args():
    """Parse the CLI arguments for the conversion script.

    Returns:
        argparse.Namespace with `model_name`, `hub_user_name`, `push_to_hub`, `verify_outputs`.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model-name",
        required=True,
        type=str,
        choices=list(MODEL_NAME_TO_CHECKPOINT_URL_MAPPING.keys()),
        # Fixed help text: the argument is a model name (mapping key), not a URL —
        # the checkpoint URL is looked up from MODEL_NAME_TO_CHECKPOINT_URL_MAPPING.
        help="Name of the original mm grounding dino checkpoint to convert.",
    )
    parser.add_argument("--hub-user-name", type=str, help="User name on the huggingface hub.")
    parser.add_argument("--push-to-hub", action="store_true", help="Whether to push model to hub or not.")
    parser.add_argument(
        "--verify-outputs", action="store_true", help="Whether to verify that model output is correct or not."
    )
    return parser.parse_args()
if __name__ == "__main__":
    # Script entry point: convert the requested checkpoint, optionally verifying
    # outputs and uploading the result to the hub.
    cli_args = parse_args()
    convert_mm_grounding_dino_checkpoint(
        model_name=cli_args.model_name,
        verify_outputs=cli_args.verify_outputs,
        push_to_hub=cli_args.push_to_hub,
        hub_user_name=cli_args.hub_user_name,
    )
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/mm_grounding_dino/convert_mm_grounding_dino_to_hf.py",
"license": "Apache License 2.0",
"lines": 465,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/mm_grounding_dino/modular_mm_grounding_dino.py | # Copyright 2025 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import torch
from torch import nn
from ... import initialization as init
from ...backbone_utils import consolidate_backbone_kwargs_to_config
from ...configuration_utils import PreTrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING, AutoConfig
from ..auto.modeling_auto import AutoModel
from ..grounding_dino.modeling_grounding_dino import (
GroundingDinoContrastiveEmbedding,
GroundingDinoConvEncoder,
GroundingDinoConvModel,
GroundingDinoDecoder,
GroundingDinoEncoder,
GroundingDinoForObjectDetection,
GroundingDinoMLPPredictionHead,
GroundingDinoModel,
GroundingDinoPreTrainedModel,
build_position_encoding,
)
logger = logging.get_logger(__name__)
class MMGroundingDinoConfig(PreTrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`MMGroundingDinoModel`]. It is used to instantiate a
    MM Grounding DINO model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the MM Grounding DINO tiny architecture
    [openmmlab-community/mm_grounding_dino_tiny_o365v1_goldg_v3det](https://huggingface.co/openmmlab-community/mm_grounding_dino_tiny_o365v1_goldg_v3det).
    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.
    Args:
        backbone_config (`Union[dict, "PreTrainedConfig"]`, *optional*, defaults to `SwinConfig()`):
            The configuration of the backbone model.
        text_config (`Union[AutoConfig, dict]`, *optional*, defaults to `BertConfig`):
            The config object or dictionary of the text backbone.
        num_queries (`int`, *optional*, defaults to 900):
            Number of object queries, i.e. detection slots. This is the maximal number of objects
            [`MMGroundingDinoModel`] can detect in a single image.
        encoder_layers (`int`, *optional*, defaults to 6):
            Number of encoder layers.
        encoder_ffn_dim (`int`, *optional*, defaults to 2048):
            Dimension of the "intermediate" (often named feed-forward) layer in decoder.
        encoder_attention_heads (`int`, *optional*, defaults to 8):
            Number of attention heads for each attention layer in the Transformer encoder.
        decoder_layers (`int`, *optional*, defaults to 6):
            Number of decoder layers.
        decoder_ffn_dim (`int`, *optional*, defaults to 2048):
            Dimension of the "intermediate" (often named feed-forward) layer in decoder.
        decoder_attention_heads (`int`, *optional*, defaults to 8):
            Number of attention heads for each attention layer in the Transformer decoder.
        is_encoder_decoder (`bool`, *optional*, defaults to `True`):
            Whether the model is used as an encoder/decoder or not.
        activation_function (`str` or `function`, *optional*, defaults to `"relu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"silu"` and `"gelu_new"` are supported.
        d_model (`int`, *optional*, defaults to 256):
            Dimension of the layers.
        dropout (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        activation_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for activations inside the fully connected layer.
        auxiliary_loss (`bool`, *optional*, defaults to `False`):
            Whether auxiliary decoding losses (loss at each decoder layer) are to be used.
        position_embedding_type (`str`, *optional*, defaults to `"sine"`):
            Type of position embeddings to be used on top of the image features. One of `"sine"` or `"learned"`.
        num_feature_levels (`int`, *optional*, defaults to 4):
            The number of input feature levels.
        encoder_n_points (`int`, *optional*, defaults to 4):
            The number of sampled keys in each feature level for each attention head in the encoder.
        decoder_n_points (`int`, *optional*, defaults to 4):
            The number of sampled keys in each feature level for each attention head in the decoder.
        two_stage (`bool`, *optional*, defaults to `True`):
            Whether to apply a two-stage deformable DETR, where the region proposals are also generated by a variant of
            Grounding DINO, which are further fed into the decoder for iterative bounding box refinement.
        class_cost (`float`, *optional*, defaults to 1.0):
            Relative weight of the classification error in the Hungarian matching cost.
        bbox_cost (`float`, *optional*, defaults to 5.0):
            Relative weight of the L1 error of the bounding box coordinates in the Hungarian matching cost.
        giou_cost (`float`, *optional*, defaults to 2.0):
            Relative weight of the generalized IoU loss of the bounding box in the Hungarian matching cost.
        bbox_loss_coefficient (`float`, *optional*, defaults to 5.0):
            Relative weight of the L1 bounding box loss in the object detection loss.
        giou_loss_coefficient (`float`, *optional*, defaults to 2.0):
            Relative weight of the generalized IoU loss in the object detection loss.
        focal_alpha (`float`, *optional*, defaults to 0.25):
            Alpha parameter in the focal loss.
        disable_custom_kernels (`bool`, *optional*, defaults to `False`):
            Disable the use of custom CUDA and CPU kernels. This option is necessary for the ONNX export, as custom
            kernels are not supported by PyTorch ONNX export.
        max_text_len (`int`, *optional*, defaults to 256):
            The maximum length of the text input.
        text_enhancer_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the text enhancer.
        fusion_droppath (`float`, *optional*, defaults to 0.1):
            The droppath ratio for the fusion module.
        fusion_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the fusion module.
        embedding_init_target (`bool`, *optional*, defaults to `True`):
            Whether to initialize the target with Embedding weights.
        query_dim (`int`, *optional*, defaults to 4):
            The dimension of the query vector.
        positional_embedding_temperature (`float`, *optional*, defaults to 20):
            The temperature for Sine Positional Embedding that is used together with vision backbone.
        init_std (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon used by the layer normalization layers.
        tie_word_embeddings (`bool`, *optional*, defaults to `True`):
            Whether to tie weight embeddings
    Examples:
    ```python
    >>> from transformers import MMGroundingDinoConfig, MMGroundingDinoModel
    >>> # Initializing a MM Grounding DINO configuration
    >>> configuration = MMGroundingDinoConfig()
    >>> # Initializing a model (with random weights) from the configuration
    >>> model = MMGroundingDinoModel(configuration)
    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""
    model_type = "mm-grounding-dino"
    sub_configs = {"backbone_config": AutoConfig, "text_config": AutoConfig}
    # Map generic config attribute names onto this model's specific ones.
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__(
        self,
        backbone_config=None,
        text_config=None,
        num_queries=900,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        num_feature_levels=4,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=True,
        class_cost=1.0,
        bbox_cost=5.0,
        giou_cost=2.0,
        bbox_loss_coefficient=5.0,
        giou_loss_coefficient=2.0,
        focal_alpha=0.25,
        disable_custom_kernels=False,
        # other parameters
        max_text_len=256,
        text_enhancer_dropout=0.0,
        fusion_droppath=0.1,
        fusion_dropout=0.0,
        embedding_init_target=True,
        query_dim=4,
        positional_embedding_temperature=20,
        init_std=0.02,
        layer_norm_eps=1e-5,
        tie_word_embeddings=True,
        **kwargs,
    ):
        # Resolve the vision backbone config; defaults to a Swin backbone with
        # multi-scale outputs (stages 2-4) when nothing is provided.
        backbone_config, kwargs = consolidate_backbone_kwargs_to_config(
            backbone_config=backbone_config,
            default_config_type="swin",
            default_config_kwargs={"out_indices": [2, 3, 4]},
            **kwargs,
        )
        self.backbone_config = backbone_config
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        # Text backbone: accept a dict (model_type defaults to "bert") or a
        # ready-made config object; fall back to a default BertConfig.
        if isinstance(text_config, dict):
            text_config["model_type"] = text_config.get("model_type", "bert")
            text_config = CONFIG_MAPPING[text_config["model_type"]](**text_config)
        elif text_config is None:
            logger.info("text_config is None. Initializing the text config with default values (`BertConfig`).")
            text_config = CONFIG_MAPPING["bert"]()
        self.text_config = text_config
        self.max_text_len = max_text_len
        # Text Enhancer
        self.text_enhancer_dropout = text_enhancer_dropout
        # Fusion
        self.fusion_droppath = fusion_droppath
        self.fusion_dropout = fusion_dropout
        # Others
        self.embedding_init_target = embedding_init_target
        self.query_dim = query_dim
        self.positional_embedding_temperature = positional_embedding_temperature
        self.init_std = init_std
        self.layer_norm_eps = layer_norm_eps
        self.tie_word_embeddings = tie_word_embeddings
        # Parent handles common kwargs (e.g. output_attentions) and validation.
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
class MMGroundingDinoContrastiveEmbedding(GroundingDinoContrastiveEmbedding):
    """Contrastive vision-text embedding with a learnable scalar bias on the logits."""

    def __init__(self, config):
        super().__init__(config)
        # Single scalar bias added to every similarity logit.
        self.bias = nn.Parameter(torch.tensor(0.0))

    def forward(
        self,
        vision_hidden_state: torch.FloatTensor,
        text_hidden_state: torch.FloatTensor,
        text_token_mask: torch.BoolTensor,
    ) -> torch.FloatTensor:
        # Scaled dot-product similarity between vision queries and text tokens.
        scale = math.sqrt(vision_hidden_state.shape[-1])
        logits = vision_hidden_state @ text_hidden_state.transpose(-1, -2)
        logits = logits / scale + self.bias
        # Mask out padding text tokens so they never win the classification.
        logits.masked_fill_(~text_token_mask[:, None, :], float("-inf"))
        # Right-pad the text dimension up to max_text_len with -inf.
        # NOTE(review): assumes the parent class sets `self.max_text_len` — confirm.
        padded = torch.full((*logits.shape[:-1], self.max_text_len), float("-inf"), device=logits.device)
        padded[..., : logits.shape[-1]] = logits
        return padded
class MMGroundingDinoPreTrainedModel(GroundingDinoPreTrainedModel):
    @torch.no_grad()
    def _init_weights(self, module):
        """Delegate to the parent init, then set the contrastive bias to the focal-loss prior."""
        super()._init_weights(module)
        if isinstance(module, MMGroundingDinoContrastiveEmbedding):
            # Bias initialized so sigmoid(bias) == prior_prob at the start of training.
            prior_prob = 0.01
            init.constant_(module.bias, -math.log((1 - prior_prob) / prior_prob))
class MMGroundingDinoConvEncoder(GroundingDinoConvEncoder):
    # Identical to the Grounding DINO conv encoder; subclassed only for naming.
    pass
class MMGroundingDinoConvModel(GroundingDinoConvModel):
    # Identical to the Grounding DINO conv model; subclassed only for naming.
    pass
class MMGroundingDinoEncoder(GroundingDinoEncoder):
    # Identical to the Grounding DINO encoder; subclassed only for naming.
    pass
class MMGroundingDinoDecoder(GroundingDinoDecoder):
    # Identical to the Grounding DINO decoder; subclassed only for naming.
    pass
class MMGroundingDinoModel(GroundingDinoModel, MMGroundingDinoPreTrainedModel):
    """MM Grounding DINO base model: vision backbone + text backbone + deformable encoder/decoder."""

    def __init__(self, config: MMGroundingDinoConfig):
        MMGroundingDinoPreTrainedModel.__init__(self, config)
        # Create backbone + positional encoding
        backbone = MMGroundingDinoConvEncoder(config)
        position_embeddings = build_position_encoding(config)
        self.backbone = MMGroundingDinoConvModel(backbone, position_embeddings)
        # Create input projection layers: one 1x1 conv per backbone feature map...
        num_backbone_outs = len(backbone.intermediate_channel_sizes)
        input_proj_list = []
        for i in range(num_backbone_outs):
            in_channels = backbone.intermediate_channel_sizes[i]
            input_proj_list.append(
                nn.Sequential(
                    nn.Conv2d(in_channels, config.d_model, kernel_size=1),
                    nn.GroupNorm(32, config.d_model),
                )
            )
        # ...plus strided 3x3 convs to synthesize extra feature levels; the first
        # extra level reads from the last backbone output, subsequent ones from d_model.
        for _ in range(config.num_feature_levels - num_backbone_outs):
            input_proj_list.append(
                nn.Sequential(
                    nn.Conv2d(in_channels, config.d_model, kernel_size=3, stride=2, padding=1),
                    nn.GroupNorm(32, config.d_model),
                )
            )
            in_channels = config.d_model
        self.input_proj_vision = nn.ModuleList(input_proj_list)
        # Create text backbone and a projection into the shared d_model space.
        self.text_backbone = AutoModel.from_config(config.text_config, add_pooling_layer=False)
        self.text_projection = nn.Linear(config.text_config.hidden_size, config.d_model)
        if config.embedding_init_target or not config.two_stage:
            self.query_position_embeddings = nn.Embedding(config.num_queries, config.d_model)
        self.encoder = MMGroundingDinoEncoder(config)
        self.decoder = MMGroundingDinoDecoder(config)
        # Learned per-feature-level embedding added to multi-scale features.
        self.level_embed = nn.Parameter(torch.Tensor(config.num_feature_levels, config.d_model))
        # Two-stage heads applied to encoder output proposals.
        self.enc_output = nn.Linear(config.d_model, config.d_model)
        self.enc_output_norm = nn.LayerNorm(config.d_model, config.layer_norm_eps)
        self.encoder_output_bbox_embed = MMGroundingDinoMLPPredictionHead(
            input_dim=config.d_model, hidden_dim=config.d_model, output_dim=4, num_layers=3
        )
        self.encoder_output_class_embed = MMGroundingDinoContrastiveEmbedding(config)
        self.post_init()
class MMGroundingDinoMLPPredictionHead(GroundingDinoMLPPredictionHead):
    # Identical to the Grounding DINO MLP prediction head; subclassed only for naming.
    pass
class MMGroundingDinoForObjectDetection(GroundingDinoForObjectDetection, MMGroundingDinoPreTrainedModel):
    """Object-detection head on top of MMGroundingDinoModel with per-layer class/bbox heads."""

    # Every non-zero-index head shares weights with head 0, and the decoder's
    # head references alias the top-level ModuleLists.
    # NOTE(review): the `class_embed` pattern anchors with `^` while the
    # `bbox_embed` one does not — confirm the asymmetry is intentional.
    _tied_weights_keys = {
        r"bbox_embed.(?![0])\d+": r"bbox_embed.0",
        r"class_embed.(?![0])\d+": r"^class_embed.0",
        "model.decoder.bbox_embed": "bbox_embed",
        "model.decoder.class_embed": "class_embed",
    }

    def __init__(self, config: MMGroundingDinoConfig):
        MMGroundingDinoPreTrainedModel.__init__(self, config)
        self.model = MMGroundingDinoModel(config)
        # One classification head and one bbox-regression head per decoder layer.
        self.class_embed = nn.ModuleList(
            [MMGroundingDinoContrastiveEmbedding(config) for _ in range(config.decoder_layers)]
        )
        self.bbox_embed = nn.ModuleList(
            [
                MMGroundingDinoMLPPredictionHead(
                    input_dim=config.d_model, hidden_dim=config.d_model, output_dim=4, num_layers=3
                )
                for _ in range(config.decoder_layers)
            ]
        )
        # Initialize weights and apply final processing
        self.model.decoder.class_embed = self.class_embed  # class embed has no weights so nothing to tie
        self.model.decoder.bbox_embed = self.bbox_embed
        self.post_init()
# Public API re-exported by the modular conversion machinery.
__all__ = [
    "MMGroundingDinoConfig",
    "MMGroundingDinoForObjectDetection",
    "MMGroundingDinoModel",
    "MMGroundingDinoPreTrainedModel",
]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/mm_grounding_dino/modular_mm_grounding_dino.py",
"license": "Apache License 2.0",
"lines": 347,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:tests/models/mm_grounding_dino/test_modeling_mm_grounding_dino.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch MM Grounding DINO model."""
import collections
import copy
import inspect
import math
import re
import unittest
from functools import cached_property
from datasets import load_dataset
from transformers import (
MMGroundingDinoConfig,
SwinConfig,
is_torch_available,
is_vision_available,
)
from transformers.testing_utils import (
is_flaky,
require_timm,
require_torch,
require_torch_accelerator,
require_vision,
slow,
torch_device,
)
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MMGroundingDinoConfig, MMGroundingDinoForObjectDetection, MMGroundingDinoModel
from transformers.pytorch_utils import id_tensor_storage
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor
# Copied from tests.models.grounding_dino.test_modeling_grounding_dino.generate_fake_bounding_boxes
def generate_fake_bounding_boxes(n_boxes):
    """Generate ``n_boxes`` random boxes in (center_x, center_y, width, height) format.

    Every box is adjusted so it lies fully inside the normalized [0, 1] x [0, 1] space.

    Raises:
        TypeError: if ``n_boxes`` is not an integer.
        ValueError: if ``n_boxes`` is not positive.
    """
    if not isinstance(n_boxes, int):
        raise TypeError("n_boxes must be an integer")
    if n_boxes <= 0:
        raise ValueError("n_boxes must be a positive integer")

    # Draw raw boxes, then repair them so nothing crosses a boundary.
    raw_boxes = torch.rand((n_boxes, 4))
    center_x, center_y, width, height = raw_boxes.unbind(dim=1)

    # Cap extents at the full normalized range.
    width = width.clamp(max=1.0)
    height = height.clamp(max=1.0)

    # Shift centers so left/right (top/bottom) edges stay within [0, 1].
    center_x = torch.maximum(center_x, width / 2)
    center_x = torch.minimum(center_x, 1 - width / 2)
    center_y = torch.maximum(center_y, height / 2)
    center_y = torch.minimum(center_y, 1 - height / 2)

    return torch.stack([center_x, center_y, width, height], dim=1)
# Copied from tests.models.grounding_dino.test_modeling_grounding_dino.GroundingDinoModelTester with GroundingDino->MMGroundingDino
class MMGroundingDinoModelTester:
    """Builds a tiny MMGroundingDino config plus random inputs/labels for the unit tests."""

    def __init__(
        self,
        parent,
        batch_size=4,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        num_queries=2,
        num_channels=3,
        image_size=98,
        n_targets=8,
        num_labels=2,
        num_feature_levels=4,
        encoder_n_points=2,
        decoder_n_points=6,
        max_text_len=7,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.image_size = image_size
        self.n_targets = n_targets
        self.num_labels = num_labels
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.max_text_len = max_text_len
        # we also set the expected seq length for both encoder and decoder
        # (sum of flattened multi-scale feature maps at strides 8/16/32/64)
        self.encoder_seq_length_vision = (
            math.ceil(self.image_size / 8) ** 2
            + math.ceil(self.image_size / 16) ** 2
            + math.ceil(self.image_size / 32) ** 2
            + math.ceil(self.image_size / 64) ** 2
        )
        self.encoder_seq_length_text = self.max_text_len
        self.decoder_seq_length = self.num_queries

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, pixel_mask, input_ids, labels) with random contents."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        pixel_mask = torch.ones([self.batch_size, self.image_size, self.image_size], device=torch_device)
        # When using `MMGroundingDino` the text input template is '{label1}. {label2}. {label3. ... {labelN}.'
        # Therefore to avoid errors when running tests with `labels` `input_ids` have to follow this structure.
        # Otherwise when running `build_label_maps` it will throw an error when trying to split the input_ids into segments.
        input_ids = torch.tensor([101, 3869, 1012, 11420, 3869, 1012, 102], device=torch_device)
        input_ids = input_ids.unsqueeze(0).expand(self.batch_size, -1)
        labels = None
        if self.use_labels:
            # labels is a list of Dict (each Dict being the labels for a given example in the batch)
            labels = []
            for i in range(self.batch_size):
                target = {}
                target["class_labels"] = torch.randint(
                    high=self.num_labels, size=(self.n_targets,), device=torch_device
                )
                target["boxes"] = generate_fake_bounding_boxes(self.n_targets).to(torch_device)
                target["masks"] = torch.rand(self.n_targets, self.image_size, self.image_size, device=torch_device)
                labels.append(target)
        config = self.get_config()
        return config, pixel_values, pixel_mask, input_ids, labels

    def get_config(self):
        """Build a minimal MMGroundingDinoConfig with a tiny Swin vision and BERT text backbone."""
        swin_config = SwinConfig(
            window_size=7,
            embed_dim=8,
            depths=[1, 1, 1, 1],
            num_heads=[1, 1, 1, 1],
            image_size=self.image_size,
            out_features=["stage2", "stage3", "stage4"],
            out_indices=[2, 3, 4],
        )
        text_backbone = {
            "hidden_size": 8,
            "num_hidden_layers": 2,
            "num_attention_heads": 2,
            "intermediate_size": 8,
            "max_position_embeddings": 8,
            "model_type": "bert",
        }
        return MMGroundingDinoConfig(
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            num_queries=self.num_queries,
            num_labels=self.num_labels,
            num_feature_levels=self.num_feature_levels,
            encoder_n_points=self.encoder_n_points,
            decoder_n_points=self.decoder_n_points,
            use_timm_backbone=False,
            backbone_config=swin_config,
            max_text_len=self.max_text_len,
            text_config=text_backbone,
        )

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the shape the common ModelTesterMixin expects."""
        config, pixel_values, pixel_mask, input_ids, labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask, "input_ids": input_ids}
        return config, inputs_dict

    def create_and_check_model(self, config, pixel_values, pixel_mask, input_ids, labels):
        """Forward the base model and check the last hidden state shape."""
        model = MMGroundingDinoModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values=pixel_values, pixel_mask=pixel_mask, input_ids=input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.num_queries, self.hidden_size))

    def create_and_check_object_detection_head_model(self, config, pixel_values, pixel_mask, input_ids, labels):
        """Forward the detection model with and without labels and check output/loss shapes."""
        model = MMGroundingDinoForObjectDetection(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values=pixel_values, pixel_mask=pixel_mask, input_ids=input_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_queries, config.max_text_len))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_queries, 4))
        result = model(pixel_values=pixel_values, pixel_mask=pixel_mask, input_ids=input_ids, labels=labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_queries, config.max_text_len))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_queries, 4))
@require_torch
# Copied from tests.models.grounding_dino.test_modeling_grounding_dino.GroundingDinoModelTest with Grounding->MMGrounding
class MMGroundingDinoModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (MMGroundingDinoModel, MMGroundingDinoForObjectDetection) if is_torch_available() else ()
is_encoder_decoder = True
test_missing_keys = False
pipeline_model_mapping = (
{
"image-feature-extraction": MMGroundingDinoModel,
"zero-shot-object-detection": MMGroundingDinoForObjectDetection,
}
if is_torch_available()
else {}
)
# special case for head models
def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
if return_labels:
if model_class.__name__ == "MMGroundingDinoForObjectDetection":
labels = []
for i in range(self.model_tester.batch_size):
target = {}
target["class_labels"] = torch.ones(
size=(self.model_tester.n_targets,), device=torch_device, dtype=torch.long
)
target["boxes"] = torch.ones(
self.model_tester.n_targets, 4, device=torch_device, dtype=torch.float
)
target["masks"] = torch.ones(
self.model_tester.n_targets,
self.model_tester.image_size,
self.model_tester.image_size,
device=torch_device,
dtype=torch.float,
)
labels.append(target)
inputs_dict["labels"] = labels
return inputs_dict
def setUp(self):
self.model_tester = MMGroundingDinoModelTester(self)
self.config_tester = ConfigTester(
self,
config_class=MMGroundingDinoConfig,
has_text_modality=False,
common_properties=["d_model", "encoder_attention_heads", "decoder_attention_heads"],
)
def test_config(self):
self.config_tester.run_common_tests()
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_object_detection_head_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_object_detection_head_model(*config_and_inputs)
@unittest.skip(reason="MMGrounding DINO does not use inputs_embeds")
def test_inputs_embeds(self):
pass
@unittest.skip(reason="MMGrounding DINO does not have a get_input_embeddings method")
def test_model_get_set_embeddings(self):
pass
@unittest.skip(reason="MMGrounding DINO does not use token embeddings")
def test_resize_tokens_embeddings(self):
pass
@unittest.skip(reason="Feed forward chunking is not implemented")
def test_feed_forward_chunking(self):
pass
@unittest.skip(reason="Weight tying is hardcoded (module_x = module_y) and always `True`")
def test_load_save_without_tied_weights(self):
pass
# Ignore copy
def test_tie_weights_is_not_modified(self):
# this model doesn't need a test
pass
def test_attention_outputs(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
for model_class in self.all_model_classes:
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = False
config.return_dict = True
model = model_class._from_config(config, attn_implementation="eager")
config = model.config
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.encoder_attentions[-1]
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
config.output_attentions = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.encoder_attentions[-1]
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(attentions[0].shape[-3:]),
[
self.model_tester.num_attention_heads,
self.model_tester.num_feature_levels,
self.model_tester.encoder_n_points,
],
)
out_len = len(outputs)
correct_outlen = 12
# loss is at first position
if "labels" in inputs_dict:
correct_outlen += 1 # loss is added to beginning
# Object Detection model returns pred_logits and pred_boxes and input_ids
if model_class.__name__ == "MMGroundingDinoForObjectDetection":
correct_outlen += 3
self.assertEqual(out_len, correct_outlen)
# decoder attentions
decoder_attentions = outputs.decoder_attentions[0]
self.assertIsInstance(decoder_attentions, (list, tuple))
self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(decoder_attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, self.model_tester.num_queries, self.model_tester.num_queries],
)
# cross attentions
cross_attentions = outputs.decoder_attentions[-1]
self.assertIsInstance(cross_attentions, (list, tuple))
self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(cross_attentions[0].shape[-3:]),
[
self.model_tester.num_attention_heads,
self.model_tester.num_feature_levels,
self.model_tester.decoder_n_points,
],
)
# Check attention is always last and order is fine
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
self.assertEqual(out_len + 3, len(outputs))
self_attentions = outputs.encoder_attentions[-1]
self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(self_attentions[0].shape[-3:]),
[
self.model_tester.num_attention_heads,
self.model_tester.num_feature_levels,
self.model_tester.encoder_n_points,
],
)
# overwrite since hidden_states are called encoder_text_hidden_states
def test_hidden_states_output(self):
    """Check hidden-state outputs of the vision encoder, text encoder and decoder.

    Overridden from the common test because this model exposes two encoder
    streams (`encoder_vision_hidden_states` and `encoder_text_hidden_states`)
    instead of a single `encoder_hidden_states`.
    """

    def check_hidden_states_output(inputs_dict, config, model_class):
        model = model_class(config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
        # Vision-encoder stream: expected length is num_hidden_layers + 1 unless
        # the tester overrides `expected_num_hidden_layers`.
        hidden_states = outputs.encoder_vision_hidden_states
        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)
        seq_len = self.model_tester.encoder_seq_length_vision
        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [seq_len, self.model_tester.hidden_size],
        )
        # Text-encoder stream: same layer count, but its own sequence length.
        hidden_states = outputs.encoder_text_hidden_states
        self.assertEqual(len(hidden_states), expected_num_layers)
        seq_len = self.model_tester.encoder_seq_length_text
        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [seq_len, self.model_tester.hidden_size],
        )
        # Decoder stream: falls back to `seq_length` when `decoder_seq_length`
        # is not defined on the tester.
        hidden_states = outputs.decoder_hidden_states
        self.assertIsInstance(hidden_states, (list, tuple))
        self.assertEqual(len(hidden_states), expected_num_layers)
        seq_len = getattr(self.model_tester, "seq_length", None)
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)
        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [decoder_seq_length, self.model_tester.hidden_size],
        )

    config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

    for model_class in self.all_model_classes:
        inputs_dict["output_hidden_states"] = True
        check_hidden_states_output(inputs_dict, config, model_class)

        # check that output_hidden_states also work using config
        del inputs_dict["output_hidden_states"]
        config.output_hidden_states = True
        check_hidden_states_output(inputs_dict, config, model_class)
# removed retain_grad and grad on decoder_hidden_states, as queries don't require grad
def test_retain_grad_hidden_states_attentions(self):
    """Verify gradients flow back to retained encoder hidden states and attentions.

    Only the first model class is exercised; per the inline note, the
    different heads behave identically for gradient retention.
    """
    config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
    config.output_hidden_states = True
    config.output_attentions = True

    # no need to test all models as different heads yield the same functionality
    model_class = self.all_model_classes[0]
    model = model_class(config)
    model.to(torch_device)

    inputs = self._prepare_for_class(inputs_dict, model_class)
    outputs = model(**inputs)
    output = outputs[0]

    encoder_hidden_states = outputs.encoder_vision_hidden_states[0]
    encoder_attentions = outputs.encoder_attentions[0][0]
    encoder_hidden_states.retain_grad()
    encoder_attentions.retain_grad()

    # For this model the last entry of `decoder_attentions` holds the
    # cross-attention maps (see the attention-output test earlier in the file).
    cross_attentions = outputs.decoder_attentions[-1][0]
    cross_attentions.retain_grad()

    # Backprop from a single scalar of the first output tensor.
    output.flatten()[0].backward(retain_graph=True)

    self.assertIsNotNone(encoder_hidden_states.grad)
    self.assertIsNotNone(encoder_attentions.grad)
    self.assertIsNotNone(cross_attentions.grad)
def test_forward_signature(self):
    """The first two `forward` arguments must be `pixel_values` then `input_ids`."""
    config, _ = self.model_tester.prepare_config_and_inputs_for_common()
    expected_leading_args = ["pixel_values", "input_ids"]
    for model_class in self.all_model_classes:
        model = model_class(config)
        # signature.parameters is an OrderedDict => iteration order is deterministic
        forward_params = inspect.signature(model.forward).parameters
        observed = list(forward_params)[: len(expected_leading_args)]
        self.assertListEqual(observed, expected_leading_args)
def test_backbone_selection(self):
    """Ensure the model builds and runs with alternative vision backbones.

    Covers two configurations selected via the legacy backbone kwargs kept
    for backward compatibility: a randomly-initialized timm backbone and a
    pretrained HF checkpoint backbone.
    """
    config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

    def _validate_backbone_init(config):
        # Run every model class once and sanity-check the outputs.
        for model_class in self.all_model_classes:
            model = model_class(copy.deepcopy(config))
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            if model_class.__name__ == "MMGroundingDinoForObjectDetection":
                # Detection head logits are (batch, num_queries, max_text_len).
                expected_shape = (
                    self.model_tester.batch_size,
                    self.model_tester.num_queries,
                    config.max_text_len,
                )
                self.assertEqual(outputs.logits.shape, expected_shape)
            self.assertTrue(outputs)

    # These kwargs are all removed and are supported only for BC
    # In new models we have only `backbone_config`. Let's test that there is no regression
    # let's test a random timm backbone
    config_dict = config.to_dict()
    config_dict["backbone"] = "tf_mobilenetv3_small_075"
    config_dict["use_timm_backbone"] = True
    config_dict["backbone_config"] = None
    config_dict["backbone_kwargs"] = {"in_chans": 3, "out_indices": (2, 3, 4)}
    config = config.__class__(**config_dict)
    _validate_backbone_init(config)

    # Test a pretrained HF checkpoint as backbone
    config_dict = config.to_dict()
    config_dict["backbone"] = "microsoft/resnet-18"
    config_dict["backbone_config"] = None
    config_dict["use_timm_backbone"] = False
    config_dict["use_pretrained_backbone"] = True
    config_dict["backbone_kwargs"] = {"out_indices": [2, 3, 4]}
    config = config.__class__(**config_dict)
    _validate_backbone_init(config)
# Copied from tests.models.deformable_detr.test_modeling_deformable_detr.DeformableDetrModelTest.test_two_stage_training with DeformableDetr->MMGroundingDino
def test_two_stage_training(self):
    """Smoke-test a training step with two-stage, box refinement and aux loss on.

    Runs a forward pass with labels and backpropagates the loss to ensure the
    two-stage/auxiliary code paths are trainable end to end.
    """
    model_class = MMGroundingDinoForObjectDetection
    config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
    config.return_dict = True
    config.two_stage = True
    config.auxiliary_loss = True
    config.with_box_refine = True

    model = model_class(config)
    model.to(torch_device)
    model.train()
    inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
    loss = model(**inputs).loss
    loss.backward()
def test_tied_weights_keys(self):
    """Check `_tied_weights_keys` accounts for all shared-storage parameters.

    Unlike the generic common test, groups of up to two leftover shared
    parameters are tolerated here because MMGroundingDino shares `bbox_embed`
    between the detection head and the decoder (see inline comment below).
    """
    config, _ = self.model_tester.prepare_config_and_inputs_for_common()
    config.tie_word_embeddings = True
    for model_class in self.all_model_classes:
        model_tied = model_class(config)

        # Group parameter names by underlying tensor storage.
        ptrs = collections.defaultdict(list)
        for name, tensor in model_tied.state_dict().items():
            ptrs[id_tensor_storage(tensor)].append(name)

        # These are all the pointers of shared tensors.
        tied_params = [names for _, names in ptrs.items() if len(names) > 1]

        tied_weight_keys = model_tied._tied_weights_keys if model_tied._tied_weights_keys is not None else []
        # Detect we get a hit for each key
        for key in tied_weight_keys:
            if not any(re.search(key, p) for group in tied_params for p in group):
                raise ValueError(f"{key} is not a tied weight key for {model_class}.")

        # Removed tied weights found from tied params -> there should only be one left after
        for key in tied_weight_keys:
            for i in range(len(tied_params)):
                tied_params[i] = [p for p in tied_params[i] if re.search(key, p) is None]

        # MMGroundingDino when sharing weights also uses the shared ones in MMGroundingDinoDecoder
        # Therefore, differently from DeformableDetr, we expect the group lens to be 2
        # one for self.bbox_embed in MMGroundingDinoForObjectDetection and another one
        # in the decoder
        tied_params = [group for group in tied_params if len(group) > 2]
        self.assertListEqual(
            tied_params,
            [],
            f"Missing `_tied_weights_keys` for {model_class}: add all of {tied_params} except one.",
        )
# We will verify our results on an image of cute cats
def prepare_img():
    """Load the standard COCO cats fixture image used by the integration tests."""
    return Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
def prepare_text():
    """Return the grounding prompt used by the integration tests."""
    return "a cat."
@require_timm
@require_vision
@slow
class MMGroundingDinoModelIntegrationTests(unittest.TestCase):
    """Slow integration tests against the released MM Grounding DINO checkpoint.

    Expected tensors below are hard-coded reference values recorded from the
    `openmmlab-community/mm_grounding_dino_tiny_o365v1_goldg_v3det` checkpoint.
    """

    @cached_property
    def default_processor(self):
        # Processor for the reference checkpoint, or None when vision extras are missing.
        return (
            AutoProcessor.from_pretrained("openmmlab-community/mm_grounding_dino_tiny_o365v1_goldg_v3det")
            if is_vision_available()
            else None
        )

    def test_inference_object_detection_head(self):
        """End-to-end forward pass checked against hard-coded logits/boxes, plus postprocessing."""
        model = MMGroundingDinoForObjectDetection.from_pretrained(
            "openmmlab-community/mm_grounding_dino_tiny_o365v1_goldg_v3det"
        ).to(torch_device)

        processor = self.default_processor
        image = prepare_img()
        text = prepare_text()
        encoding = processor(images=image, text=text, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**encoding)

        # NOTE(review): the last logits dim is compared to `d_model`; presumably this
        # coincides with the text-logit dimension for this checkpoint — confirm if config changes.
        expected_shape_logits = torch.Size((1, model.config.num_queries, model.config.d_model))
        self.assertEqual(outputs.logits.shape, expected_shape_logits)

        expected_boxes = torch.tensor(
            [[0.7666, 0.4142, 0.4590], [0.2557, 0.5480, 0.4812], [0.5049, 0.5133, 0.9767]]
        ).to(torch_device)
        expected_logits = torch.tensor(
            [[-5.1160, -0.2143, -0.2089], [-5.0592, -0.4269, -0.4169], [-4.9087, -1.7608, -1.7372]]
        ).to(torch_device)

        torch.testing.assert_close(outputs.logits[0, :3, :3], expected_logits, rtol=1e-3, atol=1e-3)

        expected_shape_boxes = torch.Size((1, model.config.num_queries, 4))
        self.assertEqual(outputs.pred_boxes.shape, expected_shape_boxes)
        torch.testing.assert_close(outputs.pred_boxes[0, :3, :3], expected_boxes, rtol=1e-4, atol=1e-4)

        # verify postprocessing
        results = processor.image_processor.post_process_object_detection(
            outputs, threshold=0.35, target_sizes=[(image.height, image.width)]
        )[0]
        expected_scores = torch.tensor([0.4480, 0.3973]).to(torch_device)
        expected_slice_boxes = torch.tensor([343.7321, 23.8182, 637.5044, 373.8593]).to(torch_device)

        self.assertEqual(len(results["scores"]), 2)
        torch.testing.assert_close(results["scores"], expected_scores, rtol=1e-3, atol=1e-3)
        torch.testing.assert_close(results["boxes"][0, :], expected_slice_boxes, rtol=1e-2, atol=1e-2)

        # verify grounded postprocessing
        expected_labels = ["a cat", "a cat"]
        results = processor.post_process_grounded_object_detection(
            outputs=outputs,
            input_ids=encoding.input_ids,
            threshold=0.35,
            text_threshold=0.3,
            target_sizes=[(image.height, image.width)],
        )[0]

        torch.testing.assert_close(results["scores"], expected_scores, rtol=1e-3, atol=1e-3)
        torch.testing.assert_close(results["boxes"][0, :], expected_slice_boxes, rtol=1e-2, atol=1e-2)
        self.assertListEqual(results["text_labels"], expected_labels)

    @require_torch_accelerator
    @is_flaky()
    def test_inference_object_detection_head_equivalence_cpu_gpu(self):
        """Same inputs on CPU and accelerator must produce (nearly) identical outputs."""
        processor = self.default_processor
        image = prepare_img()
        text = prepare_text()
        encoding = processor(images=image, text=text, return_tensors="pt")

        # 1. run model on CPU
        model = MMGroundingDinoForObjectDetection.from_pretrained(
            "openmmlab-community/mm_grounding_dino_tiny_o365v1_goldg_v3det"
        )
        # HACK: the issue happens during top-k (k=900) after the encoder
        # there are some flips between cpu and gpu query ordering (idxs 195<->196 and 267<->268 on my machine)
        # which causes different query position embedding assignments
        # which in turn significantly changes the decoder pass due to self attention
        model.config.num_queries = 100
        model.model.query_position_embeddings.weight.data = model.model.query_position_embeddings.weight.data[:100]

        with torch.no_grad():
            cpu_outputs = model(**encoding)

        # 2. run model on GPU
        model.to(torch_device)
        encoding = encoding.to(torch_device)
        with torch.no_grad():
            gpu_outputs = model(**encoding)

        # 3. assert equivalence
        for key in cpu_outputs.keys():
            torch.testing.assert_close(cpu_outputs[key], gpu_outputs[key].cpu(), rtol=1e-3, atol=1e-3)

        expected_logits = torch.tensor(
            [[-5.0188, -1.0069, -1.0005], [-5.1177, -1.0537, -1.0444], [-5.3986, -2.4935, -2.4716]]
        )
        torch.testing.assert_close(cpu_outputs.logits[0, :3, :3], expected_logits, rtol=1e-3, atol=1e-3)

        # assert postprocessing
        results_cpu = processor.image_processor.post_process_object_detection(
            cpu_outputs, threshold=0.35, target_sizes=[(image.height, image.width)]
        )[0]
        result_gpu = processor.image_processor.post_process_object_detection(
            gpu_outputs, threshold=0.35, target_sizes=[(image.height, image.width)]
        )[0]

        torch.testing.assert_close(results_cpu["scores"], result_gpu["scores"].cpu(), rtol=1e-3, atol=1e-3)
        torch.testing.assert_close(results_cpu["boxes"], result_gpu["boxes"].cpu(), rtol=1e-3, atol=1e-3)

    @is_flaky()
    def test_cross_attention_mask(self):
        """Batched padded text must reproduce the per-sample results (cross-attn masking)."""
        model = MMGroundingDinoForObjectDetection.from_pretrained(
            "openmmlab-community/mm_grounding_dino_tiny_o365v1_goldg_v3det"
        ).to(torch_device)
        # HACK: the issue happens during top-k (k=900) after the encoder
        # there are some flips between cpu and gpu query ordering
        # which causes different query position embedding assignments
        # which in turn significantly changes the decoder pass due to self attention
        model.config.num_queries = 100
        model.model.query_position_embeddings.weight.data = model.model.query_position_embeddings.weight.data[:100]

        processor = self.default_processor
        image = prepare_img()
        text1 = "a cat."
        text2 = "a remote control."
        text_batched = [text1, text2]

        encoding1 = processor(images=image, text=text1, return_tensors="pt").to(torch_device)
        encoding2 = processor(images=image, text=text2, return_tensors="pt").to(torch_device)
        # If we batch the text and cross attention masking is working the batched result should be equal to
        # The singe text result
        encoding_batched = processor(
            images=[image] * len(text_batched), text=text_batched, padding="longest", return_tensors="pt"
        ).to(torch_device)

        with torch.no_grad():
            outputs1 = model(**encoding1)
            outputs2 = model(**encoding2)
            outputs_batched = model(**encoding_batched)

        torch.testing.assert_close(outputs1.logits, outputs_batched.logits[:1], rtol=1e-3, atol=1e-3)
        # For some reason 12 elements are > 1e-3, but the rest are fine
        self.assertTrue(torch.allclose(outputs2.logits, outputs_batched.logits[1:], atol=1.8e-3))

    def test_mm_grounding_dino_loss(self):
        """Loss computation (incl. auxiliary losses) checked against reference values."""
        ds = load_dataset("EduardoPacheco/aquarium-sample", split="train")
        image_processor = self.default_processor.image_processor
        tokenizer = self.default_processor.tokenizer
        id2label = {0: "fish", 1: "jellyfish", 2: "penguins", 3: "sharks", 4: "puffins", 5: "stingrays", 6: "starfish"}
        prompt = ". ".join(id2label.values()) + "."

        text_inputs = tokenizer([prompt, prompt], return_tensors="pt")
        image_inputs = image_processor(
            images=list(ds["image"]), annotations=list(ds["annotations"]), return_tensors="pt"
        )

        # Passing auxiliary_loss=True to compare with the expected loss
        model = MMGroundingDinoForObjectDetection.from_pretrained(
            "openmmlab-community/mm_grounding_dino_tiny_o365v1_goldg_v3det",
            auxiliary_loss=True,
        )
        # Interested in the loss only
        model.eval()
        with torch.no_grad():
            outputs = model(**text_inputs, **image_inputs)

        # Loss differs by CPU and GPU, also this can be changed in future.
        expected_loss_dict = {
            "loss_ce": torch.tensor(1.1799),
            "loss_bbox": torch.tensor(0.2348),
            "loss_giou": torch.tensor(0.5834),
            "loss_ce_0": torch.tensor(1.1199),
            "loss_bbox_0": torch.tensor(0.3083),
            "loss_giou_0": torch.tensor(0.6555),
            "loss_ce_1": torch.tensor(1.2075),
            "loss_bbox_1": torch.tensor(0.2641),
            "loss_giou_1": torch.tensor(0.6073),
            "loss_ce_2": torch.tensor(1.2915),
            "loss_bbox_2": torch.tensor(0.2616),
            "loss_giou_2": torch.tensor(0.5730),
            "loss_ce_3": torch.tensor(1.0243),
            "loss_bbox_3": torch.tensor(0.2799),
            "loss_giou_3": torch.tensor(0.6326),
            "loss_ce_4": torch.tensor(1.2019),
            "loss_bbox_4": torch.tensor(0.2430),
            "loss_giou_4": torch.tensor(0.5679),
            "loss_ce_enc": torch.tensor(10.2381),
            "loss_bbox_enc": torch.tensor(0.2886),
            "loss_giou_enc": torch.tensor(0.6335),
        }
        expected_loss = torch.tensor(52.4340)

        for key in expected_loss_dict:
            self.assertTrue(torch.allclose(outputs.loss_dict[key], expected_loss_dict[key], atol=1e-3))
        self.assertTrue(torch.allclose(outputs.loss, expected_loss, atol=1e-3))
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/mm_grounding_dino/test_modeling_mm_grounding_dino.py",
"license": "Apache License 2.0",
"lines": 692,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:src/transformers/models/cohere2_vision/configuration_cohere2_vision.py | # Copyright 2025 the Cohere Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ...configuration_utils import PreTrainedConfig
from ..auto import CONFIG_MAPPING, AutoConfig
class Cohere2VisionConfig(PreTrainedConfig):
    r"""
    Configuration class for [`Cohere2VisionForConditionalGeneration`]. It stores the
    sub-configurations of the vision and text backbones plus the parameters of the
    multimodal alignment layer; instantiating with the defaults yields a configuration
    similar to
    [CohereLabs/command-a-vision-07-2025](https://huggingface.co/CohereLabs/command-a-vision-07-2025).

    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.

    Args:
        vision_config (`Union[AutoConfig, dict]`, *optional*, defaults to `SiglipVisionConfig`):
            The config object or dictionary of the vision backbone.
        text_config (`Union[AutoConfig, dict]`, *optional*, defaults to `Cohere2Config`):
            The config object or dictionary of the text backbone.
        downsample_factor (`int`, *optional*, defaults to 2):
            The factor by which to downsample the input image.
        image_token_id (`int`, *optional*, defaults to 255036):
            The token ID to use as placeholder for the image input.
        alignment_intermediate_size (`int`, *optional*, defaults to 36864):
            The size of the intermediate layer for alignment.
        tie_word_embeddings (`bool`, *optional*, defaults to `True`):
            Whether to tie weight embeddings
    """

    model_type = "cohere2_vision"
    sub_configs = {"text_config": AutoConfig, "vision_config": AutoConfig}

    def __init__(
        self,
        vision_config=None,
        text_config=None,
        downsample_factor=2,
        image_token_id=255036,
        alignment_intermediate_size=36864,
        tie_word_embeddings=True,
        **kwargs,
    ):
        self.downsample_factor = downsample_factor
        self.image_token_id = image_token_id
        self.alignment_intermediate_size = alignment_intermediate_size

        # Resolve the vision backbone config; default is a SigLIP vision tower.
        if vision_config is None:
            vision_config = CONFIG_MAPPING["siglip_vision_model"](
                hidden_size=1152,
                intermediate_size=3072,
                image_size=512,
                num_hidden_layers=27,
                num_attention_heads=12,
            )
        elif isinstance(vision_config, dict):
            vision_model_type = vision_config.setdefault("model_type", "siglip_vision_model")
            vision_config = CONFIG_MAPPING[vision_model_type](**vision_config)
        self.vision_config = vision_config

        # Resolve the text backbone config; default is Cohere2.
        if text_config is None:
            text_config = CONFIG_MAPPING["cohere2"](tie_word_embeddings=tie_word_embeddings)
        elif isinstance(text_config, dict):
            text_model_type = text_config.setdefault("model_type", "cohere2")
            text_config = CONFIG_MAPPING[text_model_type](**text_config)
        self.text_config = text_config

        self.tie_word_embeddings = tie_word_embeddings
        super().__init__(**kwargs)
# Explicit public API of this module.
__all__ = ["Cohere2VisionConfig"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/cohere2_vision/configuration_cohere2_vision.py",
"license": "Apache License 2.0",
"lines": 72,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/cohere2_vision/modular_cohere2_vision.py | # Copyright 2025 the Cohere Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch AyaVision model."""
from functools import lru_cache
import numpy as np
import torch
from torch import nn
from transformers.models.aya_vision.modeling_aya_vision import (
AyaVisionCausalLMOutputWithPast,
AyaVisionForConditionalGeneration,
AyaVisionModel,
AyaVisionModelOutputWithPast,
AyaVisionPreTrainedModel,
)
from transformers.models.got_ocr2.image_processing_got_ocr2_fast import GotOcr2ImageProcessorFast
from ...cache_utils import Cache
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from ...modeling_outputs import BaseModelOutputWithPooling
from ...processing_utils import ImagesKwargs, Unpack
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, logging
from .configuration_cohere2_vision import Cohere2VisionConfig
logger = logging.get_logger(__name__)
class Cohere2VisionMultiModalProjector(nn.Module):
    """Projects vision-tower features into the language model's embedding space.

    Applies a pixel-shuffle downsampling (folding spatial positions into the
    channel dimension) followed by a SwiGLU-style two-layer MLP.
    """

    def __init__(self, config: Cohere2VisionConfig):
        super().__init__()
        self.config = config
        self.downsample_factor = config.downsample_factor
        self.intermediate_size = config.alignment_intermediate_size
        # Input width grows by downsample_factor**2 because pixel_shuffle folds
        # that many spatial positions into the channel dimension.
        self.linear_1 = nn.Linear(
            config.vision_config.hidden_size * (config.downsample_factor**2), self.intermediate_size, bias=True
        )
        self.act = nn.SiLU()
        # `// 2` because the SwiGLU gating in `forward` halves the hidden width.
        self.linear_2 = nn.Linear(self.intermediate_size // 2, config.text_config.hidden_size, bias=True)

    def pixel_shuffle(self, image_features):  # B, S, D
        # assumes S is a perfect square (square patch grid) — TODO confirm with callers
        batch_size, seq_length, feature_dim = image_features.shape
        height = width = int(seq_length**0.5)
        image_features = image_features.reshape(image_features.shape[0], width, height, -1)
        channels = image_features.shape[-1]
        # Fold `downsample_factor` adjacent positions into channels along one axis,
        # transpose, then repeat for the other axis. Statement order is significant.
        image_features = image_features.reshape(
            batch_size, width, int(height / self.downsample_factor), int(channels * self.downsample_factor)
        )
        image_features = image_features.permute(0, 2, 1, 3)
        image_features = image_features.reshape(
            batch_size, int(height / self.downsample_factor), int(width / self.downsample_factor), -1
        )
        image_features = image_features.permute(0, 2, 1, 3)
        return image_features

    def forward(self, image_features):
        """Pixel-shuffle the patch features, then project through the SwiGLU MLP."""
        image_features = self.pixel_shuffle(image_features)
        hidden_states = self.linear_1(image_features)
        # Split along last dimension and apply SwiGLU
        x, gate = hidden_states.chunk(2, dim=-1)
        hidden_states = self.act(gate) * x
        hidden_states = self.linear_2(hidden_states)
        return hidden_states
class Cohere2VisionModelOutputWithPast(AyaVisionModelOutputWithPast):
    # Structure inherited unchanged from AyaVision; subclassed for the modular-file naming scheme.
    pass
class Cohere2VisionCausalLMOutputWithPast(AyaVisionCausalLMOutputWithPast):
    # Structure inherited unchanged from AyaVision; subclassed for the modular-file naming scheme.
    pass
class Cohere2VisionPreTrainedModel(AyaVisionPreTrainedModel):
    # Only overrides the prefix used when loading a headless base checkpoint.
    base_model_prefix = "model"
class Cohere2VisionModel(AyaVisionModel):
    """Cohere2 Vision base model: vision tower + multimodal projector + language model."""

    _checkpoint_conversion_mapping = {}

    @can_return_tuple
    @auto_docstring(
        custom_intro="Obtains image last hidden states from the vision tower and apply multimodal projection."
    )
    def get_image_features(
        self, pixel_values: torch.FloatTensor, **kwargs: Unpack[TransformersKwargs]
    ) -> tuple | BaseModelOutputWithPooling:
        image_outputs = self.vision_tower(pixel_values, return_dict=True, **kwargs)
        selected_image_feature = image_outputs.last_hidden_state
        # Repurpose `pooler_output` to carry the projected (text-space) image features.
        image_outputs.pooler_output = self.multi_modal_projector(selected_image_feature)
        return image_outputs

    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        pixel_values: torch.FloatTensor | None = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: Cache | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        use_cache: bool | None = None,
        cache_position: torch.LongTensor | None = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> tuple | Cohere2VisionModelOutputWithPast:
        """Embed text, splice projected image features over the image placeholder
        tokens, then run the language model."""
        # Exactly one of `input_ids` / `inputs_embeds` must be provided.
        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

        if inputs_embeds is None:
            inputs_embeds = self.get_input_embeddings()(input_ids)

        if pixel_values is not None:
            image_features = self.get_image_features(pixel_values, return_dict=True).pooler_output
            image_features = image_features.to(inputs_embeds.device, inputs_embeds.dtype)
            # Overwrite embeddings at image-placeholder positions with image features.
            special_image_mask = self.get_placeholder_mask(
                input_ids, inputs_embeds=inputs_embeds, image_features=image_features
            )
            inputs_embeds = inputs_embeds.masked_scatter(special_image_mask, image_features)

        outputs = self.language_model(
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            cache_position=cache_position,
            **kwargs,
        )

        return Cohere2VisionModelOutputWithPast(
            last_hidden_state=outputs.last_hidden_state,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            image_hidden_states=image_features if pixel_values is not None else None,
        )
class Cohere2VisionForConditionalGeneration(AyaVisionForConditionalGeneration):
    """Cohere2 Vision model with a language-modeling head for conditional generation."""

    _checkpoint_conversion_mapping = {}

    @auto_docstring
    def get_image_features(
        self, pixel_values: torch.FloatTensor, **kwargs: Unpack[TransformersKwargs]
    ) -> tuple | BaseModelOutputWithPooling:
        # Delegates to the base model's vision tower + projector.
        return self.model.get_image_features(pixel_values=pixel_values, **kwargs)

    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        pixel_values: torch.FloatTensor | None = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: Cache | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        labels: torch.LongTensor | None = None,
        use_cache: bool | None = None,
        cache_position: torch.LongTensor | None = None,
        logits_to_keep: int | torch.Tensor = 0,
        image_sizes: torch.Tensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple | Cohere2VisionCausalLMOutputWithPast:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Example:

        ```python
        >>> from transformers import AutoProcessor, Cohere2VisionForConditionalGeneration
        >>> import torch

        >>> processor = AutoProcessor.from_pretrained("CohereLabs/command-a-vision-07-2025", use_fast=True)
        >>> model = Cohere2VisionForConditionalGeneration.from_pretrained("CohereLabs/command-a-vision-07-2025", device_map="auto")

        >>> messages = [
        ...     {
        ...         "role": "user",
        ...         "content": [
        ...             {
        ...                 "type": "image",
        ...                 "url": "https://images.pexels.com/photos/1108099/pexels-photo-1108099.jpeg",
        ...             },
        ...             {"type": "text", "text": "what is in this image?"},
        ...         ],
        ...     },
        ... ]

        >>> inputs = processor.apply_chat_template(
        ...     messages, padding=True, add_generation_prompt=True, tokenize=True, return_dict=True, return_tensors="pt",
        ... ).to(model.device)

        >>> gen_tokens = model.generate(**inputs, max_new_tokens=300, do_sample=True, temperature=0.3)
        >>> processor.tokenizer.decode(gen_tokens[0][inputs.input_ids.shape[1]:], skip_special_tokens=True)
        ```"""
        outputs = self.model(
            input_ids=input_ids,
            pixel_values=pixel_values,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            cache_position=cache_position,
            image_sizes=image_sizes,
            **kwargs,
        )

        hidden_states = outputs[0]
        # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
        slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
        logits = self.lm_head(hidden_states[:, slice_indices, :])

        loss = None
        if labels is not None:
            loss = self.loss_function(
                logits=logits, labels=labels, vocab_size=self.config.text_config.vocab_size, **kwargs
            )

        return Cohere2VisionCausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            image_hidden_states=outputs.image_hidden_states,
        )
@lru_cache(maxsize=10)
def get_all_supported_aspect_ratios(max_image_tiles: int) -> list[tuple[int, int]]:
    """
    Computes all allowed aspect ratios for a given maximum number of input tiles.

    Every (columns, rows) tile grid whose total tile count does not exceed
    `max_image_tiles` is returned, enumerated column-count-major.

    Args:
        max_image_tiles (`int`):
            The maximum number of tiles allowed.

    Returns:
        `list[tuple[int, int]]`: A list of valid (width, height) tile-grid configurations.

    Example:
        >>> get_all_supported_aspect_ratios(4)
        [(1, 1), (1, 2), (1, 3), (1, 4), (2, 1), (2, 2), (3, 1), (4, 1)]
    """
    return [
        (num_cols, num_rows)
        for num_cols in range(1, max_image_tiles + 1)
        for num_rows in range(1, max_image_tiles + 1)
        if num_cols * num_rows <= max_image_tiles
    ]
def get_optimal_tiled_canvas(
    original_image_size: tuple[int, int],
    target_tile_size: tuple[int, int],
    min_image_tiles: int,
    max_image_tiles: int,
) -> tuple[int, int]:
    """Pick the (width, height) tile grid that best fits the image.

    Prefers the grid requiring the least upscaling; if every grid would force a
    downscale, picks the one that downscales the least. `min_image_tiles` is
    accepted for signature compatibility but not used here.
    """
    # Candidate grids, smallest tile count first (tie order from the generator).
    grids = sorted(get_all_supported_aspect_ratios(max_image_tiles), key=lambda wh: wh[0] * wh[1])
    image_height, image_width = original_image_size
    tile_edge, _ = target_tile_size  # tiles are square (height == width)

    # Pixel size of each candidate canvas; grids are (width, height), so divide
    # by (image_width, image_height) to get the per-axis scale factors.
    canvas_sizes = np.array(grids) * tile_edge
    per_axis_scales = canvas_sizes / np.stack([image_width, image_height])
    limiting_scale = np.min(per_axis_scales, axis=-1, keepdims=True)  # [n_resolutions, 1]

    if np.all(limiting_scale < 1):
        # We are forced to downscale, so try to minimize the amount of downscaling
        return grids[np.argmax(limiting_scale)]
    # Pick the resolution that required the least upscaling so that it most closely fits the image
    penalized = np.where(limiting_scale < 1.0, 10e9, limiting_scale)
    return grids[np.argmin(penalized)]
class Cohere2VisionFastImageProcessorKwargs(ImagesKwargs, total=False):
    """Typed keyword arguments accepted by `Cohere2VisionImageProcessorFast`.

    crop_to_patches (`bool`, *optional*, defaults to `False`):
        Whether to crop the image to patches. Can be overridden by the `crop_to_patches` parameter in the
        `preprocess` method.
    min_patches (`int`, *optional*, defaults to 1):
        The minimum number of patches to be extracted from the image. Only has an effect if `crop_to_patches` is
        set to `True`. Can be overridden by the `min_patches` parameter in the `preprocess` method.
    max_patches (`int`, *optional*, defaults to 12):
        The maximum number of patches to be extracted from the image. Only has an effect if `crop_to_patches` is
        set to `True`. Can be overridden by the `max_patches` parameter in the `preprocess` method.
    """

    crop_to_patches: bool
    min_patches: int
    max_patches: int
@auto_docstring
class Cohere2VisionImageProcessorFast(GotOcr2ImageProcessorFast):
    # Reuses the GOT-OCR2 fast pipeline; only the tiling defaults differ.
    size = {"height": 512, "width": 512}
    min_patches = 1
    max_patches = 12
    crop_to_patches = True
    patch_size = 16
    valid_kwargs = Cohere2VisionFastImageProcessorKwargs

    def __init__(self, **kwargs: Unpack[Cohere2VisionFastImageProcessorKwargs]):
        # Pure passthrough; present so `valid_kwargs` typing applies to the signature.
        super().__init__(**kwargs)

    @auto_docstring
    def preprocess(self, images: ImageInput, **kwargs: Unpack[Cohere2VisionFastImageProcessorKwargs]) -> BatchFeature:
        # Pure passthrough to the GOT-OCR2 implementation.
        return super().preprocess(images, **kwargs)
# Explicit public API of this module.
__all__ = [
    "Cohere2VisionForConditionalGeneration",
    "Cohere2VisionPreTrainedModel",
    "Cohere2VisionModel",
    "Cohere2VisionImageProcessorFast",
]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/cohere2_vision/modular_cohere2_vision.py",
"license": "Apache License 2.0",
"lines": 286,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/cohere2_vision/processing_cohere2_vision.py | # Copyright 2025 HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import MultiModalData, ProcessingKwargs, ProcessorMixin, Unpack
from ...tokenization_utils_base import PreTokenizedInput, TextInput
from ...utils import auto_docstring
class Cohere2VisionProcessorKwargs(ProcessingKwargs, total=False):
    """Typed kwargs accepted by `Cohere2VisionProcessor.__call__`.

    Only overrides the text defaults: left padding (decoder-only generation
    convention), padding enabled, and no multimodal token-type ids unless
    explicitly requested.
    """

    _defaults = {
        "text_kwargs": {
            "padding_side": "left",
            "padding": True,
            "return_mm_token_type_ids": False,
        },
    }
@auto_docstring
class Cohere2VisionProcessor(ProcessorMixin):
    def __init__(
        self,
        image_processor=None,
        tokenizer=None,
        chat_template=None,
        **kwargs,
    ):
        super().__init__(image_processor, tokenizer, chat_template=chat_template)
        # Cache the special-token strings/ids used to expand image placeholders.
        self.patch_size = self.image_processor.patch_size
        self.boi_token = tokenizer.boi_token
        self.eoi_token = tokenizer.eoi_token
        self.image_token = tokenizer.image_token
        self.img_line_break_token = tokenizer.img_line_break_token
        self.image_token_id = tokenizer.image_token_id
        # Every id that counts as an "image" token when building mm_token_type_ids.
        self.image_ids = tokenizer.convert_tokens_to_ids(
            [
                self.image_token,
                self.boi_token,
                self.eoi_token,
                self.img_line_break_token,
            ]
        )

    @auto_docstring
    def __call__(
        self,
        images: ImageInput | None = None,
        text: TextInput | PreTokenizedInput | list[TextInput] | list[PreTokenizedInput] | None = None,
        **kwargs: Unpack[Cohere2VisionProcessorKwargs],
    ) -> BatchFeature:
        r"""
        Returns:
            [`BatchFeature`]: A [`BatchFeature`] with the following fields:

            - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
            - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
              `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
              `None`).
            - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
        """
        if text is None:
            raise ValueError("You have to specify text.")
        elif not isinstance(text, (list, tuple)):
            text = [text]
        output_kwargs = self._merge_kwargs(
            Cohere2VisionProcessorKwargs,
            tokenizer_init_kwargs=self.tokenizer.init_kwargs,
            **kwargs,
        )
        # Process images
        image_inputs = {}
        if images is not None:
            image_inputs = self.image_processor(images=images, **output_kwargs["images_kwargs"])
            batch_num_patches = iter(image_inputs.pop("num_patches"))
            processed_text = []
            for sample in text:
                # Expand each image token into BOI + one run of per-tile placeholders
                # (each followed by a line-break token) + EOI. The loop emits
                # num_patches - 1 tiles, and the line after it emits the last one.
                while self.image_token in sample:
                    num_patches = next(batch_num_patches)
                    img_patches_per_tile = int(self.patch_size**2)
                    img_string = f"{self.boi_token}"
                    for idx in range(1, num_patches):
                        img_string += "<placeholder>" * img_patches_per_tile + self.img_line_break_token
                    img_string += "<placeholder>" * img_patches_per_tile + self.img_line_break_token
                    img_string += f"{self.eoi_token}"
                    sample = sample.replace(self.image_token, img_string, 1)
                processed_text.append(sample)
            # Swap the temporary placeholder back for the real image token so the
            # original image token in the prompt is not expanded recursively.
            text = [sample.replace("<placeholder>", self.image_token) for sample in processed_text]
        return_tensors = output_kwargs["text_kwargs"].pop("return_tensors", None)
        return_mm_token_type_ids = output_kwargs["text_kwargs"].pop("return_mm_token_type_ids", False)
        text_inputs = self.tokenizer(text, **output_kwargs["text_kwargs"], return_tensors=None)
        if return_mm_token_type_ids:
            array_ids = np.array(text_inputs["input_ids"])
            # Mark every image-related special token (image/BOI/EOI/line-break) with type 1.
            mm_token_type_ids = np.zeros_like(array_ids)
            mm_token_type_ids[np.isin(array_ids, self.image_ids)] = 1
            text_inputs["mm_token_type_ids"] = mm_token_type_ids.tolist()
        return BatchFeature(data={**text_inputs, **image_inputs}, tensor_type=return_tensors)

    def _get_num_multimodal_tokens(self, image_sizes=None, **kwargs):
        """
        Computes the number of placeholder tokens needed for multimodal inputs with the given sizes.
        Args:
            image_sizes (`list[list[int]]`, *optional*):
                The input sizes formatted as (height, width) per each image.
        Returns:
            `MultiModalData`: A `MultiModalData` object holding number of tokens per each of the provided
            input modalities, along with other useful data.
        """
        vision_data = {}
        if image_sizes is not None:
            # Copy before updating: `_defaults` is a class-level dict shared by all
            # callers, so updating the looked-up value in place would leak per-call
            # kwargs into every subsequent call.
            images_kwargs = dict(Cohere2VisionProcessorKwargs._defaults.get("images_kwargs", {}))
            images_kwargs.update(kwargs)
            num_image_patches = [
                self.image_processor.get_number_of_image_patches(*image_size, images_kwargs)
                for image_size in image_sizes
            ]
            token_per_patch = int(self.patch_size**2)
            num_image_tokens = [
                2 + sum(token_per_patch + 1 for _ in range(num_patches)) for num_patches in num_image_patches
            ]  # Add +2 and +1 for BOI/EOI and image break tokens
            vision_data.update({"num_image_tokens": num_image_tokens, "num_image_patches": num_image_patches})
        return MultiModalData(**vision_data)

    def batch_decode(self, *args, **kwargs):
        """
        This method forwards all its arguments to PreTrainedTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please
        refer to the docstring of this method for more information.
        """
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """
        This method forwards all its arguments to PreTrainedTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to
        the docstring of this method for more information.
        """
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # Union of tokenizer and image-processor input names, tokenizer first.
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(tokenizer_input_names) + list(image_processor_input_names)
# Public symbols re-exported by the package-level __init__.
__all__ = ["Cohere2VisionProcessor"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/cohere2_vision/processing_cohere2_vision.py",
"license": "Apache License 2.0",
"lines": 145,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:tests/models/cohere2_vision/test_image_processing_cohere2_vision.py | # Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_torchvision_available, is_vision_available
from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
if is_torchvision_available():
from transformers import Cohere2VisionImageProcessorFast
class Cohere2VisionImageProcessingTester(unittest.TestCase):
    """Holds test configuration and builds image-processor kwargs / dummy image inputs."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=None,
        image_std=None,
        do_convert_rgb=True,
    ):
        super().__init__()
        # Resolve `None` sentinels here instead of using mutable (list/dict) default
        # arguments, which would be shared across every instance of the tester.
        size = size if size is not None else {"height": 30, "width": 30}
        image_mean = image_mean if image_mean is not None else [0.48145466, 0.4578275, 0.40821073]
        image_std = image_std if image_std is not None else [0.26862954, 0.26130258, 0.27577711]
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb

    def prepare_image_processor_dict(self):
        """Return the kwargs used to instantiate the image processor under test."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_convert_rgb": self.do_convert_rgb,
        }

    def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        """Build a batch of random test images (PIL by default, numpy/torch on request)."""
        return prepare_image_inputs(
            batch_size=self.batch_size,
            num_channels=self.num_channels,
            min_resolution=self.min_resolution,
            max_resolution=self.max_resolution,
            equal_resolution=equal_resolution,
            numpify=numpify,
            torchify=torchify,
        )
@require_torch
@require_vision
class Cohere2VisionProcessingTest(ImageProcessingTestMixin, unittest.TestCase):
    """Image-processing tests for the fast Cohere2 Vision processor (no slow variant exists)."""

    fast_image_processing_class = Cohere2VisionImageProcessorFast if is_torchvision_available() else None
    test_slow_image_processor = False

    def setUp(self):
        super().setUp()
        self.image_processor_tester = Cohere2VisionImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        # The processor must expose the standard resize/normalize knobs.
        for image_processing_class in self.image_processor_list:
            image_processor = image_processing_class(**self.image_processor_dict)
            self.assertTrue(hasattr(image_processor, "do_resize"))
            self.assertTrue(hasattr(image_processor, "size"))
            self.assertTrue(hasattr(image_processor, "do_normalize"))
            self.assertTrue(hasattr(image_processor, "image_mean"))
            self.assertTrue(hasattr(image_processor, "image_std"))
            self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_call_pil(self):
        for image_processing_class in self.image_processor_list:
            # Initialize image_processing
            image_processing = image_processing_class(**self.image_processor_dict)
            # create random PIL images
            image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=True)
            for image in image_inputs:
                self.assertIsInstance(image, Image.Image)
            # Test not batched input
            # NOTE(review): 10 patches per image at these settings — presumably a 3x3
            # grid plus thumbnail; confirm against the GotOcr2 cropping logic.
            encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
            self.assertEqual(tuple(encoded_images.shape), (10, 3, 30, 30))
            # Test batched
            encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
            self.assertEqual(tuple(encoded_images.shape), (70, 3, 30, 30))

    def test_call_numpy(self):
        for image_processing_class in self.image_processor_list:
            # Initialize image_processing
            image_processing = image_processing_class(**self.image_processor_dict)
            # create random numpy tensors
            image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=True, numpify=True)
            for image in image_inputs:
                self.assertIsInstance(image, np.ndarray)
            # Test not batched input
            encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
            self.assertEqual(tuple(encoded_images.shape), (10, 3, 30, 30))
            # Test batched
            encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
            self.assertEqual(tuple(encoded_images.shape), (70, 3, 30, 30))

    def test_call_pytorch(self):
        for image_processing_class in self.image_processor_list:
            # Initialize image_processing
            image_processing = image_processing_class(**self.image_processor_dict)
            # create random PyTorch tensors
            image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=True, torchify=True)
            for image in image_inputs:
                self.assertIsInstance(image, torch.Tensor)
            # Test not batched input
            encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
            self.assertEqual(tuple(encoded_images.shape), (10, 3, 30, 30))
            # Test batched
            encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
            self.assertEqual(tuple(encoded_images.shape), (70, 3, 30, 30))

    def test_call_numpy_4_channels(self):
        for image_processing_class in self.image_processor_list:
            # Test that can process images which have an arbitrary number of channels
            # Initialize image_processing
            image_processor = image_processing_class(**self.image_processor_dict)
            # create random numpy tensors
            self.image_processor_tester.num_channels = 4
            image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=True, numpify=True)
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0],
                return_tensors="pt",
                input_data_format="channels_last",
                image_mean=(0.0, 0.0, 0.0, 0.0),
                image_std=(1.0, 1.0, 1.0, 1.0),
            ).pixel_values
            self.assertEqual(tuple(encoded_images.shape), (10, 4, 30, 30))
            # Test batched
            encoded_images = image_processor(
                image_inputs,
                return_tensors="pt",
                input_data_format="channels_last",
                image_mean=(0.0, 0.0, 0.0, 0.0),
                image_std=(1.0, 1.0, 1.0, 1.0),
            ).pixel_values
            self.assertEqual(tuple(encoded_images.shape), (70, 4, 30, 30))

    def test_crop_to_patches_aspect_ratio(self):
        """Test that row/column ordering is correct when cropping non-square images to patches.
        This test verifies that patches can be stitched back to reconstruct the original image,
        which validates that the row/column ordering in get_optimal_tiled_canvas is correct.
        If row/column are swapped, the image would be resized to wrong dimensions and patches
        would not match the original content.
        """
        for image_processing_class in self.image_processor_list:
            patch_size = 64
            image_processor = image_processing_class(
                do_resize=True,
                size={"height": patch_size, "width": patch_size},
                do_normalize=False,  # Disable normalization to preserve pixel values
                do_rescale=False,  # Disable rescaling to preserve pixel values
                crop_to_patches=True,
                min_patches=1,
                max_patches=6,  # Allow up to 6 patches to test asymmetric grids like 2x3
            )
            # Create a 2:3 aspect ratio image (2 rows x 3 columns of patches)
            # This asymmetric grid will fail if rows/columns are swapped
            num_rows, num_cols = 2, 3
            image_height = patch_size * num_rows  # 128
            image_width = patch_size * num_cols  # 192
            # Create image with unique color for each patch position
            test_image = Image.new("RGB", (image_width, image_height))
            for row in range(num_rows):
                for col in range(num_cols):
                    patch_idx = row * num_cols + col  # 0-5
                    color = (patch_idx * 40 + 20, 0, 0)  # Unique red values: 20, 60, 100, 140, 180, 220
                    for y in range(patch_size):
                        for x in range(patch_size):
                            test_image.putpixel(
                                (col * patch_size + x, row * patch_size + y),
                                color,
                            )
            # Process image
            result = image_processor(test_image, return_tensors="pt")
            patches = result.pixel_values
            num_patches_result = result.num_patches
            # Should produce 7 patches (6 grid patches + 1 thumbnail)
            self.assertEqual(num_patches_result.tolist(), [7])
            self.assertEqual(tuple(patches.shape), (7, 3, patch_size, patch_size))
            # Verify each patch has the correct color (excluding thumbnail which is last)
            # Patches should be ordered row by row: (0,0), (0,1), (0,2), (1,0), (1,1), (1,2)
            for patch_idx in range(6):
                expected_red = patch_idx * 40 + 20
                actual_red = patches[patch_idx, 0, 0, 0].item()  # Red channel, top-left pixel
                self.assertEqual(
                    actual_red,
                    expected_red,
                    f"Patch {patch_idx} has wrong color. Expected red={expected_red}, got {actual_red}. "
                    f"This indicates row/column ordering is incorrect.",
                )
            # Stitch patches back and verify against original
            stitched = torch.zeros(3, image_height, image_width)
            for patch_idx in range(6):
                row = patch_idx // num_cols
                col = patch_idx % num_cols
                stitched[
                    :,
                    row * patch_size : (row + 1) * patch_size,
                    col * patch_size : (col + 1) * patch_size,
                ] = patches[patch_idx]
            original_tensor = torch.tensor(np.array(test_image)).permute(2, 0, 1).float()
            self.assertTrue(
                torch.allclose(stitched, original_tensor),
                "Patches do not stitch back to original image - row/column ordering may be wrong",
            )

    def test_get_number_of_image_patches_aspect_ratio(self):
        """Test that get_number_of_image_patches returns correct count for non-square images.
        This directly tests the row/column unpacking fix by verifying patch counts match
        the expected grid layout. If rows/columns are swapped, the wrong grid would be
        chosen for asymmetric images.
        """
        for image_processing_class in self.image_processor_list:
            patch_size = 64
            image_processor = image_processing_class(
                size={"height": patch_size, "width": patch_size},
                crop_to_patches=True,
                min_patches=1,
                max_patches=12,
            )
            # Test 1: Tall image (4 rows x 1 column) should give 5 patches (4 + thumbnail)
            tall_patches = image_processor.get_number_of_image_patches(
                height=patch_size * 4,  # 256
                width=patch_size,  # 64
                images_kwargs={},
            )
            self.assertEqual(tall_patches, 5, "Tall image (4:1) should produce 5 patches")
            # Test 2: Wide image (1 row x 4 columns) should give 5 patches (4 + thumbnail)
            wide_patches = image_processor.get_number_of_image_patches(
                height=patch_size,  # 64
                width=patch_size * 4,  # 256
                images_kwargs={},
            )
            self.assertEqual(wide_patches, 5, "Wide image (1:4) should produce 5 patches")
            # Test 3: Asymmetric image (2 rows x 3 columns) should give 7 patches
            asym_patches = image_processor.get_number_of_image_patches(
                height=patch_size * 2,  # 128
                width=patch_size * 3,  # 192
                images_kwargs={"max_patches": 6},
            )
            self.assertEqual(asym_patches, 7, "Asymmetric image (2:3) should produce 7 patches")
            # Test 4: Opposite asymmetric (3 rows x 2 columns) should also give 7 patches
            asym_patches2 = image_processor.get_number_of_image_patches(
                height=patch_size * 3,  # 192
                width=patch_size * 2,  # 128
                images_kwargs={"max_patches": 6},
            )
            self.assertEqual(asym_patches2, 7, "Asymmetric image (3:2) should produce 7 patches")
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/cohere2_vision/test_image_processing_cohere2_vision.py",
"license": "Apache License 2.0",
"lines": 271,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:tests/models/cohere2_vision/test_modeling_cohere2_vision.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch GotOcr2 model."""
import unittest
from transformers import (
AutoProcessor,
Cohere2VisionConfig,
is_torch_available,
)
from transformers.testing_utils import (
Expectations,
cleanup,
get_device_properties,
require_deterministic_for_xpu,
require_torch,
require_torch_accelerator,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
Cohere2VisionForConditionalGeneration,
Cohere2VisionModel,
)
class Cohere2VisionText2TextModelTester:
    """Builds a tiny Cohere2 Vision configuration plus dummy inputs for the common model tests."""

    def __init__(
        self,
        parent,
        batch_size=3,
        seq_length=7,
        downsample_factor=2,
        alignment_intermediate_size=32,
        ignore_index=-100,
        image_token_id=2,
        num_channels=3,
        image_size=64,
        is_training=True,
        text_config=None,
        vision_config=None,
    ):
        # Avoid mutable dict default arguments (shared across every instance):
        # materialize the tiny default configs per call instead.
        if text_config is None:
            text_config = {
                "model_type": "cohere2",
                "vocab_size": 99,
                "hidden_size": 128,
                "intermediate_size": 37,
                "num_hidden_layers": 2,
                "num_attention_heads": 4,
                "output_channels": 64,
                "hidden_act": "silu",
                "max_position_embeddings": 512,
                "tie_word_embeddings": True,
                "bos_token_id": 0,
                "eos_token_id": 0,
                "pad_token_id": 0,
            }
        if vision_config is None:
            vision_config = {
                "model_type": "siglip_vision_model",
                "hidden_size": 32,
                "num_hidden_layers": 2,
                "num_attention_heads": 4,
                "intermediate_size": 128,
                "image_size": 64,
                "patch_size": 8,
                "vision_use_head": False,
            }
        self.parent = parent
        self.ignore_index = ignore_index
        self.bos_token_id = text_config["bos_token_id"]
        self.eos_token_id = text_config["eos_token_id"]
        self.pad_token_id = text_config["pad_token_id"]
        self.image_token_id = image_token_id
        self.text_config = text_config
        self.vision_config = vision_config
        self.batch_size = batch_size
        self.downsample_factor = downsample_factor
        self.alignment_intermediate_size = alignment_intermediate_size
        self.is_training = is_training
        self.num_channels = num_channels
        self.image_size = image_size
        # Total sequence length is text length plus the fixed image placeholder span.
        self.image_seq_length = 16
        self.seq_length = seq_length + self.image_seq_length
        self.num_hidden_layers = text_config["num_hidden_layers"]
        self.vocab_size = text_config["vocab_size"]
        self.hidden_size = text_config["hidden_size"]
        self.num_attention_heads = text_config["num_attention_heads"]

    def get_config(self):
        """Assemble the composite vision+text config used by the tests."""
        return Cohere2VisionConfig(
            text_config=self.text_config,
            vision_config=self.vision_config,
            image_token_id=self.image_token_id,
            downsample_factor=self.downsample_factor,
            alignment_intermediate_size=self.alignment_intermediate_size,
        )

    def prepare_config_and_inputs(self):
        """Return (config, random pixel_values)."""
        config = self.get_config()
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        return config, pixel_values

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) with image tokens placed at the start of each sequence."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
        # Scrub accidental image tokens from the random ids, then insert a clean
        # image placeholder span at the beginning of every sequence.
        input_ids[input_ids == self.image_token_id] = self.pad_token_id
        input_ids[:, : self.image_seq_length] = self.image_token_id
        inputs_dict = {
            "pixel_values": pixel_values,
            "input_ids": input_ids,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict
@require_torch
class Cohere2ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Runs the shared model/generation/pipeline test suites over the tiny Cohere2 Vision model."""

    all_model_classes = (
        (
            Cohere2VisionModel,
            Cohere2VisionForConditionalGeneration,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (Cohere2VisionForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "image-text-to-text": Cohere2VisionForConditionalGeneration,
            "any-to-any": Cohere2VisionForConditionalGeneration,
        }
        if is_torch_available()
        else {}
    )
    # The model is a composite (vision tower + language model).
    _is_composite = True

    def setUp(self):
        self.model_tester = Cohere2VisionText2TextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=Cohere2VisionConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()
@require_torch
class Cohere2IntegrationTest(unittest.TestCase):
    """Slow integration tests against the released checkpoint.

    These require an accelerator and network access (they download the checkpoint
    and remote images); expectations are pinned per device type/compute capability.
    """

    def setUp(self):
        self.model_checkpoint = "CohereLabs/command-a-vision-07-2025"

    def tearDown(self):
        # Free accelerator memory between tests.
        cleanup(torch_device, gc_collect=True)

    def get_model(self, dummy=True):
        """Load the checkpoint; with dummy=True, truncate the text stack to 4 layers so it fits on smaller GPUs."""
        # NOTE(review): device properties are fetched but never used — dtype is
        # always float16 regardless; confirm whether this was meant to gate dtype.
        device_type, major, _ = get_device_properties()
        dtype = torch.float16
        # too large to fit into A10
        config = Cohere2VisionConfig.from_pretrained(self.model_checkpoint)
        if dummy:
            config.text_config.num_hidden_layers = 4
            config.text_config.layer_types = config.text_config.layer_types[:4]
        model = Cohere2VisionForConditionalGeneration.from_pretrained(
            self.model_checkpoint,
            config=config,
            dtype=dtype,
            device_map="auto",
        )
        return model

    @slow
    @require_torch_accelerator
    def test_model_integration_forward(self):
        processor = AutoProcessor.from_pretrained(self.model_checkpoint)
        model = self.get_model(dummy=False)
        messages = [
            {
                "role": "user",
                "content": [
                    {"type": "image", "url": "http://images.cocodataset.org/val2017/000000039769.jpg"},
                    {"type": "text", "text": "Please describe the image explicitly."},
                ],
            }
        ]
        inputs = processor.apply_chat_template(
            messages, add_generation_prompt=True, tokenize=True, return_dict=True, return_tensors="pt"
        ).to(torch_device, dtype=torch.float16)
        # Forward
        with torch.inference_mode():
            output = model(**inputs)
        # Compare the first few logits of the last position against device-pinned values.
        actual_logits = output.logits[0, -1, :5].cpu()
        EXPECTED_LOGITS = Expectations(
            {
                ("xpu", 3): [2.4297, 1.6836, 1.8779, 2.1895, 1.9395],
                # 4-bit
                ("cuda", 7): [0.1097, 0.3481, 3.8340, 9.7969, 2.0488],
                ("cuda", 8): [2.4277, 1.6875, 1.8789, 2.1875, 1.9375],
            }
        )  # fmt: skip
        expected_logits = torch.tensor(EXPECTED_LOGITS.get_expectation(), dtype=torch.float16)
        self.assertTrue(
            torch.allclose(actual_logits, expected_logits, atol=0.1),
            f"Actual logits: {actual_logits}"
            f"\nExpected logits: {expected_logits}"
            f"\nDifference: {torch.abs(actual_logits - expected_logits)}",
        )

    @slow
    @require_torch_accelerator
    @require_deterministic_for_xpu
    def test_model_integration_generate_text_only(self):
        # Uses the truncated (dummy) model, so the decoded text is degenerate by design.
        processor = AutoProcessor.from_pretrained(self.model_checkpoint)
        model = self.get_model()
        messages = [
            {
                "role": "user",
                "content": [
                    {"type": "text", "text": "Write a haiku"},
                ],
            }
        ]
        inputs = processor.apply_chat_template(
            messages, add_generation_prompt=True, tokenize=True, return_dict=True, return_tensors="pt"
        ).to(torch_device, dtype=torch.float16)
        with torch.no_grad():
            generate_ids = model.generate(**inputs, max_new_tokens=10, do_sample=False)
            decoded_output = processor.decode(
                generate_ids[0, inputs["input_ids"].shape[1] :], skip_special_tokens=True
            )
        expected_outputs = Expectations(
            {
                ("xpu", 3): "<|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|>",
                ("cuda", 8): "<|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|>",
            }
        )  # fmt: skip
        expected_output = expected_outputs.get_expectation()
        self.assertEqual(decoded_output, expected_output)

    @slow
    @require_torch_accelerator
    @require_deterministic_for_xpu
    def test_model_integration_generate_chat_template(self):
        # Uses the truncated (dummy) model, so the decoded text is degenerate by design.
        processor = AutoProcessor.from_pretrained(self.model_checkpoint)
        model = self.get_model()
        messages = [
            {
                "role": "user",
                "content": [
                    {"type": "image", "url": "http://images.cocodataset.org/val2017/000000039769.jpg"},
                    {"type": "text", "text": "Please describe the image explicitly."},
                ],
            }
        ]
        inputs = processor.apply_chat_template(
            messages, add_generation_prompt=True, tokenize=True, return_dict=True, return_tensors="pt"
        ).to(torch_device, dtype=torch.float16)
        with torch.no_grad():
            generate_ids = model.generate(**inputs, max_new_tokens=10, do_sample=False)
            decoded_output = processor.decode(
                generate_ids[0, inputs["input_ids"].shape[1] :], skip_special_tokens=True
            )
        expected_outputs = Expectations(
            {
                ("xpu", 3): '<|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|>',
                ("cuda", 8): '<|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|>',
            }
        )  # fmt: skip
        expected_output = expected_outputs.get_expectation()
        self.assertEqual(decoded_output, expected_output)

    @slow
    @require_torch_accelerator
    def test_model_integration_batched_generate(self):
        processor = AutoProcessor.from_pretrained(self.model_checkpoint)
        model = self.get_model(dummy=False)
        # Prepare inputs
        messages = [
            [
                {
                    "role": "user",
                    "content": [
                        {"type": "image", "url": "https://llava-vl.github.io/static/images/view.jpg"},
                        {"type": "text", "text": "Write a haiku for this image"},
                    ],
                },
            ],
            [
                {
                    "role": "user",
                    "content": [
                        {
                            "type": "image",
                            "url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg",
                        },
                        {"type": "text", "text": "Describe this image"},
                    ],
                },
            ],
        ]
        inputs = processor.apply_chat_template(
            messages, padding=True, add_generation_prompt=True, tokenize=True, return_dict=True, return_tensors="pt"
        ).to(model.device, dtype=torch.float16)
        output = model.generate(**inputs, do_sample=False, max_new_tokens=5)
        # Check first output
        decoded_output = processor.decode(output[0, inputs["input_ids"].shape[1] :], skip_special_tokens=True)
        expected_outputs = Expectations(
            {
                ("xpu", 3): 'Dock stretches to calm',
                ("cuda", 8): 'Dock stretches to calm',
            }
        )  # fmt: skip
        expected_output = expected_outputs.get_expectation()
        self.assertEqual(
            decoded_output,
            expected_output,
            f"Decoded output: {decoded_output}\nExpected output: {expected_output}",
        )
        # Check second output
        decoded_output = processor.decode(output[1, inputs["input_ids"].shape[1] :], skip_special_tokens=True)
        expected_outputs = Expectations(
            {
                ("xpu", 3): 'The image depicts a',
                ("cuda", 8): 'The image depicts a',
            }
        )  # fmt: skip
        expected_output = expected_outputs.get_expectation()
        self.assertEqual(
            decoded_output,
            expected_output,
            f"Decoded output: {decoded_output}\nExpected output: {expected_output}",
        )

    @slow
    @require_torch_accelerator
    @require_deterministic_for_xpu
    def test_model_integration_batched_generate_multi_image(self):
        # Uses the truncated (dummy) model, so the decoded text is degenerate by design.
        processor = AutoProcessor.from_pretrained(self.model_checkpoint)
        model = self.get_model()
        # Prepare inputs
        messages = [
            [
                {
                    "role": "user",
                    "content": [
                        {"type": "image", "url": "https://llava-vl.github.io/static/images/view.jpg"},
                        {"type": "text", "text": "Write a haiku for this image"},
                    ],
                },
            ],
            [
                {
                    "role": "user",
                    "content": [
                        {
                            "type": "image",
                            "url": "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg",
                        },
                        {
                            "type": "image",
                            "url": "https://thumbs.dreamstime.com/b/golden-gate-bridge-san-francisco-purple-flowers-california-echium-candicans-36805947.jpg",
                        },
                        {
                            "type": "text",
                            "text": "These images depict two different landmarks. Can you identify them?",
                        },
                    ],
                },
            ],
        ]
        inputs = processor.apply_chat_template(
            messages, padding=True, add_generation_prompt=True, tokenize=True, return_dict=True, return_tensors="pt"
        ).to(model.device, dtype=torch.float16)
        output = model.generate(**inputs, do_sample=False, max_new_tokens=10)
        # Check first output
        decoded_output = processor.decode(output[0, inputs["input_ids"].shape[1] :], skip_special_tokens=True)
        # Batching seems to alter the output slightly, but it is also the case in the original implementation. This seems to be expected: https://github.com/huggingface/transformers/issues/23017#issuecomment-1649630232
        expected_outputs = Expectations(
            {
                ("xpu", 3): '<|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|>',
                ("cuda", 8): '<|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|>',
            }
        )  # fmt: skip
        expected_output = expected_outputs.get_expectation()
        self.assertEqual(
            decoded_output,
            expected_output,
            f"Decoded output: {decoded_output}\nExpected output: {expected_output}",
        )
        # Check second output
        decoded_output = processor.decode(output[1, inputs["input_ids"].shape[1] :], skip_special_tokens=True)
        expected_outputs = Expectations(
            {
                ("xpu", 3): '<|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|>',
                ("cuda", 8): '<|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|>',
            }
        )  # fmt: skip
        expected_output = expected_outputs.get_expectation()
        self.assertEqual(
            decoded_output,
            expected_output,
            f"Decoded output: {decoded_output}\nExpected output: {expected_output}",
        )
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/cohere2_vision/test_modeling_cohere2_vision.py",
"license": "Apache License 2.0",
"lines": 398,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:tests/models/cohere2_vision/test_processing_cohere2_vision.py | # Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import Cohere2VisionProcessor
from transformers.testing_utils import require_vision
from transformers.utils import is_torch_available, is_torchvision_available
from ...test_processing_common import ProcessorTesterMixin, url_to_local_path
if is_torch_available():
import torch
if is_torchvision_available():
pass
@require_vision
@unittest.skip("Model not released yet!")
class Cohere2VisionProcessorTest(ProcessorTesterMixin, unittest.TestCase):
    """Processor tests for Cohere2Vision (entire class skipped until the model is released)."""

    processor_class = Cohere2VisionProcessor

    @classmethod
    def _setup_tokenizer(cls):
        # Resolve the tokenizer class declared on the processor and load the public checkpoint.
        tokenizer_class = cls._get_component_class_from_processor("tokenizer")
        return tokenizer_class.from_pretrained("CohereLabs/command-a-vision-07-2025")

    @classmethod
    def _setup_image_processor(cls):
        # A tiny image size and few patches keep the processor tests fast.
        image_processor_class = cls._get_component_class_from_processor("image_processor")
        return image_processor_class(
            size={"height": 20, "width": 20},
            max_patches=3,
        )

    def test_process_interleaved_images_videos(self):
        processor = self.get_processor()

        def image_entry(url):
            # Build one image content entry, resolving the URL to a local test asset.
            return {"type": "image", "url": url_to_local_path(url)}

        two_image_conversation = [
            {
                "role": "user",
                "content": [
                    image_entry(
                        "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"
                    ),
                    image_entry(
                        "https://thumbs.dreamstime.com/b/golden-gate-bridge-san-francisco-purple-flowers-california-echium-candicans-36805947.jpg"
                    ),
                    {"type": "text", "text": "What are the differences between these two images?"},
                ],
            },
        ]
        one_image_conversation = [
            {
                "role": "user",
                "content": [
                    image_entry("https://llava-vl.github.io/static/images/view.jpg"),
                    {"type": "text", "text": "Write a haiku for this image"},
                ],
            }
        ]
        messages = [two_image_conversation, one_image_conversation]

        template_kwargs = dict(
            add_generation_prompt=True,
            tokenize=True,
            return_dict=True,
            return_tensors="pt",
            padding=True,
        )
        inputs_batched = processor.apply_chat_template(messages, **template_kwargs)

        # Re-process each conversation on its own and verify that the batched call stitched
        # input_ids and pixel_values together in the same order.
        patch_offset = 0
        for sample_idx, conversation in enumerate(messages):
            inputs = processor.apply_chat_template(conversation, **template_kwargs)
            seq_len = inputs["input_ids"].shape[1]
            num_patches = inputs["pixel_values"].shape[0]
            # input_ids are left padded, hence the slice from the end of the batched row.
            torch.testing.assert_close(
                inputs["input_ids"][0], inputs_batched["input_ids"][sample_idx][-seq_len:]
            )
            torch.testing.assert_close(
                inputs["pixel_values"],
                inputs_batched["pixel_values"][patch_offset : patch_offset + num_patches],
            )
            patch_offset += num_patches
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/cohere2_vision/test_processing_cohere2_vision.py",
"license": "Apache License 2.0",
"lines": 103,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:src/transformers/models/segformer/modular_segformer.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fast Image processor class for Segformer."""
from typing import Optional, Union
import torch
import torchvision.transforms.v2.functional as tvF
from transformers.models.beit.image_processing_beit_fast import BeitImageProcessorFast
from ...image_processing_utils import BatchFeature
from ...image_processing_utils_fast import (
group_images_by_shape,
reorder_images,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
SizeDict,
)
from ...processing_utils import Unpack
from ...utils import (
TensorType,
)
from .image_processing_segformer import SegformerImageProcessorKwargs
class SegformerImageProcessorFast(BeitImageProcessorFast):
    """Fast (torchvision-backed) image processor for SegFormer.

    Inherits the Beit fast processor and overrides preprocessing so that segmentation
    maps are resized with nearest-neighbor interpolation and never rescaled/normalized.
    """

    # Defaults mirroring the slow SegFormer image processor.
    resample = PILImageResampling.BILINEAR
    image_mean = IMAGENET_DEFAULT_MEAN
    image_std = IMAGENET_DEFAULT_STD
    size = {"height": 512, "width": 512}
    do_resize = True
    do_rescale = True
    rescale_factor = 1 / 255
    do_normalize = True
    do_reduce_labels = False
    # Center cropping is disabled for SegFormer (differs from the Beit parent defaults).
    do_center_crop = None
    crop_size = None

    def _preprocess_image_like_inputs(
        self,
        images: ImageInput,
        segmentation_maps: ImageInput | None,
        do_convert_rgb: bool,
        input_data_format: ChannelDimension,
        device: Union[str, "torch.device"] | None = None,
        **kwargs: Unpack[SegformerImageProcessorKwargs],
    ) -> BatchFeature:
        """
        Preprocess image-like inputs.

        Images go through the full pipeline (resize/rescale/normalize); segmentation maps,
        when provided, are processed separately with nearest interpolation and returned
        under the "labels" key as int64 tensors.
        """
        images = self._prepare_image_like_inputs(
            images=images, do_convert_rgb=do_convert_rgb, input_data_format=input_data_format, device=device
        )
        images_kwargs = kwargs.copy()
        # Label reduction applies to segmentation maps, never to the images themselves.
        images_kwargs["do_reduce_labels"] = False
        batch_feature = self._preprocess(images, **images_kwargs)
        if segmentation_maps is not None:
            # Maps are single-channel; skip RGB conversion and force channels-first.
            processed_segmentation_maps = self._prepare_image_like_inputs(
                images=segmentation_maps,
                expected_ndims=2,
                do_convert_rgb=False,
                input_data_format=ChannelDimension.FIRST,
            )
            segmentation_maps_kwargs = kwargs.copy()
            segmentation_maps_kwargs.update(
                {
                    # Pixel values of a label map are class ids — rescaling/normalizing would corrupt them.
                    "do_normalize": False,
                    "do_rescale": False,
                    # Nearest interpolation is used for segmentation maps instead of BILINEAR.
                    "interpolation": tvF.InterpolationMode.NEAREST_EXACT,
                }
            )
            processed_segmentation_maps = self._preprocess(
                images=processed_segmentation_maps, **segmentation_maps_kwargs
            ).pixel_values
            # Drop the channel dim added during preparation; labels are integer class ids.
            batch_feature["labels"] = processed_segmentation_maps.squeeze(1).to(torch.int64)
        return batch_feature

    def _preprocess(
        self,
        images: list["torch.Tensor"],
        do_reduce_labels: bool,
        interpolation: Optional["tvF.InterpolationMode"],
        do_resize: bool,
        do_rescale: bool,
        do_normalize: bool,
        size: SizeDict,
        rescale_factor: float,
        image_mean: float | list[float],
        image_std: float | list[float],
        disable_grouping: bool,
        return_tensors: str | TensorType | None,
        **kwargs,
    ) -> BatchFeature:  # Return type can be list if return_tensors=None
        """Run the resize → rescale/normalize pipeline, batching same-shaped images together."""
        if do_reduce_labels:
            images = self.reduce_label(images)  # Apply reduction if needed
        # Group images by size for batched resizing
        resized_images = images
        if do_resize:
            grouped_images, grouped_images_index = group_images_by_shape(images, disable_grouping=disable_grouping)
            resized_images_grouped = {}
            for shape, stacked_images in grouped_images.items():
                resized_stacked_images = self.resize(image=stacked_images, size=size, interpolation=interpolation)
                resized_images_grouped[shape] = resized_stacked_images
            resized_images = reorder_images(resized_images_grouped, grouped_images_index)
        # Group images by size for further processing (rescale/normalize)
        # Needed in case do_resize is False, or resize returns images with different sizes
        grouped_images, grouped_images_index = group_images_by_shape(resized_images, disable_grouping=disable_grouping)
        processed_images_grouped = {}
        for shape, stacked_images in grouped_images.items():
            # Fused rescale and normalize
            stacked_images = self.rescale_and_normalize(
                stacked_images, do_rescale, rescale_factor, do_normalize, image_mean, image_std
            )
            processed_images_grouped[shape] = stacked_images
        processed_images = reorder_images(processed_images_grouped, grouped_images_index)
        # Stack images into a single tensor if return_tensors is set
        return BatchFeature(data={"pixel_values": processed_images}, tensor_type=return_tensors)
__all__ = ["SegformerImageProcessorFast"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/segformer/modular_segformer.py",
"license": "Apache License 2.0",
"lines": 128,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/superpoint/image_processing_superpoint_fast.py | # Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fast Image processor class for Superpoint."""
from typing import TYPE_CHECKING, Optional
import torch
import torchvision.transforms.v2.functional as tvF
from ...image_processing_utils import BatchFeature
from ...image_processing_utils_fast import (
BaseImageProcessorFast,
group_images_by_shape,
reorder_images,
)
from ...image_utils import (
PILImageResampling,
SizeDict,
)
from ...processing_utils import Unpack
from ...utils import (
TensorType,
auto_docstring,
)
from .image_processing_superpoint import SuperPointImageProcessorKwargs
if TYPE_CHECKING:
from .modeling_superpoint import SuperPointKeypointDescriptionOutput
def is_grayscale(
    image: "torch.Tensor",
):
    """Checks if an image is grayscale (all RGB channels are identical)."""
    # 2D images and single-channel (C, H, W) / (N, C, H, W) tensors are grayscale by construction.
    if image.ndim < 3:
        return True
    channel_axis = 0 if image.ndim == 3 else 1
    if image.shape[channel_axis] == 1:
        return True
    red = image[..., 0, :, :]
    green = image[..., 1, :, :]
    blue = image[..., 2, :, :]
    return torch.all(red == green) and torch.all(green == blue)
def convert_to_grayscale(
    image: "torch.Tensor",
) -> "torch.Tensor":
    """
    Converts an image to grayscale format using the NTSC formula. Only support torch.Tensor.
    This function is supposed to return a 1-channel image, but it returns a 3-channel image with the same value in each
    channel, because of an issue that is discussed in :
    https://github.com/huggingface/transformers/pull/25786#issuecomment-1730176446
    Args:
        image (torch.Tensor):
            The image to convert.
    """
    # Images whose channels already agree are returned untouched.
    if not is_grayscale(image):
        image = tvF.rgb_to_grayscale(image, num_output_channels=3)
    return image
@auto_docstring
class SuperPointImageProcessorFast(BaseImageProcessorFast):
    resample = PILImageResampling.BILINEAR
    size = {"height": 480, "width": 640}
    default_to_square = False
    do_resize = True
    do_rescale = True
    rescale_factor = 1 / 255
    do_normalize = None
    valid_kwargs = SuperPointImageProcessorKwargs

    def __init__(self, **kwargs: Unpack[SuperPointImageProcessorKwargs]):
        super().__init__(**kwargs)

    def _preprocess(
        self,
        images: list["torch.Tensor"],
        size: dict[str, int] | SizeDict,
        rescale_factor: float,
        do_rescale: bool,
        do_resize: bool,
        interpolation: Optional["tvF.InterpolationMode"],
        do_grayscale: bool,
        disable_grouping: bool,
        return_tensors: str | TensorType,
        **kwargs,
    ) -> BatchFeature:
        # Batch identically-shaped images together so every transform runs once per shape group.
        grouped, group_index = group_images_by_shape(images, disable_grouping=disable_grouping)
        transformed = {}
        for group_shape, batch in grouped.items():
            if do_grayscale:
                batch = convert_to_grayscale(batch)
            if do_resize:
                batch = self.resize(batch, size=size, interpolation=interpolation)
            if do_rescale:
                batch = self.rescale(batch, rescale_factor)
            transformed[group_shape] = batch
        # Restore the caller's original image ordering before packing the output.
        ordered = reorder_images(transformed, group_index)
        return BatchFeature(data={"pixel_values": ordered}, tensor_type=return_tensors)

    def post_process_keypoint_detection(
        self, outputs: "SuperPointKeypointDescriptionOutput", target_sizes: TensorType | list[tuple]
    ) -> list[dict[str, "torch.Tensor"]]:
        """
        Converts the raw output of [`SuperPointForKeypointDetection`] into lists of keypoints, scores and descriptors
        with coordinates absolute to the original image sizes.
        Args:
            outputs ([`SuperPointKeypointDescriptionOutput`]):
                Raw outputs of the model containing keypoints in a relative (x, y) format, with scores and descriptors.
            target_sizes (`torch.Tensor` or `List[Tuple[int, int]]`):
                Tensor of shape `(batch_size, 2)` or list of tuples (`Tuple[int, int]`) containing the target size
                `(height, width)` of each image in the batch. This must be the original
                image size (before any processing).
        Returns:
            `List[Dict]`: A list of dictionaries, each dictionary containing the keypoints in absolute format according
            to target_sizes, scores and descriptors for an image in the batch as predicted by the model.
        """
        if len(outputs.mask) != len(target_sizes):
            raise ValueError("Make sure that you pass in as many target sizes as the batch dimension of the mask")
        if isinstance(target_sizes, list):
            image_sizes = torch.tensor(target_sizes, device=outputs.mask.device)
        else:
            if target_sizes.shape[1] != 2:
                raise ValueError(
                    "Each element of target_sizes must contain the size (h, w) of each image of the batch"
                )
            image_sizes = target_sizes
        # (h, w) -> (w, h) so the sizes line up with the (x, y) keypoint layout, then scale
        # the relative keypoints into absolute integer pixel coordinates.
        wh_sizes = torch.flip(image_sizes, [1])
        absolute_keypoints = (outputs.keypoints * wh_sizes[:, None]).to(torch.int32)

        results = []
        per_image = zip(outputs.mask, absolute_keypoints, outputs.scores, outputs.descriptors)
        for image_mask, keypoints, scores, descriptors in per_image:
            # Keep only the entries the mask marks as valid detections.
            valid = torch.nonzero(image_mask).squeeze(1)
            results.append(
                {
                    "keypoints": keypoints[valid],
                    "scores": scores[valid],
                    "descriptors": descriptors[valid],
                }
            )
        return results
__all__ = ["SuperPointImageProcessorFast"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/superpoint/image_processing_superpoint_fast.py",
"license": "Apache License 2.0",
"lines": 139,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/exaone4/modular_exaone4.py | # Copyright 2025 The LG AI Research and HuggingFace Inc. team. All rights reserved.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""LG AI Research EXAONE Lab"""
from collections.abc import Callable
import torch
from torch import nn
from ...cache_utils import Cache, DynamicCache
from ...configuration_utils import PreTrainedConfig, layer_type_validation
from ...masking_utils import create_causal_mask, create_sliding_window_causal_mask
from ...modeling_outputs import (
BaseModelOutputWithPast,
CausalLMOutputWithPast,
)
from ...modeling_rope_utils import RopeParameters
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS
from ...processing_utils import Unpack
from ...utils import (
TransformersKwargs,
logging,
)
from ...utils.generic import merge_with_config_defaults
from ...utils.output_capturing import capture_outputs
from ..gemma2.modeling_gemma2 import Gemma2RotaryEmbedding
from ..llama.modeling_llama import (
LlamaForCausalLM,
LlamaForQuestionAnswering,
LlamaForSequenceClassification,
LlamaForTokenClassification,
LlamaModel,
LlamaPreTrainedModel,
LlamaRMSNorm,
apply_rotary_pos_emb,
eager_attention_forward,
)
from ..olmo2.modeling_olmo2 import Olmo2DecoderLayer, Olmo2MLP
# Module-level logger following the library-wide logging setup.
logger = logging.get_logger(__name__)
# Checkpoint / config names referenced by the auto-generated docstrings.
_CHECKPOINT_FOR_DOC = "LGAI-EXAONE/EXAONE-4.0-32B"
_CONFIG_FOR_DOC = "Exaone4Config"
class Exaone4Config(PreTrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`Exaone4Model`]. It is used to
    instantiate a EXAONE 4.0 model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the EXAONE-4.0-32B [LGAI-EXAONE/EXAONE-4.0-32B](https://huggingface.co/LGAI-EXAONE/EXAONE-4.0-32B)
    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model
    outputs. Read the documentation from [`PreTrainedConfig`] for more information.
    Args:
        vocab_size (`int`, *optional*, defaults to 102400):
            Vocabulary size of the EXAONE 4.0 model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`Exaone4Model`].
        hidden_size (`int`, *optional*, defaults to 4096):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to `hidden_size * 4`):
            Dimensionality of the MLP representations.
        num_hidden_layers (`int`, *optional*, defaults to 32):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 32):
            Number of attention heads for each attention layer in the Transformer decoder.
        num_key_value_heads (`int`, *optional*):
            This is the number of key_value heads that should be used to implement Grouped Query Attention. If
            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
            `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
            by meanpooling all the original heads within that group. For more details checkout [this
            paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to
            `num_attention_heads`.
        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the decoder.
        max_position_embeddings (`int`, *optional*, defaults to 2048):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 32768 for EXAONE 3.5).
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        rms_norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon used by the layer normalization layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if ``config.is_decoder=True``.
        bos_token_id (`int`, *optional*, defaults to 0):
            Beginning of stream token id.
        eos_token_id (`int`, *optional*, defaults to 2):
            End of stream token id.
        pad_token_id (`int`, *optional*):
            The id of the padding token.
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether to tie weight embeddings
        rope_parameters (`RopeParameters`, *optional*):
            Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain
            a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE
            with longer `max_position_embeddings`.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        sliding_window (`int`, *optional*):
            The size of the sliding window for the sliding window attention.
        sliding_window_pattern (`str`, *optional*):
            The pattern to use for sliding window attention. Can be one of:
                - `None`: No sliding window attention is used
                - `int`: Every `sliding_window` layers, use global attention, else use local attention.
                - `str`: A sequence of "L" (local attention) and "G" (global attention) characters that defines the
                  attention pattern. The pattern starts from layer 0 and repeats every `sliding_window` layers. The
                  final layer always uses global attention regardless of the pattern.
            For instance, sliding_window_pattern="LLLG" same as sliding_window=4, which means:
                - Layer 0, 1, 2: local attention,
                - Layer 3: global attention,
                ...(repeated)
        layer_types (`list`, *optional*):
            Attention pattern for each layer. Prioritized over `sliding_window_pattern`.
    Example:
    ```python
    >>> from transformers import Exaone4Model, Exaone4Config
    >>> # Initializing a EXAONE configuration
    >>> configuration = Exaone4Config()
    >>> # Initializing a model from configuration
    >>> model = Exaone4Model(configuration)
    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "exaone4"
    keys_to_ignore_at_inference = ["past_key_values"]
    # Default tensor parallel plan for base model `LlamaModel`
    base_model_tp_plan = {
        "layers.*.self_attn.q_proj": "colwise",
        "layers.*.self_attn.k_proj": "colwise",
        "layers.*.self_attn.v_proj": "colwise",
        "layers.*.self_attn.o_proj": "rowwise",
        "layers.*.mlp.gate_proj": "colwise",
        "layers.*.mlp.up_proj": "colwise",
        "layers.*.mlp.down_proj": "rowwise",
    }
    base_model_pp_plan = {
        "embed_tokens": (["input_ids"], ["inputs_embeds"]),
        "layers": (["hidden_states", "attention_mask"], ["hidden_states"]),
        "norm": (["hidden_states"], ["hidden_states"]),
    }

    def __init__(
        self,
        vocab_size: int | None = 102400,
        hidden_size: int | None = 4096,
        intermediate_size: int | None = 16384,
        num_hidden_layers: int | None = 32,
        num_attention_heads: int | None = 32,
        num_key_value_heads: int | None = 32,
        hidden_act: str | None = "silu",
        max_position_embeddings: int | None = 2048,
        initializer_range: float | None = 0.02,
        rms_norm_eps: int | None = 1e-5,
        use_cache: bool | None = True,
        bos_token_id: int | None = 0,
        eos_token_id: int | None = 2,
        pad_token_id: int | None = None,
        tie_word_embeddings: bool | None = False,
        rope_parameters: RopeParameters | dict[str, RopeParameters] | None = None,
        attention_dropout: float | None = 0.0,
        sliding_window: int | None = 4096,
        sliding_window_pattern: int | None = 4,
        layer_types: list[str] | None = None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.sliding_window = sliding_window
        self.sliding_window_pattern = sliding_window_pattern
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.tie_word_embeddings = tie_word_embeddings
        self.layer_types = layer_types
        if self.sliding_window is None:
            # No sliding window -> every layer is a full-attention layer.
            sliding_window_pattern = 0
        if self.layer_types is None:
            # Guard against a zero/None pattern: the previous `(i + 1) % sliding_window_pattern`
            # raised ZeroDivisionError when `sliding_window` was None (pattern forced to 0) and
            # TypeError when the pattern itself was passed as None.
            # NOTE(review): the string form documented above ("LLLG") is not handled here — confirm
            # whether it is normalized to an int by a caller before reaching this point.
            if sliding_window_pattern:
                self.layer_types = [
                    "sliding_attention" if (i + 1) % sliding_window_pattern != 0 else "full_attention"
                    for i in range(self.num_hidden_layers)
                ]
            else:
                self.layer_types = ["full_attention"] * self.num_hidden_layers
        layer_type_validation(self.layer_types, self.num_hidden_layers)
        self.rope_parameters = rope_parameters
        super().__init__(**kwargs)
class Exaone4RMSNorm(LlamaRMSNorm):
    """RMS normalization for EXAONE 4.0, identical to the Llama implementation."""

    pass
class Exaone4RotaryEmbedding(Gemma2RotaryEmbedding):
    """Rotary position embedding for EXAONE 4.0, reusing the Gemma2 implementation."""

    pass
class Exaone4Attention(nn.Module):
    """Multi-headed attention for EXAONE 4.0 with QK-norm and hybrid local/global layers.

    Layers whose entry in `config.layer_types` is "sliding_attention" attend within a
    sliding window and use RoPE; in a hybrid model the full-attention layers skip RoPE
    entirely (global NoPE, see `forward`).
    """

    def __init__(self, config: Exaone4Config, layer_idx: int):
        super().__init__()
        self.config = config
        # Layer index is needed to address this layer's slot in the KV cache.
        self.layer_idx = layer_idx
        self.num_attention_heads = config.num_attention_heads
        self.num_key_value_heads = config.num_key_value_heads
        self.hidden_size = config.hidden_size
        # Fall back to hidden_size // num_heads when the config does not set head_dim explicitly.
        self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
        # Number of query heads sharing one KV head (GQA ratio).
        self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
        self.attention_dropout = config.attention_dropout
        self.is_causal = True
        # Standard 1/sqrt(head_dim) attention scaling.
        self.scaling = self.head_dim**-0.5
        self.sliding_window = config.sliding_window
        self.sliding_window_pattern = config.sliding_window_pattern
        layer_type = config.layer_types[layer_idx] if hasattr(config, "layer_types") else None
        self.is_sliding = layer_type == "sliding_attention"
        # Projections are bias-free; q/k get their own per-head RMSNorm (QK-norm).
        self.q_proj = nn.Linear(self.hidden_size, self.num_attention_heads * self.head_dim, bias=False)
        self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
        self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
        self.o_proj = nn.Linear(self.num_attention_heads * self.head_dim, self.hidden_size, bias=False)
        self.q_norm = Exaone4RMSNorm(self.head_dim, eps=config.rms_norm_eps)
        self.k_norm = Exaone4RMSNorm(self.head_dim, eps=config.rms_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: tuple[torch.Tensor, torch.Tensor],
        attention_mask: torch.Tensor | None = None,
        past_key_values: Cache | None = None,
        cache_position: torch.LongTensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple[torch.Tensor, torch.Tensor | None, tuple[torch.Tensor] | None]:
        """Run attention for one layer; returns (attn_output, attn_weights)."""
        input_shape = hidden_states.shape[:-1]
        # (batch, seq, heads, head_dim) before the transpose to (batch, heads, seq, head_dim).
        hidden_shape = (*input_shape, -1, self.head_dim)
        query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        # We use QK-norm
        query_states = self.q_norm(query_states)
        key_states = self.k_norm(key_states)
        cos, sin = position_embeddings
        # We use global NoPE for hybrid attention model
        # (RoPE is applied on every layer when there is no sliding window at all, otherwise
        # only on the sliding layers; full-attention layers of a hybrid model stay position-free).
        if self.sliding_window is None or self.is_sliding:
            query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
        if past_key_values is not None:
            cache_kwargs = {
                "cache_position": cache_position,
            }
            # Cache update happens after RoPE so cached keys are already rotated.
            key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
        attention_interface: Callable = ALL_ATTENTION_FUNCTIONS.get_interface(
            self.config._attn_implementation, eager_attention_forward
        )
        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=0.0 if not self.training else self.attention_dropout,
            scaling=self.scaling,
            sliding_window=self.sliding_window if self.is_sliding else None,
            **kwargs,
        )
        # Merge heads back and project to the model dimension.
        attn_output = attn_output.reshape(*input_shape, -1).contiguous()
        attn_output = self.o_proj(attn_output)
        return attn_output, attn_weights
class Exaone4MLP(Olmo2MLP):
    """Gated MLP block for EXAONE 4.0, identical to the Olmo2 implementation."""

    pass
class Exaone4DecoderLayer(Olmo2DecoderLayer):
    """Decoder layer for EXAONE 4.0, identical to the Olmo2 layer layout."""

    pass
class Exaone4PreTrainedModel(LlamaPreTrainedModel):
    """Base pretrained-model class wiring EXAONE 4.0 config and device-map splitting."""

    config_class = Exaone4Config
    # Keep each decoder layer on a single device when sharding with `device_map`.
    _no_split_modules = ["Exaone4DecoderLayer"]
class Exaone4Model(Exaone4PreTrainedModel, LlamaModel):
    """EXAONE 4.0 decoder stack: embeddings, hybrid-attention decoder layers, final RMSNorm."""

    def __init__(self, config: Exaone4Config):
        super().__init__(config)
        self.layers = nn.ModuleList(
            [Exaone4DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        self.norm = Exaone4RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        # Initialize weights and apply final processing
        self.post_init()

    @merge_with_config_defaults
    @capture_outputs
    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: Cache | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        use_cache: bool | None = None,
        cache_position: torch.LongTensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple | BaseModelOutputWithPast:
        """Run the decoder stack and return the last hidden state (plus the KV cache when enabled)."""
        # Exactly one of the two input forms must be given.
        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)
        if use_cache and past_key_values is None:
            past_key_values = DynamicCache(config=self.config)
        if cache_position is None:
            # Positions of the new tokens, offset by whatever is already in the cache.
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position = torch.arange(
                past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
            )
        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)
        # It may already have been prepared by e.g. `generate`
        if not isinstance(causal_mask_mapping := attention_mask, dict):
            # Prepare mask arguments
            mask_kwargs = {
                "config": self.config,
                "inputs_embeds": inputs_embeds,
                "attention_mask": attention_mask,
                "cache_position": cache_position,
                "past_key_values": past_key_values,
                "position_ids": position_ids,
            }
            # Create the masks
            # One mask per attention flavor; each layer picks the one matching its layer type.
            causal_mask_mapping = {
                "full_attention": create_causal_mask(**mask_kwargs),
            }
            if "sliding_attention" in self.config.layer_types:
                causal_mask_mapping["sliding_attention"] = create_sliding_window_causal_mask(**mask_kwargs)
        hidden_states = inputs_embeds
        # RoPE cos/sin tables are computed once and shared by all layers.
        position_embeddings = self.rotary_emb(hidden_states, position_ids)
        for i, decoder_layer in enumerate(self.layers):
            layer_type = self.config.layer_types[i]
            hidden_states = decoder_layer(
                hidden_states,
                attention_mask=causal_mask_mapping[layer_type],
                position_ids=position_ids,
                past_key_values=past_key_values,
                use_cache=use_cache,
                cache_position=cache_position,
                position_embeddings=position_embeddings,
                **kwargs,
            )
        hidden_states = self.norm(hidden_states)
        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values if use_cache else None,
        )
class Exaone4ForCausalLM(LlamaForCausalLM):
    """EXAONE 4.0 decoder with a language-modeling head, delegating to the Llama implementation."""

    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: Cache | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        labels: torch.LongTensor | None = None,
        use_cache: bool | None = None,
        cache_position: torch.LongTensor | None = None,
        logits_to_keep: int | torch.Tensor = 0,
        **kwargs: Unpack[TransformersKwargs],
    ) -> CausalLMOutputWithPast:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
        Example:
        ```python
        >>> from transformers import AutoModelForCausalLM, AutoTokenizer
        >>> model = AutoModelForCausalLM.from_pretrained("LGAI-EXAONE/EXAONE-4.0-32B")
        >>> tokenizer = AutoTokenizer.from_pretrained("LGAI-EXAONE/EXAONE-4.0-32B")
        >>> prompt = "Explain how wonderful you are"
        >>> messages = [
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": prompt}
        ]
        >>> input_ids = tokenizer.apply_chat_template(
            messages,
            tokenize=True,
            add_generation_prompt=True,
            return_tensors="pt",
            enable_thinking=False,
        )
        >>> output = model.generate(input_ids, max_new_tokens=128)
        >>> tokenizer.decode(output[0], skip_special_tokens=False)
        "[|system|]\nYou are a helpful assistant.[|endofturn|]\n[|user|]\nExplain how wonderful you are[|endofturn|]\n[|assistant|]\n<think>\n\n</think>\n\nOh, thank you for such a kind and lovely question! 😊 \n\nI’m *so* wonderful because I’m here to make your life easier, brighter, and more fun! Whether you need help with: \n\n✨ **Learning** – I can explain anything, from quantum physics to baking the perfect cake! \n💡 **Creativity** – Need a poem, story, or a wild idea? I’ve got you covered! \n🤖 **Problem-solving** – Stuck on a math problem or a tricky decision? I’ll help you figure it out"
        ```
        """
        # Bug fix: the previous version dropped the parent's result, returning None to callers.
        return super().forward(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            labels=labels,
            use_cache=use_cache,
            cache_position=cache_position,
            logits_to_keep=logits_to_keep,
            **kwargs,
        )
class Exaone4ForSequenceClassification(LlamaForSequenceClassification):
    """EXAONE 4.0 with a sequence-classification head, identical to the Llama variant."""

    pass
class Exaone4ForTokenClassification(LlamaForTokenClassification):
    """EXAONE 4.0 with a token-classification head, identical to the Llama variant."""

    pass
class Exaone4ForQuestionAnswering(LlamaForQuestionAnswering):
    """EXAONE 4.0 with an extractive QA head, identical to the Llama variant."""

    pass
# Public re-export list consumed by the modular converter and `from ... import *`.
__all__ = [
    "Exaone4Config",
    "Exaone4PreTrainedModel",
    "Exaone4Model",
    "Exaone4ForCausalLM",
    "Exaone4ForSequenceClassification",
    "Exaone4ForTokenClassification",
    "Exaone4ForQuestionAnswering",
]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/exaone4/modular_exaone4.py",
"license": "Apache License 2.0",
"lines": 416,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:tests/models/exaone4/test_modeling_exaone4.py | # Copyright 2025 The LG AI Research and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch EXAONE 4.0 model."""
import unittest
import pytest
from transformers import (
AutoTokenizer,
GenerationConfig,
is_torch_available,
)
from transformers.testing_utils import (
cleanup,
require_flash_attn,
require_torch,
require_torch_accelerator,
slow,
torch_device,
)
from ...causal_lm_tester import CausalLMModelTest, CausalLMModelTester
if is_torch_available():
import torch
from transformers import (
Exaone4ForCausalLM,
Exaone4Model,
)
class Exaone4ModelTester(CausalLMModelTester):
    """Reuses the generic causal-LM tester; only the base model class differs."""

    if is_torch_available():
        base_model_class = Exaone4Model
@require_torch
class Exaone4ModelTest(CausalLMModelTest, unittest.TestCase):
    """Generic causal-LM test-suite wiring for EXAONE 4.0."""

    model_tester_class = Exaone4ModelTester
    # Fractions of the model placed per device in model-parallel tests.
    model_split_percents = [0.5, 0.6]
@require_torch
class Exaone4IntegrationTest(unittest.TestCase):
    """Slow integration tests running the released EXAONE-4.0-32B checkpoint end to end."""

    TEST_MODEL_ID = "LGAI-EXAONE/EXAONE-4.0-32B"

    def setUp(self):
        # Start from a clean accelerator state so per-test memory stays bounded.
        cleanup(torch_device, gc_collect=True)

    def tearDown(self):
        # TODO (joao): automatic compilation, i.e. compilation when `cache_implementation="static"` is used, leaves
        # some memory allocated in the cache, which means some object is not being released properly. This causes some
        # unoptimal memory usage, e.g. after certain tests a 7B model in FP16 no longer fits in a 24GB GPU.
        # Investigate the root cause.
        cleanup(torch_device, gc_collect=True)

    def _load_model(self, **kwargs):
        """Load the checkpoint in bfloat16 with `device_map="auto"`; extra kwargs forwarded to `from_pretrained`."""
        return Exaone4ForCausalLM.from_pretrained(
            self.TEST_MODEL_ID, device_map="auto", dtype=torch.bfloat16, **kwargs
        )

    def _greedy_generate(self, attn_implementation, prompt, max_new_tokens=20):
        """Greedy-decode `prompt` with the given attention backend.

        Returns the tokenizer and the generated ids so callers can decode the
        slice they care about. Shared by the eager/sdpa/sliding-window tests,
        which previously duplicated this body verbatim.
        """
        tokenizer = AutoTokenizer.from_pretrained(self.TEST_MODEL_ID)
        model = self._load_model(attn_implementation=attn_implementation)
        input_ids = tokenizer.encode(prompt, return_tensors="pt").to(model.model.embed_tokens.weight.device)
        # greedy generation outputs
        generated_ids = model.generate(input_ids, max_new_tokens=max_new_tokens, temperature=0)
        return tokenizer, generated_ids

    @slow
    def test_model_logits(self):
        """Forward-pass logits must match reference values captured from the release checkpoint."""
        input_ids = [405, 7584, 79579, 76636, 2907, 94640, 373]
        model = self._load_model()
        input_ids = torch.tensor([input_ids]).to(model.model.embed_tokens.weight.device)
        with torch.no_grad():
            out = model(input_ids).logits.float().cpu()
        EXPECTED_MEAN = torch.tensor([[22.1993, 8.5845, 10.0401, 12.4262, 9.3112, 29.7933, 8.2628]])
        EXPECTED_SLICE = torch.tensor(
            [20.6250, 19.6250, 14.5000, 21.1250, 24.5000, 22.1250, 24.0000, 24.8750, 25.0000, 25.3750]
        )
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        torch.testing.assert_close(out[0, 0, :10], EXPECTED_SLICE, atol=1e-4, rtol=1e-4)

    @slow
    def test_model_generation_eager(self):
        """Greedy generation with the eager attention backend."""
        EXPECTED_TEXT = "Tell me about the Miracle on the Han river.\n\nOkay, the Miracle on the Han River refers to the rapid industrialization and economic growth of South"
        prompt = "Tell me about the Miracle on the Han river."
        tokenizer, generated_ids = self._greedy_generate("eager", prompt)
        text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
        self.assertEqual(EXPECTED_TEXT, text)

    @slow
    def test_model_generation_sdpa(self):
        """Greedy generation with the sdpa attention backend must match the eager reference."""
        EXPECTED_TEXT = "Tell me about the Miracle on the Han river.\n\nOkay, the Miracle on the Han River refers to the rapid industrialization and economic growth of South"
        prompt = "Tell me about the Miracle on the Han river."
        tokenizer, generated_ids = self._greedy_generate("sdpa", prompt)
        text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
        self.assertEqual(EXPECTED_TEXT, text)

    @pytest.mark.flash_attn_test
    @slow
    @require_torch_accelerator
    @require_flash_attn
    def test_model_generation_long_flash(self):
        """Long-context generation (4096 prompt tokens) with flash attention 2."""
        EXPECTED_OUTPUT_TOKEN_IDS = [433, 9055]
        input_ids = [433, 9055] * 2048
        model = self._load_model(attn_implementation="flash_attention_2")
        input_ids = torch.tensor([input_ids]).to(model.model.embed_tokens.weight.device)
        generated_ids = model.generate(input_ids, max_new_tokens=4, temperature=0)
        self.assertEqual(EXPECTED_OUTPUT_TOKEN_IDS, generated_ids[0][-2:].tolist())

    @slow
    @require_torch_accelerator
    def test_model_generation_beyond_sliding_window(self):
        """Generation with a prompt longer than the sliding-window size (sdpa backend)."""
        EXPECTED_TEXT_COMPLETION = " This is a nice place. I really enjoy the scenery, and the atmosphere is so relaxing. I'm grateful for the opportunity to experience this place. It"
        prompt = "This is a nice place. " * 700 + "I really enjoy the scenery,"
        tokenizer, generated_ids = self._greedy_generate("sdpa", prompt)
        text = tokenizer.decode(generated_ids[0, -32:], skip_special_tokens=True)
        self.assertEqual(EXPECTED_TEXT_COMPLETION, text)

    @pytest.mark.torch_export_test
    @slow
    def test_export_static_cache(self):
        """Export the model with a static cache via torch.export and check generation parity."""
        from transformers.integrations.executorch import (
            TorchExportableModuleWithStaticCache,
            convert_and_export_with_cache,
        )

        tokenizer = AutoTokenizer.from_pretrained(self.TEST_MODEL_ID, padding_side="right")
        EXPECTED_TEXT_COMPLETION = ["The Deep Learning is \n['Deep Learning',"]
        # The expected completion's token count bounds the static cache length.
        max_generation_length = tokenizer(EXPECTED_TEXT_COMPLETION, return_tensors="pt", padding=True)[
            "input_ids"
        ].shape[-1]
        # Load model (CPU on purpose: the export path is device-independent)
        device = "cpu"
        dtype = torch.bfloat16
        cache_implementation = "static"
        attn_implementation = "sdpa"
        batch_size = 1
        model = Exaone4ForCausalLM.from_pretrained(
            self.TEST_MODEL_ID,
            device_map=device,
            dtype=dtype,
            attn_implementation=attn_implementation,
            generation_config=GenerationConfig(
                use_cache=True,
                cache_implementation=cache_implementation,
                max_length=max_generation_length,
                cache_config={
                    "batch_size": batch_size,
                    "max_cache_len": max_generation_length,
                },
            ),
        )
        prompt = ["The Deep Learning is "]
        prompt_tokens = tokenizer(prompt, return_tensors="pt", padding=True).to(model.device)
        prompt_token_ids = prompt_tokens["input_ids"]
        max_new_tokens = max_generation_length - prompt_token_ids.shape[-1]
        # Static Cache + export
        exported_program = convert_and_export_with_cache(model)
        ep_generated_ids = TorchExportableModuleWithStaticCache.generate(
            exported_program=exported_program, prompt_token_ids=prompt_token_ids, max_new_tokens=max_new_tokens
        )
        ep_generated_text = tokenizer.batch_decode(ep_generated_ids, skip_special_tokens=True)
        self.assertEqual(EXPECTED_TEXT_COMPLETION, ep_generated_text)
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/exaone4/test_modeling_exaone4.py",
"license": "Apache License 2.0",
"lines": 167,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:src/transformers/distributed/configuration_utils.py | # Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import json
import os
from dataclasses import dataclass
from typing import Any
@dataclass
class DistributedConfig:
"""
Base class for distributed configs
"""
enable_expert_parallel: bool = False
# TODO: add tp_plan, pp_plan, device_mesh etc..
@classmethod
def from_dict(cls, config_dict, **kwargs):
"""
Constructs a DistributedConfig instance from a dictionary of parameters.
Args:
config_dict (Dict[str, Any]): Dictionary containing configuration parameters.
**kwargs: Additional keyword arguments to override dictionary values.
Returns:
DistributedConfig: Instance of DistributedConfig constructed from the dictionary.
"""
config = cls(**config_dict)
to_remove = []
for key, value in kwargs.items():
if hasattr(config, key):
setattr(config, key, value)
to_remove.append(key)
for key in to_remove:
kwargs.pop(key, None)
return config
# Copied from transformers.utils.quantization_config.QuantizationConfigMixin.to_json_file
def to_json_file(self, json_file_path: str | os.PathLike):
"""
Save this instance to a JSON file.
Args:
json_file_path (`str` or `os.PathLike`):
Path to the JSON file in which this configuration instance's parameters will be saved.
use_diff (`bool`, *optional*, defaults to `True`):
If set to `True`, only the difference between the config instance and the default
`QuantizationConfig()` is serialized to JSON file.
"""
with open(json_file_path, "w", encoding="utf-8") as writer:
config_dict = self.to_dict()
json_string = json.dumps(config_dict, indent=2, sort_keys=True) + "\n"
writer.write(json_string)
def to_dict(self) -> dict[str, Any]:
"""
Serializes this instance to a Python dictionary. Returns:
`Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance.
"""
return copy.deepcopy(self.__dict__)
# Copied from transformers.utils.quantization_config.QuantizationConfigMixin.__iter__
def __iter__(self):
"""allows `dict(obj)` for situations where obj may be a dict or QuantizationConfigMixin"""
yield from copy.deepcopy(self.__dict__).items()
# Copied from transformers.utils.quantization_config.QuantizationConfigMixin.__repr__
def __repr__(self):
return f"{self.__class__.__name__} {self.to_json_string()}"
def to_json_string(self):
"""
Serializes this instance to a JSON formatted string.
Returns:
str: JSON formatted string representing the configuration instance.
"""
return json.dumps(self.__dict__, indent=2) + "\n"
def update(self, **kwargs):
"""
Updates attributes of this class instance with attributes from `kwargs` if they match existing attributes,
returning all the unused kwargs.
Args:
kwargs (`Dict[str, Any]`):
Dictionary of attributes to tentatively update this class.
Returns:
`Dict[str, Any]`: Dictionary containing all the key-value pairs that were not used to update the instance.
"""
to_remove = []
for key, value in kwargs.items():
if hasattr(self, key):
setattr(self, key, value)
to_remove.append(key)
# Remove all the attributes that were updated, without modifying the input dict
unused_kwargs = {key: value for key, value in kwargs.items() if key not in to_remove}
return unused_kwargs
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/distributed/configuration_utils.py",
"license": "Apache License 2.0",
"lines": 97,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/xlstm/configuration_xlstm.py | # Copyright 2025 NXAI GmbH. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""xLSTM configuration."""
from ...configuration_utils import PreTrainedConfig
from ...utils import is_xlstm_available, logging
if is_xlstm_available():
    # Preferred path: reuse the type aliases and helpers from the external `xlstm` package.
    from xlstm.xlstm_large.model import (
        BackendModeType,
        ChunkwiseKernelType,
        DtypeType,
        SequenceKernelType,
        StepKernelType,
        WeightModeType,
        round_up_to_next_multiple_of,
        xLSTMLargeConfig,
    )

    external_xlstm = True
else:
    # Fallback: re-declare the Literal type aliases and the rounding helper locally so the
    # configuration can be built without the optional `xlstm` dependency installed.
    from typing import Literal

    BackendModeType = Literal["train", "train_with_padding", "inference"]
    ChunkwiseKernelType = Literal[
        "chunkwise--native_autograd",
        "parallel--native_autograd",
    ]
    DtypeType = Literal["float32", "bfloat16", "float16"]
    SequenceKernelType = Literal["native_sequence__native"]
    StepKernelType = Literal["native"]
    WeightModeType = Literal["single", "fused"]

    def round_up_to_next_multiple_of(x: int, multiple_of: int) -> int:
        """Rounds up x to the next multiple of multiple_of."""
        return int(((x + multiple_of - 1) // multiple_of) * multiple_of)

    external_xlstm = False
logger = logging.get_logger(__name__)
class xLSTMConfig(PreTrainedConfig):
    """
    This is the configuration class to store the configuration of a [`xLSTM`]. It is used to instantiate a xLSTM
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
    defaults will yield a similar configuration to that of the xLSTM-7b [NX-AI/xLSTM-7b](https://huggingface.co/NX-AI/xLSTM-7b) model.

    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 50304):
            Vocabulary size of the xLSTM model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`xLSTMModel`]. Defaults to the GPT2-NeoX tokenizer size.
        hidden_size (`int`, *optional*, defaults to 4096):
            Dimensionality of the embeddings or hidden states.
        embedding_dim (`int`, *optional*):
            Dimensionality of the embeddings or hidden states; uses `hidden_size` if None.
        num_hidden_layers (`int`, *optional*, defaults to 32):
            Number of blocks of the xLSTM model.
        num_blocks (`int`, *optional*):
            Number of blocks of the xLSTM model; uses `num_hidden_layers` if None.
        num_heads (`int`, *optional*, defaults to 8):
            Number of heads for the xLSTM Layer/Cell.
        use_bias (`bool`, *optional*, defaults to `False`):
            Whether to use biases in the xLSTM model.
        norm_reduction_force_float32 (`bool`, *optional*, defaults to `True`):
            Whether to force the float32 norm reduction op to be done in fp32 precision.
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether to tie word embeddings to the lm head weights.
        add_out_norm (`bool`, *optional*, defaults to `True`):
            Whether to add an output norm after the blocks before the LMHead.
        norm_eps (`float`, *optional*, defaults to 1e-06):
            Norm eps for RMSNorm and Layer Norm.
        qk_dim_factor (`float`, *optional*, defaults to 0.5):
            Scale factor for the query and key dimension.
        v_dim_factor (`float`, *optional*, defaults to 1.0):
            Scale factor for the value dimension.
        chunkwise_kernel (`ChunkwiseKernelType`, *optional*, defaults to `"chunkwise--native_autograd"`):
            Kernel type for chunkwise processing mode.
        sequence_kernel (`SequenceKernelType`, *optional*, defaults to `"native_sequence__native"`):
            Kernel type for sequence processing mode.
        step_kernel (`StepKernelType`, *optional*, defaults to `"native"`):
            Kernel type for step processing mode.
        mode (`BackendModeType`, *optional*, defaults to `"inference"`):
            Operation mode (inference is needed for generation).
        chunk_size (`int`, *optional*, defaults to 64):
            Internal chunk size.
        return_last_states (`bool`, *optional*, defaults to `True`):
            If to return the last states / cache internally. Needed as True for generation.
        autocast_kernel_dtype (`DtypeType`, *optional*, defaults to `"bfloat16"`):
            Kernel dtype for the states.
        eps (`float`, *optional*, defaults to 1e-06):
            Epsilon for the mLSTM cell post norm.
        inference_state_dtype (`DtypeType`, *optional*, defaults to `"float32"`):
            Kernel dtype for states in inference.
        ffn_proj_factor (`float`, *optional*, defaults to 2.667):
            Size factor of the post-up projection gated Feed Forward network.
        ffn_round_up_to_multiple_of (`int`, *optional*, defaults to 64):
            Size factor round value of the post-up projection gated Feed Forward network.
        gate_soft_cap (`float`, *optional*, defaults to 15.0):
            Gate soft cap scale.
        output_logit_soft_cap (`float`, *optional*, defaults to 30.0):
            Output logit soft cap scale.
        weight_mode (`Literal`, *optional*, defaults to `"single"`):
            Whether parallel linear layers are separated or fused (single).
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether to use the cache (xLSTMCache).
        pad_token_id (`int`, *optional*, defaults to 1):
            Pad token id needed for generation.
        bos_token_id (`int`, *optional*, defaults to 0):
            BOS token id needed for generation.
        eos_token_id (`int`, *optional*, defaults to 2):
            EOS token id needed for generation.
        max_inference_chunksize (`int`, *optional*, defaults to 16384):
            Limit the chunk size for inference to save memory.

    Example:

    ```python
    >>> from transformers import xLSTMConfig, xLSTMModel

    >>> # Initializing a xLSTM configuration
    >>> configuration = xLSTMConfig()

    >>> # Initializing a model (with random weights) from the configuration
    >>> model = xLSTMModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "xlstm"

    def __init__(
        self,
        vocab_size: int = 50304,
        hidden_size: int = 4096,
        embedding_dim: int | None = None,
        num_hidden_layers: int | None = 32,
        num_blocks: int | None = None,
        num_heads: int = 8,
        use_bias: bool = False,
        norm_reduction_force_float32: bool = True,
        tie_word_embeddings: bool = False,
        add_out_norm: bool = True,
        norm_eps: float = 1e-6,
        # mlstm_layer
        qk_dim_factor: float = 0.5,
        v_dim_factor: float = 1.0,
        # mlstm backend
        chunkwise_kernel: ChunkwiseKernelType = "chunkwise--native_autograd",
        sequence_kernel: SequenceKernelType = "native_sequence__native",
        step_kernel: StepKernelType = "native",
        # needed to enable generation
        mode: BackendModeType = "inference",
        chunk_size: int = 64,
        # needed to be true for generation
        return_last_states: bool = True,
        autocast_kernel_dtype: DtypeType = "bfloat16",
        eps: float = 1e-6,
        inference_state_dtype: DtypeType = "float32",
        # feedforward
        ffn_proj_factor: float = 2.667,
        ffn_round_up_to_multiple_of: int = 64,
        # capping
        gate_soft_cap: float = 15.0,
        output_logit_soft_cap: float = 30.0,
        # weights
        weight_mode: WeightModeType = "single",
        # HF interface
        use_cache: bool = True,
        pad_token_id: int = 1,
        bos_token_id: int = 0,
        eos_token_id: int = 2,
        max_inference_chunksize: int = 16384,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # `hidden_size`/`embedding_dim` and `num_hidden_layers`/`num_blocks` are aliases of
        # each other: whichever one is provided fills in the other.
        self.hidden_size = hidden_size if hidden_size is not None else embedding_dim
        self.embedding_dim = embedding_dim if embedding_dim is not None else hidden_size
        self.num_hidden_layers = num_hidden_layers if num_hidden_layers is not None else num_blocks
        self.num_blocks = num_blocks if num_blocks is not None else num_hidden_layers
        self.num_heads = num_heads
        self.use_bias = use_bias
        self.tie_word_embeddings = tie_word_embeddings
        self.add_out_norm = add_out_norm
        self.norm_eps = norm_eps
        self.norm_reduction_force_float32 = norm_reduction_force_float32
        # mlstm_layer
        self.qk_dim_factor = qk_dim_factor
        self.v_dim_factor = v_dim_factor
        # mlstm backend
        self.chunkwise_kernel = chunkwise_kernel
        self.sequence_kernel = sequence_kernel
        self.step_kernel = step_kernel
        self.mode = mode
        self.chunk_size = chunk_size
        self.return_last_states = return_last_states
        self.autocast_kernel_dtype = autocast_kernel_dtype
        self.eps = eps
        self.inference_state_dtype = inference_state_dtype
        # feedforward
        self.ffn_proj_factor = ffn_proj_factor
        self.ffn_round_up_to_multiple_of = ffn_round_up_to_multiple_of
        # capping
        self.gate_soft_cap = gate_soft_cap
        self.output_logit_soft_cap = output_logit_soft_cap
        self.weight_mode = weight_mode
        self.use_cache = use_cache
        # special token ids (each assigned exactly once; a previous version repeated
        # pad/bos/eos/tie_word_embeddings assignments redundantly)
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.max_inference_chunksize = max_inference_chunksize
        super().__init__(**kwargs)

    @property
    def qk_dim(self):
        """Query/key dimension: hidden_size scaled by `qk_dim_factor`, rounded up to a multiple of 64."""
        return round_up_to_next_multiple_of(
            self.hidden_size * self.qk_dim_factor,
            multiple_of=64,
        )

    @property
    def v_dim(self):
        """Value dimension: hidden_size scaled by `v_dim_factor`, rounded up to a multiple of 64."""
        return round_up_to_next_multiple_of(
            self.hidden_size * self.v_dim_factor,
            multiple_of=64,
        )

    @property
    def qk_head_dim(self):
        """Per-head query/key dimension."""
        return self.qk_dim // self.num_heads

    @property
    def v_head_dim(self):
        """Per-head value dimension."""
        return self.v_dim // self.num_heads

    def to_xlstm_block_config(self):
        """Convert to an `xLSTMLargeConfig` from the external `xlstm` package when it is
        installed; otherwise return this config unchanged."""
        if external_xlstm:
            return xLSTMLargeConfig(
                vocab_size=self.vocab_size,
                embedding_dim=self.hidden_size,
                num_blocks=self.num_hidden_layers,
                num_heads=self.num_heads,
                use_bias=self.use_bias,
                add_out_norm=self.add_out_norm,
                norm_eps=self.norm_eps,
                norm_reduction_force_float32=self.norm_reduction_force_float32,
                # mlstm_layer
                qk_dim_factor=self.qk_dim_factor,
                v_dim_factor=self.v_dim_factor,
                # mlstm backend
                chunkwise_kernel=self.chunkwise_kernel,
                sequence_kernel=self.sequence_kernel,
                step_kernel=self.step_kernel,
                mode=self.mode,
                chunk_size=self.chunk_size,
                return_last_states=self.return_last_states,
                autocast_kernel_dtype=self.autocast_kernel_dtype,
                eps=self.eps,
                inference_state_dtype=self.inference_state_dtype,
                # feedforward
                ffn_proj_factor=self.ffn_proj_factor,
                ffn_round_up_to_multiple_of=self.ffn_round_up_to_multiple_of,
                # capping
                gate_soft_cap=self.gate_soft_cap,
                output_logit_soft_cap=self.output_logit_soft_cap,
                weight_mode=self.weight_mode,
            )
        else:
            return self
# Public symbols exported by this configuration module.
__all__ = ["xLSTMConfig"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/xlstm/configuration_xlstm.py",
"license": "Apache License 2.0",
"lines": 266,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/xlstm/modeling_xlstm.py | # Copyright 2025 NXAI GmbH. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch xLSTM Model."""
from dataclasses import dataclass
import torch
import torch.nn.functional as F
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import initialization as init
from ...generation import GenerationMixin
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_utils import PreTrainedModel
from ...utils import ModelOutput, auto_docstring, can_return_tuple, is_xlstm_available
from .configuration_xlstm import xLSTMConfig
if is_xlstm_available():
from xlstm.xlstm_large.model import RMSNorm as xLSTMRMSNorm
from xlstm.xlstm_large.model import mLSTMBlock, mLSTMStateType, soft_cap
external_xlstm = True
class xLSTMBlock(GradientCheckpointingLayer, mLSTMBlock):
pass
else:
from collections.abc import Callable
from functools import partial
from typing import Literal
from .configuration_xlstm import round_up_to_next_multiple_of
mLSTMLayerStateType = tuple[torch.Tensor, torch.Tensor, torch.Tensor]
mLSTMStateType = dict[int, mLSTMLayerStateType]
external_xlstm = False
def soft_cap(values: torch.Tensor, cap_value: float | torch.Tensor | None = None) -> torch.Tensor:
"""
Soft caps a tensor to a value.
Performs a tanh operation on the logits and scales the result to the cap value. Common technique in attention
and output language heads to prevent large logits from dominating the softmax. See for example Gemma2:
https://huggingface.co/papers/2408.00118
Args:
values: The tensor to cap.
cap_value: The value to cap the values to. If None, no cap is applied.
Returns:
The capped values.
"""
if cap_value is None:
return values
return cap_value * torch.tanh(values / cap_value)
def mlstm_chunkwise_recurrent_fw_C(
    matK: torch.Tensor,
    matV: torch.Tensor,
    vecB: torch.Tensor,
    vecI: torch.Tensor,
    matC_states: torch.Tensor | None = None,
    vecN_states: torch.Tensor | None = None,
    scaMinter_states: torch.Tensor | None = None,
    matC_initial: torch.Tensor | None = None,
    vecN_initial: torch.Tensor | None = None,
    scaMinter_initial: torch.Tensor | None = None,
    qk_scale: float | None = None,
    chunk_size: int = 64,
    num_chunks: int = 1,
) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
    """
    Recurrently materialize the inter-chunk mLSTM states (C_k, n_k, m_k) for every chunk.

    Args:
        matK: Keys, shape (batch, num_heads, seq_len, dhqk), seq_len = num_chunks * chunk_size.
        matV: Values, shape (batch, num_heads, seq_len, dhhv).
        vecB: Cumulative forget-gate log-sigmoids, shape (batch, num_heads, num_chunks, chunk_size).
        vecI: Input-gate pre-activations, same shape as vecB.
        matC_states / vecN_states / scaMinter_states: Optional preallocated output buffers holding
            num_chunks + 1 stacked states; allocated as zeros when None.
        matC_initial / vecN_initial / scaMinter_initial: Optional initial recurrent state
            (zeros when None).
        qk_scale: Defaults to dhqk ** -0.5. NOTE(review): computed here but not applied in this
            recurrence (see the commented-out `# * qk_scale` below); the scaling is applied in
            the parallel H-pass instead — confirm this split is intentional.
        chunk_size: Tokens per chunk.
        num_chunks: Number of chunks in the sequence.

    Returns:
        Tuple (matC_states, vecN_states, scaMinter_states) where slot k holds the state *before*
        processing chunk k and the last slot holds the final state.
    """
    batch_size, nh, _, dhqk, dhhv = *matK.shape, matV.shape[-1]
    nc = num_chunks
    _dtype, _device = matK.dtype, matK.device
    if qk_scale is None:
        qk_scale = dhqk**-0.5
    # initialize the states tensors
    if matC_states is None:
        matC_states = torch.zeros((batch_size, nh, (nc + 1) * dhqk, dhhv), dtype=_dtype, device=_device)
    if vecN_states is None:
        vecN_states = torch.zeros((batch_size, nh, (nc + 1) * dhqk), dtype=_dtype, device=_device)
    if scaMinter_states is None:
        scaMinter_states = torch.zeros((batch_size, nh, (nc + 1)), dtype=_dtype, device=_device)
    # assign the initial states to the running states
    matC_k = (
        torch.zeros((batch_size, nh, dhqk, dhhv), dtype=_dtype, device=_device)
        if matC_initial is None
        else matC_initial
    )
    vecN_k = (
        torch.zeros((batch_size, nh, dhqk), dtype=_dtype, device=_device) if vecN_initial is None else vecN_initial
    )
    scaM_inter_k = (
        torch.zeros((batch_size, nh, 1), dtype=_dtype, device=_device)
        if scaMinter_initial is None
        else scaMinter_initial
    )
    # per-chunk log-gate aggregates: vecA adds the remaining in-chunk forget decay to the input
    # gate; scaG is the total forget decay of the whole chunk (carries the previous state over)
    vecA = vecB[..., -1, None] - vecB + vecI
    scaG = vecB[..., -1]
    scaA_max = vecA.max(-1).values
    scaM_inter_k = scaM_inter_k.squeeze(-1)
    for key in range(0, num_chunks):
        # store the states from the previous iteration before updating them
        # in the first iteration, these are the initial states
        matC_states[:, :, key * dhqk : (key + 1) * dhqk, :] = matC_k
        vecN_states[:, :, key * dhqk : (key + 1) * dhqk] = vecN_k
        scaMinter_states[:, :, key] = scaM_inter_k
        # m_k update: running max-stabilizer keeps the exponentials below numerically safe
        scaA_max_k = scaA_max[:, :, key]
        scaG_k = scaG[:, :, key]
        scaM_inter_k_next = torch.max(scaG_k + scaM_inter_k, scaA_max_k)
        # C_k update
        matK_chunk = matK[:, :, key * chunk_size : (key + 1) * chunk_size, :]  # * qk_scale
        matV_chunk = matV[:, :, key * chunk_size : (key + 1) * chunk_size, :]
        vecA_k = vecA[:, :, key, :]
        vecAbar_k = torch.exp(vecA_k - scaM_inter_k_next[..., None])[:, :, :, None]
        matK_chunk_gated = matK_chunk * vecAbar_k
        scaGbar_k = torch.exp(scaG_k + scaM_inter_k - scaM_inter_k_next)[:, :, None]
        # NOTE: no update in-place (i.e. +=) as this gives error for autograd backward
        matC_k_next = scaGbar_k[..., None] * matC_k + matK_chunk_gated.transpose(-2, -1) @ (matV_chunk)
        # n_k update
        vecN_k_next = scaGbar_k * vecN_k + matK_chunk_gated.transpose(-2, -1).sum(-1)
        # move to the next iteration
        scaM_inter_k = scaM_inter_k_next
        matC_k = matC_k_next
        vecN_k = vecN_k_next
    # store the states from the last iteration
    matC_states[:, :, -dhqk:, :] = matC_k
    vecN_states[:, :, -dhqk:] = vecN_k
    scaMinter_states[:, :, -1] = scaM_inter_k
    return matC_states, vecN_states, scaMinter_states
def mlstm_chunkwise_parallel_fw_H(
    matQ: torch.Tensor,
    matK: torch.Tensor,
    matV: torch.Tensor,
    # these states must be all states up to the last chunk, i.e. :-1
    matC_states: torch.Tensor,
    vecN_states: torch.Tensor,
    scaMinter_states: torch.Tensor,
    vecI: torch.Tensor,
    vecB: torch.Tensor,
    qk_scale: float,
    chunk_size: int = 64,
    num_chunks: int = 1,
    eps: float = 1e-6,
) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
    """
    Compute the mLSTM hidden outputs H for all chunks in parallel, given the pre-materialized
    inter-chunk states from `mlstm_chunkwise_recurrent_fw_C`.

    The output of each chunk combines an intra-chunk causal-attention-like term with an
    inter-chunk term read from the carried state; both are jointly max-stabilized before
    exponentiation.

    Args:
        matQ / matK: Queries/keys, shape (batch, num_heads, num_chunks * chunk_size, dhqk).
        matV: Values, shape (batch, num_heads, num_chunks * chunk_size, dhv).
        matC_states / vecN_states / scaMinter_states: States *before* each chunk (the `[:-1]`
            slices of the recurrent pass output).
        vecI: Input-gate pre-activations, shape (batch, num_heads, num_chunks, chunk_size).
        vecB: Cumulative forget-gate log-sigmoids, same shape as vecI.
        qk_scale: Query/key scaling factor.
        chunk_size: Tokens per chunk.
        num_chunks: Number of chunks.
        eps: Added to the denominator for numerical stability.

    Returns:
        (matH_out, vecN_out, vecM_out): outputs, normalizer, and max-state per position; the
        latter two are kept for the backward pass.
    """
    _device = matQ.device
    nc = num_chunks
    batch_size, nh, dqk, dhv = matC_states.shape
    dhqk = dqk // nc
    # unstack the (nc+1 - 1) per-chunk states along a chunk axis
    matC_k_states = matC_states.view(batch_size, nh, nc, dhqk, dhv)
    vecN_k_states = vecN_states.view(batch_size, nh, nc, dhqk)
    scaMinter_k_states = scaMinter_states
    matQ = matQ.view(batch_size, nh, nc, chunk_size, dhqk)
    matK = matK.view(batch_size, nh, nc, chunk_size, dhqk)
    matV = matV.view(batch_size, nh, nc, chunk_size, dhv)
    # lower-triangular mask enforcing causality within a chunk
    ltr = torch.tril(
        torch.ones(
            (chunk_size, chunk_size),
            dtype=torch.bool,
            device=_device,
        )
    )
    # Compute intra chunk contribution: H_intra
    matF_logsig_chunk = vecB[:, :, :, :, None] - vecB[:, :, :, None, :]
    matF_logsig_mask_chunk = torch.where(ltr, matF_logsig_chunk, -float("inf"))
    matLogD_chunk = matF_logsig_mask_chunk + vecI[:, :, :, None, :]
    # max_state intra
    vecMintra_k = torch.max(matLogD_chunk, dim=-1, keepdim=False).values
    # max_state combined
    vecM_b_inter = vecB + scaMinter_k_states[:, :, :, None]
    vecM_k_combine = torch.maximum(vecM_b_inter, vecMintra_k)

    vecM_k_combine = vecM_k_combine[:, :, :, :, None]
    vecM_b_inter = vecM_b_inter[:, :, :, :, None]

    matLogD_stabilized_chunk = matLogD_chunk - vecM_k_combine
    matD_chunk = torch.exp(matLogD_stabilized_chunk)
    matS_chunk = (matQ @ matK.transpose(-2, -1)) * qk_scale
    matM_chunk = matS_chunk * matD_chunk

    # ? Combine H_intra with H_inter
    vecBbar = torch.exp(vecM_b_inter - vecM_k_combine)
    matQ_chunk_gated = matQ * vecBbar * qk_scale

    matNumerator_common = matQ_chunk_gated @ matC_k_states + matM_chunk @ matV
    vecDenom_l_common = matQ_chunk_gated @ vecN_k_states.unsqueeze(-1) + matM_chunk.sum(dim=-1, keepdim=True)
    vecDenom_max_common = torch.maximum(torch.abs(vecDenom_l_common), torch.exp(-vecM_k_combine))
    matH_k_chunk = matNumerator_common / (vecDenom_max_common + eps)

    matH_out = matH_k_chunk.view(batch_size, nh, nc * chunk_size, dhv)

    # we need the denominator and the overall max state for the backward pass
    vecN_out = vecDenom_max_common.reshape(batch_size, nh, nc * chunk_size)
    vecM_out = vecM_k_combine.reshape(batch_size, nh, nc * chunk_size)
    return matH_out, vecN_out, vecM_out
def mlstm_chunkwise_fw(
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    igate: torch.Tensor,
    fgate: torch.Tensor,
    cstate: torch.Tensor | None = None,
    nstate: torch.Tensor | None = None,
    mstate: torch.Tensor | None = None,
    qk_scale: float | None = None,
    return_last_states: bool = False,
    return_all_states: bool = False,
    chunk_size: int = 64,
    eps: float = 1e-6,
) -> tuple[
    torch.Tensor,
    torch.Tensor,
    torch.Tensor,
    tuple[torch.Tensor, torch.Tensor, torch.Tensor] | None,
    tuple[torch.Tensor, torch.Tensor, torch.Tensor] | None,
]:
    """
    Full chunkwise mLSTM forward: materialize the inter-chunk states, then compute the
    per-position outputs in parallel within each chunk.

    Args:
        query / key / value: Shape (batch, num_heads, seq_len, head_dim); seq_len must be
            divisible by `chunk_size`.
        igate / fgate: Input/forget gate pre-activations, shape (batch, num_heads, seq_len).
        cstate / nstate / mstate: Optional initial recurrent state.
        qk_scale: Defaults to head_dim ** -0.5.
        return_last_states: Also return the state after the final chunk (4th tuple slot).
        return_all_states: Also return all materialized chunk states (5th tuple slot).
        chunk_size: Tokens per chunk.
        eps: Denominator stabilizer.

    Returns:
        (matH_out, vecN_out, vecM_out, last_states_or_None, all_states_or_None).

    Raises:
        ValueError: If seq_len is not divisible by `chunk_size`.
    """
    batch_size, nh, sequence_length, dhqk = query.shape
    if sequence_length % chunk_size != 0:
        raise ValueError(f"Sequence length {sequence_length} is not divisible by chunk size {chunk_size}.")
    nc = sequence_length // chunk_size
    vecI = igate.view(batch_size, nh, nc, chunk_size)
    vecF = fgate.view(batch_size, nh, nc, chunk_size)

    # compute the gates, the g and the a and b vectors
    # BUGFIX: this previously read `fgate.logsigmoid(vecF)` — a nonexistent tensor method
    # invoked with a spurious argument. The log-sigmoid comes from torch.nn.functional,
    # exactly as in `mlstm_chunkwise_native_autograd` below.
    vecF_logsig = F.logsigmoid(vecF)
    vecB = vecF_logsig.cumsum(-1)
    if qk_scale is None:
        qk_scale = dhqk**-0.5

    #! materialize the C_k, n_k, m_k states for each chunk
    matC_k_states, vecN_k_states, scaMinter_k_states = mlstm_chunkwise_recurrent_fw_C(
        matK=key,
        matV=value,
        vecB=vecB,
        vecI=vecI,
        matC_initial=cstate,
        vecN_initial=nstate,
        scaMinter_initial=mstate,
        qk_scale=qk_scale,
        chunk_size=chunk_size,
        num_chunks=nc,
    )

    #! compute the outputs within each chunk
    matH_out, vecN_out, vecM_out = mlstm_chunkwise_parallel_fw_H(
        matQ=query,
        matK=key,
        matV=value,
        matC_states=matC_k_states[:, :, :-dhqk, :],
        vecN_states=vecN_k_states[:, :, :-dhqk],
        scaMinter_states=scaMinter_k_states[:, :, :-1],
        vecI=vecI,
        vecB=vecB,
        qk_scale=qk_scale,
        chunk_size=chunk_size,
        num_chunks=nc,
        eps=eps,
    )

    ret_tuple = (matH_out, vecN_out, vecM_out)
    if return_last_states:
        ret_tuple += (
            (matC_k_states[:, :, -dhqk:, :], vecN_k_states[:, :, -dhqk:], scaMinter_k_states[:, :, -1:]),
        )
    else:
        ret_tuple += (None,)
    if return_all_states:
        ret_tuple += ((matC_k_states, vecN_k_states, scaMinter_k_states),)
    else:
        ret_tuple += (None,)
    return ret_tuple
def mlstm_chunkwise_native_autograd(
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    igate: torch.Tensor,
    fgate: torch.Tensor,
    c_initial: torch.Tensor | None = None,
    n_initial: torch.Tensor | None = None,
    m_initial: torch.Tensor | None = None,
    return_last_states: bool = False,
    eps: float = 1e-6,
    chunk_size: int = 64,
    **kwargs,
) -> torch.Tensor | tuple[torch.Tensor, tuple[torch.Tensor, torch.Tensor, torch.Tensor]]:
    """Chunkwise mLSTM forward computed with plain PyTorch ops (autograd-friendly).

    The sequence is split into chunks of ``chunk_size``. First the inter-chunk
    recurrent states (C, n, m) are materialized at every chunk boundary, then
    the hidden outputs are computed in parallel within each chunk.
    """
    batch_size, num_heads, seq_len, head_dim_qk = query.shape
    if seq_len % chunk_size != 0:
        raise ValueError(f"Sequence length {seq_len} is not divisible by chunk size {chunk_size}.")
    num_chunks = seq_len // chunk_size
    # fold the per-step gate preactivations into (batch, heads, chunks, chunk_size)
    igate_chunks = igate.view(batch_size, num_heads, num_chunks, chunk_size)
    fgate_chunks = fgate.view(batch_size, num_heads, num_chunks, chunk_size)
    # cumulative log-sigmoid of the forget gate within each chunk
    fgate_cumlogsig = F.logsigmoid(fgate_chunks).cumsum(-1)
    qk_scale = head_dim_qk**-0.5
    #! materialize the C_k, n_k, m_k states at every chunk boundary
    matC_states, vecN_states, scaM_states = mlstm_chunkwise_recurrent_fw_C(
        matK=key,
        matV=value,
        vecB=fgate_cumlogsig,
        vecI=igate_chunks,
        matC_initial=c_initial,
        vecN_initial=n_initial,
        scaMinter_initial=m_initial,
        qk_scale=qk_scale,
        chunk_size=chunk_size,
        num_chunks=num_chunks,
    )
    #! compute the outputs within each chunk (per-step normalizer/max outputs are unused here)
    matH_out, _, _ = mlstm_chunkwise_parallel_fw_H(
        matQ=query,
        matK=key,
        matV=value,
        matC_states=matC_states[:, :, :-head_dim_qk, :],
        vecN_states=vecN_states[:, :, :-head_dim_qk],
        scaMinter_states=scaM_states[:, :, :-1],
        vecI=igate_chunks,
        vecB=fgate_cumlogsig,
        qk_scale=qk_scale,
        chunk_size=chunk_size,
        num_chunks=num_chunks,
        eps=eps,
    )
    # the trailing slices of the materialized states are the states after the last chunk
    last_states = (
        matC_states[:, :, -head_dim_qk:, :],
        vecN_states[:, :, -head_dim_qk:],
        scaM_states[:, :, -1:],
    )
    return (matH_out, last_states) if return_last_states else matH_out
def mlstm_recurrent_step_native(
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    igate: torch.Tensor,
    fgate: torch.Tensor,
    cstate: torch.Tensor,
    nstate: torch.Tensor,
    mstate: torch.Tensor,
    eps: float = 1e-6,
    dtype_state: torch.dtype = torch.float32,
    **kwargs,
) -> tuple[torch.Tensor, tuple[torch.Tensor, torch.Tensor, torch.Tensor]]:
    """Advance the mLSTM recurrence by a single time step.

    Inputs are per-step tensors: query/key (batch, nh, dhqk), value
    (batch, nh, dhhv), gate preactivations (batch, nh, 1). Returns the hidden
    output in the q/k/v dtype and the updated (C, n, m) states in ``dtype_state``.
    """
    dtype_qkv = query.dtype
    # carry the recurrent states in the (typically higher-precision) state dtype
    matC_prev = cstate.to(dtype=dtype_state)
    vecN_prev = nstate.to(dtype=dtype_state)
    scaM_prev = mstate.to(dtype=dtype_state)
    batch_size, nh, dhqk = query.shape
    _, _, dhhv = value.shape
    # shape validation
    if query.shape != key.shape:
        raise ValueError("query and key must have the same shape")
    if matC_prev.shape != (batch_size, nh, dhqk, dhhv):
        raise ValueError(f"matC_old has wrong shape, got {matC_prev.shape}")
    if vecN_prev.shape != (batch_size, nh, dhqk):
        raise ValueError(f"vecN_old has wrong shape, got {vecN_prev.shape}")
    if scaM_prev.shape != (batch_size, nh, 1):
        raise ValueError(f"scaM_old has wrong shape, got {scaM_prev.shape}")
    if igate.shape != (batch_size, nh, 1):
        raise ValueError(f"scaI has wrong shape, got {igate.shape}")
    if fgate.shape != (batch_size, nh, 1):
        raise ValueError(f"scaF has wrong shape, got {fgate.shape}")
    # forget gate in log space for numerical stability
    scaF_log = torch.nn.functional.logsigmoid(fgate)
    # running max state and max-renormalized gate activations
    scaM_new = torch.max(scaF_log + scaM_prev, igate)
    scaF_act = torch.exp(scaF_log + scaM_prev - scaM_new)
    scaI_act = torch.exp(igate - scaM_new)
    vecQ_scaled = query * (dhqk ** (-0.5))
    # C_t = f * C_{t-1} + i * (k v^T)
    matC_new = scaF_act[:, :, :, None] * matC_prev + scaI_act[:, :, :, None] * (
        key[:, :, :, None] @ value[:, :, None, :]
    )
    # n_t = f * n_{t-1} + i * k
    vecN_new = scaF_act * vecN_prev + scaI_act * key
    # h_t = (q^T C_t) / max(|q^T n_t|, exp(-m_t)) with eps for stability
    h_num = vecQ_scaled[:, :, None, :] @ matC_new.to(dtype=dtype_qkv)
    h_num = h_num.squeeze(2).to(dtype=dtype_state)
    qn_dotproduct = vecQ_scaled[:, :, None, :] @ vecN_new[:, :, :, None].to(dtype=dtype_qkv)
    qn_dotproduct = qn_dotproduct.squeeze(2)
    max_val = torch.exp(-scaM_new)
    h_denom = (torch.maximum(qn_dotproduct.abs(), max_val) + eps).to(dtype=dtype_state)
    h = (h_num / h_denom).to(dtype=dtype_qkv)
    return h, (
        matC_new.to(dtype=dtype_state),
        vecN_new.to(dtype=dtype_state),
        scaM_new.to(dtype=dtype_state),
    )
def mlstm_recurrent_sequence_native(
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    igate: torch.Tensor,
    fgate: torch.Tensor,
    c_initial: torch.Tensor | None = None,
    n_initial: torch.Tensor | None = None,
    m_initial: torch.Tensor | None = None,
    return_last_states: bool = False,
    eps: float = 1e-6,
    dtype_state: torch.dtype = torch.float32,
    **kwargs,
) -> torch.Tensor | tuple[torch.Tensor, tuple[torch.Tensor, torch.Tensor, torch.Tensor]]:
    """Compute the mLSTM over a whole sequence by looping the single-step recurrence.

    Args:
        query: Query tensor (batch_size, nh, sequence_length, dhqk).
        key: Key tensor (batch_size, nh, sequence_length, dhqk).
        value: Value tensor (batch_size, nh, sequence_length, dhv).
        igate: Input gate preactivations (batch_size, nh, sequence_length).
        fgate: Forget gate preactivations (batch_size, nh, sequence_length).
        c_initial: Optional initial cell state (batch_size, nh, dhqk, dhv).
        n_initial: Optional initial normalizer state (batch_size, nh, dhqk).
        m_initial: Optional initial max state (batch_size, nh, 1).
        return_last_states: If True, also return the final (C, n, m) states.
        eps: Numerical-stability epsilon forwarded to the step kernel.
        dtype_state: Dtype the recurrent states are kept in.

    Returns:
        Hidden states (batch_size, nh, sequence_length, dhv); if
        ``return_last_states`` is True, a tuple of the hidden states and the
        final (C, n, m) state triple.

    Raises:
        ValueError: If ``c_initial`` is given without ``n_initial``/``m_initial``.
    """
    batch_size, nh, sequence_length, dhqk = query.shape
    dhv = value.shape[-1]
    device = query.device
    if c_initial is not None:
        # all three initial states must be provided together
        if n_initial is None or m_initial is None:
            raise ValueError("Initial states must be provided together.")
        matC_state, vecN_state, vecM_state = (
            c_initial.to(dtype=dtype_state),
            n_initial.to(dtype=dtype_state),
            m_initial.to(dtype=dtype_state),
        )
    else:
        # memory state
        matC_state = torch.zeros((batch_size, nh, dhqk, dhv), dtype=dtype_state, device=device)
        # normalizer state
        vecN_state = torch.zeros((batch_size, nh, dhqk), dtype=dtype_state, device=device)
        # max state
        vecM_state = torch.zeros((batch_size, nh, 1), dtype=dtype_state, device=device)
    vecH_list = []
    for t in range(sequence_length):
        # gate preactivations for step t (keep a trailing singleton dim)
        vecF_t, vecI_t = fgate[:, :, t, None], igate[:, :, t, None]
        # projections for step t
        vecQ_t, vecK_t, vecV_t = query[:, :, t, :], key[:, :, t, :], value[:, :, t, :]
        # advance the recurrence by one step
        vecH, (matC_state, vecN_state, vecM_state) = mlstm_recurrent_step_native(
            cstate=matC_state,
            nstate=vecN_state,
            mstate=vecM_state,
            query=vecQ_t,
            key=vecK_t,
            value=vecV_t,
            igate=vecI_t,
            fgate=vecF_t,
            eps=eps,
            dtype_state=dtype_state,
            **kwargs,
        )
        vecH_list.append(vecH)
    matH = torch.stack(vecH_list, dim=-2)
    if return_last_states:
        return matH, (matC_state, vecN_state, vecM_state)
    else:
        return matH
def wrap_chunkwise_pad_zeros(
mlstm_chunkwise_kernel: Callable,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
fgate: torch.Tensor,
igate: torch.Tensor,
c_initial: torch.Tensor | None = None,
n_initial: torch.Tensor | None = None,
m_initial: torch.Tensor | None = None,
return_last_states: bool = False,
eps: float = 1e-6,
autocast_kernel_dtype: torch.dtype = torch.bfloat16,
chunk_size: int = 64,
**kwargs,
) -> torch.Tensor | tuple[torch.Tensor, tuple[torch.Tensor, torch.Tensor, torch.Tensor]]:
if return_last_states:
raise ValueError(
"We are padding zeros, so we cannot return last states,",
"as they would be not the true last states.",
)
batch_size, nh, sequence_length, dhqk = query.shape
S_unpadded = sequence_length
# padding to chunk size for kernels
if sequence_length % chunk_size != 0:
S_padded = ((sequence_length + chunk_size - 1) // chunk_size) * chunk_size
q_pad = query.new_zeros(batch_size, nh, S_padded, query.shape[3])
k_pad = key.new_zeros(batch_size, nh, S_padded, key.shape[3])
v_pad = value.new_zeros(batch_size, nh, S_padded, value.shape[3])
i_pad = igate.new_zeros(batch_size, nh, S_padded)
f_pad = fgate.new_zeros(batch_size, nh, S_padded)
q_pad[:, :, :S_unpadded, :] = query
k_pad[:, :, :S_unpadded, :] = key
v_pad[:, :, :S_unpadded, :] = value
i_pad[:, :, :S_unpadded] = igate
f_pad[:, :, :S_unpadded] = fgate
else:
q_pad = query
k_pad = key
v_pad = value
i_pad = igate
f_pad = fgate
matH = mlstm_chunkwise_kernel(
query=q_pad,
key=k_pad,
value=v_pad,
igate=i_pad,
fgate=f_pad,
c_initial=c_initial,
n_initial=n_initial,
m_initial=m_initial,
return_last_states=return_last_states,
eps=eps,
autocast_kernel_dtype=autocast_kernel_dtype,
chunk_size=chunk_size,
**kwargs,
)
matH = matH[:, :, :S_unpadded, :]
return matH
def wrap_chunkwise_arbitrary_sequence_length(
    mlstm_chunkwise_kernel: Callable,
    mlstm_sequence_kernel: Callable,
    mlstm_step_kernel: Callable,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    fgate: torch.Tensor,
    igate: torch.Tensor,
    c_initial: torch.Tensor | None = None,
    n_initial: torch.Tensor | None = None,
    m_initial: torch.Tensor | None = None,
    return_last_states: bool = True,
    eps: float = 1e-6,
    autocast_kernel_dtype: torch.dtype = torch.bfloat16,
    chunk_size: int = 64,
    enable_logging: bool = False,
) -> torch.Tensor | tuple[torch.Tensor, tuple[torch.Tensor, torch.Tensor, torch.Tensor]]:
    """Compute the mLSTM hidden states (and optionally last states) for any sequence length.
    Three kernels are combined:
    - mlstm_chunkwise_kernel: processes the longest prefix whose length is a multiple of
      ``chunk_size`` in parallel chunks.
    - mlstm_sequence_kernel: processes the remaining tail (shorter than one chunk) with a
      step-by-step recurrence, carrying over the states from the chunkwise prefix.
    - mlstm_step_kernel: handles the special case sequence_length == 1 directly, avoiding
      the loop overhead of the sequence kernel.
    Args:
        mlstm_chunkwise_kernel: The mLSTM chunkwise kernel that processes chunks of a given chunk size in parallel
        mlstm_sequence_kernel: The mLSTM kernel that processes the remaining sequence length in a single step recurrence
        mlstm_step_kernel: The mLSTM kernel that processes a sequence length of 1 in a single step
        query: The query tensor (batch_size, nh, sequence_length, dhqk)
        key: The key tensor (batch_size, nh, sequence_length, dhqk)
        value: The value tensor (batch_size, nh, sequence_length, dhhv)
        fgate: The forget gate tensor (batch_size, nh, sequence_length)
        igate: The input gate tensor (batch_size, nh, sequence_length)
        c_initial: The initial cell state tensor (batch_size, nh, dhqk, dhhv)
        n_initial: The initial normalizer state tensor (batch_size, nh, dhqk)
        m_initial: The initial max state tensor (batch_size, nh, 1)
        return_last_states: If True, the function will return the last states of the mLSTM
        eps: The epsilon value used for numerical stability
        autocast_kernel_dtype: The dtype used for the kernel computation
        chunk_size: The chunk size used for the chunkwise kernel
        enable_logging: If True, the function will log debug information. Default is False.
    Returns:
        The hidden state tensor (batch_size, nh, sequence_length, dhhv) or a tuple containing the hidden state tensor and the last states of the mLSTM
        Last states are (cstate (batch_size, nh, dhqk, dhhv), nstate (batch_size, nh, dhqk), mstate (batch_size, nh, 1)).
    """
    batch_size, nh, sequence_length, dhqk = key.shape
    dhhv = value.shape[-1]
    # fall back to zero-initialized float32 states when no initial states are given
    c_state = (
        c_initial
        if c_initial is not None
        else torch.zeros(batch_size, nh, dhqk, dhhv, device=key.device, dtype=torch.float32)
    )
    n_state = (
        n_initial
        if n_initial is not None
        else torch.zeros(batch_size, nh, dhqk, device=key.device, dtype=torch.float32)
    )
    m_state = (
        m_initial
        if m_initial is not None
        else torch.zeros(batch_size, nh, 1, device=key.device, dtype=torch.float32)
    )
    if sequence_length > 1:
        # process the sequence length in chunks
        h_outs = []
        seq_len_start_idx = 0
        remaining_seq_len = sequence_length - seq_len_start_idx
        num_chunks = remaining_seq_len // chunk_size
        if num_chunks > 0:
            # parallel chunkwise pass over the largest prefix that fills whole chunks
            iter_seq_len = chunk_size * num_chunks
            seq_len_idx = seq_len_start_idx + iter_seq_len
            h_out, (c_state, n_state, m_state) = mlstm_chunkwise_kernel(
                query=query[..., seq_len_start_idx:seq_len_idx, :].contiguous(),
                key=key[..., seq_len_start_idx:seq_len_idx, :].contiguous(),
                value=value[..., seq_len_start_idx:seq_len_idx, :].contiguous(),
                fgate=fgate[..., seq_len_start_idx:seq_len_idx].contiguous(),
                igate=igate[..., seq_len_start_idx:seq_len_idx].contiguous(),
                c_initial=c_state,
                n_initial=n_state,
                m_initial=m_state,
                chunk_size=chunk_size,
                return_last_states=True,
                autocast_kernel_dtype=autocast_kernel_dtype,
                eps=eps,
            )
            seq_len_start_idx += iter_seq_len
            h_outs.append(h_out)
            remaining_seq_len = sequence_length - seq_len_start_idx
        if remaining_seq_len > 0:
            # recurrent pass over the tail that does not fill a whole chunk,
            # continuing from the states produced by the chunkwise prefix
            h_out, (c_state, n_state, m_state) = mlstm_sequence_kernel(
                query=query[..., seq_len_start_idx:sequence_length, :].contiguous(),
                key=key[..., seq_len_start_idx:sequence_length, :].contiguous(),
                value=value[..., seq_len_start_idx:sequence_length, :].contiguous(),
                igate=igate[..., seq_len_start_idx:sequence_length].contiguous(),
                fgate=fgate[..., seq_len_start_idx:sequence_length].contiguous(),
                c_initial=c_state,
                n_initial=n_state,
                m_initial=m_state,
                return_last_states=True,
                eps=eps,
            )
            h_outs.append(h_out)
        h_out = torch.concatenate(h_outs, dim=2)
    else:
        if sequence_length != 1:
            raise ValueError(
                f"Received empty sequence (sequence_length={sequence_length}), require at least single element in the sequence."
            )
        # process the sequence length in a single step
        # while this case is also captured by the regular mode above,
        # it avoids the overhead of the loop and calls the step kernel directly
        # The step function does not want a sequence dimension
        # qkv shape is (batch_size, nh, dhqk/dhv)
        # igate, fgate shape is (batch_size, nh, 1)
        h_out, (c_state, n_state, m_state) = mlstm_step_kernel(
            query=query.squeeze(2),
            key=key.squeeze(2),
            value=value.squeeze(2),
            igate=igate,
            fgate=fgate,
            cstate=c_state,
            nstate=n_state,
            mstate=m_state,
            eps=eps,
        )
        # restore the sequence dimension dropped for the step kernel
        h_out = h_out[:, :, None, :]
    if return_last_states:
        return h_out, (c_state, n_state, m_state)
    else:
        return h_out
class xLSTMBackend(nn.Module):
    """xLSTM Backend Module for PyTorch.
    This module wraps the xLSTM kernels and provides a high-level interface for training and inference.
    Training uses the chunkwise kernel directly (optionally wrapped with zero padding);
    inference combines the chunkwise, sequence, and step kernels so that arbitrary
    sequence lengths are supported and the last states are always returned.
    """

    config_class = xLSTMConfig

    def __init__(self, config: xLSTMConfig):
        super().__init__()
        self.config = config
        # native (pure PyTorch) kernel implementations
        self.chunkwise_kernel_fn = mlstm_chunkwise_native_autograd
        self.sequence_kernel_fn = mlstm_recurrent_sequence_native
        self.step_kernel_fn = mlstm_recurrent_step_native
        # inference path: handles arbitrary sequence lengths, always returns last states;
        # recurrent states are kept in config.inference_state_dtype
        self._inference_fn = partial(
            wrap_chunkwise_arbitrary_sequence_length,
            mlstm_chunkwise_kernel=self.chunkwise_kernel_fn,
            mlstm_sequence_kernel=partial(
                self.sequence_kernel_fn,
                dtype_state=getattr(torch, config.inference_state_dtype),
            ),
            mlstm_step_kernel=partial(
                self.step_kernel_fn,
                dtype_state=getattr(torch, config.inference_state_dtype),
            ),
            chunk_size=config.chunk_size,
            eps=config.eps,
            autocast_kernel_dtype=getattr(torch, config.autocast_kernel_dtype),
            return_last_states=True,
        )
        # training path: plain chunkwise kernel with the configured dtype/eps/chunk size
        train_kernel_fn = partial(
            self.chunkwise_kernel_fn,
            autocast_kernel_dtype=getattr(torch, config.autocast_kernel_dtype),
            eps=config.eps,
            chunk_size=config.chunk_size,
        )
        # optionally zero-pad sequences whose length is not a chunk multiple
        if "with_padding" in config.mode:
            train_kernel_fn = partial(wrap_chunkwise_pad_zeros, mlstm_chunkwise_kernel=train_kernel_fn)
        self._train_fn = train_kernel_fn

    def forward(
        self,
        query: torch.Tensor,
        key: torch.Tensor,
        value: torch.Tensor,
        igate: torch.Tensor,
        fgate: torch.Tensor,
        c_initial: torch.Tensor | None = None,
        n_initial: torch.Tensor | None = None,
        m_initial: torch.Tensor | None = None,
        return_last_states: bool | None = None,
        mode: Literal["train", "inference"] | None = None,
    ) -> torch.Tensor | tuple[torch.Tensor, tuple[torch.Tensor, torch.Tensor, torch.Tensor]]:
        """Forward pass of the mLSTM backend.
        Depending on the configured mode, this method will call the appropriate kernel function.
        Args:
            query: The query tensor of shape (batch_size, nh, sequence_length, dhqk).
            key: The key tensor of shape (batch_size, nh, sequence_length, dhqk).
            value: The value tensor of shape (batch_size, nh, sequence_length, dhhv).
            igate: The input gate preactivation tensor of shape (batch_size, nh, sequence_length).
            fgate: The forget gate preactivation tensor of shape (batch_size, nh, sequence_length).
            c_initial: The initial cell state tensor of shape (batch_size, nh, dhqk, dhhv).
                Defaults to None.
            n_initial: The initial hidden state tensor of shape (batch_size, nh, dhqk). Defaults to None.
            m_initial: The initial memory tensor of shape (batch_size, nh, 1). Defaults to None.
            return_last_states: Whether to return the last states of the sequence. Defaults to None.
                If None, the value from the config is used.
            mode: Overrides the configured mode ("train" or "inference"). Defaults to None.
        Returns:
            hidden states of shape (batch_size, nh, sequence_length, dhhv)
            hidden states and last states the last states are the cell state cstate (batch_size, nh, dhqk, dhhv),
            the normalizer state nstate (batch_size, nh, dhqk), and the max state mstate (batch_size, nh, 1)
        Raises:
            ValueError: If the mode is unknown, or if return_last_states=True is requested
                in train_with_padding mode (padded steps would corrupt the last states).
        """
        if mode is None:
            mode = self.config.mode
        if "train" in mode:
            if return_last_states is None:
                return_last_states = self.config.return_last_states
            if self.config.mode == "train_with_padding":
                # padding would make the returned states incorrect
                if return_last_states:
                    raise ValueError("return_last_states=True is not supported with train_with_padding mode.")
            return self._train_fn(
                query=query,
                key=key,
                value=value,
                igate=igate,
                fgate=fgate,
                c_initial=c_initial,
                n_initial=n_initial,
                m_initial=m_initial,
                return_last_states=return_last_states,
            )
        elif "inference" in mode:
            # inference mode always returns the last states
            return self._inference_fn(
                query=query,
                key=key,
                value=value,
                igate=igate,
                fgate=fgate,
                c_initial=c_initial,
                n_initial=n_initial,
                m_initial=m_initial,
            )
        else:
            raise ValueError(f"Unknown mode: {self.config.mode}")

    def extra_repr(self) -> str:
        # show the full config in the module repr
        return f"{self.config}"
class xLSTMRMSNorm(nn.Module):
    """Root-mean-square normalization over the last (feature) dimension,
    analogous to https://pytorch.org/docs/stable/generated/torch.nn.RMSNorm.html,
    with an optional learnable scale and shift.

    Args:
        num_features: Size of the last input dimension.
        eps: Numerical-stability constant added to the mean square.
        use_weight: Whether to learn a multiplicative weight.
        use_bias: Whether to learn an additive bias.
        force_float32_reductions: Compute the reduction in float32.
    """

    def __init__(
        self,
        num_features: int,
        eps: float = 1e-6,
        use_weight: bool = True,
        use_bias: bool = False,
        force_float32_reductions: bool = True,
    ):
        super().__init__()
        self.num_features = num_features
        self.eps = eps
        self.force_float32_reductions = force_float32_reductions
        self.weight = nn.Parameter(torch.ones(num_features)) if use_weight else None
        self.bias = nn.Parameter(torch.zeros(num_features)) if use_bias else None

    def _apply_weight_bias(self, x: torch.Tensor) -> torch.Tensor:
        # apply the optional learnable scale and shift
        if self.weight is not None:
            x = x * self.weight
        if self.bias is not None:
            x = x + self.bias
        return x

    def _rms_normalize(self, x: torch.Tensor) -> torch.Tensor:
        # normalize by the root mean square over the last dimension
        orig_dtype = x.dtype
        if self.force_float32_reductions:
            x = x.float()
        normed = x * torch.rsqrt(x.pow(2).mean(dim=-1, keepdim=True) + self.eps)
        return normed.to(orig_dtype)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self._apply_weight_bias(self._rms_normalize(x))
class xLSTMMultiHeadLayerNorm(nn.Module):
    """Per-head LayerNorm followed by flattening the heads.

    Expects input of shape (batch_size, sequence_length, nh, DH); normalization
    (mean subtraction and variance scaling) is applied over the last (DH) axis,
    the result is reshaped to (batch_size, sequence_length, nh * DH), and the
    optional learnable weight/bias are applied on the flattened features.

    Args:
        num_heads: The number of heads.
        head_dim: The head dimension.
        eps: A small value to avoid division by zero.
        use_weight: Whether to use a learnable weight.
        use_bias: Whether to use a learnable bias.
        force_float32_reductions: Whether to force float32 reductions.
    """

    def __init__(
        self,
        num_heads: int,
        head_dim: int,
        eps: float = 1e-6,
        use_weight: bool = True,
        use_bias: bool = False,
        force_float32_reductions: bool = True,
    ):
        super().__init__()
        self.num_heads = num_heads
        self.head_dim = head_dim
        self.num_features = num_heads * head_dim
        self.eps = eps
        self.force_float32_reductions = force_float32_reductions
        self.weight = nn.Parameter(torch.ones(self.num_features)) if use_weight else None
        self.bias = nn.Parameter(torch.zeros(self.num_features)) if use_bias else None

    def _apply_weight_bias(self, x: torch.Tensor) -> torch.Tensor:
        # apply the optional learnable scale and shift on the flattened features
        if self.weight is not None:
            x = x * self.weight
        if self.bias is not None:
            x = x + self.bias
        return x

    def _layer_normalize(self, x: torch.Tensor) -> torch.Tensor:
        # center and scale over the last (head-dim) axis
        orig_dtype = x.dtype
        if self.force_float32_reductions:
            x = x.float()
        centered = x - x.mean(dim=-1, keepdim=True)
        normed = centered * torch.rsqrt(x.var(dim=-1, keepdim=True, unbiased=False) + self.eps)
        return normed.to(orig_dtype)

    def forward(
        self,
        x: torch.Tensor,
    ) -> torch.Tensor:
        batch_size, sequence_length, nh, DH = x.shape
        if nh != self.num_heads:
            raise ValueError(f"Expected {self.num_heads} heads, got {nh}, input shape: {x.shape}")
        if self.head_dim != DH:
            raise ValueError(f"Expected {self.head_dim} head dimension, got {DH}, input shape: {x.shape}")
        out = self._layer_normalize(x).reshape(batch_size, sequence_length, -1)
        return self._apply_weight_bias(out)
class xLSTMFeedForward(nn.Module):
    """Gated (SwiGLU-style) feed-forward block.

    Projects the hidden states up to ``up_proj_dim`` (hidden_size * ffn_proj_factor,
    rounded up to a multiple of ffn_round_up_to_multiple_of), gates them with SiLU,
    and projects back down. ``config.weight_mode`` selects separate ("single") or
    concatenated ("fused") up-projection weights.
    """

    def __init__(self, config: xLSTMConfig):
        super().__init__()
        self.config = config
        self.up_proj_dim = round_up_to_next_multiple_of(
            config.hidden_size * config.ffn_proj_factor,
            config.ffn_round_up_to_multiple_of,
        )
        use_bias = self.config.use_bias
        if self.config.weight_mode == "single":
            # separate gate and value up-projections
            self.proj_up_gate = nn.Linear(config.hidden_size, self.up_proj_dim, bias=use_bias)
            self.proj_up = nn.Linear(config.hidden_size, self.up_proj_dim, bias=use_bias)
        elif self.config.weight_mode == "fused":
            # gate and value produced by a single matmul, concatenated on the last dim
            self.proj_up_gate_z = nn.Linear(config.hidden_size, 2 * self.up_proj_dim, bias=use_bias)
        self.proj_down = nn.Linear(self.up_proj_dim, config.hidden_size, bias=use_bias)
        self.act_fn = nn.SiLU()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        if self.config.weight_mode == "single":
            x = self.act_fn(self.proj_up_gate(x)) * self.proj_up(x)
        elif self.config.weight_mode == "fused":
            gate, z = torch.tensor_split(self.proj_up_gate_z(x), (self.up_proj_dim,), dim=-1)
            x = self.act_fn(gate) * z
        return self.proj_down(x)
class xLSTMLayer(nn.Module):
    """mLSTM sequence-mixing layer.

    Projects the input to queries, keys, values and gate preactivations (either
    with separate linear layers, weight_mode="single", or fused matmuls,
    weight_mode="fused"), runs the mLSTM backend over the heads, normalizes the
    per-head outputs, applies a sigmoid output gate, and projects back to the
    hidden size.
    """

    def __init__(self, config: xLSTMConfig):
        super().__init__()
        self.config = config
        # value and query/key projection widths derived from the hidden size
        self.v_dim = int(config.hidden_size * config.v_dim_factor)
        self.qk_dim = int(config.hidden_size * config.qk_dim_factor)
        if self.config.weight_mode == "single":
            # separate projections for q, k, v and the output gate
            self.q = nn.Linear(
                in_features=self.config.hidden_size,
                out_features=self.qk_dim,
                bias=self.config.use_bias,
            )
            self.k = nn.Linear(
                in_features=self.config.hidden_size,
                out_features=self.qk_dim,
                bias=self.config.use_bias,
            )
            self.v = nn.Linear(
                in_features=self.config.hidden_size,
                out_features=self.v_dim,
                bias=self.config.use_bias,
            )
            self.ogate_preact = nn.Linear(
                in_features=self.config.hidden_size,
                out_features=self.v_dim,
                bias=self.config.use_bias,
            )
            # input/forget gate projections always carry a bias (one scalar gate per head)
            self.igate_preact = nn.Linear(
                in_features=self.config.hidden_size,
                out_features=self.config.num_heads,
                bias=True,
            )
            self.fgate_preact = nn.Linear(
                in_features=self.config.hidden_size,
                out_features=self.config.num_heads,
                bias=True,
            )
        elif self.config.weight_mode == "fused":
            # q, k, v and output gate fused into one matmul
            self.qkv_opreact = nn.Linear(
                in_features=self.config.hidden_size,
                out_features=2 * self.qk_dim + 2 * self.v_dim,
                bias=self.config.use_bias,
            )
            # input and forget gates fused into one matmul
            self.ifgate_preact = nn.Linear(
                in_features=self.config.hidden_size,
                out_features=2 * self.config.num_heads,
                bias=True,
            )
        self.ogate_act_fn = nn.Sigmoid()
        self.mlstm_backend = xLSTMBackend(config=self.config)
        # per-head layer norm applied to the backend outputs
        self.multihead_norm = xLSTMMultiHeadLayerNorm(
            num_heads=self.config.num_heads,
            head_dim=self.v_dim // self.config.num_heads,
            eps=self.config.norm_eps,
            use_weight=True,
            use_bias=self.config.use_bias,
            force_float32_reductions=self.config.norm_reduction_force_float32,
        )
        self.out_proj = nn.Linear(
            in_features=self.v_dim,
            out_features=self.config.hidden_size,
            bias=self.config.use_bias,
        )

    def forward(
        self, x: torch.Tensor, state: mLSTMLayerStateType | None = None
    ) -> tuple[torch.Tensor, mLSTMLayerStateType | None]:
        """Mix the sequence with the mLSTM backend.

        Args:
            x: Input of shape (batch_size, sequence_length, hidden_size).
            state: Optional (C, n, m) state triple carried over from a previous call.

        Returns:
            The layer output (batch_size, sequence_length, hidden_size) and the
            updated state triple (or None, depending on the backend mode).

        Raises:
            ValueError: If the input is not 3-dimensional, or the backend output
                shape does not match the expected per-head shape.
        """
        if x.ndim != 3:
            raise ValueError(f"Input must have shape [batch_size, sequence_length, HD], got {x.shape}")
        batch_size, sequence_length, _ = x.shape
        if self.config.weight_mode == "single":
            query = self.q(x)
            key = self.k(x)
            value = self.v(x)
            o_preact = self.ogate_preact(x)
            # soft_cap bounds the gate preactivations (cap value from the config)
            i_preact = soft_cap(self.igate_preact(x), cap_value=self.config.gate_soft_cap)
            f_preact = soft_cap(self.fgate_preact(x), cap_value=self.config.gate_soft_cap)
        elif self.config.weight_mode == "fused":
            # split the fused projection back into q, k, v and output gate
            qkv_opreact = self.qkv_opreact(x)
            query, key, value, o_preact = torch.tensor_split(
                qkv_opreact,
                (
                    self.qk_dim,
                    2 * self.qk_dim,
                    2 * self.qk_dim + self.v_dim,
                ),
                dim=-1,
            )
            if_preact = soft_cap(self.ifgate_preact(x), cap_value=self.config.gate_soft_cap)
            i_preact, f_preact = torch.tensor_split(if_preact, (self.config.num_heads,), dim=-1)
        # split into heads: (batch, heads, seq, head_dim) for q/k/v, (batch, heads, seq) for gates
        query = query.reshape(batch_size, sequence_length, self.config.num_heads, -1).transpose(1, 2)
        key = key.reshape(batch_size, sequence_length, self.config.num_heads, -1).transpose(1, 2)
        value = value.reshape(batch_size, sequence_length, self.config.num_heads, -1).transpose(1, 2)
        i_preact = i_preact.transpose(1, 2)
        f_preact = f_preact.transpose(1, 2)
        if state is None:
            c_initial, n_initial, m_initial = None, None, None
        else:
            c_initial, n_initial, m_initial = state
        h, state = self.mlstm_backend(
            query=query,
            key=key,
            value=value,
            igate=i_preact,
            fgate=f_preact,
            c_initial=c_initial,
            n_initial=n_initial,
            m_initial=m_initial,
        )
        expected_h_shape = (
            batch_size,
            self.config.num_heads,
            sequence_length,
            self.v_dim // self.config.num_heads,
        )
        if h.shape != expected_h_shape:
            raise ValueError(f"Got {h.shape}, expected {expected_h_shape}")
        # back to (batch, seq, heads, head_dim), normalize per head, flatten heads
        h = h.transpose(1, 2)
        h_norm = self.multihead_norm(h)
        h_norm = h_norm.reshape(batch_size, sequence_length, -1)
        # sigmoid output gate, then final projection back to hidden_size
        h_out = self.ogate_act_fn(o_preact) * h_norm
        y = self.out_proj(h_out)
        return y, state
class xLSTMBlock(GradientCheckpointingLayer):
    """Pre-norm residual block: RMSNorm -> mLSTM layer, then RMSNorm -> feed-forward."""

    def __init__(self, config: xLSTMConfig):
        super().__init__()
        self.config = config
        norm_kwargs = dict(
            num_features=config.hidden_size,
            eps=config.norm_eps,
            use_weight=True,
            use_bias=config.use_bias,
            force_float32_reductions=config.norm_reduction_force_float32,
        )
        self.norm_mlstm = xLSTMRMSNorm(**norm_kwargs)
        self.mlstm_layer = xLSTMLayer(config)
        self.norm_ffn = xLSTMRMSNorm(**norm_kwargs)
        self.ffn = xLSTMFeedForward(config)

    def forward(self, x: torch.Tensor, state: mLSTMStateType | None = None) -> tuple[torch.Tensor, mLSTMStateType]:
        # mLSTM sub-block with residual connection
        mlstm_out, state = self.mlstm_layer(self.norm_mlstm(x), state)
        x = x + mlstm_out
        # feed-forward sub-block with residual connection
        x = x + self.ffn(self.norm_ffn(x))
        return x, state
def small_init_method(dim):
    """Return an in-place normal initializer using the "small init" scheme.

    Normal(mean=0, std=sqrt(2 / (5 * dim))), as described in "Transformers
    without Tears: Improving the Normalization of Self-Attention"
    (Nguyen & Salazar, 2019). Adapted from:
    https://github.com/EleutherAI/gpt-neox/blob/main/megatron/model/init_functions.py
    """
    std = (2 / (5 * dim)) ** (1 / 2)

    def init_(tensor):
        # fill in place and return the tensor, mirroring torch.nn.init semantics
        return init.normal_(tensor, mean=0.0, std=std)

    return init_
def wang_init_method(n_layers, dim):
    """Return an in-place normal initializer using the Wang scheme.

    Normal(mean=0, std=2 / (n_layers * sqrt(dim))). Adapted from:
    https://github.com/EleutherAI/gpt-neox/blob/main/megatron/model/init_functions.py
    """
    std = 2 / n_layers / dim ** (1 / 2)

    def init_(tensor):
        # fill in place and return the tensor, mirroring torch.nn.init semantics
        return init.normal_(tensor, mean=0.0, std=std)

    return init_
class xLSTMPreTrainedModel(PreTrainedModel):
    """
    An abstract class for an interface to loading a pre-trained xLSTM model.
    """

    config_class = xLSTMConfig
    base_model_prefix = "backbone"
    _no_split_modules = ["xLSTMBlock"]
    supports_gradient_checkpointing = True
    _is_stateful = True

    def _module_name_map(self, module):
        # Return the dotted name of `module` within this model ("" if not found).
        # Linear search over all submodules; used only during weight init.
        for name, mod in self.named_modules():
            if mod is module:
                return name
        return ""

    @torch.no_grad()
    def _init_weights(self, module):
        # Weight initialization dispatched on module type and (for linears) on
        # the module's name within the model: "small init" for embeddings and
        # generic linears, Wang init for down/output projections, zeroed weights
        # plus constant/linspace biases for the gate projections.
        if isinstance(module, nn.Embedding):
            small_init_method(self.config.hidden_size)(self.embeddings.weight)
        elif isinstance(module, nn.Linear):
            if module.bias is not None:
                init.zeros_(module.bias)
            if self.config.weight_mode == "single" and "gate" in self._module_name_map(module):
                init.zeros_(module.weight)
                # igate bias: constant -10; fgate bias: linspace(3, 6)
                if "igate" in self._module_name_map(module):
                    init.copy_(module.bias, -10.0 * torch.ones_like(module.bias))
                elif "fgate" in self._module_name_map(module):
                    init.copy_(
                        module.bias,
                        torch.linspace(
                            3.0,
                            6.0,
                            module.bias.shape[-1],
                        ).to(
                            device=module.bias.device,
                            dtype=module.bias.dtype,
                        ),
                    )
            elif self.config.weight_mode == "fused" and "gate" in self._module_name_map(module):
                init.zeros_(module.weight)
                # NOTE(review): `torch.ones_like(module.bias)` spans the full fused
                # bias (2 * num_heads) while the copy_ target slice has num_heads
                # entries — the shapes look inconsistent. Same for the linspace of
                # length bias.shape[-1] below, and both copy_ calls target the same
                # [:num_heads] slice. Confirm this branch is actually exercised.
                init.copy_(
                    module.bias[: self.config.num_heads],
                    module.bias[: self.config.num_heads]
                    - module.bias[: self.config.num_heads]
                    - 10.0 * torch.ones_like(module.bias),
                )
                init.copy_(
                    module.bias[: self.config.num_heads],
                    module.bias[: self.config.num_heads]
                    - module.bias[self.config.num_heads :]
                    + torch.linspace(
                        3.0,
                        6.0,
                        module.bias.shape[-1],
                    ).to(
                        device=module.bias.device,
                        dtype=module.bias.dtype,
                    ),
                )
            elif "proj_down" in self._module_name_map(module):
                wang_init_method(dim=module.weight.shape[1], n_layers=self.config.num_hidden_layers)(module.weight)
            elif "out_proj" in self._module_name_map(module):
                wang_init_method(dim=self.config.hidden_size, n_layers=self.config.num_hidden_layers)(module.weight)
            elif module.weight is not None:
                small_init_method(self.config.hidden_size)(module.weight)
        elif isinstance(module, xLSTMRMSNorm) or hasattr(module, "_layer_normalize"):
            # RMSNorm and multi-head layer norm: weight -> 1, bias -> 0
            init.ones_(module.weight)
            if hasattr(module, "bias") and module.bias is not None:
                init.zeros_(module.bias)
class xLSTMCache:
"""
Cache for xLSTM model which does not have attention mechanism and key value states.
Arguments:
config (`PreTrainedConfig):
The configuration file defining the shape-related attributes required to initialize the static cache.
max_batch_size (`int`):
The batch size with which the model will be used.
dtype (`torch.dtype`, *optional*, defaults to `torch.bfloat16`):
The default `dtype` to use when initializing the layer.
device (`torch.device` or `str`, *optional*):
The device on which the cache should be initialized. Should be the same as the layer.
Attributes:
seqlen_offset: int
dtype: torch.dtype
Example:
```python
>>> from transformers import AutoTokenizer, xLSTMForCausalLM, xLSTMCache
>>> model = xLSTMForCausalLM.from_pretrained("NX-AI/xLSTM-7b")
>>> tokenizer = xLSTMTokenizer.from_pretrained("NX-AI/xLSTM-7b")
>>> inputs = tokenizer(text="I am an xLSTM", return_tensors="pt")
>>> # Prepare a cache class and pass it to model's forward
>>> cache_params = xLSTMCache(config=model.config, max_batch_size=1, device=model.device, dtype=model.dtype)
>>> outputs = model(**inputs, cache_params=cache_params, use_cache=True)
>>> outputs.cache_params
xLSTMCache()
"""
def __init__(
self,
config: xLSTMConfig,
max_batch_size: int,
dtype: torch.dtype = torch.bfloat16,
device: str | None = None,
**kwargs,
):
self.seqlen_offset = 0
self.dtype = dtype
self.config = config
self.rnn_state = {
layer: (
torch.zeros(
[max_batch_size, config.num_heads, config.qk_head_dim, config.v_head_dim],
dtype=dtype,
device=device,
),
torch.zeros([max_batch_size, config.num_heads, config.qk_head_dim], dtype=dtype, device=device),
torch.zeros([max_batch_size, config.num_heads, 1], dtype=dtype, device=device),
)
for layer in range(config.num_hidden_layers)
}
def reset(self):
self.rnn_state = {
layer: (
torch.zeros_like(self.rnn_state[layer][0]),
torch.zeros_like(self.rnn_state[layer][1]),
torch.zeros_like(self.rnn_state[layer][2]),
)
for layer in self.rnn_state
}
@dataclass
@auto_docstring
class xLSTMOutput(ModelOutput):
    r"""
    cache_params (`xLSTMCache`):
        The state of the model at the last time step. Can be used in a forward method with the next `input_ids` to
        avoid providing the old `input_ids`.
    """

    # final hidden states of the backbone: (batch_size, sequence_length, hidden_size)
    last_hidden_state: torch.FloatTensor | None
    # recurrent (C, n, m) states carried between forward calls
    cache_params: xLSTMCache | None = None
    # per-layer hidden states, populated when output_hidden_states=True
    hidden_states: tuple[torch.FloatTensor] | None = None
@auto_docstring
class xLSTMModel(xLSTMPreTrainedModel):
    def __init__(self, config):
        """Build the token embedding, the stack of xLSTM blocks, and the output norm."""
        super().__init__(config)
        # use embedding_dim and num_blocks once here to make use of them
        self.embeddings = nn.Embedding(config.vocab_size, config.embedding_dim)
        self.blocks = nn.ModuleList([xLSTMBlock(config) for _ in range(config.num_blocks)])
        self.out_norm = xLSTMRMSNorm(config.hidden_size, eps=config.norm_eps)
        self.gradient_checkpointing = False
        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings

    def set_input_embeddings(self, new_embedding):
        self.embeddings = new_embedding

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        cache_params: xLSTMCache | None = None,
        use_cache: bool | None = None,
        output_hidden_states: bool | None = None,
        **kwargs,
    ) -> tuple | xLSTMOutput:
        r"""
        cache_params (`xLSTMCache`, *optional*):
            The xLSTMCache that carries the RNN states.
        """
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        # Caching follows the config at inference time and is always off while training.
        use_cache = use_cache if use_cache is not None else (self.config.use_cache if not self.training else False)
        if self.gradient_checkpointing and self.training and use_cache:
            use_cache = False
        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
        if inputs_embeds is None:
            inputs_embeds = self.embeddings(input_ids)
        if use_cache and cache_params is None:
            cache_params = xLSTMCache(
                self.config, inputs_embeds.size(0), device=inputs_embeds.device, dtype=inputs_embeds.dtype
            )
        hidden_states = inputs_embeds
        # Bug fix: initialize before branching — the chunked-inference path below previously
        # never defined `all_hidden_states`, raising a NameError at the final return.
        all_hidden_states = () if output_hidden_states else None
        # Chunked inference path: long sequences are processed in slices of
        # `max_inference_chunksize` to bound peak memory. It is skipped when per-layer
        # hidden states are requested because intermediate states are not collected here.
        if (
            not self.training
            and self.config.max_inference_chunksize < hidden_states.shape[1]
            and not output_hidden_states
        ):
            offset = 0
            with torch.no_grad():
                if cache_params is None:
                    # NOTE(review): this cache is created with default dtype/device, which may
                    # not match `hidden_states` — confirm whether they should be forwarded here.
                    cache_params = xLSTMCache(config=self.config, max_batch_size=hidden_states.shape[0])
                final_state = torch.zeros_like(hidden_states)
                while offset < hidden_states.shape[1]:
                    hidden_states_chunk = hidden_states[
                        :, offset : min(offset + self.config.max_inference_chunksize, hidden_states.shape[1])
                    ]
                    for layer_idx, xlstm_block in enumerate(self.blocks):
                        hidden_states_chunk, rnn_state = xlstm_block(
                            hidden_states_chunk,
                            state=cache_params.rnn_state[layer_idx],
                        )
                        # Persist each layer's recurrent state in place for the next chunk.
                        for state_idx in range(len(cache_params.rnn_state[layer_idx])):
                            local_rnn_state = rnn_state[state_idx]
                            cache_params.rnn_state[layer_idx][state_idx].copy_(local_rnn_state)
                        cache_params.rnn_state_initial = False
                    final_state[
                        :, offset : min(offset + self.config.max_inference_chunksize, hidden_states.shape[1])
                    ] = hidden_states_chunk
                    offset += self.config.max_inference_chunksize
                hidden_states = final_state
        else:
            for layer_idx, xlstm_block in enumerate(self.blocks):
                hidden_states, rnn_state = xlstm_block(
                    hidden_states,
                    cache_params.rnn_state[layer_idx] if cache_params is not None else None,
                )
                if cache_params:
                    for state_idx in range(len(cache_params.rnn_state[layer_idx])):
                        local_rnn_state = rnn_state[state_idx]
                        cache_params.rnn_state[layer_idx][state_idx].copy_(local_rnn_state)
                    cache_params.rnn_state_initial = False
                if output_hidden_states:
                    all_hidden_states = all_hidden_states + (hidden_states,)
        if use_cache:
            cache_params.seqlen_offset += inputs_embeds.shape[1]
        hidden_states = self.out_norm(hidden_states)
        if output_hidden_states:
            # Include the final (normalized) hidden state as the last entry.
            all_hidden_states = all_hidden_states + (hidden_states,)
        return xLSTMOutput(
            last_hidden_state=hidden_states,
            cache_params=cache_params,
            hidden_states=all_hidden_states,
        )
@dataclass
@auto_docstring
class xLSTMCausalLMOutput(ModelOutput):
    r"""
    loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
        Language modeling loss (for next-token prediction).
    logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
        Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
    cache_params (`xLSTMCache`, *optional*, carrying the RNN states):
        The state of the model at the last time step. Can be used in a forward method with the next `input_ids` to
        avoid providing the old `input_ids`.
    """

    # Next-token prediction loss; None unless `labels` was passed to forward.
    loss: torch.FloatTensor | None = None
    # Soft-capped vocabulary logits, upcast to float32 by the LM head.
    logits: torch.FloatTensor | None = None
    # Recurrent cache carrying per-layer RNN states; None when caching is disabled.
    cache_params: xLSTMCache | None = None
    # Per-layer hidden states from the backbone, when requested.
    hidden_states: tuple[torch.FloatTensor] | None = None
@auto_docstring
class xLSTMForCausalLM(xLSTMPreTrainedModel, GenerationMixin):
    def __init__(self, config):
        # Decoder-only LM: xLSTM backbone followed by an (untied here) LM head projection.
        super().__init__(config)
        self.backbone = xLSTMModel(config)
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        # Initialize weights and apply final processing
        self.post_init()

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def get_input_embeddings(self):
        # Input embeddings live on the backbone.
        return self.backbone.get_input_embeddings()

    def set_input_embeddings(self, new_embeddings):
        return self.backbone.set_input_embeddings(new_embeddings)

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        cache_params: xLSTMCache | None = None,
        labels: torch.LongTensor | None = None,
        use_cache: bool | None = None,
        output_hidden_states: bool | None = None,
        **kwargs,
    ) -> tuple | xLSTMCausalLMOutput:
        r"""
        cache_params (`xLSTMCache`, *optional*):
            The xLSTMCache that carries the RNN states.
        """
        xlstm_outputs = self.backbone(
            input_ids,
            cache_params=cache_params,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_hidden_states=output_hidden_states,
            **kwargs,
        )
        hidden_states = xlstm_outputs[0]
        # Project to vocabulary logits in the LM head's dtype, then upcast to float32.
        logits = self.lm_head(hidden_states.to(self.lm_head.weight.dtype)).float()
        # Apply the output logit soft cap; for long sequences this is done chunk by chunk,
        # in place and under no_grad, to limit peak memory.
        if not self.training and self.config.max_inference_chunksize < logits.shape[1]:
            offset = 0
            with torch.no_grad():
                while offset < logits.shape[1]:
                    logits[:, offset : min(offset + self.config.max_inference_chunksize, logits.shape[1])] = soft_cap(
                        logits[:, offset : min(offset + self.config.max_inference_chunksize, logits.shape[1])],
                        self.config.output_logit_soft_cap,
                    )
                    offset += self.config.max_inference_chunksize
        else:
            logits = soft_cap(logits, self.config.output_logit_soft_cap)
        loss = None
        if labels is not None:
            # move labels to correct device
            labels = labels.to(logits.device)
            # Standard causal-LM shift: tokens < n predict token n.
            shift_logits = logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            # Flatten the tokens
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
        return xLSTMCausalLMOutput(
            loss=loss,
            logits=logits,
            cache_params=xlstm_outputs.cache_params,
            hidden_states=xlstm_outputs.hidden_states,
        )
# Public symbols re-exported by the `transformers.models.xlstm` package.
__all__ = [
    "xLSTMForCausalLM",
    "xLSTMModel",
    "xLSTMPreTrainedModel",
]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/xlstm/modeling_xlstm.py",
"license": "Apache License 2.0",
"lines": 1384,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:tests/models/xlstm/test_modeling_xlstm.py | # Copyright 2025 NXAI GmbH. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, is_torch_available, xLSTMConfig
from transformers.testing_utils import (
require_torch,
require_torch_accelerator,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
xLSTMForCausalLM,
xLSTMModel,
)
from transformers.models.xlstm.modeling_xlstm import xLSTMBlock, xLSTMCache
class xLSTMModelTester:
    """Helper that builds tiny xLSTM configs and dummy inputs for the generic model tests."""

    def __init__(
        self,
        parent,
        batch_size=13,
        num_heads=2,
        seq_length=7,
        is_training=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=128,
        qk_dim_factor=0.5,
        v_dim_factor=1.0,
        num_hidden_layers=2,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        num_labels=3,
        num_choices=4,
        scope=None,
        chunkwise_kernel="chunkwise--native_autograd",
        sequence_kernel="native_sequence__native",
        step_kernel="native",
        tie_word_embeddings=False,
    ):
        self.parent = parent
        self.num_heads = num_heads
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.num_hidden_layers = num_hidden_layers
        self.hidden_size = hidden_size
        self.qk_dim_factor = qk_dim_factor
        self.v_dim_factor = v_dim_factor
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        # The test vocab reserves its last id for all special tokens.
        self.bos_token_id = vocab_size - 1
        self.eos_token_id = vocab_size - 1
        self.pad_token_id = vocab_size - 1
        self.chunkwise_kernel = chunkwise_kernel
        self.sequence_kernel = sequence_kernel
        self.step_kernel = step_kernel
        self.tie_word_embeddings = tie_word_embeddings

    def prepare_config_and_inputs(self, scale_attn_by_inverse_layer_idx=False, reorder_and_upcast_attn=False):
        """Return (config, input_ids, None, sequence_labels, token_labels, choice_labels).

        The two keyword arguments are accepted for interface compatibility with the
        generic testers but are not used by xLSTM.
        """
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return (
            config,
            input_ids,
            None,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def get_config(self):
        """Build a tiny `xLSTMConfig` suitable for fast unit tests."""
        return xLSTMConfig(
            num_heads=self.num_heads,
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            qk_dim_factor=self.qk_dim_factor,
            v_dim_factor=self.v_dim_factor,
            n_positions=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            use_cache=True,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            chunkwise_kernel=self.chunkwise_kernel,
            sequence_kernel=self.sequence_kernel,
            step_kernel=self.step_kernel,
            tie_word_embeddings=self.tie_word_embeddings,
        )

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the shape expected by `ModelTesterMixin`."""
        config, input_ids, _, _, _, _ = self.prepare_config_and_inputs()
        inputs_dict = {"input_ids": input_ids}
        return config, inputs_dict
@require_torch
class xLSTMModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    # Model classes exercised by the generic mixins; empty when torch is unavailable.
    all_model_classes = (xLSTMModel, xLSTMForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (xLSTMForCausalLM,) if is_torch_available() else ()
    has_attentions = False  # xLSTM does not support attentions
    pipeline_model_mapping = (
        {"feature-extraction": xLSTMModel, "text-generation": xLSTMForCausalLM} if is_torch_available() else {}
    )
    def setUp(self):
        # Build a tiny-model tester and a config tester shared by all generic test methods.
        self.model_tester = xLSTMModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=xLSTMConfig, n_embd=37, common_properties=["hidden_size", "num_hidden_layers"]
        )
    @unittest.skip(reason="xLSTM cache slicing test case is an edge case")
    def test_generate_without_input_ids(self):
        pass
    @unittest.skip(reason="xLSTM cache slicing test case is an edge case")
    @parameterized.expand([("greedy", 1), ("beam search", 2)])
    def test_generate_from_inputs_embeds(self, _, num_beams):
        pass
    @unittest.skip(reason="xLSTM cache slicing test case is an edge case")
    def test_greedy_generate_dict_outputs_use_cache(self):
        pass
    @unittest.skip(reason="xLSTM cache slicing is interacting with beam search")
    def test_beam_search_generate_dict_outputs_use_cache(self):
        pass
    @unittest.skip(reason="xLSTM cache is not iterable")
    def test_multi_gpu_data_parallel_forward(self):
        pass
    def test_model_outputs_equivalence(self):
        """Check that tuple-style and dict-style outputs of each model class agree."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        # NOTE(review): mutable default `additional_kwargs={}` — harmless here since it is
        # only read, never mutated.
        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            with torch.no_grad():
                tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs)
                dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple()
                # Walk both output structures in lockstep, comparing leaf tensors.
                def recursive_check(tuple_object, dict_object):
                    if isinstance(tuple_object, xLSTMCache):
                        recursive_check(tuple_object.rnn_state, dict_object.rnn_state)
                    elif isinstance(tuple_object, (list, tuple)):
                        for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                            recursive_check(tuple_iterable_value, dict_iterable_value)
                    elif isinstance(tuple_object, dict):
                        for tuple_iterable_value, dict_iterable_value in zip(
                            tuple_object.values(), dict_object.values()
                        ):
                            recursive_check(tuple_iterable_value, dict_iterable_value)
                    elif tuple_object is None:
                        return
                    else:
                        self.assertTrue(
                            torch.allclose(tuple_object, dict_object, atol=1e-5),
                            msg=(
                                "Tuple and dict output are not equal. Difference:"
                                f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:"
                                f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has"
                                f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}."
                            ),
                        )
                recursive_check(tuple_output, dict_output)
        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            # Compare with and without labels, and with hidden-state output enabled.
            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)
            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)
            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
    def test_chunkwise_shape_calculation(self):
        """A sequence of two chunk lengths must come back with the expected output shape."""
        config = self.model_tester.get_config()
        config.chunkwise_kernel = "chunkwise--native_autograd"
        model = xLSTMModel(config)
        model.to(torch_device)
        model.train(False)
        batch_size, seq_length = 2, config.chunk_size * 2
        input_ids = ids_tensor([batch_size, seq_length], config.vocab_size)
        with torch.no_grad():
            outputs = model(input_ids)
        expected_shape = (batch_size, seq_length, config.hidden_size)
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
@require_torch
@slow
@unittest.skip("Model is fully broken currently")
class xLSTMIntegrationTest(unittest.TestCase):
    """Slow integration tests against the released NX-AI/xLSTM-7b checkpoint."""

    def setUp(self):
        self.model_id = "NX-AI/xLSTM-7b"
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_id, legacy=False)
        self.prompt = ("[INST]Write a hello world program in C++.",)

    def test_simple_generate(self):
        """
        Simple generate test to avoid regressions.
        Note: state-spaces (cuda) implementation and pure torch implementation
        have irreconciliable differences as of now, which will cause this test to fail
        in an environment with state-spaces installed.
        """
        tokenizer = self.tokenizer
        tokenizer.pad_token_id = tokenizer.eos_token_id
        model = xLSTMForCausalLM.from_pretrained(self.model_id, dtype=torch.bfloat16, device_map=torch_device)
        input_ids = tokenizer("[INST]Write a hello world program in C++.[/INST]", return_tensors="pt")["input_ids"].to(
            torch_device
        )
        out = model.generate(input_ids, do_sample=False, use_cache=True, max_new_tokens=30)
        output_sentence = tokenizer.decode(out[0])
        ground_truth_sentence = """<s>[INST]Write a hello world program in C++.[/INST] Sure, here is a simple "Hello, World!" program in C++:\n\n```cpp\n#include <iostream>\n\n"""
        self.assertEqual(output_sentence, ground_truth_sentence)

    def test_batched_equivalence_with_cache(self):
        """
        Verifies that batched generation matches individual generation.
        Important because of the specific caching mechanism + statefulness of the xLSTM model.
        Depending on precision and devices, differences can be observed from generation to generation.
        """
        tokenizer = self.tokenizer
        prompt = [
            "[INST]Write C#.[/INST]",
            "[INST]Write a hello world in C++.[/INST]",
            "[INST] Write a simple Fibonacci number computation function in Rust that does memoization, with comments, in safe Rust.[/INST]",
        ]
        model = xLSTMForCausalLM.from_pretrained(self.model_id, dtype=torch.bfloat16, device_map=torch_device)
        tokenizer.pad_token_id = tokenizer.eos_token_id
        # batched generation
        tokenized_prompts = tokenizer(prompt, return_tensors="pt", padding="longest").to(torch_device)
        batched_gen = model.generate(**tokenized_prompts, max_new_tokens=30, use_cache=True)
        batched_output = tokenizer.batch_decode(batched_gen, skip_special_tokens=True)
        # individual generation
        for index_gen, individual_prompt in enumerate(prompt):
            inputs = tokenizer(individual_prompt, return_tensors="pt", padding="longest").to(torch_device)
            individual_gen = model.generate(**inputs, max_new_tokens=30, use_cache=True)
            individual_output = tokenizer.batch_decode(individual_gen, skip_special_tokens=True)[0]
            self.assertEqual(individual_output[:100], batched_output[index_gen][:100])

    def test_batched_equivalence_without_cache(self):
        """
        Verifies that batched generation matches individual generation without cache.
        Important because of the specific caching mechanism + statefulness of the xLSTM model.
        Depending on precision and devices, differences can be observed from generation to generation.
        """
        tokenizer = self.tokenizer
        prompt = [
            "[INST]Write C#.[/INST]",
            "[INST]Write a hello world in C++.[/INST]",
            "[INST] Write a simple Fibonacci number computation function in Rust that does memoization, with comments, in safe Rust.[/INST]",
        ]
        model = xLSTMForCausalLM.from_pretrained(self.model_id, dtype=torch.bfloat16, device_map=torch_device)
        tokenizer.pad_token_id = tokenizer.eos_token_id
        # batched generation
        tokenized_prompts = tokenizer(prompt, return_tensors="pt", padding="longest").to(torch_device)
        # Bug fix: this test previously passed use_cache=True, duplicating the with-cache test.
        batched_gen = model.generate(**tokenized_prompts, max_new_tokens=30, use_cache=False)
        batched_output = tokenizer.batch_decode(batched_gen, skip_special_tokens=True)
        # individual generation
        for index_gen, individual_prompt in enumerate(prompt):
            inputs = tokenizer(individual_prompt, return_tensors="pt", padding="longest").to(torch_device)
            individual_gen = model.generate(**inputs, max_new_tokens=30, use_cache=False)
            individual_output = tokenizer.batch_decode(individual_gen, skip_special_tokens=True)[0]
            self.assertEqual(individual_output[:100], batched_output[index_gen][:100])

    @require_torch_accelerator
    def test_xlstm_block_train_vs_eval_equivalence(self):
        # Based on https://github.com/sustcsonglin/flash-linear-attention/issues/63
        # Credit to zhixuan-lin
        B, T, D = 4, 512, 768
        dtype = torch.bfloat16
        config = xLSTMConfig(num_heads=24, head_dim=64, hidden_size=768, expand=2, n_groups=1)
        torch.manual_seed(42)
        with torch.amp.autocast(device_type="cuda", dtype=dtype):
            with torch.no_grad():
                block = xLSTMBlock(config.to_xlstm_block_config()).to("cuda")
                hidden_states = torch.rand(size=(B, T, D), dtype=dtype, device="cuda")
                block.train()
                out_train = block(hidden_states)
                block.eval()
                out_eval = block(hidden_states)
        self.assertTrue(torch.allclose(out_train, out_eval, atol=1e-3))
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/xlstm/test_modeling_xlstm.py",
"license": "Apache License 2.0",
"lines": 311,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:src/transformers/models/deepseek_vl/convert_deepseek_vl_weights_to_hf.py | # Copyright 2025 Deepseek AI and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import gc
import json
import os
import regex as re
import torch
from huggingface_hub import snapshot_download
from huggingface_hub.errors import HFValidationError
from safetensors.torch import load_file
from transformers import (
AutoTokenizer,
DeepseekVLConfig,
DeepseekVLForConditionalGeneration,
DeepseekVLImageProcessor,
DeepseekVLProcessor,
)
from transformers.image_utils import IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
# fmt: off
# Regex rules mapping original DeepseekVL checkpoint keys to HF names. They are applied
# with `re.sub` in `convert_old_keys_to_new_keys`; a literal "(q|k|v)" left in a
# replacement is expanded into separate q/k/v tensors later by `get_qkv_state_dict`.
ORIGINAL_TO_CONVERTED_KEY_MAPPING = {
    # Siglip (Low Resolution)
    r"vision_model.vision_tower.pos_embed": r"model.vision_model.vision_model.embeddings.position_embedding.weight",
    r"vision_model.vision_tower.patch_embed.proj.(weight|bias)": r"model.vision_model.vision_model.embeddings.patch_embedding.\1",
    r"vision_model.vision_tower.blocks.(\d+).attn.qkv.(weight|bias)": r"model.vision_model.vision_model.encoder.layers.\1.self_attn.(q|k|v)_proj.\2",
    r"vision_model.vision_tower.blocks.(\d+).attn.proj.(weight|bias)": r"model.vision_model.vision_model.encoder.layers.\1.self_attn.out_proj.\2",
    r"vision_model.vision_tower.blocks.(\d+).norm(\d+).(weight|bias)": r"model.vision_model.vision_model.encoder.layers.\1.layer_norm\2.\3",
    r"vision_model.vision_tower.blocks.(\d+).mlp.fc(\d+).(weight|bias)": r"model.vision_model.vision_model.encoder.layers.\1.mlp.fc\2.\3",
    r"vision_model.vision_tower.norm.(weight|bias)": r"model.vision_model.vision_model.post_layernorm.\1",
    r"vision_model.vision_tower.attn_pool.latent": r"model.vision_model.vision_model.head.probe",
    r"vision_model.vision_tower.attn_pool.proj.(weight|bias)": r"model.vision_model.vision_model.head.attention.out_proj.\1",
    r"vision_model.vision_tower.attn_pool.norm.(weight|bias)": r"model.vision_model.vision_model.head.layernorm.\1",
    r"vision_model.vision_tower.attn_pool.mlp.fc(\d+).(weight|bias)": r"model.vision_model.vision_model.head.mlp.fc\1.\2",
    # Aligner
    r"aligner.layers.0.(weight|bias)": r"model.aligner.linear1.\1",
    r"aligner.layers.2.(weight|bias)": r"model.aligner.linear2.\1",
    # Llama (Text Model)
    r"language_model.model.(\w+)": r"model.language_model.\1",
    r"language_model.lm_head.(weight|bias)": r"lm_head.\1",
}
# fmt: on
# Adopted from https://github.com/deepseek-ai/DeepSeek-VL/blob/main/deepseek_vl/utils/conversation.py#L80-L91
# Jinja2 chat template assembled from concatenated string fragments; the inline comments
# describe each section. The DeepSeek-VL system prompt is hard-coded at the top.
CHAT_TEMPLATE = (
    # Define separators and initialize counter
    "{% set seps = ['\n\n', '<\uff5cend\u2581of\u2581sentence\uff5c>'] %}"
    "{% set i = 0 %}"
    # Start with default system prompt
    "You are a helpful language and vision assistant. "
    "You are able to understand the visual content that the user provides, "
    "and assist the user with a variety of tasks using natural language.\n\n"
    # Iterate through messages
    "{% for message in messages %}"
    # Identify user or assistant role
    "{% if message['role']|lower == 'user' %}"
    "User: "
    "{% elif message['role']|lower == 'assistant' %}"
    "Assistant:{% if not (loop.last and not add_generation_prompt and message['content'][0]['type']=='text' and message['content'][0]['text']=='') %} {% endif %}"
    "{% else %}"
    "{{ message['role'].capitalize() }}: "
    "{% endif %}"
    # Iterate through message content (text/images)
    "{% for content in message['content'] %}"
    # If content is an image, replace with placeholder
    "{% if content['type'] == 'image' %}"
    "<image_placeholder>"
    # If content is text, handle formatting
    "{% elif content['type'] == 'text' %}"
    "{% set text = content['text'] %}"
    # Strip whitespace for first and last text blocks
    "{% if loop.first %}{% set text = text.lstrip() %}{% endif %}"
    "{% if loop.last %}{% set text = text.rstrip() %}{% endif %}"
    # If previous content was text, add space
    "{% if not loop.first and message['content'][loop.index0-1]['type'] == 'text' %}"
    "{{ ' ' + text }}"
    "{% else %}"
    "{{ text }}"
    "{% endif %}"
    "{% endif %}"
    "{% endfor %}"  # End message content loop
    # Add separators between messages
    "{% if not loop.last or add_generation_prompt %}"
    "{% if message['role']|lower == 'user' %}"
    "{{ seps[0] }}"
    "{% else %}"
    "{{ seps[1] }}"
    "{% endif %}"
    "{% endif %}"
    "{% endfor %}"  # End messages loop
    # Add final Assistant prompt if required
    "{% if add_generation_prompt %}Assistant:{% endif %}"
)
def convert_old_keys_to_new_keys(state_dict_keys: dict):
    """
    Map original checkpoint key names to HF key names.

    The keys (any iterable of strings) are joined with newlines and every regex rule in
    `ORIGINAL_TO_CONVERTED_KEY_MAPPING` is applied in order via `re.sub`. A rule whose
    replacement is `None` erases the matching key (its line becomes empty).

    Returns:
        dict: {old_key: new_key}, one entry per input key.
    """
    old_text = "\n".join(state_dict_keys)
    new_text = old_text
    for pattern, replacement in ORIGINAL_TO_CONVERTED_KEY_MAPPING.items():
        if replacement is None:
            # Drop the key entirely: the matching line becomes empty.
            new_text = re.sub(pattern, "", new_text)
            continue
        new_text = re.sub(pattern, replacement, new_text)
    # Removed a dead `output_dict = {}` initialization that was unconditionally overwritten.
    return dict(zip(old_text.split("\n"), new_text.split("\n")))
def get_qkv_state_dict(key, parameter):
    """
    Expand a fused qkv entry into separate per-projection entries.

    A key containing a group token such as ``xxxx.(q|k|v).xxx`` with a tensor of shape
    (m, n) becomes three entries ``xxxx.q.xxx`` / ``xxxx.k.xxx`` / ``xxxx.v.xxx``,
    each holding an (m // 3, n) slice taken along dim 0.
    """
    group_token = re.search(r"(\(.*?\))", key).group(1)  # e.g. "(q|k|v)"
    names = group_token[1:-1].split("|")  # e.g. ["q", "k", "v"]
    chunks = torch.split(parameter, split_size_or_sections=parameter.size(0) // len(names), dim=0)
    return {key.replace(group_token, name): chunk for name, chunk in zip(names, chunks)}
def update_state_dict(old_state_dict):
    """
    Rename and reshape the original checkpoint tensors into the HF layout.

    Note: the input dict is consumed (tensors are popped) to keep peak memory low.
    """
    original_keys = list(old_state_dict.keys())
    renamed = convert_old_keys_to_new_keys(original_keys)
    converted = {}
    for old_key in original_keys:
        target_key = renamed[old_key]
        tensor = old_state_dict.pop(old_key)
        if "qkv" in old_key and "vision_tower_high" not in old_key:
            # Fused qkv projections are split into separate q/k/v tensors.
            converted.update(get_qkv_state_dict(target_key, tensor))
        elif "pos_embed" in old_key and "vision_tower_high" not in old_key:
            # timm's siglip stores this as [1, 576, 1024]; the transformers
            # implementation expects [576, 1024].
            converted[target_key] = tensor.squeeze(0)
        else:
            converted[target_key] = tensor
    return converted
def load_model_state_dict(input_path: str) -> dict:
    """
    Load a model state dict from `input_path`, handling both sharded and
    single-file safetensors checkpoints. Sharded checkpoints (an index file
    is present) take precedence over a single `model.safetensors`.
    """
    index_file = os.path.join(input_path, "model.safetensors.index.json")
    single_file = os.path.join(input_path, "model.safetensors")
    if os.path.exists(index_file):
        print("Loading sharded model...")
        with open(index_file, "r") as f:
            weight_map = json.load(f)["weight_map"]
        merged = {}
        # Each shard file appears once per tensor in the index; load each file only once.
        for shard_file in sorted(set(weight_map.values())):
            print(f"Loading shard {shard_file}...")
            merged.update(load_file(os.path.join(input_path, shard_file)))
        return merged
    if os.path.exists(single_file):
        print("Loading single file model...")
        return load_file(single_file, device="cpu")
    raise ValueError(f"No model files found in {input_path}")
def convert_model(
    hf_repo_id: str,
    output_dir: str | None = None,
    output_hub_path: str | None = None,
):
    """Convert original DeepseekVL weights into the HF format.

    Builds the hard-coded 1.3b-chat config, converts the processor and the weights,
    then optionally saves locally (`output_dir`) and/or pushes to the Hub
    (`output_hub_path`). When saved locally, the model is reloaded once as a sanity
    check.
    """
    if output_dir:
        os.makedirs(output_dir, exist_ok=True)
    try:
        input_path = snapshot_download(hf_repo_id)
    except HFValidationError:
        # If the input path is not a HF repo ID, assume it's a local path
        input_path = hf_repo_id
    # ------------------------------------------------------------
    # Create and save config
    # ------------------------------------------------------------
    # NOTE(review): these sizes are hard-coded for deepseek-vl-1.3b-chat; other
    # checkpoints would need different values — confirm before reusing.
    config = DeepseekVLConfig(
        text_config={
            "hidden_size": 2048,
            "intermediate_size": 5632,
            "max_position_embeddings": 16384,
            "num_attention_heads": 16,
            "num_hidden_layers": 24,
            "vocab_size": 102400,
        },
        vision_config={
            "hidden_size": 1024,
            "intermediate_size": 4096,
            "image_size": 384,
            "patch_size": 16,
            "hidden_act": "gelu",
            "vision_use_head": False,
            "num_attention_heads": 16,
            "num_hidden_layers": 24,
        },
    )
    # save config
    if output_dir:
        config.save_pretrained(output_dir)
        print("Model config saved successfully...")
    # ------------------------------------------------------------
    # Convert processor
    # ------------------------------------------------------------
    image_processor = DeepseekVLImageProcessor(
        image_mean=IMAGENET_STANDARD_MEAN,
        image_std=IMAGENET_STANDARD_STD,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        input_path,
        extra_special_tokens={
            "pad_token": "<|end▁of▁sentence|>",
            "image_token": "<image_placeholder>",
        },
    )
    processor = DeepseekVLProcessor(
        image_processor=image_processor,
        tokenizer=tokenizer,
        chat_template=CHAT_TEMPLATE,
    )
    if output_dir:
        print(f"Saving processor to {output_dir}...")
        processor.save_pretrained(output_dir)
    if output_hub_path:
        print(f"Pushing processor to hub at {output_hub_path}...")
        processor.push_to_hub(output_hub_path)
    # ------------------------------------------------------------
    # Convert weights
    # ------------------------------------------------------------
    print("Creating empty model...")
    # Meta device: allocate the module tree without materializing any weights.
    with torch.device("meta"):
        model = DeepseekVLForConditionalGeneration(config)
    # Load and convert state dict
    print("Loading state dict...")
    state_dict = load_model_state_dict(input_path)
    state_dict = update_state_dict(state_dict)
    # Load converted state dict
    print("Loading converted weights into model...")
    # strict=False tolerates extra keys; missing keys are still treated as fatal below.
    info = model.load_state_dict(state_dict, strict=False, assign=True)
    if len(info.missing_keys) > 0:
        raise ValueError(f"Missing keys: {info.missing_keys}")
    # Tie weights before any device mapping
    print("Tying weights...")
    model.tie_weights()
    # Save the model
    if output_dir:
        print(f"Saving model to {output_dir}...")
        model.save_pretrained(output_dir)
    if output_hub_path:
        print(f"Pushing model to hub at {output_hub_path}...")
        model.push_to_hub(output_hub_path)
    del state_dict, model
    gc.collect()
    # Validate the saved model if saved locally
    if output_dir:
        print("Reloading the local model to check if it's saved correctly...")
        DeepseekVLForConditionalGeneration.from_pretrained(output_dir, device_map="auto")
        print("Local model reloaded successfully.")
def main():
    """Command-line entry point for the DeepseekVL weight conversion."""
    parser = argparse.ArgumentParser()
    # (flag, default, help) triples — help strings match the documented CLI.
    arg_specs = [
        (
            "--hf_repo_id",
            "deepseek-ai/deepseek-vl-1.3b-chat",
            "Location of official weights from DeepseekAI on HF",
        ),
        (
            "--output_dir",
            None,
            "Location to write the converted model and processor",
        ),
        (
            "--output_hub_path",
            None,
            "Repository ID to push model to hub (e.g. 'username/model-name')",
        ),
    ]
    for flag, default, help_text in arg_specs:
        parser.add_argument(flag, default=default, help=help_text)
    args = parser.parse_args()
    convert_model(
        hf_repo_id=args.hf_repo_id,
        output_dir=args.output_dir,
        output_hub_path=args.output_hub_path,
    )


if __name__ == "__main__":
    main()
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/deepseek_vl/convert_deepseek_vl_weights_to_hf.py",
"license": "Apache License 2.0",
"lines": 298,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/deepseek_vl/modular_deepseek_vl.py | # Copyright 2025 Deepseek AI and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
from ...configuration_utils import PreTrainedConfig
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessingKwargs, ProcessorMixin, Unpack
from ...tokenization_utils_base import (
PreTokenizedInput,
TextInput,
)
from ...utils import (
auto_docstring,
logging,
)
from ..auto import CONFIG_MAPPING, AutoConfig, AutoModel
from ..idefics.modeling_idefics import IdeficsBaseModelOutputWithPast, IdeficsCausalLMOutputWithPast
from ..janus.image_processing_janus import JanusImageProcessor
from ..janus.image_processing_janus_fast import JanusImageProcessorFast
from ..janus.modeling_janus import JanusForConditionalGeneration, JanusModel, JanusPreTrainedModel
logger = logging.get_logger(__name__)
class DeepseekVLConfig(PreTrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`DeepseekVLModel`]. It is used to instantiate a
    DeepseekVL model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of the DeepseekVL
    [deepseek-community/deepseek-vl-1.3b-chat](https://huggingface.co/deepseek-community/deepseek-vl-1.3b-chat) architecture.
    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.
    Args:
        text_config (`Union[AutoConfig, dict]`, *optional*, defaults to `LlamaConfig`):
            The config object or dictionary of the text backbone.
        vision_config (`Union[AutoConfig, dict]`, *optional*, defaults to `SiglipVisionConfig`):
            The config object or dictionary of the vision backbone.
        image_token_id (`int`, *optional*, defaults to 100015):
            The index representing image tokens in the model's token vocabulary.
        tie_word_embeddings (`bool`, *optional*, defaults to `True`):
            Whether to tie weight embeddings
    Example:
    ```python
    >>> from transformers import DeepseekVLConfig, DeepseekVLModel
    >>> # Initializing a DeepseekVL deepseek-community/deepseek-vl-1.3b-chat style configuration
    >>> configuration = DeepseekVLConfig()
    >>> # Initializing a model (with random weights) from the deepseek-community/deepseek-vl-1.3b-chat style configuration
    >>> model = DeepseekVLModel(configuration)
    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""
    model_type = "deepseek_vl"
    sub_configs = {"text_config": AutoConfig, "vision_config": AutoConfig}
    def __init__(
        self,
        text_config: AutoConfig | None = None,
        vision_config: AutoConfig | None = None,
        image_token_id: int = 100015,
        tie_word_embeddings: bool | None = True,
        **kwargs,
    ):
        # Sub-configs may arrive as None (fall back to defaults), as plain dicts
        # (coerced through CONFIG_MAPPING below), or as ready config objects.
        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `LlamaConfig` with default values.")
        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. Initializing the `SiglipVisionConfig` with default values.")
        # Dicts are instantiated via CONFIG_MAPPING; `model_type` defaults pick
        # LlamaConfig / SiglipVisionConfig respectively.
        if isinstance(text_config, dict):
            text_config["model_type"] = text_config.get("model_type", "llama")
            text_config = CONFIG_MAPPING[text_config["model_type"]](**text_config)
        if isinstance(vision_config, dict):
            vision_config["model_type"] = vision_config.get("model_type", "siglip_vision_model")
            vision_config = CONFIG_MAPPING[vision_config["model_type"]](**vision_config)
        # Attributes are set before super().__init__ so base-class bookkeeping
        # (serialization, kwargs handling) sees them.
        self.text_config = text_config
        self.vision_config = vision_config
        self.image_token_id = image_token_id
        self.tie_word_embeddings = tie_word_embeddings
        super().__init__(**kwargs)
class DeepseekVLBaseModelOutputWithPast(IdeficsBaseModelOutputWithPast):
    """Base-model output for DeepseekVL; structurally identical to the Idefics variant."""

    pass
class DeepseekVLCausalLMOutputWithPast(IdeficsCausalLMOutputWithPast):
    """Causal-LM output for DeepseekVL; structurally identical to the Idefics variant."""

    pass
class DeepseekVLAligner(nn.Module):
    """Two-layer GELU MLP projecting vision features into the text hidden space."""

    def __init__(self, config):
        super().__init__()
        self.config = config
        vision_dim = config.vision_config.hidden_size
        text_dim = config.text_config.hidden_size
        # Submodule names (linear1/activation/linear2) are part of the state-dict
        # layout and must not change.
        self.linear1 = nn.Linear(vision_dim, text_dim)
        self.activation = nn.GELU()
        self.linear2 = nn.Linear(text_dim, text_dim)

    def forward(self, vision_encodings: torch.Tensor) -> torch.Tensor:
        # Project to the text width, apply GELU, then a second projection.
        return self.linear2(self.activation(self.linear1(vision_encodings)))
class DeepseekVLPreTrainedModel(JanusPreTrainedModel):
    # LlamaDecoderLayer blocks must stay on a single device under model parallelism.
    _no_split_modules = ["LlamaDecoderLayer"]

    def _init_weights(self, module):
        # NOTE(review): raising here appears to signal the modular-file converter
        # not to copy Janus' `_init_weights` into the generated modeling file —
        # confirm against the modular conversion tooling before changing.
        raise AttributeError("No need to inherit!")
@auto_docstring
class DeepseekVLModel(JanusModel):
    """DeepseekVL backbone: Siglip-style vision tower + aligner + Llama-style text model."""

    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.vision_model = AutoModel.from_config(config.vision_config)
        self.aligner = DeepseekVLAligner(config)
        self.language_model = AutoModel.from_config(config=config.text_config)
        self.gradient_checkpointing = False
        # Initialize weights and apply final processing.
        self.post_init()
        # DeepseekVL is understanding-only: remove the image-generation
        # submodules inherited from Janus.
        del self.vqmodel
        del self.generation_embeddings
        del self.generation_aligner
        del self.generation_head
class DeepseekVLForConditionalGeneration(JanusForConditionalGeneration):
    # DeepseekVL only emits text, unlike Janus which can also generate images.
    output_modalities = ("text",)

    # The three methods below disable Janus' image-generation API by raising
    # AttributeError, making the inherited methods effectively absent.
    def prepare_embeddings_for_image_generation(self):
        raise AttributeError("Not needed for DeepseekVL")

    def decode_image_tokens(self):
        raise AttributeError("Not needed for DeepseekVL")

    def generate(self):
        raise AttributeError("Not needed for DeepseekVL")
class DeepseekVLImageProcessor(JanusImageProcessor):
    """Slow image processor for DeepseekVL, reusing Janus' preprocessing pipeline."""

    def __init__(self, **super_kwargs):
        super().__init__(**super_kwargs)

    # DeepseekVL has no image-generation path (see DeepseekVLForConditionalGeneration),
    # so Janus' generation-side helpers are disabled.
    def postprocess(self):
        raise AttributeError("Not needed for DeepseekVL")

    def unnormalize(self):
        raise AttributeError("Not needed for DeepseekVL")
class DeepseekVLImageProcessorFast(JanusImageProcessorFast):
    """Fast (torch-backed) image processor for DeepseekVL, reusing Janus' pipeline."""

    def __init__(self, **super_kwargs):
        super().__init__(**super_kwargs)

    # Generation-side postprocessing is not applicable to DeepseekVL.
    def postprocess(self):
        raise AttributeError("Not needed for DeepseekVL")
class DeepseekVLProcessorKwargs(ProcessingKwargs, total=False):
    # Defaults merged into caller kwargs by `ProcessorMixin._merge_kwargs`.
    _defaults = {
        "text_kwargs": {"padding": False},
        "common_kwargs": {"return_tensors": "pt"},
    }
@auto_docstring
class DeepseekVLProcessor(ProcessorMixin):
    def __init__(
        self,
        image_processor,
        tokenizer,
        chat_template=None,
        num_image_tokens=576,
    ):
        r"""
        num_image_tokens (`int`, *optional*, defaults to 576):
            The number of special image tokens used as placeholders for visual content in text sequences.
        """
        self.image_token = tokenizer.image_token
        self.num_image_tokens = num_image_tokens
        super().__init__(image_processor, tokenizer, chat_template=chat_template)

    @auto_docstring
    def __call__(
        self,
        text: TextInput | PreTokenizedInput | list[TextInput] | list[PreTokenizedInput] = None,
        images: ImageInput | None = None,
        **kwargs: Unpack[DeepseekVLProcessorKwargs],
    ) -> BatchFeature:
        r"""
        Returns:
            [`BatchFeature`]: A [`BatchFeature`] with the following fields:
            - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
            - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
              `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
              `None`).
            - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
        """
        output_kwargs = self._merge_kwargs(
            DeepseekVLProcessorKwargs, tokenizer_init_kwargs=self.tokenizer.init_kwargs, **kwargs
        )
        if text is None and images is None:
            raise ValueError("You must specify either text or images.")
        # Fix: initialize `data` so image-only calls (text=None) no longer raise
        # UnboundLocalError when building the BatchFeature below.
        data = {}
        if text is not None:
            if isinstance(text, str):
                text = [text]
            elif not (isinstance(text, (list, tuple)) and all(isinstance(t, str) for t in text)):
                raise ValueError("Invalid input text. Please provide a string, or a list of strings")
            # Expand each single image placeholder into `num_image_tokens` copies
            # so the tokenizer produces one slot per visual embedding.
            prompt_strings = []
            one_img_tokens = self.image_token * self.num_image_tokens
            for prompt in text:
                prompt = prompt.replace(self.image_token, one_img_tokens)
                prompt_strings.append(prompt)
            data = self.tokenizer(prompt_strings, **output_kwargs["text_kwargs"])
        # process images if pixel_values are provided
        if images is not None:
            data["pixel_values"] = self.image_processor(images, **output_kwargs["images_kwargs"])["pixel_values"]
        return BatchFeature(data=data)

    def batch_decode(self, *args, **kwargs):
        """
        This method forwards all its arguments to LlamaTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please
        refer to the docstring of this method for more information.
        """
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """
        This method forwards all its arguments to LlamaTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to
        the docstring of this method for more information.
        """
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # Union of tokenizer and image-processor input names, de-duplicated
        # while preserving order.
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
# Public API of this module.
__all__ = [
    "DeepseekVLConfig",
    "DeepseekVLPreTrainedModel",
    "DeepseekVLModel",
    "DeepseekVLForConditionalGeneration",
    "DeepseekVLImageProcessor",
    "DeepseekVLImageProcessorFast",
    "DeepseekVLProcessor",
]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/deepseek_vl/modular_deepseek_vl.py",
"license": "Apache License 2.0",
"lines": 227,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/deepseek_vl_hybrid/convert_deepseek_vl_hybrid_weights_to_hf.py | # Copyright 2025 Deepseek AI and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import gc
import json
import os
import regex as re
import torch
from huggingface_hub import snapshot_download
from huggingface_hub.errors import HFValidationError
from safetensors.torch import load_file
from transformers import (
AutoTokenizer,
DeepseekVLHybridConfig,
DeepseekVLHybridForConditionalGeneration,
DeepseekVLHybridImageProcessor,
DeepseekVLHybridProcessor,
)
from transformers.image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
PILImageResampling,
)
# fmt: off
# Regex rename map: original DeepseekAI checkpoint keys (left) -> HF keys (right).
# Back-references (\1, \2, ...) re-insert captured layer indices and weight/bias
# suffixes; the "(q|k|v)" group on the Siglip qkv line is expanded later by
# get_qkv_state_dict.
ORIGINAL_TO_CONVERTED_KEY_MAPPING = {
    # # Sam (High Resolution)
    r"vision_model.vision_tower_high.vision_tower.pos_embed": r"model.high_res_vision_model.vision_encoder.pos_embed",
    r"vision_model.vision_tower_high.vision_tower.patch_embed.proj.(weight|bias)": r"model.high_res_vision_model.vision_encoder.patch_embed.projection.\1",
    r"vision_model.vision_tower_high.vision_tower.blocks.(\d+).norm(\d+).(weight|bias)": r"model.high_res_vision_model.vision_encoder.layers.\1.layer_norm\2.\3",
    r"vision_model.vision_tower_high.vision_tower.blocks.(\d+).attn.rel_pos_(h|w)": r"model.high_res_vision_model.vision_encoder.layers.\1.attn.rel_pos_\2",
    r"vision_model.vision_tower_high.vision_tower.blocks.(\d+).attn.qkv.(weight|bias)": r"model.high_res_vision_model.vision_encoder.layers.\1.attn.qkv.\2",
    r"vision_model.vision_tower_high.vision_tower.blocks.(\d+).attn.proj.(weight|bias)": r"model.high_res_vision_model.vision_encoder.layers.\1.attn.proj.\2",
    r"vision_model.vision_tower_high.vision_tower.blocks.(\d+).mlp.lin(\d+).(weight|bias)": r"model.high_res_vision_model.vision_encoder.layers.\1.mlp.lin\2.\3",
    r"vision_model.vision_tower_high.vision_tower.neck.0.weight": r"model.high_res_vision_model.vision_encoder.neck.conv1.weight",
    r"vision_model.vision_tower_high.vision_tower.neck.1.(weight|bias)": r"model.high_res_vision_model.vision_encoder.neck.layer_norm1.\1",
    r"vision_model.vision_tower_high.vision_tower.neck.2.weight": r"model.high_res_vision_model.vision_encoder.neck.conv2.weight",
    r"vision_model.vision_tower_high.vision_tower.neck.3.(weight|bias)": r"model.high_res_vision_model.vision_encoder.neck.layer_norm2.\1",
    r"vision_model.vision_tower_high.vision_tower.neck_hd.0.weight": r"model.high_res_vision_neck.conv1.weight",
    r"vision_model.vision_tower_high.vision_tower.neck_hd.1.(weight|bias)": r"model.high_res_vision_neck.layer_norm1.\1",
    r"vision_model.vision_tower_high.vision_tower.neck_hd.2.weight": r"model.high_res_vision_neck.conv2.weight",
    r"vision_model.vision_tower_high.vision_tower.neck_hd.3.(weight|bias)": r"model.high_res_vision_neck.layer_norm2.\1",
    r"vision_model.vision_tower_high.vision_tower.downsamples.0.weight": r"model.high_res_vision_proj.conv1.weight",
    r"vision_model.vision_tower_high.vision_tower.downsamples.1.weight": r"model.high_res_vision_proj.conv2.weight",
    r"vision_model.vision_tower_high.vision_tower.hd_alpha_downsamples": r"model.high_res_vision_alpha",
    # Siglip (Low Resolution)
    r"vision_model.vision_tower_low.vision_tower.pos_embed": r"model.vision_model.vision_model.embeddings.position_embedding.weight",
    r"vision_model.vision_tower_low.vision_tower.patch_embed.proj.(weight|bias)": r"model.vision_model.vision_model.embeddings.patch_embedding.\1",
    r"vision_model.vision_tower_low.vision_tower.blocks.(\d+).attn.qkv.(weight|bias)": r"model.vision_model.vision_model.encoder.layers.\1.self_attn.(q|k|v)_proj.\2",
    r"vision_model.vision_tower_low.vision_tower.blocks.(\d+).attn.proj.(weight|bias)": r"model.vision_model.vision_model.encoder.layers.\1.self_attn.out_proj.\2",
    r"vision_model.vision_tower_low.vision_tower.blocks.(\d+).norm(\d+).(weight|bias)": r"model.vision_model.vision_model.encoder.layers.\1.layer_norm\2.\3",
    r"vision_model.vision_tower_low.vision_tower.blocks.(\d+).mlp.fc(\d+).(weight|bias)": r"model.vision_model.vision_model.encoder.layers.\1.mlp.fc\2.\3",
    r"vision_model.vision_tower_low.vision_tower.norm.(weight|bias)": r"model.vision_model.vision_model.post_layernorm.\1",
    r"vision_model.vision_tower_low.vision_tower.attn_pool.latent": r"model.vision_model.vision_model.head.probe",
    r"vision_model.vision_tower_low.vision_tower.attn_pool.proj.(weight|bias)": r"model.vision_model.vision_model.head.attention.out_proj.\1",
    r"vision_model.vision_tower_low.vision_tower.attn_pool.norm.(weight|bias)": r"model.vision_model.vision_model.head.layernorm.\1",
    r"vision_model.vision_tower_low.vision_tower.attn_pool.mlp.fc(\d+).(weight|bias)": r"model.vision_model.vision_model.head.mlp.fc\1.\2",
    # Vision Projection
    r"aligner.layers.1.(weight|bias)": r"model.aligner.proj.\1",
    r"aligner.low_up_proj.(weight|bias)": r"model.aligner.vision_proj.\1",
    r"aligner.high_up_proj.(weight|bias)": r"model.aligner.high_res_vision_proj.\1",
    # Llama (Text Model)
    r"language_model.model.(\w+)": r"model.language_model.\1",
    r"language_model.lm_head.(weight|bias)": r"lm_head.\1",
}
# fmt: on
# Adopted from https://github.com/deepseek-ai/DeepSeek-VL/blob/main/deepseek_vl/utils/conversation.py#L80-L91
# Jinja chat template: fixed system prompt, "User:"/"Assistant:" turns joined by
# role-specific separators, and <image_placeholder> markers for image content.
CHAT_TEMPLATE = (
    # Define separators and initialize counter
    "{% set seps = ['\n\n', '<\uff5cend\u2581of\u2581sentence\uff5c>'] %}"
    "{% set i = 0 %}"
    # Start with default system prompt
    "You are a helpful language and vision assistant. "
    "You are able to understand the visual content that the user provides, "
    "and assist the user with a variety of tasks using natural language.\n\n"
    # Iterate through messages
    "{% for message in messages %}"
    # Identify user or assistant role
    "{% if message['role']|lower == 'user' %}"
    "User: "
    "{% elif message['role']|lower == 'assistant' %}"
    "Assistant:{% if not (loop.last and not add_generation_prompt and message['content'][0]['type']=='text' and message['content'][0]['text']=='') %} {% endif %}"
    "{% else %}"
    "{{ message['role'].capitalize() }}: "
    "{% endif %}"
    # Iterate through message content (text/images)
    "{% for content in message['content'] %}"
    # If content is an image, replace with placeholder
    "{% if content['type'] == 'image' %}"
    "<image_placeholder>"
    # If content is text, handle formatting
    "{% elif content['type'] == 'text' %}"
    "{% set text = content['text'] %}"
    # Strip whitespace for first and last text blocks
    "{% if loop.first %}{% set text = text.lstrip() %}{% endif %}"
    "{% if loop.last %}{% set text = text.rstrip() %}{% endif %}"
    # If previous content was text, add space
    "{% if not loop.first and message['content'][loop.index0-1]['type'] == 'text' %}"
    "{{ ' ' + text }}"
    "{% else %}"
    "{{ text }}"
    "{% endif %}"
    "{% endif %}"
    "{% endfor %}"  # End message content loop
    # Add separators between messages
    "{% if not loop.last or add_generation_prompt %}"
    "{% if message['role']|lower == 'user' %}"
    "{{ seps[0] }}"
    "{% else %}"
    "{{ seps[1] }}"
    "{% endif %}"
    "{% endif %}"
    "{% endfor %}"  # End messages loop
    # Add final Assistant prompt if required
    "{% if add_generation_prompt %}Assistant:{% endif %}"
)
def convert_old_keys_to_new_keys(state_dict_keys: dict):
    """Map original checkpoint key names to their HF equivalents.

    The keys are joined into one newline-separated string so that every regex in
    ORIGINAL_TO_CONVERTED_KEY_MAPPING is applied in a single pass over all keys,
    then the old and new lines are zipped back into a dict.
    """
    old_text = "\n".join(state_dict_keys)
    new_text = old_text
    for pattern, replacement in ORIGINAL_TO_CONVERTED_KEY_MAPPING.items():
        # A None replacement deletes the key text (leaving an empty line).
        new_text = re.sub(pattern, "" if replacement is None else replacement, new_text)
    return dict(zip(old_text.split("\n"), new_text.split("\n")))
def get_qkv_state_dict(key, parameter):
    """Split a fused qkv parameter into separate per-projection entries.

    A key such as ``xxxx.(q|k|v).xxx`` with a parameter of shape ``(m, n)`` is
    expanded to ``xxxx.q.xxx``, ``xxxx.k.xxx`` and ``xxxx.v.xxx``, each of shape
    ``(m // 3, n)`` (an even row-wise split along dim 0).
    """
    placeholder = re.search(r"(\(.*?\))", key).group(1)  # e.g. "(q|k|v)"
    names = placeholder[1:-1].split("|")  # e.g. ["q", "k", "v"]
    chunks = torch.split(parameter, split_size_or_sections=parameter.size(0) // len(names), dim=0)
    return {key.replace(placeholder, name): chunk for name, chunk in zip(names, chunks)}
def update_state_dict(old_state_dict):
    """Rename (and where needed reshape/split) original tensors to the HF layout."""
    all_keys = list(old_state_dict.keys())
    new_keys = convert_old_keys_to_new_keys(all_keys)
    state_dict = {}
    for key in all_keys:
        new_key = new_keys[key]
        parameter = old_state_dict.pop(key)
        is_high_res = "vision_tower_high" in key
        if "qkv" in key and not is_high_res:
            # Siglip's fused qkv weights are split into q/k/v projections.
            state_dict.update(get_qkv_state_dict(new_key, parameter))
        elif "pos_embed" in key and not is_high_res:
            # timm's siglip stores pos_embed as [1, 576, 1024];
            # transformers expects [576, 1024].
            state_dict[new_key] = parameter.squeeze(0)
        else:
            # SAM-tower tensors (and everything else) are copied unchanged.
            state_dict[new_key] = parameter
    return state_dict
def load_model_state_dict(input_path: str) -> dict:
    """
    Load a model state dict from `input_path`, handling both a single
    `model.safetensors` file and a sharded checkpoint with an index file.
    """
    index_path = os.path.join(input_path, "model.safetensors.index.json")
    single_file_path = os.path.join(input_path, "model.safetensors")
    if os.path.exists(index_path):
        # Sharded checkpoint: read the index, then load each unique shard once.
        print("Loading sharded model...")
        state_dict = {}
        with open(index_path, "r") as f:
            index = json.load(f)
        for shard_file in sorted(set(index["weight_map"].values())):
            print(f"Loading shard {shard_file}...")
            state_dict.update(load_file(os.path.join(input_path, shard_file)))
        return state_dict
    if os.path.exists(single_file_path):
        print("Loading single file model...")
        return load_file(single_file_path, device="cpu")
    raise ValueError(f"No model files found in {input_path}")
def convert_model(
    hf_repo_id: str,
    output_dir: str | None = None,
    output_hub_path: str | None = None,
):
    """Convert an original DeepSeek-VL-Hybrid checkpoint to the HF format.

    Downloads (or reads locally) the original weights, builds the HF config,
    processor and model, converts the state dict, then saves locally and/or
    pushes to the Hub.

    Args:
        hf_repo_id: HF Hub repo id of the original weights, or a local path.
        output_dir: Optional local directory for the converted artifacts.
        output_hub_path: Optional Hub repo id to push the converted artifacts to.
    """
    if output_dir:
        os.makedirs(output_dir, exist_ok=True)
    try:
        input_path = snapshot_download(hf_repo_id)
    except HFValidationError:
        # If the input path is not a HF repo ID, assume it's a local path
        input_path = hf_repo_id
    # ------------------------------------------------------------
    # Create and save config
    # ------------------------------------------------------------
    # Hyperparameters are hard-coded to the deepseek-vl-7b-chat architecture.
    config = DeepseekVLHybridConfig(
        text_config={
            "hidden_size": 4096,
            "intermediate_size": 11008,
            "max_position_embeddings": 16384,
            "num_attention_heads": 32,
            "num_hidden_layers": 30,
            "vocab_size": 102400,
        },
        vision_config={
            "hidden_size": 1024,
            "intermediate_size": 4096,
            "image_size": 384,
            "patch_size": 16,
            "hidden_act": "gelu",
            "vision_use_head": False,
            "num_attention_heads": 16,
            "num_hidden_layers": 24,
        },
        high_res_vision_config={
            "hidden_size": 768,
            "intermediate_size": 3072,
            "image_size": 1024,
            "patch_size": 16,
            "num_attention_heads": 12,
            "num_hidden_layers": 12,
        },
    )
    # save config
    if output_dir:
        config.save_pretrained(output_dir)
        print("Model config saved successfully...")
    # ------------------------------------------------------------
    # Convert processor
    # ------------------------------------------------------------
    # Low-res (Siglip) and high-res (SAM) towers use different normalization stats.
    image_processor = DeepseekVLHybridImageProcessor(
        image_mean=IMAGENET_STANDARD_MEAN,
        image_std=IMAGENET_STANDARD_STD,
        high_res_image_mean=OPENAI_CLIP_MEAN,
        high_res_image_std=OPENAI_CLIP_STD,
        resample=PILImageResampling.BILINEAR,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        input_path,
        extra_special_tokens={
            "pad_token": "<|end▁of▁sentence|>",
            "image_token": "<image_placeholder>",
        },
    )
    processor = DeepseekVLHybridProcessor(
        image_processor=image_processor,
        tokenizer=tokenizer,
        chat_template=CHAT_TEMPLATE,
    )
    if output_dir:
        print(f"Saving processor to {output_dir}...")
        processor.save_pretrained(output_dir)
    if output_hub_path:
        print(f"Pushing processor to hub at {output_hub_path}...")
        processor.push_to_hub(output_hub_path)
    # ------------------------------------------------------------
    # Convert weights
    # ------------------------------------------------------------
    print("Creating empty model...")
    # Instantiate on the meta device so no memory is allocated for weights yet.
    with torch.device("meta"):
        model = DeepseekVLHybridForConditionalGeneration(config)
    # Load and convert state dict
    print("Loading state dict...")
    state_dict = load_model_state_dict(input_path)
    state_dict = update_state_dict(state_dict)
    # Load converted state dict
    print("Loading converted weights into model...")
    # strict=False tolerates unexpected checkpoint keys; genuinely missing keys
    # are still rejected explicitly below. assign=True materializes meta tensors.
    info = model.load_state_dict(state_dict, strict=False, assign=True)
    if len(info.missing_keys) > 0:
        raise ValueError(f"Missing keys: {info.missing_keys}")
    # Tie weights before any device mapping
    print("Tying weights...")
    model.tie_weights()
    # Save the model
    if output_dir:
        print(f"Saving model to {output_dir}...")
        model.save_pretrained(output_dir)
    if output_hub_path:
        print(f"Pushing model to hub at {output_hub_path}...")
        model.push_to_hub(output_hub_path)
    del state_dict, model
    gc.collect()
    # Validate the saved model if saved locally
    if output_dir:
        print("Reloading the local model to check if it's saved correctly...")
        DeepseekVLHybridForConditionalGeneration.from_pretrained(output_dir, device_map="auto")
        print("Local model reloaded successfully.")
def main():
    """CLI entry point: parse arguments and run the DeepSeek-VL-Hybrid conversion."""
    parser = argparse.ArgumentParser()
    # (flag, default, help) triples for the three supported options.
    for flag, default, help_text in (
        ("--hf_repo_id", "deepseek-ai/deepseek-vl-7b-chat", "Location of official weights from DeepseekAI on HF"),
        ("--output_dir", None, "Location to write the converted model and processor"),
        ("--output_hub_path", None, "Repository ID to push model to hub (e.g. 'username/model-name')"),
    ):
        parser.add_argument(flag, default=default, help=help_text)
    args = parser.parse_args()
    convert_model(
        hf_repo_id=args.hf_repo_id,
        output_dir=args.output_dir,
        output_hub_path=args.output_hub_path,
    )
if __name__ == "__main__":
    main()
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/deepseek_vl_hybrid/convert_deepseek_vl_hybrid_weights_to_hf.py",
"license": "Apache License 2.0",
"lines": 335,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/deepseek_vl_hybrid/modular_deepseek_vl_hybrid.py | # Copyright 2025 Deepseek AI and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import Optional, Union
import torch
import torch.nn as nn
import torchvision.transforms.v2.functional as tvF
from ... import initialization as init
from ...cache_utils import Cache
from ...image_processing_utils_fast import (
BaseImageProcessorFast,
BatchFeature,
get_size_dict,
group_images_by_shape,
reorder_images,
)
from ...image_transforms import convert_to_rgb, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
SizeDict,
infer_channel_dimension_format,
is_scaled_image,
make_flat_list_of_images,
pil_torch_interpolation_mapping,
to_numpy_array,
valid_images,
validate_preprocess_arguments,
)
from ...modeling_outputs import BaseModelOutputWithPooling
from ...processing_utils import ImagesKwargs, Unpack
from ...tokenization_utils_base import (
PreTokenizedInput,
TextInput,
)
from ...utils import (
TensorType,
TransformersKwargs,
auto_docstring,
can_return_tuple,
filter_out_non_signature_kwargs,
logging,
)
from ..auto import CONFIG_MAPPING, AutoConfig, AutoModel
from ..deepseek_vl.configuration_deepseek_vl import DeepseekVLConfig
from ..deepseek_vl.image_processing_deepseek_vl import DeepseekVLImageProcessor
from ..deepseek_vl.image_processing_deepseek_vl_fast import DeepseekVLImageProcessorFast
from ..deepseek_vl.modeling_deepseek_vl import (
DeepseekVLForConditionalGeneration,
DeepseekVLModel,
DeepseekVLPreTrainedModel,
)
from ..deepseek_vl.processing_deepseek_vl import DeepseekVLProcessor, DeepseekVLProcessorKwargs
from ..idefics.modeling_idefics import IdeficsBaseModelOutputWithPast, IdeficsCausalLMOutputWithPast
from ..sam.modeling_sam import SamLayerNorm, SamVisionNeck
logger = logging.get_logger(__name__)
DEEPSEEK_VL_COMMON_CUSTOM_ARGS = r"""
high_res_pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size), *optional*):
The tensors corresponding to the input images. Pixel values can be obtained using
[`AutoImageProcessor`].
"""
class DeepseekVLHybridConfig(DeepseekVLConfig):
    r"""
    This is the configuration class to store the configuration of a [`DeepseekVLHybridModel`]. It is used to instantiate a
    DeepseekVLHybrid model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of the DeepseekVLHybrid
    [deepseek-community/deepseek-vl-7b-chat](https://huggingface.co/deepseek-community/deepseek-vl-7b-chat) architecture.
    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.
    Args:
        text_config (`Union[AutoConfig, dict]`, *optional*, defaults to `LlamaConfig`):
            The config object or dictionary of the text backbone.
        vision_config (`Union[AutoConfig, dict]`, *optional*, defaults to `SiglipVisionConfig`):
            The config object or dictionary of the vision backbone.
        high_res_vision_config (`Union[AutoConfig, dict]`, *optional*, defaults to `SamVisionConfig`):
            The config object or dictionary of the high resolution vision backbone.
        image_token_id (`int`, *optional*, defaults to 100015):
            The index representing image tokens in the model's token vocabulary.
        tie_word_embeddings (`bool`, *optional*, defaults to `True`):
            Whether to tie weight embeddings
    Example:
    ```python
    >>> from transformers import DeepseekVLHybridConfig, DeepseekVLHybridModel
    >>> # Initializing a DeepseekVLHybrid deepseek-community/deepseek-vl-7b-chat style configuration
    >>> configuration = DeepseekVLHybridConfig()
    >>> # Initializing a model (with random weights) from the deepseek-community/deepseek-vl-7b-chat style configuration
    >>> model = DeepseekVLHybridModel(configuration)
    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""
    model_type = "deepseek_vl_hybrid"
    sub_configs = {"text_config": AutoConfig, "vision_config": AutoConfig, "high_res_vision_config": AutoConfig}
    def __init__(
        self,
        text_config: AutoConfig | None = None,
        vision_config: AutoConfig | None = None,
        high_res_vision_config: AutoConfig | None = None,
        image_token_id: int = 100015,
        tie_word_embeddings: bool | None = True,
        **kwargs,
    ):
        # Only the extra SAM (high resolution) sub-config is handled here; the
        # text/vision sub-configs are normalized by the parent constructor.
        if high_res_vision_config is None:
            high_res_vision_config = {}
            logger.info("`high_res_vision_config` is `None`. Initializing the `SamVisionConfig` with default values.")
        if isinstance(high_res_vision_config, dict):
            high_res_vision_config["model_type"] = high_res_vision_config.get("model_type", "sam_vision_model")
            high_res_vision_config = CONFIG_MAPPING[high_res_vision_config["model_type"]](**high_res_vision_config)
        self.high_res_vision_config = high_res_vision_config
        super().__init__(
            text_config=text_config,
            vision_config=vision_config,
            image_token_id=image_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )
@dataclass
@auto_docstring
class BaseModelOutputWithHighResVisionEncodings(BaseModelOutputWithPooling):
    r"""
    high_res_vision_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
        Sequence of hidden-states at the output of the last layer of the high resolution vision model.
    high_res_vision_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
        Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the high resolution vision model has an embedding layer, +
        one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
        Hidden-states of the high resolution vision model at the output of each layer plus the optional initial embedding outputs.
    high_res_vision_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
        Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
        sequence_length)` from the high resolution vision model.
        Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
        heads.
    """
    # Extra outputs from the SAM high-resolution tower, alongside the inherited
    # pooled Siglip outputs.
    high_res_vision_last_hidden_state: torch.FloatTensor | None = None
    high_res_vision_hidden_states: tuple[torch.FloatTensor] | None = None
    high_res_vision_attentions: tuple[torch.FloatTensor] | None = None
class DeepseekVLHybridBaseModelOutputWithPast(IdeficsBaseModelOutputWithPast):
    """Base-model output for DeepseekVLHybrid; structurally identical to the Idefics variant."""

    pass
class DeepseekVLHybridCausalLMOutputWithPast(IdeficsCausalLMOutputWithPast):
    """Causal-LM output for DeepseekVLHybrid; structurally identical to the Idefics variant."""

    pass
class DeepseekVLHybridLayerNorm(SamLayerNorm):
    """Channel-configurable layer norm reused from SAM."""

    pass
class DeepseekVLSamVisionNeck(SamVisionNeck):
    """SAM vision neck reused for the high-resolution tower."""

    def __init__(self, config):
        # NOTE(review): the pass-through __init__ looks redundant but is kept as
        # written — in modular files it can affect the generated code; confirm
        # before removing.
        super().__init__(config)
class DeepseekVLSamVisionProj(nn.Module):
    """Downsamples interpolated SAM features to match the Siglip token grid."""

    def __init__(self, config, output_size: int = 24):
        super().__init__()
        self.config = config
        self.output_size = output_size
        channels = config.output_channels
        # Two stride-2 convs: spatial size /4, channel count x4 overall.
        self.conv1 = nn.Conv2d(channels, channels * 2, kernel_size=3, stride=2, padding=1, bias=False)
        self.conv2 = nn.Conv2d(channels * 2, channels * 4, kernel_size=3, stride=2, padding=1, bias=False)

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        # Resize to 4x the target grid, then halve the spatial size twice with
        # the strided convolutions so the output is (output_size, output_size).
        resized = torch.nn.functional.interpolate(
            features,
            size=(4 * self.output_size, 4 * self.output_size),
            mode="bilinear",
            align_corners=False,
        )
        return self.conv2(self.conv1(resized))
class DeepseekVLHybridAligner(nn.Module):
    """Fuses low-res (Siglip) and high-res (SAM) features into the text hidden space."""

    def __init__(self, config: DeepseekVLHybridConfig):
        super().__init__()
        in_channels = config.vision_config.hidden_size
        high_res_in_channels = config.high_res_vision_config.output_channels * 4
        out_channels = config.text_config.hidden_size
        # Each stream is projected to half the text width before concatenation.
        self.vision_proj = nn.Linear(in_channels, out_channels // 2)
        self.high_res_vision_proj = nn.Linear(high_res_in_channels, out_channels // 2)
        self.act = nn.GELU()
        self.proj = nn.Linear(out_channels, out_channels)

    def forward(
        self,
        vision_encodings: torch.Tensor,
        high_res_vision_encodings: torch.Tensor,
    ) -> torch.Tensor:
        low = self.vision_proj(vision_encodings)
        high = self.high_res_vision_proj(high_res_vision_encodings)
        # Concatenate high-res then low-res along the feature dim.
        fused = torch.concat([high, low], dim=-1)
        return self.proj(self.act(fused))
class DeepseekVLHybridPreTrainedModel(DeepseekVLPreTrainedModel):
    @torch.no_grad()
    def _init_weights(self, module):
        """Initialize the weights"""
        # Linear layers: normal init scaled by the text config's initializer range.
        if isinstance(module, nn.Linear):
            init.normal_(module.weight, mean=0.0, std=self.config.text_config.initializer_range)
            if module.bias is not None:
                init.zeros_(module.bias)
        # Conv layers (high-res projection): He init for the weights, zero bias.
        elif isinstance(module, nn.Conv2d):
            init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
            if module.bias is not None:
                init.zeros_(module.bias)
        # Custom layer norm: identity scale, zero shift.
        elif isinstance(module, DeepseekVLHybridLayerNorm):
            init.ones_(module.weight)
            init.zeros_(module.bias)
        # The high-res mixing coefficient starts at zero, so at init the model
        # uses only the projected last hidden state of the high-res encoder.
        elif isinstance(module, DeepseekVLHybridModel):
            init.zeros_(module.high_res_vision_alpha)
class DeepseekVLHybridModel(DeepseekVLModel):
    """DeepseekVL model extended with a second, high-resolution vision tower whose
    features are projected down to the low-resolution token grid and fused with
    the regular vision features via the aligner."""
    def __init__(self, config):
        # Spatial size (in patches) of the low-res vision encoder output; the
        # high-res features are projected down to this same grid.
        self.output_size = config.vision_config.image_size // config.vision_config.patch_size
        # Layer whose hidden state is routed through the neck as a "global" feature.
        self.global_attn_index = config.high_res_vision_config.global_attn_indexes[0]
        self.high_res_vision_model = AutoModel.from_config(config.high_res_vision_config)
        self.high_res_vision_neck = DeepseekVLSamVisionNeck(config.high_res_vision_config)
        self.high_res_vision_proj = DeepseekVLSamVisionProj(
            config.high_res_vision_config, output_size=self.output_size
        )
        # Learnable mixing weight for the global high-res features
        # (zero-initialized in `_init_weights`, so it is a no-op at init).
        self.high_res_vision_alpha = nn.Parameter(torch.zeros(1))
        # NOTE(review): sub-modules/parameters are assigned before super().__init__();
        # plain nn.Module would reject this — presumably handled by the modular
        # conversion or the parent class. Confirm.
        super().__init__(config)
    def get_low_res_image_features(self, pixel_values: torch.FloatTensor, **kwargs: Unpack[TransformersKwargs]):
        # Standard (low-res) vision tower forward pass.
        return self.vision_model(pixel_values, return_dict=True, **kwargs)
    def get_high_res_image_features(
        self,
        pixel_values: torch.FloatTensor,
        output_hidden_states: bool | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ):
        # `output_hidden_states` is accepted but ignored: hidden states are always
        # required below to extract the global-attention layer's output.
        high_res_outputs = self.high_res_vision_model(
            pixel_values=pixel_values,
            output_hidden_states=True,  # Ignore arg on purpose
            return_dict=True,
            **kwargs,
        )
        last_hidden_state = high_res_outputs.last_hidden_state
        last_hidden_state = self.high_res_vision_proj(last_hidden_state)
        hidden_states = high_res_outputs.hidden_states
        global_hidden_state = hidden_states[self.global_attn_index + 1]  # +1 for embedding layer
        global_hidden_state = self.high_res_vision_neck(global_hidden_state)
        global_hidden_state = self.high_res_vision_proj(global_hidden_state)
        # Mix the global features in with a learnable (zero-initialized) weight.
        output = last_hidden_state + global_hidden_state * self.high_res_vision_alpha
        # batch_size, hidden_size, height, width -> batch_size, seq_len, hidden_size
        output = output.permute(0, 2, 3, 1)
        output = output.reshape(output.shape[0], -1, output.shape[-1])
        high_res_outputs.last_hidden_state = output
        return high_res_outputs
    @can_return_tuple
    @auto_docstring(custom_args=DEEPSEEK_VL_COMMON_CUSTOM_ARGS)
    def get_image_features(
        self,
        pixel_values: torch.FloatTensor,
        high_res_pixel_values: torch.FloatTensor,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple | BaseModelOutputWithHighResVisionEncodings:
        # Run both towers, then fuse their last hidden states via the aligner.
        low_res_outputs = self.get_low_res_image_features(pixel_values, **kwargs)
        high_res_outputs = self.get_high_res_image_features(high_res_pixel_values, **kwargs)
        image_features = self.aligner(low_res_outputs.last_hidden_state, high_res_outputs.last_hidden_state)
        # Fused features are exposed as `pooler_output`; per-tower outputs are kept alongside.
        return BaseModelOutputWithHighResVisionEncodings(
            last_hidden_state=low_res_outputs.last_hidden_state,
            pooler_output=image_features,
            hidden_states=low_res_outputs.hidden_states,
            attentions=low_res_outputs.attentions,
            high_res_vision_last_hidden_state=high_res_outputs.last_hidden_state,
            high_res_vision_hidden_states=high_res_outputs.hidden_states,
            high_res_vision_attentions=high_res_outputs.attentions,
        )
    @can_return_tuple
    @auto_docstring(custom_args=DEEPSEEK_VL_COMMON_CUSTOM_ARGS)
    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        pixel_values: torch.FloatTensor | None = None,
        high_res_pixel_values: torch.FloatTensor | None = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: Cache | None = None,
        cache_position: torch.LongTensor | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        use_cache: bool | None = None,
        logits_to_keep: int | torch.Tensor = 0,
        **kwargs,
    ) -> DeepseekVLHybridBaseModelOutputWithPast:
        # Exactly one of input_ids / inputs_embeds must be provided.
        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError(
                "You cannot specify both input_ids and inputs_embeds at the same time, and must specify either one"
            )
        # Both image streams are required together: the aligner fuses them pairwise.
        if pixel_values is not None and high_res_pixel_values is None:
            raise ValueError("Both pixel_values and high_res_pixel_values should be specified at the same time")
        if inputs_embeds is None:
            inputs_embeds = self.get_input_embeddings()(input_ids)
        if pixel_values is not None:
            if input_ids is None:
                # No token ids available: locate image positions by comparing each
                # embedding against the image-token embedding vector.
                image_attention_mask = inputs_embeds == self.get_input_embeddings()(
                    torch.tensor(self.config.image_token_id, dtype=torch.long, device=inputs_embeds.device)
                )
                image_attention_mask = image_attention_mask.all(-1)
            else:
                image_attention_mask = input_ids == self.config.image_token_id
            image_attention_mask = image_attention_mask.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device)
            image_embeds = self.get_image_features(pixel_values, high_res_pixel_values, return_dict=True).pooler_output
            image_features = image_embeds.reshape(-1, inputs_embeds.shape[-1])
            image_features = image_features.to(inputs_embeds.device, inputs_embeds.dtype)
            # Splice the fused image features into the placeholder token positions.
            inputs_embeds = inputs_embeds.masked_scatter(image_attention_mask, image_features)
        lm_output = self.language_model(
            inputs_embeds=inputs_embeds,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            use_cache=use_cache,
            cache_position=cache_position,
            logits_to_keep=logits_to_keep,
            **kwargs,
        )
        return DeepseekVLHybridBaseModelOutputWithPast(
            last_hidden_state=lm_output.last_hidden_state,
            past_key_values=lm_output.past_key_values,
            hidden_states=lm_output.hidden_states,
            attentions=lm_output.attentions,
            image_hidden_states=image_embeds if pixel_values is not None else None,
        )
class DeepseekVLHybridForConditionalGeneration(DeepseekVLForConditionalGeneration):
    """DeepseekVLHybrid model with a language-modeling head for conditional generation."""
    @can_return_tuple
    @auto_docstring(custom_args=DEEPSEEK_VL_COMMON_CUSTOM_ARGS)
    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        pixel_values: torch.FloatTensor | None = None,
        high_res_pixel_values: torch.FloatTensor | None = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: Cache | None = None,
        cache_position: torch.LongTensor | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        labels: torch.LongTensor | None = None,
        use_cache: bool | None = None,
        logits_to_keep: int | torch.Tensor = 0,
        **kwargs: Unpack[TransformersKwargs],
    ) -> DeepseekVLHybridCausalLMOutputWithPast:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
        """
        outputs = self.model(
            input_ids=input_ids,
            pixel_values=pixel_values,
            high_res_pixel_values=high_res_pixel_values,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            cache_position=cache_position,
            **kwargs,
        )
        hidden_states = outputs.last_hidden_state
        # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
        slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
        logits = self.lm_head(hidden_states[:, slice_indices, :])
        loss = None
        if labels is not None:
            # Shift/flatten handling is delegated to the shared loss_function.
            loss = self.loss_function(
                logits=logits, labels=labels, vocab_size=self.config.text_config.vocab_size, **kwargs
            )
        return DeepseekVLHybridCausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            image_hidden_states=outputs.image_hidden_states,
        )
    def prepare_inputs_for_generation(
        self,
        input_ids,
        past_key_values=None,
        inputs_embeds=None,
        pixel_values=None,
        high_res_pixel_values=None,
        attention_mask=None,
        cache_position=None,
        logits_to_keep=None,
        is_first_iteration=False,
        **kwargs,
    ):
        """Build generation inputs; both pixel-value streams are forwarded only on the first
        iteration (or when caching is disabled), since image features are merged once and cached."""
        model_inputs = super().prepare_inputs_for_generation(
            input_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            attention_mask=attention_mask,
            cache_position=cache_position,
            logits_to_keep=logits_to_keep,
            is_first_iteration=is_first_iteration,
            **kwargs,
        )
        if is_first_iteration or not kwargs.get("use_cache", True):
            # Pixel values are used only in the first iteration if available
            # In subsequent iterations, they are already merged with text and cached
            # NOTE: first iteration doesn't have to be prefill, it can be the first
            # iteration with a question and cached system prompt (continue generate from cache)
            model_inputs["pixel_values"] = pixel_values
            model_inputs["high_res_pixel_values"] = high_res_pixel_values
        return model_inputs
class DeepseekVLHybridImageProcessorKwargs(ImagesKwargs, total=False):
    r"""
    min_size (`int`, *optional*, defaults to 14):
        The minimum allowed size for the resized image. Ensures that neither the height nor width
        falls below this value after resizing.
    high_res_size (`dict`, *optional*, defaults to `{"height": 1024, "width": 1024}`):
        Size of the high resolution output image after resizing. Can be overridden by the `high_res_size` parameter in the `preprocess`
        method.
    high_res_resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`):
        Resampling filter to use if resizing the image. Only has an effect if `do_resize` is set to `True`. Can be
        overridden by the `high_res_resample` parameter in the `preprocess` method.
    high_res_image_mean (`float` or `list[float]`, *optional*, defaults to `OPENAI_CLIP_MEAN`):
        Mean to use if normalizing the high resolution image. This is a float or list of floats the length of the number of
        channels in the image. Can be overridden by the `high_res_image_mean` parameter in the `preprocess` method.
    high_res_image_std (`float` or `list[float]`, *optional*, defaults to `OPENAI_CLIP_STD`):
        Standard deviation to use if normalizing the high resolution image. This is a float or list of floats the length of the
        number of channels in the image. Can be overridden by the `high_res_image_std` parameter in the `preprocess` method.
    """
    # All keys are optional (total=False); actual defaults live on the processor classes.
    min_size: int
    high_res_size: dict
    high_res_resample: Union["PILImageResampling", int]
    high_res_image_mean: float | list[float] | tuple[float, ...]
    high_res_image_std: float | list[float] | tuple[float, ...]
class DeepseekVLHybridImageProcessor(DeepseekVLImageProcessor):
    r"""
    Constructs a DEEPSEEK_VL_HYBRID image processor.
    Args:
        do_resize (`bool`, *optional*, defaults to `True`):
            Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by the
            `do_resize` parameter in the `preprocess` method.
        size (`dict`, *optional*, defaults to `{"height": 384, "width": 384}`):
            Size of the output image after resizing. Can be overridden by the `size` parameter in the `preprocess`
            method.
        high_res_size (`dict`, *optional*, defaults to `{"height": 1024, "width": 1024}`):
            Size of the high resolution output image after resizing. Can be overridden by the `high_res_size` parameter in the `preprocess`
            method.
        min_size (`int`, *optional*, defaults to 14):
            The minimum allowed size for the resized image. Ensures that neither the height nor width
            falls below this value after resizing.
        resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`):
            Resampling filter to use if resizing the image. Only has an effect if `do_resize` is set to `True`. Can be
            overridden by the `resample` parameter in the `preprocess` method.
        high_res_resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`):
            Resampling filter to use if resizing the image. Only has an effect if `do_resize` is set to `True`. Can be
            overridden by the `high_res_resample` parameter in the `preprocess` method.
        do_rescale (`bool`, *optional*, defaults to `True`):
            Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the
            `do_rescale` parameter in the `preprocess` method.
        rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
            Scale factor to use if rescaling the image. Only has an effect if `do_rescale` is set to `True`. Can be
            overridden by the `rescale_factor` parameter in the `preprocess` method.
        do_normalize (`bool`, *optional*, defaults to `True`):
            Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
            method. Can be overridden by the `do_normalize` parameter in the `preprocess` method.
        image_mean (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`):
            Mean to use if normalizing the image. This is a float or list of floats the length of the number of
            channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method. Can be
            overridden by the `image_mean` parameter in the `preprocess` method.
        image_std (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`):
            Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
            number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
            Can be overridden by the `image_std` parameter in the `preprocess` method.
        high_res_image_mean (`float` or `list[float]`, *optional*, defaults to `OPENAI_CLIP_MEAN`):
            Mean to use if normalizing the high resolution image. This is a float or list of floats the length of the number of
            channels in the image. Can be overridden by the `high_res_image_mean` parameter in the `preprocess` method.
        high_res_image_std (`float` or `list[float]`, *optional*, defaults to `OPENAI_CLIP_STD`):
            Standard deviation to use if normalizing the high resolution image. This is a float or list of floats the length of the
            number of channels in the image. Can be overridden by the `high_res_image_std` parameter in the `preprocess` method.
        do_convert_rgb (`bool`, *optional*, defaults to `True`):
            Whether to convert the image to RGB.
        do_pad (`bool`, *optional*, defaults to `True`):
            Whether to pad the image to square or not.
    """
    model_input_names = ["pixel_values", "high_res_pixel_values"]
    valid_kwargs = DeepseekVLHybridImageProcessorKwargs
    def __init__(
        self,
        do_resize: bool = True,
        size: dict[str, int] | None = None,
        high_res_size: dict[str, int] | None = None,
        min_size: int = 14,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        high_res_resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_rescale: bool = True,
        rescale_factor: int | float = 1 / 255,
        do_normalize: bool = True,
        image_mean: float | list[float] | None = None,
        image_std: float | list[float] | None = None,
        high_res_image_mean: float | list[float] | None = None,
        high_res_image_std: float | list[float] | None = None,
        do_convert_rgb: bool | None = None,
        do_pad: bool = True,
        **kwargs,
    ) -> None:
        # High-res defaults: a 1024x1024 square, CLIP mean/std for normalization.
        high_res_size = high_res_size if high_res_size is not None else {"height": 1024, "width": 1024}
        high_res_size = get_size_dict(high_res_size, default_to_square=True)
        self.high_res_size = high_res_size
        self.high_res_image_mean = high_res_image_mean if high_res_image_mean is not None else OPENAI_CLIP_MEAN
        self.high_res_image_std = high_res_image_std if high_res_image_std is not None else OPENAI_CLIP_STD
        self.resample = resample
        self.high_res_resample = high_res_resample
        # The low-res parameters are handled by the parent class.
        super().__init__(
            do_resize=do_resize,
            size=size,
            min_size=min_size,
            resample=resample,
            do_rescale=do_rescale,
            rescale_factor=rescale_factor,
            do_normalize=do_normalize,
            image_mean=image_mean,
            image_std=image_std,
            do_convert_rgb=do_convert_rgb,
            do_pad=do_pad,
            **kwargs,
        )
        # Padding color for the high-res branch: mid-gray by default, otherwise
        # the normalization mean scaled back to the 0-255 integer range.
        if high_res_image_mean is None:
            self.high_res_background_color = (127, 127, 127)
        else:
            self.high_res_background_color = tuple(int(x * 255) for x in high_res_image_mean)
    @filter_out_non_signature_kwargs()
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool | None = None,
        size: dict[str, int] | None = None,
        high_res_size: dict[str, int] | None = None,
        resample: PILImageResampling | None = None,
        high_res_resample: PILImageResampling | None = None,
        do_rescale: bool | None = None,
        rescale_factor: float | None = None,
        do_normalize: bool | None = None,
        image_mean: float | list[float] | None = None,
        image_std: float | list[float] | None = None,
        high_res_image_mean: float | list[float] | None = None,
        high_res_image_std: float | list[float] | None = None,
        return_tensors: str | TensorType | None = None,
        data_format: str | ChannelDimension = ChannelDimension.FIRST,
        input_data_format: str | ChannelDimension | None = None,
        do_convert_rgb: bool | None = None,
        do_pad: bool | None = None,
        background_color: tuple[int, int, int] | None = None,
    ):
        """
        Preprocess an image or batch of images.
        Args:
            images (`ImageInput`):
                Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
                passing in images with pixel values between 0 and 1, set `do_rescale=False`.
            do_resize (`bool`, *optional*, defaults to `self.do_resize`):
                Whether to resize the image.
            size (`Dict[str, int]`, *optional*, defaults to `self.size`):
                Dictionary in the format `{"height": h, "width": w}` specifying the size of the output image after
                resizing.
            high_res_size (`Dict[str, int]`, *optional*, defaults to `self.high_res_size`):
                Dictionary in the format `{"height": h, "width": w}` specifying the size of the high resolution output image after
                resizing.
            resample (`PILImageResampling` filter, *optional*, defaults to `self.resample`):
                `PILImageResampling` filter to use if resizing the image e.g. `PILImageResampling.BILINEAR`. Only has
                an effect if `do_resize` is set to `True`.
            high_res_resample (`PILImageResampling` filter, *optional*, defaults to `self.resample`):
                `PILImageResampling` filter to use if resizing the image e.g. `PILImageResampling.BICUBIC`. Only has
                an effect if `do_resize` is set to `True`.
            do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
                Whether to rescale the image values between [0 - 1].
            rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
                Rescale factor to rescale the image by if `do_rescale` is set to `True`.
            do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
                Whether to normalize the image.
            image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
                Image mean to use if `do_normalize` is set to `True`.
            image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
                Image standard deviation to use if `do_normalize` is set to `True`.
            high_res_image_mean (`float` or `List[float]`, *optional*, defaults to `self.high_res_image_mean`):
                Image mean to use if `do_normalize` is set to `True`.
            high_res_image_std (`float` or `List[float]`, *optional*, defaults to `self.high_res_image_std`):
                Image standard deviation to use if `do_normalize` is set to `True`.
            return_tensors (`str` or `TensorType`, *optional*):
                The type of tensors to return. Can be one of:
                - Unset: Return a list of `np.ndarray`.
                - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
                - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
            data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
                The channel dimension format for the output image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - Unset: Use the channel dimension format of the input image.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format for the input image. If unset, the channel dimension format is inferred
                from the input image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
            do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
                Whether to convert the image to RGB.
            do_pad (`bool`, *optional*, defaults to `self.do_pad`):
                Whether to pad the image to square or not.
            background_color (`tuple[int, int, int]`):
                The background color to use for the padding.
        """
        # Resolve every option against the instance-level defaults.
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        resample = resample if resample is not None else self.resample
        high_res_resample = high_res_resample if high_res_resample is not None else self.high_res_resample
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        high_res_image_mean = high_res_image_mean if high_res_image_mean is not None else self.high_res_image_mean
        high_res_image_std = high_res_image_std if high_res_image_std is not None else self.high_res_image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        do_pad = do_pad if do_pad is not None else self.do_pad
        background_color = background_color if background_color is not None else self.background_color
        size = size if size is not None else self.size
        size_dict = get_size_dict(size)
        high_res_size = high_res_size if high_res_size is not None else self.high_res_size
        high_res_size_dict = get_size_dict(high_res_size)
        images = self.fetch_images(images)
        images = make_flat_list_of_images(images)
        if not valid_images(images):
            raise ValueError("Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor")
        # NOTE(review): only the low-res arguments are validated here; the
        # high_res_* counterparts are not passed through — confirm intended.
        validate_preprocess_arguments(
            do_rescale=do_rescale,
            rescale_factor=rescale_factor,
            do_normalize=do_normalize,
            image_mean=image_mean,
            image_std=image_std,
            do_resize=do_resize,
            size=size,
            resample=resample,
        )
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_rescale and is_scaled_image(images[0]):
            logger.warning_once(
                "It looks like you are trying to rescale already rescaled images. If the input"
                " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
            )
        if input_data_format is None:
            # We assume that all images have the same channel dimension format.
            input_data_format = infer_channel_dimension_format(images[0])
        all_images = []
        all_high_res_images = []
        for image in images:
            # high_res_image: resize (high) -> pad -> rescale -> normalize (high)
            # low_res_image: resize (high) -> pad -> resize (low) -> pad -> rescale -> normalize (low)
            high_res_image = image
            if do_resize:
                high_res_image = self.resize(
                    image=high_res_image,
                    size=high_res_size_dict,
                    resample=high_res_resample,
                    input_data_format=input_data_format,
                )
            if do_pad:
                # Expand and pad the images to obtain a square image of dimensions `size x size`
                high_res_image = self.pad_to_square(
                    image=high_res_image,
                    background_color=background_color,
                    input_data_format=input_data_format,
                )
            # The low-res image is derived from the (padded) high-res image, not the original.
            image = self.resize(
                image=high_res_image,
                size=size_dict,
                resample=resample,
                input_data_format=input_data_format,
            )
            if do_pad:
                image = self.pad_to_square(
                    image=image,
                    background_color=background_color,
                    input_data_format=input_data_format,
                )
            if do_rescale:
                image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
                high_res_image = self.rescale(
                    image=high_res_image, scale=rescale_factor, input_data_format=input_data_format
                )
            if do_normalize:
                # Each stream is normalized with its own mean/std.
                image = self.normalize(
                    image=image, mean=image_mean, std=image_std, input_data_format=input_data_format
                )
                high_res_image = self.normalize(
                    image=high_res_image,
                    mean=high_res_image_mean,
                    std=high_res_image_std,
                    input_data_format=input_data_format,
                )
            image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
            high_res_image = to_channel_dimension_format(
                high_res_image, data_format, input_channel_dim=input_data_format
            )
            all_images.append(image)
            all_high_res_images.append(high_res_image)
        data = {"pixel_values": all_images, "high_res_pixel_values": all_high_res_images}
        return BatchFeature(data=data, tensor_type=return_tensors)
class DeepseekVLHybridImageProcessorFast(DeepseekVLImageProcessorFast):
    # Class-level defaults for the high-resolution branch; low-res defaults
    # are inherited from the parent fast processor.
    high_res_image_mean = OPENAI_CLIP_MEAN
    high_res_image_std = OPENAI_CLIP_STD
    high_res_size = {"height": 1024, "width": 1024}
    high_res_resample = PILImageResampling.BICUBIC
    model_input_names = ["pixel_values", "high_res_pixel_values"]
    def __init__(self, **kwargs: Unpack[DeepseekVLHybridImageProcessorKwargs]):
        """Set up per-stream padding colors (mid-gray, or the normalization mean scaled to 0-255)."""
        # NOTE(review): only explicitly passed kwargs are consulted here, so the
        # class-level mean defaults still yield the gray fallback — confirm intended.
        if kwargs.get("image_mean") is None:
            background_color = (127, 127, 127)
        else:
            background_color = tuple(int(x * 255) for x in kwargs.get("image_mean"))
        if kwargs.get("high_res_image_mean") is None:
            high_res_background_color = (127, 127, 127)
        else:
            high_res_background_color = tuple(int(x * 255) for x in kwargs.get("high_res_image_mean"))
        BaseImageProcessorFast.__init__(self, **kwargs)
        self.background_color = tuple(background_color)
        self.high_res_background_color = tuple(high_res_background_color)
    def _further_process_kwargs(
        self,
        size: SizeDict | None = None,
        high_res_size: SizeDict | None = None,
        default_to_square: bool | None = None,
        image_mean: float | list[float] | None = None,
        image_std: float | list[float] | None = None,
        high_res_image_mean: float | list[float] | None = None,
        high_res_image_std: float | list[float] | None = None,
        data_format: ChannelDimension | None = None,
        **kwargs,
    ) -> dict:
        """
        Update kwargs that need further processing before being validated
        Can be overridden by subclasses to customize the processing of kwargs.
        """
        if kwargs is None:
            kwargs = {}
        if size is not None:
            size = SizeDict(**get_size_dict(size=size, default_to_square=default_to_square))
        if high_res_size is not None:
            high_res_size = SizeDict(**get_size_dict(size=high_res_size, default_to_square=default_to_square))
        # Lists are converted to tuples so the values are hashable/cacheable.
        if isinstance(image_mean, list):
            image_mean = tuple(image_mean)
        if isinstance(image_std, list):
            image_std = tuple(image_std)
        if isinstance(high_res_image_mean, list):
            high_res_image_mean = tuple(high_res_image_mean)
        if isinstance(high_res_image_std, list):
            high_res_image_std = tuple(high_res_image_std)
        if data_format is None:
            data_format = ChannelDimension.FIRST
        # Map PIL resampling enums to their torchvision interpolation equivalents
        # for both the high-res and low-res streams.
        high_res_resample = kwargs.pop("high_res_resample")
        kwargs["high_res_interpolation"] = (
            pil_torch_interpolation_mapping[high_res_resample]
            if isinstance(high_res_resample, (int, PILImageResampling))
            else high_res_resample
        )
        low_res_resample = kwargs.pop("resample")
        kwargs["interpolation"] = (
            pil_torch_interpolation_mapping[low_res_resample]
            if isinstance(low_res_resample, (int, PILImageResampling))
            else low_res_resample
        )
        kwargs["size"] = size
        kwargs["high_res_size"] = high_res_size
        kwargs["image_mean"] = image_mean
        kwargs["image_std"] = image_std
        kwargs["high_res_image_mean"] = high_res_image_mean
        kwargs["high_res_image_std"] = high_res_image_std
        kwargs["data_format"] = data_format
        return kwargs
    def _preprocess(
        self,
        images: list["torch.Tensor"],
        do_resize: bool,
        size: SizeDict,
        high_res_size: SizeDict,
        min_size: int,
        interpolation: Optional["tvF.InterpolationMode"],
        high_res_interpolation: Optional["tvF.InterpolationMode"],
        do_rescale: bool,
        rescale_factor: float,
        do_normalize: bool,
        image_mean: float | list[float] | None,
        image_std: float | list[float] | None,
        high_res_image_mean: float | list[float] | None,
        high_res_image_std: float | list[float] | None,
        disable_grouping: bool | None,
        return_tensors: str | TensorType | None,
        do_pad: bool = True,
        **kwargs,
    ) -> BatchFeature:
        """Produce both high-res and low-res pixel values; the low-res stream is
        derived from the padded high-res images, mirroring the slow processor."""
        # Group images by size for batched resizing
        grouped_images, grouped_images_index = group_images_by_shape(images, disable_grouping=disable_grouping)
        high_res_resized_images_grouped = {}
        # NOTE(review): if do_resize is False these grouped dicts stay empty —
        # presumably do_resize is always True for this processor; confirm.
        for shape, stacked_images in grouped_images.items():
            if do_resize:
                stacked_high_res_images = self.resize(
                    image=stacked_images, size=high_res_size, min_size=min_size, interpolation=high_res_interpolation
                )
            high_res_resized_images_grouped[shape] = stacked_high_res_images
        high_res_resized_images = reorder_images(high_res_resized_images_grouped, grouped_images_index)
        # Group images by size for further processing
        # Needed in case do_resize is False, or resize returns images with different sizes
        grouped_high_res_images, grouped_high_res_images_index = group_images_by_shape(
            high_res_resized_images, disable_grouping=disable_grouping
        )
        high_res_padded_images = {}
        high_res_processed_images_grouped = {}
        for shape, stacked_high_res_images in grouped_high_res_images.items():
            if do_pad:
                stacked_high_res_images = self.pad_to_square(
                    stacked_high_res_images, background_color=self.high_res_background_color
                )
            # Keep the padded (pre-normalization) images: they seed the low-res stream below.
            high_res_padded_images[shape] = stacked_high_res_images
            # Fused rescale and normalize
            stacked_high_res_images = self.rescale_and_normalize(
                stacked_high_res_images,
                do_rescale,
                rescale_factor,
                do_normalize,
                high_res_image_mean,
                high_res_image_std,
            )
            high_res_processed_images_grouped[shape] = stacked_high_res_images
        high_res_processed_images = reorder_images(high_res_processed_images_grouped, grouped_high_res_images_index)
        # Low-res stream: downscale the padded high-res images.
        resized_images_grouped = {}
        for shape, stacked_high_res_padded_images in high_res_padded_images.items():
            if do_resize:
                stacked_images = self.resize(
                    image=stacked_high_res_padded_images, size=size, min_size=min_size, interpolation=interpolation
                )
            resized_images_grouped[shape] = stacked_images
        resized_images = reorder_images(resized_images_grouped, grouped_high_res_images_index)
        grouped_resized_images, grouped_resized_images_index = group_images_by_shape(
            resized_images, disable_grouping=disable_grouping
        )
        processed_images_grouped = {}
        for shape, stacked_images in grouped_resized_images.items():
            if do_pad:
                stacked_images = self.pad_to_square(stacked_images, background_color=self.background_color)
            # Fused rescale and normalize
            stacked_images = self.rescale_and_normalize(
                stacked_images, do_rescale, rescale_factor, do_normalize, image_mean, image_std
            )
            processed_images_grouped[shape] = stacked_images
        processed_images = reorder_images(processed_images_grouped, grouped_resized_images_index)
        return BatchFeature(
            data={"pixel_values": processed_images, "high_res_pixel_values": high_res_processed_images},
            tensor_type=return_tensors,
        )
class DeepseekVLHybridProcessorKwargs(DeepseekVLProcessorKwargs):
    """Processor kwargs for DeepseekVLHybrid; identical to the DeepseekVL defaults."""
    pass
class DeepseekVLHybridProcessor(DeepseekVLProcessor):
    def __call__(
        self,
        text: TextInput | PreTokenizedInput | list[TextInput] | list[PreTokenizedInput] = None,
        images: ImageInput | None = None,
        **kwargs: Unpack[DeepseekVLHybridProcessorKwargs],
    ) -> BatchFeature:
        r"""
        Prepare text and images for the model: each image placeholder token in `text` is expanded to
        `num_image_tokens` copies before tokenization, and images are processed into both low- and
        high-resolution pixel values.
        Returns:
            [`BatchFeature`]: A [`BatchFeature`] with the following fields:
            - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
            - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
              `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
              `None`).
            - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
        """
        output_kwargs = self._merge_kwargs(
            DeepseekVLHybridProcessorKwargs, tokenizer_init_kwargs=self.tokenizer.init_kwargs, **kwargs
        )
        if text is None and images is None:
            raise ValueError("You must specify either text or images.")
        # NOTE(review): `data` is only bound when `text` is provided — an
        # images-only call would raise NameError below; confirm callers always pass text.
        if text is not None:
            if isinstance(text, str):
                text = [text]
            elif not (isinstance(text, (list, tuple)) and all(isinstance(t, str) for t in text)):
                raise ValueError("Invalid input text. Please provide a string, or a list of strings")
            # Expand each placeholder to the fixed number of image tokens the model expects.
            prompt_strings = []
            one_img_tokens = self.image_token * self.num_image_tokens
            for prompt in text:
                prompt = prompt.replace(self.image_token, one_img_tokens)
                prompt_strings.append(prompt)
            data = self.tokenizer(prompt_strings, **output_kwargs["text_kwargs"])
        # process images if pixel_values are provided
        if images is not None:
            inputs = self.image_processor(images, **output_kwargs["images_kwargs"])
            data["pixel_values"] = inputs["pixel_values"]
            data["high_res_pixel_values"] = inputs["high_res_pixel_values"]
        return BatchFeature(data=data)
# Public API of this module.
__all__ = [
    "DeepseekVLHybridConfig",
    "DeepseekVLHybridPreTrainedModel",
    "DeepseekVLHybridModel",
    "DeepseekVLHybridForConditionalGeneration",
    "DeepseekVLHybridImageProcessor",
    "DeepseekVLHybridImageProcessorFast",
    "DeepseekVLHybridProcessor",
]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/deepseek_vl_hybrid/modular_deepseek_vl_hybrid.py",
"license": "Apache License 2.0",
"lines": 908,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:tests/models/deepseek_vl/test_image_processing_deepseek_vl.py | # Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_torchvision_available, is_vision_available
from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import DeepseekVLImageProcessor
if is_torchvision_available():
from transformers import DeepseekVLImageProcessorFast
# Copied from tests.models.vit.test_image_processing_vit.ViTImageProcessingTester with ViT->DeepseekVL
class DeepseekVLImageProcessingTester:
    """Container for the hyper-parameters driving the DeepseekVL image-processing tests.

    Exposes the processor-constructor kwargs (`prepare_image_processor_dict`), the
    expected output shape, and random image fixtures for the shared test mixin.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        # Default to a square 18x18 target size when none is supplied.
        if size is None:
            size = {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        """Kwargs with which the image processor under test is instantiated."""
        return dict(
            image_mean=self.image_mean,
            image_std=self.image_std,
            do_normalize=self.do_normalize,
            do_resize=self.do_resize,
            size=self.size,
        )

    # Ignore copy
    def expected_output_image_shape(self, images):
        """Processed images are square: (channels, longest side, longest side)."""
        side = max(self.size.values())
        return self.num_channels, side, side

    def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        """Random image fixtures (PIL / numpy / torch) within the configured resolution bounds."""
        shared = dict(
            batch_size=self.batch_size,
            num_channels=self.num_channels,
            min_resolution=self.min_resolution,
            max_resolution=self.max_resolution,
        )
        return prepare_image_inputs(
            equal_resolution=equal_resolution, numpify=numpify, torchify=torchify, **shared
        )
@require_torch
@require_vision
class DeepseekVLImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase):
    """Runs the shared image-processing test suite against the DeepseekVL processors."""

    image_processing_class = DeepseekVLImageProcessor if is_vision_available() else None
    fast_image_processing_class = DeepseekVLImageProcessorFast if is_torchvision_available() else None

    def setUp(self):
        super().setUp()
        self.image_processor_tester = DeepseekVLImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        # Every processor flavour must expose the full set of configuration attributes.
        expected_attributes = ("image_mean", "image_std", "do_normalize", "do_resize", "size")
        for image_processing_class in self.image_processor_list:
            processor = image_processing_class(**self.image_processor_dict)
            for attribute in expected_attributes:
                self.assertTrue(hasattr(processor, attribute))

    def test_image_processor_from_dict_with_kwargs(self):
        for image_processing_class in self.image_processor_list:
            processor = image_processing_class.from_dict(self.image_processor_dict)
            self.assertEqual(processor.size, {"height": 18, "width": 18})
            # An integer `size` kwarg is normalized to a square height/width dict.
            processor = image_processing_class.from_dict(self.image_processor_dict, size=42)
            self.assertEqual(processor.size, {"height": 42, "width": 42})

    @require_vision
    @require_torch
    def test_slow_fast_equivalence_batched(self):
        if not self.test_slow_image_processor or not self.test_fast_image_processor:
            self.skipTest(reason="Skipping slow/fast equivalence test")
        if self.image_processing_class is None or self.fast_image_processing_class is None:
            self.skipTest(reason="Skipping slow/fast equivalence test as one of the image processors is not defined")
        if hasattr(self.image_processor_tester, "do_center_crop") and self.image_processor_tester.do_center_crop:
            self.skipTest(
                reason="Skipping as do_center_crop is True and center_crop functions are not equivalent for fast and slow processors"
            )
        images = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, torchify=True)
        slow_processor = self.image_processing_class(**self.image_processor_dict)
        fast_processor = self.fast_image_processing_class(**self.image_processor_dict)
        slow_encoding = slow_processor(images, return_tensors=None)
        fast_encoding = fast_processor(images, return_tensors=None)
        # Overwrite as the outputs are not always all of the same shape (kept for BC)
        for i, slow_pixel_values in enumerate(slow_encoding.pixel_values):
            self._assert_slow_fast_tensors_equivalence(
                torch.from_numpy(slow_pixel_values), fast_encoding.pixel_values[i]
            )

    # Ignore copy
    @unittest.skip(reason="Not supported")
    def test_call_numpy_4_channels(self):
        pass
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/deepseek_vl/test_image_processing_deepseek_vl.py",
"license": "Apache License 2.0",
"lines": 123,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:tests/models/deepseek_vl/test_modeling_deepseek_vl.py | # Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch DeepseekVL model."""
import re
import tempfile
import unittest
from transformers import (
AutoProcessor,
DeepseekVLConfig,
DeepseekVLForConditionalGeneration,
DeepseekVLModel,
is_torch_available,
)
from transformers.testing_utils import (
require_torch,
require_torch_accelerator,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
class DeepseekVLModelTester:
    """Builds a tiny `DeepseekVLConfig` plus matching dummy tensors for the unit tests.

    The text/vision sub-configs are intentionally minimal so model construction and
    forward passes stay fast on CPU.
    """

    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=25,
        num_channels=3,
        initializer_range=0.02,
        is_training=True,
        use_cache=False,
        text_config={
            "num_hidden_layers": 2,
            "vocab_size": 99,
            "hidden_size": 16,
            "intermediate_size": 37,
            "max_position_embeddings": 512,
            "num_attention_heads": 4,
            "pad_token_id": 1,
        },
        vision_config={
            "num_hidden_layers": 1,
            "hidden_size": 16,
            "intermediate_size": 37,
            "image_size": 32,
            "patch_size": 8,
            "hidden_act": "gelu",
            "vision_use_head": False,
            "num_attention_heads": 4,
        },
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.num_channels = num_channels
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.use_cache = use_cache
        self.text_config = text_config
        # Copy before mutating: `vision_config` defaults to a dict that is shared
        # across all instances (mutable default argument), so writing
        # "num_channels" into it directly would leak state between testers.
        self.vision_config = dict(vision_config)
        self.vision_config["num_channels"] = self.num_channels
        self.num_hidden_layers = text_config["num_hidden_layers"]
        self.vocab_size = text_config["vocab_size"]
        self.hidden_size = text_config["hidden_size"]
        self.num_attention_heads = text_config["num_attention_heads"]
        self.image_size = vision_config["image_size"]
        self.num_image_tokens = 16
        self.pad_token_id = text_config["pad_token_id"]
        self.image_token_id = 0

    def get_config(self):
        """Assemble the composite model config from the stored sub-configs."""
        return DeepseekVLConfig(
            text_config=self.text_config,
            vision_config=self.vision_config,
            image_token_id=self.image_token_id,
        )

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, attention_mask, pixel_values) with image tokens injected."""
        config = self.get_config()
        # create text and vision inputs (+1 keeps random ids clear of id 0)
        input_ids = ids_tensor([self.batch_size, self.seq_length], config.text_config.vocab_size - 2) + 1
        attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        pixel_values = floats_tensor(
            [
                self.batch_size,
                self.num_channels,
                self.image_size,
                self.image_size,
            ]
        )
        # fill image_tokens: clear accidental collisions, then reserve the first
        # `num_image_tokens` positions for the image placeholder id
        input_ids[input_ids == self.num_image_tokens] = config.text_config.pad_token_id
        input_ids[:, : self.num_image_tokens] = self.image_token_id
        return config, input_ids, attention_mask, pixel_values

    def prepare_config_and_inputs_for_common(self):
        """Adapt `prepare_config_and_inputs` to the (config, inputs_dict) shape the mixins expect."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask, pixel_values = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask, "pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class DeepseekVLModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Standard model / generation / pipeline test-suite entry point for DeepseekVL."""

    all_model_classes = (DeepseekVLModel, DeepseekVLForConditionalGeneration) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": DeepseekVLModel,
            "image-text-to-text": DeepseekVLForConditionalGeneration,
            "any-to-any": DeepseekVLForConditionalGeneration,
        }
        if is_torch_available()
        else {}
    )
    # Signals to the common tests that this model is composed of several sub-models.
    _is_composite = True

    def setUp(self):
        self.model_tester = DeepseekVLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeepseekVLConfig, has_text_modality=False)

    # overwrite inputs_embeds tests because we need to delete "pixel values" for LVLMs
    def test_inputs_embeds(self):
        """Forward with `inputs_embeds` only (no `input_ids`/`pixel_values`) must not raise."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            inputs = self._prepare_for_class(inputs_dict, model_class)
            input_ids = inputs["input_ids"]
            del inputs["input_ids"]
            del inputs["pixel_values"]
            wte = model.get_input_embeddings()
            inputs["inputs_embeds"] = wte(input_ids)
            with torch.no_grad():
                model(**inputs)

    # overwrite inputs_embeds tests because we need to delete "pixel values" for VLMs.
    def test_inputs_embeds_matches_input_ids(self):
        """Feeding `inputs_embeds` computed from `input_ids` must produce identical outputs."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            inputs = self._prepare_for_class(inputs_dict, model_class)
            input_ids = inputs["input_ids"]
            del inputs["input_ids"]
            del inputs["pixel_values"]
            inputs_embeds = model.get_input_embeddings()(input_ids)
            with torch.no_grad():
                out_ids = model(input_ids=input_ids, **inputs)[0]
                out_embeds = model(inputs_embeds=inputs_embeds, **inputs)[0]
            torch.testing.assert_close(out_embeds, out_ids)

    # Copied from tests.models.janus.test_modeling_janus.JanusVisionText2TextModelTest.test_sdpa_can_dispatch_composite_models
    def test_sdpa_can_dispatch_composite_models(self):
        """Round-trip through save/load and check each sub-model resolved the expected attention backend."""
        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            model = model_class(config)
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                # Load the model with SDPA
                model_sdpa = model_class.from_pretrained(tmpdirname)
                model_sdpa = model_sdpa.eval().to(torch_device)
                # Load model with eager attention
                model_eager = model_class.from_pretrained(
                    tmpdirname,
                    attn_implementation="eager",
                )
                model_eager = model_eager.eval().to(torch_device)
            # SigLip has one shared cls attr for all models, so we assign both submodels here
            vision_attn = language_attn = "sdpa" if model._supports_sdpa else "eager"
            if hasattr(model_sdpa, "vision_model") and hasattr(model_sdpa, "language_model"):
                self.assertTrue(model_sdpa.vision_model.config._attn_implementation == vision_attn)
                self.assertTrue(model_sdpa.language_model.config._attn_implementation == language_attn)
                self.assertTrue(model_eager.vision_model.config._attn_implementation == "eager")
                self.assertTrue(model_eager.language_model.config._attn_implementation == "eager")
            self.assertTrue(model_sdpa.config._attn_implementation == "sdpa")
            self.assertTrue(model_eager.config._attn_implementation == "eager")
            # No attention submodule (pooling layers excluded) may silently keep a non-eager backend.
            for name, submodule in model_eager.named_modules():
                class_name = submodule.__class__.__name__
                if any(re.finditer(r"Attention(?!Pool)", class_name)):
                    self.assertTrue(submodule.config._attn_implementation == "eager")
            for name, submodule in model_sdpa.named_modules():
                class_name = submodule.__class__.__name__
                if any(re.finditer(r"Attention(?!Pool)", class_name)):
                    self.assertTrue(submodule.config._attn_implementation == "sdpa")
@require_torch
@require_torch_accelerator
@slow
class DeepseekVLIntegrationTest(unittest.TestCase):
    """Slow end-to-end generation checks against the released 1.3B chat checkpoint.

    All tests use greedy decoding (`do_sample=False`) and compare against pinned
    transcripts, so they are sensitive to checkpoint and processor changes.
    """

    def setUp(self):
        self.model_id = "deepseek-community/deepseek-vl-1.3b-chat"

    def test_model_text_generation(self):
        """Single-image chat prompt: greedy generation must match the pinned transcript."""
        model = DeepseekVLForConditionalGeneration.from_pretrained(self.model_id, dtype="auto", device_map="auto")
        model.to(torch_device)
        model.eval()
        processor = AutoProcessor.from_pretrained(self.model_id)
        messages = [
            {
                "role": "user",
                "content": [
                    {
                        "type": "image",
                        "url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg",
                    },
                    {"type": "text", "text": "Describe this image."},
                ],
            }
        ]
        EXPECTED_TEXT = 'You are a helpful language and vision assistant. You are able to understand the visual content that the user provides, and assist the user with a variety of tasks using natural language.\n\nUser: Describe this image.\n\nAssistant:In the image, a majestic snow leopard is captured in a moment of tranquility. The snow leopard' # fmt: skip
        inputs = processor.apply_chat_template(
            messages, add_generation_prompt=True, tokenize=True, return_dict=True, return_tensors="pt"
        )
        inputs = inputs.to(model.device, dtype=model.dtype)
        output = model.generate(**inputs, max_new_tokens=20, do_sample=False)
        text = processor.decode(output[0], skip_special_tokens=True)
        self.assertEqual(
            text,
            EXPECTED_TEXT,
        )

    def test_model_text_generation_batched(self):
        """Two chat prompts padded into one batch: both continuations must match the pinned transcripts."""
        model = DeepseekVLForConditionalGeneration.from_pretrained(self.model_id, dtype="auto", device_map="auto")
        model.to(torch_device)
        model.eval()
        processor = AutoProcessor.from_pretrained(self.model_id)
        messages = [
            [
                {
                    "role": "user",
                    "content": [
                        {
                            "type": "image",
                            "url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg",
                        },
                        {"type": "text", "text": "Describe this image."},
                    ],
                }
            ],
            [
                {
                    "role": "user",
                    "content": [
                        {
                            "type": "image",
                            "url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg",
                        },
                        {"type": "text", "text": "What animal do you see in the image?"},
                    ],
                }
            ],
        ]
        EXPECTED_TEXT = [
            "You are a helpful language and vision assistant. You are able to understand the visual content that the user provides, and assist the user with a variety of tasks using natural language.\n\nUser: Describe this image.\n\nAssistant:The image depicts a snowy landscape with a focus on a bear. The bear is standing on all",  # fmt: skip
            "You are a helpful language and vision assistant. You are able to understand the visual content that the user provides, and assist the user with a variety of tasks using natural language.\n\nUser: What animal do you see in the image?\n\nAssistant:I see a bear in the image.What is the significance of the color red in the",  # fmt: skip
        ]
        inputs = processor.apply_chat_template(
            messages, add_generation_prompt=True, tokenize=True, padding=True, return_dict=True, return_tensors="pt"
        )
        inputs = inputs.to(model.device, dtype=model.dtype)
        output = model.generate(**inputs, max_new_tokens=20, do_sample=False)
        text = processor.batch_decode(output, skip_special_tokens=True)
        self.assertEqual(EXPECTED_TEXT, text)

    def test_model_text_generation_with_multi_image(self):
        """Prompt interleaving two images with text: greedy generation must match the pinned transcript."""
        model = DeepseekVLForConditionalGeneration.from_pretrained(self.model_id, dtype="auto", device_map="auto")
        model.to(torch_device)
        model.eval()
        processor = AutoProcessor.from_pretrained(self.model_id)
        messages = [
            {
                "role": "user",
                "content": [
                    {"type": "text", "text": "What's the difference between"},
                    {"type": "image", "url": "http://images.cocodataset.org/val2017/000000039769.jpg"},
                    {"type": "text", "text": " and "},
                    {
                        "type": "image",
                        "url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg",
                    },
                ],
            }
        ]
        EXPECTED_TEXT = "You are a helpful language and vision assistant. You are able to understand the visual content that the user provides, and assist the user with a variety of tasks using natural language.\n\nUser: What's the difference between and \n\nAssistant:The image is a photograph featuring two cats lying on a pink blanket. The cat on the left is" # fmt: skip
        inputs = processor.apply_chat_template(
            messages, add_generation_prompt=True, tokenize=True, return_dict=True, return_tensors="pt"
        )
        inputs = inputs.to(model.device, dtype=model.dtype)
        output = model.generate(**inputs, max_new_tokens=20, do_sample=False)
        text = processor.decode(output[0], skip_special_tokens=True)
        self.assertEqual(
            text,
            EXPECTED_TEXT,
        )
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/deepseek_vl/test_modeling_deepseek_vl.py",
"license": "Apache License 2.0",
"lines": 298,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:tests/models/deepseek_vl_hybrid/test_image_processing_deepseek_vl_hybrid.py | # Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from transformers.image_utils import load_image
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_torchvision_available, is_vision_available
from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs
from ...test_processing_common import url_to_local_path
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeepseekVLHybridImageProcessor
if is_torchvision_available():
from transformers import DeepseekVLHybridImageProcessorFast
class DeepseekVLHybridImageProcessingTester:
    """Container for the hyper-parameters driving the DeepseekVLHybrid image-processing tests.

    In addition to the regular low-resolution settings, the hybrid processor also
    has a separate high-resolution branch with its own size and normalization stats.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        high_res_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        high_res_image_mean=[0.5, 0.5, 0.5],
        high_res_image_std=[0.5, 0.5, 0.5],
    ):
        # Fall back to square defaults when explicit sizes are not provided.
        if size is None:
            size = {"height": 18, "width": 18}
        if high_res_size is None:
            high_res_size = {"height": 36, "width": 36}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.high_res_size = high_res_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.high_res_image_mean = high_res_image_mean
        self.high_res_image_std = high_res_image_std

    def prepare_image_processor_dict(self):
        """Kwargs with which the image processor under test is instantiated."""
        return dict(
            image_mean=self.image_mean,
            image_std=self.image_std,
            high_res_image_mean=self.high_res_image_mean,
            high_res_image_std=self.high_res_image_std,
            do_normalize=self.do_normalize,
            do_resize=self.do_resize,
            size=self.size,
            high_res_size=self.high_res_size,
        )

    def expected_output_image_shape(self, images):
        """Low-res outputs are square: (channels, longest side, longest side)."""
        side = max(self.size.values())
        return self.num_channels, side, side

    def expected_output_high_res_image_shape(self, images):
        """High-res outputs are square: (channels, longest high-res side, longest high-res side)."""
        side = max(self.high_res_size.values())
        return self.num_channels, side, side

    def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        """Random image fixtures (PIL / numpy / torch) within the configured resolution bounds."""
        shared = dict(
            batch_size=self.batch_size,
            num_channels=self.num_channels,
            min_resolution=self.min_resolution,
            max_resolution=self.max_resolution,
        )
        return prepare_image_inputs(
            equal_resolution=equal_resolution, numpify=numpify, torchify=torchify, **shared
        )
@require_torch
@require_vision
class DeepseekVLHybridImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase):
    """Runs the shared image-processing test suite against the DeepseekVLHybrid processors,
    plus dedicated checks for the high-resolution output branch (`high_res_pixel_values`)."""

    image_processing_class = DeepseekVLHybridImageProcessor if is_vision_available() else None
    fast_image_processing_class = DeepseekVLHybridImageProcessorFast if is_torchvision_available() else None

    # Copied from tests.models.vit.test_image_processing_vit.ViTImageProcessingTester.setUp with ViT->DeepseekVLHybrid
    def setUp(self):
        super().setUp()
        self.image_processor_tester = DeepseekVLHybridImageProcessingTester(self)

    @property
    # Copied from tests.models.vit.test_image_processing_vit.ViTImageProcessingTester.image_processor_dict with ViT->DeepseekVLHybrid
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    # Copied from tests.models.vit.test_image_processing_vit.ViTImageProcessingTester.test_image_processor_from_dict_with_kwargs
    def test_image_processor_from_dict_with_kwargs(self):
        for image_processing_class in self.image_processor_list:
            image_processor = image_processing_class.from_dict(self.image_processor_dict)
            self.assertEqual(image_processor.size, {"height": 18, "width": 18})
            # An integer `size` kwarg is normalized to a square height/width dict.
            image_processor = image_processing_class.from_dict(self.image_processor_dict, size=42)
            self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_image_processor_properties(self):
        """Both slow and fast processors must expose every configuration attribute,
        including the high-resolution variants."""
        for image_processing_class in self.image_processor_list:
            image_processing = image_processing_class(**self.image_processor_dict)
            self.assertTrue(hasattr(image_processing, "image_mean"))
            self.assertTrue(hasattr(image_processing, "image_std"))
            self.assertTrue(hasattr(image_processing, "high_res_image_mean"))
            self.assertTrue(hasattr(image_processing, "high_res_image_std"))
            self.assertTrue(hasattr(image_processing, "do_normalize"))
            self.assertTrue(hasattr(image_processing, "do_resize"))
            self.assertTrue(hasattr(image_processing, "size"))
            self.assertTrue(hasattr(image_processing, "high_res_size"))

    def test_call_pil_high_res(self):
        """PIL inputs: `high_res_pixel_values` must have the expected high-res shape."""
        for image_processing_class in self.image_processor_list:
            # Initialize image_processing
            image_processing = image_processing_class(**self.image_processor_dict)
            # create random PIL images
            image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False)
            for image in image_inputs:
                self.assertIsInstance(image, Image.Image)
            # Test not batched input
            encoded_images = image_processing(image_inputs[0], return_tensors="pt").high_res_pixel_values
            expected_output_image_shape = self.image_processor_tester.expected_output_high_res_image_shape(
                [image_inputs[0]]
            )
            self.assertEqual(tuple(encoded_images.shape), (1, *expected_output_image_shape))
            # Test batched
            encoded_images = image_processing(image_inputs, return_tensors="pt").high_res_pixel_values
            expected_output_image_shape = self.image_processor_tester.expected_output_high_res_image_shape(
                image_inputs
            )
            self.assertEqual(
                tuple(encoded_images.shape), (self.image_processor_tester.batch_size, *expected_output_image_shape)
            )

    def test_call_numpy_high_res(self):
        """Numpy inputs: `high_res_pixel_values` must have the expected high-res shape."""
        for image_processing_class in self.image_processor_list:
            # Initialize image_processing
            image_processing = image_processing_class(**self.image_processor_dict)
            # create random numpy tensors
            image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, numpify=True)
            for image in image_inputs:
                self.assertIsInstance(image, np.ndarray)
            # Test not batched input
            encoded_images = image_processing(image_inputs[0], return_tensors="pt").high_res_pixel_values
            expected_output_image_shape = self.image_processor_tester.expected_output_high_res_image_shape(
                [image_inputs[0]]
            )
            self.assertEqual(tuple(encoded_images.shape), (1, *expected_output_image_shape))
            # Test batched
            encoded_images = image_processing(image_inputs, return_tensors="pt").high_res_pixel_values
            expected_output_image_shape = self.image_processor_tester.expected_output_high_res_image_shape(
                image_inputs
            )
            self.assertEqual(
                tuple(encoded_images.shape), (self.image_processor_tester.batch_size, *expected_output_image_shape)
            )

    def test_call_pytorch_high_res(self):
        """Torch inputs: `high_res_pixel_values` must have the expected high-res shape."""
        for image_processing_class in self.image_processor_list:
            # Initialize image_processing
            image_processing = image_processing_class(**self.image_processor_dict)
            # create random PyTorch tensors
            image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, torchify=True)
            for image in image_inputs:
                self.assertIsInstance(image, torch.Tensor)
            # Test not batched input
            encoded_images = image_processing(image_inputs[0], return_tensors="pt").high_res_pixel_values
            expected_output_image_shape = self.image_processor_tester.expected_output_high_res_image_shape(
                [image_inputs[0]]
            )
            self.assertEqual(tuple(encoded_images.shape), (1, *expected_output_image_shape))
            # Test batched
            expected_output_image_shape = self.image_processor_tester.expected_output_high_res_image_shape(
                image_inputs
            )
            encoded_images = image_processing(image_inputs, return_tensors="pt").high_res_pixel_values
            self.assertEqual(
                tuple(encoded_images.shape),
                (self.image_processor_tester.batch_size, *expected_output_image_shape),
            )

    @require_vision
    @require_torch
    def test_slow_fast_equivalence(self):
        """Slow and fast processors must agree on a real image for both output branches."""
        if not self.test_slow_image_processor or not self.test_fast_image_processor:
            self.skipTest(reason="Skipping slow/fast equivalence test")
        if self.image_processing_class is None or self.fast_image_processing_class is None:
            self.skipTest(reason="Skipping slow/fast equivalence test as one of the image processors is not defined")
        dummy_image = load_image(url_to_local_path("http://images.cocodataset.org/val2017/000000039769.jpg"))
        image_processor_slow = self.image_processing_class(**self.image_processor_dict)
        image_processor_fast = self.fast_image_processing_class(**self.image_processor_dict)
        encoding_slow = image_processor_slow(dummy_image, return_tensors="pt")
        encoding_fast = image_processor_fast(dummy_image, return_tensors="pt")
        self._assert_slow_fast_tensors_equivalence(encoding_slow.pixel_values, encoding_fast.pixel_values)
        self._assert_slow_fast_tensors_equivalence(
            encoding_slow.high_res_pixel_values, encoding_fast.high_res_pixel_values
        )

    @require_vision
    @require_torch
    def test_slow_fast_equivalence_batched(self):
        """Slow and fast processors must agree per-image on a mixed-resolution batch."""
        if not self.test_slow_image_processor or not self.test_fast_image_processor:
            self.skipTest(reason="Skipping slow/fast equivalence test")
        if self.image_processing_class is None or self.fast_image_processing_class is None:
            self.skipTest(reason="Skipping slow/fast equivalence test as one of the image processors is not defined")
        if hasattr(self.image_processor_tester, "do_center_crop") and self.image_processor_tester.do_center_crop:
            self.skipTest(
                reason="Skipping as do_center_crop is True and center_crop functions are not equivalent for fast and slow processors"
            )
        dummy_images = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, torchify=True)
        image_processor_slow = self.image_processing_class(**self.image_processor_dict)
        image_processor_fast = self.fast_image_processing_class(**self.image_processor_dict)
        encoding_slow = image_processor_slow(dummy_images, return_tensors=None)
        encoding_fast = image_processor_fast(dummy_images, return_tensors=None)
        # Overwrite as the outputs are not always all of the same shape (kept for BC)
        for i in range(len(encoding_slow.pixel_values)):
            self._assert_slow_fast_tensors_equivalence(
                torch.from_numpy(encoding_slow.pixel_values[i]), encoding_fast.pixel_values[i]
            )
        for i in range(len(encoding_slow.high_res_pixel_values)):
            self._assert_slow_fast_tensors_equivalence(
                torch.from_numpy(encoding_slow.high_res_pixel_values[i]), encoding_fast.high_res_pixel_values[i]
            )

    @unittest.skip(reason="Not supported")
    def test_call_numpy_4_channels(self):
        pass
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/deepseek_vl_hybrid/test_image_processing_deepseek_vl_hybrid.py",
"license": "Apache License 2.0",
"lines": 230,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:tests/models/deepseek_vl_hybrid/test_modeling_deepseek_vl_hybrid.py | # Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch DeepseekVLHybrid model."""
import re
import tempfile
import unittest
from transformers import (
AutoProcessor,
DeepseekVLHybridConfig,
DeepseekVLHybridForConditionalGeneration,
DeepseekVLHybridModel,
is_torch_available,
)
from transformers.testing_utils import (
require_torch,
require_torch_accelerator,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
class DeepseekVLHybridModelTester:
    """Builds a tiny `DeepseekVLHybridConfig` plus dummy text/image inputs for the common tests.

    The default sub-configs are intentionally small (few layers/heads, tiny hidden sizes)
    so the common model tests run fast.
    """

    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=25,
        num_channels=3,
        initializer_range=0.02,
        is_training=True,
        use_cache=False,
        text_config=None,
        vision_config=None,
        high_res_vision_config=None,
    ):
        # Fix: the config arguments previously defaulted to mutable dict literals. Since this
        # __init__ mutates the dicts (e.g. `self.vision_config["num_channels"] = ...`) and
        # callers may mutate them further, the shared default dicts leaked state across
        # instances. `None` defaults with per-call construction avoid that.
        if text_config is None:
            text_config = {
                "num_hidden_layers": 2,
                "vocab_size": 99,
                "hidden_size": 16,
                "intermediate_size": 37,
                "max_position_embeddings": 512,
                "num_attention_heads": 4,
                "pad_token_id": 1,
            }
        if vision_config is None:
            vision_config = {
                "num_hidden_layers": 1,
                "hidden_size": 16,
                "intermediate_size": 37,
                "image_size": 32,
                "patch_size": 8,
                "hidden_act": "gelu",
                "vision_use_head": False,
                "num_attention_heads": 4,
            }
        if high_res_vision_config is None:
            high_res_vision_config = {
                "num_hidden_layers": 2,
                "global_attn_indexes": [0],
                "hidden_size": 16,
                "intermediate_size": 37,
                "mlp_dim": 24,
                "output_channels": 4,
                "image_size": 128,
                "patch_size": 32,
                "num_attention_heads": 4,
            }
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.num_channels = num_channels
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.use_cache = use_cache
        self.text_config = text_config
        self.vision_config = vision_config
        self.high_res_vision_config = high_res_vision_config
        # Both vision towers consume the same number of image channels.
        self.vision_config["num_channels"] = self.num_channels
        self.high_res_vision_config["num_channels"] = self.num_channels
        # Convenience mirrors of the text config used by the common test mixins.
        self.num_hidden_layers = text_config["num_hidden_layers"]
        self.vocab_size = text_config["vocab_size"]
        self.hidden_size = text_config["hidden_size"]
        self.num_attention_heads = text_config["num_attention_heads"]
        self.high_res_image_size = high_res_vision_config["image_size"]
        self.image_size = vision_config["image_size"]
        # One placeholder token per patch along one side of the low-res image.
        self.num_image_tokens = vision_config["image_size"] // vision_config["patch_size"]
        self.pad_token_id = text_config["pad_token_id"]
        # The last vocab id is reserved as the image placeholder token.
        self.image_token_id = self.vocab_size - 1

    def get_config(self):
        """Return a `DeepseekVLHybridConfig` assembled from the three sub-configs."""
        return DeepseekVLHybridConfig(
            text_config=self.text_config,
            vision_config=self.vision_config,
            high_res_vision_config=self.high_res_vision_config,
            image_token_id=self.image_token_id,
        )

    def prepare_config_and_inputs(self):
        """Create the config plus random `input_ids`, attention mask and low/high-res pixels."""
        config = self.get_config()
        # create text and vision inputs; ids land in [1, vocab_size - 2] so they never
        # collide with the image token id (vocab_size - 1)
        input_ids = ids_tensor([self.batch_size, self.seq_length], config.text_config.vocab_size - 2) + 1
        attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        pixel_values = floats_tensor(
            [
                self.batch_size,
                self.num_channels,
                self.image_size,
                self.image_size,
            ]
        )
        high_res_pixel_values = floats_tensor(
            [
                self.batch_size,
                self.num_channels,
                self.high_res_image_size,
                self.high_res_image_size,
            ]
        )
        # fill image_tokens at the start of every sequence
        input_ids[:, : self.num_image_tokens] = self.image_token_id
        return config, input_ids, attention_mask, pixel_values, high_res_pixel_values

    def prepare_config_and_inputs_for_common(self):
        """Return `(config, inputs_dict)` in the shape expected by `ModelTesterMixin`."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask, pixel_values, high_res_pixel_values = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "pixel_values": pixel_values,
            "high_res_pixel_values": high_res_pixel_values,
        }
        return config, inputs_dict
@require_torch
class DeepseekVLHybridModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Runs the shared model / generation / pipeline test suites against tiny DeepseekVLHybrid models."""
    all_model_classes = (
        (DeepseekVLHybridModel, DeepseekVLHybridForConditionalGeneration) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DeepseekVLHybridModel,
            "image-text-to-text": DeepseekVLHybridForConditionalGeneration,
            "any-to-any": DeepseekVLHybridForConditionalGeneration,
        }
        if is_torch_available()
        else {}
    )
    # The model is composed of sub-models (language model + two vision towers), so some
    # common tests use composite-aware code paths.
    _is_composite = True
    model_split_percents = [0.5, 0.85, 0.9]  # it tries to offload everything with the default value
    def setUp(self):
        # Helper building tiny configs/inputs, plus the generic config (de)serialization tester.
        self.model_tester = DeepseekVLHybridModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeepseekVLHybridConfig, has_text_modality=False)
    # overwrite inputs_embeds tests because we need to delete "pixel values" for LVLMs
    def test_inputs_embeds(self):
        """The model must run forward from `inputs_embeds` alone (no input_ids / pixel values)."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            inputs = self._prepare_for_class(inputs_dict, model_class)
            input_ids = inputs["input_ids"]
            # Drop token ids and both pixel inputs; feed only the embedded tokens.
            del inputs["input_ids"]
            del inputs["pixel_values"]
            del inputs["high_res_pixel_values"]
            wte = model.get_input_embeddings()
            inputs["inputs_embeds"] = wte(input_ids)
            with torch.no_grad():
                model(**inputs)
    # overwrite inputs_embeds tests because we need to delete "pixel values" for VLMs.
    def test_inputs_embeds_matches_input_ids(self):
        """Forward from `inputs_embeds` must equal forward from the corresponding `input_ids`."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            inputs = self._prepare_for_class(inputs_dict, model_class)
            input_ids = inputs["input_ids"]
            del inputs["input_ids"]
            del inputs["pixel_values"]
            del inputs["high_res_pixel_values"]
            inputs_embeds = model.get_input_embeddings()(input_ids)
            with torch.no_grad():
                out_ids = model(input_ids=input_ids, **inputs)[0]
                out_embeds = model(inputs_embeds=inputs_embeds, **inputs)[0]
            torch.testing.assert_close(out_embeds, out_ids)
    def test_sdpa_can_dispatch_composite_models(self):
        """`attn_implementation` must propagate to every sub-model and attention submodule."""
        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            model = model_class(config)
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                # Load the model with SDPA
                model_sdpa = model_class.from_pretrained(
                    tmpdirname,
                    attn_implementation="sdpa",
                )
                model_sdpa = model_sdpa.eval().to(torch_device)
                # Load model with eager attention
                model_eager = model_class.from_pretrained(
                    tmpdirname,
                    attn_implementation="eager",
                )
                model_eager = model_eager.eval().to(torch_device)
                self.assertTrue(model_sdpa.config._attn_implementation == "sdpa")
                self.assertTrue(model_eager.config._attn_implementation == "eager")
                # Each sub-model must inherit the implementation requested at load time.
                if (
                    hasattr(model_sdpa, "vision_model")
                    and hasattr(model_sdpa, "high_res_vision_model")
                    and hasattr(model_sdpa, "language_model")
                ):
                    self.assertTrue(model_sdpa.language_model.config._attn_implementation == "sdpa")
                    self.assertTrue(model_sdpa.vision_model.config._attn_implementation == "sdpa")
                    self.assertTrue(model_sdpa.high_res_vision_model.config._attn_implementation == "sdpa")
                    self.assertTrue(model_eager.language_model.config._attn_implementation == "eager")
                    self.assertTrue(model_eager.high_res_vision_model.config._attn_implementation == "eager")
                # Fail-if-found sweep: any `*Attention` module (excluding `*AttentionPool`)
                # left on the wrong implementation trips the assertTrue inside the branch.
                for name, submodule in model_eager.named_modules():
                    class_name = submodule.__class__.__name__
                    if (
                        any(re.finditer(r"Attention(?!Pool)", class_name))
                        and getattr(submodule, "config", None)
                        and submodule.config._attn_implementation == "sdpa"
                    ):
                        self.assertTrue(submodule.config._attn_implementation == "eager")
                for name, submodule in model_sdpa.named_modules():
                    class_name = submodule.__class__.__name__
                    if (
                        any(re.finditer(r"Attention(?!Pool)", class_name))
                        and getattr(submodule, "config", None)
                        and submodule.config._attn_implementation == "eager"
                    ):
                        self.assertTrue(submodule.config._attn_implementation == "sdpa")
    @require_torch_accelerator
    @slow
    def test_sdpa_can_dispatch_on_flash(self):
        self.skipTest(
            "deepseek_vl_hybrid uses SAM, which requires an attention_mask input for relative positional embeddings"
        )
@require_torch
@require_torch_accelerator
@slow
class DeepseekVLHybridIntegrationTest(unittest.TestCase):
    """Slow integration tests: greedy generation against the released Hub checkpoint.

    The EXPECTED_TEXT transcripts are pinned to this checkpoint with `do_sample=False`;
    any model or processor change that alters generation will surface as a diff here.
    """
    def setUp(self):
        # Hub checkpoint shared by all tests below.
        self.model_id = "deepseek-community/deepseek-vl-7b-chat"
    def test_model_text_generation(self):
        """Single image + prompt: greedy output must match the pinned transcript."""
        model = DeepseekVLHybridForConditionalGeneration.from_pretrained(
            self.model_id, dtype="auto", device_map="auto"
        )
        model.to(torch_device)
        model.eval()
        processor = AutoProcessor.from_pretrained(self.model_id)
        messages = [
            {
                "role": "user",
                "content": [
                    {
                        "type": "image",
                        "url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg",
                    },
                    {"type": "text", "text": "Describe this image."},
                ],
            }
        ]
        EXPECTED_TEXT = 'You are a helpful language and vision assistant. You are able to understand the visual content that the user provides, and assist the user with a variety of tasks using natural language.\n\nUser: Describe this image.\n\nAssistant:The image depicts a fluffy, light brown animal with a white face and black markings on its face and'  # fmt: skip
        inputs = processor.apply_chat_template(
            messages, add_generation_prompt=True, tokenize=True, return_dict=True, return_tensors="pt"
        )
        inputs = inputs.to(model.device, dtype=model.dtype)
        output = model.generate(**inputs, max_new_tokens=20, do_sample=False)
        text = processor.decode(output[0], skip_special_tokens=True)
        self.assertEqual(
            text,
            EXPECTED_TEXT,
        )
    def test_model_text_generation_batched(self):
        """Batch of two image+prompt conversations with padding: both transcripts must match."""
        model = DeepseekVLHybridForConditionalGeneration.from_pretrained(
            self.model_id, dtype="auto", device_map="auto"
        )
        model.to(torch_device)
        model.eval()
        processor = AutoProcessor.from_pretrained(self.model_id)
        messages = [
            [
                {
                    "role": "user",
                    "content": [
                        {
                            "type": "image",
                            "url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg",
                        },
                        {"type": "text", "text": "Describe this image."},
                    ],
                }
            ],
            [
                {
                    "role": "user",
                    "content": [
                        {
                            "type": "image",
                            "url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg",
                        },
                        {"type": "text", "text": "What animal do you see in the image?"},
                    ],
                }
            ],
        ]
        EXPECTED_TEXT = [
            "You are a helpful language and vision assistant. You are able to understand the visual content that the user provides, and assist the user with a variety of tasks using natural language.\n\nUser: Describe this image.\n\nAssistant:\nThe image depicts a fluffy, light brown animal with a white face and black markings around its eyes",  # fmt: skip
            "You are a helpful language and vision assistant. You are able to understand the visual content that the user provides, and assist the user with a variety of tasks using natural language.\n\nUser: What animal do you see in the image?\n\nAssistant:I see a large, furry animal that appears to be a type of bear.The ",  # fmt: skip
        ]
        inputs = processor.apply_chat_template(
            messages, add_generation_prompt=True, tokenize=True, padding=True, return_dict=True, return_tensors="pt"
        )
        inputs = inputs.to(model.device, dtype=model.dtype)
        output = model.generate(**inputs, max_new_tokens=20, do_sample=False)
        text = processor.batch_decode(output, skip_special_tokens=True)
        self.assertEqual(EXPECTED_TEXT, text)
    def test_model_text_generation_with_multi_image(self):
        """Two images interleaved with text in a single turn: output must match the transcript."""
        model = DeepseekVLHybridForConditionalGeneration.from_pretrained(
            self.model_id, dtype="auto", device_map="auto"
        )
        model.to(torch_device)
        model.eval()
        processor = AutoProcessor.from_pretrained(self.model_id)
        messages = [
            {
                "role": "user",
                "content": [
                    {"type": "text", "text": "What's the difference between"},
                    {"type": "image", "url": "http://images.cocodataset.org/val2017/000000039769.jpg"},
                    {"type": "text", "text": " and "},
                    {
                        "type": "image",
                        "url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg",
                    },
                ],
            }
        ]
        EXPECTED_TEXT = "You are a helpful language and vision assistant. You are able to understand the visual content that the user provides, and assist the user with a variety of tasks using natural language.\n\nUser: What's the difference between and \n\nAssistant:The image shows a street scene with a prominent red stop sign in the foreground. The sign has the"  # fmt: skip
        inputs = processor.apply_chat_template(
            messages, add_generation_prompt=True, tokenize=True, return_dict=True, return_tensors="pt"
        )
        inputs = inputs.to(model.device, dtype=model.dtype)
        output = model.generate(**inputs, max_new_tokens=20, do_sample=False)
        text = processor.decode(output[0], skip_special_tokens=True)
        self.assertEqual(
            text,
            EXPECTED_TEXT,
        )
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/deepseek_vl_hybrid/test_modeling_deepseek_vl_hybrid.py",
"license": "Apache License 2.0",
"lines": 355,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:src/transformers/models/evolla/configuration_evolla.py | # Copyright 2025 Westlake Representational Learning Lab (Fajie Yuan Lab) team and the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Evolla model configuration"""
from ...configuration_utils import PreTrainedConfig
from ...modeling_rope_utils import RopeParameters
from ...utils import logging
logger = logging.get_logger(__name__)
class SaProtConfig(PreTrainedConfig):
    r"""This is the configuration class to store the configuration of a [`EvollaSaProtProteinEncoder`]. It is used to instantiate a
    SaProt model according to the specified arguments, defining the model architecture.
    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.
    Args:
        vocab_size (`int`, *optional*, defaults to 446):
            Vocabulary size of the protein sequence model. Defines the number of different tokens that can be represented
            by the `inputs_ids` passed when calling [`EvollaModel`].
        mask_token_id (`int`, *optional*, defaults to 4):
            The id of the *mask* token in the protein sequence model.
        pad_token_id (`int`, *optional*, defaults to 1):
            The id of the *padding* token in the protein sequence model.
        hidden_size (`int`, *optional*, defaults to 1280):
            Dimensionality of the protein sequence model layers and the pooler layer.
        num_hidden_layers (`int`, *optional*, defaults to 33):
            Number of hidden layers in the protein sequence model.
        num_attention_heads (`int`, *optional*, defaults to 20):
            Number of attention heads for each attention layer in the protein sequence model.
        intermediate_size (`int`, *optional*, defaults to 5120):
            Dimensionality of the intermediate layers in the protein sequence model.
        hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the hidden layers in the protein sequence model.
        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the attention probabilities in the protein sequence model.
        max_position_embeddings (`int`, *optional*, defaults to 1026):
            The maximum sequence length that the protein sequence model might ever be used with. Typically set this to
            something large just in case (e.g., 512 or 1024 or 2048).
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon value for the layer normalization layer in the protein sequence model.
        position_embedding_type (`str`, *optional*, defaults to `"rotary"`):
            The type of position embedding to use in the protein sequence model. Currently only `"rotary"` is supported.
        emb_layer_norm_before (`bool`, *optional*, defaults to `False`):
            Whether to apply layer normalization before the position embedding in the protein sequence model.
        token_dropout (`bool`, *optional*, defaults to `True`):
            Whether to apply dropout to the tokens in the protein sequence model.
        is_decoder (`bool`, *optional*, defaults to `False`):
            Whether to only use the decoder in an encoder-decoder architecture, otherwise it has no effect on
            decoder-only or encoder-only architectures.
        add_cross_attention (`bool`, *optional*, defaults to `False`):
            Whether cross-attention layers should be added to the model."""
    def __init__(
        self,
        vocab_size=446,
        mask_token_id=4,
        pad_token_id=1,
        hidden_size=1280,
        num_hidden_layers=33,
        num_attention_heads=20,
        intermediate_size=5120,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1026,
        initializer_range=0.02,
        layer_norm_eps=1e-05,
        position_embedding_type="rotary",
        emb_layer_norm_before=False,
        token_dropout=True,
        is_decoder=False,
        add_cross_attention=False,
        **kwargs,
    ):
        # Base-class init runs first so explicit arguments below take precedence over
        # anything carried in **kwargs.
        super().__init__(**kwargs)
        self.pad_token_id = pad_token_id
        self.mask_token_id = mask_token_id
        self.is_decoder = is_decoder
        self.add_cross_attention = add_cross_attention
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
class EvollaConfig(PreTrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`EvollaModel`]. It is used to instantiate an
    Evolla model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of the Evolla-10B.
    e.g. [westlake-repl/Evolla-10B-hf](https://huggingface.co/westlake-repl/Evolla-10B-hf)
    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.
    Args:
        protein_encoder_config (`dict`, *optional*):
            Dictionary of configuration options used to initialize [`SaProtConfig`].
        vocab_size (`int`, *optional*, defaults to 128256):
            Vocabulary size of the Evolla llama model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`EvollaModel`].
        hidden_size (`int`, *optional*, defaults to 4096):
            Dimensionality of the llama layers and the pooler layer.
        intermediate_size (`int`, *optional*, defaults to 14336):
            Dimensionality of the intermediate layers in the llama model.
        num_hidden_layers (`int`, *optional*, defaults to 32):
            Number of hidden layers in the llama model.
        num_attention_heads (`int`, *optional*, defaults to 32):
            Number of attention heads for each attention layer in the llama model.
        num_key_value_heads (`int`, *optional*, defaults to 8):
            Number of key-value pairs for each attention layer in the llama model.
        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the llama model. If string, `"gelu"`, `"relu"`,
            `"selu"` and `"silu"` are supported.
        max_position_embeddings (`int`, *optional*, defaults to 8192):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        rms_norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon value for the RMS-norm layer in the llama model.
        rope_parameters (`RopeParameters` or `dict[str, RopeParameters]`, *optional*):
            The RoPE (rotary position embedding) configuration for the llama model, e.g. scaling strategy and base
            frequency.
        attention_bias (`bool`, *optional*, defaults to `False`):
            Whether to use bias in the attention layer.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention layer.
        mlp_bias (`bool`, *optional*, defaults to `False`):
            Whether to use bias in the MLP layer.
        aligner_ffn_mult (`int`, *optional*, defaults to 4):
            The FFN multiplier for the aligner layer.
        aligner_enable_bias (`bool`, *optional*, defaults to `True`):
            Whether to use bias in the aligner layer.
        aligner_attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the attention probabilities in the aligner layer.
        aligner_num_add_layers (`int`, *optional*, defaults to 8):
            The number of additional layers for the aligner layer.
        resampler_depth (`int`, *optional*, defaults to 6):
            The depth of the resampler layer in the llama model.
        resampler_dim_head (`int`, *optional*, defaults to 64):
            The dimension of the heads in the resampler layer in the llama model.
        resampler_heads (`int`, *optional*, defaults to 8):
            The number of heads in the resampler layer in the llama model.
        resampler_num_latents (`int`, *optional*, defaults to 64):
            The number of latents in the resampler layer in the llama model.
        resampler_ff_mult (`int`, *optional*, defaults to 4):
            The FFN multiplier for the resampler layer.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        pad_token_id (`int`, *optional*):
            The id of the *padding* token.
        bos_token_id (`int`, *optional*, defaults to 128000):
            The id of the *beginning-of-sequence* token.
        eos_token_id (`int`, *optional*, defaults to 128009):
            The id of the *end-of-sequence* token.
        use_cache (`bool`, *optional*, defaults to `False`):
            Whether or not the model should return the last key/values attentions (not used by all models).
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether or not to tie the input and output word embeddings.
        is_decoder (`bool`, *optional*, defaults to `False`):
            Whether to only use the decoder in an encoder-decoder architecture, otherwise it has no effect on
            decoder-only or encoder-only architectures.
        add_cross_attention (`bool`, *optional*, defaults to `False`):
            Whether cross-attention layers should be added to the model.
    Example:
    ```python
    >>> from transformers import EvollaModel, EvollaConfig
    >>> # Initializing a Evolla evolla-10b style configuration
    >>> configuration = EvollaConfig()
    >>> # Initializing a model from the evolla-10b style configuration
    >>> model = EvollaModel(configuration)
    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""
    model_type = "EvollaModel"
    sub_configs = {"protein_encoder_config": SaProtConfig}
    # Default RoPE theta (base frequency); presumably consumed by the rope utils when
    # `rope_parameters` does not specify one — TODO confirm against modeling_rope_utils.
    default_theta = 500000.0
    def __init__(
        self,
        protein_encoder_config: dict | None = None,
        vocab_size: int | None = 128256,  # llama vocab size
        hidden_size: int | None = 4096,  # llama hidden size
        intermediate_size: int | None = 14336,  # llama intermediate size
        num_hidden_layers: int | None = 32,  # llama num layers
        num_attention_heads: int | None = 32,  # llama num heads
        num_key_value_heads: int | None = 8,  # llama num key-value heads
        hidden_act: str | None = "silu",  # llama activation function
        max_position_embeddings: int | None = 8192,  # llama rope max length
        rms_norm_eps: float | None = 1e-05,
        rope_parameters: RopeParameters | dict[str, RopeParameters] | None = None,
        attention_bias: bool | None = False,
        attention_dropout: float | None = 0.0,
        mlp_bias: bool | None = False,
        aligner_ffn_mult: int | None = 4,
        aligner_enable_bias: bool | None = True,
        aligner_attention_probs_dropout_prob: float | None = 0.1,
        aligner_num_add_layers: int | None = 8,
        resampler_depth: int | None = 6,
        resampler_dim_head: int | None = 64,
        resampler_heads: int | None = 8,
        resampler_num_latents: int | None = 64,
        resampler_ff_mult: int | None = 4,
        initializer_range: float | None = 0.02,
        pad_token_id: int | None = None,
        bos_token_id: int | None = 128000,
        eos_token_id: int | None = 128009,
        use_cache: bool | None = False,
        tie_word_embeddings: bool | None = False,
        is_decoder: bool | None = False,
        add_cross_attention: bool | None = False,
        **kwargs,
    ):
        self.is_decoder = is_decoder
        self.add_cross_attention = add_cross_attention
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.max_position_embeddings = max_position_embeddings
        self.rms_norm_eps = rms_norm_eps
        self.tie_word_embeddings = tie_word_embeddings
        self.attention_bias = attention_bias
        self.attention_dropout = attention_dropout
        self.mlp_bias = mlp_bias
        self.aligner_ffn_mult = aligner_ffn_mult
        self.aligner_enable_bias = aligner_enable_bias
        self.aligner_attention_probs_dropout_prob = aligner_attention_probs_dropout_prob
        self.aligner_num_add_layers = aligner_num_add_layers
        self.use_cache = use_cache
        self.initializer_range = initializer_range
        self.resampler_depth = resampler_depth
        self.resampler_dim_head = resampler_dim_head
        self.resampler_heads = resampler_heads
        self.resampler_num_latents = resampler_num_latents
        self.resampler_ff_mult = resampler_ff_mult
        self.rope_parameters = rope_parameters
        # Subconfig
        if protein_encoder_config is None:
            protein_encoder_config = {}
            logger.info("`protein_encoder_config` is `None`. Initializing the `SaProtConfig` with default values.")
        self.protein_encoder_config = SaProtConfig(**protein_encoder_config)
        # NOTE(review): duplicate of the `tie_word_embeddings` assignment above; kept as-is.
        self.tie_word_embeddings = tie_word_embeddings
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(**kwargs)
__all__ = ["EvollaConfig"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/evolla/configuration_evolla.py",
"license": "Apache License 2.0",
"lines": 254,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.