| repo | file_url | file_path | content | language | license | commit_sha | retrieved_at | truncated |
|---|---|---|---|---|---|---|---|---|
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/gemma2/__init__.py | src/transformers/models/gemma2/__init__.py | # Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure
if TYPE_CHECKING:
from .configuration_gemma2 import *
from .modeling_gemma2 import *
else:
import sys
_file = globals()["__file__"]
sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
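The `__init__.py` above is the standard lazy-export shim: under `TYPE_CHECKING` the real symbols are re-exported for static analysis, while at runtime the module replaces itself with a `_LazyModule` that imports submodules only on first attribute access. As a rough illustration of that deferred-import idea, here is a minimal sketch using only PEP 562 module `__getattr__`; the attribute map below is hypothetical and not the actual output of `define_import_structure`.

```python
# Illustrative sketch only -- not transformers internals.
import importlib
from typing import TYPE_CHECKING

# Hypothetical mapping from public attribute to the submodule that defines it.
_LAZY_ATTRS = {
    "Gemma2Config": ".configuration_gemma2",
    "Gemma2Model": ".modeling_gemma2",
}

if TYPE_CHECKING:
    # Type checkers resolve the real imports eagerly.
    from .configuration_gemma2 import Gemma2Config  # noqa: F401
    from .modeling_gemma2 import Gemma2Model  # noqa: F401

def __getattr__(name):
    # Import the owning submodule only when the attribute is first requested.
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
```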
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/gemma2/modeling_gemma2.py | src/transformers/models/gemma2/modeling_gemma2.py | # π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨
# This file was automatically generated from src/transformers/models/gemma2/modular_gemma2.py.
# Do NOT edit this file manually as any edits will be overwritten by the generation of
# the file from the modular. If any change is needed, please apply it to the
# modular_gemma2.py file directly. One of our CI checks enforces this.
# π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨
# coding=utf-8
# Copyright 2024 Google Inc. HuggingFace Inc. team. All rights reserved.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections.abc import Callable
from typing import Optional, Union
import torch
import torch.nn as nn
from ... import initialization as init
from ...activations import ACT2FN
from ...cache_utils import Cache, DynamicCache
from ...generation import GenerationMixin
from ...integrations import use_kernel_func_from_hub, use_kernelized_func
from ...masking_utils import create_causal_mask, create_sliding_window_causal_mask
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from ...modeling_layers import (
GenericForSequenceClassification,
GenericForTokenClassification,
GradientCheckpointingLayer,
)
from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple
from ...utils.generic import check_model_inputs, maybe_autocast
from .configuration_gemma2 import Gemma2Config
class Gemma2RMSNorm(nn.Module):
def __init__(self, dim: int, eps: float = 1e-6):
super().__init__()
self.eps = eps
self.weight = nn.Parameter(torch.zeros(dim))
def _norm(self, x):
return x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps)
def forward(self, x):
output = self._norm(x.float())
# Llama does x.to(float16) * w whilst Gemma2 is (x * w).to(float16)
# See https://github.com/huggingface/transformers/pull/29402
output = output * (1.0 + self.weight.float())
return output.type_as(x)
def extra_repr(self):
return f"{tuple(self.weight.shape)}, eps={self.eps}"
class Gemma2MLP(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.hidden_size = config.hidden_size
self.intermediate_size = config.intermediate_size
self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
self.act_fn = ACT2FN[config.hidden_activation]
def forward(self, x):
down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
return down_proj
class Gemma2RotaryEmbedding(nn.Module):
inv_freq: torch.Tensor # fix linting for `register_buffer`
def __init__(self, config: Gemma2Config, device=None):
super().__init__()
self.max_seq_len_cached = config.max_position_embeddings
self.original_max_seq_len = config.max_position_embeddings
self.config = config
self.rope_type = self.config.rope_parameters["rope_type"]
rope_init_fn: Callable = self.compute_default_rope_parameters
if self.rope_type != "default":
rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
inv_freq, self.attention_scaling = rope_init_fn(self.config, device)
self.register_buffer("inv_freq", inv_freq, persistent=False)
self.register_buffer("original_inv_freq", inv_freq.clone(), persistent=False)
@staticmethod
def compute_default_rope_parameters(
config: Optional[Gemma2Config] = None,
device: Optional["torch.device"] = None,
seq_len: Optional[int] = None,
) -> tuple["torch.Tensor", float]:
"""
Computes the inverse frequencies according to the original RoPE implementation
Args:
config ([`~transformers.PreTrainedConfig`]):
The model configuration.
device (`torch.device`):
The device to use for initialization of the inverse frequencies.
seq_len (`int`, *optional*):
The current sequence length. Unused for this type of RoPE.
Returns:
Tuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the
post-processing scaling factor applied to the computed cos/sin (unused in this type of RoPE).
"""
base = config.rope_parameters["rope_theta"]
dim = getattr(config, "head_dim", None) or config.hidden_size // config.num_attention_heads
attention_factor = 1.0 # Unused in this type of RoPE
# Compute the inverse frequencies
inv_freq = 1.0 / (
base ** (torch.arange(0, dim, 2, dtype=torch.int64).to(device=device, dtype=torch.float) / dim)
)
return inv_freq, attention_factor
@torch.no_grad()
@dynamic_rope_update # power user: used with advanced RoPE types (e.g. dynamic rope)
def forward(self, x, position_ids):
inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
position_ids_expanded = position_ids[:, None, :].float()
device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
with maybe_autocast(device_type=device_type, enabled=False): # Force float32
freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
emb = torch.cat((freqs, freqs), dim=-1)
cos = emb.cos() * self.attention_scaling
sin = emb.sin() * self.attention_scaling
return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
def rotate_half(x):
"""Rotates half the hidden dims of the input."""
x1 = x[..., : x.shape[-1] // 2]
x2 = x[..., x.shape[-1] // 2 :]
return torch.cat((-x2, x1), dim=-1)
@use_kernel_func_from_hub("rotary_pos_emb")
def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
"""Applies Rotary Position Embedding to the query and key tensors.
Args:
q (`torch.Tensor`): The query tensor.
k (`torch.Tensor`): The key tensor.
cos (`torch.Tensor`): The cosine part of the rotary embedding.
sin (`torch.Tensor`): The sine part of the rotary embedding.
position_ids (`torch.Tensor`, *optional*):
Deprecated and unused.
unsqueeze_dim (`int`, *optional*, defaults to 1):
The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
Returns:
`tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
"""
cos = cos.unsqueeze(unsqueeze_dim)
sin = sin.unsqueeze(unsqueeze_dim)
q_embed = (q * cos) + (rotate_half(q) * sin)
k_embed = (k * cos) + (rotate_half(k) * sin)
return q_embed, k_embed
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
"""
This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
"""
batch, num_key_value_heads, slen, head_dim = hidden_states.shape
if n_rep == 1:
return hidden_states
hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
def eager_attention_forward(
module: nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: Optional[torch.Tensor],
dropout: float = 0.0,
scaling: Optional[float] = None,
softcap: Optional[float] = None,
**kwargs,
) -> tuple[torch.Tensor, torch.Tensor]:
if scaling is None:
scaling = module.head_dim**-0.5
key_states = repeat_kv(key, module.num_key_value_groups)
value_states = repeat_kv(value, module.num_key_value_groups)
attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
if softcap is not None:
attn_weights = attn_weights / softcap
attn_weights = torch.tanh(attn_weights)
attn_weights = attn_weights * softcap
if attention_mask is not None: # no matter the length, we just slice it
causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
attn_weights = attn_weights + causal_mask
# upcast attention to fp32
attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
attn_output = torch.matmul(attn_weights, value_states)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, attn_weights
@use_kernelized_func(apply_rotary_pos_emb)
class Gemma2Attention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, config: Gemma2Config, layer_idx: int):
super().__init__()
self.layer_type = config.layer_types[layer_idx] if hasattr(config, "layer_types") else None
self.config = config
self.layer_idx = layer_idx
self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
self.scaling = config.query_pre_attn_scalar**-0.5
self.attention_dropout = self.config.attention_dropout
self.is_causal = not getattr(config, "use_bidirectional_attention", False)
self.q_proj = nn.Linear(
config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias
)
self.k_proj = nn.Linear(
config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
)
self.v_proj = nn.Linear(
config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
)
self.o_proj = nn.Linear(
config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.attention_bias
)
self.attn_logit_softcapping = self.config.attn_logit_softcapping
self.sliding_window = config.sliding_window if self.layer_type == "sliding_attention" else None
def forward(
self,
hidden_states: torch.Tensor,
position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
attention_mask: Optional[torch.Tensor] = None,
past_key_values: Optional[Cache] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs: Unpack[FlashAttentionKwargs],
) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
input_shape = hidden_states.shape[:-1]
hidden_shape = (*input_shape, -1, self.head_dim)
query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)
cos, sin = position_embeddings
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
if past_key_values is not None:
# sin and cos are specific to RoPE models; cache_position needed for the static cache
cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
query_states,
key_states,
value_states,
attention_mask,
dropout=self.attention_dropout if self.training else 0.0,
scaling=self.scaling,
sliding_window=self.sliding_window,
softcap=self.attn_logit_softcapping,
**kwargs,
)
attn_output = attn_output.reshape(*input_shape, -1).contiguous()
attn_output = self.o_proj(attn_output)
return attn_output, attn_weights
class Gemma2DecoderLayer(GradientCheckpointingLayer):
def __init__(self, config: Gemma2Config, layer_idx: int):
super().__init__()
self.hidden_size = config.hidden_size
self.config = config
self.attention_type = config.layer_types[layer_idx]
self.self_attn = Gemma2Attention(config=config, layer_idx=layer_idx)
self.mlp = Gemma2MLP(config)
self.input_layernorm = Gemma2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.post_attention_layernorm = Gemma2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.pre_feedforward_layernorm = Gemma2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.post_feedforward_layernorm = Gemma2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
def forward(
self,
hidden_states: torch.Tensor,
position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs,
) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
residual = hidden_states
hidden_states = self.input_layernorm(hidden_states)
# Self Attention
hidden_states, _ = self.self_attn(
hidden_states=hidden_states,
position_embeddings=position_embeddings,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
cache_position=cache_position,
**kwargs,
)
hidden_states = self.post_attention_layernorm(hidden_states)
hidden_states = residual + hidden_states
residual = hidden_states
hidden_states = self.pre_feedforward_layernorm(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = self.post_feedforward_layernorm(hidden_states)
hidden_states = residual + hidden_states
return hidden_states
@auto_docstring
class Gemma2PreTrainedModel(PreTrainedModel):
config: Gemma2Config
base_model_prefix = "model"
supports_gradient_checkpointing = True
_no_split_modules = ["Gemma2DecoderLayer"]
_skip_keys_device_placement = ["past_key_values"]
_supports_flash_attn = True
_supports_sdpa = True
_supports_flex_attn = True
_can_compile_fullgraph = True
_supports_attention_backend = True
_can_record_outputs = {
"hidden_states": Gemma2DecoderLayer,
"attentions": Gemma2Attention,
}
@torch.no_grad()
def _init_weights(self, module):
super()._init_weights(module)
# We initialize with 0s to be 1 centered as the RMSNorm here does (1 + weight)
if "RMSNorm" in module.__class__.__name__:
init.zeros_(module.weight)
@auto_docstring
class Gemma2Model(Gemma2PreTrainedModel):
def __init__(self, config: Gemma2Config):
super().__init__(config)
self.padding_idx = config.pad_token_id
self.vocab_size = config.vocab_size
self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
self.layers = nn.ModuleList(
[Gemma2DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
)
self.norm = Gemma2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.rotary_emb = Gemma2RotaryEmbedding(config)
self.gradient_checkpointing = False
# Initialize weights and apply final processing
self.post_init()
@check_model_inputs
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
use_cache: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> BaseModelOutputWithPast:
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
if inputs_embeds is None:
inputs_embeds: torch.Tensor = self.embed_tokens(input_ids)
if use_cache and past_key_values is None:
past_key_values = DynamicCache(config=self.config)
if cache_position is None:
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
cache_position = torch.arange(
past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
)
if position_ids is None:
position_ids = cache_position.unsqueeze(0)
# It may already have been prepared by e.g. `generate`
if not isinstance(causal_mask_mapping := attention_mask, dict):
# Prepare mask arguments
mask_kwargs = {
"config": self.config,
"input_embeds": inputs_embeds,
"attention_mask": attention_mask,
"cache_position": cache_position,
"past_key_values": past_key_values,
"position_ids": position_ids,
}
# Create the masks
causal_mask_mapping = {
"full_attention": create_causal_mask(**mask_kwargs),
"sliding_attention": create_sliding_window_causal_mask(**mask_kwargs),
}
# embed positions
hidden_states = inputs_embeds
position_embeddings = self.rotary_emb(hidden_states, position_ids)
# normalized
# Gemma2 downcasts the below to float16, causing sqrt(3072)=55.4256 to become 55.5
# See https://github.com/huggingface/transformers/pull/29402
normalizer = torch.tensor(self.config.hidden_size**0.5, dtype=hidden_states.dtype)
hidden_states = hidden_states * normalizer
for decoder_layer in self.layers[: self.config.num_hidden_layers]:
hidden_states = decoder_layer(
hidden_states,
attention_mask=causal_mask_mapping[decoder_layer.attention_type],
position_embeddings=position_embeddings,
position_ids=position_ids,
past_key_values=past_key_values,
cache_position=cache_position,
**kwargs,
)
hidden_states = self.norm(hidden_states)
return BaseModelOutputWithPast(
last_hidden_state=hidden_states,
past_key_values=past_key_values,
)
@auto_docstring
class Gemma2ForCausalLM(Gemma2PreTrainedModel, GenerationMixin):
_tied_weights_keys = {"lm_head.weight": "model.embed_tokens.weight"}
_tp_plan = {"lm_head": "colwise_rep"}
_pp_plan = {"lm_head": (["hidden_states"], ["logits"])}
def __init__(self, config):
super().__init__(config)
self.model = Gemma2Model(config)
self.vocab_size = config.vocab_size
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
# Initialize weights and apply final processing
self.post_init()
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
logits_to_keep: Union[int, torch.Tensor] = 0,
**kwargs: Unpack[TransformersKwargs],
) -> CausalLMOutputWithPast:
r"""
Example:
```python
>>> from transformers import AutoTokenizer, Gemma2ForCausalLM
>>> model = Gemma2ForCausalLM.from_pretrained("google/gemma-2-9b")
>>> tokenizer = AutoTokenizer.from_pretrained("google/gemma-2-9b")
>>> prompt = "What is your favorite condiment?"
>>> inputs = tokenizer(prompt, return_tensors="pt")
>>> # Generate
>>> generate_ids = model.generate(inputs.input_ids, max_length=30)
>>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
"What is your favorite condiment?"
```"""
# decoder outputs consist of (dec_features, layer_state, dec_hidden, dec_attn)
outputs: BaseModelOutputWithPast = self.model(
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
cache_position=cache_position,
**kwargs,
)
hidden_states = outputs.last_hidden_state
# Only compute necessary logits, and do not upcast them to float if we are not computing the loss
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_head(hidden_states[:, slice_indices, :])
if self.config.final_logit_softcapping is not None:
logits = logits / self.config.final_logit_softcapping
logits = torch.tanh(logits)
logits = logits * self.config.final_logit_softcapping
loss = None
if labels is not None:
loss = self.loss_function(logits, labels, self.vocab_size, **kwargs)
return CausalLMOutputWithPast(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
class Gemma2ForSequenceClassification(GenericForSequenceClassification, Gemma2PreTrainedModel):
pass
class Gemma2ForTokenClassification(GenericForTokenClassification, Gemma2PreTrainedModel):
pass
__all__ = [
"Gemma2ForCausalLM",
"Gemma2Model",
"Gemma2PreTrainedModel",
"Gemma2ForSequenceClassification",
"Gemma2ForTokenClassification",
]
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
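Two details that set Gemma2 apart in the modeling file above are the `(1 + weight)` RMSNorm and the tanh soft-capping applied to attention logits (`attn_logit_softcapping`) and final logits (`final_logit_softcapping`). The following self-contained sketch just evaluates that soft-cap formula numerically; the input values are made up for illustration.

```python
import torch

def softcap(logits: torch.Tensor, cap: float) -> torch.Tensor:
    # Same formula as in eager_attention_forward / Gemma2ForCausalLM.forward:
    # divide by the cap, squash with tanh, rescale -- every value ends up in (-cap, cap).
    return torch.tanh(logits / cap) * cap

x = torch.tensor([-500.0, -5.0, 0.0, 5.0, 500.0])
print(softcap(x, cap=50.0))
# tensor([-50.0000,  -4.9834,   0.0000,   4.9834,  50.0000])
# Extreme logits saturate near +/-cap while small ones pass through almost unchanged.
```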
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/bert_generation/configuration_bert_generation.py | src/transformers/models/bert_generation/configuration_bert_generation.py | # coding=utf-8
# Copyright 2020 The Google AI Language Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BertGeneration model configuration"""
from ...configuration_utils import PreTrainedConfig
class BertGenerationConfig(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`BertGenerationPreTrainedModel`]. It is used to
instantiate a BertGeneration model according to the specified arguments, defining the model architecture.
Instantiating a configuration with the defaults will yield a similar configuration to that of the BertGeneration
[google/bert_for_seq_generation_L-24_bbc_encoder](https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder)
architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 50358):
Vocabulary size of the BERT model. Defines the number of different tokens that can be represented by the
`input_ids` passed when calling [`BertGenerationEncoder`].
hidden_size (`int`, *optional*, defaults to 1024):
Dimensionality of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 24):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (`int`, *optional*, defaults to 4096):
Dimensionality of the "intermediate" (often called feed-forward) layer in the Transformer encoder.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"silu"` and `"gelu_new"` are supported.
hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout ratio for the attention probabilities.
max_position_embeddings (`int`, *optional*, defaults to 512):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-12):
The epsilon used by the layer normalization layers.
pad_token_id (`int`, *optional*, defaults to 0):
Padding token id.
bos_token_id (`int`, *optional*, defaults to 2):
Beginning of stream token id.
eos_token_id (`int`, *optional*, defaults to 1):
End of stream token id.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
Examples:
```python
>>> from transformers import BertGenerationConfig, BertGenerationEncoder
>>> # Initializing a BertGeneration config
>>> configuration = BertGenerationConfig()
>>> # Initializing a model (with random weights) from the config
>>> model = BertGenerationEncoder(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "bert-generation"
def __init__(
self,
vocab_size=50358,
hidden_size=1024,
num_hidden_layers=24,
num_attention_heads=16,
intermediate_size=4096,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
initializer_range=0.02,
layer_norm_eps=1e-12,
pad_token_id=0,
bos_token_id=2,
eos_token_id=1,
use_cache=True,
**kwargs,
):
super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.use_cache = use_cache
__all__ = ["BertGenerationConfig"]
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
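The docstring above already shows the encoder case; in practice the same config is often flipped into decoder mode before building a `BertGenerationDecoder`. Below is a small sketch with randomly initialized weights; the size values are arbitrary and chosen only to keep the model tiny.

```python
from transformers import BertGenerationConfig, BertGenerationDecoder

# is_decoder=True turns on causal masking; use_cache keeps key/value states
# around for autoregressive generation.
config = BertGenerationConfig(
    vocab_size=32000,
    hidden_size=256,
    num_hidden_layers=4,
    num_attention_heads=4,
    intermediate_size=512,
    is_decoder=True,
    use_cache=True,
)
model = BertGenerationDecoder(config)
print(f"{sum(p.numel() for p in model.parameters()):,} parameters")
```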
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/bert_generation/modeling_bert_generation.py | src/transformers/models/bert_generation/modeling_bert_generation.py | # coding=utf-8
# Copyright 2020 The Google AI Language Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BERT model specific for generation."""
from collections.abc import Callable
from typing import Optional, Union
import torch
from torch import nn
from ... import initialization as init
from ...activations import ACT2FN
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from ...generation import GenerationMixin
from ...masking_utils import create_bidirectional_mask, create_causal_mask
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...pytorch_utils import apply_chunking_to_forward
from ...utils import (
TransformersKwargs,
auto_docstring,
logging,
)
from ...utils.generic import can_return_tuple, check_model_inputs
from .configuration_bert_generation import BertGenerationConfig
logger = logging.get_logger(__name__)
# Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->BertGeneration
class BertGenerationSelfOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
# Copied from transformers.models.bert.modeling_bert.eager_attention_forward
def eager_attention_forward(
module: nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: Optional[torch.Tensor],
scaling: Optional[float] = None,
dropout: float = 0.0,
**kwargs: Unpack[TransformersKwargs],
):
if scaling is None:
scaling = query.size(-1) ** -0.5
# Take the dot product between "query" and "key" to get the raw attention scores.
attn_weights = torch.matmul(query, key.transpose(2, 3)) * scaling
if attention_mask is not None:
attention_mask = attention_mask[:, :, :, : key.shape[-2]]
attn_weights = attn_weights + attention_mask
attn_weights = nn.functional.softmax(attn_weights, dim=-1)
attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
attn_output = torch.matmul(attn_weights, value)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, attn_weights
# Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->BertGeneration
class BertGenerationSelfAttention(nn.Module):
def __init__(self, config, is_causal=False, layer_idx=None):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
raise ValueError(
f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
f"heads ({config.num_attention_heads})"
)
self.config = config
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.scaling = self.attention_head_size**-0.5
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
self.is_decoder = config.is_decoder
self.is_causal = is_causal
self.layer_idx = layer_idx
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.FloatTensor] = None,
past_key_values: Optional[Cache] = None,
cache_position: Optional[torch.Tensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> tuple[torch.Tensor]:
input_shape = hidden_states.shape[:-1]
hidden_shape = (*input_shape, -1, self.attention_head_size)
# get all proj
query_layer = self.query(hidden_states).view(*hidden_shape).transpose(1, 2)
key_layer = self.key(hidden_states).view(*hidden_shape).transpose(1, 2)
value_layer = self.value(hidden_states).view(*hidden_shape).transpose(1, 2)
if past_key_values is not None:
# decoder-only bert can have a simple dynamic cache for example
current_past_key_values = past_key_values
if isinstance(past_key_values, EncoderDecoderCache):
current_past_key_values = past_key_values.self_attention_cache
# save all key/value_layer to cache to be re-used for fast auto-regressive generation
key_layer, value_layer = current_past_key_values.update(
key_layer,
value_layer,
self.layer_idx,
{"cache_position": cache_position},
)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
query_layer,
key_layer,
value_layer,
attention_mask,
dropout=0.0 if not self.training else self.dropout.p,
scaling=self.scaling,
**kwargs,
)
attn_output = attn_output.reshape(*input_shape, -1).contiguous()
return attn_output, attn_weights
# Copied from transformers.models.bert.modeling_bert.BertCrossAttention with Bert->BertGeneration
class BertGenerationCrossAttention(nn.Module):
def __init__(self, config, is_causal=False, layer_idx=None):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
raise ValueError(
f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
f"heads ({config.num_attention_heads})"
)
self.config = config
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.scaling = self.attention_head_size**-0.5
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
self.is_causal = is_causal
self.layer_idx = layer_idx
def forward(
self,
hidden_states: torch.Tensor,
encoder_hidden_states: Optional[torch.FloatTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
past_key_values: Optional[EncoderDecoderCache] = None,
**kwargs: Unpack[TransformersKwargs],
) -> tuple[torch.Tensor]:
# determine input shapes
bsz, tgt_len = hidden_states.shape[:-1]
src_len = encoder_hidden_states.shape[1]
q_input_shape = (bsz, tgt_len, -1, self.attention_head_size)
kv_input_shape = (bsz, src_len, -1, self.attention_head_size)
# get query proj
query_layer = self.query(hidden_states).view(*q_input_shape).transpose(1, 2)
is_updated = past_key_values.is_updated.get(self.layer_idx) if past_key_values is not None else False
if past_key_values is not None and is_updated:
# reuse k,v, cross_attentions
key_layer = past_key_values.cross_attention_cache.layers[self.layer_idx].keys
value_layer = past_key_values.cross_attention_cache.layers[self.layer_idx].values
else:
key_layer = self.key(encoder_hidden_states).view(*kv_input_shape).transpose(1, 2)
value_layer = self.value(encoder_hidden_states).view(*kv_input_shape).transpose(1, 2)
if past_key_values is not None:
# save all states to the cache
key_layer, value_layer = past_key_values.cross_attention_cache.update(
key_layer, value_layer, self.layer_idx
)
# set flag that curr layer for cross-attn is already updated so we can re-use in subsequent calls
past_key_values.is_updated[self.layer_idx] = True
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
query_layer,
key_layer,
value_layer,
attention_mask,
dropout=0.0 if not self.training else self.dropout.p,
scaling=self.scaling,
**kwargs,
)
attn_output = attn_output.reshape(bsz, tgt_len, -1).contiguous()
return attn_output, attn_weights
# Copied from transformers.models.bert.modeling_bert.BertAttention with Bert->BertGeneration,BERT->BERT_GENERATION
class BertGenerationAttention(nn.Module):
def __init__(self, config, is_causal=False, layer_idx=None, is_cross_attention=False):
super().__init__()
self.is_cross_attention = is_cross_attention
attention_class = BertGenerationCrossAttention if is_cross_attention else BertGenerationSelfAttention
self.self = attention_class(config, is_causal=is_causal, layer_idx=layer_idx)
self.output = BertGenerationSelfOutput(config)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.FloatTensor] = None,
encoder_hidden_states: Optional[torch.FloatTensor] = None,
encoder_attention_mask: Optional[torch.FloatTensor] = None,
past_key_values: Optional[Cache] = None,
cache_position: Optional[torch.Tensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> tuple[torch.Tensor]:
attention_mask = attention_mask if not self.is_cross_attention else encoder_attention_mask
attention_output, attn_weights = self.self(
hidden_states,
encoder_hidden_states=encoder_hidden_states,
attention_mask=attention_mask,
past_key_values=past_key_values,
cache_position=cache_position,
**kwargs,
)
attention_output = self.output(attention_output, hidden_states)
return attention_output, attn_weights
# Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->BertGeneration
class BertGenerationIntermediate(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
# Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->BertGeneration
class BertGenerationOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
# Copied from transformers.models.bert.modeling_bert.BertLayer with Bert->BertGeneration
class BertGenerationLayer(GradientCheckpointingLayer):
def __init__(self, config, layer_idx=None):
super().__init__()
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.attention = BertGenerationAttention(config, is_causal=config.is_decoder, layer_idx=layer_idx)
self.is_decoder = config.is_decoder
self.add_cross_attention = config.add_cross_attention
if self.add_cross_attention:
if not self.is_decoder:
raise ValueError(f"{self} should be used as a decoder model if cross attention is added")
self.crossattention = BertGenerationAttention(
config,
is_causal=False,
layer_idx=layer_idx,
is_cross_attention=True,
)
self.intermediate = BertGenerationIntermediate(config)
self.output = BertGenerationOutput(config)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.FloatTensor] = None,
encoder_hidden_states: Optional[torch.FloatTensor] = None,
encoder_attention_mask: Optional[torch.FloatTensor] = None,
past_key_values: Optional[Cache] = None,
cache_position: Optional[torch.Tensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> tuple[torch.Tensor]:
self_attention_output, _ = self.attention(
hidden_states,
attention_mask,
past_key_values=past_key_values,
cache_position=cache_position,
**kwargs,
)
attention_output = self_attention_output
if self.is_decoder and encoder_hidden_states is not None:
if not hasattr(self, "crossattention"):
raise ValueError(
f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers"
" by setting `config.add_cross_attention=True`"
)
cross_attention_output, _ = self.crossattention(
self_attention_output,
None, # attention_mask
encoder_hidden_states,
encoder_attention_mask,
past_key_values=past_key_values,
**kwargs,
)
attention_output = cross_attention_output
layer_output = apply_chunking_to_forward(
self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
)
return layer_output
def feed_forward_chunk(self, attention_output):
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output
# Copied from transformers.models.bert.modeling_bert.BertEncoder
class BertEncoder(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
# Ignore copy
self.layer = nn.ModuleList([BertGenerationLayer(config, layer_idx=i) for i in range(config.num_hidden_layers)])
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.FloatTensor] = None,
encoder_hidden_states: Optional[torch.FloatTensor] = None,
encoder_attention_mask: Optional[torch.FloatTensor] = None,
past_key_values: Optional[Cache] = None,
use_cache: Optional[bool] = None,
cache_position: Optional[torch.Tensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]:
for i, layer_module in enumerate(self.layer):
hidden_states = layer_module(
hidden_states,
attention_mask,
encoder_hidden_states, # as a positional argument for gradient checkpointing
encoder_attention_mask=encoder_attention_mask,
past_key_values=past_key_values,
cache_position=cache_position,
**kwargs,
)
return BaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
past_key_values=past_key_values if use_cache else None,
)
class BertGenerationEmbeddings(nn.Module):
"""Construct the embeddings from word and position embeddings."""
def __init__(self, config):
super().__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
# position_ids (1, len position emb) is contiguous in memory and exported when serialized
self.register_buffer(
"position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
)
def forward(self, input_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0):
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
seq_length = input_shape[1]
if position_ids is None:
position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
position_embeddings = self.position_embeddings(position_ids)
embeddings = inputs_embeds + position_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
@auto_docstring
class BertGenerationPreTrainedModel(PreTrainedModel):
config_class = BertGenerationConfig
base_model_prefix = "bert"
supports_gradient_checkpointing = True
_supports_flash_attn = True
_supports_sdpa = True
_supports_flex_attn = True
_supports_attention_backend = True
_can_record_outputs = {
"hidden_states": BertGenerationLayer,
"attentions": BertGenerationSelfAttention,
"cross_attentions": BertGenerationCrossAttention,
}
@torch.no_grad()
def _init_weights(self, module):
"""Initialize the weights"""
super()._init_weights(module)
if isinstance(module, BertGenerationOnlyLMHead):
init.zeros_(module.bias)
elif isinstance(module, BertGenerationEmbeddings):
init.copy_(module.position_ids, torch.arange(module.position_ids.shape[-1]).expand((1, -1)))
@auto_docstring(
custom_intro="""
The bare BertGeneration model transformer outputting raw hidden-states without any specific head on top.
"""
)
class BertGenerationEncoder(BertGenerationPreTrainedModel):
"""
The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
cross-attention is added between the self-attention layers, following the architecture described in [Attention is
all you need](https://huggingface.co/papers/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
This model should be used when leveraging Bert or Roberta checkpoints for the [`EncoderDecoderModel`] class as
described in [Leveraging Pre-trained Checkpoints for Sequence Generation Tasks](https://huggingface.co/papers/1907.12461)
by Sascha Rothe, Shashi Narayan, and Aliaksei Severyn.
To behave as a decoder the model needs to be initialized with the `is_decoder` argument of the configuration set
to `True`. To be used in a Seq2Seq model, the model needs to be initialized with both `is_decoder` and
`add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass.
"""
def __init__(self, config):
super().__init__(config)
self.config = config
self.gradient_checkpointing = False
self.embeddings = BertGenerationEmbeddings(config)
self.encoder = BertEncoder(config)
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
@check_model_inputs
@auto_docstring
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
encoder_hidden_states: Optional[torch.Tensor] = None,
encoder_attention_mask: Optional[torch.Tensor] = None,
past_key_values: Optional[Cache] = None,
use_cache: Optional[bool] = None,
cache_position: Optional[torch.Tensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]:
if self.config.is_decoder:
use_cache = use_cache if use_cache is not None else self.config.use_cache
else:
use_cache = False
if use_cache and past_key_values is None:
past_key_values = (
EncoderDecoderCache(DynamicCache(config=self.config), DynamicCache(config=self.config))
if encoder_hidden_states is not None or self.config.is_encoder_decoder
else DynamicCache(config=self.config)
)
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
if input_ids is not None:
device = input_ids.device
input_shape = input_ids.shape
else:
device = inputs_embeds.device
input_shape = inputs_embeds.shape[:-1]
seq_length = input_shape[1]
past_key_values_length = past_key_values.get_seq_length() if past_key_values is not None else 0
if cache_position is None:
cache_position = torch.arange(past_key_values_length, past_key_values_length + seq_length, device=device)
embedding_output = self.embeddings(
input_ids=input_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
past_key_values_length=past_key_values_length,
)
attention_mask, encoder_attention_mask = self._create_attention_masks(
attention_mask=attention_mask,
encoder_attention_mask=encoder_attention_mask,
embedding_output=embedding_output,
encoder_hidden_states=encoder_hidden_states,
cache_position=cache_position,
past_key_values=past_key_values,
)
encoder_outputs = self.encoder(
embedding_output,
attention_mask=attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
past_key_values=past_key_values,
use_cache=use_cache,
cache_position=cache_position,
position_ids=position_ids,
**kwargs,
)
sequence_output = encoder_outputs[0]
return BaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=sequence_output,
past_key_values=encoder_outputs.past_key_values,
)
# Copied from transformers.models.bert.modeling_bert.BertModel._create_attention_masks
def _create_attention_masks(
self,
attention_mask,
encoder_attention_mask,
embedding_output,
encoder_hidden_states,
cache_position,
past_key_values,
):
if self.config.is_decoder:
attention_mask = create_causal_mask(
config=self.config,
input_embeds=embedding_output,
attention_mask=attention_mask,
cache_position=cache_position,
past_key_values=past_key_values,
)
else:
attention_mask = create_bidirectional_mask(
config=self.config,
input_embeds=embedding_output,
attention_mask=attention_mask,
)
if encoder_attention_mask is not None:
encoder_attention_mask = create_bidirectional_mask(
config=self.config,
input_embeds=embedding_output,
attention_mask=encoder_attention_mask,
encoder_hidden_states=encoder_hidden_states,
)
return attention_mask, encoder_attention_mask
class BertGenerationOnlyLMHead(nn.Module):
def __init__(self, config):
super().__init__()
self.decoder = nn.Linear(config.hidden_size, config.vocab_size)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
def forward(self, hidden_states):
logits = self.decoder(hidden_states)
return logits
@auto_docstring(
custom_intro="""
BertGeneration Model with a `language modeling` head on top for CLM fine-tuning.
"""
)
class BertGenerationDecoder(BertGenerationPreTrainedModel, GenerationMixin):
_tied_weights_keys = {
"lm_head.decoder.weight": "bert.embeddings.word_embeddings.weight",
"lm_head.decoder.bias": "lm_head.bias",
}
def __init__(self, config):
super().__init__(config)
if not config.is_decoder:
logger.warning("If you want to use `BertGenerationDecoder` as a standalone, add `is_decoder=True.`")
self.bert = BertGenerationEncoder(config)
self.lm_head = BertGenerationOnlyLMHead(config)
# Initialize weights and apply final processing
self.post_init()
def get_output_embeddings(self):
return self.lm_head.decoder
def set_output_embeddings(self, new_embeddings):
self.lm_head.decoder = new_embeddings
self.lm_head.bias = new_embeddings.bias
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
encoder_hidden_states: Optional[torch.Tensor] = None,
encoder_attention_mask: Optional[torch.Tensor] = None,
labels: Optional[torch.Tensor] = None,
past_key_values: Optional[tuple[tuple[torch.FloatTensor]]] = None,
use_cache: Optional[bool] = None,
cache_position: Optional[torch.Tensor] = None,
logits_to_keep: Union[int, torch.Tensor] = 0,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple, CausalLMOutputWithCrossAttentions]:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
`[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are
ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
Example:
```python
>>> from transformers import AutoTokenizer, BertGenerationDecoder, BertGenerationConfig
>>> import torch
>>> tokenizer = AutoTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
>>> config = BertGenerationConfig.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
>>> config.is_decoder = True
>>> model = BertGenerationDecoder.from_pretrained(
... "google/bert_for_seq_generation_L-24_bbc_encoder", config=config
... )
>>> inputs = tokenizer("Hello, my dog is cute", return_token_type_ids=False, return_tensors="pt")
>>> outputs = model(**inputs)
>>> prediction_logits = outputs.logits
```"""
if labels is not None:
use_cache = False
outputs: BaseModelOutputWithPastAndCrossAttentions = self.bert(
input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
past_key_values=past_key_values,
use_cache=use_cache,
cache_position=cache_position,
return_dict=True,
**kwargs,
)
hidden_states = outputs.last_hidden_state
# Only compute necessary logits, and do not upcast them to float if we are not computing the loss
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_head(hidden_states[:, slice_indices, :])
loss = None
if labels is not None:
loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)
return CausalLMOutputWithCrossAttentions(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
cross_attentions=outputs.cross_attentions,
)
__all__ = [
"BertGenerationDecoder",
"BertGenerationEncoder",
"BertGenerationPreTrainedModel",
]
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
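As the `BertGenerationEncoder` docstring notes, these classes are meant to be combined through `EncoderDecoderModel` when leveraging pre-trained checkpoints for sequence generation. A minimal sketch of that pairing, using the checkpoint name from the docstrings above; the input text and generation settings are illustrative only.

```python
from transformers import AutoTokenizer, EncoderDecoderModel

ckpt = "google/bert_for_seq_generation_L-24_bbc_encoder"
# from_encoder_decoder_pretrained instantiates the decoder side with
# is_decoder=True and add_cross_attention=True.
model = EncoderDecoderModel.from_encoder_decoder_pretrained(ckpt, ckpt)
tokenizer = AutoTokenizer.from_pretrained(ckpt)

inputs = tokenizer("A long article to be summarized goes here.", return_tensors="pt")
summary_ids = model.generate(
    inputs.input_ids,
    max_length=24,
    decoder_start_token_id=model.config.decoder.bos_token_id,
)
print(tokenizer.decode(summary_ids[0], skip_special_tokens=True))
```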
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/bert_generation/__init__.py | src/transformers/models/bert_generation/__init__.py | # Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure
if TYPE_CHECKING:
from .configuration_bert_generation import *
from .modeling_bert_generation import *
from .tokenization_bert_generation import *
else:
import sys
_file = globals()["__file__"]
sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/bert_generation/tokenization_bert_generation.py | src/transformers/models/bert_generation/tokenization_bert_generation.py | # coding=utf-8
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization class for model BertGeneration."""
from typing import Any, Optional
from ...tokenization_utils_sentencepiece import SentencePieceBackend
from ...utils import logging
from ...utils.import_utils import requires
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
@requires(backends=("sentencepiece",))
class BertGenerationTokenizer(SentencePieceBackend):
"""
Construct a BertGeneration tokenizer. Based on [SentencePiece](https://github.com/google/sentencepiece).
This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
this superclass for more information regarding those methods.
Args:
vocab_file (`str`):
[SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that
contains the vocabulary necessary to instantiate a tokenizer.
bos_token (`str`, *optional*, defaults to `"<s>"`):
The beginning of sequence token.
eos_token (`str`, *optional*, defaults to `"</s>"`):
The end of sequence token.
unk_token (`str`, *optional*, defaults to `"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
pad_token (`str`, *optional*, defaults to `"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
sep_token (`str`, *optional*, defaults to `"<::::>"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
sp_model_kwargs (`dict`, *optional*):
Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
to set:
- `enable_sampling`: Enable subword regularization.
- `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.
- `nbest_size = {0,1}`: No sampling is performed.
- `nbest_size > 1`: samples from the nbest_size results.
- `nbest_size < 0`: assuming that nbest_size is infinite and samples from all hypotheses (lattice)
using the forward-filtering-and-backward-sampling algorithm.
- `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
BPE-dropout.
"""
vocab_files_names = VOCAB_FILES_NAMES
prefix_tokens: list[int] = []
model_input_names = ["input_ids", "attention_mask"]
is_fast = False
def __init__(
self,
vocab_file,
bos_token="<s>",
eos_token="</s>",
unk_token="<unk>",
pad_token="<pad>",
sep_token="<::::>",
sp_model_kwargs: Optional[dict[str, Any]] = None,
**kwargs,
) -> None:
self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
# Call parent init (which will load sp_model)
super().__init__(
vocab_file=vocab_file,
bos_token=bos_token,
eos_token=eos_token,
unk_token=unk_token,
pad_token=pad_token,
sep_token=sep_token,
sp_model_kwargs=self.sp_model_kwargs,
special_tokens_pattern="none",
**kwargs,
)
__all__ = ["BertGenerationTokenizer"]
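# Illustrative usage sketch (not part of the original file): enabling the subword
# regularization options documented in `sp_model_kwargs` above. The checkpoint
# name is an assumption taken from the public BertGeneration documentation.
def _example_subword_regularization():
    from transformers import BertGenerationTokenizer

    tokenizer = BertGenerationTokenizer.from_pretrained(
        "google/bert_for_seq_generation_L-24_bbc_encoder",  # assumed checkpoint
        sp_model_kwargs={"enable_sampling": True, "nbest_size": -1, "alpha": 0.1},
    )
    # With sampling enabled, repeated calls may segment the same text differently.
    return tokenizer("The dog is cute.").input_ids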
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/sam2/convert_sam2_to_hf.py | src/transformers/models/sam2/convert_sam2_to_hf.py | # coding=utf-8
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Convert SAM checkpoints from the original repository.
URL: https://github.com/facebookresearch/segment-anything-2.
"""
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
Sam2Config,
Sam2HieraDetConfig,
Sam2ImageProcessorFast,
Sam2MaskDecoderConfig,
Sam2Model,
Sam2Processor,
Sam2PromptEncoderConfig,
Sam2VisionConfig,
)
def get_config(model_name):
if "hiera_tiny" in model_name:
hiera_det_config = Sam2HieraDetConfig()
vision_config = Sam2VisionConfig(backbone_config=hiera_det_config)
elif "hiera_small" in model_name:
hiera_det_config = Sam2HieraDetConfig(blocks_per_stage=[1, 2, 11, 2], global_attention_blocks=[7, 10, 13])
vision_config = Sam2VisionConfig(backbone_config=hiera_det_config)
elif "hiera_base_plus" in model_name:
hiera_det_config = Sam2HieraDetConfig(
hidden_size=112,
embed_dim_per_stage=[112, 224, 448, 896],
num_attention_heads_per_stage=[2, 4, 8, 16],
blocks_per_stage=[2, 3, 16, 3],
global_attention_blocks=[12, 16, 20],
window_positional_embedding_background_size=(14, 14),
)
vision_config = Sam2VisionConfig(
backbone_config=hiera_det_config,
backbone_channel_list=[896, 448, 224, 112],
)
elif "hiera_large" in model_name:
hiera_det_config = Sam2HieraDetConfig(
hidden_size=144,
embed_dim_per_stage=[144, 288, 576, 1152],
num_attention_heads_per_stage=[2, 4, 8, 16],
blocks_per_stage=[2, 6, 36, 4],
global_attention_blocks=[23, 33, 43],
window_positional_embedding_background_size=(7, 7),
window_size_per_stage=[8, 4, 16, 8],
)
vision_config = Sam2VisionConfig(
backbone_config=hiera_det_config,
backbone_channel_list=[1152, 576, 288, 144],
)
prompt_encoder_config = Sam2PromptEncoderConfig()
mask_decoder_config = Sam2MaskDecoderConfig()
if "sam2.1" in model_name:
enable_temporal_pos_encoding_for_object_pointers = True
enable_occlusion_spatial_embedding = True
else:
enable_temporal_pos_encoding_for_object_pointers = False
enable_occlusion_spatial_embedding = False
config = Sam2Config(
vision_config=vision_config,
prompt_encoder_config=prompt_encoder_config,
mask_decoder_config=mask_decoder_config,
enable_temporal_pos_encoding_for_object_pointers=enable_temporal_pos_encoding_for_object_pointers,
enable_occlusion_spatial_embedding=enable_occlusion_spatial_embedding,
)
return config
KEYS_TO_MODIFY_MAPPING = {
"iou_prediction_head.layers.0": "iou_prediction_head.proj_in",
"iou_prediction_head.layers.1": "iou_prediction_head.layers.0",
"iou_prediction_head.layers.2": "iou_prediction_head.proj_out",
"mask_decoder.output_upscaling.0": "mask_decoder.upscale_conv1",
"mask_decoder.output_upscaling.1": "mask_decoder.upscale_layer_norm",
"mask_decoder.output_upscaling.3": "mask_decoder.upscale_conv2",
"mask_downscaling.0": "mask_embed.conv1",
"mask_downscaling.1": "mask_embed.layer_norm1",
"mask_downscaling.3": "mask_embed.conv2",
"mask_downscaling.4": "mask_embed.layer_norm2",
"mask_downscaling.6": "mask_embed.conv3",
"dwconv": "depthwise_conv",
"pwconv": "pointwise_conv",
"fuser": "memory_fuser",
"point_embeddings": "point_embed",
"pe_layer.positional_encoding_gaussian_matrix": "shared_embedding.positional_embedding",
"obj_ptr_tpos_proj": "temporal_positional_encoding_projection_layer",
"no_obj_embed_spatial": "occlusion_spatial_embedding_parameter",
"sam_prompt_encoder": "prompt_encoder",
"sam_mask_decoder": "mask_decoder",
"maskmem_tpos_enc": "memory_temporal_positional_encoding",
"gamma": "scale",
"image_encoder.neck": "vision_encoder.neck",
"image_encoder": "vision_encoder.backbone",
"neck.0": "neck.conv1",
"neck.1": "neck.layer_norm1",
"neck.2": "neck.conv2",
"neck.3": "neck.layer_norm2",
"pix_feat_proj": "feature_projection",
"patch_embed.proj": "patch_embed.projection",
"no_mem_embed": "no_memory_embedding",
"no_mem_pos_enc": "no_memory_positional_encoding",
"obj_ptr": "object_pointer",
".norm": ".layer_norm",
"trunk.": "",
"out_proj": "o_proj",
}
def replace_keys(state_dict):
model_state_dict = {}
output_hypernetworks_mlps_pattern = r".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"
output_mask_decoder_mlps_pattern = r"mask_decoder.transformer.layers.(\d+).mlp.layers.(\d+).*"
output_mask_decoder_score_head_pattern = r"mask_decoder.pred_obj_score_head.layers.(\d+).*"
output_vision_encoder_mlps_pattern = r"vision_encoder.backbone.blocks.(\d+).mlp.layers.(\d+).*"
output_vision_encoder_neck_pattern = r"vision_encoder.neck.convs.(\d+).conv"
output_memory_encoder_projection_pattern = r"memory_encoder.o_proj.*"
output_object_pointer_proj_pattern = r"object_pointer_proj.layers.(\d+).*"
# Stack the point embed module list:
for key, value in state_dict.items():
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
key = key.replace(key_to_modify, new_key)
# vision_encoder.blocks.0.mlp.layers.1.weight -> vision_encoder.blocks.0.mlp.proj_out.weight
if re.match(output_vision_encoder_mlps_pattern, key):
layer_nb = int(re.match(output_vision_encoder_mlps_pattern, key).group(2))
if layer_nb == 0:
key = key.replace("layers.0", "proj_in")
elif layer_nb == 1:
key = key.replace("layers.1", "proj_out")
# mask_decoder.transformer.layers.0.mlp.layers.1.weight -> mask_decoder.transformer.layers.1.mlp.proj_out.weight
if re.match(output_mask_decoder_mlps_pattern, key):
layer_nb = int(re.match(output_mask_decoder_mlps_pattern, key).group(2))
if layer_nb == 0:
key = key.replace("mlp.layers.0", "mlp.proj_in")
elif layer_nb == 1:
key = key.replace("mlp.layers.1", "mlp.proj_out")
# mask_decoder.pred_obj_score_head.layers.1.weight -> mask_decoder.pred_obj_score_head.proj_in.weight
if re.match(output_mask_decoder_score_head_pattern, key):
layer_nb = int(re.match(output_mask_decoder_score_head_pattern, key).group(1))
if layer_nb == 0:
key = key.replace("layers.0", "proj_in")
elif layer_nb == 1:
key = key.replace("layers.1", "layers.0")
elif layer_nb == 2:
key = key.replace("layers.2", "proj_out")
if re.match(output_hypernetworks_mlps_pattern, key):
layer_nb = int(re.match(output_hypernetworks_mlps_pattern, key).group(2))
if layer_nb == 0:
key = key.replace("layers.0", "proj_in")
elif layer_nb == 1:
key = key.replace("layers.1", "layers.0")
elif layer_nb == 2:
key = key.replace("layers.2", "proj_out")
# vision_encoder.neck.convs.1.conv.bias -> vision_encoder.neck.convs.1.bias
if re.match(output_vision_encoder_neck_pattern, key):
key = key.replace(".conv.", ".")
# memory_encoder.out_proj.weight -> memory_encoder.projection.weight
if re.match(output_memory_encoder_projection_pattern, key):
key = key.replace(".o_proj.", ".projection.")
if re.match(output_object_pointer_proj_pattern, key):
layer_nb = int(re.match(output_object_pointer_proj_pattern, key).group(1))
if layer_nb == 0:
key = key.replace("layers.0", "proj_in")
elif layer_nb == 1:
key = key.replace("layers.1", "layers.0")
elif layer_nb == 2:
key = key.replace("layers.2", "proj_out")
model_state_dict[key] = value
model_state_dict["shared_image_embedding.positional_embedding"] = model_state_dict[
"prompt_encoder.shared_embedding.positional_embedding"
]
model_state_dict["prompt_encoder.point_embed.weight"] = torch.cat(
[model_state_dict.pop(f"prompt_encoder.point_embed.{i}.weight") for i in range(4)],
dim=0,
)
return model_state_dict
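# Illustrative sketch (not part of the conversion script): how `replace_keys`
# renames original SAM 2 parameter names. The toy state dict below contains just
# enough keys for the function to run end to end; all shapes are made up.
def _example_replace_keys():
    toy_state_dict = {
        "image_encoder.trunk.blocks.0.mlp.layers.0.weight": torch.zeros(2, 2),
        "sam_prompt_encoder.pe_layer.positional_encoding_gaussian_matrix": torch.zeros(2, 128),
    }
    for i in range(4):
        toy_state_dict[f"sam_prompt_encoder.point_embeddings.{i}.weight"] = torch.zeros(1, 4)
    converted = replace_keys(toy_state_dict)
    # "image_encoder.trunk.blocks.0.mlp.layers.0.weight"
    #     -> "vision_encoder.backbone.blocks.0.mlp.proj_in.weight"
    # and the four point embeddings are concatenated into "prompt_encoder.point_embed.weight".
    return sorted(converted)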
def convert_sam2_checkpoint(model_name, checkpoint_path, pytorch_dump_folder, push_to_hub):
config = get_config(model_name)
state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
state_dict = replace_keys(state_dict)
image_processor = Sam2ImageProcessorFast()
processor = Sam2Processor(image_processor=image_processor)
hf_model = Sam2Model(config)
hf_model.eval()
device = "cuda" if torch.cuda.is_available() else "cpu"
missing_keys, unexpected_keys = hf_model.load_state_dict(state_dict, strict=False)
hf_model = hf_model.to(device)
for pattern in Sam2Model._keys_to_ignore_on_load_unexpected:
unexpected_keys = [k for k in unexpected_keys if re.search(pattern, k) is None]
if missing_keys or unexpected_keys:
print("Missing keys:", missing_keys)
print("Unexpected keys:", unexpected_keys)
raise ValueError("Missing or unexpected keys in the state dict")
img_url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
input_points = [[[[1000, 600]]]]
input_labels = [[[1]]]
inputs = processor(
images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
).to(device)
with torch.no_grad():
output = hf_model(**inputs)
scores = output.iou_scores.squeeze()
if model_name == "sam2.1_hiera_tiny":
assert torch.allclose(scores, torch.tensor([0.0316, 0.9647, 0.1029]).to(device), atol=1e-2)
elif model_name == "sam2.1_hiera_small":
assert torch.allclose(scores, torch.tensor([0.9664, 0.1494, 0.0456]).to(device), atol=1e-2)
elif model_name == "sam2.1_hiera_base_plus":
assert torch.allclose(scores, torch.tensor([0.0361, 0.9775, 0.1307]).to(device), atol=1e-2)
elif model_name == "sam2.1_hiera_large":
assert torch.allclose(scores, torch.tensor([0.9648, 0.0371, 0.1898]).to(device), atol=1e-2)
elif model_name == "sam2_hiera_tiny":
assert torch.allclose(scores, torch.tensor([0.0439, 0.9567, 0.1415]).to(device), atol=1e-2)
elif model_name == "sam2_hiera_small":
assert torch.allclose(scores, torch.tensor([0.9593, 0.1633, 0.0392]).to(device), atol=1e-2)
elif model_name == "sam2_hiera_base_plus":
assert torch.allclose(scores, torch.tensor([0.0423, 0.9815, 0.0897]).to(device), atol=1e-2)
elif model_name == "sam2_hiera_large":
assert torch.allclose(scores, torch.tensor([0.9514, 0.0535, 0.1787]).to(device), atol=1e-2)
else:
raise ValueError(f"Model {model_name} not supported")
if pytorch_dump_folder is not None:
processor.save_pretrained(pytorch_dump_folder)
hf_model.save_pretrained(pytorch_dump_folder)
if push_to_hub:
repo_id = f"danelcsb/{pytorch_dump_folder.split('/')[-1]}"
processor.push_to_hub(repo_id)
hf_model.push_to_hub(repo_id)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
choices = [
"sam2.1_hiera_tiny",
"sam2.1_hiera_small",
"sam2.1_hiera_base_plus",
"sam2.1_hiera_large",
"sam2_hiera_tiny",
"sam2_hiera_small",
"sam2_hiera_base_plus",
"sam2_hiera_large",
]
parser.add_argument(
"--model_name",
default="sam2.1_hiera_tiny",
choices=choices,
type=str,
help="Name of the original model to convert",
)
parser.add_argument(
"--checkpoint_path",
type=str,
required=False,
help="Path to the original checkpoint",
)
parser.add_argument("--pytorch_dump_folder_path", default="", type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub after converting",
)
args = parser.parse_args()
hf_model_name = args.model_name.replace("_", "-")
checkpoint_path = (
hf_hub_download(f"facebook/{hf_model_name}", f"{args.model_name.lower()}.pt")
if args.checkpoint_path is None
else args.checkpoint_path
)
convert_sam2_checkpoint(args.model_name, checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
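# Illustrative invocation sketch (not part of the script); the output folder name
# below is an arbitrary example:
#
#   python convert_sam2_to_hf.py --model_name sam2.1_hiera_tiny \
#       --pytorch_dump_folder_path ./sam2.1-hiera-tiny-hf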
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/sam2/image_processing_sam2_fast.py | src/transformers/models/sam2/image_processing_sam2_fast.py | # π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨
# This file was automatically generated from src/transformers/models/sam2/modular_sam2.py.
# Do NOT edit this file manually as any edits will be overwritten by the generation of
# the file from the modular. If any change should be done, please apply the change to the
# modular_sam2.py file directly. One of our CI enforces this.
# π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨
# coding=utf-8
# Copyright 2025 The Meta AI Authors and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from copy import deepcopy
from itertools import product
from typing import Any, Optional, Union
import numpy as np
import torch
import torch.nn.functional as F
from torchvision.ops.boxes import batched_nms
from ...image_processing_utils import BatchFeature, get_size_dict
from ...image_processing_utils_fast import BaseImageProcessorFast
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
SizeDict,
pil_torch_interpolation_mapping,
)
from ...processing_utils import ImagesKwargs, Unpack
from ...utils import TensorType, auto_docstring
class Sam2FastImageProcessorKwargs(ImagesKwargs, total=False):
r"""
mask_size (`dict[str, int]`, *optional*):
The size `{"height": int, "width": int}` to resize the segmentation maps to.
"""
mask_size: dict[str, int]
def _compute_stability_score(masks: "torch.Tensor", mask_threshold: float, stability_score_offset: int):
# One mask is always contained inside the other.
# Save memory by preventing unnecessary cast to torch.int64
intersections = (
(masks > (mask_threshold + stability_score_offset)).sum(-1, dtype=torch.int16).sum(-1, dtype=torch.int32)
)
unions = (masks > (mask_threshold - stability_score_offset)).sum(-1, dtype=torch.int16).sum(-1, dtype=torch.int32)
stability_scores = intersections / unions
return stability_scores
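# Illustrative sketch (not part of the original file): a confidently predicted
# mask keeps a stability score close to 1.0, because thresholding the logits at
# threshold +/- offset barely changes the binarized area.
def _example_stability_score():
    logits = torch.full((1, 8, 8), -10.0)
    logits[0, 2:6, 2:6] = 10.0  # a sharp 4x4 square of high logits
    return _compute_stability_score(logits, mask_threshold=0.0, stability_score_offset=1.0)  # tensor([1.])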
def _mask_to_rle(input_mask: "torch.Tensor"):
"""
Encodes masks to run-length encoding (RLE), in the format expected by pycocotools.
"""
# Put in fortran order and flatten height and width
batch_size, height, width = input_mask.shape
input_mask = input_mask.permute(0, 2, 1).flatten(1)
# Compute change indices
diff = input_mask[:, 1:] ^ input_mask[:, :-1]
change_indices = diff.nonzero()
# Encode run length
out = []
for i in range(batch_size):
cur_idxs = change_indices[change_indices[:, 0] == i, 1] + 1
if len(cur_idxs) == 0:
# No changes => either all 0 or all 1
# If the entire mask is 0, RLE is [height*width] or if the entire mask is 1, RLE is [0, height*width].
if input_mask[i, 0] == 0:
out.append({"size": [height, width], "counts": [height * width]})
else:
out.append({"size": [height, width], "counts": [0, height * width]})
continue
btw_idxs = cur_idxs[1:] - cur_idxs[:-1]
counts = [] if input_mask[i, 0] == 0 else [0]
counts += [cur_idxs[0].item()] + btw_idxs.tolist() + [height * width - cur_idxs[-1].item()]
out.append({"size": [height, width], "counts": counts})
return out
def _batched_mask_to_box(masks: "torch.Tensor"):
"""
Computes the bounding boxes around the given input masks. The bounding boxes are in the XYXY format, which
corresponds to the following required indices:
- LEFT: left hand side of the bounding box
- TOP: top of the bounding box
- RIGHT: right of the bounding box
- BOTTOM: bottom of the bounding box
Returns [0,0,0,0] for an empty mask. For input shape channel_1 x channel_2 x ... x height x width, the output shape
is channel_1 x channel_2 x ... x 4.
Args:
- masks (`torch.Tensor` of shape `(batch, nb_mask, height, width)`)
"""
# torch.max below raises an error on empty inputs, just skip in this case
if torch.numel(masks) == 0:
return torch.zeros(*masks.shape[:-2], 4, device=masks.device)
# Normalize shape to Cxheightxwidth
shape = masks.shape
height, width = shape[-2:]
# Get top and bottom edges
in_height, _ = torch.max(masks, dim=-1)
in_height_coords = in_height * torch.arange(height, device=in_height.device)[None, :]
bottom_edges, _ = torch.max(in_height_coords, dim=-1)
in_height_coords = in_height_coords + height * (~in_height)
top_edges, _ = torch.min(in_height_coords, dim=-1)
# Get left and right edges
in_width, _ = torch.max(masks, dim=-2)
in_width_coords = in_width * torch.arange(width, device=in_width.device)[None, :]
right_edges, _ = torch.max(in_width_coords, dim=-1)
in_width_coords = in_width_coords + width * (~in_width)
left_edges, _ = torch.min(in_width_coords, dim=-1)
# If the mask is empty the right edge will be to the left of the left edge.
# Replace these boxes with [0, 0, 0, 0]
empty_filter = (right_edges < left_edges) | (bottom_edges < top_edges)
out = torch.stack([left_edges, top_edges, right_edges, bottom_edges], dim=-1)
out = out * (~empty_filter).unsqueeze(-1)
# Return to original shape
out = out.reshape(*shape[:-2], 4)
return out
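# Illustrative sketch (not part of the original file): the XYXY box computed for
# a small rectangle of ones inside an otherwise empty mask.
def _example_batched_mask_to_box():
    masks = torch.zeros(1, 1, 6, 6, dtype=torch.bool)
    masks[0, 0, 2:5, 1:4] = True  # rows 2-4, columns 1-3
    boxes = _batched_mask_to_box(masks)
    # boxes[0, 0] == tensor([1, 2, 3, 4]): left=1, top=2, right=3, bottom=4
    return boxes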
def _is_box_near_crop_edge(boxes, crop_box, orig_box, atol=20.0):
"""Filter masks at the edge of a crop, but not at the edge of the original image."""
crop_box_torch = torch.as_tensor(crop_box, dtype=torch.float, device=boxes.device)
orig_box_torch = torch.as_tensor(orig_box, dtype=torch.float, device=boxes.device)
left, top, _, _ = crop_box
offset = torch.tensor([[left, top, left, top]], device=boxes.device)
# Check if boxes has a channel dimension
if len(boxes.shape) == 3:
offset = offset.unsqueeze(1)
boxes = (boxes + offset).float()
near_crop_edge = torch.isclose(boxes, crop_box_torch[None, :], atol=atol, rtol=0)
near_image_edge = torch.isclose(boxes, orig_box_torch[None, :], atol=atol, rtol=0)
near_crop_edge = torch.logical_and(near_crop_edge, ~near_image_edge)
return torch.any(near_crop_edge, dim=1)
def _pad_masks(masks, crop_box: list[int], orig_height: int, orig_width: int):
left, top, right, bottom = crop_box
if left == 0 and top == 0 and right == orig_width and bottom == orig_height:
return masks
# Coordinate transform masks
pad_x, pad_y = orig_width - (right - left), orig_height - (bottom - top)
pad = (left, pad_x - left, top, pad_y - top)
return torch.nn.functional.pad(masks, pad, value=0)
def _generate_crop_boxes(
image,
target_size: int,
crop_n_layers: int = 0,
overlap_ratio: float = 512 / 1500,
points_per_crop: Optional[int] = 32,
crop_n_points_downscale_factor: Optional[list[int]] = 1,
) -> tuple[list[list[int]], list[int]]:
"""
Generates a list of crop boxes of different sizes. Each layer has (2**i)**2 boxes for the ith layer.
Args:
image (Union[`numpy.ndarray`, `PIL.Image`, `torch.Tensor`]):
Image to generate crops for.
target_size (`int`):
Size of the smallest crop.
crop_n_layers (`int`, *optional*):
If `crop_n_layers>0`, mask prediction will be run again on crops of the image. Sets the number of layers
to run, where each layer has 2**i_layer number of image crops.
overlap_ratio (`float`, *optional*):
Sets the degree to which crops overlap. In the first crop layer, crops will overlap by this fraction of the
image length. Later layers with more crops scale down this overlap.
points_per_crop (`int`, *optional*):
Number of points to sample per crop.
crop_n_points_downscale_factor (`int`, *optional*):
The number of points-per-side sampled in layer n is scaled down by crop_n_points_downscale_factor**n.
input_data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
"""
if isinstance(image, list):
raise ValueError("Only one image is allowed for crop generation.")
original_size = image.shape[-2:]
points_grid = []
for i in range(crop_n_layers + 1):
n_points = int(points_per_crop / (crop_n_points_downscale_factor**i))
points_grid.append(_build_point_grid(n_points))
crop_boxes, layer_idxs = _generate_per_layer_crops(crop_n_layers, overlap_ratio, original_size)
cropped_images, point_grid_per_crop = _generate_crop_images(
crop_boxes, image, points_grid, layer_idxs, target_size, original_size
)
crop_boxes = torch.tensor(crop_boxes)
crop_boxes = crop_boxes.float()
points_per_crop = torch.stack(point_grid_per_crop)
points_per_crop = points_per_crop.unsqueeze(0).permute(0, 2, 1, 3)
cropped_images = torch.stack(cropped_images)
input_labels = torch.ones_like(points_per_crop[:, :, :, 0], dtype=torch.int64)
return crop_boxes, points_per_crop, cropped_images, input_labels
def _generate_per_layer_crops(crop_n_layers, overlap_ratio, original_size):
"""
Generates 2 ** (layer_idx + 1) crops for each crop layer. Crops are in the XYWH format: the XYWH format
consists of the following required indices:
- X: X coordinate of the top left of the bounding box
- Y: Y coordinate of the top left of the bounding box
- W: width of the bounding box
- H: height of the bounding box
"""
crop_boxes, layer_idxs = [], []
im_height, im_width = original_size
short_side = min(im_height, im_width)
# Original image
crop_boxes.append([0, 0, im_width, im_height])
layer_idxs.append(0)
for i_layer in range(crop_n_layers):
n_crops_per_side = 2 ** (i_layer + 1)
overlap = int(overlap_ratio * short_side * (2 / n_crops_per_side))
crop_width = int(math.ceil((overlap * (n_crops_per_side - 1) + im_width) / n_crops_per_side))
crop_height = int(math.ceil((overlap * (n_crops_per_side - 1) + im_height) / n_crops_per_side))
crop_box_x0 = [int((crop_width - overlap) * i) for i in range(n_crops_per_side)]
crop_box_y0 = [int((crop_height - overlap) * i) for i in range(n_crops_per_side)]
for left, top in product(crop_box_x0, crop_box_y0):
box = [left, top, min(left + crop_width, im_width), min(top + crop_height, im_height)]
crop_boxes.append(box)
layer_idxs.append(i_layer + 1)
return crop_boxes, layer_idxs
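# Illustrative sketch (not part of the original file): with one extra crop layer,
# a 100x100 image yields the full-image box plus 2 * 2 = 4 overlapping crops.
def _example_per_layer_crops():
    crop_boxes, layer_idxs = _generate_per_layer_crops(
        crop_n_layers=1, overlap_ratio=0.2, original_size=(100, 100)
    )
    # crop_boxes[0] == [0, 0, 100, 100] and layer_idxs == [0, 1, 1, 1, 1]
    return crop_boxes, layer_idxs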
def _build_point_grid(n_per_side: int) -> torch.Tensor:
"""Generates a 2D grid of points evenly spaced in [0,1]x[0,1]."""
offset = 1 / (2 * n_per_side)
points_one_side = torch.linspace(offset, 1 - offset, n_per_side)
points_x = torch.tile(points_one_side[None, :], (n_per_side, 1))
points_y = torch.tile(points_one_side[:, None], (1, n_per_side))
points = torch.stack([points_x, points_y], dim=-1).reshape(-1, 2)
return points
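# Illustrative sketch (not part of the original file): the prompt grid used for
# automatic mask generation is normalized to [0, 1] x [0, 1].
def _example_point_grid():
    grid = _build_point_grid(2)
    # tensor([[0.2500, 0.2500], [0.7500, 0.2500], [0.2500, 0.7500], [0.7500, 0.7500]])
    return grid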
def _generate_crop_images(
crop_boxes, image, points_grid, layer_idxs, target_size, original_size, input_data_format=None
):
"""
Takes as input the bounding boxes that are used to crop the image. Based on the crops, the corresponding points are
also passed.
"""
cropped_images = []
total_points_per_crop = []
for i, crop_box in enumerate(crop_boxes):
left, top, right, bottom = crop_box
cropped_im = image[:, top:bottom, left:right]
cropped_images.append(cropped_im)
cropped_im_size = cropped_im.shape[-2:]
points_scale = torch.tensor(cropped_im_size).flip(dims=(0,)).unsqueeze(0)
points = points_grid[layer_idxs[i]] * points_scale
normalized_points = _normalize_coordinates(target_size, points, original_size)
total_points_per_crop.append(normalized_points)
return cropped_images, total_points_per_crop
def _normalize_coordinates(
target_size: int, coords: torch.Tensor, original_size: tuple[int, int], is_bounding_box=False
) -> torch.Tensor:
"""
Expects a tensor of coordinates with length 2 in the final dimension. Requires the original image size in (height, width)
format.
"""
old_height, old_width = original_size
scale = target_size * 1.0 / max(old_height, old_width)
new_height, new_width = old_height * scale, old_width * scale
new_width = int(new_width + 0.5)
new_height = int(new_height + 0.5)
coords = deepcopy(coords).float()
if is_bounding_box:
coords = coords.reshape(-1, 2, 2)
coords[..., 0] = coords[..., 0] * (new_width / old_width)
coords[..., 1] = coords[..., 1] * (new_height / old_height)
if is_bounding_box:
coords = coords.reshape(-1, 4)
return coords
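# Illustrative sketch (not part of the original file): a point on a 512x256 image
# is rescaled so that the longest image side matches a 1024 target resolution.
def _example_normalize_coordinates():
    points = torch.tensor([[256.0, 128.0]])  # (x, y) on a (height=512, width=256) image
    normalized = _normalize_coordinates(1024, points, original_size=(512, 256))
    # normalized == tensor([[512., 256.]])
    return normalized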
def _rle_to_mask(rle: dict[str, Any]) -> torch.Tensor:
"""Compute a binary mask from an uncompressed RLE."""
height, width = rle["size"]
mask = torch.empty(height * width, dtype=bool)
idx = 0
parity = False
for count in rle["counts"]:
mask[idx : idx + count] = parity
idx += count
parity = not parity
mask = mask.reshape(width, height)
return mask.transpose(0, 1) # Reshape to original shape
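# Illustrative sketch (not part of the original file): encoding a toy batch of
# binary masks to uncompressed RLE and decoding the first one back is lossless.
def _example_rle_round_trip():
    masks = torch.zeros(1, 4, 4, dtype=torch.bool)
    masks[0, 1:3, 1:3] = True
    rles = _mask_to_rle(masks)  # [{"size": [4, 4], "counts": [5, 2, 2, 2, 5]}]
    recovered = _rle_to_mask(rles[0])
    assert torch.equal(recovered, masks[0])
    return rles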
def _post_process_for_mask_generation(rle_masks, iou_scores, mask_boxes, amg_crops_nms_thresh=0.7):
"""
Perform NMS (Non Maximum Suppression) on the outputs.
Args:
rle_masks (`torch.Tensor`):
binary masks in the RLE format
iou_scores (`torch.Tensor` of shape (nb_masks, 1)):
iou_scores predicted by the model
mask_boxes (`torch.Tensor`):
The bounding boxes corresponding to segmentation masks
amg_crops_nms_thresh (`float`, *optional*, defaults to 0.7):
NMS threshold.
"""
keep_by_nms = batched_nms(
boxes=mask_boxes.float(),
scores=iou_scores,
idxs=torch.zeros(mask_boxes.shape[0]),
iou_threshold=amg_crops_nms_thresh,
)
iou_scores = iou_scores[keep_by_nms]
rle_masks = [rle_masks[i] for i in keep_by_nms]
mask_boxes = mask_boxes[keep_by_nms]
masks = [_rle_to_mask(rle) for rle in rle_masks]
return masks, iou_scores, rle_masks, mask_boxes
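# Illustrative sketch (not part of the original file): two fully overlapping
# candidate masks; the lower-scoring one is suppressed by NMS and only the
# surviving mask is decoded from its RLE.
def _example_post_process_for_mask_generation():
    rle_masks = [
        {"size": [4, 4], "counts": [0, 16]},  # all-ones mask
        {"size": [4, 4], "counts": [0, 16]},  # duplicate candidate
    ]
    iou_scores = torch.tensor([0.9, 0.8])
    mask_boxes = torch.tensor([[0.0, 0.0, 4.0, 4.0], [0.0, 0.0, 4.0, 4.0]])
    masks, scores, rles, boxes = _post_process_for_mask_generation(
        rle_masks, iou_scores, mask_boxes, amg_crops_nms_thresh=0.7
    )
    # len(masks) == 1 and scores == tensor([0.9000])
    return masks, scores, rles, boxes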
@auto_docstring
class Sam2ImageProcessorFast(BaseImageProcessorFast):
resample = PILImageResampling.BILINEAR
image_mean = IMAGENET_DEFAULT_MEAN
image_std = IMAGENET_DEFAULT_STD
size = {"height": 1024, "width": 1024}
mask_size = {"height": 256, "width": 256}
do_resize = True
do_rescale = True
do_normalize = True
do_convert_rgb = True
valid_kwargs = Sam2FastImageProcessorKwargs
# modular artefacts
do_pad = None
pad_size = None
mask_pad_size = None
def __init__(self, **kwargs: Unpack[Sam2FastImageProcessorKwargs]):
super().__init__(**kwargs)
def _further_process_kwargs(
self,
size: Optional[SizeDict] = None,
mask_size: Optional[SizeDict] = None,
default_to_square: Optional[bool] = None,
image_mean: Optional[Union[float, list[float]]] = None,
image_std: Optional[Union[float, list[float]]] = None,
data_format: Optional[ChannelDimension] = None,
**kwargs,
) -> dict:
"""
Update kwargs that need further processing before being validated
Can be overridden by subclasses to customize the processing of kwargs.
"""
if kwargs is None:
kwargs = {}
if size is not None:
size = SizeDict(**get_size_dict(size=size, default_to_square=default_to_square))
if mask_size is not None:
mask_size = SizeDict(**get_size_dict(mask_size, param_name="mask_size"))
if isinstance(image_mean, list):
image_mean = tuple(image_mean)
if isinstance(image_std, list):
image_std = tuple(image_std)
if data_format is None:
data_format = ChannelDimension.FIRST
kwargs["size"] = size
kwargs["mask_size"] = mask_size
kwargs["image_mean"] = image_mean
kwargs["image_std"] = image_std
kwargs["data_format"] = data_format
# torch resize uses interpolation instead of resample
# Check if resample is an int before checking if it's an instance of PILImageResampling
# because if pillow < 9.1.0, resample is an int and PILImageResampling is a module.
# Checking PILImageResampling will fail with error `TypeError: isinstance() arg 2 must be a type or tuple of types`.
resample = kwargs.pop("resample")
kwargs["interpolation"] = (
pil_torch_interpolation_mapping[resample] if isinstance(resample, (PILImageResampling, int)) else resample
)
return kwargs
@auto_docstring
def preprocess(
self,
images: ImageInput,
segmentation_maps: Optional[ImageInput] = None,
**kwargs: Unpack[Sam2FastImageProcessorKwargs],
) -> BatchFeature:
r"""
segmentation_maps (`ImageInput`, *optional*):
The segmentation maps to preprocess.
"""
return super().preprocess(images, segmentation_maps, **kwargs)
def _preprocess_image_like_inputs(
self,
images: ImageInput,
segmentation_maps: Optional[ImageInput],
do_convert_rgb: bool,
input_data_format: ChannelDimension,
device: Optional[Union[str, "torch.device"]] = None,
**kwargs: Unpack[Sam2FastImageProcessorKwargs],
) -> BatchFeature:
"""
Preprocess image-like inputs.
"""
images = self._prepare_image_like_inputs(
images=images, do_convert_rgb=do_convert_rgb, input_data_format=input_data_format, device=device
)
original_sizes = [image.shape[-2:] for image in images]
images_kwargs = kwargs.copy()
pixel_values = self._preprocess(images, **images_kwargs)
data = {
"pixel_values": pixel_values,
"original_sizes": original_sizes,
}
if segmentation_maps is not None:
processed_segmentation_maps = self._prepare_image_like_inputs(
images=segmentation_maps,
expected_ndims=2,
do_convert_rgb=False,
input_data_format=ChannelDimension.FIRST,
)
segmentation_maps_kwargs = kwargs.copy()
segmentation_maps_kwargs.update(
{
"do_normalize": False,
"do_rescale": False,
"interpolation": pil_torch_interpolation_mapping[PILImageResampling.NEAREST],
"size": segmentation_maps_kwargs.pop("mask_size"),
}
)
processed_segmentation_maps = self._preprocess(
images=processed_segmentation_maps, **segmentation_maps_kwargs
)
data["labels"] = processed_segmentation_maps.squeeze(1).to(torch.int64)
return BatchFeature(data=data, tensor_type=kwargs["return_tensors"])
def _preprocess(
self,
images: list["torch.Tensor"],
return_tensors: Optional[Union[str, TensorType]],
**kwargs,
) -> "torch.Tensor":
return super()._preprocess(images, return_tensors=return_tensors, **kwargs).pixel_values
def generate_crop_boxes(
self,
image: "torch.Tensor",
target_size,
crop_n_layers: int = 0,
overlap_ratio: float = 512 / 1500,
points_per_crop: Optional[int] = 32,
crop_n_points_downscale_factor: Optional[list[int]] = 1,
device: Optional["torch.device"] = None,
):
"""
Generates a list of crop boxes of different sizes. Each layer has (2**i)**2 boxes for the ith layer.
Args:
image (`torch.Tensor`):
Input original image
target_size (`int`):
Target size of the resized image
crop_n_layers (`int`, *optional*, defaults to 0):
If >0, mask prediction will be run again on crops of the image. Sets the number of layers to run, where
each layer has 2**i_layer number of image crops.
overlap_ratio (`float`, *optional*, defaults to 512/1500):
Sets the degree to which crops overlap. In the first crop layer, crops will overlap by this fraction of
the image length. Later layers with more crops scale down this overlap.
points_per_crop (`int`, *optional*, defaults to 32):
Number of points to sample from each crop.
crop_n_points_downscale_factor (`list[int]`, *optional*, defaults to 1):
The number of points-per-side sampled in layer n is scaled down by crop_n_points_downscale_factor**n.
device (`torch.device`, *optional*, defaults to None):
Device to use for the computation. If None, cpu will be used.
input_data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
return_tensors (`str`, *optional*, defaults to `pt`):
If `pt`, returns `torch.Tensor`.
"""
image = self._process_image(image)
crop_boxes, points_per_crop, cropped_images, input_labels = _generate_crop_boxes(
image,
target_size,
crop_n_layers,
overlap_ratio,
points_per_crop,
crop_n_points_downscale_factor,
)
if device is None:
device = torch.device("cpu")
crop_boxes = crop_boxes.to(device)
points_per_crop = points_per_crop.to(device)
# cropped_images stays as torch.Tensor
input_labels = input_labels.to(device)
return crop_boxes, points_per_crop, cropped_images, input_labels
def filter_masks(
self,
masks,
iou_scores,
original_size,
cropped_box_image,
pred_iou_thresh=0.88,
stability_score_thresh=0.95,
mask_threshold=0,
stability_score_offset=1,
):
"""
Filters the predicted masks by selecting only the ones that meet several criteria. The first criterion is
that the iou scores need to be greater than `pred_iou_thresh`. The second criterion is that the stability
score needs to be greater than `stability_score_thresh`. The method also converts the predicted masks to
bounding boxes and pads the predicted masks if necessary.
Args:
masks (`torch.Tensor`):
Input masks.
iou_scores (`torch.Tensor`):
List of IoU scores.
original_size (`tuple[int,int]`):
Size of the original image.
cropped_box_image (`torch.Tensor`):
The cropped image.
pred_iou_thresh (`float`, *optional*, defaults to 0.88):
The threshold for the iou scores.
stability_score_thresh (`float`, *optional*, defaults to 0.95):
The threshold for the stability score.
mask_threshold (`float`, *optional*, defaults to 0):
The threshold for the predicted masks.
stability_score_offset (`float`, *optional*, defaults to 1):
The offset for the stability score used in the `_compute_stability_score` method.
"""
original_height, original_width = original_size
iou_scores = iou_scores.flatten(0, 1)
masks = masks.flatten(0, 1)
if masks.shape[0] != iou_scores.shape[0]:
raise ValueError("masks and iou_scores must have the sam2e batch size.")
if masks.device != iou_scores.device:
iou_scores = iou_scores.to(masks.device)
batch_size = masks.shape[0]
keep_mask = torch.ones(batch_size, dtype=torch.bool, device=masks.device)
if pred_iou_thresh > 0.0:
keep_mask = keep_mask & (iou_scores > pred_iou_thresh)
# compute stability score
if stability_score_thresh > 0.0:
stability_scores = _compute_stability_score(masks, mask_threshold, stability_score_offset)
keep_mask = keep_mask & (stability_scores > stability_score_thresh)
scores = iou_scores[keep_mask]
masks = masks[keep_mask]
# binarize masks
masks = masks > mask_threshold
converted_boxes = _batched_mask_to_box(masks)
keep_mask = ~_is_box_near_crop_edge(
converted_boxes, cropped_box_image, [0, 0, original_width, original_height]
)
scores = scores[keep_mask]
masks = masks[keep_mask]
converted_boxes = converted_boxes[keep_mask]
masks = _pad_masks(masks, cropped_box_image, original_height, original_width)
# conversion to rle is necessary to run non-maximum suppression
masks = _mask_to_rle(masks)
return masks, scores, converted_boxes
def post_process_masks(
self,
masks,
original_sizes,
mask_threshold=0.0,
binarize=True,
max_hole_area=0.0,
max_sprinkle_area=0.0,
apply_non_overlapping_constraints=False,
**kwargs,
):
"""
Remove padding and upscale masks to the original image size.
Args:
masks (`Union[torch.Tensor, List[torch.Tensor], np.ndarray, List[np.ndarray]]`):
Batched masks from the mask_decoder in (batch_size, num_channels, height, width) format.
original_sizes (`Union[torch.Tensor, List[Tuple[int,int]]]`):
The original sizes of each image before it was resized to the model's expected input shape, in (height,
width) format.
mask_threshold (`float`, *optional*, defaults to 0.0):
Threshold for binarization and post-processing operations.
binarize (`bool`, *optional*, defaults to `True`):
Whether to binarize the masks.
max_hole_area (`float`, *optional*, defaults to 0.0):
The maximum area of a hole to fill.
max_sprinkle_area (`float`, *optional*, defaults to 0.0):
The maximum area of a sprinkle to fill.
apply_non_overlapping_constraints (`bool`, *optional*, defaults to `False`):
Whether to apply non-overlapping constraints to the masks.
Returns:
(`torch.Tensor`): Batched masks in (batch_size, num_channels, height, width) format, where (height, width)
is given by original_size.
"""
if isinstance(original_sizes, (torch.Tensor, np.ndarray)):
original_sizes = original_sizes.tolist()
# TODO: add connected components kernel for postprocessing
output_masks = []
for i, original_size in enumerate(original_sizes):
if isinstance(masks[i], np.ndarray):
masks[i] = torch.from_numpy(masks[i])
elif not isinstance(masks[i], torch.Tensor):
raise TypeError("Input masks should be a list of `torch.tensors` or a list of `np.ndarray`")
interpolated_mask = F.interpolate(masks[i], original_size, mode="bilinear", align_corners=False)
if apply_non_overlapping_constraints:
interpolated_mask = self._apply_non_overlapping_constraints(interpolated_mask)
if binarize:
interpolated_mask = interpolated_mask > mask_threshold
output_masks.append(interpolated_mask)
return output_masks
def post_process_for_mask_generation(self, all_masks, all_scores, all_boxes, crops_nms_thresh):
"""
Post processes the masks that are generated by calling the Non Maximum Suppression algorithm on the predicted masks.
Args:
all_masks (`torch.Tensor`):
List of all predicted segmentation masks
all_scores (`torch.Tensor`):
List of all predicted iou scores
all_boxes (`torch.Tensor`):
List of all bounding boxes of the predicted masks
crops_nms_thresh (`float`):
Threshold for NMS (Non Maximum Suppression) algorithm.
"""
return _post_process_for_mask_generation(all_masks, all_scores, all_boxes, crops_nms_thresh)
def _apply_non_overlapping_constraints(self, pred_masks: torch.Tensor) -> torch.Tensor:
"""
Apply non-overlapping constraints to the object scores in pred_masks. Here we
keep only the highest scoring object at each spatial location in pred_masks.
"""
batch_size = pred_masks.size(0)
if batch_size == 1:
return pred_masks
device = pred_masks.device
# "max_obj_inds": object index of the object with the highest score at each location
max_obj_inds = torch.argmax(pred_masks, dim=0, keepdim=True)
# "batch_obj_inds": object index of each object slice (along dim 0) in `pred_masks`
batch_obj_inds = torch.arange(batch_size, device=device)[:, None, None, None]
keep = max_obj_inds == batch_obj_inds
# suppress overlapping regions' scores below -10.0 so that the foreground regions
# don't overlap (here sigmoid(-10.0)=4.5398e-05)
pred_masks = torch.where(keep, pred_masks, torch.clamp(pred_masks, max=-10.0))
return pred_masks
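# Illustrative sketch (not part of the original file): when two object channels
# claim the same pixel, only the highest-scoring one keeps its score; the other
# is clamped to at most -10.0 so the binarized masks cannot overlap.
def _example_non_overlapping_constraints():
    pred_masks = torch.tensor([[[[5.0]]], [[[3.0]]]])  # two objects, one pixel each
    processor = Sam2ImageProcessorFast()
    constrained = processor._apply_non_overlapping_constraints(pred_masks)
    # constrained[0] stays 5.0, constrained[1] becomes -10.0
    return constrained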
__all__ = ["Sam2ImageProcessorFast"]
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/sam2/processing_sam2.py | src/transformers/models/sam2/processing_sam2.py | # coding=utf-8
# Copyright 2025 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Processor class for SAM2.
"""
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_torch_available, logging
from ...utils.import_utils import requires
logger = logging.get_logger(__name__)
if is_torch_available():
import torch
@requires(backends=("torch",))
class Sam2Processor(ProcessorMixin):
r"""
Constructs a SAM2 processor which wraps a SAM2 image processor and a 2D points & bounding boxes processor into a
single processor.
[`Sam2Processor`] offers all the functionalities of [`Sam2ImageProcessorFast`] and [`Sam2VideoProcessor`]. See the docstring of
[`~Sam2ImageProcessorFast.__call__`] and [`~Sam2VideoProcessor.__call__`] for more information.
Args:
image_processor (`Sam2ImageProcessorFast`):
An instance of [`Sam2ImageProcessorFast`].
target_size (`int`, *optional*):
The target size (target_size, target_size) to which the image will be resized.
point_pad_value (`int`, *optional*, defaults to -10):
The value used for padding input points.
"""
def __init__(self, image_processor, target_size: Optional[int] = None, point_pad_value: int = -10, **kwargs):
super().__init__(image_processor, **kwargs)
self.point_pad_value = point_pad_value
self.target_size = target_size if target_size is not None else self.image_processor.size["height"]
def __call__(
self,
images: Optional[ImageInput] = None,
segmentation_maps: Optional[ImageInput] = None,
input_points: Optional[Union[list[list[list[list[float]]]], torch.Tensor]] = None,
input_labels: Optional[Union[list[list[list[int]]], torch.Tensor]] = None,
input_boxes: Optional[Union[list[list[list[float]]], torch.Tensor]] = None,
original_sizes: Optional[Union[list[list[float]], torch.Tensor]] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
**kwargs,
) -> BatchEncoding:
r"""
This method uses [`Sam2ImageProcessorFast.__call__`] method to prepare image(s) for the model. It also prepares 2D
points and bounding boxes for the model if they are provided.
Args:
images (`ImageInput`, *optional*):
The image(s) to process.
segmentation_maps (`ImageInput`, *optional*):
The segmentation maps to process.
input_points (`list[list[list[list[float]]]]`, `torch.Tensor`, *optional*):
The points to add to the frame.
input_labels (`list[list[list[int]]]`, `torch.Tensor`, *optional*):
The labels for the points.
input_boxes (`list[list[list[float]]]`, `torch.Tensor`, *optional*):
The bounding boxes to add to the frame.
original_sizes (`list[list[float]]`, `torch.Tensor`, *optional*):
The original sizes of the images.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return.
**kwargs:
Additional keyword arguments to pass to the image processor.
Returns:
A [`BatchEncoding`] with the following fields:
- `pixel_values` (`torch.Tensor`): The processed image(s).
- `original_sizes` (`list[list[float]]`): The original sizes of the images.
- `labels` (`torch.Tensor`): The processed segmentation maps (if provided).
- `input_points` (`torch.Tensor`): The processed points.
- `input_labels` (`torch.Tensor`): The processed labels.
- `input_boxes` (`torch.Tensor`): The processed bounding boxes.
"""
if images is not None:
encoding_image_processor = self.image_processor(
images,
segmentation_maps=segmentation_maps,
return_tensors=return_tensors,
**kwargs,
)
elif original_sizes is not None:
if isinstance(original_sizes, torch.Tensor):
original_sizes = original_sizes.cpu().tolist()
encoding_image_processor = BatchEncoding({"original_sizes": original_sizes}, tensor_type=return_tensors)
else:
raise ValueError("Either images or original_sizes must be provided")
# pop arguments that are not used in the forward but used nevertheless
original_sizes = encoding_image_processor["original_sizes"]
# Check original_sizes is of length 1 or len(images)
if images is not None and len(original_sizes) != 1 and len(original_sizes) != len(images):
raise ValueError(
"original_sizes must be of length 1 or len(images). If you are passing a single image, you must pass a single original_size."
)
# Process input points, labels, and boxes if provided
if input_points is not None or input_labels is not None or input_boxes is not None:
# Validate and convert inputs to standardized format
processed_points = self._validate_single_input(
input_points,
expected_depth=4,
input_name="points",
expected_format="[image level, object level, point level, point coordinates]",
expected_coord_size=2,
)
processed_labels = self._validate_single_input(
input_labels,
expected_depth=3,
input_name="labels",
expected_format="[image level, object level, point level]",
)
processed_boxes = self._validate_single_input(
input_boxes,
expected_depth=3,
input_name="boxes",
expected_format="[image level, box level, box coordinates]",
expected_coord_size=4,
)
# Get padding requirements for all inputs
if processed_points is not None:
points_max_dims = self._get_nested_dimensions(processed_points)[:3]
if processed_labels is not None:
labels_max_dims = self._get_nested_dimensions(processed_labels)[:3]
if processed_boxes is not None:
boxes_max_dims = self._get_nested_dimensions(processed_boxes)[:2]
# Ensure points and labels have consistent dimensions
if processed_points is not None and processed_labels is not None:
if points_max_dims != labels_max_dims:
raise ValueError(
"Input points and labels have inconsistent dimensions. Please ensure they have the same dimensions."
)
# Check that boxes don't need padding (model limitation)
if processed_boxes is not None and len(processed_boxes) >= 2:
if any(len(img_boxes) < boxes_max_dims[1] for img_boxes in processed_boxes):
raise ValueError(
"Input boxes have inconsistent dimensions that would require padding, "
"but boxes cannot be padded due to model limitations. "
"Please ensure all images have the same number of boxes."
)
# Pad and normalize all inputs to final tensor format
if processed_points is not None:
padded_points = self._pad_nested_list(processed_points, points_max_dims + [2])
final_points = torch.tensor(padded_points, dtype=torch.float32)
self._normalize_tensor_coordinates(final_points, original_sizes, preserve_padding=True)
encoding_image_processor.update({"input_points": final_points})
if processed_labels is not None:
padded_labels = self._pad_nested_list(processed_labels, labels_max_dims)
final_labels = torch.tensor(padded_labels, dtype=torch.int64)
encoding_image_processor.update({"input_labels": final_labels})
if processed_boxes is not None:
final_boxes = torch.tensor(processed_boxes, dtype=torch.float32)
self._normalize_tensor_coordinates(final_boxes, original_sizes, is_bounding_box=True)
encoding_image_processor.update({"input_boxes": final_boxes})
return encoding_image_processor
def _normalize_coordinates(
self, target_size: int, coords: "torch.Tensor", original_size, is_bounding_box=False
) -> "torch.Tensor":
"""
Expects a tensor of coordinates with length 2 in the final dimension. Requires the original image size in (H, W) format.
Args:
target_size (`int`):
The target size of the image.
coords (`torch.Tensor`):
The coordinates to be normalized.
original_size (`tuple`):
The original size of the image.
is_bounding_box (`bool`, *optional*, defaults to `False`):
Whether the coordinates are bounding boxes.
"""
old_h, old_w = original_size
new_h, new_w = target_size, target_size
coords = deepcopy(coords).float()
if is_bounding_box:
coords = coords.reshape(-1, 2, 2)
coords[..., 0] = coords[..., 0] * (new_w / old_w)
coords[..., 1] = coords[..., 1] * (new_h / old_h)
if is_bounding_box:
coords = coords.reshape(-1, 4)
return coords
def _convert_to_nested_list(self, data, expected_depth, current_depth=0):
"""
Recursively convert various input formats (tensors, numpy arrays, lists) to nested lists.
Args:
data: Input data in any format
expected_depth: Expected nesting depth
current_depth: Current depth in recursion
Returns:
Nested list representation of the data
"""
if data is None:
return None
# Convert tensor/numpy to list if we're at a leaf level or if it's a multi-dimensional array
if isinstance(data, torch.Tensor): # PyTorch tensor
if current_depth == expected_depth - 2 or len(data.shape) <= 2: # At coordinate level or small tensor
return data.numpy().tolist()
else:
return [self._convert_to_nested_list(item, expected_depth, current_depth + 1) for item in data]
elif isinstance(data, np.ndarray): # NumPy array
if current_depth == expected_depth - 2 or len(data.shape) <= 2: # At coordinate level or small array
return data.tolist()
else:
return [self._convert_to_nested_list(item, expected_depth, current_depth + 1) for item in data]
elif isinstance(data, list):
if current_depth == expected_depth:
# We've reached the expected depth, return as is
return data
else:
# Continue recursion
return [self._convert_to_nested_list(item, expected_depth, current_depth + 1) for item in data]
elif isinstance(data, (int, float)):
return data
else:
raise TypeError(f"Unsupported data type: {type(data)}")
def _get_nested_dimensions(self, nested_list, max_dims=None):
"""
Get the maximum dimensions at each level of nesting.
Args:
nested_list (`list`):
Nested list structure.
max_dims (`list`, *optional*):
Current maximum dimensions (for recursion).
Returns:
`list`: A list of maximum dimensions for each nesting level.
"""
if max_dims is None:
max_dims = []
if not isinstance(nested_list, list):
return max_dims
if len(max_dims) == 0:
max_dims.append(len(nested_list))
else:
max_dims[0] = max(max_dims[0], len(nested_list))
if len(nested_list) > 0:
for item in nested_list:
if isinstance(item, list):
sub_dims = self._get_nested_dimensions(item)
# Merge sub_dims into max_dims
for i, dim in enumerate(sub_dims):
if i + 1 >= len(max_dims):
max_dims.append(dim)
else:
max_dims[i + 1] = max(max_dims[i + 1], dim)
return max_dims
def _pad_nested_list(self, nested_list, target_dims, current_level=0, pad_value=None):
"""
Recursively pad a nested list to match target dimensions.
Args:
nested_list (`list`):
Nested list to pad.
target_dims (`list`):
Target dimensions for each level.
current_level (`int`, *optional*, defaults to 0):
Current nesting level.
pad_value (`int`, *optional*):
Value to use for padding.
Returns:
`list`: The padded nested list.
"""
if pad_value is None:
pad_value = self.point_pad_value
if current_level >= len(target_dims):
return nested_list
# Ensure we have a list
if not isinstance(nested_list, list):
nested_list = [nested_list]
# Pad current level
current_size = len(nested_list)
target_size = target_dims[current_level]
# Pad with appropriate values
if current_level == len(target_dims) - 1:
# At the coordinate level, pad with pad_value
nested_list.extend([pad_value] * (target_size - current_size))
else:
# At higher levels, pad with nested structures
if current_size > 0:
# Create appropriately sized template
if current_level < len(target_dims) - 2:
# For non-coordinate levels, create empty nested structure
template_dims = target_dims[current_level + 1 :]
template = self._create_empty_nested_structure(template_dims, pad_value)
else:
# For coordinate level, create list of pad_values
template = [pad_value] * target_dims[current_level + 1]
nested_list.extend([deepcopy(template) for _ in range(target_size - current_size)])
else:
# Create from scratch
template_dims = target_dims[current_level + 1 :]
template = self._create_empty_nested_structure(template_dims, pad_value)
nested_list.extend([deepcopy(template) for _ in range(target_size)])
# Recursively pad sublists
if current_level < len(target_dims) - 1:
for i in range(len(nested_list)):
if isinstance(nested_list[i], list):
nested_list[i] = self._pad_nested_list(nested_list[i], target_dims, current_level + 1, pad_value)
return nested_list
def _create_empty_nested_structure(self, dims, pad_value):
"""
Create an empty nested structure with given dimensions filled with pad_value.
Args:
dims (`list`):
The dimensions of the nested structure.
pad_value (`int`):
The value to fill the structure with.
"""
if len(dims) == 1:
return [pad_value] * dims[0]
else:
return [self._create_empty_nested_structure(dims[1:], pad_value) for _ in range(dims[0])]
def _get_nesting_level(self, input_list):
"""
Get the nesting level of a list structure.
Args:
input_list (`list`):
The list to get the nesting level of.
"""
if isinstance(input_list, list):
if len(input_list) == 0:
return 1
return 1 + self._get_nesting_level(input_list[0])
elif isinstance(input_list, (np.ndarray, torch.Tensor)):
# For arrays/tensors, the nesting level is the number of dimensions
return len(input_list.shape)
return 0
def _validate_single_input(
self,
data: Union[torch.Tensor, np.ndarray, list],
expected_depth: int,
input_name: str,
expected_format: str,
expected_coord_size: Optional[int] = None,
) -> list:
"""
Validate a single input by ensuring proper nesting and raising an error if the input is not valid.
Args:
data (`torch.Tensor`, `np.ndarray`, or `list`):
Input data to process.
expected_depth (`int`):
Expected nesting depth.
input_name (`str`):
Name of the input for error messages.
expected_format (`str`):
The expected format of the input.
expected_coord_size (`int`, *optional*):
Expected coordinate size (2 for points, 4 for boxes, None for labels).
"""
if data is None:
return None
# Handle tensors and numpy arrays first
if isinstance(data, (torch.Tensor, np.ndarray)):
# For tensors/arrays, we can directly check the number of dimensions
if data.ndim != expected_depth:
raise ValueError(
f"Input {input_name} must be a tensor/array with {expected_depth} dimensions. The expected nesting format is {expected_format}. Got {data.ndim} dimensions."
)
elif expected_coord_size is not None:
if data.shape[-1] != expected_coord_size:
raise ValueError(
f"Input {input_name} must be a tensor/array with {expected_coord_size} as the last dimension, got {data.shape[-1]}."
)
return self._convert_to_nested_list(data, expected_depth)
# Handle nested lists
if isinstance(data, list):
current_depth = self._get_nesting_level(data)
if current_depth != expected_depth:
raise ValueError(
f"Input {input_name} must be a nested list with {expected_depth} levels. The expected nesting format is {expected_format}. Got {current_depth} levels."
)
return self._convert_to_nested_list(data, expected_depth)
def _normalize_tensor_coordinates(self, tensor, original_sizes, is_bounding_box=False, preserve_padding=False):
"""
Helper method to normalize coordinates in a tensor across multiple images.
Args:
tensor (`torch.Tensor`):
Input tensor with coordinates.
original_sizes (`list`):
Original image sizes.
is_bounding_box (`bool`, *optional*, defaults to `False`):
Whether coordinates are bounding boxes.
preserve_padding (`bool`, *optional*, defaults to `False`):
Whether to preserve padding values (for points).
"""
if preserve_padding:
# For points: avoid normalizing pad values
mask = tensor != self.point_pad_value
coord_mask = mask.all(dim=-1, keepdim=True)
for img_idx in range(len(original_sizes)):
if img_idx < tensor.shape[0]:
original_size = original_sizes[img_idx] if img_idx < len(original_sizes) else original_sizes[0]
normalized_coords = self._normalize_coordinates(
self.target_size, tensor[img_idx], original_size, is_bounding_box=is_bounding_box
)
if preserve_padding:
# Only update non-padded values
img_mask = coord_mask[img_idx]
tensor[img_idx] = torch.where(
img_mask.expand_as(tensor[img_idx]), normalized_coords, tensor[img_idx]
)
else:
tensor[img_idx] = normalized_coords
def post_process_masks(
self,
masks,
original_sizes,
mask_threshold=0.0,
binarize=True,
max_hole_area=0.0,
max_sprinkle_area=0.0,
apply_non_overlapping_constraints=False,
**kwargs,
):
"""
Remove padding and upscale masks to the original image size.
Args:
masks (`Union[List[torch.Tensor], List[np.ndarray]]`):
Batched masks from the mask_decoder in (batch_size, num_channels, height, width) format.
original_sizes (`Union[torch.Tensor, List[Tuple[int,int]]]`):
The original sizes of each image before it was resized to the model's expected input shape, in (height,
width) format.
mask_threshold (`float`, *optional*, defaults to 0.0):
Threshold for binarization and post-processing operations.
binarize (`bool`, *optional*, defaults to `True`):
Whether to binarize the masks.
max_hole_area (`float`, *optional*, defaults to 0.0):
The maximum area of a hole to fill.
max_sprinkle_area (`float`, *optional*, defaults to 0.0):
The maximum area of a sprinkle to fill.
apply_non_overlapping_constraints (`bool`, *optional*, defaults to `False`):
Whether to apply non-overlapping constraints to the masks.
Returns:
            (`torch.Tensor`): Batched masks in (batch_size, num_channels, height, width) format, where (height, width)
is given by original_size.
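        Example (a minimal sketch with random low-resolution masks; the shapes are illustrative assumptions):
        ```python
        >>> import torch
        >>> from transformers import Sam2Processor
        >>> processor = Sam2Processor.from_pretrained("facebook/sam2.1-hiera-tiny")
        >>> low_res_masks = torch.randn(1, 1, 3, 256, 256)  # (batch, point_batch, num_masks, height, width)
        >>> masks = processor.post_process_masks(low_res_masks, original_sizes=[(480, 640)])
        >>> masks[0].shape
        torch.Size([1, 3, 480, 640])
        ```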
"""
return self.image_processor.post_process_masks(
masks,
original_sizes,
mask_threshold,
binarize,
max_hole_area,
max_sprinkle_area,
apply_non_overlapping_constraints,
**kwargs,
)
@property
def model_input_names(self):
image_processor_input_names = self.image_processor.model_input_names
return list(image_processor_input_names + ["original_sizes"])
__all__ = ["Sam2Processor"]
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/sam2/modeling_sam2.py | src/transformers/models/sam2/modeling_sam2.py | # π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨
# This file was automatically generated from src/transformers/models/sam2/modular_sam2.py.
# Do NOT edit this file manually as any edits will be overwritten by the generation of
# the file from the modular. If any change should be done, please apply the change to the
# modular_sam2.py file directly. One of our CI enforces this.
# π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨
# coding=utf-8
# Copyright 2025 The Meta AI Authors and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from collections.abc import Callable
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from transformers.utils.generic import OutputRecorder
from ... import initialization as init
from ...activations import ACT2FN
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import BaseModelOutput
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...pytorch_utils import compile_compatible_method_lru_cache
from ...utils import ModelOutput, auto_docstring
from ...utils.generic import TransformersKwargs, check_model_inputs
from ..auto import AutoModel
from .configuration_sam2 import (
Sam2Config,
Sam2HieraDetConfig,
Sam2MaskDecoderConfig,
Sam2PromptEncoderConfig,
Sam2VisionConfig,
)
@dataclass
@auto_docstring(custom_intro="Base class for the vision encoder's outputs.")
class Sam2VisionEncoderOutput(ModelOutput):
r"""
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, height, width, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model.
fpn_hidden_states (`tuple(torch.FloatTensor)`):
Tuple of `torch.FloatTensor` (one for each feature level, from high to low resolution) of shape
`(batch_size, hidden_size, height, width)`. Feature maps from the Feature Pyramid Network neck.
fpn_position_encoding (`tuple(torch.FloatTensor)`):
Tuple of `torch.FloatTensor` (one for each feature level, from high to low resolution) of shape
`(batch_size, hidden_size, height, width)`. Positional encodings corresponding to the `fpn_hidden_states`.
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
one for the output of each stage) of shape `(batch_size, height, width, hidden_size)`. Hidden-states of the
model at the output of each stage.
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
the self-attention heads.
"""
last_hidden_state: Optional[torch.FloatTensor] = None
    fpn_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
    fpn_position_encoding: Optional[tuple[torch.FloatTensor, ...]] = None
hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
attentions: Optional[tuple[torch.FloatTensor, ...]] = None
@dataclass
@auto_docstring(custom_intro="Base class for the Sam2 model's output.")
class Sam2ImageSegmentationOutput(ModelOutput):
r"""
iou_scores (`torch.FloatTensor` of shape `(batch_size, point_batch_size, num_masks)`):
The Intersection over Union (IoU) scores of the predicted masks.
pred_masks (`torch.FloatTensor` of shape `(batch_size, point_batch_size, num_masks, height, width)`):
The predicted low-resolution masks. This is an alias for `low_res_masks`. These masks need to be post-processed
by the processor to be brought to the original image size.
object_score_logits (`torch.FloatTensor` of shape `(batch_size, point_batch_size, 1)`):
Logits for the object score, indicating if an object is present.
image_embeddings (`tuple(torch.FloatTensor)`):
The features from the FPN, which are used by the mask decoder. This is a tuple of `torch.FloatTensor` where each
tensor has shape `(batch_size, channels, height, width)`.
vision_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of each stage) of shape `(batch_size, height, width, hidden_size)`.
Hidden-states of the vision model at the output of each stage.
vision_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights of the vision model.
mask_decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights of the mask decoder.
"""
iou_scores: Optional[torch.FloatTensor] = None
pred_masks: Optional[torch.FloatTensor] = None
object_score_logits: Optional[torch.FloatTensor] = None
    image_embeddings: Optional[tuple[torch.FloatTensor, ...]] = None
vision_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
vision_attentions: Optional[tuple[torch.FloatTensor, ...]] = None
mask_decoder_attentions: Optional[tuple[torch.FloatTensor, ...]] = None
class Sam2PatchEmbeddings(nn.Module):
r"""
Turns pixel values into patch embeddings for transformer consumption.
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using
[`AutoImageProcessor`]. See [`Sam2ImageProcessorFast.__call__`] for details.
Returns:
embeddings (`torch.FloatTensor`):
            Patch embeddings whose spatial shape depends on image_size, patch_kernel_size, patch_stride and patch_padding.
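    Example (a minimal sketch using the default tiny configuration; the shapes follow from those defaults):
    ```python
    >>> import torch
    >>> from transformers import Sam2HieraDetConfig
    >>> patch_embed = Sam2PatchEmbeddings(Sam2HieraDetConfig())
    >>> patch_embed(torch.randn(1, 3, 1024, 1024)).shape  # (batch_size, height // 4, width // 4, hidden_size)
    torch.Size([1, 256, 256, 96])
    ```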
"""
def __init__(self, config: Sam2HieraDetConfig):
super().__init__()
num_channels = config.num_channels
hidden_size = config.hidden_size
self.projection = nn.Conv2d(
num_channels,
hidden_size,
kernel_size=config.patch_kernel_size,
stride=config.patch_stride,
padding=config.patch_padding,
)
def forward(self, pixel_values):
_, num_channels, height, width = pixel_values.shape
embeddings = self.projection(pixel_values.to(self.projection.weight.dtype)).permute(0, 2, 3, 1)
return embeddings
# copied and adapted from original implementation, also practically equal to DetrSinePositionEmbedding
class Sam2SinePositionEmbedding(nn.Module):
"""
This is a more standard version of the position embedding, very similar to the one used by the Attention is all you
need paper, generalized to work on images.
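    Example (an illustrative sketch for this internal module):
    ```python
    >>> import torch
    >>> from transformers.models.sam2.modeling_sam2 import Sam2SinePositionEmbedding
    >>> position_encoding = Sam2SinePositionEmbedding(num_pos_feats=128, normalize=True)
    >>> pos = position_encoding(torch.Size([2, 256, 64, 64]), device="cpu", dtype=torch.float32)
    >>> pos.shape  # (batch_size, 2 * num_pos_feats, height, width)
    torch.Size([2, 256, 64, 64])
    ```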
"""
def __init__(
self, num_pos_feats: int = 64, temperature: int = 10000, normalize: bool = False, scale: Optional[float] = None
):
super().__init__()
if scale is not None and normalize is False:
raise ValueError("normalize should be True if scale is passed")
self.num_pos_feats = num_pos_feats
self.temperature = temperature
self.normalize = normalize
self.scale = 2 * math.pi if scale is None else scale
@compile_compatible_method_lru_cache(maxsize=1)
def forward(
self,
shape: torch.Size,
device: Union[torch.device, str],
dtype: torch.dtype,
mask: Optional[Tensor] = None,
) -> Tensor:
if mask is None:
mask = torch.zeros((shape[0], shape[2], shape[3]), device=device, dtype=torch.bool)
not_mask = (~mask).to(dtype)
y_embed = not_mask.cumsum(1)
x_embed = not_mask.cumsum(2)
if self.normalize:
eps = 1e-6
y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale
x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale
dim_t = torch.arange(self.num_pos_feats, dtype=torch.int64, device=device).to(dtype)
dim_t = self.temperature ** (2 * torch.div(dim_t, 2, rounding_mode="floor") / self.num_pos_feats)
pos_x = x_embed[:, :, :, None] / dim_t
pos_y = y_embed[:, :, :, None] / dim_t
pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3)
pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3)
pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
return pos
class Sam2VisionNeck(nn.Module):
def __init__(self, config: Sam2VisionConfig):
super().__init__()
self.config = config
self.position_encoding = Sam2SinePositionEmbedding(num_pos_feats=config.fpn_hidden_size // 2, normalize=True)
self.convs = nn.ModuleList()
for in_channels in config.backbone_channel_list:
self.convs.append(
nn.Conv2d(
in_channels=in_channels,
out_channels=config.fpn_hidden_size,
kernel_size=config.fpn_kernel_size,
stride=config.fpn_stride,
padding=config.fpn_padding,
),
)
self.fpn_top_down_levels = config.fpn_top_down_levels
def forward(self, hidden_states: torch.Tensor) -> tuple[tuple[torch.Tensor, ...], tuple[torch.Tensor, ...]]:
fpn_hidden_states = ()
fpn_position_encoding = ()
# forward in top-down order (from low to high resolution)
n = len(self.convs) - 1
for i in range(n, -1, -1):
lateral_features = hidden_states[i].permute(0, 3, 1, 2)
            lateral_features = self.convs[n - i](lateral_features.to(self.convs[n - i].weight.dtype))
if i not in self.fpn_top_down_levels or i == n:
prev_features = lateral_features
else:
top_down_features = F.interpolate(
prev_features.to(dtype=torch.float32),
scale_factor=2.0,
mode="nearest",
align_corners=None,
antialias=False,
).to(lateral_features.dtype)
prev_features = lateral_features + top_down_features
prev_position_encoding = self.position_encoding(
prev_features.shape, prev_features.device, prev_features.dtype
).to(prev_features.dtype)
fpn_hidden_states += (prev_features,)
fpn_position_encoding += (prev_position_encoding,)
return fpn_hidden_states, fpn_position_encoding
def eager_attention_forward(
module: nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: Optional[torch.Tensor],
scaling: float,
dropout: float = 0.0,
**kwargs,
):
attn_weights = torch.matmul(query, key.transpose(2, 3)) * scaling
if attention_mask is not None:
attn_weights = attn_weights + attention_mask
attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
attn_output = torch.matmul(attn_weights, value)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, attn_weights
def do_pool(x: torch.Tensor, query_stride: Optional[tuple[int, int]] = None) -> torch.Tensor:
    """Downsample a (B, H, W, C) feature map by max pooling with `query_stride`; no-op when it is `None`."""
    if query_stride is None:
        return x
# (B, H, W, C) -> (B, C, H, W)
x = x.permute(0, 3, 1, 2)
x = nn.functional.max_pool2d(x, kernel_size=query_stride, stride=query_stride, ceil_mode=False)
# (B, C, H', W') -> (B, H', W', C)
x = x.permute(0, 2, 3, 1)
return x
class Sam2MultiScaleAttention(nn.Module):
def __init__(
self,
config: Sam2HieraDetConfig,
dim: int,
dim_out: int,
num_attention_heads: int,
query_stride: Optional[tuple[int, int]] = None,
):
super().__init__()
self.config = config
self.dim = dim
self.dim_out = dim_out
self.query_stride = query_stride
self.num_attention_heads = num_attention_heads
head_dim = dim_out // num_attention_heads
self.scale = head_dim**-0.5
self.qkv = nn.Linear(dim, dim_out * 3)
self.proj = nn.Linear(dim_out, dim_out)
self.is_causal = False
def forward(self, hidden_states: torch.Tensor, **kwargs) -> torch.Tensor:
batch_size, height, width, _ = hidden_states.shape
# qkv with shape (B, H * W, 3, nHead, C)
qkv = self.qkv(hidden_states).reshape(batch_size, height * width, 3, self.num_attention_heads, -1)
# q, k, v with shape (B, H * W, nheads, C)
query, key, value = torch.unbind(qkv, 2)
# Q pooling (for downsample at stage changes)
if self.query_stride:
query = do_pool(query.reshape(batch_size, height, width, -1), self.query_stride)
height, width = query.shape[1:3] # downsampled shape
query = query.reshape(batch_size, height * width, self.num_attention_heads, -1)
# transpose query, key, value to (B, nHead, H * W, C)
query = query.transpose(1, 2)
key = key.transpose(1, 2)
value = value.transpose(1, 2)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, _ = attention_interface(
self,
query,
key,
value,
attention_mask=None,
is_causal=self.is_causal,
scaling=self.scale,
**kwargs,
)
attn_output = attn_output.reshape(batch_size, height, width, -1)
attn_output = self.proj(attn_output)
return attn_output
class Sam2FeedForward(nn.Module):
def __init__(
self,
input_dim: int,
hidden_dim: int,
output_dim: int,
num_layers: int,
activation: str = "relu",
sigmoid_output: bool = False,
):
super().__init__()
self.num_layers = num_layers
self.activation = ACT2FN[activation]
self.proj_in = nn.Linear(input_dim, hidden_dim)
self.proj_out = nn.Linear(hidden_dim, output_dim)
self.layers = nn.ModuleList([nn.Linear(hidden_dim, hidden_dim) for _ in range(num_layers - 2)])
self.sigmoid_output = sigmoid_output
def forward(self, hidden_states):
hidden_states = self.proj_in(hidden_states)
hidden_states = self.activation(hidden_states)
for layer in self.layers:
hidden_states = self.activation(layer(hidden_states))
hidden_states = self.proj_out(hidden_states)
if self.sigmoid_output:
hidden_states = F.sigmoid(hidden_states)
return hidden_states
def window_partition(hidden_state, window_size):
"""
Partition into non-overlapping windows with padding if needed.
Args:
hidden_state (`torch.Tensor`):
Input tokens with [batch_size, height, width, num_channels].
window_size (`int`):
Window size.
Returns:
`tuple(torch.FloatTensor)` comprising various elements:
- windows: windows after partition with [batch_size * num_windows, window_size, window_size, num_channels].
- (padded_height, padded_width): padded height and width before partition
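    Example (an illustrative round trip together with `window_unpartition`):
    ```python
    >>> import torch
    >>> hidden_state = torch.randn(1, 14, 14, 32)  # 14 is not divisible by 8, so padding kicks in
    >>> windows, pad_hw = window_partition(hidden_state, window_size=8)
    >>> windows.shape, pad_hw
    (torch.Size([4, 8, 8, 32]), (16, 16))
    >>> restored = window_unpartition(windows, 8, pad_hw, (14, 14))
    >>> torch.equal(restored, hidden_state)
    True
    ```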
"""
batch_size, height, width, num_channels = hidden_state.shape
pad_height = (window_size - height % window_size) % window_size
pad_width = (window_size - width % window_size) % window_size
# Noop in case pad_width == 0 and pad_height == 0.
hidden_state = nn.functional.pad(hidden_state, (0, 0, 0, pad_width, 0, pad_height))
padded_height, padded_width = height + pad_height, width + pad_width
hidden_state = hidden_state.view(
batch_size, padded_height // window_size, window_size, padded_width // window_size, window_size, num_channels
)
windows = hidden_state.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, num_channels)
return windows, (padded_height, padded_width)
def window_unpartition(windows, window_size, pad_height_width, height_width):
"""
Window unpartition into original sequences and removing padding.
Args:
windows (`torch.Tensor`):
Input tokens with [batch_size * num_windows, window_size, window_size, num_channels].
window_size (`int`):
Window size.
pad_height_width (`tuple[int]`):
Padded height and width (padded_height, padded_width).
height_width (`tuple[int]`):
Original height and width before padding.
Returns:
hidden_state: unpartitioned sequences with [batch_size, height, width, num_channels].
"""
padded_height, padded_width = pad_height_width
height, width = height_width
batch_size = windows.shape[0] // (padded_height * padded_width // window_size // window_size)
hidden_state = windows.view(
batch_size, padded_height // window_size, padded_width // window_size, window_size, window_size, -1
)
hidden_state = hidden_state.permute(0, 1, 3, 2, 4, 5).contiguous()
hidden_state = hidden_state.view(batch_size, padded_height, padded_width, -1)
# We always have height <= padded_height and width <= padded_width
hidden_state = hidden_state[:, :height, :width, :].contiguous()
return hidden_state
class Sam2MultiScaleBlock(GradientCheckpointingLayer):
def __init__(
self,
config: Sam2HieraDetConfig,
stage_idx: int,
block_idx: int,
total_block_idx: int,
):
super().__init__()
# take embed dim from previous stage if first block of stage
self.dim = (
config.embed_dim_per_stage[stage_idx - 1]
if stage_idx > 0 and block_idx == 0
else config.embed_dim_per_stage[stage_idx]
)
self.dim_out = config.embed_dim_per_stage[stage_idx]
self.layer_norm1 = nn.LayerNorm(self.dim, eps=config.layer_norm_eps)
# take window size from previous stage if first block of stage
self.window_size = (
config.window_size_per_stage[stage_idx - 1]
if stage_idx > 0 and block_idx == 0
else config.window_size_per_stage[stage_idx]
)
self.window_size = 0 if total_block_idx in config.global_attention_blocks else self.window_size
# use query stride for first block of stage if stage is a query pool stage
self.query_stride = (
config.query_stride if 0 < stage_idx <= config.num_query_pool_stages and block_idx == 0 else None
)
self.attn = Sam2MultiScaleAttention(
config,
self.dim,
self.dim_out,
num_attention_heads=config.num_attention_heads_per_stage[stage_idx],
query_stride=self.query_stride,
)
self.layer_norm2 = nn.LayerNorm(self.dim_out, eps=config.layer_norm_eps)
self.mlp = Sam2FeedForward(
self.dim_out,
int(self.dim_out * config.mlp_ratio),
self.dim_out,
num_layers=2,
activation=config.hidden_act,
)
if self.dim != self.dim_out:
self.proj = nn.Linear(self.dim, self.dim_out)
def forward(
self,
hidden_states: torch.Tensor,
**kwargs: Unpack[TransformersKwargs],
) -> torch.FloatTensor:
residual = hidden_states # batch_size, height, width, channel
hidden_states = self.layer_norm1(hidden_states)
# Skip connection
if self.dim != self.dim_out:
residual = do_pool(self.proj(hidden_states), self.query_stride)
# Window partition
window_size = self.window_size
if self.window_size > 0:
H, W = hidden_states.shape[1], hidden_states.shape[2]
hidden_states, pad_hw = window_partition(hidden_states, window_size)
# Window Attention + Q Pooling (if stage change)
attn_output = self.attn(
hidden_states=hidden_states,
**kwargs,
)
hidden_states = attn_output
if self.query_stride:
# Shapes have changed due to Q pooling
window_size = self.window_size // self.query_stride[0]
H, W = residual.shape[1:3]
pad_h = (window_size - H % window_size) % window_size
pad_w = (window_size - W % window_size) % window_size
pad_hw = (H + pad_h, W + pad_w)
# Reverse window partition
if self.window_size > 0:
hidden_states = window_unpartition(hidden_states, window_size, pad_hw, (H, W))
hidden_states = residual + hidden_states
layernorm_output = self.layer_norm2(hidden_states)
hidden_states = hidden_states + self.mlp(layernorm_output)
return hidden_states
@dataclass
@auto_docstring(
custom_intro="""
Hiera model's outputs that also contains a pooling of the last hidden states.
"""
)
class Sam2HieraDetModelOutput(ModelOutput):
r"""
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, height, width, hidden_size)`):
        Sequence of hidden-states at the output of the last layer of the model.
intermediate_hidden_states (`tuple[torch.FloatTensor]` of shape `(batch_size, height, width, hidden_size)`):
Sequence of hidden-states at the output of the intermediate layers of the model.
"""
last_hidden_state: Optional[torch.FloatTensor] = None
intermediate_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
@auto_docstring
class Sam2PreTrainedModel(PreTrainedModel):
config_class = Sam2Config
base_model_prefix = "sam2"
main_input_name = "pixel_values"
input_modalities = ("image",)
_supports_sdpa = True
_supports_flash_attn_2 = True
_supports_attention_backend = True
@torch.no_grad()
def _init_weights(self, module):
super()._init_weights(module)
if isinstance(module, Sam2HieraDetModel):
if module.pos_embed is not None:
init.zeros_(module.pos_embed)
if module.pos_embed_window is not None:
init.zeros_(module.pos_embed_window)
elif isinstance(module, Sam2PositionalEmbedding):
init.normal_(module.positional_embedding, std=module.scale)
elif isinstance(module, Sam2Model):
if module.no_memory_embedding is not None:
init.zeros_(module.no_memory_embedding)
class Sam2HieraDetModel(Sam2PreTrainedModel):
config_class = Sam2HieraDetConfig
main_input_name = "pixel_values"
_can_record_outputs = {
"hidden_states": Sam2MultiScaleBlock,
"attentions": Sam2MultiScaleAttention,
}
def __init__(self, config: Sam2HieraDetConfig):
super().__init__(config)
self.patch_embed = Sam2PatchEmbeddings(config)
# Windowed positional embedding (https://huggingface.co/papers/2311.05613)
self.pos_embed = nn.Parameter(
torch.zeros(1, config.hidden_size, *config.window_positional_embedding_background_size)
)
self.pos_embed_window = nn.Parameter(
torch.zeros(1, config.hidden_size, config.window_size_per_stage[0], config.window_size_per_stage[0])
)
self.stage_ends = (np.cumsum(config.blocks_per_stage) - 1).tolist()
self.blocks = nn.ModuleList()
total_block_idx = 0
for stage_idx, blocks_per_stage in enumerate(config.blocks_per_stage):
for block_idx in range(blocks_per_stage):
block = Sam2MultiScaleBlock(
config=config, stage_idx=stage_idx, block_idx=block_idx, total_block_idx=total_block_idx
)
self.blocks.append(block)
total_block_idx += 1
self.post_init()
def get_input_embeddings(self):
return self.patch_embed
def _get_pos_embed(self, hw: tuple[int, int]) -> torch.Tensor:
h, w = hw
window_embed = self.pos_embed_window
pos_embed = F.interpolate(self.pos_embed, size=(h, w), mode="bicubic")
pos_embed = pos_embed + window_embed.tile([x // y for x, y in zip(pos_embed.shape, window_embed.shape)])
pos_embed = pos_embed.permute(0, 2, 3, 1)
return pos_embed
@check_model_inputs
def forward(
self,
pixel_values: Optional[torch.FloatTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple, Sam2HieraDetModelOutput]:
if pixel_values is None:
raise ValueError("You have to specify pixel_values")
hidden_states = self.patch_embed(pixel_values)
hidden_states = hidden_states + self._get_pos_embed(hidden_states.shape[1:3])
intermediate_hidden_states = ()
for i, block_module in enumerate(self.blocks):
hidden_states = block_module(hidden_states, **kwargs)
if i in self.stage_ends:
intermediate_hidden_states = intermediate_hidden_states + (hidden_states,)
return Sam2HieraDetModelOutput(
last_hidden_state=hidden_states,
intermediate_hidden_states=intermediate_hidden_states,
)
@auto_docstring(
custom_intro="""
The vision model from Sam without any head or projection on top.
"""
)
class Sam2VisionModel(Sam2PreTrainedModel):
config_class = Sam2VisionConfig
main_input_name = "pixel_values"
_can_record_outputs = {
"hidden_states": Sam2MultiScaleBlock,
"attentions": Sam2MultiScaleAttention,
}
def __init__(self, config: Sam2VisionConfig):
super().__init__(config)
self.config = config
self.backbone = AutoModel.from_config(config.backbone_config)
self.neck = Sam2VisionNeck(config)
self.num_feature_levels = config.num_feature_levels
self.post_init()
def get_input_embeddings(self):
return self.backbone.get_input_embeddings()
@check_model_inputs
def forward(
self,
pixel_values: Optional[torch.FloatTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple, Sam2VisionEncoderOutput]:
if pixel_values is None:
raise ValueError("You have to specify pixel_values")
# Forward through backbone
backbone_output = self.backbone(pixel_values, **kwargs)
hidden_states = backbone_output.last_hidden_state
intermediate_hidden_states = backbone_output.intermediate_hidden_states
fpn_hidden_states, fpn_position_encoding = self.neck(intermediate_hidden_states)
# Select last `num_feature_levels` feature levels from FPN and reverse order to get features from high to low resolution
fpn_hidden_states = fpn_hidden_states[-self.num_feature_levels :][::-1]
fpn_position_encoding = fpn_position_encoding[-self.num_feature_levels :][::-1]
return Sam2VisionEncoderOutput(
last_hidden_state=hidden_states,
fpn_hidden_states=fpn_hidden_states,
fpn_position_encoding=fpn_position_encoding,
)
class Sam2PositionalEmbedding(nn.Module):
def __init__(self, config: Sam2PromptEncoderConfig):
super().__init__()
self.scale = config.scale
positional_embedding = self.scale * torch.randn((2, config.hidden_size // 2))
self.register_buffer("positional_embedding", positional_embedding)
def forward(self, input_coords, input_shape=None):
"""Positionally encode points that are normalized to [0,1]."""
coordinates = input_coords.clone()
if input_shape is not None:
coordinates[:, :, :, 0] = coordinates[:, :, :, 0] / input_shape[1]
coordinates[:, :, :, 1] = coordinates[:, :, :, 1] / input_shape[0]
            coordinates = coordinates.to(torch.float32)
# assuming coords are in [0, 1]^2 square and have d_1 x ... x d_n x 2 shape
coordinates = 2 * coordinates - 1
coordinates = coordinates.to(self.positional_embedding.dtype)
coordinates = coordinates @ self.positional_embedding
coordinates = 2 * np.pi * coordinates
# outputs d_1 x ... x d_n x channel shape
return torch.cat([torch.sin(coordinates), torch.cos(coordinates)], dim=-1)
class Sam2MaskEmbedding(nn.Module):
def __init__(self, config: Sam2PromptEncoderConfig):
super().__init__()
self.mask_input_channels = config.mask_input_channels // 4
self.activation = ACT2FN[config.hidden_act]
self.conv1 = nn.Conv2d(1, self.mask_input_channels, kernel_size=2, stride=2)
self.conv2 = nn.Conv2d(self.mask_input_channels, config.mask_input_channels, kernel_size=2, stride=2)
self.conv3 = nn.Conv2d(config.mask_input_channels, config.hidden_size, kernel_size=1)
self.layer_norm1 = Sam2LayerNorm(
self.mask_input_channels, eps=config.layer_norm_eps, data_format="channels_first"
)
self.layer_norm2 = Sam2LayerNorm(
self.mask_input_channels * 4, eps=config.layer_norm_eps, data_format="channels_first"
)
def forward(self, masks):
hidden_states = self.conv1(masks)
hidden_states = self.layer_norm1(hidden_states)
hidden_states = self.activation(hidden_states)
hidden_states = self.conv2(hidden_states)
hidden_states = self.layer_norm2(hidden_states)
hidden_states = self.activation(hidden_states)
dense_embeddings = self.conv3(hidden_states)
return dense_embeddings
class Sam2PromptEncoder(nn.Module):
def __init__(self, config: Sam2PromptEncoderConfig):
super().__init__()
self.shared_embedding = Sam2PositionalEmbedding(config)
self.mask_embed = Sam2MaskEmbedding(config)
self.no_mask_embed = nn.Embedding(1, config.hidden_size)
self.image_embedding_size = (config.image_size // config.patch_size, config.image_size // config.patch_size)
self.mask_input_size = (4 * config.image_size // config.patch_size, 4 * config.image_size // config.patch_size)
self.input_image_size = config.image_size
self.point_embed = nn.Embedding(config.num_point_embeddings, config.hidden_size)
self.hidden_size = config.hidden_size
self.not_a_point_embed = nn.Embedding(1, config.hidden_size)
def _embed_points(self, points: torch.Tensor, labels: torch.Tensor, pad: bool) -> torch.Tensor:
"""Embeds point prompts."""
points = points + 0.5 # Shift to center of pixel
if pad:
points = torch.nn.functional.pad(points, (0, 0, 0, 1), mode="constant", value=0)
labels = torch.nn.functional.pad(labels, (0, 1), mode="constant", value=-1)
input_shape = (self.input_image_size, self.input_image_size)
point_embedding = self.shared_embedding(points, input_shape)
# torch.where and expanding the labels tensor is required by the ONNX export
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | true |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/sam2/__init__.py | src/transformers/models/sam2/__init__.py | # Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure
if TYPE_CHECKING:
from .configuration_sam2 import *
from .image_processing_sam2_fast import *
from .modeling_sam2 import *
from .processing_sam2 import *
else:
import sys
_file = globals()["__file__"]
sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/sam2/configuration_sam2.py | src/transformers/models/sam2/configuration_sam2.py | # coding=utf-8
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""SAM2 model configuration"""
from ...configuration_utils import PreTrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING, AutoConfig
logger = logging.get_logger(__name__)
class Sam2HieraDetConfig(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`Sam2HieraDetModel`]. It is used to instantiate
a HieraDet model as defined in the original sam2 repo according to the specified arguments, defining the model architecture.
    Instantiating a configuration with the defaults will yield a similar configuration to that of SAM 2.1 Hiera-tiny
[facebook/sam2.1-hiera-tiny](https://huggingface.co/facebook/sam2.1-hiera-tiny) architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
hidden_size (`int`, *optional*, defaults to 96):
The hidden dimension of the image encoder.
num_attention_heads (`int`, *optional*, defaults to 1):
Number of attention heads for each attention layer in the Transformer encoder.
num_channels (`int`, *optional*, defaults to 3):
The number of channels in the image.
image_size (`list[int]`, *optional*, defaults to `[1024, 1024]`):
The size of the image.
patch_kernel_size (`list[int]`, *optional*, defaults to `[7, 7]`):
The kernel size of the patch.
patch_stride (`list[int]`, *optional*, defaults to `[4, 4]`):
The stride of the patch.
patch_padding (`list[int]`, *optional*, defaults to `[3, 3]`):
The padding of the patch.
query_stride (`list[int]`, *optional*, defaults to `[2, 2]`):
The downsample stride between stages.
window_positional_embedding_background_size (`list[int]`, *optional*, defaults to `[7, 7]`):
            The size of the background positional embedding, which is interpolated to the feature map size.
num_query_pool_stages (`int`, *optional*, defaults to 3):
The number of query pool stages.
blocks_per_stage (`list[int]`, *optional*, defaults to `[1, 2, 7, 2]`):
The number of blocks per stage.
embed_dim_per_stage (`list[int]`, *optional*, defaults to `[96, 192, 384, 768]`):
The embedding dimension per stage.
num_attention_heads_per_stage (`list[int]`, *optional*, defaults to `[1, 2, 4, 8]`):
The number of attention heads per stage.
window_size_per_stage (`list[int]`, *optional*, defaults to `[8, 4, 14, 7]`):
The window size per stage.
global_attention_blocks (`list[int]`, *optional*, defaults to `[5, 7, 9]`):
The blocks where global attention is used.
mlp_ratio (`float`, *optional*, defaults to 4.0):
The ratio of the MLP hidden dimension to the embedding dimension.
hidden_act (`str`, *optional*, defaults to `"gelu"`):
            The non-linear activation function in the MLP of each block.
layer_norm_eps (`float`, *optional*, defaults to 1e-06):
The epsilon for the layer normalization.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
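    Example:
    ```python
    >>> from transformers import Sam2HieraDetConfig, Sam2HieraDetModel
    >>> # Initializing a SAM 2.1 Hiera-tiny style configuration (a minimal sketch using the defaults)
    >>> configuration = Sam2HieraDetConfig()
    >>> # Initializing a model (with random weights) from that configuration
    >>> model = Sam2HieraDetModel(configuration)
    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```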
"""
base_config_key = "backbone_config"
model_type = "sam2_hiera_det_model"
def __init__(
self,
hidden_size=96,
num_attention_heads=1,
num_channels=3,
image_size=None,
patch_kernel_size=None,
patch_stride=None,
patch_padding=None,
query_stride=None,
window_positional_embedding_background_size=None,
num_query_pool_stages=3,
blocks_per_stage=None,
embed_dim_per_stage=None,
num_attention_heads_per_stage=None,
window_size_per_stage=None,
global_attention_blocks=None,
mlp_ratio=4.0,
hidden_act="gelu",
layer_norm_eps=1e-6,
initializer_range=0.02,
**kwargs,
):
super().__init__(**kwargs)
image_size = image_size if image_size is not None else [1024, 1024]
patch_kernel_size = patch_kernel_size if patch_kernel_size is not None else [7, 7]
patch_stride = patch_stride if patch_stride is not None else [4, 4]
patch_padding = patch_padding if patch_padding is not None else [3, 3]
query_stride = query_stride if query_stride is not None else [2, 2]
window_positional_embedding_background_size = (
window_positional_embedding_background_size
if window_positional_embedding_background_size is not None
else [7, 7]
)
blocks_per_stage = blocks_per_stage if blocks_per_stage is not None else [1, 2, 7, 2]
embed_dim_per_stage = embed_dim_per_stage if embed_dim_per_stage is not None else [96, 192, 384, 768]
num_attention_heads_per_stage = (
num_attention_heads_per_stage if num_attention_heads_per_stage is not None else [1, 2, 4, 8]
)
window_size_per_stage = window_size_per_stage if window_size_per_stage is not None else [8, 4, 14, 7]
global_attention_blocks = global_attention_blocks if global_attention_blocks is not None else [5, 7, 9]
self.hidden_size = hidden_size
self.num_attention_heads = num_attention_heads
self.num_channels = num_channels
self.image_size = image_size
self.patch_kernel_size = patch_kernel_size
self.patch_stride = patch_stride
self.patch_padding = patch_padding
self.query_stride = query_stride
self.window_positional_embedding_background_size = window_positional_embedding_background_size
self.num_query_pool_stages = num_query_pool_stages
self.blocks_per_stage = blocks_per_stage
self.embed_dim_per_stage = embed_dim_per_stage
self.num_attention_heads_per_stage = num_attention_heads_per_stage
self.window_size_per_stage = window_size_per_stage
self.global_attention_blocks = global_attention_blocks
self.mlp_ratio = mlp_ratio
self.hidden_act = hidden_act
self.layer_norm_eps = layer_norm_eps
self.initializer_range = initializer_range
class Sam2VisionConfig(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`Sam2VisionModel`]. It is used to instantiate a SAM
vision encoder according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of SAM 2.1 Hiera-tiny
[facebook/sam2.1-hiera-tiny](https://huggingface.co/facebook/sam2.1-hiera-tiny) architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
backbone_config (`Union[dict, "PreTrainedConfig"]`, *optional*, defaults to `Sam2HieraDetConfig()`):
Configuration for the vision backbone. This is used to instantiate the backbone using
`AutoModel.from_config`.
backbone_channel_list (`List[int]`, *optional*, defaults to `[768, 384, 192, 96]`):
The list of channel dimensions for the backbone.
backbone_feature_sizes (`List[List[int]]`, *optional*, defaults to `[[256, 256], [128, 128], [64, 64]]`):
The spatial sizes of the feature maps from the backbone.
fpn_hidden_size (`int`, *optional*, defaults to 256):
The hidden dimension of the FPN.
fpn_kernel_size (`int`, *optional*, defaults to 1):
The kernel size for the convolutions in the neck.
fpn_stride (`int`, *optional*, defaults to 1):
The stride for the convolutions in the neck.
fpn_padding (`int`, *optional*, defaults to 0):
The padding for the convolutions in the neck.
fpn_top_down_levels (`List[int]`, *optional*, defaults to `[2, 3]`):
The levels for the top-down FPN connections.
num_feature_levels (`int`, *optional*, defaults to 3):
The number of feature levels from the FPN to use.
hidden_act (`str`, *optional*, defaults to `"gelu"`):
The non-linear activation function in the neck.
layer_norm_eps (`float`, *optional*, defaults to 1e-06):
The epsilon for the layer normalization.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
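    Example:
    ```python
    >>> from transformers import Sam2VisionConfig, Sam2VisionModel
    >>> # Initializing a SAM 2.1 Hiera-tiny style vision configuration (a minimal sketch using the defaults)
    >>> configuration = Sam2VisionConfig()
    >>> # Initializing a model (with random weights) from that configuration
    >>> model = Sam2VisionModel(configuration)
    ```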
"""
base_config_key = "vision_config"
model_type = "sam2_vision_model"
sub_configs = {
"backbone_config": AutoConfig,
}
def __init__(
self,
backbone_config=None,
backbone_channel_list=None,
backbone_feature_sizes=None,
fpn_hidden_size=256,
fpn_kernel_size=1,
fpn_stride=1,
fpn_padding=0,
fpn_top_down_levels=None,
num_feature_levels=3,
hidden_act="gelu",
layer_norm_eps=1e-6,
initializer_range=0.02,
**kwargs,
):
backbone_channel_list = [768, 384, 192, 96] if backbone_channel_list is None else backbone_channel_list
backbone_feature_sizes = (
[[256, 256], [128, 128], [64, 64]] if backbone_feature_sizes is None else backbone_feature_sizes
)
fpn_top_down_levels = [2, 3] if fpn_top_down_levels is None else fpn_top_down_levels
if isinstance(backbone_config, dict):
backbone_config["model_type"] = backbone_config.get("model_type", "sam2_hiera_det_model")
backbone_config = CONFIG_MAPPING[backbone_config["model_type"]](**backbone_config)
elif isinstance(backbone_config, Sam2HieraDetConfig):
pass
elif backbone_config is None:
backbone_config = Sam2HieraDetConfig()
self.backbone_config = backbone_config
# Neck
self.backbone_channel_list = backbone_channel_list
self.backbone_feature_sizes = backbone_feature_sizes
self.fpn_hidden_size = fpn_hidden_size
self.fpn_kernel_size = fpn_kernel_size
self.fpn_stride = fpn_stride
self.fpn_padding = fpn_padding
self.fpn_top_down_levels = fpn_top_down_levels
self.num_feature_levels = num_feature_levels
self.hidden_act = hidden_act
self.layer_norm_eps = layer_norm_eps
self.initializer_range = initializer_range
super().__init__(**kwargs)
class Sam2PromptEncoderConfig(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`Sam2PromptEncoder`]. The [`Sam2PromptEncoder`]
module is used to encode the input 2D points and bounding boxes.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
hidden_size (`int`, *optional*, defaults to 256):
Dimensionality of the hidden states.
image_size (`int`, *optional*, defaults to 1024):
The expected output resolution of the image.
patch_size (`int`, *optional*, defaults to 16):
The size (resolution) of each patch.
mask_input_channels (`int`, *optional*, defaults to 16):
The number of channels to be fed to the `MaskDecoder` module.
num_point_embeddings (`int`, *optional*, defaults to 4):
The number of point embeddings to be used.
hidden_act (`str`, *optional*, defaults to `"gelu"`):
The non-linear activation function in the encoder and pooler.
layer_norm_eps (`float`, *optional*, defaults to 1e-06):
The epsilon used by the layer normalization layers.
scale (`float`, *optional*, defaults to 1):
The scale factor for the prompt encoder.
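    Example:
    ```python
    >>> from transformers import Sam2PromptEncoderConfig, Sam2Config
    >>> # A minimal sketch: build a prompt encoder config and plug it into a full Sam2Config
    >>> prompt_encoder_config = Sam2PromptEncoderConfig(hidden_size=256)
    >>> config = Sam2Config(prompt_encoder_config=prompt_encoder_config)
    ```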
"""
base_config_key = "prompt_encoder_config"
def __init__(
self,
hidden_size=256,
image_size=1024,
patch_size=16,
mask_input_channels=16,
num_point_embeddings=4,
hidden_act="gelu",
layer_norm_eps=1e-6,
scale=1,
**kwargs,
):
super().__init__(**kwargs)
self.hidden_size = hidden_size
self.image_size = image_size
self.patch_size = patch_size
self.mask_input_channels = mask_input_channels
self.num_point_embeddings = num_point_embeddings
self.hidden_act = hidden_act
self.layer_norm_eps = layer_norm_eps
self.scale = scale
class Sam2MaskDecoderConfig(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`Sam2MaskDecoder`]. It is used to instantiate a SAM2
    mask decoder according to the specified arguments, defining the model architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
hidden_size (`int`, *optional*, defaults to 256):
Dimensionality of the hidden states.
hidden_act (`str`, *optional*, defaults to `"gelu"`):
The non-linear activation function in the SAM2 mask decoder.
mlp_dim (`int`, *optional*, defaults to 2048):
The dimension of the MLP in the two-way transformer.
num_hidden_layers (`int`, *optional*, defaults to 2):
The number of hidden layers in the two-way transformer.
num_attention_heads (`int`, *optional*, defaults to 8):
The number of attention heads in the two-way transformer.
attention_downsample_rate (`int`, *optional*, defaults to 2):
The downsample rate for the attention layers.
num_multimask_outputs (`int`, *optional*, defaults to 3):
The number of multimask outputs.
iou_head_depth (`int`, *optional*, defaults to 3):
The depth of the IoU head.
iou_head_hidden_dim (`int`, *optional*, defaults to 256):
The hidden dimension of the IoU head.
dynamic_multimask_via_stability (`bool`, *optional*, defaults to `True`):
Whether to use dynamic multimask via stability.
dynamic_multimask_stability_delta (`float`, *optional*, defaults to 0.05):
The stability delta for the dynamic multimask.
dynamic_multimask_stability_thresh (`float`, *optional*, defaults to 0.98):
The stability threshold for the dynamic multimask.
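    Example:
    ```python
    >>> from transformers import Sam2MaskDecoderConfig, Sam2Config
    >>> # A minimal sketch: build a mask decoder config and plug it into a full Sam2Config
    >>> mask_decoder_config = Sam2MaskDecoderConfig(num_multimask_outputs=3)
    >>> config = Sam2Config(mask_decoder_config=mask_decoder_config)
    ```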
"""
base_config_key = "mask_decoder_config"
def __init__(
self,
hidden_size=256,
hidden_act="gelu",
mlp_dim=2048,
num_hidden_layers=2,
num_attention_heads=8,
attention_downsample_rate=2,
num_multimask_outputs=3,
iou_head_depth=3,
iou_head_hidden_dim=256,
dynamic_multimask_via_stability=True,
dynamic_multimask_stability_delta=0.05,
dynamic_multimask_stability_thresh=0.98,
**kwargs,
):
super().__init__(**kwargs)
self.hidden_size = hidden_size
self.num_multimask_outputs = num_multimask_outputs
self.hidden_act = hidden_act
self.iou_head_depth = iou_head_depth
self.iou_head_hidden_dim = iou_head_hidden_dim
self.dynamic_multimask_via_stability = dynamic_multimask_via_stability
self.dynamic_multimask_stability_delta = dynamic_multimask_stability_delta
self.dynamic_multimask_stability_thresh = dynamic_multimask_stability_thresh
# TwoWayTransformer configuration
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.mlp_dim = mlp_dim
self.attention_downsample_rate = attention_downsample_rate
class Sam2Config(PreTrainedConfig):
r"""
[`Sam2Config`] is the configuration class to store the configuration of a [`Sam2Model`]. It is used to instantiate a
    SAM2 model according to the specified arguments, defining the vision encoder, prompt encoder, and mask decoder
    configs. Instantiating a configuration with the defaults will yield a similar configuration to that of the SAM 2.1 Hiera-tiny
[facebook/sam2.1-hiera-tiny](https://huggingface.co/facebook/sam2.1-hiera-tiny) architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vision_config (Union[`dict`, `Sam2VisionConfig`], *optional*):
Dictionary of configuration options used to initialize [`Sam2VisionConfig`].
prompt_encoder_config (Union[`dict`, `Sam2PromptEncoderConfig`], *optional*):
Dictionary of configuration options used to initialize [`Sam2PromptEncoderConfig`].
mask_decoder_config (Union[`dict`, `Sam2MaskDecoderConfig`], *optional*):
Dictionary of configuration options used to initialize [`Sam2MaskDecoderConfig`].
initializer_range (`float`, *optional*, defaults to 0.02):
Standard deviation for parameter initialization.
Example:
```python
    >>> from transformers import (
    ...     Sam2Config,
    ...     Sam2VisionConfig,
    ...     Sam2PromptEncoderConfig,
    ...     Sam2MaskDecoderConfig,
    ...     Sam2Model,
    ... )
    >>> # Initializing a Sam2Config with `"facebook/sam2.1_hiera_tiny"` style configuration
    >>> configuration = Sam2Config()
>>> # Initializing a Sam2Model (with random weights) from the `"facebook/sam2.1_hiera_tiny"` style configuration
>>> model = Sam2Model(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
>>> # We can also initialize a Sam2Config from a Sam2VisionConfig, Sam2PromptEncoderConfig, and Sam2MaskDecoderConfig
    >>> # Initializing SAM2 vision encoder, prompt encoder, and mask decoder configurations
>>> vision_config = Sam2VisionConfig()
>>> prompt_encoder_config = Sam2PromptEncoderConfig()
>>> mask_decoder_config = Sam2MaskDecoderConfig()
>>> config = Sam2Config(vision_config, prompt_encoder_config, mask_decoder_config)
```"""
model_type = "sam2"
sub_configs = {
"vision_config": AutoConfig,
"prompt_encoder_config": Sam2PromptEncoderConfig,
"mask_decoder_config": Sam2MaskDecoderConfig,
}
def __init__(
self,
vision_config=None,
prompt_encoder_config=None,
mask_decoder_config=None,
initializer_range=0.02,
**kwargs,
):
vision_config = vision_config if vision_config is not None else {}
prompt_encoder_config = prompt_encoder_config if prompt_encoder_config is not None else {}
mask_decoder_config = mask_decoder_config if mask_decoder_config is not None else {}
if isinstance(vision_config, dict):
vision_config["model_type"] = vision_config.get("model_type", "sam2_vision_model")
vision_config = CONFIG_MAPPING[vision_config["model_type"]](**vision_config)
if isinstance(prompt_encoder_config, Sam2PromptEncoderConfig):
prompt_encoder_config = prompt_encoder_config.to_dict()
if isinstance(mask_decoder_config, Sam2MaskDecoderConfig):
mask_decoder_config = mask_decoder_config.to_dict()
self.vision_config = vision_config
self.prompt_encoder_config = Sam2PromptEncoderConfig(**prompt_encoder_config)
self.mask_decoder_config = Sam2MaskDecoderConfig(**mask_decoder_config)
self.initializer_range = initializer_range
super().__init__(**kwargs)
__all__ = [
"Sam2Config",
"Sam2HieraDetConfig",
"Sam2VisionConfig",
"Sam2PromptEncoderConfig",
"Sam2MaskDecoderConfig",
]
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/sam2/modular_sam2.py | src/transformers/models/sam2/modular_sam2.py | # coding=utf-8
# Copyright 2025 The Meta AI Authors and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch SAM 2 model."""
from collections.abc import Callable
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from ... import initialization as init
from ...activations import ACT2FN
from ...image_processing_utils import BatchFeature, get_size_dict
from ...image_processing_utils_fast import BaseImageProcessorFast
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
SizeDict,
pil_torch_interpolation_mapping,
)
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import ImagesKwargs, Unpack
from ...utils import (
ModelOutput,
TensorType,
auto_docstring,
logging,
)
from ...utils.generic import TransformersKwargs, check_model_inputs
from ..auto import AutoModel
from ..maskformer.modeling_maskformer import MaskFormerSinePositionEmbedding
from ..sam.image_processing_sam_fast import SamImageProcessorFast
from ..sam.modeling_sam import (
SamLayerNorm,
SamMaskDecoder,
SamMaskEmbedding,
SamModel,
SamPromptEncoder,
SamTwoWayAttentionBlock,
SamTwoWayTransformer,
eager_attention_forward,
)
from ..vitdet.modeling_vitdet import window_partition, window_unpartition
from .configuration_sam2 import (
Sam2Config,
Sam2HieraDetConfig,
Sam2MaskDecoderConfig,
Sam2PromptEncoderConfig,
Sam2VisionConfig,
)
logger = logging.get_logger(__name__)
class Sam2FastImageProcessorKwargs(ImagesKwargs, total=False):
r"""
mask_size (`dict[str, int]`, *optional*):
The size `{"height": int, "width": int}` to resize the segmentation maps to.
"""
mask_size: dict[str, int]
@auto_docstring
class Sam2ImageProcessorFast(SamImageProcessorFast):
resample = PILImageResampling.BILINEAR
image_mean = IMAGENET_DEFAULT_MEAN
image_std = IMAGENET_DEFAULT_STD
size = {"height": 1024, "width": 1024}
mask_size = {"height": 256, "width": 256}
do_resize = True
do_rescale = True
do_normalize = True
do_convert_rgb = True
valid_kwargs = Sam2FastImageProcessorKwargs
# modular artefacts
do_pad = None
pad_size = None
mask_pad_size = None
def __init__(self, **kwargs: Unpack[Sam2FastImageProcessorKwargs]):
BaseImageProcessorFast.__init__(self, **kwargs)
def _preprocess(
self,
images: list["torch.Tensor"],
return_tensors: Optional[Union[str, TensorType]],
**kwargs,
) -> "torch.Tensor":
return BaseImageProcessorFast._preprocess(self, images, return_tensors=return_tensors, **kwargs).pixel_values
@auto_docstring
def preprocess(
self,
images: ImageInput,
segmentation_maps: Optional[ImageInput] = None,
**kwargs: Unpack[Sam2FastImageProcessorKwargs],
) -> BatchFeature:
r"""
segmentation_maps (`ImageInput`, *optional*):
The segmentation maps to preprocess.
"""
return super().preprocess(images, segmentation_maps, **kwargs)
def _preprocess_image_like_inputs(
self,
images: ImageInput,
segmentation_maps: Optional[ImageInput],
do_convert_rgb: bool,
input_data_format: ChannelDimension,
device: Optional[Union[str, "torch.device"]] = None,
**kwargs: Unpack[Sam2FastImageProcessorKwargs],
) -> BatchFeature:
"""
Preprocess image-like inputs.
"""
images = self._prepare_image_like_inputs(
images=images, do_convert_rgb=do_convert_rgb, input_data_format=input_data_format, device=device
)
original_sizes = [image.shape[-2:] for image in images]
images_kwargs = kwargs.copy()
pixel_values = self._preprocess(images, **images_kwargs)
data = {
"pixel_values": pixel_values,
"original_sizes": original_sizes,
}
if segmentation_maps is not None:
processed_segmentation_maps = self._prepare_image_like_inputs(
images=segmentation_maps,
expected_ndims=2,
do_convert_rgb=False,
input_data_format=ChannelDimension.FIRST,
)
segmentation_maps_kwargs = kwargs.copy()
segmentation_maps_kwargs.update(
{
"do_normalize": False,
"do_rescale": False,
"interpolation": pil_torch_interpolation_mapping[PILImageResampling.NEAREST],
"size": segmentation_maps_kwargs.pop("mask_size"),
}
)
processed_segmentation_maps = self._preprocess(
images=processed_segmentation_maps, **segmentation_maps_kwargs
)
data["labels"] = processed_segmentation_maps.squeeze(1).to(torch.int64)
return BatchFeature(data=data, tensor_type=kwargs["return_tensors"])
def _further_process_kwargs(
self,
size: Optional[SizeDict] = None,
mask_size: Optional[SizeDict] = None,
default_to_square: Optional[bool] = None,
image_mean: Optional[Union[float, list[float]]] = None,
image_std: Optional[Union[float, list[float]]] = None,
data_format: Optional[ChannelDimension] = None,
**kwargs,
) -> dict:
"""
        Update kwargs that need further processing before being validated.
Can be overridden by subclasses to customize the processing of kwargs.
"""
if kwargs is None:
kwargs = {}
if size is not None:
size = SizeDict(**get_size_dict(size=size, default_to_square=default_to_square))
if mask_size is not None:
mask_size = SizeDict(**get_size_dict(mask_size, param_name="mask_size"))
if isinstance(image_mean, list):
image_mean = tuple(image_mean)
if isinstance(image_std, list):
image_std = tuple(image_std)
if data_format is None:
data_format = ChannelDimension.FIRST
kwargs["size"] = size
kwargs["mask_size"] = mask_size
kwargs["image_mean"] = image_mean
kwargs["image_std"] = image_std
kwargs["data_format"] = data_format
# torch resize uses interpolation instead of resample
# Check if resample is an int before checking if it's an instance of PILImageResampling
# because if pillow < 9.1.0, resample is an int and PILImageResampling is a module.
# Checking PILImageResampling will fail with error `TypeError: isinstance() arg 2 must be a type or tuple of types`.
resample = kwargs.pop("resample")
kwargs["interpolation"] = (
pil_torch_interpolation_mapping[resample] if isinstance(resample, (PILImageResampling, int)) else resample
)
return kwargs
def _apply_non_overlapping_constraints(self, pred_masks: torch.Tensor) -> torch.Tensor:
"""
Apply non-overlapping constraints to the object scores in pred_masks. Here we
keep only the highest scoring object at each spatial location in pred_masks.
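        Example (an illustrative sketch; `image_processor` is assumed to be an instantiated `Sam2ImageProcessorFast`):
        ```python
        >>> import torch
        >>> pred_masks = torch.tensor([[[[2.0]]], [[[5.0]]]])  # two objects competing for a single pixel
        >>> image_processor._apply_non_overlapping_constraints(pred_masks).squeeze().tolist()
        [-10.0, 5.0]
        ```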
"""
batch_size = pred_masks.size(0)
if batch_size == 1:
return pred_masks
device = pred_masks.device
# "max_obj_inds": object index of the object with the highest score at each location
max_obj_inds = torch.argmax(pred_masks, dim=0, keepdim=True)
# "batch_obj_inds": object index of each object slice (along dim 0) in `pred_masks`
batch_obj_inds = torch.arange(batch_size, device=device)[:, None, None, None]
keep = max_obj_inds == batch_obj_inds
# suppress overlapping regions' scores below -10.0 so that the foreground regions
# don't overlap (here sigmoid(-10.0)=4.5398e-05)
pred_masks = torch.where(keep, pred_masks, torch.clamp(pred_masks, max=-10.0))
return pred_masks
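    # Illustrative sketch (not part of the original implementation): if two object slices in
    # `pred_masks` overlap at the same pixel with scores 5.0 (object 0) and 2.0 (object 1),
    # `max_obj_inds` selects object 0 there, so object 1's score at that pixel is clamped to
    # -10.0 (sigmoid(-10.0) ~= 4.5e-05) while object 0 keeps its original score of 5.0.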
def post_process_masks(
self,
masks,
original_sizes,
mask_threshold=0.0,
binarize=True,
max_hole_area=0.0,
max_sprinkle_area=0.0,
apply_non_overlapping_constraints=False,
**kwargs,
):
"""
Remove padding and upscale masks to the original image size.
Args:
masks (`Union[torch.Tensor, List[torch.Tensor], np.ndarray, List[np.ndarray]]`):
Batched masks from the mask_decoder in (batch_size, num_channels, height, width) format.
original_sizes (`Union[torch.Tensor, List[Tuple[int,int]]]`):
The original sizes of each image before it was resized to the model's expected input shape, in (height,
width) format.
mask_threshold (`float`, *optional*, defaults to 0.0):
Threshold for binarization and post-processing operations.
binarize (`bool`, *optional*, defaults to `True`):
Whether to binarize the masks.
max_hole_area (`float`, *optional*, defaults to 0.0):
The maximum area of a hole to fill.
max_sprinkle_area (`float`, *optional*, defaults to 0.0):
The maximum area of a sprinkle to fill.
apply_non_overlapping_constraints (`bool`, *optional*, defaults to `False`):
Whether to apply non-overlapping constraints to the masks.
Returns:
            (`torch.Tensor`): Batched masks in (batch_size, num_channels, height, width) format, where (height, width)
is given by original_size.
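        Example (a minimal sketch using random low-resolution masks; processor defaults are assumed):
        ```python
        >>> import torch
        >>> from transformers import Sam2ImageProcessorFast
        >>> image_processor = Sam2ImageProcessorFast()
        >>> # one image, one point batch, three candidate masks at low resolution
        >>> low_res_masks = torch.randn(1, 1, 3, 256, 256)
        >>> masks = image_processor.post_process_masks(low_res_masks, original_sizes=[(480, 640)])
        >>> masks[0].shape
        torch.Size([1, 3, 480, 640])
        ```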
"""
if isinstance(original_sizes, (torch.Tensor, np.ndarray)):
original_sizes = original_sizes.tolist()
# TODO: add connected components kernel for postprocessing
output_masks = []
for i, original_size in enumerate(original_sizes):
if isinstance(masks[i], np.ndarray):
masks[i] = torch.from_numpy(masks[i])
elif not isinstance(masks[i], torch.Tensor):
raise TypeError("Input masks should be a list of `torch.tensors` or a list of `np.ndarray`")
interpolated_mask = F.interpolate(masks[i], original_size, mode="bilinear", align_corners=False)
if apply_non_overlapping_constraints:
interpolated_mask = self._apply_non_overlapping_constraints(interpolated_mask)
if binarize:
interpolated_mask = interpolated_mask > mask_threshold
output_masks.append(interpolated_mask)
return output_masks
def _get_preprocess_shape(self):
raise NotImplementedError("No _get_preprocess_shape for SAM 2.")
def resize(self):
raise NotImplementedError("No need to override resize for SAM 2.")
@dataclass
@auto_docstring(custom_intro="Base class for the vision encoder's outputs.")
class Sam2VisionEncoderOutput(ModelOutput):
r"""
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, height, width, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model.
fpn_hidden_states (`tuple(torch.FloatTensor)`):
Tuple of `torch.FloatTensor` (one for each feature level, from high to low resolution) of shape
`(batch_size, hidden_size, height, width)`. Feature maps from the Feature Pyramid Network neck.
fpn_position_encoding (`tuple(torch.FloatTensor)`):
Tuple of `torch.FloatTensor` (one for each feature level, from high to low resolution) of shape
`(batch_size, hidden_size, height, width)`. Positional encodings corresponding to the `fpn_hidden_states`.
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
one for the output of each stage) of shape `(batch_size, height, width, hidden_size)`. Hidden-states of the
model at the output of each stage.
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
the self-attention heads.
"""
last_hidden_state: Optional[torch.FloatTensor] = None
fpn_hidden_states: Optional[torch.FloatTensor] = None
fpn_position_encoding: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
attentions: Optional[tuple[torch.FloatTensor, ...]] = None
@dataclass
@auto_docstring(custom_intro="Base class for the Sam2 model's output.")
class Sam2ImageSegmentationOutput(ModelOutput):
r"""
iou_scores (`torch.FloatTensor` of shape `(batch_size, point_batch_size, num_masks)`):
The Intersection over Union (IoU) scores of the predicted masks.
pred_masks (`torch.FloatTensor` of shape `(batch_size, point_batch_size, num_masks, height, width)`):
The predicted low-resolution masks. This is an alias for `low_res_masks`. These masks need to be post-processed
by the processor to be brought to the original image size.
object_score_logits (`torch.FloatTensor` of shape `(batch_size, point_batch_size, 1)`):
Logits for the object score, indicating if an object is present.
image_embeddings (`tuple(torch.FloatTensor)`):
The features from the FPN, which are used by the mask decoder. This is a tuple of `torch.FloatTensor` where each
tensor has shape `(batch_size, channels, height, width)`.
vision_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of each stage) of shape `(batch_size, height, width, hidden_size)`.
Hidden-states of the vision model at the output of each stage.
vision_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights of the vision model.
mask_decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights of the mask decoder.
"""
iou_scores: Optional[torch.FloatTensor] = None
pred_masks: Optional[torch.FloatTensor] = None
object_score_logits: Optional[torch.FloatTensor] = None
    image_embeddings: Optional[tuple[torch.FloatTensor, ...]] = None
vision_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
vision_attentions: Optional[tuple[torch.FloatTensor, ...]] = None
mask_decoder_attentions: Optional[tuple[torch.FloatTensor, ...]] = None
class Sam2PatchEmbeddings(nn.Module):
r"""
Turns pixel values into patch embeddings for transformer consumption.
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using
[`AutoImageProcessor`]. See [`Sam2ImageProcessorFast.__call__`] for details.
Returns:
embeddings (`torch.FloatTensor`):
Patch embeddings depend on image_size, patch_kernel_size, patch_stride and patch_padding
"""
def __init__(self, config: Sam2HieraDetConfig):
super().__init__()
num_channels = config.num_channels
hidden_size = config.hidden_size
self.projection = nn.Conv2d(
num_channels,
hidden_size,
kernel_size=config.patch_kernel_size,
stride=config.patch_stride,
padding=config.patch_padding,
)
def forward(self, pixel_values):
_, num_channels, height, width = pixel_values.shape
embeddings = self.projection(pixel_values.to(self.projection.weight.dtype)).permute(0, 2, 3, 1)
return embeddings
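# Shape sketch (illustrative, assuming the usual SAM2 Hiera patchification of kernel 7, stride 4,
# padding 3): a (batch_size, 3, 1024, 1024) input maps to (batch_size, 256, 256, hidden_size)
# embeddings, since (1024 + 2 * 3 - 7) // 4 + 1 = 256 along each spatial dimension.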
class Sam2SinePositionEmbedding(MaskFormerSinePositionEmbedding):
pass
class Sam2VisionNeck(nn.Module):
def __init__(self, config: Sam2VisionConfig):
super().__init__()
self.config = config
self.position_encoding = Sam2SinePositionEmbedding(num_pos_feats=config.fpn_hidden_size // 2, normalize=True)
self.convs = nn.ModuleList()
for in_channels in config.backbone_channel_list:
self.convs.append(
nn.Conv2d(
in_channels=in_channels,
out_channels=config.fpn_hidden_size,
kernel_size=config.fpn_kernel_size,
stride=config.fpn_stride,
padding=config.fpn_padding,
),
)
self.fpn_top_down_levels = config.fpn_top_down_levels
def forward(self, hidden_states: torch.Tensor) -> tuple[tuple[torch.Tensor, ...], tuple[torch.Tensor, ...]]:
fpn_hidden_states = ()
fpn_position_encoding = ()
# forward in top-down order (from low to high resolution)
n = len(self.convs) - 1
for i in range(n, -1, -1):
lateral_features = hidden_states[i].permute(0, 3, 1, 2)
lateral_features = self.convs[n - i](lateral_features.to(self.convs[i].weight.dtype))
if i not in self.fpn_top_down_levels or i == n:
prev_features = lateral_features
else:
top_down_features = F.interpolate(
prev_features.to(dtype=torch.float32),
scale_factor=2.0,
mode="nearest",
align_corners=None,
antialias=False,
).to(lateral_features.dtype)
prev_features = lateral_features + top_down_features
prev_position_encoding = self.position_encoding(
prev_features.shape, prev_features.device, prev_features.dtype
).to(prev_features.dtype)
fpn_hidden_states += (prev_features,)
fpn_position_encoding += (prev_position_encoding,)
return fpn_hidden_states, fpn_position_encoding
def do_pool(x: torch.Tensor, query_stride: Optional[int] = None) -> torch.Tensor:
if query_stride is None:
return x
# (B, H, W, C) -> (B, C, H, W)
x = x.permute(0, 3, 1, 2)
x = nn.functional.max_pool2d(x, kernel_size=query_stride, stride=query_stride, ceil_mode=False)
# (B, C, H', W') -> (B, H', W', C)
x = x.permute(0, 2, 3, 1)
return x
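# Example (sketch): with query_stride=2, `do_pool` max-pools a (B, 8, 8, C) feature map down to
# (B, 4, 4, C); the two permutes are only needed because max_pool2d expects channels-first input.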
class Sam2MultiScaleAttention(nn.Module):
def __init__(
self,
config: Sam2HieraDetConfig,
dim: int,
dim_out: int,
num_attention_heads: int,
query_stride: Optional[tuple[int, int]] = None,
):
super().__init__()
self.config = config
self.dim = dim
self.dim_out = dim_out
self.query_stride = query_stride
self.num_attention_heads = num_attention_heads
head_dim = dim_out // num_attention_heads
self.scale = head_dim**-0.5
self.qkv = nn.Linear(dim, dim_out * 3)
self.proj = nn.Linear(dim_out, dim_out)
self.is_causal = False
def forward(self, hidden_states: torch.Tensor, **kwargs) -> torch.Tensor:
batch_size, height, width, _ = hidden_states.shape
# qkv with shape (B, H * W, 3, nHead, C)
qkv = self.qkv(hidden_states).reshape(batch_size, height * width, 3, self.num_attention_heads, -1)
# q, k, v with shape (B, H * W, nheads, C)
query, key, value = torch.unbind(qkv, 2)
# Q pooling (for downsample at stage changes)
if self.query_stride:
query = do_pool(query.reshape(batch_size, height, width, -1), self.query_stride)
height, width = query.shape[1:3] # downsampled shape
query = query.reshape(batch_size, height * width, self.num_attention_heads, -1)
# transpose query, key, value to (B, nHead, H * W, C)
query = query.transpose(1, 2)
key = key.transpose(1, 2)
value = value.transpose(1, 2)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, _ = attention_interface(
self,
query,
key,
value,
attention_mask=None,
is_causal=self.is_causal,
scaling=self.scale,
**kwargs,
)
attn_output = attn_output.reshape(batch_size, height, width, -1)
attn_output = self.proj(attn_output)
return attn_output
class Sam2FeedForward(nn.Module):
def __init__(
self,
input_dim: int,
hidden_dim: int,
output_dim: int,
num_layers: int,
activation: str = "relu",
sigmoid_output: bool = False,
):
super().__init__()
self.num_layers = num_layers
self.activation = ACT2FN[activation]
self.proj_in = nn.Linear(input_dim, hidden_dim)
self.proj_out = nn.Linear(hidden_dim, output_dim)
self.layers = nn.ModuleList([nn.Linear(hidden_dim, hidden_dim) for _ in range(num_layers - 2)])
self.sigmoid_output = sigmoid_output
def forward(self, hidden_states):
hidden_states = self.proj_in(hidden_states)
hidden_states = self.activation(hidden_states)
for layer in self.layers:
hidden_states = self.activation(layer(hidden_states))
hidden_states = self.proj_out(hidden_states)
if self.sigmoid_output:
hidden_states = F.sigmoid(hidden_states)
return hidden_states
class Sam2MultiScaleBlock(GradientCheckpointingLayer):
def __init__(
self,
config: Sam2HieraDetConfig,
stage_idx: int,
block_idx: int,
total_block_idx: int,
):
super().__init__()
# take embed dim from previous stage if first block of stage
self.dim = (
config.embed_dim_per_stage[stage_idx - 1]
if stage_idx > 0 and block_idx == 0
else config.embed_dim_per_stage[stage_idx]
)
self.dim_out = config.embed_dim_per_stage[stage_idx]
self.layer_norm1 = nn.LayerNorm(self.dim, eps=config.layer_norm_eps)
# take window size from previous stage if first block of stage
self.window_size = (
config.window_size_per_stage[stage_idx - 1]
if stage_idx > 0 and block_idx == 0
else config.window_size_per_stage[stage_idx]
)
self.window_size = 0 if total_block_idx in config.global_attention_blocks else self.window_size
# use query stride for first block of stage if stage is a query pool stage
self.query_stride = (
config.query_stride if 0 < stage_idx <= config.num_query_pool_stages and block_idx == 0 else None
)
self.attn = Sam2MultiScaleAttention(
config,
self.dim,
self.dim_out,
num_attention_heads=config.num_attention_heads_per_stage[stage_idx],
query_stride=self.query_stride,
)
self.layer_norm2 = nn.LayerNorm(self.dim_out, eps=config.layer_norm_eps)
self.mlp = Sam2FeedForward(
self.dim_out,
int(self.dim_out * config.mlp_ratio),
self.dim_out,
num_layers=2,
activation=config.hidden_act,
)
if self.dim != self.dim_out:
self.proj = nn.Linear(self.dim, self.dim_out)
def forward(
self,
hidden_states: torch.Tensor,
**kwargs: Unpack[TransformersKwargs],
) -> torch.FloatTensor:
residual = hidden_states # batch_size, height, width, channel
hidden_states = self.layer_norm1(hidden_states)
# Skip connection
if self.dim != self.dim_out:
residual = do_pool(self.proj(hidden_states), self.query_stride)
# Window partition
window_size = self.window_size
if self.window_size > 0:
H, W = hidden_states.shape[1], hidden_states.shape[2]
hidden_states, pad_hw = window_partition(hidden_states, window_size)
# Window Attention + Q Pooling (if stage change)
attn_output = self.attn(
hidden_states=hidden_states,
**kwargs,
)
hidden_states = attn_output
if self.query_stride:
# Shapes have changed due to Q pooling
window_size = self.window_size // self.query_stride[0]
H, W = residual.shape[1:3]
pad_h = (window_size - H % window_size) % window_size
pad_w = (window_size - W % window_size) % window_size
pad_hw = (H + pad_h, W + pad_w)
# Reverse window partition
if self.window_size > 0:
hidden_states = window_unpartition(hidden_states, window_size, pad_hw, (H, W))
hidden_states = residual + hidden_states
layernorm_output = self.layer_norm2(hidden_states)
hidden_states = hidden_states + self.mlp(layernorm_output)
return hidden_states
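# Illustrative shape flow through a block (assuming window_size=8 and no Q pooling): a
# (B, 64, 64, C) map is split by window_partition into (B * 64, 8, 8, C) windows, attention runs
# per window, window_unpartition restores (B, 64, 64, C), then the residual and MLP are applied.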
@dataclass
@auto_docstring(
custom_intro="""
Hiera model's outputs that also contains a pooling of the last hidden states.
"""
)
class Sam2HieraDetModelOutput(ModelOutput):
r"""
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, height, width, hidden_size)`):
        Sequence of hidden-states at the output of the last layer of the model.
intermediate_hidden_states (`tuple[torch.FloatTensor]` of shape `(batch_size, height, width, hidden_size)`):
Sequence of hidden-states at the output of the intermediate layers of the model.
"""
last_hidden_state: Optional[torch.FloatTensor] = None
intermediate_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
@auto_docstring
class Sam2PreTrainedModel(PreTrainedModel):
config_class = Sam2Config
base_model_prefix = "sam2"
main_input_name = "pixel_values"
input_modalities = ("image",)
_supports_sdpa = True
_supports_flash_attn_2 = True
_supports_attention_backend = True
@torch.no_grad()
def _init_weights(self, module):
super()._init_weights(module)
if isinstance(module, Sam2HieraDetModel):
if module.pos_embed is not None:
init.zeros_(module.pos_embed)
if module.pos_embed_window is not None:
init.zeros_(module.pos_embed_window)
elif isinstance(module, Sam2PositionalEmbedding):
init.normal_(module.positional_embedding, std=module.scale)
elif isinstance(module, Sam2Model):
if module.no_memory_embedding is not None:
init.zeros_(module.no_memory_embedding)
class Sam2HieraDetModel(Sam2PreTrainedModel):
config_class = Sam2HieraDetConfig
main_input_name = "pixel_values"
_can_record_outputs = {
"hidden_states": Sam2MultiScaleBlock,
"attentions": Sam2MultiScaleAttention,
}
def __init__(self, config: Sam2HieraDetConfig):
super().__init__(config)
self.patch_embed = Sam2PatchEmbeddings(config)
# Windowed positional embedding (https://huggingface.co/papers/2311.05613)
self.pos_embed = nn.Parameter(
torch.zeros(1, config.hidden_size, *config.window_positional_embedding_background_size)
)
self.pos_embed_window = nn.Parameter(
torch.zeros(1, config.hidden_size, config.window_size_per_stage[0], config.window_size_per_stage[0])
)
self.stage_ends = (np.cumsum(config.blocks_per_stage) - 1).tolist()
self.blocks = nn.ModuleList()
total_block_idx = 0
for stage_idx, blocks_per_stage in enumerate(config.blocks_per_stage):
for block_idx in range(blocks_per_stage):
block = Sam2MultiScaleBlock(
config=config, stage_idx=stage_idx, block_idx=block_idx, total_block_idx=total_block_idx
)
self.blocks.append(block)
total_block_idx += 1
self.post_init()
def get_input_embeddings(self):
return self.patch_embed
def _get_pos_embed(self, hw: tuple[int, int]) -> torch.Tensor:
h, w = hw
window_embed = self.pos_embed_window
pos_embed = F.interpolate(self.pos_embed, size=(h, w), mode="bicubic")
pos_embed = pos_embed + window_embed.tile([x // y for x, y in zip(pos_embed.shape, window_embed.shape)])
pos_embed = pos_embed.permute(0, 2, 3, 1)
return pos_embed
@check_model_inputs
def forward(
self,
pixel_values: Optional[torch.FloatTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple, Sam2HieraDetModelOutput]:
if pixel_values is None:
raise ValueError("You have to specify pixel_values")
hidden_states = self.patch_embed(pixel_values)
hidden_states = hidden_states + self._get_pos_embed(hidden_states.shape[1:3])
intermediate_hidden_states = ()
for i, block_module in enumerate(self.blocks):
hidden_states = block_module(hidden_states, **kwargs)
if i in self.stage_ends:
intermediate_hidden_states = intermediate_hidden_states + (hidden_states,)
return Sam2HieraDetModelOutput(
last_hidden_state=hidden_states,
intermediate_hidden_states=intermediate_hidden_states,
)
@auto_docstring(
custom_intro="""
The vision model from Sam without any head or projection on top.
"""
)
class Sam2VisionModel(Sam2PreTrainedModel):
config_class = Sam2VisionConfig
main_input_name = "pixel_values"
_can_record_outputs = {
"hidden_states": Sam2MultiScaleBlock,
"attentions": Sam2MultiScaleAttention,
}
def __init__(self, config: Sam2VisionConfig):
super().__init__(config)
self.config = config
self.backbone = AutoModel.from_config(config.backbone_config)
self.neck = Sam2VisionNeck(config)
self.num_feature_levels = config.num_feature_levels
self.post_init()
def get_input_embeddings(self):
return self.backbone.get_input_embeddings()
@check_model_inputs
def forward(
self,
pixel_values: Optional[torch.FloatTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple, Sam2VisionEncoderOutput]:
if pixel_values is None:
raise ValueError("You have to specify pixel_values")
# Forward through backbone
backbone_output = self.backbone(pixel_values, **kwargs)
hidden_states = backbone_output.last_hidden_state
intermediate_hidden_states = backbone_output.intermediate_hidden_states
fpn_hidden_states, fpn_position_encoding = self.neck(intermediate_hidden_states)
# Select last `num_feature_levels` feature levels from FPN and reverse order to get features from high to low resolution
fpn_hidden_states = fpn_hidden_states[-self.num_feature_levels :][::-1]
fpn_position_encoding = fpn_position_encoding[-self.num_feature_levels :][::-1]
return Sam2VisionEncoderOutput(
last_hidden_state=hidden_states,
fpn_hidden_states=fpn_hidden_states,
fpn_position_encoding=fpn_position_encoding,
)
class Sam2PositionalEmbedding(nn.Module):
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | true |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/depth_anything/convert_distill_any_depth_to_hf.py | src/transformers/models/depth_anything/convert_distill_any_depth_to_hf.py | # coding=utf-8
# Copyright 2025 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert Distill Any Depth checkpoints from the original repository. URL:
https://github.com/Westlake-AGI-Lab/Distill-Any-Depth"""
import argparse
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from safetensors.torch import load_file
from transformers import DepthAnythingConfig, DepthAnythingForDepthEstimation, Dinov2Config, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
ORIGINAL_TO_CONVERTED_KEY_MAPPING = {
r"(backbone|pretrained)\.cls_token": r"backbone.embeddings.cls_token",
r"(backbone|pretrained)\.mask_token": r"backbone.embeddings.mask_token",
r"(backbone|pretrained)\.pos_embed": r"backbone.embeddings.position_embeddings",
r"(backbone|pretrained)\.patch_embed\.proj\.(weight|bias)": r"backbone.embeddings.patch_embeddings.projection.\2",
r"(backbone|pretrained)\.norm\.(weight|bias)": r"backbone.layernorm.\2",
r"(backbone|pretrained)(\.blocks(\.\d+)?)?\.(\d+)\.attn\.proj\.(weight|bias)": r"backbone.encoder.layer.\4.attention.output.dense.\5",
r"(backbone|pretrained)(\.blocks(\.\d+)?)?\.(\d+)\.ls(1|2)\.gamma": r"backbone.encoder.layer.\4.layer_scale\5.lambda1",
r"(backbone|pretrained)(\.blocks(\.\d+)?)?\.(\d+)\.mlp\.fc(1|2)\.(weight|bias)": r"backbone.encoder.layer.\4.mlp.fc\5.\6",
r"(backbone|pretrained)(\.blocks(\.\d+)?)?\.(\d+)\.norm(1|2)\.(weight|bias)": r"backbone.encoder.layer.\4.norm\5.\6",
r"depth_head\.projects\.(\d+)\.(weight|bias)": r"neck.reassemble_stage.layers.\1.projection.\2",
r"depth_head\.resize_layers\.(?!2)(\d+)\.(weight|bias)": r"neck.reassemble_stage.layers.\1.resize.\2",
r"depth_head\.scratch\.layer(\d+)_rn\.weight": lambda m: f"neck.convs.{int(m[1]) - 1}.weight",
r"depth_head\.scratch\.output_conv(\d+)(?:\.(\d+))?\.(weight|bias)": lambda m: (
f"head.conv{int(m[1]) + (int(m[2]) // 2 if m[2] else 0)}.{m[3]}" if m[1] == "2" else f"head.conv{m[1]}.{m[3]}"
),
r"depth_head\.scratch\.refinenet(\d+)\.out_conv\.(weight|bias)": lambda m: f"neck.fusion_stage.layers.{3 - (int(m[1]) - 1)}.projection.{m[2]}",
r"depth_head\.scratch\.refinenet(\d+)\.resConfUnit(\d+)\.conv(\d+)\.(weight|bias)": lambda m: f"neck.fusion_stage.layers.{3 - (int(m[1]) - 1)}.residual_layer{m[2]}.convolution{m[3]}.{m[4]}",
}
def get_dpt_config(model_name):
if "small" in model_name:
out_indices = [3, 6, 9, 12]
backbone_config = Dinov2Config.from_pretrained(
"facebook/dinov2-small", out_indices=out_indices, apply_layernorm=True, reshape_hidden_states=False
)
fusion_hidden_size = 64
neck_hidden_sizes = [48, 96, 192, 384]
elif "base" in model_name:
out_indices = [3, 6, 9, 12]
backbone_config = Dinov2Config.from_pretrained(
"facebook/dinov2-base", out_indices=out_indices, apply_layernorm=True, reshape_hidden_states=False
)
fusion_hidden_size = 128
neck_hidden_sizes = [96, 192, 384, 768]
elif "large" in model_name:
out_indices = [5, 12, 18, 24]
backbone_config = Dinov2Config.from_pretrained(
"facebook/dinov2-large", out_indices=out_indices, apply_layernorm=True, reshape_hidden_states=False
)
fusion_hidden_size = 256
neck_hidden_sizes = [256, 512, 1024, 1024]
else:
raise NotImplementedError(f"Model not supported: {model_name}")
depth_estimation_type = "relative"
max_depth = None
config = DepthAnythingConfig(
reassemble_hidden_size=backbone_config.hidden_size,
patch_size=backbone_config.patch_size,
backbone_config=backbone_config,
fusion_hidden_size=fusion_hidden_size,
neck_hidden_sizes=neck_hidden_sizes,
depth_estimation_type=depth_estimation_type,
max_depth=max_depth,
)
return config
def convert_key_pattern(key, mapping):
for pattern, replacement in mapping.items():
match = re.fullmatch(pattern, key)
if match:
if callable(replacement):
return replacement(match)
return re.sub(pattern, replacement, key)
return None
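# Worked example of the mapping above: "pretrained.blocks.0.attn.proj.weight" matches the
# attention-projection pattern and is rewritten to
# "backbone.encoder.layer.0.attention.output.dense.weight"; likewise
# "depth_head.scratch.refinenet1.out_conv.weight" maps to
# "neck.fusion_stage.layers.3.projection.weight" (refinenet indices are reversed via 3 - (i - 1)).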
def convert_keys(state_dict, config):
new_state_dict = {}
qkv_pattern = r"(backbone|pretrained)(\.blocks(\.\d+)?)?\.(\d+)\.attn\.qkv\.(weight|bias)"
qkv_keys = [k for k in list(state_dict.keys()) if re.match(qkv_pattern, k)]
for old_key in qkv_keys:
value = state_dict.pop(old_key)
match = re.match(qkv_pattern, old_key)
_, _, _, layer, attr = match.groups()
hidden_size = config.backbone_config.hidden_size
q = value[:hidden_size]
k = value[hidden_size : hidden_size * 2]
v = value[-hidden_size:]
for proj, tensor in zip(["query", "key", "value"], [q, k, v]):
new_key = f"backbone.encoder.layer.{layer}.attention.attention.{proj}.{attr}"
new_state_dict[new_key] = tensor
for old_key in list(state_dict.keys()):
value = state_dict.pop(old_key)
new_key = convert_key_pattern(old_key, ORIGINAL_TO_CONVERTED_KEY_MAPPING)
new_state_dict[new_key] = value
return new_state_dict
def prepare_img():
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
return Image.open(requests.get(url, stream=True).raw)
name_to_checkpoint = {
"distill-any-depth-small": "small/model.safetensors",
"distill-any-depth-base": "base/model.safetensors",
"distill-any-depth-large": "large/model.safetensors",
}
@torch.no_grad()
def convert_dpt_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub, verify_logits):
config = get_dpt_config(model_name)
repo_id = "xingyang1/Distill-Any-Depth"
filepath = hf_hub_download(repo_id=repo_id, filename=name_to_checkpoint[model_name])
state_dict = load_file(filepath)
converted_state_dict = convert_keys(state_dict, config)
model = DepthAnythingForDepthEstimation(config)
model.load_state_dict(converted_state_dict)
model.eval()
processor = DPTImageProcessor(
do_resize=True,
size={"height": 518, "width": 518},
ensure_multiple_of=14,
keep_aspect_ratio=True,
do_rescale=True,
do_normalize=True,
image_mean=[0.485, 0.456, 0.406],
image_std=[0.229, 0.224, 0.225],
)
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
pixel_values = processor(image, return_tensors="pt").pixel_values
with torch.no_grad():
outputs = model(pixel_values)
predicted_depth = outputs.predicted_depth
print("Shape of predicted depth:", predicted_depth.shape)
print("First values:", predicted_depth[0, :3, :3])
if verify_logits:
print("Verifying logits...")
expected_shape = torch.Size([1, 518, 686])
if model_name == "distill-any-depth-small":
expected_slice = torch.tensor(
[[2.5653, 2.5249, 2.5570], [2.4897, 2.5235, 2.5355], [2.5255, 2.5261, 2.5422]]
)
elif model_name == "distill-any-depth-base":
expected_slice = torch.tensor(
[[4.8976, 4.9075, 4.9403], [4.8872, 4.8906, 4.9448], [4.8712, 4.8898, 4.8838]]
)
elif model_name == "distill-any-depth-large":
expected_slice = torch.tensor(
[[55.1067, 51.1828, 51.6803], [51.9098, 50.7529, 51.4494], [50.1745, 50.5491, 50.8818]]
)
else:
raise ValueError("Not supported")
assert predicted_depth.shape == torch.Size(expected_shape)
assert torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1e-4)
print("Looks ok!")
if pytorch_dump_folder_path is not None:
Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
print(f"Saving model and processor to {pytorch_dump_folder_path}")
model.save_pretrained(pytorch_dump_folder_path)
processor.save_pretrained(pytorch_dump_folder_path)
if push_to_hub:
print("Pushing model and processor to hub...")
model.push_to_hub(repo_id=f"{model_name.title()}-hf")
processor.push_to_hub(repo_id=f"{model_name.title()}-hf")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default="distill-any-depth-small",
type=str,
choices=name_to_checkpoint.keys(),
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model to the hub after conversion.",
)
parser.add_argument(
"--verify_logits",
action="store_true",
required=False,
help="Whether to verify the logits after conversion.",
)
args = parser.parse_args()
convert_dpt_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.verify_logits)
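    # Example invocation (sketch; adjust paths as needed):
    #   python src/transformers/models/depth_anything/convert_distill_any_depth_to_hf.py \
    #       --model_name distill-any-depth-large --pytorch_dump_folder_path ./distill-any-depth-large --verify_logits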
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/depth_anything/convert_depth_anything_to_hf.py | src/transformers/models/depth_anything/convert_depth_anything_to_hf.py | # coding=utf-8
# Copyright 2024 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert Depth Anything checkpoints from the original repository. URL:
https://github.com/LiheYoung/Depth-Anything"""
import argparse
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DepthAnythingConfig, DepthAnythingForDepthEstimation, Dinov2Config, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_dpt_config(model_name):
if "small" in model_name:
out_indices = [3, 6, 9, 12] if "v2" in model_name else [9, 10, 11, 12]
backbone_config = Dinov2Config.from_pretrained(
"facebook/dinov2-small", out_indices=out_indices, apply_layernorm=True, reshape_hidden_states=False
)
fusion_hidden_size = 64
neck_hidden_sizes = [48, 96, 192, 384]
elif "base" in model_name:
out_indices = [3, 6, 9, 12] if "v2" in model_name else [9, 10, 11, 12]
backbone_config = Dinov2Config.from_pretrained(
"facebook/dinov2-base", out_indices=out_indices, apply_layernorm=True, reshape_hidden_states=False
)
fusion_hidden_size = 128
neck_hidden_sizes = [96, 192, 384, 768]
elif "large" in model_name:
out_indices = [5, 12, 18, 24] if "v2" in model_name else [21, 22, 23, 24]
backbone_config = Dinov2Config.from_pretrained(
"facebook/dinov2-large", out_indices=out_indices, apply_layernorm=True, reshape_hidden_states=False
)
fusion_hidden_size = 256
neck_hidden_sizes = [256, 512, 1024, 1024]
else:
raise NotImplementedError(f"Model not supported: {model_name}")
if "metric" in model_name:
depth_estimation_type = "metric"
max_depth = 20 if "indoor" in model_name else 80
else:
depth_estimation_type = "relative"
max_depth = None
config = DepthAnythingConfig(
reassemble_hidden_size=backbone_config.hidden_size,
patch_size=backbone_config.patch_size,
backbone_config=backbone_config,
fusion_hidden_size=fusion_hidden_size,
neck_hidden_sizes=neck_hidden_sizes,
depth_estimation_type=depth_estimation_type,
max_depth=max_depth,
)
return config
def create_rename_keys(config):
rename_keys = []
# fmt: off
# stem
rename_keys.append(("pretrained.cls_token", "backbone.embeddings.cls_token"))
rename_keys.append(("pretrained.mask_token", "backbone.embeddings.mask_token"))
rename_keys.append(("pretrained.pos_embed", "backbone.embeddings.position_embeddings"))
rename_keys.append(("pretrained.patch_embed.proj.weight", "backbone.embeddings.patch_embeddings.projection.weight"))
rename_keys.append(("pretrained.patch_embed.proj.bias", "backbone.embeddings.patch_embeddings.projection.bias"))
# Transformer encoder
for i in range(config.backbone_config.num_hidden_layers):
rename_keys.append((f"pretrained.blocks.{i}.ls1.gamma", f"backbone.encoder.layer.{i}.layer_scale1.lambda1"))
rename_keys.append((f"pretrained.blocks.{i}.ls2.gamma", f"backbone.encoder.layer.{i}.layer_scale2.lambda1"))
rename_keys.append((f"pretrained.blocks.{i}.norm1.weight", f"backbone.encoder.layer.{i}.norm1.weight"))
rename_keys.append((f"pretrained.blocks.{i}.norm1.bias", f"backbone.encoder.layer.{i}.norm1.bias"))
rename_keys.append((f"pretrained.blocks.{i}.norm2.weight", f"backbone.encoder.layer.{i}.norm2.weight"))
rename_keys.append((f"pretrained.blocks.{i}.norm2.bias", f"backbone.encoder.layer.{i}.norm2.bias"))
rename_keys.append((f"pretrained.blocks.{i}.mlp.fc1.weight", f"backbone.encoder.layer.{i}.mlp.fc1.weight"))
rename_keys.append((f"pretrained.blocks.{i}.mlp.fc1.bias", f"backbone.encoder.layer.{i}.mlp.fc1.bias"))
rename_keys.append((f"pretrained.blocks.{i}.mlp.fc2.weight", f"backbone.encoder.layer.{i}.mlp.fc2.weight"))
rename_keys.append((f"pretrained.blocks.{i}.mlp.fc2.bias", f"backbone.encoder.layer.{i}.mlp.fc2.bias"))
rename_keys.append((f"pretrained.blocks.{i}.attn.proj.weight", f"backbone.encoder.layer.{i}.attention.output.dense.weight"))
rename_keys.append((f"pretrained.blocks.{i}.attn.proj.bias", f"backbone.encoder.layer.{i}.attention.output.dense.bias"))
# Head
rename_keys.append(("pretrained.norm.weight", "backbone.layernorm.weight"))
rename_keys.append(("pretrained.norm.bias", "backbone.layernorm.bias"))
# activation postprocessing (readout projections + resize blocks)
# Depth Anything does not use CLS token => readout_projects not required
for i in range(4):
rename_keys.append((f"depth_head.projects.{i}.weight", f"neck.reassemble_stage.layers.{i}.projection.weight"))
rename_keys.append((f"depth_head.projects.{i}.bias", f"neck.reassemble_stage.layers.{i}.projection.bias"))
if i != 2:
rename_keys.append((f"depth_head.resize_layers.{i}.weight", f"neck.reassemble_stage.layers.{i}.resize.weight"))
rename_keys.append((f"depth_head.resize_layers.{i}.bias", f"neck.reassemble_stage.layers.{i}.resize.bias"))
# refinenet (tricky here)
mapping = {1:3, 2:2, 3:1, 4:0}
for i in range(1, 5):
j = mapping[i]
rename_keys.append((f"depth_head.scratch.refinenet{i}.out_conv.weight", f"neck.fusion_stage.layers.{j}.projection.weight"))
rename_keys.append((f"depth_head.scratch.refinenet{i}.out_conv.bias", f"neck.fusion_stage.layers.{j}.projection.bias"))
rename_keys.append((f"depth_head.scratch.refinenet{i}.resConfUnit1.conv1.weight", f"neck.fusion_stage.layers.{j}.residual_layer1.convolution1.weight"))
rename_keys.append((f"depth_head.scratch.refinenet{i}.resConfUnit1.conv1.bias", f"neck.fusion_stage.layers.{j}.residual_layer1.convolution1.bias"))
rename_keys.append((f"depth_head.scratch.refinenet{i}.resConfUnit1.conv2.weight", f"neck.fusion_stage.layers.{j}.residual_layer1.convolution2.weight"))
rename_keys.append((f"depth_head.scratch.refinenet{i}.resConfUnit1.conv2.bias", f"neck.fusion_stage.layers.{j}.residual_layer1.convolution2.bias"))
rename_keys.append((f"depth_head.scratch.refinenet{i}.resConfUnit2.conv1.weight", f"neck.fusion_stage.layers.{j}.residual_layer2.convolution1.weight"))
rename_keys.append((f"depth_head.scratch.refinenet{i}.resConfUnit2.conv1.bias", f"neck.fusion_stage.layers.{j}.residual_layer2.convolution1.bias"))
rename_keys.append((f"depth_head.scratch.refinenet{i}.resConfUnit2.conv2.weight", f"neck.fusion_stage.layers.{j}.residual_layer2.convolution2.weight"))
rename_keys.append((f"depth_head.scratch.refinenet{i}.resConfUnit2.conv2.bias", f"neck.fusion_stage.layers.{j}.residual_layer2.convolution2.bias"))
# scratch convolutions
for i in range(4):
rename_keys.append((f"depth_head.scratch.layer{i+1}_rn.weight", f"neck.convs.{i}.weight"))
# head
rename_keys.append(("depth_head.scratch.output_conv1.weight", "head.conv1.weight"))
rename_keys.append(("depth_head.scratch.output_conv1.bias", "head.conv1.bias"))
rename_keys.append(("depth_head.scratch.output_conv2.0.weight", "head.conv2.weight"))
rename_keys.append(("depth_head.scratch.output_conv2.0.bias", "head.conv2.bias"))
rename_keys.append(("depth_head.scratch.output_conv2.2.weight", "head.conv3.weight"))
rename_keys.append(("depth_head.scratch.output_conv2.2.bias", "head.conv3.bias"))
return rename_keys
# we split up the matrix of each encoder layer into queries, keys and values
def read_in_q_k_v(state_dict, config):
hidden_size = config.backbone_config.hidden_size
for i in range(config.backbone_config.num_hidden_layers):
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
in_proj_weight = state_dict.pop(f"pretrained.blocks.{i}.attn.qkv.weight")
in_proj_bias = state_dict.pop(f"pretrained.blocks.{i}.attn.qkv.bias")
# next, add query, keys and values (in that order) to the state dict
state_dict[f"backbone.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[:hidden_size, :]
state_dict[f"backbone.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[:hidden_size]
state_dict[f"backbone.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
hidden_size : hidden_size * 2, :
]
state_dict[f"backbone.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
hidden_size : hidden_size * 2
]
state_dict[f"backbone.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-hidden_size:, :]
state_dict[f"backbone.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-hidden_size:]
def rename_key(dct, old, new):
val = dct.pop(old)
dct[new] = val
# We will verify our results on an image of cute cats
def prepare_img():
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
im = Image.open(requests.get(url, stream=True).raw)
return im
name_to_checkpoint = {
"depth-anything-small": "pytorch_model.bin",
"depth-anything-base": "pytorch_model.bin",
"depth-anything-large": "pytorch_model.bin",
"depth-anything-v2-small": "depth_anything_v2_vits.pth",
"depth-anything-v2-base": "depth_anything_v2_vitb.pth",
"depth-anything-v2-large": "depth_anything_v2_vitl.pth",
"depth-anything-v2-metric-indoor-small": "depth_anything_v2_metric_hypersim_vits.pth",
"depth-anything-v2-metric-indoor-base": "depth_anything_v2_metric_hypersim_vitb.pth",
"depth-anything-v2-metric-indoor-large": "depth_anything_v2_metric_hypersim_vitl.pth",
"depth-anything-v2-metric-outdoor-small": "depth_anything_v2_metric_vkitti_vits.pth",
"depth-anything-v2-metric-outdoor-base": "depth_anything_v2_metric_vkitti_vitb.pth",
"depth-anything-v2-metric-outdoor-large": "depth_anything_v2_metric_vkitti_vitl.pth",
# v2-giant pending
}
@torch.no_grad()
def convert_dpt_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub, verify_logits):
"""
Copy/paste/tweak model's weights to our DPT structure.
"""
# define DPT configuration
config = get_dpt_config(model_name)
model_name_to_repo = {
"depth-anything-small": "LiheYoung/depth_anything_vits14",
"depth-anything-base": "LiheYoung/depth_anything_vitb14",
"depth-anything-large": "LiheYoung/depth_anything_vitl14",
"depth-anything-v2-small": "depth-anything/Depth-Anything-V2-Small",
"depth-anything-v2-base": "depth-anything/Depth-Anything-V2-Base",
"depth-anything-v2-large": "depth-anything/Depth-Anything-V2-Large",
"depth-anything-v2-metric-indoor-small": "depth-anything/Depth-Anything-V2-Metric-Hypersim-Small",
"depth-anything-v2-metric-indoor-base": "depth-anything/Depth-Anything-V2-Metric-Hypersim-Base",
"depth-anything-v2-metric-indoor-large": "depth-anything/Depth-Anything-V2-Metric-Hypersim-Large",
"depth-anything-v2-metric-outdoor-small": "depth-anything/Depth-Anything-V2-Metric-VKITTI-Small",
"depth-anything-v2-metric-outdoor-base": "depth-anything/Depth-Anything-V2-Metric-VKITTI-Base",
"depth-anything-v2-metric-outdoor-large": "depth-anything/Depth-Anything-V2-Metric-VKITTI-Large",
}
# load original state_dict
repo_id = model_name_to_repo[model_name]
filename = name_to_checkpoint[model_name]
filepath = hf_hub_download(
repo_id=repo_id,
filename=f"{filename}",
)
state_dict = torch.load(filepath, map_location="cpu", weights_only=True)
# rename keys
rename_keys = create_rename_keys(config)
for src, dest in rename_keys:
rename_key(state_dict, src, dest)
# read in qkv matrices
read_in_q_k_v(state_dict, config)
# load HuggingFace model
model = DepthAnythingForDepthEstimation(config)
model.load_state_dict(state_dict)
model.eval()
processor = DPTImageProcessor(
do_resize=True,
size={"height": 518, "width": 518},
ensure_multiple_of=14,
keep_aspect_ratio=True,
do_rescale=True,
do_normalize=True,
image_mean=[0.485, 0.456, 0.406],
image_std=[0.229, 0.224, 0.225],
)
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
pixel_values = processor(image, return_tensors="pt").pixel_values
# Verify forward pass
with torch.no_grad():
outputs = model(pixel_values)
predicted_depth = outputs.predicted_depth
print("Shape of predicted depth:", predicted_depth.shape)
print("First values:", predicted_depth[0, :3, :3])
# assert logits
if verify_logits:
expected_shape = torch.Size([1, 518, 686])
if model_name == "depth-anything-small":
expected_slice = torch.tensor(
[[8.8204, 8.6468, 8.6195], [8.3313, 8.6027, 8.7526], [8.6526, 8.6866, 8.7453]],
)
elif model_name == "depth-anything-base":
expected_slice = torch.tensor(
[[26.3997, 26.3004, 26.3928], [26.2260, 26.2092, 26.3427], [26.0719, 26.0483, 26.1254]],
)
elif model_name == "depth-anything-large":
expected_slice = torch.tensor(
[[87.9968, 87.7493, 88.2704], [87.1927, 87.6611, 87.3640], [86.7789, 86.9469, 86.7991]]
)
elif model_name == "depth-anything-v2-small":
expected_slice = torch.tensor(
[[2.6751, 2.6211, 2.6571], [2.5820, 2.6138, 2.6271], [2.6160, 2.6141, 2.6306]]
)
elif model_name == "depth-anything-v2-base":
expected_slice = torch.tensor(
[[4.3576, 4.3723, 4.3908], [4.3231, 4.3146, 4.3611], [4.3016, 4.3170, 4.3121]]
)
elif model_name == "depth-anything-v2-large":
expected_slice = torch.tensor(
[[162.2751, 161.8504, 162.8788], [160.3138, 160.8050, 161.9835], [159.3812, 159.9884, 160.0768]]
)
elif model_name == "depth-anything-v2-metric-indoor-small":
expected_slice = torch.tensor(
[[1.3349, 1.2946, 1.2801], [1.2793, 1.2337, 1.2899], [1.2629, 1.2218, 1.2476]]
)
elif model_name == "depth-anything-v2-metric-indoor-base":
expected_slice = torch.tensor(
[[1.4601, 1.3824, 1.4904], [1.5031, 1.4349, 1.4274], [1.4570, 1.4578, 1.4200]]
)
elif model_name == "depth-anything-v2-metric-indoor-large":
expected_slice = torch.tensor(
[[1.5040, 1.5019, 1.5218], [1.5087, 1.5195, 1.5149], [1.5437, 1.5128, 1.5252]]
)
elif model_name == "depth-anything-v2-metric-outdoor-small":
expected_slice = torch.tensor(
[[9.5804, 8.0339, 7.7386], [7.9890, 7.2464, 7.7149], [7.7021, 7.2330, 7.3304]]
)
elif model_name == "depth-anything-v2-metric-outdoor-base":
expected_slice = torch.tensor(
[[10.2916, 9.0933, 8.8622], [9.1964, 9.3393, 9.0644], [8.9618, 9.4201, 9.2262]]
)
elif model_name == "depth-anything-v2-metric-outdoor-large":
expected_slice = torch.tensor(
[[14.0137, 13.3627, 13.1080], [13.2522, 13.3943, 13.3705], [13.0581, 13.4505, 13.3925]]
)
else:
raise ValueError("Not supported")
assert predicted_depth.shape == torch.Size(expected_shape)
assert torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1e-4)
print("Looks ok!")
if pytorch_dump_folder_path is not None:
Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
print(f"Saving model and processor to {pytorch_dump_folder_path}")
model.save_pretrained(pytorch_dump_folder_path)
processor.save_pretrained(pytorch_dump_folder_path)
if push_to_hub:
print("Pushing model and processor to hub...")
model.push_to_hub(repo_id=f"{model_name.title()}-hf")
processor.push_to_hub(repo_id=f"{model_name.title()}-hf")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="depth-anything-small",
type=str,
choices=name_to_checkpoint.keys(),
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model to the hub after conversion.",
)
parser.add_argument(
"--verify_logits",
action="store_false",
required=False,
help="Whether to verify the logits after conversion.",
)
args = parser.parse_args()
convert_dpt_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.verify_logits)
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/depth_anything/configuration_depth_anything.py | src/transformers/models/depth_anything/configuration_depth_anything.py | # coding=utf-8
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DepthAnything model configuration"""
from ...configuration_utils import PreTrainedConfig
from ...utils import logging
from ...utils.backbone_utils import verify_backbone_config_arguments
from ..auto.configuration_auto import CONFIG_MAPPING, AutoConfig
logger = logging.get_logger(__name__)
class DepthAnythingConfig(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`DepthAnythingModel`]. It is used to instantiate a DepthAnything
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the DepthAnything
[LiheYoung/depth-anything-small-hf](https://huggingface.co/LiheYoung/depth-anything-small-hf) architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
backbone_config (`Union[dict, "PreTrainedConfig"]`, *optional*, defaults to `Dinov2Config()`):
The configuration of the backbone model.
backbone (`str`, *optional*):
Name of backbone to use when `backbone_config` is `None`. If `use_pretrained_backbone` is `True`, this
will load the corresponding pretrained weights from the timm or transformers library. If `use_pretrained_backbone`
is `False`, this loads the backbone's config and uses that to initialize the backbone with random weights.
use_pretrained_backbone (`bool`, *optional*, defaults to `False`):
Whether to use pretrained weights for the backbone.
use_timm_backbone (`bool`, *optional*, defaults to `False`):
Whether or not to use the `timm` library for the backbone. If set to `False`, will use the [`AutoBackbone`]
API.
backbone_kwargs (`dict`, *optional*):
Keyword arguments to be passed to AutoBackbone when loading from a checkpoint
e.g. `{'out_indices': (0, 1, 2, 3)}`. Cannot be specified if `backbone_config` is set.
patch_size (`int`, *optional*, defaults to 14):
The size of the patches to extract from the backbone features.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
reassemble_hidden_size (`int`, *optional*, defaults to 384):
The number of input channels of the reassemble layers.
reassemble_factors (`list[int]`, *optional*, defaults to `[4, 2, 1, 0.5]`):
The up/downsampling factors of the reassemble layers.
neck_hidden_sizes (`list[str]`, *optional*, defaults to `[48, 96, 192, 384]`):
The hidden sizes to project to for the feature maps of the backbone.
fusion_hidden_size (`int`, *optional*, defaults to 64):
The number of channels before fusion.
head_in_index (`int`, *optional*, defaults to -1):
The index of the features to use in the depth estimation head.
head_hidden_size (`int`, *optional*, defaults to 32):
The number of output channels in the second convolution of the depth estimation head.
depth_estimation_type (`str`, *optional*, defaults to `"relative"`):
The type of depth estimation to use. Can be one of `["relative", "metric"]`.
max_depth (`float`, *optional*):
The maximum depth to use for the "metric" depth estimation head. 20 should be used for indoor models
and 80 for outdoor models. For "relative" depth estimation, this value is ignored.
Example:
```python
>>> from transformers import DepthAnythingConfig, DepthAnythingForDepthEstimation
>>> # Initializing a DepthAnything small style configuration
>>> configuration = DepthAnythingConfig()
>>> # Initializing a model from the DepthAnything small style configuration
>>> model = DepthAnythingForDepthEstimation(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "depth_anything"
sub_configs = {"backbone_config": AutoConfig}
def __init__(
self,
backbone_config=None,
backbone=None,
use_pretrained_backbone=False,
use_timm_backbone=False,
backbone_kwargs=None,
patch_size=14,
initializer_range=0.02,
reassemble_hidden_size=384,
reassemble_factors=[4, 2, 1, 0.5],
neck_hidden_sizes=[48, 96, 192, 384],
fusion_hidden_size=64,
head_in_index=-1,
head_hidden_size=32,
depth_estimation_type="relative",
max_depth=None,
**kwargs,
):
if backbone_config is None and backbone is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `Dinov2` backbone.")
backbone_config = CONFIG_MAPPING["dinov2"](
image_size=518,
hidden_size=384,
num_attention_heads=6,
out_indices=[9, 10, 11, 12],
apply_layernorm=True,
reshape_hidden_states=False,
)
elif isinstance(backbone_config, dict):
backbone_model_type = backbone_config.get("model_type")
config_class = CONFIG_MAPPING[backbone_model_type]
backbone_config = config_class.from_dict(backbone_config)
verify_backbone_config_arguments(
use_timm_backbone=use_timm_backbone,
use_pretrained_backbone=use_pretrained_backbone,
backbone=backbone,
backbone_config=backbone_config,
backbone_kwargs=backbone_kwargs,
)
self.backbone_config = backbone_config
self.backbone = backbone
self.use_pretrained_backbone = use_pretrained_backbone
self.use_timm_backbone = use_timm_backbone
self.backbone_kwargs = backbone_kwargs
self.reassemble_hidden_size = reassemble_hidden_size
self.patch_size = patch_size
self.initializer_range = initializer_range
self.reassemble_factors = reassemble_factors
self.neck_hidden_sizes = neck_hidden_sizes
self.fusion_hidden_size = fusion_hidden_size
self.head_in_index = head_in_index
self.head_hidden_size = head_hidden_size
if depth_estimation_type not in ["relative", "metric"]:
raise ValueError("depth_estimation_type must be one of ['relative', 'metric']")
self.depth_estimation_type = depth_estimation_type
self.max_depth = max_depth if max_depth else 1
super().__init__(**kwargs)
__all__ = ["DepthAnythingConfig"]
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/depth_anything/__init__.py | src/transformers/models/depth_anything/__init__.py | # Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure
if TYPE_CHECKING:
from .configuration_depth_anything import *
from .modeling_depth_anything import *
else:
import sys
_file = globals()["__file__"]
sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/depth_anything/modeling_depth_anything.py | src/transformers/models/depth_anything/modeling_depth_anything.py | # coding=utf-8
# Copyright 2024 TikTok and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch Depth Anything model."""
from typing import Optional, Union
import torch
from torch import nn
from ...modeling_outputs import DepthEstimatorOutput
from ...modeling_utils import PreTrainedModel
from ...utils import auto_docstring, logging
from ...utils.backbone_utils import load_backbone
from .configuration_depth_anything import DepthAnythingConfig
logger = logging.get_logger(__name__)
# General docstring
class DepthAnythingReassembleLayer(nn.Module):
def __init__(self, config, channels, factor):
super().__init__()
self.projection = nn.Conv2d(in_channels=config.reassemble_hidden_size, out_channels=channels, kernel_size=1)
# up/down sampling depending on factor
if factor > 1:
self.resize = nn.ConvTranspose2d(channels, channels, kernel_size=factor, stride=factor, padding=0)
elif factor == 1:
self.resize = nn.Identity()
elif factor < 1:
# so should downsample
self.resize = nn.Conv2d(channels, channels, kernel_size=3, stride=int(1 / factor), padding=1)
# Copied from transformers.models.dpt.modeling_dpt.DPTReassembleLayer.forward
def forward(self, hidden_state):
hidden_state = self.projection(hidden_state)
hidden_state = self.resize(hidden_state)
return hidden_state
class DepthAnythingReassembleStage(nn.Module):
"""
This class reassembles the hidden states of the backbone into image-like feature representations at various
resolutions.
This happens in 3 stages:
1. Take the patch embeddings and reshape them to image-like feature representations.
2. Project the channel dimension of the hidden states according to `config.neck_hidden_sizes`.
    3. Resize the spatial dimensions (height, width).
Args:
config (`[DepthAnythingConfig]`):
Model configuration class defining the model architecture.
"""
def __init__(self, config):
super().__init__()
self.config = config
self.layers = nn.ModuleList()
for channels, factor in zip(config.neck_hidden_sizes, config.reassemble_factors):
self.layers.append(DepthAnythingReassembleLayer(config, channels=channels, factor=factor))
def forward(self, hidden_states: list[torch.Tensor], patch_height=None, patch_width=None) -> list[torch.Tensor]:
"""
Args:
hidden_states (`list[torch.FloatTensor]`, each of shape `(batch_size, sequence_length + 1, hidden_size)`):
List of hidden states from the backbone.
"""
out = []
for i, hidden_state in enumerate(hidden_states):
# reshape to (batch_size, num_channels, height, width)
hidden_state = hidden_state[:, 1:]
batch_size, _, num_channels = hidden_state.shape
hidden_state = hidden_state.reshape(batch_size, patch_height, patch_width, num_channels)
hidden_state = hidden_state.permute(0, 3, 1, 2).contiguous()
hidden_state = self.layers[i](hidden_state)
out.append(hidden_state)
return out
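# A worked shape sketch for the reassemble stage above (illustrative sizes; the exact
# values depend on the backbone config of the checkpoint):
#
#   backbone tokens           : (batch_size, 1 + patch_h * patch_w, hidden_size)
#   drop CLS token + reshape  : (batch_size, patch_h, patch_w, hidden_size)
#   permute to channels-first : (batch_size, hidden_size, patch_h, patch_w)
#   project + resize level i  : (batch_size, neck_hidden_sizes[i],
#                                ~patch_h * reassemble_factors[i], ~patch_w * reassemble_factors[i])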
class DepthAnythingPreActResidualLayer(nn.Module):
"""
ResidualConvUnit, pre-activate residual unit.
Args:
config (`[DepthAnythingConfig]`):
Model configuration class defining the model architecture.
"""
def __init__(self, config):
super().__init__()
self.activation1 = nn.ReLU()
self.convolution1 = nn.Conv2d(
config.fusion_hidden_size,
config.fusion_hidden_size,
kernel_size=3,
stride=1,
padding=1,
bias=True,
)
self.activation2 = nn.ReLU()
self.convolution2 = nn.Conv2d(
config.fusion_hidden_size,
config.fusion_hidden_size,
kernel_size=3,
stride=1,
padding=1,
bias=True,
)
def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
residual = hidden_state
hidden_state = self.activation1(hidden_state)
hidden_state = self.convolution1(hidden_state)
hidden_state = self.activation2(hidden_state)
hidden_state = self.convolution2(hidden_state)
return hidden_state + residual
class DepthAnythingFeatureFusionLayer(nn.Module):
"""Feature fusion layer, merges feature maps from different stages.
Args:
config (`[DepthAnythingConfig]`):
Model configuration class defining the model architecture.
"""
def __init__(self, config):
super().__init__()
self.projection = nn.Conv2d(config.fusion_hidden_size, config.fusion_hidden_size, kernel_size=1, bias=True)
self.residual_layer1 = DepthAnythingPreActResidualLayer(config)
self.residual_layer2 = DepthAnythingPreActResidualLayer(config)
def forward(self, hidden_state, residual=None, size=None):
if residual is not None:
if hidden_state.shape != residual.shape:
residual = nn.functional.interpolate(
residual, size=(hidden_state.shape[2], hidden_state.shape[3]), mode="bilinear", align_corners=False
)
hidden_state = hidden_state + self.residual_layer1(residual)
hidden_state = self.residual_layer2(hidden_state)
modifier = {"scale_factor": 2} if size is None else {"size": size}
hidden_state = nn.functional.interpolate(
hidden_state,
**modifier,
mode="bilinear",
align_corners=True,
)
hidden_state = self.projection(hidden_state)
return hidden_state
class DepthAnythingFeatureFusionStage(nn.Module):
# Copied from transformers.models.dpt.modeling_dpt.DPTFeatureFusionStage.__init__ with DPT->DepthAnything
def __init__(self, config: DepthAnythingConfig):
super().__init__()
self.layers = nn.ModuleList()
for _ in range(len(config.neck_hidden_sizes)):
self.layers.append(DepthAnythingFeatureFusionLayer(config))
def forward(self, hidden_states, size=None):
# reversing the hidden_states, we start from the last
hidden_states = hidden_states[::-1]
fused_hidden_states = []
fused_hidden_state = None
for idx, (hidden_state, layer) in enumerate(zip(hidden_states, self.layers)):
size = hidden_states[idx + 1].shape[2:] if idx != (len(hidden_states) - 1) else None
if fused_hidden_state is None:
# first layer only uses the last hidden_state
fused_hidden_state = layer(hidden_state, size=size)
else:
fused_hidden_state = layer(fused_hidden_state, hidden_state, size=size)
fused_hidden_states.append(fused_hidden_state)
return fused_hidden_states
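# Fusion order sketch for the stage above, assuming four reassembled features feat_0..feat_3
# ordered from highest to lowest resolution (as produced by the neck convolutions):
#
#   fused = layers[0](feat_3, size=feat_2.shape[2:])           # deepest level, no residual yet
#   fused = layers[1](fused, feat_2, size=feat_1.shape[2:])    # add feat_2, upsample to feat_1's size
#   fused = layers[2](fused, feat_1, size=feat_0.shape[2:])    # add feat_1, upsample to feat_0's size
#   fused = layers[3](fused, feat_0)                           # add feat_0, upsample by a factor of 2
#
# Each intermediate result is also collected in `fused_hidden_states`, deepest level first.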
# Modified from transformers.models.dpt.modeling_dpt.DPTPreTrainedModel with DPT->DepthAnything,dpt->depth_anything
# sdpa and flash_attn_2 support is intentionally not declared here; attention is handled by the backbone
@auto_docstring
class DepthAnythingPreTrainedModel(PreTrainedModel):
config: DepthAnythingConfig
base_model_prefix = "depth_anything"
main_input_name = "pixel_values"
input_modalities = ("image",)
supports_gradient_checkpointing = True
class DepthAnythingNeck(nn.Module):
"""
DepthAnythingNeck. A neck is a module that is normally used between the backbone and the head. It takes a list of tensors as
input and produces another list of tensors as output. For DepthAnything, it includes 2 stages:
* DepthAnythingReassembleStage
* DepthAnythingFeatureFusionStage.
Args:
        config (`[DepthAnythingConfig]`):
            Model configuration class defining the model architecture.
"""
def __init__(self, config):
super().__init__()
self.config = config
self.reassemble_stage = DepthAnythingReassembleStage(config)
self.convs = nn.ModuleList()
for channel in config.neck_hidden_sizes:
self.convs.append(nn.Conv2d(channel, config.fusion_hidden_size, kernel_size=3, padding=1, bias=False))
# fusion
self.fusion_stage = DepthAnythingFeatureFusionStage(config)
def forward(self, hidden_states: list[torch.Tensor], patch_height=None, patch_width=None) -> list[torch.Tensor]:
"""
Args:
hidden_states (`list[torch.FloatTensor]`, each of shape `(batch_size, sequence_length, hidden_size)` or `(batch_size, hidden_size, height, width)`):
List of hidden states from the backbone.
"""
if not isinstance(hidden_states, (tuple, list)):
raise TypeError("hidden_states should be a tuple or list of tensors")
if len(hidden_states) != len(self.config.neck_hidden_sizes):
raise ValueError("The number of hidden states should be equal to the number of neck hidden sizes.")
# postprocess hidden states
hidden_states = self.reassemble_stage(hidden_states, patch_height, patch_width)
features = [self.convs[i](feature) for i, feature in enumerate(hidden_states)]
# fusion blocks
output = self.fusion_stage(features)
return output
class DepthAnythingDepthEstimationHead(nn.Module):
"""
Output head consisting of 3 convolutional layers. It progressively halves the feature dimension and upsamples
the predictions to the input resolution after the first convolutional layer (details can be found in the DPT paper's
supplementary material). The final activation function is either ReLU or Sigmoid, depending on the depth estimation
type (relative or metric). For metric depth estimation, the output is scaled by the maximum depth used during pretraining.
"""
def __init__(self, config):
super().__init__()
self.head_in_index = config.head_in_index
self.patch_size = config.patch_size
features = config.fusion_hidden_size
self.conv1 = nn.Conv2d(features, features // 2, kernel_size=3, stride=1, padding=1)
self.conv2 = nn.Conv2d(features // 2, config.head_hidden_size, kernel_size=3, stride=1, padding=1)
self.activation1 = nn.ReLU()
self.conv3 = nn.Conv2d(config.head_hidden_size, 1, kernel_size=1, stride=1, padding=0)
if config.depth_estimation_type == "relative":
self.activation2 = nn.ReLU()
elif config.depth_estimation_type == "metric":
self.activation2 = nn.Sigmoid()
else:
raise ValueError(f"Unknown depth estimation type: {config.depth_estimation_type}")
self.max_depth = config.max_depth
def forward(self, hidden_states: list[torch.Tensor], patch_height, patch_width) -> torch.Tensor:
hidden_states = hidden_states[self.head_in_index]
predicted_depth = self.conv1(hidden_states)
predicted_depth = nn.functional.interpolate(
predicted_depth,
(int(patch_height * self.patch_size), int(patch_width * self.patch_size)),
mode="bilinear",
align_corners=True,
)
predicted_depth = self.conv2(predicted_depth)
predicted_depth = self.activation1(predicted_depth)
predicted_depth = self.conv3(predicted_depth)
predicted_depth = self.activation2(predicted_depth) * self.max_depth
predicted_depth = predicted_depth.squeeze(dim=1) # shape (batch_size, height, width)
return predicted_depth
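# Worked resolution example for the head above, assuming the usual 518x518 preprocessing and
# a ViT patch size of 14 (as in the released Depth Anything checkpoints):
#
#   patch_height = patch_width = 518 // 14 = 37
#   after conv1 the features are interpolated to (37 * 14, 37 * 14) = (518, 518),
#   so the final predicted depth matches the (padded) input resolution and has shape
#   (batch_size, 518, 518) once the channel dimension is squeezed out.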
@auto_docstring(
custom_intro="""
Depth Anything Model with a depth estimation head on top (consisting of 3 convolutional layers) e.g. for KITTI, NYUv2.
"""
)
class DepthAnythingForDepthEstimation(DepthAnythingPreTrainedModel):
_no_split_modules = ["DPTViTEmbeddings"]
def __init__(self, config):
super().__init__(config)
self.backbone = load_backbone(config)
self.neck = DepthAnythingNeck(config)
self.head = DepthAnythingDepthEstimationHead(config)
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
pixel_values: torch.FloatTensor,
labels: Optional[torch.LongTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
**kwargs,
) -> Union[tuple[torch.Tensor], DepthEstimatorOutput]:
r"""
labels (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*):
Ground truth depth estimation maps for computing the loss.
Examples:
```python
>>> from transformers import AutoImageProcessor, AutoModelForDepthEstimation
>>> import torch
>>> import numpy as np
>>> from PIL import Image
>>> import requests
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> image_processor = AutoImageProcessor.from_pretrained("LiheYoung/depth-anything-small-hf")
>>> model = AutoModelForDepthEstimation.from_pretrained("LiheYoung/depth-anything-small-hf")
>>> # prepare image for the model
>>> inputs = image_processor(images=image, return_tensors="pt")
>>> with torch.no_grad():
... outputs = model(**inputs)
>>> # interpolate to original size
>>> post_processed_output = image_processor.post_process_depth_estimation(
... outputs,
... target_sizes=[(image.height, image.width)],
... )
>>> # visualize the prediction
>>> predicted_depth = post_processed_output[0]["predicted_depth"]
>>> depth = predicted_depth * 255 / predicted_depth.max()
>>> depth = depth.detach().cpu().numpy()
>>> depth = Image.fromarray(depth.astype("uint8"))
```"""
loss = None
if labels is not None:
raise NotImplementedError("Training is not implemented yet")
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
outputs = self.backbone.forward_with_filtered_kwargs(
pixel_values, output_hidden_states=output_hidden_states, output_attentions=output_attentions
)
hidden_states = outputs.feature_maps
_, _, height, width = pixel_values.shape
patch_size = self.config.patch_size
patch_height = height // patch_size
patch_width = width // patch_size
hidden_states = self.neck(hidden_states, patch_height, patch_width)
predicted_depth = self.head(hidden_states, patch_height, patch_width)
if not return_dict:
if output_hidden_states:
output = (predicted_depth,) + outputs[1:]
else:
output = (predicted_depth,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return DepthEstimatorOutput(
loss=loss,
predicted_depth=predicted_depth,
hidden_states=outputs.hidden_states if output_hidden_states else None,
attentions=outputs.attentions,
)
__all__ = ["DepthAnythingForDepthEstimation", "DepthAnythingPreTrainedModel"]
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/openai/convert_openai_original_tf_checkpoint_to_pytorch.py | src/transformers/models/openai/convert_openai_original_tf_checkpoint_to_pytorch.py | # coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert OpenAI GPT checkpoint."""
import argparse
import json
import os
import torch
from transformers import OpenAIGPTConfig, OpenAIGPTModel
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_tf_weights_in_openai_gpt(model, config, openai_checkpoint_folder_path):
"""Load tf pre-trained weights in a pytorch model (from NumPy arrays here)"""
import re
import numpy as np
if ".ckpt" in openai_checkpoint_folder_path:
openai_checkpoint_folder_path = os.path.dirname(openai_checkpoint_folder_path)
logger.info(f"Loading weights from {openai_checkpoint_folder_path}")
with open(openai_checkpoint_folder_path + "/parameters_names.json", "r", encoding="utf-8") as names_handle:
names = json.load(names_handle)
with open(openai_checkpoint_folder_path + "/params_shapes.json", "r", encoding="utf-8") as shapes_handle:
shapes = json.load(shapes_handle)
offsets = np.cumsum([np.prod(shape) for shape in shapes])
init_params = [np.load(openai_checkpoint_folder_path + f"/params_{n}.npy") for n in range(10)]
init_params = np.split(np.concatenate(init_params, 0), offsets)[:-1]
init_params = [param.reshape(shape) for param, shape in zip(init_params, shapes)]
# This was used when we had a single embedding matrix for positions and tokens
# init_params[0] = np.concatenate([init_params[1], init_params[0]], 0)
# del init_params[1]
init_params = [arr.squeeze() for arr in init_params]
    # Check that the token and position embedding weight dimensions match those of the init parameters.
if model.tokens_embed.weight.shape != init_params[1].shape:
raise ValueError(
f"tokens_embed.weight.shape: {model.tokens_embed.weight.shape} does not match init_param[1].shape:"
f" {init_params[1].shape}"
)
if model.positions_embed.weight.shape != init_params[0].shape:
raise ValueError(
f"positions_embed.weight.shape: {model.positions_embed.weight.shape} does not match init_param[0].shape:"
f" {init_params[0].shape}"
)
model.tokens_embed.weight.data = torch.from_numpy(init_params[1])
model.positions_embed.weight.data = torch.from_numpy(init_params[0])
names.pop(0)
# Pop position and token embedding arrays
init_params.pop(0)
init_params.pop(0)
for name, array in zip(names, init_params): # names[1:n_transfer], init_params[1:n_transfer]):
name = name[6:] # skip "model/"
if name[-2:] != ":0":
raise ValueError(f"Layer {name} does not end with :0")
name = name[:-2]
name = name.split("/")
pointer = model
for m_name in name:
if re.fullmatch(r"[A-Za-z]+\d+", m_name):
scope_names = re.split(r"(\d+)", m_name)
else:
scope_names = [m_name]
if scope_names[0] == "g":
pointer = getattr(pointer, "weight")
elif scope_names[0] == "b":
pointer = getattr(pointer, "bias")
elif scope_names[0] == "w":
pointer = getattr(pointer, "weight")
else:
pointer = getattr(pointer, scope_names[0])
if len(scope_names) >= 2:
num = int(scope_names[1])
pointer = pointer[num]
# Ensure that the pointer and array have compatible shapes.
if pointer.shape != array.shape:
raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")
logger.info(f"Initialize PyTorch weight {name}")
pointer.data = torch.from_numpy(array)
return model
def convert_openai_checkpoint_to_pytorch(openai_checkpoint_folder_path, openai_config_file, pytorch_dump_folder_path):
# Construct model
if openai_config_file == "":
config = OpenAIGPTConfig()
else:
config = OpenAIGPTConfig.from_json_file(openai_config_file)
model = OpenAIGPTModel(config)
# Load weights from numpy
load_tf_weights_in_openai_gpt(model, config, openai_checkpoint_folder_path)
# Save pytorch-model
pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
print(f"Save PyTorch model to {pytorch_weights_dump_path}")
torch.save(model.state_dict(), pytorch_weights_dump_path)
print(f"Save configuration file to {pytorch_config_dump_path}")
with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
f.write(config.to_json_string())
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--openai_checkpoint_folder_path",
default=None,
type=str,
required=True,
        help="Path to the TensorFlow checkpoint folder.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--openai_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained OpenAI model. \n"
"This specifies the model architecture."
),
)
args = parser.parse_args()
convert_openai_checkpoint_to_pytorch(
args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
)
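# Example invocation of this script (hypothetical local paths, shown for illustration only):
#
#   python convert_openai_original_tf_checkpoint_to_pytorch.py \
#       --openai_checkpoint_folder_path /path/to/openai/checkpoint \
#       --pytorch_dump_folder_path /path/to/pytorch/output
#
# The checkpoint folder is expected to contain parameters_names.json, params_shapes.json and
# the params_{0..9}.npy shards read by load_tf_weights_in_openai_gpt above.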
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/openai/configuration_openai.py | src/transformers/models/openai/configuration_openai.py | # coding=utf-8
# Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""OpenAI GPT configuration"""
from ...configuration_utils import PreTrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
class OpenAIGPTConfig(PreTrainedConfig):
"""
This is the configuration class to store the configuration of a [`OpenAIGPTModel`]. It is
used to instantiate a GPT model according to the specified arguments, defining the model architecture.
Instantiating a configuration with the defaults will yield a similar configuration to that of the GPT
[openai-community/openai-gpt](https://huggingface.co/openai-community/openai-gpt) architecture from OpenAI.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 40478):
            Vocabulary size of the GPT model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`OpenAIGPTModel`].
n_positions (`int`, *optional*, defaults to 512):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
n_embd (`int`, *optional*, defaults to 768):
Dimensionality of the embeddings and hidden states.
n_layer (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
n_head (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
afn (`str` or `Callable`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"silu"` and `"gelu_new"` are supported.
resid_pdrop (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
embd_pdrop (`int`, *optional*, defaults to 0.1):
The dropout ratio for the embeddings.
attn_pdrop (`float`, *optional*, defaults to 0.1):
The dropout ratio for the attention.
layer_norm_epsilon (`float`, *optional*, defaults to 1e-05):
            The epsilon to use in the layer normalization layers.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
summary_type (`str`, *optional*, defaults to `"cls_index"`):
            Argument used when doing sequence summary, used in the model [`OpenAIGPTDoubleHeadsModel`].
Has to be one of the following options:
- `"last"`: Take the last token hidden state (like XLNet).
- `"first"`: Take the first token hidden state (like BERT).
- `"mean"`: Take the mean of all tokens hidden states.
- `"cls_index"`: Supply a Tensor of classification token position (like GPT/GPT-2).
- `"attn"`: Not implemented now, use multi-head attention.
summary_use_proj (`bool`, *optional*, defaults to `True`):
            Argument used when doing sequence summary, used in the model [`OpenAIGPTDoubleHeadsModel`].
Whether or not to add a projection after the vector extraction.
summary_activation (`str`, *optional*):
            Argument used when doing sequence summary, used in the model [`OpenAIGPTDoubleHeadsModel`].
Pass `"tanh"` for a tanh activation to the output, any other value will result in no activation.
summary_proj_to_labels (`bool`, *optional*, defaults to `True`):
            Argument used when doing sequence summary, used in the model [`OpenAIGPTDoubleHeadsModel`].
Whether the projection outputs should have `config.num_labels` or `config.hidden_size` classes.
summary_first_dropout (`float`, *optional*, defaults to 0.1):
            Argument used when doing sequence summary, used in the model [`OpenAIGPTDoubleHeadsModel`].
The dropout ratio to be used after the projection and activation.
Examples:
```python
>>> from transformers import OpenAIGPTConfig, OpenAIGPTModel
>>> # Initializing a GPT configuration
>>> configuration = OpenAIGPTConfig()
>>> # Initializing a model (with random weights) from the configuration
>>> model = OpenAIGPTModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "openai-gpt"
attribute_map = {
"max_position_embeddings": "n_positions",
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__(
self,
vocab_size=40478,
n_positions=512,
n_embd=768,
n_layer=12,
n_head=12,
afn="gelu",
resid_pdrop=0.1,
embd_pdrop=0.1,
attn_pdrop=0.1,
layer_norm_epsilon=1e-5,
initializer_range=0.02,
summary_type="cls_index",
summary_use_proj=True,
summary_activation=None,
summary_proj_to_labels=True,
summary_first_dropout=0.1,
**kwargs,
):
self.vocab_size = vocab_size
self.n_positions = n_positions
self.n_embd = n_embd
self.n_layer = n_layer
self.n_head = n_head
self.afn = afn
self.resid_pdrop = resid_pdrop
self.embd_pdrop = embd_pdrop
self.attn_pdrop = attn_pdrop
self.layer_norm_epsilon = layer_norm_epsilon
self.initializer_range = initializer_range
self.summary_type = summary_type
self.summary_use_proj = summary_use_proj
self.summary_activation = summary_activation
self.summary_first_dropout = summary_first_dropout
self.summary_proj_to_labels = summary_proj_to_labels
super().__init__(**kwargs)
__all__ = ["OpenAIGPTConfig"]
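# A minimal customization sketch for the configuration above (illustrative values, not a
# released configuration):
#
#   >>> from transformers import OpenAIGPTConfig, OpenAIGPTModel
#   >>> config = OpenAIGPTConfig(n_layer=6, n_head=8, n_embd=512)
#   >>> model = OpenAIGPTModel(config)  # randomly initialized, smaller than the default
#   >>> config.hidden_size  # resolved to n_embd through `attribute_map`
#   512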
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/openai/__init__.py | src/transformers/models/openai/__init__.py | # Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure
if TYPE_CHECKING:
from .configuration_openai import *
from .modeling_openai import *
from .tokenization_openai import *
else:
import sys
_file = globals()["__file__"]
sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/openai/modeling_openai.py | src/transformers/models/openai/modeling_openai.py | # coding=utf-8
# Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch OpenAI GPT model."""
import math
from collections.abc import Callable
from dataclasses import dataclass
from typing import Any, Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ... import initialization as init
from ...activations import gelu_new, get_activation, silu
from ...generation import GenerationMixin
from ...modeling_outputs import BaseModelOutput, CausalLMOutput, SequenceClassifierOutput
from ...modeling_utils import PreTrainedModel
from ...pytorch_utils import Conv1D
from ...utils import (
ModelOutput,
auto_docstring,
logging,
)
from .configuration_openai import OpenAIGPTConfig
logger = logging.get_logger(__name__)
ACT_FNS = {"relu": nn.ReLU(), "silu": silu, "gelu": gelu_new, "swish": silu}
class Attention(nn.Module):
def __init__(self, nx, n_positions, config, scale=False):
super().__init__()
self.n_positions = n_positions
n_state = nx # in Attention: n_state=768 (nx=n_embd)
if n_state % config.n_head != 0:
raise ValueError(f"Attention n_state shape: {n_state} must be divisible by config.n_head {config.n_head}")
self.register_buffer(
"bias",
torch.tril(torch.ones(n_positions, n_positions)).view(1, 1, n_positions, n_positions),
persistent=False,
)
self.n_head = config.n_head
self.split_size = n_state
self.scale = scale
self.c_attn = Conv1D(n_state * 3, nx)
self.c_proj = Conv1D(n_state, nx)
self.attn_dropout = nn.Dropout(config.attn_pdrop)
self.resid_dropout = nn.Dropout(config.resid_pdrop)
def _attn(self, q, k, v, attention_mask=None, output_attentions=False):
w = torch.matmul(q, k)
if self.scale:
w = w / math.sqrt(v.size(-1))
        # self.bias may be larger than w, so we need to crop it
b = self.bias[:, :, : w.size(-2), : w.size(-1)]
w = w * b + -1e4 * (1 - b)
if attention_mask is not None:
# Apply the attention mask
w = w + attention_mask
w = nn.functional.softmax(w, dim=-1)
w = self.attn_dropout(w)
outputs = [torch.matmul(w, v)]
if output_attentions:
outputs.append(w)
return outputs
def merge_heads(self, x):
x = x.permute(0, 2, 1, 3).contiguous()
new_x_shape = x.size()[:-2] + (x.size(-2) * x.size(-1),)
return x.view(*new_x_shape)
def split_heads(self, x, k=False):
new_x_shape = x.size()[:-1] + (self.n_head, x.size(-1) // self.n_head)
x = x.view(*new_x_shape)
if k:
return x.permute(0, 2, 3, 1)
else:
return x.permute(0, 2, 1, 3)
def forward(self, x, attention_mask=None, output_attentions=False):
x = self.c_attn(x)
query, key, value = x.split(self.split_size, dim=2)
query = self.split_heads(query)
key = self.split_heads(key, k=True)
value = self.split_heads(value)
attn_outputs = self._attn(query, key, value, attention_mask, output_attentions)
a = attn_outputs[0]
a = self.merge_heads(a)
a = self.c_proj(a)
a = self.resid_dropout(a)
outputs = [a] + attn_outputs[1:]
return outputs # a, (attentions)
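# Shape sketch for the attention block above, using the default openai-gpt sizes
# (n_embd=768, n_head=12, head dim 64); other configurations scale accordingly:
#
#   c_attn(x)                 : (batch, seq, 3 * 768), split into query / key / value
#   split_heads(query)        : (batch, 12, seq, 64)
#   split_heads(key, k=True)  : (batch, 12, 64, seq)   # key is pre-transposed for the matmul
#   torch.matmul(q, k)        : (batch, 12, seq, seq)
#
# Causal masking uses `w * b + -1e4 * (1 - b)`: future positions are pushed to a large
# negative value before the softmax instead of being removed from the computation.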
class MLP(nn.Module):
def __init__(self, n_state, config): # in MLP: n_state=3072 (4 * n_embd)
super().__init__()
nx = config.n_embd
self.c_fc = Conv1D(n_state, nx)
self.c_proj = Conv1D(nx, n_state)
self.act = ACT_FNS[config.afn]
self.dropout = nn.Dropout(config.resid_pdrop)
def forward(self, x):
h = self.act(self.c_fc(x))
h2 = self.c_proj(h)
return self.dropout(h2)
class Block(nn.Module):
def __init__(self, n_positions, config, scale=False):
super().__init__()
nx = config.n_embd
self.attn = Attention(nx, n_positions, config, scale)
self.ln_1 = nn.LayerNorm(nx, eps=config.layer_norm_epsilon)
self.mlp = MLP(4 * nx, config)
self.ln_2 = nn.LayerNorm(nx, eps=config.layer_norm_epsilon)
def forward(self, x, attention_mask=None, output_attentions=False):
attn_outputs = self.attn(
x,
attention_mask=attention_mask,
output_attentions=output_attentions,
)
a = attn_outputs[0]
n = self.ln_1(x + a)
m = self.mlp(n)
h = self.ln_2(n + m)
outputs = [h] + attn_outputs[1:]
return outputs
# Copied from transformers.models.xlm.modeling_xlm.XLMSequenceSummary with XLM->OpenAIGPT
class OpenAIGPTSequenceSummary(nn.Module):
r"""
Compute a single vector summary of a sequence hidden states.
Args:
config ([`OpenAIGPTConfig`]):
The config used by the model. Relevant arguments in the config class of the model are (refer to the actual
config class of your model for the default values it uses):
- **summary_type** (`str`) -- The method to use to make this summary. Accepted values are:
- `"last"` -- Take the last token hidden state (like XLNet)
- `"first"` -- Take the first token hidden state (like Bert)
- `"mean"` -- Take the mean of all tokens hidden states
- `"cls_index"` -- Supply a Tensor of classification token position (GPT/GPT-2)
- `"attn"` -- Not implemented now, use multi-head attention
- **summary_use_proj** (`bool`) -- Add a projection after the vector extraction.
- **summary_proj_to_labels** (`bool`) -- If `True`, the projection outputs to `config.num_labels` classes
(otherwise to `config.hidden_size`).
- **summary_activation** (`Optional[str]`) -- Set to `"tanh"` to add a tanh activation to the output,
another string or `None` will add no activation.
- **summary_first_dropout** (`float`) -- Optional dropout probability before the projection and activation.
- **summary_last_dropout** (`float`)-- Optional dropout probability after the projection and activation.
"""
def __init__(self, config: OpenAIGPTConfig):
super().__init__()
self.summary_type = getattr(config, "summary_type", "last")
if self.summary_type == "attn":
# We should use a standard multi-head attention module with absolute positional embedding for that.
# Cf. https://github.com/zihangdai/xlnet/blob/master/modeling.py#L253-L276
# We can probably just use the multi-head attention module of PyTorch >=1.1.0
raise NotImplementedError
self.summary = nn.Identity()
if hasattr(config, "summary_use_proj") and config.summary_use_proj:
if hasattr(config, "summary_proj_to_labels") and config.summary_proj_to_labels and config.num_labels > 0:
num_classes = config.num_labels
else:
num_classes = config.hidden_size
self.summary = nn.Linear(config.hidden_size, num_classes)
activation_string = getattr(config, "summary_activation", None)
self.activation: Callable = get_activation(activation_string) if activation_string else nn.Identity()
self.first_dropout = nn.Identity()
if hasattr(config, "summary_first_dropout") and config.summary_first_dropout > 0:
self.first_dropout = nn.Dropout(config.summary_first_dropout)
self.last_dropout = nn.Identity()
if hasattr(config, "summary_last_dropout") and config.summary_last_dropout > 0:
self.last_dropout = nn.Dropout(config.summary_last_dropout)
def forward(
self, hidden_states: torch.FloatTensor, cls_index: Optional[torch.LongTensor] = None
) -> torch.FloatTensor:
"""
Compute a single vector summary of a sequence hidden states.
Args:
hidden_states (`torch.FloatTensor` of shape `[batch_size, seq_len, hidden_size]`):
The hidden states of the last layer.
cls_index (`torch.LongTensor` of shape `[batch_size]` or `[batch_size, ...]` where ... are optional leading dimensions of `hidden_states`, *optional*):
Used if `summary_type == "cls_index"` and takes the last token of the sequence as classification token.
Returns:
`torch.FloatTensor`: The summary of the sequence hidden states.
"""
if self.summary_type == "last":
output = hidden_states[:, -1]
elif self.summary_type == "first":
output = hidden_states[:, 0]
elif self.summary_type == "mean":
output = hidden_states.mean(dim=1)
elif self.summary_type == "cls_index":
if cls_index is None:
cls_index = torch.full_like(
hidden_states[..., :1, :],
hidden_states.shape[-2] - 1,
dtype=torch.long,
)
else:
cls_index = cls_index.unsqueeze(-1).unsqueeze(-1)
cls_index = cls_index.expand((-1,) * (cls_index.dim() - 1) + (hidden_states.size(-1),))
# shape of cls_index: (bsz, XX, 1, hidden_size) where XX are optional leading dim of hidden_states
output = hidden_states.gather(-2, cls_index).squeeze(-2) # shape (bsz, XX, hidden_size)
elif self.summary_type == "attn":
raise NotImplementedError
output = self.first_dropout(output)
output = self.summary(output)
output = self.activation(output)
output = self.last_dropout(output)
return output
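# Summary-type sketch for the module above, with a toy batch of 2 sequences of length 5 and
# hidden size 8 (illustrative only):
#
#   "last"      -> hidden_states[:, -1]         # shape (2, 8)
#   "first"     -> hidden_states[:, 0]          # shape (2, 8)
#   "mean"      -> hidden_states.mean(dim=1)    # shape (2, 8)
#   "cls_index" -> gather a per-example token position, e.g. cls_index = torch.tensor([4, 2])
#                  picks token 4 of the first sequence and token 2 of the second, shape (2, 8)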
@auto_docstring
class OpenAIGPTPreTrainedModel(PreTrainedModel):
config: OpenAIGPTConfig
base_model_prefix = "transformer"
def _init_weights(self, module):
super()._init_weights(module)
if isinstance(module, Attention):
n_positions = module.n_positions
init.copy_(
module.bias, torch.tril(torch.ones(n_positions, n_positions)).view(1, 1, n_positions, n_positions)
)
elif isinstance(module, OpenAIGPTModel):
init.copy_(module.position_ids, torch.arange(module.config.n_positions))
@dataclass
@auto_docstring(
custom_intro="""
Base class for outputs of models predicting if two sentences are consecutive or not.
"""
)
class OpenAIGPTDoubleHeadsModelOutput(ModelOutput):
r"""
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Language modeling loss.
mc_loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `mc_labels` is provided):
Multiple choice classification loss.
logits (`torch.FloatTensor` of shape `(batch_size, num_choices, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
mc_logits (`torch.FloatTensor` of shape `(batch_size, num_choices)`):
Prediction scores of the multiple choice classification head (scores for each choice before SoftMax).
"""
loss: Optional[torch.FloatTensor] = None
mc_loss: Optional[torch.FloatTensor] = None
logits: Optional[torch.FloatTensor] = None
mc_logits: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
@auto_docstring
class OpenAIGPTModel(OpenAIGPTPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.tokens_embed = nn.Embedding(config.vocab_size, config.n_embd)
self.positions_embed = nn.Embedding(config.n_positions, config.n_embd)
self.drop = nn.Dropout(config.embd_pdrop)
self.h = nn.ModuleList([Block(config.n_positions, config, scale=True) for _ in range(config.n_layer)])
self.register_buffer("position_ids", torch.arange(config.n_positions), persistent=False)
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.tokens_embed
def set_input_embeddings(self, new_embeddings):
self.tokens_embed = new_embeddings
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
**kwargs,
) -> Union[tuple[torch.Tensor], BaseModelOutput]:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
if position_ids is None:
            # Code is different from when we had a single embedding matrix for position and token embeddings
position_ids = self.position_ids[None, : input_shape[-1]]
# Attention mask.
if attention_mask is not None:
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
            # this attention mask is simpler than the triangular masking of causal attention
            # used in OpenAI GPT; we just need to prepare the broadcast dimension here.
attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and the dtype's smallest value for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
attention_mask = attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
attention_mask = (1.0 - attention_mask) * torch.finfo(self.dtype).min
if inputs_embeds is None:
inputs_embeds = self.tokens_embed(input_ids)
position_embeds = self.positions_embed(position_ids)
if token_type_ids is not None:
token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1))
token_type_embeds = self.tokens_embed(token_type_ids)
else:
token_type_embeds = 0
hidden_states = inputs_embeds + position_embeds + token_type_embeds
hidden_states = self.drop(hidden_states)
output_shape = input_shape + (hidden_states.size(-1),)
all_attentions = () if output_attentions else None
all_hidden_states = () if output_hidden_states else None
for i, block in enumerate(self.h):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
outputs = block(hidden_states, attention_mask, output_attentions=output_attentions)
hidden_states = outputs[0]
if output_attentions:
all_attentions = all_attentions + (outputs[1],)
hidden_states = hidden_states.view(*output_shape)
# Add last layer
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None)
return BaseModelOutput(
last_hidden_state=hidden_states,
hidden_states=all_hidden_states,
attentions=all_attentions,
)
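# Minimal usage sketch for the base model above, assuming the public
# "openai-community/openai-gpt" checkpoint referenced in the configuration docs:
#
#   >>> from transformers import AutoTokenizer, OpenAIGPTModel
#   >>> import torch
#   >>> tokenizer = AutoTokenizer.from_pretrained("openai-community/openai-gpt")
#   >>> model = OpenAIGPTModel.from_pretrained("openai-community/openai-gpt")
#   >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
#   >>> with torch.no_grad():
#   ...     outputs = model(**inputs)
#   >>> outputs.last_hidden_state.shape[-1]  # hidden size (n_embd) of this checkpoint
#   768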
@auto_docstring(
custom_intro="""
OpenAI GPT Model transformer with a language modeling head on top (linear layer with weights tied to the input
embeddings).
"""
)
class OpenAIGPTLMHeadModel(OpenAIGPTPreTrainedModel, GenerationMixin):
_tied_weights_keys = {"lm_head.weight": "transformer.tokens_embed.weight"}
def __init__(self, config):
super().__init__(config)
self.transformer = OpenAIGPTModel(config)
self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
logits_to_keep: Union[int, torch.Tensor] = 0,
**kwargs,
) -> Union[tuple[torch.Tensor], CausalLMOutput]:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
            `labels = input_ids`. Indices are selected in `[-100, 0, ..., config.vocab_size]`. All labels set to `-100`
            are ignored (masked); the loss is only computed for labels in `[0, ..., config.vocab_size]`.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
transformer_outputs = self.transformer(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = transformer_outputs[0]
# Only compute necessary logits, and do not upcast them to float if we are not computing the loss
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_head(hidden_states[:, slice_indices, :])
loss = None
if labels is not None:
loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)
if not return_dict:
output = (logits,) + transformer_outputs[1:]
return ((loss,) + output) if loss is not None else output
return CausalLMOutput(
loss=loss,
logits=logits,
hidden_states=transformer_outputs.hidden_states,
attentions=transformer_outputs.attentions,
)
def prepare_inputs_for_generation(self, input_ids: torch.LongTensor, **kwargs) -> dict[str, Any]:
# Overwritten -- old model with reduced inputs
model_inputs = {"input_ids": input_ids}
# Forward ALL kwargs that are uninitialized (e.g. `use_cache`).
for key, value in kwargs.items():
if key not in model_inputs:
model_inputs[key] = value
return model_inputs
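# Greedy generation sketch for the LM head model above (a minimal example, assuming the public
# "openai-community/openai-gpt" checkpoint; the original GPT vocabulary has no dedicated EOS
# token, so decoding simply stops after `max_new_tokens` steps):
#
#   >>> from transformers import AutoTokenizer, OpenAIGPTLMHeadModel
#   >>> tokenizer = AutoTokenizer.from_pretrained("openai-community/openai-gpt")
#   >>> model = OpenAIGPTLMHeadModel.from_pretrained("openai-community/openai-gpt")
#   >>> inputs = tokenizer("The weather today is", return_tensors="pt")
#   >>> output_ids = model.generate(**inputs, max_new_tokens=20, do_sample=False)
#   >>> print(tokenizer.decode(output_ids[0]))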
@auto_docstring(
custom_intro="""
OpenAI GPT Model transformer with a language modeling and a multiple-choice classification head on top e.g. for
RocStories/SWAG tasks. The two heads are two linear layers. The language modeling head has its weights tied to the
    input embeddings, while the classification head takes as input the hidden state at a specified classification token
    index in the input sequence.
"""
)
class OpenAIGPTDoubleHeadsModel(OpenAIGPTPreTrainedModel):
_tied_weights_keys = {"transformer.tokens_embed.weight": "lm_head.weight"}
def __init__(self, config):
super().__init__(config)
config.num_labels = 1
self.transformer = OpenAIGPTModel(config)
self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
self.multiple_choice_head = OpenAIGPTSequenceSummary(config)
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
mc_token_ids: Optional[torch.LongTensor] = None,
labels: Optional[torch.LongTensor] = None,
mc_labels: Optional[torch.LongTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
**kwargs,
) -> Union[tuple[torch.Tensor], OpenAIGPTDoubleHeadsModelOutput]:
r"""
        mc_token_ids (`torch.LongTensor` of shape `(batch_size, num_choices)`, *optional*, defaults to the index of the last token of the input):
Index of the classification token in each input sequence. Selected in the range `[0, input_ids.size(-1) -
1]`.
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
            `labels = input_ids`. Indices are selected in `[-100, 0, ..., config.vocab_size]`. All labels set to `-100` are
            ignored (masked); the loss is only computed for labels in `[0, ..., config.vocab_size]`.
mc_labels (`torch.LongTensor` of shape `(batch_size)`, *optional*):
Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices]`
where *num_choices* is the size of the second dimension of the input tensors. (see *input_ids* above)
Examples:
```python
>>> from transformers import AutoTokenizer, OpenAIGPTDoubleHeadsModel
>>> import torch
>>> tokenizer = AutoTokenizer.from_pretrained("openai-community/openai-gpt")
>>> model = OpenAIGPTDoubleHeadsModel.from_pretrained("openai-community/openai-gpt")
>>> tokenizer.add_special_tokens(
... {"cls_token": "[CLS]"}
... ) # Add a [CLS] to the vocabulary (we should train it also!)
>>> model.resize_token_embeddings(len(tokenizer))
>>> choices = ["Hello, my dog is cute [CLS]", "Hello, my cat is cute [CLS]"]
>>> input_ids = torch.tensor([tokenizer.encode(s) for s in choices]).unsqueeze(0) # Batch size 1, 2 choices
>>> mc_token_ids = torch.tensor([input_ids.size(-1) - 1, input_ids.size(-1) - 1]).unsqueeze(0) # Batch size 1
>>> outputs = model(input_ids, mc_token_ids=mc_token_ids)
>>> lm_logits = outputs.logits
>>> mc_logits = outputs.mc_logits
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
transformer_outputs = self.transformer(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = transformer_outputs[0]
lm_logits = self.lm_head(hidden_states)
mc_logits = self.multiple_choice_head(hidden_states, mc_token_ids).squeeze(-1)
lm_loss, mc_loss = None, None
if mc_labels is not None:
loss_fct = CrossEntropyLoss()
mc_loss = loss_fct(mc_logits.view(-1, mc_logits.size(-1)), mc_labels.view(-1))
if labels is not None:
shift_logits = lm_logits[..., :-1, :].contiguous()
shift_labels = labels[..., 1:].contiguous()
loss_fct = CrossEntropyLoss()
lm_loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
if not return_dict:
output = (lm_logits, mc_logits) + transformer_outputs[1:]
if mc_loss is not None:
output = (mc_loss,) + output
return ((lm_loss,) + output) if lm_loss is not None else output
return OpenAIGPTDoubleHeadsModelOutput(
loss=lm_loss,
mc_loss=mc_loss,
logits=lm_logits,
mc_logits=mc_logits,
hidden_states=transformer_outputs.hidden_states,
attentions=transformer_outputs.attentions,
)
@auto_docstring(
custom_intro="""
The Original OpenAI GPT Model transformer with a sequence classification head on top (linear layer).
[`OpenAIGPTForSequenceClassification`] uses the last token in order to do the classification, as other causal
models (e.g. GPT-2) do. Since it does classification on the last token, it requires to know the position of the
last token. If a `pad_token_id` is defined in the configuration, it finds the last token that is not a padding
token in each row. If no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since
it cannot guess the padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take
the last value in each row of the batch).
"""
)
class OpenAIGPTForSequenceClassification(OpenAIGPTPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.transformer = OpenAIGPTModel(config)
self.score = nn.Linear(config.n_embd, self.num_labels, bias=False)
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
**kwargs,
) -> Union[tuple[torch.Tensor], SequenceClassifierOutput]:
r"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
transformer_outputs = self.transformer(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = transformer_outputs[0]
logits = self.score(hidden_states)
if input_ids is not None:
batch_size, sequence_length = input_ids.shape[:2]
else:
batch_size, sequence_length = inputs_embeds.shape[:2]
        # Without a padding token we cannot find the last non-padding position, so only batch size 1 is supported.
if self.config.pad_token_id is None and batch_size != 1:
raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
if self.config.pad_token_id is None:
last_non_pad_token = -1
elif input_ids is not None:
# To handle both left- and right- padding, we take the rightmost token that is not equal to pad_token_id
non_pad_mask = (input_ids != self.config.pad_token_id).to(logits.device, torch.int32)
token_indices = torch.arange(input_ids.shape[-1], device=logits.device, dtype=torch.int32)
last_non_pad_token = (token_indices * non_pad_mask).argmax(-1)
else:
last_non_pad_token = -1
logger.warning_once(
f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
"unexpected if using padding tokens in conjunction with `inputs_embeds.`"
)
pooled_logits = logits[torch.arange(batch_size, device=logits.device), last_non_pad_token]
loss = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
self.config.problem_type = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
self.config.problem_type = "single_label_classification"
else:
self.config.problem_type = "multi_label_classification"
if self.config.problem_type == "regression":
loss_fct = MSELoss()
if self.num_labels == 1:
loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(pooled_logits, labels)
elif self.config.problem_type == "single_label_classification":
loss_fct = CrossEntropyLoss()
loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(pooled_logits, labels)
if not return_dict:
output = (pooled_logits,) + transformer_outputs[1:]
return ((loss,) + output) if loss is not None else output
return SequenceClassifierOutput(
loss=loss,
logits=pooled_logits,
hidden_states=transformer_outputs.hidden_states,
attentions=transformer_outputs.attentions,
)
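# Pooling sketch for the sequence classification model above: logits are taken at the last
# non-padding token of each sequence. For example, with pad_token_id = 0 and
#
#   input_ids = [[12, 57, 34,  0,  0],
#                [ 8, 21, 99, 41,  5]]
#
# the rightmost non-pad positions are 2 and 4, so `pooled_logits` is built from logits[0, 2]
# and logits[1, 4]. When no pad_token_id is configured, the last position of every sequence is
# used instead, and batch sizes larger than 1 are rejected.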
__all__ = [
"OpenAIGPTDoubleHeadsModel",
"OpenAIGPTForSequenceClassification",
"OpenAIGPTLMHeadModel",
"OpenAIGPTModel",
"OpenAIGPTPreTrainedModel",
]
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/openai/tokenization_openai.py | src/transformers/models/openai/tokenization_openai.py | # coding=utf-8
# Copyright 2018 The Open AI Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes for OpenAI GPT."""
from typing import Optional, Union
from tokenizers import Tokenizer, decoders, normalizers, pre_tokenizers
from tokenizers.models import BPE
from ...tokenization_utils_tokenizers import TokenizersBackend
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
class OpenAIGPTTokenizer(TokenizersBackend):
"""
Construct a GPT Tokenizer (backed by HuggingFace's *tokenizers* library). Based on Byte-Pair-Encoding with
the following peculiarities:
- lower case all inputs
- uses BERT's BasicTokenizer for pre-BPE tokenization
This tokenizer inherits from [`TokenizersBackend`] which contains most of the main methods. Users should
refer to this superclass for more information regarding those methods.
Args:
vocab_file (`str`, *optional*):
Path to the vocabulary file.
merges_file (`str`, *optional*):
Path to the merges file.
tokenizer_file (`str`, *optional*):
Path to a tokenizers JSON file containing the serialization of a tokenizer.
unk_token (`str`, *optional*, defaults to `"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
vocab (`str` or `dict[str, int]`, *optional*):
Custom vocabulary dictionary. If not provided, a blank vocabulary is initialized.
merges (`str` or `list[str]`, *optional*):
Custom merges list. If not provided, an empty list is used.
"""
vocab_files_names = VOCAB_FILES_NAMES
model_input_names = ["input_ids", "attention_mask"]
model = BPE
def __init__(
self,
vocab: Optional[Union[str, dict[str, int]]] = None,
merges: Optional[Union[str, list[str]]] = None,
unk_token: str = "<unk>",
**kwargs,
):
self._vocab = vocab if vocab is not None else {str(unk_token): 0}
self._merges = merges or []
self._tokenizer = Tokenizer(
BPE(
vocab=self._vocab,
merges=self._merges,
dropout=None,
continuing_subword_prefix="",
end_of_word_suffix="</w>",
fuse_unk=False,
unk_token=str(unk_token),
)
)
# Set normalizer and pre-tokenizer to mimic OpenAI GPT behavior
# OpenAI GPT uses BERT BasicTokenizer with lower_case=True
self._tokenizer.normalizer = normalizers.Sequence(
[
normalizers.NFD(),
normalizers.Lowercase(),
normalizers.StripAccents(),
]
)
self._tokenizer.pre_tokenizer = pre_tokenizers.BertPreTokenizer()
self._tokenizer.decoder = decoders.BPEDecoder(suffix="</w>")
super().__init__(
unk_token=unk_token,
**kwargs,
)
@property
def do_lower_case(self):
return True
__all__ = ["OpenAIGPTTokenizer"]
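# Minimal usage sketch for the tokenizer above, assuming the public
# "openai-community/openai-gpt" checkpoint:
#
#   >>> from transformers import OpenAIGPTTokenizer
#   >>> tokenizer = OpenAIGPTTokenizer.from_pretrained("openai-community/openai-gpt")
#   >>> tokenizer.do_lower_case
#   True
#   >>> tokenizer.tokenize("Hello World") == tokenizer.tokenize("hello world")  # lower-cased before BPE
#   True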
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/sam3/modeling_sam3.py | src/transformers/models/sam3/modeling_sam3.py | # coding=utf-8
# Copyright 2025 The Meta AI Authors and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from collections.abc import Callable, Iterable
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from torch import Tensor
from transformers import CLIPTextModelWithProjection
from ... import initialization as init
from ...activations import ACT2FN
from ...masking_utils import create_bidirectional_mask
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import (
BaseModelOutput,
ModelOutput,
)
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...pytorch_utils import compile_compatible_method_lru_cache
from ...utils import auto_docstring, logging
from ...utils.generic import TransformersKwargs, check_model_inputs
from ..auto import AutoModel
from .configuration_sam3 import (
Sam3Config,
Sam3DETRDecoderConfig,
Sam3DETREncoderConfig,
Sam3GeometryEncoderConfig,
Sam3MaskDecoderConfig,
Sam3VisionConfig,
Sam3ViTConfig,
)
logger = logging.get_logger(__name__)
@dataclass
@auto_docstring
class Sam3VisionEncoderOutput(ModelOutput):
r"""
fpn_hidden_states (`tuple[torch.FloatTensor]`):
Tuple of multi-level FPN feature maps.
fpn_position_encoding (`tuple[torch.FloatTensor]`):
Tuple of position encodings for each FPN level.
hidden_states (`tuple[torch.FloatTensor]`, *optional*):
Tuple of hidden states from all ViT layers.
attentions (`tuple[torch.FloatTensor]`, *optional*):
Tuple of attention weights from all ViT layers.
"""
last_hidden_state: torch.FloatTensor = None
fpn_hidden_states: tuple[torch.FloatTensor, ...] = None
fpn_position_encoding: tuple[torch.FloatTensor, ...] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
@dataclass
@auto_docstring
class Sam3GeometryEncoderOutput(ModelOutput):
r"""
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_prompts, hidden_size)`):
Encoded geometry prompt features (boxes).
attention_mask (`torch.BoolTensor` of shape `(batch_size, num_prompts)`, *optional*):
Attention mask for geometry prompts where True indicates valid positions and False indicates padding.
"""
last_hidden_state: torch.FloatTensor = None
attention_mask: Optional[torch.BoolTensor] = None
@dataclass
@auto_docstring
class Sam3DETREncoderOutput(ModelOutput):
r"""
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Encoded vision features (flattened from multi-level features).
pos_embeds_flattened (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Flattened position embeddings for the vision features.
text_features (`torch.FloatTensor` of shape `(batch_size, text_seq_len, hidden_size)`, *optional*):
Text features (may be pooled after encoder processing).
spatial_shapes (`torch.LongTensor` of shape `(num_levels, 2)`, *optional*):
Spatial shapes (height, width) for each feature pyramid level.
hidden_states (`tuple[torch.FloatTensor]`, *optional*):
Tuple of hidden states from all encoder layers.
attentions (`tuple[torch.FloatTensor]`, *optional*):
Tuple of attention weights from all encoder layers.
"""
last_hidden_state: torch.FloatTensor = None
pos_embeds_flattened: Optional[torch.FloatTensor] = None
text_features: Optional[torch.FloatTensor] = None
spatial_shapes: Optional[torch.LongTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
@dataclass
@auto_docstring
class Sam3DETRDecoderOutput(ModelOutput):
r"""
intermediate_hidden_states (`torch.FloatTensor` of shape `(num_layers, batch_size, num_queries, hidden_size)`):
Decoder hidden states from all layers.
reference_boxes (`torch.FloatTensor` of shape `(num_layers, batch_size, num_queries, 4)`):
Predicted reference boxes from all decoder layers in (cx, cy, w, h) format.
presence_logits (`torch.FloatTensor` of shape `(num_layers, batch_size, 1)`):
Presence logits from all decoder layers indicating object presence confidence.
hidden_states (`tuple[torch.FloatTensor]`, *optional*):
Tuple of hidden states from all decoder layers.
attentions (`tuple[torch.FloatTensor]`, *optional*):
Tuple of attention weights from all decoder layers (self-attention and cross-attention).
"""
intermediate_hidden_states: torch.FloatTensor = None
reference_boxes: torch.FloatTensor = None
presence_logits: torch.FloatTensor = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
@dataclass
@auto_docstring
class Sam3MaskDecoderOutput(ModelOutput):
r"""
pred_masks (`torch.FloatTensor` of shape `(batch_size, num_queries, height, width)`):
Predicted segmentation masks for each query.
semantic_seg (`torch.FloatTensor` of shape `(batch_size, 1, height, width)`, *optional*):
Semantic segmentation output.
attentions (`tuple[torch.FloatTensor]`, *optional*):
Tuple of attention weights from mask decoder cross-attention layers.
"""
pred_masks: torch.FloatTensor = None
semantic_seg: Optional[torch.FloatTensor] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
@dataclass
@auto_docstring
class Sam3ImageSegmentationOutput(ModelOutput):
r"""
pred_masks (`torch.FloatTensor` of shape `(batch_size, num_queries, height, width)`):
Predicted segmentation masks for each query.
pred_boxes (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`):
Predicted bounding boxes in (x1, y1, x2, y2) format.
pred_logits (`torch.FloatTensor` of shape `(batch_size, num_queries)`, *optional*):
Classification confidence scores for each query, computed via dot product between
decoder query features and text features.
presence_logits (`torch.FloatTensor` of shape `(batch_size, 1)`, *optional*):
Presence logits from the DETR decoder presence token (last layer only). These indicate whether objects
are present in the scene. Can be used to compute final scores by multiplying with pred_logits:
`final_scores = pred_logits.sigmoid() * presence_logits.sigmoid()`.
semantic_seg (`torch.FloatTensor` of shape `(batch_size, 1, height, width)`, *optional*):
Semantic segmentation output.
decoder_hidden_states (`tuple[torch.FloatTensor]`, *optional*):
Tuple of hidden states from all DETR decoder layers. Each tensor has shape `(batch_size, num_queries, hidden_size)`.
decoder_reference_boxes (`torch.FloatTensor` of shape `(num_layers, batch_size, num_queries, 4)`, *optional*):
Reference boxes from all DETR decoder layers.
encoder_hidden_states (`tuple[torch.FloatTensor]`, *optional*):
Tuple of hidden states from all DETR encoder layers.
vision_hidden_states (`tuple[torch.FloatTensor]`, *optional*):
Tuple of hidden states from all vision encoder (ViT) layers.
vision_attentions (`tuple[torch.FloatTensor]`, *optional*):
Attention weights from vision encoder (ViT) layers.
detr_encoder_attentions (`tuple[torch.FloatTensor]`, *optional*):
Attention weights from DETR encoder layers.
detr_decoder_attentions (`tuple[torch.FloatTensor]`, *optional*):
Attention weights from DETR decoder layers (self-attention and cross-attention).
mask_decoder_attentions (`tuple[torch.FloatTensor]`, *optional*):
Attention weights from mask decoder layers.
"""
pred_masks: torch.FloatTensor = None
pred_boxes: torch.FloatTensor = None
pred_logits: Optional[torch.FloatTensor] = None
presence_logits: Optional[torch.FloatTensor] = None
semantic_seg: Optional[torch.FloatTensor] = None
decoder_hidden_states: Optional[tuple[torch.FloatTensor]] = None
decoder_reference_boxes: Optional[torch.FloatTensor] = None
encoder_hidden_states: Optional[tuple[torch.FloatTensor]] = None
vision_hidden_states: Optional[tuple[torch.FloatTensor]] = None
vision_attentions: Optional[tuple[torch.FloatTensor]] = None
detr_encoder_attentions: Optional[tuple[torch.FloatTensor]] = None
detr_decoder_attentions: Optional[tuple[torch.FloatTensor]] = None
mask_decoder_attentions: Optional[tuple[torch.FloatTensor]] = None
def inverse_sigmoid(x: torch.Tensor, eps: float = 1e-3) -> torch.Tensor:
"""The inverse function for sigmoid activation function."""
x = x.clamp(min=0, max=1)
x1 = x.clamp(min=eps)
x2 = (1 - x).clamp(min=eps)
return torch.log(x1 / x2)
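# Worked example (values are illustrative): this recovers the logit of a probability, e.g.
#   inverse_sigmoid(torch.tensor(0.73))  # -> log(0.73 / 0.27), about 0.99
# The two clamps keep the ratio finite, so inputs of exactly 0 or 1 map to roughly -6.9 / +6.9
# for the default eps of 1e-3 instead of -inf / +inf.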
def concat_padded_sequences(seq1, mask1, seq2, mask2, return_index: bool = False):
"""
Concatenates two right-padded sequences, such that the resulting sequence
is contiguous and also right-padded.
Tensors are batch-first, masks are batch-first with True=valid, False=padding.
Args:
seq1: A tensor of shape (batch_size, seq1_length, hidden_size).
mask1: A tensor of shape (batch_size, seq1_length) with True=valid, False=padding.
seq2: A tensor of shape (batch_size, seq2_length, hidden_size).
mask2: A tensor of shape (batch_size, seq2_length) with True=valid, False=padding.
return_index: If True, also returns the indices at which the elements of seq2 were placed
in the concatenated sequence. These can be used to retrieve the seq2 elements afterwards.
Returns:
A tuple (concatenated_sequence, concatenated_mask) if return_index is False,
otherwise (concatenated_sequence, concatenated_mask, index).
The concatenated_mask uses True=valid, False=padding convention.
"""
batch_size, seq1_length, hidden_size = seq1.shape
batch_size2, seq2_length, hidden_size2 = seq2.shape
assert batch_size == batch_size2 == mask1.size(0) == mask2.size(0)
assert hidden_size == hidden_size2
assert seq1_length == mask1.size(1)
assert seq2_length == mask2.size(1)
actual_seq1_lengths = mask1.sum(dim=-1)
actual_seq2_lengths = mask2.sum(dim=-1)
final_lengths = actual_seq1_lengths + actual_seq2_lengths
max_length = seq1_length + seq2_length
concatenated_mask = (
torch.arange(max_length, device=seq2.device)[None].repeat(batch_size, 1) < final_lengths[:, None]
)
concatenated_sequence = torch.zeros((batch_size, max_length, hidden_size), device=seq2.device, dtype=seq2.dtype)
concatenated_sequence[:, :seq1_length, :] = seq1
# Shift seq2 elements to start at the end of valid seq1
index = torch.arange(seq2_length, device=seq2.device)[None].repeat(batch_size, 1)
index = index + actual_seq1_lengths[:, None]
# Scatter seq2 into the right positions
concatenated_sequence = concatenated_sequence.scatter(1, index[:, :, None].expand(-1, -1, hidden_size), seq2)
if return_index:
return concatenated_sequence, concatenated_mask, index
return concatenated_sequence, concatenated_mask
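# Illustrative example (shapes and values are hypothetical):
#   seq1 = torch.randn(1, 3, 8); mask1 = torch.tensor([[True, True, False]])  # 2 valid tokens
#   seq2 = torch.randn(1, 2, 8); mask2 = torch.tensor([[True, True]])         # 2 valid tokens
#   out, out_mask = concat_padded_sequences(seq1, mask1, seq2, mask2)
# `out` has shape (1, 5, 8): positions 0-1 hold seq1's valid tokens, positions 2-3 hold seq2's
# tokens (overwriting seq1's padding slot), and out_mask == [[True, True, True, True, False]].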
def box_cxcywh_to_xyxy(x):
"""Convert boxes from (cx, cy, w, h) format to (x1, y1, x2, y2) format."""
x_c, y_c, w, h = x.unbind(-1)
b = [(x_c - 0.5 * w), (y_c - 0.5 * h), (x_c + 0.5 * w), (y_c + 0.5 * h)]
return torch.stack(b, dim=-1)
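# Worked example: a box centered at (0.5, 0.5) with width 0.2 and height 0.4,
#   box_cxcywh_to_xyxy(torch.tensor([0.5, 0.5, 0.2, 0.4]))  # -> tensor([0.4, 0.3, 0.6, 0.7])
# i.e. the (x1, y1, x2, y2) corner coordinates.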
class Sam3MLP(nn.Module):
def __init__(self, config: Sam3ViTConfig):
super().__init__()
self.config = config
self.activation_fn = ACT2FN[config.hidden_act]
self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.fc1(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.activation_fn(hidden_states)
hidden_states = self.fc2(hidden_states)
return hidden_states
def eager_attention_forward(
module: nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: Optional[torch.Tensor],
scaling: Optional[float] = None,
dropout: float = 0.0,
**kwargs: Unpack[TransformersKwargs],
):
if scaling is None:
scaling = query.size(-1) ** -0.5
# Take the dot product between "query" and "key" to get the raw attention scores.
attn_weights = torch.matmul(query, key.transpose(2, 3)) * scaling
if attention_mask is not None:
attention_mask = attention_mask[:, :, :, : key.shape[-2]]
attn_weights = attn_weights + attention_mask
attn_weights = nn.functional.softmax(attn_weights, dim=-1)
attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
attn_output = torch.matmul(attn_weights, value)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, attn_weights
class Sam3Attention(nn.Module):
"""
Multi-head attention.
Handles standard [batch_size, seq_len, hidden_size] tensors.
"""
def __init__(self, config):
super().__init__()
self.config = config
self.hidden_size = config.hidden_size
self.num_attention_heads = config.num_attention_heads
self.head_dim = self.hidden_size // config.num_attention_heads
self.scaling = self.head_dim**-0.5
self.is_causal = False
self.q_proj = nn.Linear(self.hidden_size, self.hidden_size)
self.k_proj = nn.Linear(self.hidden_size, self.hidden_size)
self.v_proj = nn.Linear(self.hidden_size, self.hidden_size)
self.o_proj = nn.Linear(self.hidden_size, self.hidden_size)
def forward(
self,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> tuple[torch.Tensor, torch.Tensor]:
"""
Args:
query: [batch_size, query_len, hidden_size]
key: [batch_size, key_len, hidden_size]
value: [batch_size, value_len, hidden_size]
attention_mask: [batch_size, num_heads, query_len, key_len] or broadcastable
Returns:
Tuple of (output, attention_weights)
output: [batch_size, query_len, hidden_size]
attention_weights: [batch_size, num_heads, query_len, key_len]
"""
batch_size = query.shape[0]
query_len = query.shape[1]
key_len = key.shape[1]
query = self.q_proj(query).view(batch_size, query_len, self.num_attention_heads, self.head_dim).transpose(1, 2)
key = self.k_proj(key).view(batch_size, key_len, self.num_attention_heads, self.head_dim).transpose(1, 2)
value = self.v_proj(value).view(batch_size, key_len, self.num_attention_heads, self.head_dim).transpose(1, 2)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
if (
"flash" in self.config._attn_implementation
and attention_mask is not None
and attention_mask.dtype != torch.bool
):
# Relative position bias tensors are represented as float masks and are incompatible with Flash Attention
# Fallback to SDPA for this call only so the rest of the model can still benefit from FA
attention_interface = ALL_ATTENTION_FUNCTIONS["sdpa"]
logger.warning_once(
"Sam3Attention: falling back to SDPA for relative-position cross-attention because "
"Flash Attention does not support additive bias masks."
)
attn_output, attn_weights = attention_interface(
self,
query,
key,
value,
attention_mask=attention_mask,
dropout=0.0,
scaling=self.scaling,
is_causal=self.is_causal,
**kwargs,
)
attn_output = attn_output.reshape(batch_size, query_len, self.num_attention_heads * self.head_dim).contiguous()
attn_output = self.o_proj(attn_output)
return attn_output, attn_weights
class Sam3ViTRotaryEmbedding(nn.Module):
"""
Vision Rotary Position Embedding for SAM3, following transformers library standards.
Supports 2D (axial) rotary embeddings for spatial dimensions.
"""
def __init__(self, config: Sam3ViTConfig, end_x: int, end_y: int, scale: float = 1.0):
super().__init__()
dim = config.hidden_size // config.num_attention_heads
# Ensure the head dimension is divisible by 4 for proper axial (x/y) splitting
if dim % 4 != 0:
raise ValueError("Dimension must be divisible by 4 for axial RoPE")
self.end_x, self.end_y = end_x, end_y
self.dim = dim
self.rope_theta = config.rope_theta
self.scale = scale
freqs = 1.0 / (config.rope_theta ** (torch.arange(0, dim, 4)[: (dim // 4)].float() / dim))
flattened_indices = torch.arange(end_x * end_y, dtype=torch.long)
x_positions = (flattened_indices % end_x) * scale
y_positions = torch.div(flattened_indices, end_x, rounding_mode="floor") * scale
freqs_x = torch.outer(x_positions, freqs).float()
freqs_y = torch.outer(y_positions, freqs).float()
inv_freq = torch.cat([freqs_x, freqs_y], dim=-1)
inv_freq = inv_freq.repeat_interleave(2, dim=-1)
# directly register the cos and sin embeddings as we have a fixed feature shape
self.register_buffer("rope_embeddings_cos", inv_freq.cos(), persistent=False)
self.register_buffer("rope_embeddings_sin", inv_freq.sin(), persistent=False)
@torch.no_grad()
def forward(self) -> tuple[torch.Tensor, torch.Tensor]:
# As the feature map size is fixed for each stage, we can just return the pre-computed embeddings.
return self.rope_embeddings_cos, self.rope_embeddings_sin
def rotate_pairwise(x):
"""
Pairwise rotation of the hidden dims of the input. Different from the Llama half-tensor rotation.
This is an optimized version of the following more explicit implementation:
```python
x_rotated = torch.zeros_like(x, dtype=x.dtype, device=x.device)
x_rotated[..., ::2] = -x[..., 1::2]
x_rotated[..., 1::2] = x[..., ::2]
return x_rotated
```
"""
x = x.view(*x.shape[:-1], -1, 2)
x1, x2 = x.unbind(dim=-1)
x = torch.stack((-x2, x1), dim=-1)
return x.flatten(start_dim=-2)
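# Worked example: each consecutive pair (x_{2i}, x_{2i+1}) is rotated by 90 degrees, so
#   rotate_pairwise(torch.tensor([1., 2., 3., 4.]))  # -> tensor([-2., 1., -4., 3.])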
def apply_rotary_pos_emb_2d(
q: torch.Tensor,
k: torch.Tensor,
cos: torch.Tensor,
sin: torch.Tensor,
) -> tuple[torch.Tensor, torch.Tensor]:
"""
Apply rotary position embedding to query and key tensors for self-attention.
Args:
q: Query tensor of shape (batch_size, num_windows, seq_len, num_heads, head_dim)
k: Key tensor of shape (batch_size, num_windows, seq_len, num_heads, head_dim)
cos: Cosine position embedding of shape (seq_len, head_dim)
sin: Sine position embedding of shape (seq_len, head_dim)
Returns:
Rotated (q, k) tensors
"""
q_embed = q.float()
q_embed = (q_embed * cos) + (rotate_pairwise(q_embed) * sin)
k_embed = k.float()
k_embed = (k_embed * cos) + (rotate_pairwise(k_embed) * sin)
return q_embed.type_as(q), k_embed.type_as(k)
class Sam3ViTRoPEAttention(nn.Module):
"""Self-attention with rotary position encoding."""
def __init__(self, config: Sam3ViTConfig):
super().__init__()
self.config = config
self.hidden_size = config.hidden_size
self.num_attention_heads = config.num_attention_heads
self.head_dim = self.hidden_size // config.num_attention_heads
self.scaling = self.head_dim**-0.5
self.attention_dropout = config.attention_dropout
self.is_causal = False
self.q_proj = nn.Linear(self.hidden_size, self.hidden_size)
self.k_proj = nn.Linear(self.hidden_size, self.hidden_size)
self.v_proj = nn.Linear(self.hidden_size, self.hidden_size)
self.o_proj = nn.Linear(self.hidden_size, self.hidden_size)
def forward(
self,
hidden_states: torch.Tensor,
position_embeddings: tuple[torch.Tensor, torch.Tensor],
**kwargs: Unpack[TransformersKwargs],
) -> Tensor:
batch_size, height, width, _ = hidden_states.shape
seq_len = height * width
new_shape = (batch_size, seq_len, self.num_attention_heads, self.head_dim)
query = self.q_proj(hidden_states).view(*new_shape).transpose(1, 2)
key = self.k_proj(hidden_states).view(*new_shape).transpose(1, 2)
value = self.v_proj(hidden_states).view(*new_shape).transpose(1, 2)
cos, sin = position_embeddings
query, key = apply_rotary_pos_emb_2d(query, key, cos=cos, sin=sin)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
query,
key,
value,
attention_mask=None,
dropout=0.0 if not self.training else self.attention_dropout,
scaling=self.scaling,
is_causal=self.is_causal,
**kwargs,
)
attn_output = attn_output.reshape(batch_size, height, width, -1).contiguous()
attn_output = self.o_proj(attn_output)
return attn_output, attn_weights
class Sam3ViTPatchEmbeddings(nn.Module):
"""
This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial
`hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a
Transformer.
"""
def __init__(self, config: Sam3ViTConfig):
super().__init__()
image_size, patch_size = config.pretrain_image_size, config.patch_size
num_channels, hidden_size = config.num_channels, config.hidden_size
image_size = image_size if isinstance(image_size, Iterable) else (image_size, image_size)
patch_size = patch_size if isinstance(patch_size, Iterable) else (patch_size, patch_size)
num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.num_patches = num_patches
self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=patch_size, bias=False)
def forward(self, pixel_values: torch.Tensor) -> torch.Tensor:
embeddings = self.projection(pixel_values.to(self.projection.weight.dtype)).flatten(2).transpose(1, 2)
return embeddings
class Sam3ViTEmbeddings(nn.Module):
"""
Construct the patch embeddings and position embeddings for SAM3 ViT.
Position embeddings are tiled (not interpolated) when resizing to match different input sizes.
"""
def __init__(self, config: Sam3ViTConfig):
super().__init__()
self.patch_embeddings = Sam3ViTPatchEmbeddings(config)
num_patches = self.patch_embeddings.num_patches
self.position_embeddings = nn.Parameter(
torch.randn(1, num_patches, config.hidden_size)
) # !Remove cls token in convert weights!
self.dropout = nn.Dropout(config.hidden_dropout)
self.patch_size = config.patch_size
def _tile_position_embeddings(
self,
position_embeddings: torch.Tensor,
height: int,
width: int,
) -> torch.Tensor:
"""
Tile position embeddings to match target spatial dimensions.
Args:
position_embeddings: Shape [1, num_pretrain_patches, hidden_size]
height: Target height in patches
width: Target width in patches
Returns:
Shape [1, height * width, hidden_size]
"""
pretrain_size = int(position_embeddings.shape[1] ** 0.5)
# Skip tiling if sizes match (but always tile during tracing for consistent graph)
if not torch.jit.is_tracing() and pretrain_size == height and pretrain_size == width:
return position_embeddings.reshape(1, height * width, -1)
# Tile position embeddings to match target spatial dimensions
hidden_size = position_embeddings.shape[-1]
pos_embed = position_embeddings.reshape(1, pretrain_size, pretrain_size, hidden_size).permute(0, 3, 1, 2)
repeat_h = height // pretrain_size + 1
repeat_w = width // pretrain_size + 1
pos_embed = pos_embed.tile([1, 1, repeat_h, repeat_w])[:, :, :height, :width]
return pos_embed.permute(0, 2, 3, 1).reshape(1, height * width, hidden_size)
def forward(
self,
pixel_values: torch.Tensor,
interpolate_pos_encoding: bool = False,
) -> torch.Tensor:
height, width = pixel_values.shape[-2:]
embeddings = self.patch_embeddings(pixel_values)
# Calculate spatial dimensions in patches
height_patches = height // self.patch_size
width_patches = width // self.patch_size
position_embeddings = self._tile_position_embeddings(
self.position_embeddings,
height_patches,
width_patches,
)
embeddings = embeddings + position_embeddings
embeddings = self.dropout(embeddings)
return embeddings
def window_partition(hidden_state, window_size):
"""
Partition into non-overlapping windows with padding if needed.
Args:
hidden_state (`torch.Tensor`):
Input tokens with [batch_size, height, width, num_channels].
window_size (`int`):
Window size.
Returns:
`tuple(torch.FloatTensor)` comprising various elements:
- windows: windows after partition with [batch_size * num_windows, window_size, window_size, num_channels].
- (padded_height, padded_width): padded height and width before partition
"""
batch_size, height, width, num_channels = hidden_state.shape
pad_height = (window_size - height % window_size) % window_size
pad_width = (window_size - width % window_size) % window_size
# Noop in case pad_width == 0 and pad_height == 0.
hidden_state = nn.functional.pad(hidden_state, (0, 0, 0, pad_width, 0, pad_height))
padded_height, padded_width = height + pad_height, width + pad_width
hidden_state = hidden_state.view(
batch_size, padded_height // window_size, window_size, padded_width // window_size, window_size, num_channels
)
windows = hidden_state.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, num_channels)
return windows, (padded_height, padded_width)
def window_unpartition(windows, window_size, pad_height_width, height_width):
"""
Window unpartition back into the original sequence layout, removing any padding.
Args:
windows (`torch.Tensor`):
Input tokens with [batch_size * num_windows, window_size, window_size, num_channels].
window_size (`int`):
Window size.
pad_height_width (`tuple[int]`):
Padded height and width (padded_height, padded_width).
height_width (`tuple[int]`):
Original height and width before padding.
Returns:
hidden_state: unpartitioned sequences with [batch_size, height, width, num_channels].
"""
padded_height, padded_width = pad_height_width
height, width = height_width
batch_size = windows.shape[0] // (padded_height * padded_width // window_size // window_size)
hidden_state = windows.view(
batch_size, padded_height // window_size, padded_width // window_size, window_size, window_size, -1
)
hidden_state = hidden_state.permute(0, 1, 3, 2, 4, 5).contiguous()
hidden_state = hidden_state.view(batch_size, padded_height, padded_width, -1)
# We always have height <= padded_height and width <= padded_width
hidden_state = hidden_state[:, :height, :width, :].contiguous()
return hidden_state
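# Illustrative round trip (sizes are hypothetical): a (1, 70, 70, 256) feature map partitioned with
# window_size=24 is padded to 72x72 and split into a 3x3 grid of windows:
#   windows, pad_hw = window_partition(x, 24)   # windows: (9, 24, 24, 256), pad_hw == (72, 72)
#   y = window_unpartition(windows, 24, pad_hw, (70, 70))
# `y` recovers the original (1, 70, 70, 256) tensor with the padding rows/columns stripped.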
class Sam3ViTLayerScale(nn.Module):
def __init__(self, config) -> None:
super().__init__()
self.lambda1 = nn.Parameter(config.layer_scale_init_value * torch.ones(config.hidden_size))
def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
return hidden_state * self.lambda1
class Sam3ViTLayer(GradientCheckpointingLayer):
"""Vision Transformer layer with rotary position embeddings and optional windowed attention."""
def __init__(self, config: Sam3ViTConfig, window_size: int = 0) -> None:
super().__init__()
hidden_size = config.hidden_size
image_size = config.image_size
image_size = image_size if isinstance(image_size, (list, tuple)) else (image_size, image_size)
patch_size = config.patch_size
patch_size = patch_size if isinstance(patch_size, (list, tuple)) else (patch_size, patch_size)
input_size = (image_size[0] // patch_size[0], image_size[1] // patch_size[1])
self.layer_norm1 = nn.LayerNorm(hidden_size, eps=config.layer_norm_eps)
rotary_input_size = input_size if window_size == 0 else (window_size, window_size)
rotary_scale = config.window_size / rotary_input_size[0]
self.rotary_emb = Sam3ViTRotaryEmbedding(
config, end_x=rotary_input_size[0], end_y=rotary_input_size[1], scale=rotary_scale
)
self.attention = Sam3ViTRoPEAttention(config)
self.layer_norm2 = nn.LayerNorm(hidden_size, eps=config.layer_norm_eps)
self.mlp = Sam3MLP(config)
self.dropout = nn.Dropout(config.hidden_dropout)
self.window_size = window_size
def forward(
self,
hidden_states: torch.Tensor,
**kwargs: Unpack[TransformersKwargs],
) -> torch.Tensor:
residual = hidden_states
hidden_states = self.layer_norm1(hidden_states)
if self.window_size > 0:
height, width = hidden_states.shape[1], hidden_states.shape[2]
# Partition into non-overlapping windows for efficient attention
hidden_states, pad_height_width = window_partition(hidden_states, self.window_size)
position_embeddings = self.rotary_emb()
hidden_states, _ = self.attention(hidden_states, position_embeddings, **kwargs)
if self.window_size > 0:
# Reverse window partition to restore original spatial layout
hidden_states = window_unpartition(hidden_states, self.window_size, pad_height_width, (height, width))
hidden_states = residual + hidden_states
residual = hidden_states
hidden_states = self.layer_norm2(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = residual + self.dropout(hidden_states)
return hidden_states
@auto_docstring
class Sam3PreTrainedModel(PreTrainedModel):
config_class = Sam3Config
base_model_prefix = "sam3"
main_input_name = "pixel_values"
input_modalities = ["image", "text"]
_supports_sdpa = True
_supports_flash_attn = True
_supports_flex_attn = True
_supports_attention_backend = True
def _init_weights(self, module):
super()._init_weights(module)
if isinstance(module, Sam3ViTEmbeddings):
init.normal_(module.position_embeddings, mean=0.0, std=self.config.initializer_range)
elif isinstance(module, Sam3ViTRotaryEmbedding):
end_x, end_y = module.end_x, module.end_y
dim = module.dim
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | true |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/sam3/configuration_sam3.py | src/transformers/models/sam3/configuration_sam3.py | # coding=utf-8
# Copyright 2025 Meta AI and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""SAM3 model configuration"""
from transformers import CLIPTextConfig
from ...configuration_utils import PreTrainedConfig
from ..auto import CONFIG_MAPPING, AutoConfig
class Sam3ViTConfig(PreTrainedConfig):
r"""
Configuration class for SAM3 Vision Encoder (ViT backbone).
Instantiating a configuration with the defaults will yield a similar configuration to that of SAM 3
[facebook/sam3](https://huggingface.co/facebook/sam3) architecture.
Args:
hidden_size (`int`, *optional*, defaults to 1024):
Dimensionality of the encoder layers.
intermediate_size (`int`, *optional*, defaults to 4736):
Dimensionality of the feedforward (MLP) layers.
num_hidden_layers (`int`, *optional*, defaults to 32):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer.
num_channels (`int`, *optional*, defaults to 3):
Number of input image channels.
image_size (`int`, *optional*, defaults to 1008):
Expected input image size.
patch_size (`int`, *optional*, defaults to 14):
Size of image patches.
hidden_act (`str`, *optional*, defaults to `"gelu"`):
The non-linear activation function.
layer_norm_eps (`float`, *optional*, defaults to 1e-06):
The epsilon used by layer normalization layers.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for attention probabilities.
rope_theta (`float`, *optional*, defaults to 10000.0):
Base frequency for RoPE.
window_size (`int`, *optional*, defaults to 24):
Window size for windowed attention.
global_attn_indexes (`list[int]`, *optional*, defaults to `[7, 15, 23, 31]`):
Indexes of layers with global attention.
layer_scale_init_value (`float`, *optional*):
Initial value for layer scale. None means no layer scale.
pretrain_image_size (`int`, *optional*, defaults to 336):
Pretrained model image size for position embedding initialization.
hidden_dropout (`float`, *optional*, defaults to 0.0):
Dropout probability for hidden states.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing weight matrices.
"""
base_config_key = "backbone_config"
model_type = "sam3_vit_model"
def __init__(
self,
hidden_size=1024,
intermediate_size=4736,
num_hidden_layers=32,
num_attention_heads=16,
num_channels=3,
image_size=1008,
patch_size=14,
hidden_act="gelu",
layer_norm_eps=1e-6,
attention_dropout=0.0,
rope_theta=10000.0,
window_size=24,
global_attn_indexes=None,
layer_scale_init_value=None,
pretrain_image_size=336,
hidden_dropout=0.0,
initializer_range=0.02,
**kwargs,
):
super().__init__(**kwargs)
if global_attn_indexes is None:
global_attn_indexes = [7, 15, 23, 31]
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.num_channels = num_channels
self.image_size = image_size
self.patch_size = patch_size
self.hidden_act = hidden_act
self.layer_norm_eps = layer_norm_eps
self.attention_dropout = attention_dropout
self.rope_theta = rope_theta
self.window_size = window_size
self.global_attn_indexes = global_attn_indexes
self.layer_scale_init_value = layer_scale_init_value
self.pretrain_image_size = pretrain_image_size
self.hidden_dropout = hidden_dropout
self.initializer_range = initializer_range
class Sam3VisionConfig(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`Sam3VisionModel`]. It is used to instantiate a SAM
vision encoder according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of SAM 3
[facebook/sam3](https://huggingface.co/facebook/sam3) architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
backbone_config (`Union[dict, "PreTrainedConfig"]`, *optional*, defaults to `Sam3ViTConfig()`):
Configuration for the vision backbone. This is used to instantiate the backbone using
`AutoModel.from_config`.
fpn_hidden_size (`int`, *optional*, defaults to 256):
The hidden dimension of the FPN.
backbone_feature_sizes (`list[list[int]]`, *optional*, defaults to `[[288, 288], [144, 144], [72, 72]]`):
The spatial sizes (height, width) of the feature maps from the backbone at different scales.
scale_factors (`list[float]`, *optional*, defaults to `[4.0, 2.0, 1.0, 0.5]`):
Scale factors for FPN multi-scale features. List of scaling factors for each FPN level.
hidden_act (`str`, *optional*, defaults to `"gelu"`):
The non-linear activation function in the neck.
layer_norm_eps (`float`, *optional*, defaults to 1e-06):
The epsilon for the layer normalization.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
"""
base_config_key = "vision_config"
model_type = "sam3_vision_model"
sub_configs = {
"backbone_config": AutoConfig,
}
def __init__(
self,
backbone_config=None,
fpn_hidden_size=256,
backbone_feature_sizes=None,
scale_factors=None,
hidden_act="gelu",
layer_norm_eps=1e-6,
initializer_range=0.02,
**kwargs,
):
scale_factors = [4.0, 2.0, 1.0, 0.5] if scale_factors is None else scale_factors
if backbone_feature_sizes is None:
backbone_feature_sizes = [[288, 288], [144, 144], [72, 72]]
if isinstance(backbone_config, dict):
backbone_config["model_type"] = backbone_config.get("model_type", "sam3_vit_model")
backbone_config = CONFIG_MAPPING[backbone_config["model_type"]](**backbone_config)
elif backbone_config is None:
backbone_config = CONFIG_MAPPING["sam3_vit_model"]()
self.backbone_config = backbone_config
# Neck
self.fpn_hidden_size = fpn_hidden_size
self.scale_factors = scale_factors
self.backbone_feature_sizes = backbone_feature_sizes
self.hidden_act = hidden_act
self.layer_norm_eps = layer_norm_eps
self.initializer_range = initializer_range
super().__init__(**kwargs)
@property
def image_size(self):
"""Image size for the vision encoder."""
return self.backbone_config.image_size
@image_size.setter
def image_size(self, value):
"""Set the image size and propagate to backbone."""
self.backbone_config.image_size = value
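# A minimal sketch (values are hypothetical): `backbone_config` can be passed either as a config object
# or as a plain dict, in which case its "model_type" key (defaulting to "sam3_vit_model") selects the
# backbone config class from CONFIG_MAPPING:
#   vision_config = Sam3VisionConfig(
#       backbone_config={"model_type": "sam3_vit_model", "hidden_size": 768, "num_hidden_layers": 12},
#       fpn_hidden_size=256,
#   )
#   vision_config.backbone_config.hidden_size  # -> 768
#   vision_config.image_size                   # proxied to backbone_config.image_size (1008 by default)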
class Sam3GeometryEncoderConfig(PreTrainedConfig):
r"""
Configuration class for SAM3 Geometry Encoder.
Args:
hidden_size (`int`, *optional*, defaults to 256):
Dimensionality of the encoder layers.
num_layers (`int`, *optional*, defaults to 3):
Number of transformer encoder layers for processing geometry prompts.
num_attention_heads (`int`, *optional*, defaults to 8):
Number of attention heads in the geometry encoder.
intermediate_size (`int`, *optional*, defaults to 2048):
Dimensionality of the feedforward layers.
dropout (`float`, *optional*, defaults to 0.1):
Dropout probability.
hidden_act (`str`, *optional*, defaults to `"relu"`):
Activation function in FFN.
hidden_dropout (`float`, *optional*, defaults to 0.0):
Dropout probability for hidden states.
layer_norm_eps (`float`, *optional*, defaults to 1e-06):
Epsilon for layer normalization.
roi_size (`int`, *optional*, defaults to 7):
ROI size for box pooling operations.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing weight matrices.
"""
model_type = "sam3_geometry_encoder"
def __init__(
self,
hidden_size=256,
num_layers=3,
num_attention_heads=8,
intermediate_size=2048,
dropout=0.1,
hidden_act="relu",
hidden_dropout=0.0,
layer_norm_eps=1e-6,
roi_size=7,
initializer_range=0.02,
**kwargs,
):
super().__init__(**kwargs)
self.hidden_size = hidden_size
self.num_layers = num_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.dropout = dropout
self.hidden_act = hidden_act
self.hidden_dropout = hidden_dropout
self.layer_norm_eps = layer_norm_eps
self.roi_size = roi_size
self.initializer_range = initializer_range
class Sam3DETREncoderConfig(PreTrainedConfig):
r"""
Configuration class for SAM3 DETR Encoder (vision-text fusion encoder).
Args:
hidden_size (`int`, *optional*, defaults to 256):
Dimensionality of the encoder layers.
num_layers (`int`, *optional*, defaults to 6):
Number of encoder layers.
num_attention_heads (`int`, *optional*, defaults to 8):
Number of attention heads.
intermediate_size (`int`, *optional*, defaults to 2048):
Dimensionality of the feedforward layers.
dropout (`float`, *optional*, defaults to 0.1):
Dropout probability.
hidden_act (`str`, *optional*, defaults to `"relu"`):
Activation function in FFN.
hidden_dropout (`float`, *optional*, defaults to 0.0):
Dropout probability for hidden states.
layer_norm_eps (`float`, *optional*, defaults to 1e-06):
Epsilon for layer normalization.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing weight matrices.
"""
model_type = "sam3_detr_encoder"
def __init__(
self,
hidden_size=256,
num_layers=6,
num_attention_heads=8,
intermediate_size=2048,
dropout=0.1,
hidden_act="relu",
hidden_dropout=0.0,
layer_norm_eps=1e-6,
initializer_range=0.02,
**kwargs,
):
super().__init__(**kwargs)
self.hidden_size = hidden_size
self.num_layers = num_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.dropout = dropout
self.hidden_act = hidden_act
self.hidden_dropout = hidden_dropout
self.layer_norm_eps = layer_norm_eps
self.initializer_range = initializer_range
class Sam3DETRDecoderConfig(PreTrainedConfig):
r"""
Configuration class for SAM3 DETR Decoder (object query decoder).
Args:
hidden_size (`int`, *optional*, defaults to 256):
Dimensionality of the decoder layers.
num_layers (`int`, *optional*, defaults to 6):
Number of decoder layers.
num_queries (`int`, *optional*, defaults to 200):
Number of object queries.
num_attention_heads (`int`, *optional*, defaults to 8):
Number of attention heads.
intermediate_size (`int`, *optional*, defaults to 2048):
Dimensionality of the feedforward layers.
dropout (`float`, *optional*, defaults to 0.1):
Dropout probability.
hidden_act (`str`, *optional*, defaults to `"relu"`):
Activation function in FFN.
hidden_dropout (`float`, *optional*, defaults to 0.0):
Dropout probability for hidden states.
layer_norm_eps (`float`, *optional*, defaults to 1e-06):
Epsilon for layer normalization.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing weight matrices.
"""
model_type = "sam3_detr_decoder"
def __init__(
self,
hidden_size=256,
num_layers=6,
num_queries=200,
num_attention_heads=8,
intermediate_size=2048,
dropout=0.1,
hidden_act="relu",
hidden_dropout=0.0,
layer_norm_eps=1e-6,
initializer_range=0.02,
**kwargs,
):
super().__init__(**kwargs)
self.hidden_size = hidden_size
self.num_layers = num_layers
self.num_queries = num_queries
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.dropout = dropout
self.hidden_act = hidden_act
self.hidden_dropout = hidden_dropout
self.layer_norm_eps = layer_norm_eps
self.initializer_range = initializer_range
class Sam3MaskDecoderConfig(PreTrainedConfig):
r"""
Configuration class for SAM3 Mask Decoder (pixel-level mask prediction).
Args:
hidden_size (`int`, *optional*, defaults to 256):
Dimensionality of the mask decoder.
num_upsampling_stages (`int`, *optional*, defaults to 3):
Number of upsampling stages in the pixel decoder (FPN).
layer_norm_eps (`float`, *optional*, defaults to 1e-06):
Epsilon for layer normalization.
dropout (`float`, *optional*, defaults to 0.0):
Dropout probability for prompt cross-attention.
num_attention_heads (`int`, *optional*, defaults to 8):
Number of attention heads for prompt cross-attention.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing weight matrices.
"""
model_type = "sam3_mask_decoder"
def __init__(
self,
hidden_size=256,
num_upsampling_stages=3,
layer_norm_eps=1e-6,
dropout=0.0,
num_attention_heads=8,
initializer_range=0.02,
**kwargs,
):
super().__init__(**kwargs)
self.hidden_size = hidden_size
self.num_upsampling_stages = num_upsampling_stages
self.layer_norm_eps = layer_norm_eps
self.dropout = dropout
self.num_attention_heads = num_attention_heads
self.initializer_range = initializer_range
class Sam3Config(PreTrainedConfig):
r"""
Configuration class to store the configuration of a [`Sam3Model`].
Instantiating a configuration with the defaults will yield a similar configuration to that of SAM 3
[facebook/sam3](https://huggingface.co/facebook/sam3) architecture.
This is the main configuration class that combines all sub-configurations for the SAM3 model.
Args:
vision_config (`dict` or `Sam3VisionConfig`, *optional*):
Configuration for the vision encoder.
text_config (`dict` or `CLIPTextConfig`, *optional*):
Configuration for the text encoder.
geometry_encoder_config (`dict` or `Sam3GeometryEncoderConfig`, *optional*):
Configuration for the geometry encoder.
detr_encoder_config (`dict` or `Sam3DETREncoderConfig`, *optional*):
Configuration for the DETR encoder.
detr_decoder_config (`dict` or `Sam3DETRDecoderConfig`, *optional*):
Configuration for the DETR decoder.
mask_decoder_config (`dict` or `Sam3MaskDecoderConfig`, *optional*):
Configuration for the mask decoder.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing weight matrices.
Example:
```python
>>> from transformers import Sam3Config, Sam3Model
>>> # Initializing a SAM3 configuration
>>> configuration = Sam3Config()
>>> # Initializing a model from the configuration
>>> model = Sam3Model(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```
"""
model_type = "sam3"
is_composition = True
sub_configs = {
"vision_config": Sam3VisionConfig,
"text_config": CLIPTextConfig,
"geometry_encoder_config": Sam3GeometryEncoderConfig,
"detr_encoder_config": Sam3DETREncoderConfig,
"detr_decoder_config": Sam3DETRDecoderConfig,
"mask_decoder_config": Sam3MaskDecoderConfig,
}
def __init__(
self,
vision_config=None,
text_config=None,
geometry_encoder_config=None,
detr_encoder_config=None,
detr_decoder_config=None,
mask_decoder_config=None,
initializer_range=0.02,
**kwargs,
):
# Vision config
if vision_config is None:
vision_config = {}
if isinstance(vision_config, dict):
self.vision_config = Sam3VisionConfig(**vision_config)
else:
self.vision_config = vision_config
# Text config (CLIPTextModelWithProjection defaults)
if text_config is None:
text_config = {
"vocab_size": 49408,
"hidden_size": 1024,
"intermediate_size": 4096, # hidden_size * mlp_ratio (1024 * 4)
"projection_dim": 512, # CLIP's internal projection dimension
"num_hidden_layers": 24,
"num_attention_heads": 16,
"max_position_embeddings": 32,
"hidden_act": "gelu",
}
if isinstance(text_config, dict):
self.text_config = CLIPTextConfig(**text_config)
else:
self.text_config = text_config
# Geometry encoder config
if geometry_encoder_config is None:
geometry_encoder_config = {}
if isinstance(geometry_encoder_config, dict):
self.geometry_encoder_config = Sam3GeometryEncoderConfig(**geometry_encoder_config)
else:
self.geometry_encoder_config = geometry_encoder_config
# DETR encoder config
if detr_encoder_config is None:
detr_encoder_config = {}
if isinstance(detr_encoder_config, dict):
self.detr_encoder_config = Sam3DETREncoderConfig(**detr_encoder_config)
else:
self.detr_encoder_config = detr_encoder_config
# DETR decoder config
if detr_decoder_config is None:
detr_decoder_config = {}
if isinstance(detr_decoder_config, dict):
self.detr_decoder_config = Sam3DETRDecoderConfig(**detr_decoder_config)
else:
self.detr_decoder_config = detr_decoder_config
# Mask decoder config
if mask_decoder_config is None:
mask_decoder_config = {}
if isinstance(mask_decoder_config, dict):
self.mask_decoder_config = Sam3MaskDecoderConfig(**mask_decoder_config)
else:
self.mask_decoder_config = mask_decoder_config
self.initializer_range = initializer_range
super().__init__(**kwargs)
@property
def image_size(self):
"""Image size for the SAM3 model."""
return self.vision_config.image_size
@image_size.setter
def image_size(self, value):
"""Set the image size and propagate to vision config."""
self.vision_config.image_size = value
__all__ = [
"Sam3Config",
"Sam3ViTConfig",
"Sam3VisionConfig",
"Sam3GeometryEncoderConfig",
"Sam3DETREncoderConfig",
"Sam3DETRDecoderConfig",
"Sam3MaskDecoderConfig",
]
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/sam3/image_processing_sam3_fast.py | src/transformers/models/sam3/image_processing_sam3_fast.py | # π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨
# This file was automatically generated from src/transformers/models/sam3/modular_sam3.py.
# Do NOT edit this file manually as any edits will be overwritten by the generation of
# the file from the modular. If any change should be done, please apply the change to the
# modular_sam3.py file directly. One of our CI enforces this.
# π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨
# coding=utf-8
# Copyright 2025 The Meta AI Authors and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from copy import deepcopy
from itertools import product
from typing import Any, Optional, Union
import numpy as np
import torch
import torch.nn.functional as F
from torchvision.ops.boxes import batched_nms
from ...image_processing_utils import BatchFeature, get_size_dict
from ...image_processing_utils_fast import BaseImageProcessorFast
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
SizeDict,
pil_torch_interpolation_mapping,
)
from ...processing_utils import ImagesKwargs, Unpack
from ...utils import TensorType, auto_docstring
class Sam3FastImageProcessorKwargs(ImagesKwargs, total=False):
r"""
mask_size (`dict[str, int]`, *optional*):
The size `{"height": int, "width": int}` to resize the segmentation maps to.
"""
mask_size: dict[str, int]
def _compute_stability_score(masks: "torch.Tensor", mask_threshold: float, stability_score_offset: int):
# One mask is always contained inside the other.
# Save memory by preventing unnecessary cast to torch.int64
intersections = (
(masks > (mask_threshold + stability_score_offset)).sum(-1, dtype=torch.int16).sum(-1, dtype=torch.int32)
)
unions = (masks > (mask_threshold - stability_score_offset)).sum(-1, dtype=torch.int16).sum(-1, dtype=torch.int32)
stability_scores = intersections / unions
return stability_scores
def _mask_to_rle(input_mask: "torch.Tensor"):
"""
Encodes masks into run-length encoding (RLE), in the format expected by pycocotools.
"""
# Put in fortran order and flatten height and width
batch_size, height, width = input_mask.shape
input_mask = input_mask.permute(0, 2, 1).flatten(1)
# Compute change indices
diff = input_mask[:, 1:] ^ input_mask[:, :-1]
change_indices = diff.nonzero()
# Encode run length
out = []
for i in range(batch_size):
cur_idxs = change_indices[change_indices[:, 0] == i, 1] + 1
if len(cur_idxs) == 0:
# No changes => either all 0 or all 1
# If the entire mask is 0, RLE is [height*width] or if the entire mask is 1, RLE is [0, height*width].
if input_mask[i, 0] == 0:
out.append({"size": [height, width], "counts": [height * width]})
else:
out.append({"size": [height, width], "counts": [0, height * width]})
continue
btw_idxs = cur_idxs[1:] - cur_idxs[:-1]
counts = [] if input_mask[i, 0] == 0 else [0]
counts += [cur_idxs[0].item()] + btw_idxs.tolist() + [height * width - cur_idxs[-1].item()]
out.append({"size": [height, width], "counts": counts})
return out
def _batched_mask_to_box(masks: "torch.Tensor"):
"""
Computes the bounding boxes around the given input masks. The bounding boxes are in the XYXY format which
corresponds to the following required indices:
- LEFT: left hand side of the bounding box
- TOP: top of the bounding box
- RIGHT: right of the bounding box
- BOTTOM: bottom of the bounding box
Return [0,0,0,0] for an empty mask. For input shape channel_1 x channel_2 x ... x height x width, the output shape
is channel_1 x channel_2 x ... x 4.
Args:
- masks (`torch.Tensor` of shape `(batch, nb_mask, height, width)`)
"""
# torch.max below raises an error on empty inputs, just skip in this case
if torch.numel(masks) == 0:
return torch.zeros(*masks.shape[:-2], 4, device=masks.device)
# Normalize shape to Cxheightxwidth
shape = masks.shape
height, width = shape[-2:]
# Get top and bottom edges
in_height, _ = torch.max(masks, dim=-1)
in_height_coords = in_height * torch.arange(height, device=in_height.device)[None, :]
bottom_edges, _ = torch.max(in_height_coords, dim=-1)
in_height_coords = in_height_coords + height * (~in_height)
top_edges, _ = torch.min(in_height_coords, dim=-1)
# Get left and right edges
in_width, _ = torch.max(masks, dim=-2)
in_width_coords = in_width * torch.arange(width, device=in_width.device)[None, :]
right_edges, _ = torch.max(in_width_coords, dim=-1)
in_width_coords = in_width_coords + width * (~in_width)
left_edges, _ = torch.min(in_width_coords, dim=-1)
# If the mask is empty the right edge will be to the left of the left edge.
# Replace these boxes with [0, 0, 0, 0]
empty_filter = (right_edges < left_edges) | (bottom_edges < top_edges)
out = torch.stack([left_edges, top_edges, right_edges, bottom_edges], dim=-1)
out = out * (~empty_filter).unsqueeze(-1)
# Return to original shape
out = out.reshape(*shape[:-2], 4)
return out
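# Worked example (hypothetical mask): for a single 4x4 mask that is True on rows 1-2 and columns 1-2,
#   mask = torch.zeros(1, 4, 4, dtype=torch.bool); mask[0, 1:3, 1:3] = True
#   _batched_mask_to_box(mask)  # -> tensor([[1, 1, 2, 2]]) in (left, top, right, bottom) order
# An all-False mask maps to [0, 0, 0, 0] through the empty_filter branch above.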
def _is_box_near_crop_edge(boxes, crop_box, orig_box, atol=20.0):
"""Filter masks at the edge of a crop, but not at the edge of the original image."""
crop_box_torch = torch.as_tensor(crop_box, dtype=torch.float, device=boxes.device)
orig_box_torch = torch.as_tensor(orig_box, dtype=torch.float, device=boxes.device)
left, top, _, _ = crop_box
offset = torch.tensor([[left, top, left, top]], device=boxes.device)
# Check if boxes has a channel dimension
if len(boxes.shape) == 3:
offset = offset.unsqueeze(1)
boxes = (boxes + offset).float()
near_crop_edge = torch.isclose(boxes, crop_box_torch[None, :], atol=atol, rtol=0)
near_image_edge = torch.isclose(boxes, orig_box_torch[None, :], atol=atol, rtol=0)
near_crop_edge = torch.logical_and(near_crop_edge, ~near_image_edge)
return torch.any(near_crop_edge, dim=1)
def _pad_masks(masks, crop_box: list[int], orig_height: int, orig_width: int):
left, top, right, bottom = crop_box
if left == 0 and top == 0 and right == orig_width and bottom == orig_height:
return masks
# Coordinate transform masks
pad_x, pad_y = orig_width - (right - left), orig_height - (bottom - top)
pad = (left, pad_x - left, top, pad_y - top)
return torch.nn.functional.pad(masks, pad, value=0)
def _generate_crop_boxes(
image,
target_size: int, # Is it tuple here?
crop_n_layers: int = 0,
overlap_ratio: float = 512 / 1500,
points_per_crop: Optional[int] = 32,
crop_n_points_downscale_factor: Optional[list[int]] = 1,
) -> tuple[list[list[int]], list[int]]:
"""
Generates a list of crop boxes of different sizes. Each layer has (2**i)**2 boxes for the ith layer.
Args:
image (Union[`numpy.ndarray`, `PIL.Image`, `torch.Tensor`]):
Image to generate crops for.
target_size (`int`):
Size of the smallest crop.
crop_n_layers (`int`, *optional*):
If `crop_n_layers > 0`, mask prediction will be run again on crops of the image. Sets the number of layers
to run, where each layer has 2**i_layer number of image crops.
overlap_ratio (`float`, *optional*):
Sets the degree to which crops overlap. In the first crop layer, crops will overlap by this fraction of the
image length. Later layers with more crops scale down this overlap.
points_per_crop (`int`, *optional*):
Number of points to sample per crop.
crop_n_points_downscale_factor (`int`, *optional*):
The number of points-per-side sampled in layer n is scaled down by crop_n_points_downscale_factor**n.
input_data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
"""
if isinstance(image, list):
raise ValueError("Only one image is allowed for crop generation.")
original_size = image.shape[-2:]
points_grid = []
for i in range(crop_n_layers + 1):
n_points = int(points_per_crop / (crop_n_points_downscale_factor**i))
points_grid.append(_build_point_grid(n_points))
crop_boxes, layer_idxs = _generate_per_layer_crops(crop_n_layers, overlap_ratio, original_size)
cropped_images, point_grid_per_crop = _generate_crop_images(
crop_boxes, image, points_grid, layer_idxs, target_size, original_size
)
crop_boxes = torch.tensor(crop_boxes)
crop_boxes = crop_boxes.float()
points_per_crop = torch.stack(point_grid_per_crop)
points_per_crop = points_per_crop.unsqueeze(0).permute(0, 2, 1, 3)
cropped_images = torch.stack(cropped_images)
input_labels = torch.ones_like(points_per_crop[:, :, :, 0], dtype=torch.int64)
return crop_boxes, points_per_crop, cropped_images, input_labels
def _generate_per_layer_crops(crop_n_layers, overlap_ratio, original_size):
"""
Generates 2 ** (layer_idx + 1) crops per side for each of the crop_n_layers layers. Crops are returned in the
XYXY format, which consists of the following required indices:
- X1: X coordinate of the top left of the bounding box
- Y1: Y coordinate of the top left of the bounding box
- X2: X coordinate of the bottom right of the bounding box
- Y2: Y coordinate of the bottom right of the bounding box
"""
crop_boxes, layer_idxs = [], []
im_height, im_width = original_size
short_side = min(im_height, im_width)
# Original image
crop_boxes.append([0, 0, im_width, im_height])
layer_idxs.append(0)
for i_layer in range(crop_n_layers):
n_crops_per_side = 2 ** (i_layer + 1)
overlap = int(overlap_ratio * short_side * (2 / n_crops_per_side))
crop_width = int(math.ceil((overlap * (n_crops_per_side - 1) + im_width) / n_crops_per_side))
crop_height = int(math.ceil((overlap * (n_crops_per_side - 1) + im_height) / n_crops_per_side))
crop_box_x0 = [int((crop_width - overlap) * i) for i in range(n_crops_per_side)]
crop_box_y0 = [int((crop_height - overlap) * i) for i in range(n_crops_per_side)]
for left, top in product(crop_box_x0, crop_box_y0):
box = [left, top, min(left + crop_width, im_width), min(top + crop_height, im_height)]
crop_boxes.append(box)
layer_idxs.append(i_layer + 1)
return crop_boxes, layer_idxs
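# Worked example (hypothetical 1000x1000 image, crop_n_layers=1, overlap_ratio=512/1500):
#   _generate_per_layer_crops(1, 512 / 1500, (1000, 1000))
# returns five XYXY boxes: the full image [0, 0, 1000, 1000] for layer 0, plus four overlapping crops of
# side 671 (clipped at the image border) for layer 1, whose top-left corners sit at offsets 0 and 330 on
# each axis (overlap == int(341.33) == 341 pixels):
#   [0, 0, 671, 671], [0, 330, 671, 1000], [330, 0, 1000, 671], [330, 330, 1000, 1000]
# with layer_idxs == [0, 1, 1, 1, 1].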
def _build_point_grid(n_per_side: int) -> torch.Tensor:
"""Generates a 2D grid of points evenly spaced in [0,1]x[0,1]."""
offset = 1 / (2 * n_per_side)
points_one_side = torch.linspace(offset, 1 - offset, n_per_side)
points_x = torch.tile(points_one_side[None, :], (n_per_side, 1))
points_y = torch.tile(points_one_side[:, None], (1, n_per_side))
points = torch.stack([points_x, points_y], dim=-1).reshape(-1, 2)
return points
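# Worked example: _build_point_grid(2) returns the four cell centers of a 2x2 grid over [0, 1] x [0, 1],
#   tensor([[0.25, 0.25], [0.75, 0.25], [0.25, 0.75], [0.75, 0.75]])
# in (x, y) order, row by row.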
def _generate_crop_images(
crop_boxes, image, points_grid, layer_idxs, target_size, original_size, input_data_format=None
):
"""
Takes as input the bounding boxes that are used to crop the image. Based on the crops, the corresponding
point grids are also scaled, normalized and returned.
"""
cropped_images = []
total_points_per_crop = []
for i, crop_box in enumerate(crop_boxes):
left, top, right, bottom = crop_box
cropped_im = image[:, top:bottom, left:right]
cropped_images.append(cropped_im)
cropped_im_size = cropped_im.shape[-2:]
points_scale = torch.tensor(cropped_im_size).flip(dims=(0,)).unsqueeze(0)
points = points_grid[layer_idxs[i]] * points_scale
normalized_points = _normalize_coordinates(target_size, points, original_size)
total_points_per_crop.append(normalized_points)
return cropped_images, total_points_per_crop
def _normalize_coordinates(
target_size: int, coords: torch.Tensor, original_size: tuple[int, int], is_bounding_box=False
) -> torch.Tensor:
"""
Expects a coordinate tensor with length 2 in the final dimension. Requires the original image size in
(height, width) format.
"""
old_height, old_width = original_size
scale = target_size * 1.0 / max(old_height, old_width)
new_height, new_width = old_height * scale, old_width * scale
new_width = int(new_width + 0.5)
new_height = int(new_height + 0.5)
coords = deepcopy(coords).float()
if is_bounding_box:
coords = coords.reshape(-1, 2, 2)
coords[..., 0] = coords[..., 0] * (new_width / old_width)
coords[..., 1] = coords[..., 1] * (new_height / old_height)
if is_bounding_box:
coords = coords.reshape(-1, 4)
return coords
def _rle_to_mask(rle: dict[str, Any]) -> torch.Tensor:
"""Compute a binary mask from an uncompressed RLE."""
height, width = rle["size"]
mask = torch.empty(height * width, dtype=bool)
idx = 0
parity = False
for count in rle["counts"]:
mask[idx : idx + count] = parity
idx += count
parity = not parity
mask = mask.reshape(width, height)
return mask.transpose(0, 1) # Reshape to original shape
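# Worked example (illustrative): the counts alternate background/foreground run lengths in column-major order over the
# (height, width) mask, so {"size": [2, 3], "counts": [2, 3, 1]} decodes to
# [[0, 1, 1],
#  [0, 1, 0]].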
def _post_process_for_mask_generation(rle_masks, iou_scores, mask_boxes, amg_crops_nms_thresh=0.7):
"""
Perform NMS (Non Maximum Suppression) on the outputs.
Args:
rle_masks (`torch.Tensor`):
binary masks in the RLE format
iou_scores (`torch.Tensor` of shape (nb_masks, 1)):
iou_scores predicted by the model
mask_boxes (`torch.Tensor`):
The bounding boxes corresponding to segmentation masks
amg_crops_nms_thresh (`float`, *optional*, defaults to 0.7):
NMS threshold.
"""
keep_by_nms = batched_nms(
boxes=mask_boxes.float(),
scores=iou_scores,
idxs=torch.zeros(mask_boxes.shape[0]),
iou_threshold=amg_crops_nms_thresh,
)
iou_scores = iou_scores[keep_by_nms]
rle_masks = [rle_masks[i] for i in keep_by_nms]
mask_boxes = mask_boxes[keep_by_nms]
masks = [_rle_to_mask(rle) for rle in rle_masks]
return masks, iou_scores, rle_masks, mask_boxes
def _scale_boxes(boxes, target_sizes):
"""
Scale batch of bounding boxes to the target sizes.
Args:
boxes (`torch.Tensor` of shape `(batch_size, num_boxes, 4)`):
Bounding boxes to scale. Each box is expected to be in (x1, y1, x2, y2) format.
target_sizes (`list[tuple[int, int]]` or `torch.Tensor` of shape `(batch_size, 2)`):
Target sizes to scale the boxes to. Each target size is expected to be in (height, width) format.
Returns:
`torch.Tensor` of shape `(batch_size, num_boxes, 4)`: Scaled bounding boxes.
"""
if isinstance(target_sizes, (list, tuple)):
image_height = torch.tensor([i[0] for i in target_sizes])
image_width = torch.tensor([i[1] for i in target_sizes])
elif isinstance(target_sizes, torch.Tensor):
image_height, image_width = target_sizes.unbind(1)
else:
raise TypeError("`target_sizes` must be a list, tuple or torch.Tensor")
scale_factor = torch.stack([image_width, image_height, image_width, image_height], dim=1)
scale_factor = scale_factor.unsqueeze(1).to(boxes.device)
boxes = boxes * scale_factor
return boxes
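# Worked example (illustrative): a normalized box [0.25, 0.5, 0.75, 1.0] scaled with a target size of
# (height=480, width=640) becomes [160.0, 240.0, 480.0, 480.0].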
@auto_docstring
class Sam3ImageProcessorFast(BaseImageProcessorFast):
resample = PILImageResampling.BILINEAR
image_mean = IMAGENET_STANDARD_MEAN
image_std = IMAGENET_STANDARD_STD
size = {"height": 1008, "width": 1008}
mask_size = {"height": 288, "width": 288}
do_resize = True
do_rescale = True
do_normalize = True
do_convert_rgb = True
valid_kwargs = Sam3FastImageProcessorKwargs
# modular artefacts
do_pad = None
pad_size = None
mask_pad_size = None
def __init__(self, **kwargs: Unpack[Sam3FastImageProcessorKwargs]):
super().__init__(**kwargs)
def _further_process_kwargs(
self,
size: Optional[SizeDict] = None,
mask_size: Optional[SizeDict] = None,
default_to_square: Optional[bool] = None,
image_mean: Optional[Union[float, list[float]]] = None,
image_std: Optional[Union[float, list[float]]] = None,
data_format: Optional[ChannelDimension] = None,
**kwargs,
) -> dict:
"""
Update kwargs that need further processing before being validated.
Can be overridden by subclasses to customize the processing of kwargs.
"""
if kwargs is None:
kwargs = {}
if size is not None:
size = SizeDict(**get_size_dict(size=size, default_to_square=default_to_square))
if mask_size is not None:
mask_size = SizeDict(**get_size_dict(mask_size, param_name="mask_size"))
if isinstance(image_mean, list):
image_mean = tuple(image_mean)
if isinstance(image_std, list):
image_std = tuple(image_std)
if data_format is None:
data_format = ChannelDimension.FIRST
kwargs["size"] = size
kwargs["mask_size"] = mask_size
kwargs["image_mean"] = image_mean
kwargs["image_std"] = image_std
kwargs["data_format"] = data_format
# torch resize uses interpolation instead of resample
# Check if resample is an int before checking if it's an instance of PILImageResampling
# because if pillow < 9.1.0, resample is an int and PILImageResampling is a module.
# Checking PILImageResampling will fail with error `TypeError: isinstance() arg 2 must be a type or tuple of types`.
resample = kwargs.pop("resample")
kwargs["interpolation"] = (
pil_torch_interpolation_mapping[resample] if isinstance(resample, (PILImageResampling, int)) else resample
)
return kwargs
@auto_docstring
def preprocess(
self,
images: ImageInput,
segmentation_maps: Optional[ImageInput] = None,
**kwargs: Unpack[Sam3FastImageProcessorKwargs],
) -> BatchFeature:
r"""
segmentation_maps (`ImageInput`, *optional*):
The segmentation maps to preprocess.
"""
return super().preprocess(images, segmentation_maps, **kwargs)
def _preprocess_image_like_inputs(
self,
images: ImageInput,
segmentation_maps: Optional[ImageInput],
do_convert_rgb: bool,
input_data_format: ChannelDimension,
device: Optional[Union[str, "torch.device"]] = None,
**kwargs: Unpack[Sam3FastImageProcessorKwargs],
) -> BatchFeature:
"""
Preprocess image-like inputs.
"""
images = self._prepare_image_like_inputs(
images=images, do_convert_rgb=do_convert_rgb, input_data_format=input_data_format, device=device
)
original_sizes = [image.shape[-2:] for image in images]
images_kwargs = kwargs.copy()
pixel_values = self._preprocess(images, **images_kwargs)
data = {
"pixel_values": pixel_values,
"original_sizes": original_sizes,
}
if segmentation_maps is not None:
processed_segmentation_maps = self._prepare_image_like_inputs(
images=segmentation_maps,
expected_ndims=2,
do_convert_rgb=False,
input_data_format=ChannelDimension.FIRST,
)
segmentation_maps_kwargs = kwargs.copy()
segmentation_maps_kwargs.update(
{
"do_normalize": False,
"do_rescale": False,
"interpolation": pil_torch_interpolation_mapping[PILImageResampling.NEAREST],
"size": segmentation_maps_kwargs.pop("mask_size"),
}
)
processed_segmentation_maps = self._preprocess(
images=processed_segmentation_maps, **segmentation_maps_kwargs
)
data["labels"] = processed_segmentation_maps.squeeze(1).to(torch.int64)
return BatchFeature(data=data, tensor_type=kwargs["return_tensors"])
def _preprocess(
self,
images: list["torch.Tensor"],
return_tensors: Optional[Union[str, TensorType]],
**kwargs,
) -> "torch.Tensor":
return super()._preprocess(images, return_tensors=return_tensors, **kwargs).pixel_values
def generate_crop_boxes(
self,
image: "torch.Tensor",
target_size,
crop_n_layers: int = 0,
overlap_ratio: float = 512 / 1500,
points_per_crop: Optional[int] = 32,
crop_n_points_downscale_factor: Optional[list[int]] = 1,
device: Optional["torch.device"] = None,
):
"""
Generates a list of crop boxes of different sizes. Each layer has (2**i)**2 boxes for the ith layer.
Args:
image (`torch.Tensor`):
Input original image
target_size (`int`):
Target size of the resized image
crop_n_layers (`int`, *optional*, defaults to 0):
If >0, mask prediction will be run again on crops of the image. Sets the number of layers to run, where
layer i has (2**i)**2 image crops.
overlap_ratio (`float`, *optional*, defaults to 512/1500):
Sets the degree to which crops overlap. In the first crop layer, crops will overlap by this fraction of
the image length. Later layers with more crops scale down this overlap.
points_per_crop (`int`, *optional*, defaults to 32):
Number of points to sample from each crop.
crop_n_points_downscale_factor (`list[int]`, *optional*, defaults to 1):
The number of points-per-side sampled in layer n is scaled down by crop_n_points_downscale_factor**n.
device (`torch.device`, *optional*, defaults to None):
Device to use for the computation. If None, cpu will be used.
"""
image = self._process_image(image)
crop_boxes, points_per_crop, cropped_images, input_labels = _generate_crop_boxes(
image,
target_size,
crop_n_layers,
overlap_ratio,
points_per_crop,
crop_n_points_downscale_factor,
)
if device is None:
device = torch.device("cpu")
crop_boxes = crop_boxes.to(device)
points_per_crop = points_per_crop.to(device)
# cropped_images stays as torch.Tensor
input_labels = input_labels.to(device)
return crop_boxes, points_per_crop, cropped_images, input_labels
def filter_masks(
self,
masks,
iou_scores,
original_size,
cropped_box_image,
pred_iou_thresh=0.88,
stability_score_thresh=0.95,
mask_threshold=0,
stability_score_offset=1,
):
"""
Filters the predicted masks by selecting only the ones that meet several criteria. The first criterion is
that the IoU score needs to be greater than `pred_iou_thresh`. The second criterion is that the stability
score needs to be greater than `stability_score_thresh`. The method also converts the predicted masks to
bounding boxes and pads the predicted masks if necessary.
Args:
masks (`torch.Tensor`):
Input masks.
iou_scores (`torch.Tensor`):
List of IoU scores.
original_size (`tuple[int,int]`):
Size of the original image.
cropped_box_image (`torch.Tensor`):
The cropped image.
pred_iou_thresh (`float`, *optional*, defaults to 0.88):
The threshold for the iou scores.
stability_score_thresh (`float`, *optional*, defaults to 0.95):
The threshold for the stability score.
mask_threshold (`float`, *optional*, defaults to 0):
The threshold for the predicted masks.
stability_score_offset (`float`, *optional*, defaults to 1):
The offset for the stability score used in the `_compute_stability_score` method.
"""
original_height, original_width = original_size
iou_scores = iou_scores.flatten(0, 1)
masks = masks.flatten(0, 1)
if masks.shape[0] != iou_scores.shape[0]:
raise ValueError("masks and iou_scores must have the sam3e batch size.")
if masks.device != iou_scores.device:
iou_scores = iou_scores.to(masks.device)
batch_size = masks.shape[0]
keep_mask = torch.ones(batch_size, dtype=torch.bool, device=masks.device)
if pred_iou_thresh > 0.0:
keep_mask = keep_mask & (iou_scores > pred_iou_thresh)
# compute stability score
if stability_score_thresh > 0.0:
stability_scores = _compute_stability_score(masks, mask_threshold, stability_score_offset)
keep_mask = keep_mask & (stability_scores > stability_score_thresh)
scores = iou_scores[keep_mask]
masks = masks[keep_mask]
# binarize masks
masks = masks > mask_threshold
converted_boxes = _batched_mask_to_box(masks)
keep_mask = ~_is_box_near_crop_edge(
converted_boxes, cropped_box_image, [0, 0, original_width, original_height]
)
scores = scores[keep_mask]
masks = masks[keep_mask]
converted_boxes = converted_boxes[keep_mask]
masks = _pad_masks(masks, cropped_box_image, original_height, original_width)
# conversion to rle is necessary to run non-maximum suppression
masks = _mask_to_rle(masks)
return masks, scores, converted_boxes
def post_process_masks(
self,
masks,
original_sizes,
mask_threshold=0.0,
binarize=True,
max_hole_area=0.0,
max_sprinkle_area=0.0,
apply_non_overlapping_constraints=False,
**kwargs,
):
"""
Remove padding and upscale masks to the original image size.
Args:
masks (`Union[torch.Tensor, List[torch.Tensor], np.ndarray, List[np.ndarray]]`):
Batched masks from the mask_decoder in (batch_size, num_channels, height, width) format.
original_sizes (`Union[torch.Tensor, List[Tuple[int,int]]]`):
The original sizes of each image before it was resized to the model's expected input shape, in (height,
width) format.
mask_threshold (`float`, *optional*, defaults to 0.0):
Threshold for binarization and post-processing operations.
binarize (`bool`, *optional*, defaults to `True`):
Whether to binarize the masks.
max_hole_area (`float`, *optional*, defaults to 0.0):
The maximum area of a hole to fill.
max_sprinkle_area (`float`, *optional*, defaults to 0.0):
The maximum area of a sprinkle to fill.
apply_non_overlapping_constraints (`bool`, *optional*, defaults to `False`):
Whether to apply non-overlapping constraints to the masks.
Returns:
(`torch.Tensor`): Batched masks in (batch_size, num_channels, height, width) format, where (height, width)
is given by original_size.
"""
if isinstance(original_sizes, (torch.Tensor, np.ndarray)):
original_sizes = original_sizes.tolist()
# TODO: add connected components kernel for postprocessing
output_masks = []
for i, original_size in enumerate(original_sizes):
if isinstance(masks[i], np.ndarray):
masks[i] = torch.from_numpy(masks[i])
elif not isinstance(masks[i], torch.Tensor):
raise TypeError("Input masks should be a list of `torch.tensors` or a list of `np.ndarray`")
interpolated_mask = F.interpolate(masks[i], original_size, mode="bilinear", align_corners=False)
if apply_non_overlapping_constraints:
interpolated_mask = self._apply_non_overlapping_constraints(interpolated_mask)
if binarize:
interpolated_mask = interpolated_mask > mask_threshold
output_masks.append(interpolated_mask)
return output_masks
def post_process_for_mask_generation(self, all_masks, all_scores, all_boxes, crops_nms_thresh):
"""
Post-processes the generated masks by running the Non Maximum Suppression (NMS) algorithm on the predicted masks.
Args:
all_masks (`torch.Tensor`):
List of all predicted segmentation masks
all_scores (`torch.Tensor`):
List of all predicted iou scores
all_boxes (`torch.Tensor`):
List of all bounding boxes of the predicted masks
crops_nms_thresh (`float`):
Threshold for NMS (Non Maximum Suppression) algorithm.
"""
return _post_process_for_mask_generation(all_masks, all_scores, all_boxes, crops_nms_thresh)
def _apply_non_overlapping_constraints(self, pred_masks: torch.Tensor) -> torch.Tensor:
"""
Apply non-overlapping constraints to the object scores in pred_masks. Here we
keep only the highest scoring object at each spatial location in pred_masks.
"""
batch_size = pred_masks.size(0)
if batch_size == 1:
return pred_masks
device = pred_masks.device
# "max_obj_inds": object index of the object with the highest score at each location
max_obj_inds = torch.argmax(pred_masks, dim=0, keepdim=True)
# "batch_obj_inds": object index of each object slice (along dim 0) in `pred_masks`
batch_obj_inds = torch.arange(batch_size, device=device)[:, None, None, None]
keep = max_obj_inds == batch_obj_inds
# suppress overlapping regions' scores below -10.0 so that the foreground regions
# don't overlap (here sigmoid(-10.0)=4.5398e-05)
pred_masks = torch.where(keep, pred_masks, torch.clamp(pred_masks, max=-10.0))
return pred_masks
def post_process_semantic_segmentation(
self, outputs, target_sizes: Optional[list[tuple]] = None, threshold: float = 0.5
):
"""
Converts the output of [`Sam3Model`] into semantic segmentation maps.
Args:
outputs ([`Sam3ImageSegmentationOutput`]):
Raw outputs of the model containing semantic_seg.
target_sizes (`list[tuple]` of length `batch_size`, *optional*):
List of tuples corresponding to the requested final size (height, width) of each prediction. If unset,
predictions will not be resized.
threshold (`float`, *optional*, defaults to 0.5):
Threshold for binarizing the semantic segmentation masks.
Returns:
semantic_segmentation: `list[torch.Tensor]` of length `batch_size`, where each item is a semantic
segmentation map of shape (height, width) corresponding to the target_sizes entry (if `target_sizes` is
specified). Each entry is a binary mask (0 or 1).
"""
# Get semantic segmentation output
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | true |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/sam3/modular_sam3.py | src/transformers/models/sam3/modular_sam3.py | # coding=utf-8
# Copyright 2025 The Meta AI Authors and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
import torch
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
)
from ..sam2.image_processing_sam2_fast import Sam2ImageProcessorFast
def _scale_boxes(boxes, target_sizes):
"""
Scale batch of bounding boxes to the target sizes.
Args:
boxes (`torch.Tensor` of shape `(batch_size, num_boxes, 4)`):
Bounding boxes to scale. Each box is expected to be in (x1, y1, x2, y2) format.
target_sizes (`list[tuple[int, int]]` or `torch.Tensor` of shape `(batch_size, 2)`):
Target sizes to scale the boxes to. Each target size is expected to be in (height, width) format.
Returns:
`torch.Tensor` of shape `(batch_size, num_boxes, 4)`: Scaled bounding boxes.
"""
if isinstance(target_sizes, (list, tuple)):
image_height = torch.tensor([i[0] for i in target_sizes])
image_width = torch.tensor([i[1] for i in target_sizes])
elif isinstance(target_sizes, torch.Tensor):
image_height, image_width = target_sizes.unbind(1)
else:
raise TypeError("`target_sizes` must be a list, tuple or torch.Tensor")
scale_factor = torch.stack([image_width, image_height, image_width, image_height], dim=1)
scale_factor = scale_factor.unsqueeze(1).to(boxes.device)
boxes = boxes * scale_factor
return boxes
class Sam3ImageProcessorFast(Sam2ImageProcessorFast):
image_mean = IMAGENET_STANDARD_MEAN
image_std = IMAGENET_STANDARD_STD
size = {"height": 1008, "width": 1008}
mask_size = {"height": 288, "width": 288}
def post_process_semantic_segmentation(
self, outputs, target_sizes: Optional[list[tuple]] = None, threshold: float = 0.5
):
"""
Converts the output of [`Sam3Model`] into semantic segmentation maps.
Args:
outputs ([`Sam3ImageSegmentationOutput`]):
Raw outputs of the model containing semantic_seg.
target_sizes (`list[tuple]` of length `batch_size`, *optional*):
List of tuples corresponding to the requested final size (height, width) of each prediction. If unset,
predictions will not be resized.
threshold (`float`, *optional*, defaults to 0.5):
Threshold for binarizing the semantic segmentation masks.
Returns:
semantic_segmentation: `list[torch.Tensor]` of length `batch_size`, where each item is a semantic
segmentation map of shape (height, width) corresponding to the target_sizes entry (if `target_sizes` is
specified). Each entry is a binary mask (0 or 1).
"""
# Get semantic segmentation output
# semantic_seg has shape (batch_size, 1, height, width)
semantic_logits = outputs.semantic_seg
if semantic_logits is None:
raise ValueError(
"Semantic segmentation output is not available in the model outputs. "
"Make sure the model was run with semantic segmentation enabled."
)
# Apply sigmoid to convert logits to probabilities
semantic_probs = semantic_logits.sigmoid()
# Resize and binarize semantic segmentation maps
if target_sizes is not None:
if len(semantic_logits) != len(target_sizes):
raise ValueError(
"Make sure that you pass in as many target sizes as the batch dimension of the logits"
)
semantic_segmentation = []
for idx in range(len(semantic_logits)):
resized_probs = torch.nn.functional.interpolate(
semantic_probs[idx].unsqueeze(dim=0),
size=target_sizes[idx],
mode="bilinear",
align_corners=False,
)
# Binarize: values > threshold become 1, otherwise 0
semantic_map = (resized_probs[0, 0] > threshold).to(torch.long)
semantic_segmentation.append(semantic_map)
else:
# Binarize without resizing
semantic_segmentation = (semantic_probs[:, 0] > threshold).to(torch.long)
semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
return semantic_segmentation
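# Illustrative usage sketch (hedged: it reuses the "facebook/sam3-base" checkpoint and the `model(**inputs)` pattern
# from the `Sam3Processor.post_process_object_detection` example below; the variable names are assumptions):
#   outputs = model(**processor(images=image, text="cat", return_tensors="pt"))
#   semantic_maps = processor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])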
def post_process_object_detection(
self, outputs, threshold: float = 0.3, target_sizes: Optional[list[tuple]] = None
):
"""
Converts the raw output of [`Sam3Model`] into final bounding boxes in (top_left_x, top_left_y,
bottom_right_x, bottom_right_y) format.
Args:
outputs ([`Sam3ImageSegmentationOutput`]):
Raw outputs of the model containing pred_boxes, pred_logits, and optionally presence_logits.
threshold (`float`, *optional*, defaults to 0.3):
Score threshold to keep object detection predictions.
target_sizes (`list[tuple[int, int]]`, *optional*):
List of tuples (`tuple[int, int]`) containing the target size `(height, width)` of each image in the
batch. If unset, predictions will not be resized.
Returns:
`list[dict]`: A list of dictionaries, each dictionary containing the following keys:
- **scores** (`torch.Tensor`): The confidence scores for each predicted box on the image.
- **boxes** (`torch.Tensor`): Image bounding boxes in (top_left_x, top_left_y, bottom_right_x,
bottom_right_y) format.
"""
pred_logits = outputs.pred_logits # (batch_size, num_queries)
pred_boxes = outputs.pred_boxes # (batch_size, num_queries, 4) in xyxy format
presence_logits = outputs.presence_logits # (batch_size, 1) or None
batch_size = pred_logits.shape[0]
if target_sizes is not None and len(target_sizes) != batch_size:
raise ValueError("Make sure that you pass in as many target sizes as images")
# Compute scores: combine pred_logits with presence_logits if available
batch_scores = pred_logits.sigmoid()
if presence_logits is not None:
presence_scores = presence_logits.sigmoid() # (batch_size, 1)
batch_scores = batch_scores * presence_scores # Broadcast multiplication
# Boxes are already in xyxy format from the model
batch_boxes = pred_boxes
# Convert from relative [0, 1] to absolute [0, height/width] coordinates
if target_sizes is not None:
batch_boxes = _scale_boxes(batch_boxes, target_sizes)
results = []
for scores, boxes in zip(batch_scores, batch_boxes):
keep = scores > threshold
scores = scores[keep]
boxes = boxes[keep]
results.append({"scores": scores, "boxes": boxes})
return results
def post_process_instance_segmentation(
self,
outputs,
threshold: float = 0.3,
mask_threshold: float = 0.5,
target_sizes: Optional[list[tuple]] = None,
):
"""
Converts the raw output of [`Sam3Model`] into instance segmentation predictions with bounding boxes and masks.
Args:
outputs ([`Sam3ImageSegmentationOutput`]):
Raw outputs of the model containing pred_boxes, pred_logits, pred_masks, and optionally
presence_logits.
threshold (`float`, *optional*, defaults to 0.3):
Score threshold to keep instance predictions.
mask_threshold (`float`, *optional*, defaults to 0.5):
Threshold for binarizing the predicted masks.
target_sizes (`list[tuple[int, int]]`, *optional*):
List of tuples (`tuple[int, int]`) containing the target size `(height, width)` of each image in the
batch. If unset, predictions will not be resized.
Returns:
`list[dict]`: A list of dictionaries, each dictionary containing the following keys:
- **scores** (`torch.Tensor`): The confidence scores for each predicted instance on the image.
- **boxes** (`torch.Tensor`): Image bounding boxes in (top_left_x, top_left_y, bottom_right_x,
bottom_right_y) format.
- **masks** (`torch.Tensor`): Binary segmentation masks for each instance, shape (num_instances,
height, width).
"""
pred_logits = outputs.pred_logits # (batch_size, num_queries)
pred_boxes = outputs.pred_boxes # (batch_size, num_queries, 4) in xyxy format
pred_masks = outputs.pred_masks # (batch_size, num_queries, height, width)
presence_logits = outputs.presence_logits # (batch_size, 1) or None
batch_size = pred_logits.shape[0]
if target_sizes is not None and len(target_sizes) != batch_size:
raise ValueError("Make sure that you pass in as many target sizes as images")
# Compute scores: combine pred_logits with presence_logits if available
batch_scores = pred_logits.sigmoid()
if presence_logits is not None:
presence_scores = presence_logits.sigmoid() # (batch_size, 1)
batch_scores = batch_scores * presence_scores # Broadcast multiplication
# Apply sigmoid to mask logits
batch_masks = pred_masks.sigmoid()
# Boxes are already in xyxy format from the model
batch_boxes = pred_boxes
# Scale boxes to target sizes if provided
if target_sizes is not None:
batch_boxes = _scale_boxes(batch_boxes, target_sizes)
results = []
for idx, (scores, boxes, masks) in enumerate(zip(batch_scores, batch_boxes, batch_masks)):
# Filter by score threshold
keep = scores > threshold
scores = scores[keep]
boxes = boxes[keep]
masks = masks[keep] # (num_keep, height, width)
# Resize masks to target size if provided
if target_sizes is not None:
target_size = target_sizes[idx]
if len(masks) > 0:
masks = torch.nn.functional.interpolate(
masks.unsqueeze(0), # (1, num_keep, height, width)
size=target_size,
mode="bilinear",
align_corners=False,
).squeeze(0) # (num_keep, target_height, target_width)
# Binarize masks
masks = (masks > mask_threshold).to(torch.long)
results.append({"scores": scores, "boxes": boxes, "masks": masks})
return results
__all__ = ["Sam3ImageProcessorFast"]
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/sam3/processing_sam3.py | src/transformers/models/sam3/processing_sam3.py | # coding=utf-8
# Copyright 2025 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Processor class for SAM3.
"""
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PreTokenizedInput, TextInput
from ...utils import TensorType, is_torch_available, logging
from ...utils.import_utils import requires
logger = logging.get_logger(__name__)
if is_torch_available():
import torch
def box_cxcywh_to_xyxy(x):
x_c, y_c, w, h = x.unbind(-1)
b = [(x_c - 0.5 * w), (y_c - 0.5 * h), (x_c + 0.5 * w), (y_c + 0.5 * h)]
return torch.stack(b, dim=-1)
def box_cxcywh_to_xywh(x):
x_c, y_c, w, h = x.unbind(-1)
b = [(x_c - 0.5 * w), (y_c - 0.5 * h), (w), (h)]
return torch.stack(b, dim=-1)
def box_xywh_to_xyxy(x):
x, y, w, h = x.unbind(-1)
b = [(x), (y), (x + w), (y + h)]
return torch.stack(b, dim=-1)
def box_xywh_to_cxcywh(x):
x, y, w, h = x.unbind(-1)
b = [(x + 0.5 * w), (y + 0.5 * h), (w), (h)]
return torch.stack(b, dim=-1)
def box_xyxy_to_xywh(x):
x, y, X, Y = x.unbind(-1)
b = [(x), (y), (X - x), (Y - y)]
return torch.stack(b, dim=-1)
def box_xyxy_to_cxcywh(x):
x0, y0, x1, y1 = x.unbind(-1)
b = [(x0 + x1) / 2, (y0 + y1) / 2, (x1 - x0), (y1 - y0)]
return torch.stack(b, dim=-1)
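# Worked example (illustrative) for the converters above: a center-format box (cx=0.5, cy=0.5, w=0.2, h=0.4) converts
# to (0.4, 0.3, 0.6, 0.7) in xyxy via box_cxcywh_to_xyxy, and box_xyxy_to_cxcywh maps it back to the original values.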
def box_area(boxes):
"""
Batched version of box area. Boxes should be in [x0, y0, x1, y1] format.
Inputs:
- boxes: Tensor of shape (..., 4)
Returns:
- areas: Tensor of shape (...,)
"""
x0, y0, x1, y1 = boxes.unbind(-1)
return (x1 - x0) * (y1 - y0)
@requires(backends=("torch",))
class Sam3Processor(ProcessorMixin):
r"""
Constructs a SAM3 processor which wraps a SAM3 image processor, a tokenizer and bounding box processing into a
single processor.
[`Sam3Processor`] offers all the functionalities of [`Sam3ImageProcessorFast`] and the wrapped tokenizer. See the docstring of
[`~Sam3ImageProcessorFast.__call__`] for more information.
Args:
image_processor (`Sam3ImageProcessorFast`):
An instance of [`Sam3ImageProcessorFast`].
tokenizer ([`PreTrainedTokenizer`, `PreTrainedTokenizerFast`]):
An instance of [`PreTrainedTokenizer`, `PreTrainedTokenizerFast`]. The tokenizer is a required input.
target_size (`int`, *optional*):
The target size (target_size, target_size) to which the image will be resized.
point_pad_value (`int`, *optional*, defaults to -10):
The value used for padding input boxes.
"""
def __init__(
self, image_processor, tokenizer, target_size: Optional[int] = None, point_pad_value: int = -10, **kwargs
):
super().__init__(image_processor, tokenizer, **kwargs)
self.point_pad_value = point_pad_value
self.target_size = target_size if target_size is not None else self.image_processor.size["height"]
def __call__(
self,
images: Optional[ImageInput] = None,
text: Optional[Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]] = None,
segmentation_maps: Optional[ImageInput] = None,
input_boxes: Optional[Union[list[list[list[float]]], torch.Tensor]] = None,
input_boxes_labels: Optional[Union[list[list[list[int]]], torch.Tensor]] = None,
original_sizes: Optional[Union[list[list[float]], torch.Tensor]] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
**kwargs,
) -> BatchEncoding:
r"""
This method uses [`Sam3ImageProcessorFast.__call__`] method to prepare image(s) for the model. It also prepares bounding boxes for the model if they are provided.
Args:
images (`ImageInput`, *optional*):
The image(s) to process.
text (`str`, `list[str]`, `list[list[str]]`, *optional*):
The text to process.
segmentation_maps (`ImageInput`, *optional*):
The segmentation maps to process.
input_boxes (`list[list[list[float]]]`, `torch.Tensor`, *optional*):
The bounding boxes to process.
input_boxes_labels (`list[list[int]]`, `torch.Tensor`, *optional*):
The labels for the bounding boxes.
original_sizes (`list[list[float]]`, `torch.Tensor`, *optional*):
The original sizes of the images.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return.
**kwargs:
Additional keyword arguments to pass to the image processor.
Returns:
A [`BatchEncoding`] with the following fields:
- `pixel_values` (`torch.Tensor`): The processed image(s).
- `original_sizes` (`list[list[float]]`): The original sizes of the images.
- `labels` (`torch.Tensor`): The processed segmentation maps (if provided).
- `input_boxes_labels` (`torch.Tensor`): The processed labels for the bounding boxes.
- `input_boxes` (`torch.Tensor`): The processed bounding boxes.
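Example (illustrative sketch; the checkpoint name mirrors the post-processing examples below and is an assumption here):
```python
>>> from transformers import AutoProcessor
>>> from PIL import Image
>>> import requests
>>> processor = AutoProcessor.from_pretrained("facebook/sam3-base")
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> inputs = processor(images=image, text="cat", return_tensors="pt")
```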
"""
encoding = None
if images is not None:
encoding = self.image_processor(
images,
segmentation_maps=segmentation_maps,
return_tensors=return_tensors,
**kwargs,
)
elif original_sizes is not None:
if isinstance(original_sizes, torch.Tensor):
original_sizes = original_sizes.cpu().tolist()
encoding = BatchEncoding({"original_sizes": original_sizes}, tensor_type=return_tensors)
elif input_boxes is not None:
raise ValueError("Either images or original_sizes must be provided if input_boxes is not None")
text = self._resolve_text_prompts(text, input_boxes)
if text is not None:
text_inputs = self.tokenizer(text, return_tensors=return_tensors, padding="max_length", max_length=32)
if encoding is not None:
encoding.update(text_inputs)
else:
encoding = text_inputs
# Process input boxes if provided
if input_boxes is not None:
original_sizes = encoding["original_sizes"]
# Validate and convert inputs to standardized format
processed_boxes = self._validate_single_input(
input_boxes,
expected_depth=3,
input_name="boxes",
expected_format="[image level, box level, box coordinates]",
expected_coord_size=4,
)
processed_boxes_labels = self._validate_single_input(
input_boxes_labels,
expected_depth=2,
input_name="labels",
expected_format="[image level, box level]",
)
# Get padding requirements for all inputs
if processed_boxes is not None:
boxes_max_dims = self._get_nested_dimensions(processed_boxes)[:2]
if processed_boxes_labels is not None:
boxes_labels_max_dims = self._get_nested_dimensions(processed_boxes_labels)[:2]
# Ensure boxes and labels have consistent dimensions
if processed_boxes is not None and processed_boxes_labels is not None:
if boxes_max_dims != boxes_labels_max_dims:
raise ValueError(
"Input boxes and labels have inconsistent dimensions. Please ensure they have the same dimensions."
)
# Pad and normalize all inputs to final tensor format
if processed_boxes is not None:
padded_boxes = self._pad_nested_list(processed_boxes, boxes_max_dims + [4])
final_boxes = torch.tensor(padded_boxes, dtype=torch.float32)
self._normalize_tensor_coordinates(
final_boxes, original_sizes, is_bounding_box=True, preserve_padding=True
)
final_boxes = box_xyxy_to_cxcywh(final_boxes)
encoding.update({"input_boxes": final_boxes})
if processed_boxes_labels is not None:
padded_boxes_labels = self._pad_nested_list(processed_boxes_labels, boxes_labels_max_dims)
final_boxes_labels = torch.tensor(padded_boxes_labels, dtype=torch.int64)
encoding.update({"input_boxes_labels": final_boxes_labels})
return encoding
def _normalize_coordinates(self, coords: "torch.Tensor", original_size, is_bounding_box=False) -> "torch.Tensor":
"""
Normalizes coordinates to the [0, 1] range. Expects a tensor with coordinate pairs (length 2) in the final
dimension. Requires the original image size in (H, W) format.
Args:
coords (`torch.Tensor`):
The coordinates to be normalized.
original_size (`tuple`):
The original size of the image.
is_bounding_box (`bool`, *optional*, defaults to `False`):
Whether the coordinates are bounding boxes.
"""
old_h, old_w = original_size
coords = deepcopy(coords).float()
if is_bounding_box:
coords = coords.reshape(-1, 2, 2)
coords[..., 0] = coords[..., 0] / old_w
coords[..., 1] = coords[..., 1] / old_h
if is_bounding_box:
coords = coords.reshape(-1, 4)
return coords
def _convert_to_nested_list(self, data, expected_depth, current_depth=0):
"""
Recursively convert various input formats (tensors, numpy arrays, lists) to nested lists.
Preserves None values within lists.
Args:
data: Input data in any format (may be None or contain None values)
expected_depth: Expected nesting depth
current_depth: Current depth in recursion
Returns:
Nested list representation of the data (or None)
"""
if data is None:
return None
# Convert tensor/numpy to list if we're at a leaf level or if it's a multi-dimensional array
if isinstance(data, torch.Tensor): # PyTorch tensor
if current_depth == expected_depth - 2 or len(data.shape) <= 2: # At coordinate level or small tensor
return data.numpy().tolist()
else:
return [self._convert_to_nested_list(item, expected_depth, current_depth + 1) for item in data]
elif isinstance(data, np.ndarray): # NumPy array
if current_depth == expected_depth - 2 or len(data.shape) <= 2: # At coordinate level or small array
return data.tolist()
else:
return [self._convert_to_nested_list(item, expected_depth, current_depth + 1) for item in data]
elif isinstance(data, list):
if current_depth == expected_depth:
# We've reached the expected depth, return as is
return data
else:
# Continue recursion, preserving None values
return [
self._convert_to_nested_list(item, expected_depth, current_depth + 1) if item is not None else None
for item in data
]
elif isinstance(data, (int, float)):
return data
else:
raise ValueError(f"Unsupported data type: {type(data)}")
def _resolve_text_prompts(self, text, input_boxes):
"""
Resolve text prompts by setting defaults based on prompt types.
"""
# If no text provided, infer default based on prompt type
if text is None:
return "visual" if input_boxes else None
if not isinstance(text, (list, tuple)):
return text
# Validate list/tuple length matches both prompt types if provided
text = list(text) # Convert to list to allow modification
if input_boxes and len(text) != len(input_boxes):
raise ValueError(
f"The number of text prompts must match the number of input boxes. "
f"Got {len(text)} text prompts and {len(input_boxes)} input boxes."
)
# Fill in None values with defaults based on corresponding prompt
for i, text_value in enumerate(text):
if text_value is None and input_boxes and input_boxes[i] is not None:
text[i] = "visual"
return text
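# Worked example (illustrative): with input_boxes given for two images and text=["cat", None], the second entry
# resolves to the default prompt "visual"; with text=None and any box prompt, "visual" is used for all images.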
def _get_nested_dimensions(self, nested_list, max_dims=None):
"""
Get the maximum dimensions at each level of nesting, skipping None values.
Args:
nested_list (`list`):
Nested list structure (may contain None values).
max_dims (`list`, *optional*):
Current maximum dimensions (for recursion).
Returns:
`list`: A list of maximum dimensions for each nesting level.
"""
if max_dims is None:
max_dims = []
if not isinstance(nested_list, list):
return max_dims
if len(max_dims) == 0:
max_dims.append(len(nested_list))
else:
max_dims[0] = max(max_dims[0], len(nested_list))
if len(nested_list) > 0:
for item in nested_list:
# Skip None values
if item is None:
continue
if isinstance(item, list):
sub_dims = self._get_nested_dimensions(item)
# Merge sub_dims into max_dims
for i, dim in enumerate(sub_dims):
if i + 1 >= len(max_dims):
max_dims.append(dim)
else:
max_dims[i + 1] = max(max_dims[i + 1], dim)
return max_dims
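# Worked example (illustrative): for boxes nested as [[[1, 2, 3, 4]], [[1, 2, 3, 4], [5, 6, 7, 8]]] the maximum
# dimensions are [2, 2, 4], i.e. 2 images, at most 2 boxes per image, 4 coordinates per box.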
def _pad_nested_list(self, nested_list, target_dims, current_level=0, pad_value=None):
"""
Recursively pad a nested list to match target dimensions. Replaces None values with padded structures.
Args:
nested_list (`list`):
Nested list to pad (may contain None values).
target_dims (`list`):
Target dimensions for each level.
current_level (`int`, *optional*, defaults to 0):
Current nesting level.
pad_value (`int`, *optional*):
Value to use for padding.
Returns:
`list`: The padded nested list.
"""
if pad_value is None:
pad_value = self.point_pad_value
if current_level >= len(target_dims):
return nested_list
# Ensure we have a list
if not isinstance(nested_list, list):
nested_list = [nested_list]
# Pad current level
current_size = len(nested_list)
target_size = target_dims[current_level]
# Pad with appropriate values
if current_level == len(target_dims) - 1:
# At the coordinate level, pad with pad_value
nested_list.extend([pad_value] * (target_size - current_size))
else:
# At higher levels, pad with nested structures
if current_size > 0:
# Create appropriately sized template
if current_level < len(target_dims) - 2:
# For non-coordinate levels, create empty nested structure
template_dims = target_dims[current_level + 1 :]
template = self._create_empty_nested_structure(template_dims, pad_value)
else:
# For coordinate level, create list of pad_values
template = [pad_value] * target_dims[current_level + 1]
nested_list.extend([deepcopy(template) for _ in range(target_size - current_size)])
else:
# Create from scratch
template_dims = target_dims[current_level + 1 :]
template = self._create_empty_nested_structure(template_dims, pad_value)
nested_list.extend([deepcopy(template) for _ in range(target_size)])
# Recursively pad sublists, replacing None with padded structures
if current_level < len(target_dims) - 1:
for i in range(len(nested_list)):
if nested_list[i] is None:
# Replace None with fully padded structure
template_dims = target_dims[current_level + 1 :]
nested_list[i] = self._create_empty_nested_structure(template_dims, pad_value)
elif isinstance(nested_list[i], list):
nested_list[i] = self._pad_nested_list(nested_list[i], target_dims, current_level + 1, pad_value)
return nested_list
def _create_empty_nested_structure(self, dims, pad_value):
"""
Create an empty nested structure with given dimensions filled with pad_value.
Args:
dims (`list`):
The dimensions of the nested structure.
pad_value (`int`):
The value to fill the structure with.
"""
if len(dims) == 1:
return [pad_value] * dims[0]
else:
return [self._create_empty_nested_structure(dims[1:], pad_value) for _ in range(dims[0])]
def _get_nesting_level(self, input_list):
"""
Get the nesting level of a list structure, skipping None values.
Args:
input_list (`list`):
The list to get the nesting level of.
"""
if isinstance(input_list, list):
if len(input_list) == 0:
return 1
# Find first non-None element to determine nesting level
for item in input_list:
if item is not None:
return 1 + self._get_nesting_level(item)
# All elements are None, treat as single level
return 1
elif isinstance(input_list, (np.ndarray, torch.Tensor)):
# For arrays/tensors, the nesting level is the number of dimensions
return len(input_list.shape)
return 0
def _validate_single_input(
self,
data: Union[torch.Tensor, np.ndarray, list],
expected_depth: int,
input_name: str,
expected_format: str,
expected_coord_size: Optional[int] = None,
) -> list:
"""
Validate a single input by ensuring proper nesting and raising an error if the input is not valid.
Args:
data (`torch.Tensor`, `np.ndarray`, or `list`):
Input data to process.
expected_depth (`int`):
Expected nesting depth.
input_name (`str`):
Name of the input for error messages.
expected_format (`str`):
The expected format of the input.
expected_coord_size (`int`, *optional*):
Expected coordinate size (4 for boxes, None for labels).
"""
if data is None:
return None
# Handle tensors and numpy arrays first
if isinstance(data, (torch.Tensor, np.ndarray)):
# For tensors/arrays, we can directly check the number of dimensions
if data.ndim != expected_depth:
raise ValueError(
f"Input {input_name} must be a tensor/array with {expected_depth} dimensions. The expected nesting format is {expected_format}. Got {data.ndim} dimensions."
)
elif expected_coord_size is not None:
if data.shape[-1] != expected_coord_size:
raise ValueError(
f"Input {input_name} must be a tensor/array with {expected_coord_size} as the last dimension, got {data.shape[-1]}."
)
return self._convert_to_nested_list(data, expected_depth)
# Handle nested lists
if isinstance(data, list):
current_depth = self._get_nesting_level(data)
if current_depth != expected_depth:
raise ValueError(
f"Input {input_name} must be a nested list with {expected_depth} levels. The expected nesting format is {expected_format}. Got {current_depth} levels."
)
return self._convert_to_nested_list(data, expected_depth)
def _normalize_tensor_coordinates(self, tensor, original_sizes, is_bounding_box=False, preserve_padding=False):
"""
Helper method to normalize coordinates in a tensor across multiple images.
Args:
tensor (`torch.Tensor`):
Input tensor with coordinates.
original_sizes (`list`):
Original image sizes.
is_bounding_box (`bool`, *optional*, defaults to `False`):
Whether coordinates are bounding boxes.
preserve_padding (`bool`, *optional*, defaults to `False`):
Whether to preserve padding values (for boxes).
"""
if preserve_padding:
# For boxes: avoid normalizing pad values
mask = tensor != self.point_pad_value
coord_mask = mask.all(dim=-1, keepdim=True)
for img_idx in range(len(original_sizes)):
if img_idx < tensor.shape[0]:
original_size = original_sizes[img_idx] if img_idx < len(original_sizes) else original_sizes[0]
normalized_coords = self._normalize_coordinates(
tensor[img_idx], original_size, is_bounding_box=is_bounding_box
)
if preserve_padding:
# Only update non-padded values
img_mask = coord_mask[img_idx]
tensor[img_idx] = torch.where(
img_mask.expand_as(tensor[img_idx]), normalized_coords, tensor[img_idx]
)
else:
tensor[img_idx] = normalized_coords
def post_process_semantic_segmentation(self, outputs, target_sizes=None, threshold=0.5):
"""
Converts the output of [`Sam3Model`] into semantic segmentation maps.
Args:
outputs ([`Sam3ImageSegmentationOutput`]):
Raw outputs of the model containing semantic_seg.
target_sizes (`list[tuple]` of length `batch_size`, *optional*):
List of tuples corresponding to the requested final size (height, width) of each prediction. If unset,
predictions will not be resized.
threshold (`float`, *optional*, defaults to 0.5):
Threshold for binarizing the semantic segmentation masks.
Returns:
semantic_segmentation: `list[torch.Tensor]` of length `batch_size`, where each item is a semantic
segmentation map of shape (height, width) corresponding to the target_sizes entry (if `target_sizes` is
specified). Each entry is a binary mask (0 or 1).
"""
return self.image_processor.post_process_semantic_segmentation(outputs, target_sizes, threshold)
def post_process_object_detection(self, outputs, threshold=0.3, target_sizes=None):
"""
Converts the raw output of [`Sam3Model`] into final bounding boxes in (top_left_x, top_left_y,
bottom_right_x, bottom_right_y) format. This is a convenience wrapper around the image processor method.
Args:
outputs ([`Sam3ImageSegmentationOutput`]):
Raw outputs of the model containing pred_boxes, pred_logits, and optionally presence_logits.
threshold (`float`, *optional*, defaults to 0.3):
Score threshold to keep object detection predictions.
target_sizes (`list[tuple[int, int]]`, *optional*):
List of tuples (`tuple[int, int]`) containing the target size `(height, width)` of each image in the
batch. If unset, predictions will not be resized.
Returns:
`list[dict]`: A list of dictionaries, each dictionary containing the following keys:
- **scores** (`torch.Tensor`): The confidence scores for each predicted box on the image.
- **boxes** (`torch.Tensor`): Image bounding boxes in (top_left_x, top_left_y, bottom_right_x,
bottom_right_y) format.
Example:
```python
>>> from transformers import AutoModel, AutoProcessor
>>> from PIL import Image
>>> import requests
>>> model = AutoModel.from_pretrained("facebook/sam3-base")
>>> processor = AutoProcessor.from_pretrained("facebook/sam3-base")
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> inputs = processor(images=image, text="cat", return_tensors="pt")
>>> outputs = model(**inputs)
>>> # Post-process to get bounding boxes
>>> results = processor.post_process_object_detection(outputs, threshold=0.3, target_sizes=[image.size[::-1]])
>>> boxes = results[0]["boxes"]
>>> scores = results[0]["scores"]
```
"""
return self.image_processor.post_process_object_detection(outputs, threshold, target_sizes)
def post_process_instance_segmentation(
self,
outputs,
threshold=0.3,
mask_threshold=0.5,
target_sizes=None,
):
"""
Converts the raw output of [`Sam3Model`] into instance segmentation predictions with bounding boxes and masks.
This is a convenience wrapper around the image processor method.
Args:
outputs ([`Sam3ImageSegmentationOutput`]):
Raw outputs of the model containing pred_boxes, pred_logits, pred_masks, and optionally
presence_logits.
threshold (`float`, *optional*, defaults to 0.3):
Score threshold to keep instance predictions.
mask_threshold (`float`, *optional*, defaults to 0.5):
Threshold for binarizing the predicted masks.
target_sizes (`list[tuple[int, int]]`, *optional*):
List of tuples (`tuple[int, int]`) containing the target size `(height, width)` of each image in the
batch. If unset, predictions will not be resized.
Returns:
`list[dict]`: A list of dictionaries, each dictionary containing the following keys:
- **scores** (`torch.Tensor`): The confidence scores for each predicted instance on the image.
- **boxes** (`torch.Tensor`): Image bounding boxes in (top_left_x, top_left_y, bottom_right_x,
bottom_right_y) format.
- **masks** (`torch.Tensor`): Binary segmentation masks for each instance, shape (num_instances,
height, width).
Example:
```python
>>> from transformers import AutoModel, AutoProcessor
>>> from PIL import Image
>>> import requests
>>> model = AutoModel.from_pretrained("facebook/sam3-base")
>>> processor = AutoProcessor.from_pretrained("facebook/sam3-base")
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> inputs = processor(images=image, text="cat", return_tensors="pt")
>>> outputs = model(**inputs)
>>> # Post-process to get instance segmentation
>>> results = processor.post_process_instance_segmentation(
... outputs, threshold=0.3, target_sizes=[image.size[::-1]]
... )
>>> masks = results[0]["masks"]
>>> boxes = results[0]["boxes"]
>>> scores = results[0]["scores"]
```
"""
return self.image_processor.post_process_instance_segmentation(
outputs, threshold, mask_threshold, target_sizes
)
__all__ = ["Sam3Processor"]
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/sam3/convert_sam3_to_hf.py | src/transformers/models/sam3/convert_sam3_to_hf.py | # Copyright 2025 The Meta AI Authors and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Convert SAM3 checkpoints from the original implementation to HuggingFace format.
Original repository: https://github.com/facebookresearch/segment-anything-3
"""
import argparse
import gc
import os
from typing import Optional
import regex as re
import torch
from transformers import CLIPTokenizerFast, Sam3Config, Sam3ImageProcessorFast, Sam3Model, Sam3Processor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# fmt: off
ORIGINAL_TO_CONVERTED_KEY_MAPPING = {
r"^sam3_model\.": r"",
# ============================================================================
# Vision Encoder - ViT Backbone
# ============================================================================
r"^backbone\.vision_backbone\.trunk\.": r"vision_encoder.backbone.",
r"^vision_encoder\.backbone\.pos_embed": r"vision_encoder.backbone.embeddings.position_embeddings",
r"^vision_encoder\.backbone\.patch_embed\.proj\.": r"vision_encoder.backbone.embeddings.patch_embeddings.projection.",
r"^vision_encoder\.backbone\.ln_pre\.": r"vision_encoder.backbone.layer_norm.",
r"^vision_encoder\.backbone\.blocks\.(\d+)\.norm1\.": r"vision_encoder.backbone.layers.\1.layer_norm1.",
r"^vision_encoder\.backbone\.blocks\.(\d+)\.norm2\.": r"vision_encoder.backbone.layers.\1.layer_norm2.",
r"^vision_encoder\.backbone\.blocks\.(\d+)\.attn\.qkv\.": r"vision_encoder.backbone.layers.\1.attention.qkv.",
r"^vision_encoder\.backbone\.blocks\.(\d+)\.attn\.proj\.": r"vision_encoder.backbone.layers.\1.attention.o_proj.",
r"^vision_encoder\.backbone\.blocks\.(\d+)\.attn\.freqs_cis": r"vision_encoder.backbone.layers.\1.rotary_emb.rope_embeddings",
r"^vision_encoder\.backbone\.blocks\.(\d+)\.mlp\.fc1\.": r"vision_encoder.backbone.layers.\1.mlp.fc1.",
r"^vision_encoder\.backbone\.blocks\.(\d+)\.mlp\.fc2\.": r"vision_encoder.backbone.layers.\1.mlp.fc2.",
# Vision Encoder - FPN Neck
r"^backbone\.vision_backbone\.neck\.fpn\.(\d+)\.": r"vision_encoder.neck.fpn_layers.\1.",
r"^backbone\.vision_backbone\.convs\.(\d+)\.dconv_2x2_0\.": r"vision_encoder.neck.fpn_layers.\1.scale_layers.0.",
r"^backbone\.vision_backbone\.convs\.(\d+)\.dconv_2x2_1\.": r"vision_encoder.neck.fpn_layers.\1.scale_layers.2.",
r"^backbone\.vision_backbone\.convs\.(\d+)\.dconv_2x2\.": r"vision_encoder.neck.fpn_layers.\1.scale_layers.0.",
r"^backbone\.vision_backbone\.convs\.(\d+)\.maxpool_2x2\.": r"vision_encoder.neck.fpn_layers.\1.scale_layers.0.",
r"^backbone\.vision_backbone\.convs\.(\d+)\.conv_1x1\.": r"vision_encoder.neck.fpn_layers.\1.proj1.",
r"^backbone\.vision_backbone\.convs\.(\d+)\.conv_3x3\.": r"vision_encoder.neck.fpn_layers.\1.proj2.",
# ============================================================================
# Text Encoder (CLIP)
# ============================================================================
r"^backbone\.language_backbone\.encoder\.": r"text_encoder.",
r"^text_encoder\.token_embedding\.": r"text_encoder.text_model.embeddings.token_embedding.",
r"^text_encoder\.positional_embedding": r"text_encoder.text_model.embeddings.position_embedding.weight",
r"^text_encoder\.ln_final\.": r"text_encoder.text_model.final_layer_norm.",
r"^text_encoder\.text_projection": r"text_encoder.text_projection.weight",
r"^text_encoder\.transformer\.resblocks\.(\d+)\.attn\.in_proj_": r"text_encoder.text_model.encoder.layers.\1.self_attn.in_proj_",
r"^text_encoder\.transformer\.resblocks\.(\d+)\.attn\.out_proj\.": r"text_encoder.text_model.encoder.layers.\1.self_attn.out_proj.",
r"^text_encoder\.transformer\.resblocks\.(\d+)\.ln_1\.": r"text_encoder.text_model.encoder.layers.\1.layer_norm1.",
r"^text_encoder\.transformer\.resblocks\.(\d+)\.ln_2\.": r"text_encoder.text_model.encoder.layers.\1.layer_norm2.",
r"^text_encoder\.transformer\.resblocks\.(\d+)\.mlp\.c_fc\.": r"text_encoder.text_model.encoder.layers.\1.mlp.fc1.",
r"^text_encoder\.transformer\.resblocks\.(\d+)\.mlp\.c_proj\.": r"text_encoder.text_model.encoder.layers.\1.mlp.fc2.",
r"^backbone\.language_backbone\.resizer\.": r"text_projection.",
# ============================================================================
# Geometry Encoder
# ============================================================================
r"^geometry_encoder\.encode\.(\d+)\.cross_attn_image\.out_proj\.": r"geometry_encoder.layers.\1.cross_attn.o_proj.",
r"^geometry_encoder\.encode\.(\d+)\.cross_attn_image\.": r"geometry_encoder.layers.\1.cross_attn.",
r"^geometry_encoder\.encode\.(\d+)\.self_attn\.out_proj\.": r"geometry_encoder.layers.\1.self_attn.o_proj.",
r"^geometry_encoder\.encode\.(\d+)\.self_attn\.": r"geometry_encoder.layers.\1.self_attn.",
r"^geometry_encoder\.encode\.(\d+)\.linear1\.": r"geometry_encoder.layers.\1.mlp.fc1.",
r"^geometry_encoder\.encode\.(\d+)\.linear2\.": r"geometry_encoder.layers.\1.mlp.fc2.",
r"^geometry_encoder\.encode\.(\d+)\.norm1\.": r"geometry_encoder.layers.\1.layer_norm1.",
r"^geometry_encoder\.encode\.(\d+)\.norm2\.": r"geometry_encoder.layers.\1.layer_norm2.",
r"^geometry_encoder\.encode\.(\d+)\.norm3\.": r"geometry_encoder.layers.\1.layer_norm3.",
r"^geometry_encoder\.img_pre_norm\.": r"geometry_encoder.vision_layer_norm.",
r"^geometry_encoder\.norm\.": r"geometry_encoder.prompt_layer_norm.",
r"^geometry_encoder\.encode_norm\.": r"geometry_encoder.output_layer_norm.",
# ============================================================================
# DETR Encoder
# ============================================================================
r"^transformer\.encoder\.layers\.(\d+)\.cross_attn_image\.out_proj\.": r"detr_encoder.layers.\1.cross_attn.o_proj.",
r"^transformer\.encoder\.layers\.(\d+)\.cross_attn_image\.": r"detr_encoder.layers.\1.cross_attn.",
r"^transformer\.encoder\.layers\.(\d+)\.self_attn\.out_proj\.": r"detr_encoder.layers.\1.self_attn.o_proj.",
r"^transformer\.encoder\.layers\.(\d+)\.self_attn\.": r"detr_encoder.layers.\1.self_attn.",
r"^transformer\.encoder\.layers\.(\d+)\.cross_attn\.out_proj\.": r"detr_encoder.layers.\1.cross_attn.o_proj.",
r"^transformer\.encoder\.layers\.(\d+)\.cross_attn\.": r"detr_encoder.layers.\1.cross_attn.",
r"^transformer\.encoder\.layers\.(\d+)\.linear1\.": r"detr_encoder.layers.\1.mlp.fc1.",
r"^transformer\.encoder\.layers\.(\d+)\.linear2\.": r"detr_encoder.layers.\1.mlp.fc2.",
r"^transformer\.encoder\.layers\.(\d+)\.norm1\.": r"detr_encoder.layers.\1.layer_norm1.",
r"^transformer\.encoder\.layers\.(\d+)\.norm2\.": r"detr_encoder.layers.\1.layer_norm2.",
r"^transformer\.encoder\.layers\.(\d+)\.norm3\.": r"detr_encoder.layers.\1.layer_norm3.",
# ============================================================================
# DETR Decoder
# ============================================================================
r"^transformer\.decoder\.query_embed\.": r"detr_decoder.query_embed.",
r"^transformer\.decoder\.reference_points\.": r"detr_decoder.reference_points.",
r"^transformer\.decoder\.instance_query_embed\.": r"detr_decoder.instance_query_embed.",
r"^transformer\.decoder\.instance_reference_points\.": r"detr_decoder.instance_reference_points.",
r"^transformer\.decoder\.presence_token\.": r"detr_decoder.presence_token.",
r"^transformer\.decoder\.presence_token_head\.layers\.0\.": r"detr_decoder.presence_head.layer1.",
r"^transformer\.decoder\.presence_token_head\.layers\.1\.": r"detr_decoder.presence_head.layer2.",
r"^transformer\.decoder\.presence_token_head\.layers\.2\.": r"detr_decoder.presence_head.layer3.",
r"^transformer\.decoder\.presence_token_out_norm\.": r"detr_decoder.presence_layer_norm.",
r"^transformer\.decoder\.norm\.": r"detr_decoder.output_layer_norm.",
r"^transformer\.decoder\.bbox_embed\.layers\.0\.": r"detr_decoder.box_head.layer1.",
r"^transformer\.decoder\.bbox_embed\.layers\.1\.": r"detr_decoder.box_head.layer2.",
r"^transformer\.decoder\.bbox_embed\.layers\.2\.": r"detr_decoder.box_head.layer3.",
r"^transformer\.decoder\.instance_bbox_embed\.layers\.0\.": r"detr_decoder.instance_box_head.layer1.",
r"^transformer\.decoder\.instance_bbox_embed\.layers\.1\.": r"detr_decoder.instance_box_head.layer2.",
r"^transformer\.decoder\.instance_bbox_embed\.layers\.2\.": r"detr_decoder.instance_box_head.layer3.",
r"^transformer\.decoder\.ref_point_head\.layers\.0\.": r"detr_decoder.ref_point_head.layer1.",
r"^transformer\.decoder\.ref_point_head\.layers\.1\.": r"detr_decoder.ref_point_head.layer2.",
r"^transformer\.decoder\.boxRPB_embed_x\.layers\.0\.": r"detr_decoder.box_rpb_embed_x.layer1.",
r"^transformer\.decoder\.boxRPB_embed_x\.layers\.1\.": r"detr_decoder.box_rpb_embed_x.layer2.",
r"^transformer\.decoder\.boxRPB_embed_y\.layers\.0\.": r"detr_decoder.box_rpb_embed_y.layer1.",
r"^transformer\.decoder\.boxRPB_embed_y\.layers\.1\.": r"detr_decoder.box_rpb_embed_y.layer2.",
r"^transformer\.decoder\.layers\.(\d+)\.self_attn\.out_proj\.": r"detr_decoder.layers.\1.self_attn.o_proj.",
r"^transformer\.decoder\.layers\.(\d+)\.self_attn\.": r"detr_decoder.layers.\1.self_attn.",
r"^transformer\.decoder\.layers\.(\d+)\.ca_text\.out_proj\.": r"detr_decoder.layers.\1.text_cross_attn.o_proj.",
r"^transformer\.decoder\.layers\.(\d+)\.ca_text\.": r"detr_decoder.layers.\1.text_cross_attn.",
r"^transformer\.decoder\.layers\.(\d+)\.cross_attn\.out_proj\.": r"detr_decoder.layers.\1.vision_cross_attn.o_proj.",
r"^transformer\.decoder\.layers\.(\d+)\.cross_attn\.": r"detr_decoder.layers.\1.vision_cross_attn.",
r"^transformer\.decoder\.layers\.(\d+)\.linear1\.": r"detr_decoder.layers.\1.mlp.fc1.",
r"^transformer\.decoder\.layers\.(\d+)\.linear2\.": r"detr_decoder.layers.\1.mlp.fc2.",
r"^transformer\.decoder\.layers\.(\d+)\.norm1\.": r"detr_decoder.layers.\1.vision_cross_attn_layer_norm.",
r"^transformer\.decoder\.layers\.(\d+)\.catext_norm\.": r"detr_decoder.layers.\1.text_cross_attn_layer_norm.",
r"^transformer\.decoder\.layers\.(\d+)\.norm2\.": r"detr_decoder.layers.\1.self_attn_layer_norm.",
r"^transformer\.decoder\.layers\.(\d+)\.norm3\.": r"detr_decoder.layers.\1.mlp_layer_norm.",
# ============================================================================
# Dot Product Scoring
# ============================================================================
r"^dot_prod_scoring\.prompt_mlp\.layers\.0\.": r"dot_product_scoring.text_mlp.layer1.",
r"^dot_prod_scoring\.prompt_mlp\.layers\.1\.": r"dot_product_scoring.text_mlp.layer2.",
r"^dot_prod_scoring\.prompt_mlp\.out_norm\.": r"dot_product_scoring.text_mlp_out_norm.",
r"^dot_prod_scoring\.prompt_proj\.": r"dot_product_scoring.text_proj.",
r"^dot_prod_scoring\.hs_proj\.": r"dot_product_scoring.query_proj.",
# ============================================================================
# Mask Decoder
# ============================================================================
r"^segmentation_head\.pixel_decoder\.conv_layers\.(\d+)\.": r"mask_decoder.pixel_decoder.conv_layers.\1.",
r"^segmentation_head\.pixel_decoder\.norms\.(\d+)\.": r"mask_decoder.pixel_decoder.norms.\1.",
r"^segmentation_head\.mask_embed\.layers\.(\d+)\.": r"mask_decoder.mask_embedder.layers.\1.",
r"^segmentation_head\.mask_predictor\.mask_embed\.layers\.(\d+)\.": r"mask_decoder.mask_embedder.layers.\1.",
r"^segmentation_head\.instance_seg_head\.": r"mask_decoder.instance_projection.",
r"^segmentation_head\.semantic_seg_head\.": r"mask_decoder.semantic_projection.",
r"^segmentation_head\.cross_attend_prompt\.out_proj\.": r"mask_decoder.prompt_cross_attn.o_proj.",
r"^segmentation_head\.cross_attend_prompt\.": r"mask_decoder.prompt_cross_attn.",
r"^segmentation_head\.cross_attn_norm\.": r"mask_decoder.prompt_cross_attn_norm.",
}
# fmt: on
def convert_old_keys_to_new_keys(state_dict_keys: list[str]) -> dict[str, str]:
"""
Convert original SAM3 checkpoint keys to HuggingFace format.
This function applies regex patterns to efficiently rename keys in bulk.
Args:
state_dict_keys: List of original checkpoint keys
Returns:
Dictionary mapping original keys to new keys
"""
output_dict = {}
if state_dict_keys is not None:
old_text = "\n".join(state_dict_keys)
new_text = old_text
# Apply all regex patterns
for pattern, replacement in ORIGINAL_TO_CONVERTED_KEY_MAPPING.items():
new_text = re.sub(pattern, replacement, new_text, flags=re.MULTILINE)
output_dict = dict(zip(old_text.split("\n"), new_text.split("\n")))
return output_dict
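# Illustrative example (comment only, not executed). The keys below are hypothetical, and the
# output assumes no earlier pattern in ORIGINAL_TO_CONVERTED_KEY_MAPPING matches them first:
#   >>> convert_old_keys_to_new_keys(["transformer.decoder.norm.weight", "geometry_encoder.encode.0.linear1.weight"])
#   {'transformer.decoder.norm.weight': 'detr_decoder.output_layer_norm.weight',
#    'geometry_encoder.encode.0.linear1.weight': 'geometry_encoder.layers.0.mlp.fc1.weight'}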
def split_qkv(state_dict: dict) -> dict:
"""
Split combined QKV weights/biases into separate Q, K, V projections.
Both the vision backbone and text encoder in the original SAM3 use combined QKV projections,
but the refactored model uses separate Q, K, V projections.
Args:
state_dict: State dictionary with combined QKV weights
Returns:
State dictionary with split Q, K, V weights
"""
# Handle vision backbone: .attention.qkv.* β .attention.{q,k,v}_proj.*
vision_keys_to_split = [key for key in state_dict.keys() if ".attention.qkv." in key]
for key in vision_keys_to_split:
qkv = state_dict.pop(key)
# Split into 3 equal chunks along dimension 0 (output dimension)
q, k, v = torch.chunk(qkv, 3, dim=0)
# Create new keys for q_proj, k_proj, v_proj
state_dict[key.replace(".qkv.", ".q_proj.")] = q
state_dict[key.replace(".qkv.", ".k_proj.")] = k
state_dict[key.replace(".qkv.", ".v_proj.")] = v
# Handle all attention layers with in_proj_* (text encoder, DETR decoder cross-attention, mask decoder)
# These use: .{attn_type}.in_proj_* β .{attn_type}.{q,k,v}_proj.*
in_proj_keys_to_split = [key for key in state_dict.keys() if ".in_proj_" in key]
for key in in_proj_keys_to_split:
in_proj = state_dict.pop(key)
# Split into 3 equal chunks along dimension 0 (output dimension)
q, k, v = torch.chunk(in_proj, 3, dim=0)
# Create new keys for q_proj, k_proj, v_proj
# Replace "in_proj_weight" with "q_proj.weight" (or "in_proj_bias" with "q_proj.bias")
if key.endswith("in_proj_weight"):
base_key = key.replace("in_proj_weight", "")
state_dict[base_key + "q_proj.weight"] = q
state_dict[base_key + "k_proj.weight"] = k
state_dict[base_key + "v_proj.weight"] = v
elif key.endswith("in_proj_bias"):
base_key = key.replace("in_proj_bias", "")
state_dict[base_key + "q_proj.bias"] = q
state_dict[base_key + "k_proj.bias"] = k
state_dict[base_key + "v_proj.bias"] = v
return state_dict
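# Minimal sketch of the splitting above (comment only; the tiny hidden size is made up):
#   an in_proj_weight of shape [3 * hidden, hidden], e.g. [6, 2], is chunked along dim 0 into
#   q_proj.weight, k_proj.weight and v_proj.weight of shape [2, 2] each, i.e.
#   q, k, v = torch.chunk(in_proj_weight, 3, dim=0)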
def load_original_state_dict(checkpoint_path: str) -> dict[str, torch.Tensor]:
"""Load the original SAM3 checkpoint."""
print(f"Loading original checkpoint from {checkpoint_path}")
checkpoint = torch.load(checkpoint_path, map_location="cpu")
# Handle different checkpoint formats
if "model" in checkpoint:
state_dict = checkpoint["model"]
elif "state_dict" in checkpoint:
state_dict = checkpoint["state_dict"]
else:
state_dict = checkpoint
print(f"Loaded {len(state_dict)} keys from checkpoint")
return state_dict
def get_sam3_config(
vision_config: Optional[dict] = None,
text_config: Optional[dict] = None,
) -> Sam3Config:
"""
Create SAM3 configuration.
Args:
vision_config: Optional vision encoder configuration overrides
text_config: Optional text encoder configuration overrides
Returns:
Sam3Config instance
"""
config = Sam3Config()
# Update with any provided overrides
if vision_config is not None:
for key, value in vision_config.items():
setattr(config.vision_config, key, value)
if text_config is not None:
# Text config is a CLIPTextConfig
for key, value in text_config.items():
setattr(config.text_config, key, value)
return config
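# Hedged usage sketch (comment only): the override keys below are hypothetical attribute names
# and would need to exist on the respective sub-configs:
#   >>> config = get_sam3_config(vision_config={"hidden_size": 1024}, text_config={"max_position_embeddings": 32})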
def convert_sam3_checkpoint(
checkpoint_path: str,
output_path: str,
config: Optional[Sam3Config] = None,
push_to_hub: bool = False,
repo_id: Optional[str] = None,
):
"""
Convert SAM3 checkpoint from original format to HuggingFace format.
Args:
checkpoint_path: Path to the original checkpoint file
output_path: Path to save the converted checkpoint
config: Optional Sam3Config to use (otherwise creates default)
push_to_hub: Whether to push the model to the Hub
repo_id: Repository ID for pushing to Hub
"""
# Create output directory
os.makedirs(output_path, exist_ok=True)
# Load configuration
if config is None:
config = get_sam3_config()
config.architectures = ["Sam3Model"]
config.save_pretrained(output_path)
print("Model config saved successfully")
# Load and convert weights
print("Loading original checkpoint...")
state_dict_old = load_original_state_dict(checkpoint_path)
print("Converting checkpoint keys...")
all_keys = list(state_dict_old.keys())
key_mapping = convert_old_keys_to_new_keys(all_keys)
# Create new state dict with converted keys
state_dict_new = {}
for old_key in all_keys:
new_key = key_mapping.get(old_key, old_key)
# Special handling: Strip cls token from vision backbone position embeddings
if new_key == "vision_encoder.backbone.embeddings.position_embeddings":
# Original has [1, 577, 1024] with cls token, but refactored expects [1, 576, 1024] without cls token
# Strip the first position (cls token position)
state_dict_new[new_key] = state_dict_old[old_key][:, 1:, :]
else:
state_dict_new[new_key] = state_dict_old[old_key]
del state_dict_old
gc.collect()
# Split combined QKV projections into separate Q, K, V projections
print("Splitting QKV projections...")
state_dict_new = split_qkv(state_dict_new)
# Transpose CLIP text projection (stored transposed in original)
if "text_encoder.text_projection.weight" in state_dict_new:
print("Transposing CLIP text_projection...")
state_dict_new["text_encoder.text_projection.weight"] = state_dict_new["text_encoder.text_projection.weight"].T
# Load into HF model
print("Loading weights into Sam3Model...")
model = Sam3Model(config)
missing_keys, unexpected_keys = model.load_state_dict(state_dict_new, strict=False)
if missing_keys:
logger.warning(f"Missing keys ({len(missing_keys)}):")
for key in missing_keys: # Show more keys for debugging
logger.warning(f" - {key}")
if unexpected_keys:
logger.warning(f"Unexpected keys ({len(unexpected_keys)}):")
for key in unexpected_keys: # Show more keys for debugging
logger.warning(f" - {key}")
# Note: Some missing/unexpected keys are expected:
# - vision_encoder.backbone.embeddings.patch_embeddings.projection.bias: patch projection has bias=False
# - geometry_encoder.mask_encoder.projection.*: this is nn.Identity() in original (no weights)
# - rotary_emb.rope_embeddings: pre-computed in original, computed on-the-fly in refactored
# - text_encoder.text_projection.bias: projection layer might not have bias
# Save model
print(f"Saving converted model to {output_path}")
model.save_pretrained(
output_path,
)
# Save processor
print("Creating and saving processor...")
image_processor = Sam3ImageProcessorFast()
tokenizer = CLIPTokenizerFast.from_pretrained("openai/clip-vit-base-patch32", max_length=32, model_max_length=32)
processor = Sam3Processor(image_processor=image_processor, tokenizer=tokenizer)
processor.save_pretrained(output_path)
# Push to hub if requested
if push_to_hub:
if repo_id is None:
raise ValueError("repo_id must be provided when push_to_hub=True")
print(f"Pushing model to Hub: {repo_id}")
model.push_to_hub(repo_id)
processor.push_to_hub(repo_id)
print("Conversion complete!")
print(f"Model saved successfully to: {output_path}")
# Cleanup
del state_dict_new, model
gc.collect()
# Verify the conversion by reloading
print("\nVerifying converted checkpoint can be loaded...")
try:
model = Sam3Model.from_pretrained(output_path)
param_count = sum(p.numel() for p in model.parameters())
print(f"β Successfully loaded model with {param_count:,} parameters")
del model
gc.collect()
except Exception as e:
print(f"β Failed to reload model: {e}")
print("\n" + "=" * 80)
print("Conversion finished!")
print("=" * 80)
print(f"Output directory: {output_path}")
print("\nTo test the model, you can run:")
print(">>> from transformers import Sam3Model")
print(f">>> model = Sam3Model.from_pretrained('{output_path}')")
print("=" * 80)
def main():
parser = argparse.ArgumentParser(description="Convert SAM3 checkpoint to HuggingFace format")
parser.add_argument(
"--checkpoint_path",
type=str,
required=True,
help="Path to the original SAM3 checkpoint file",
)
parser.add_argument(
"--output_path",
type=str,
required=True,
help="Path to save the converted checkpoint",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the converted model to the Hugging Face Hub",
)
parser.add_argument(
"--repo_id",
type=str,
default=None,
help="Repository ID for pushing to Hub (e.g., 'facebook/sam3-large')",
)
args = parser.parse_args()
convert_sam3_checkpoint(
checkpoint_path=args.checkpoint_path,
output_path=args.output_path,
push_to_hub=args.push_to_hub,
repo_id=args.repo_id,
)
if __name__ == "__main__":
main()
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/sam3/__init__.py | src/transformers/models/sam3/__init__.py | # Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure
if TYPE_CHECKING:
from .configuration_sam3 import *
from .modeling_sam3 import *
from .processing_sam3 import *
else:
import sys
_file = globals()["__file__"]
sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/barthez/tokenization_barthez.py | src/transformers/models/barthez/tokenization_barthez.py | # coding=utf-8
# Copyright 2020 Ecole Polytechnique and the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
"""Tokenization classes for the BARThez model."""
from typing import Optional, Union
from tokenizers import Regex, Tokenizer, decoders, normalizers, pre_tokenizers
from tokenizers.models import Unigram
from ...tokenization_python import AddedToken
from ...tokenization_utils_tokenizers import TokenizersBackend
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
SPIECE_UNDERLINE = "▁"
class BarthezTokenizer(TokenizersBackend):
"""
Adapted from [`CamembertTokenizer`] and [`BartTokenizer`]. Construct a "fast" BARThez tokenizer. Based on
[SentencePiece](https://github.com/google/sentencepiece).
This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
refer to this superclass for more information regarding those methods.
Args:
bos_token (`str`, *optional*, defaults to `"<s>"`):
The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the beginning of
sequence. The token used is the `cls_token`.
</Tip>
eos_token (`str`, *optional*, defaults to `"</s>"`):
The end of sequence token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the end of sequence.
The token used is the `sep_token`.
</Tip>
sep_token (`str`, *optional*, defaults to `"</s>"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
cls_token (`str`, *optional*, defaults to `"<s>"`):
The classifier token which is used when doing sequence classification (classification of the whole sequence
instead of per-token classification). It is the first token of the sequence when built with special tokens.
unk_token (`str`, *optional*, defaults to `"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
pad_token (`str`, *optional*, defaults to `"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
mask_token (`str`, *optional*, defaults to `"<mask>"`):
The token used for masking values. This is the token used when training this model with masked language
modeling. This is the token which the model will try to predict.
vocab_file (`str`, *optional*):
[SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that
contains the vocabulary necessary to instantiate a tokenizer.
vocab (`str`, `dict` or `list`, *optional*):
Custom vocabulary dictionary. If not provided, vocabulary is loaded from vocab_file.
add_prefix_space (`bool`, *optional*, defaults to `True`):
Whether or not to add an initial space to the input. This allows the leading word to be treated just like any
other word.
"""
vocab_files_names = VOCAB_FILES_NAMES
model_input_names = ["input_ids", "attention_mask"]
slow_tokenizer_class = None
def __init__(
self,
vocab: Optional[Union[str, dict, list]] = None,
bos_token="<s>",
eos_token="</s>",
sep_token="</s>",
cls_token="<s>",
unk_token="<unk>",
pad_token="<pad>",
mask_token="<mask>",
add_prefix_space=True,
**kwargs,
):
# Mask token behaves like a normal word, i.e. includes the space before it
mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
self.add_prefix_space = add_prefix_space
if vocab is not None:
self._vocab = vocab
else:
self._vocab = [
(str(pad_token), 0.0),
(str(unk_token), 0.0),
(str(cls_token), 0.0),
(str(sep_token), 0.0),
(str(mask_token), 0.0),
]
self._tokenizer = Tokenizer(Unigram(self._vocab, unk_id=3, byte_fallback=False))
self._tokenizer.normalizer = normalizers.Sequence(
[
normalizers.Replace(Regex(r"\s{2,}|[\n\r\t]"), " "),
normalizers.NFC(),
normalizers.Strip(left=False, right=True),
]
)
prepend_scheme = "always" if add_prefix_space else "never"
self._tokenizer.pre_tokenizer = pre_tokenizers.Metaspace(replacement="▁", prepend_scheme=prepend_scheme)
self._tokenizer.decoder = decoders.Metaspace(replacement="▁", prepend_scheme=prepend_scheme)
super().__init__(
bos_token=bos_token,
eos_token=eos_token,
unk_token=unk_token,
sep_token=sep_token,
cls_token=cls_token,
pad_token=pad_token,
mask_token=mask_token,
add_prefix_space=add_prefix_space,
**kwargs,
)
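# Hedged usage sketch (comment only): loading from the Hub assumes a checkpoint such as
# "moussaKam/barthez" provides the required tokenizer files:
#   >>> tokenizer = BarthezTokenizer.from_pretrained("moussaKam/barthez")
#   >>> tokenizer("Un exemple en français.")["input_ids"]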
__all__ = ["BarthezTokenizer"]
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/barthez/__init__.py | src/transformers/models/barthez/__init__.py | # Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure
if TYPE_CHECKING:
from .tokenization_barthez import *
else:
import sys
_file = globals()["__file__"]
sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/bit/configuration_bit.py | src/transformers/models/bit/configuration_bit.py | # coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BiT model configuration"""
from ...configuration_utils import PreTrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)
class BitConfig(BackboneConfigMixin, PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`BitModel`]. It is used to instantiate a BiT
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the BiT
[google/bit-50](https://huggingface.co/google/bit-50) architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
num_channels (`int`, *optional*, defaults to 3):
The number of input channels.
embedding_size (`int`, *optional*, defaults to 64):
Dimensionality (hidden size) for the embedding layer.
hidden_sizes (`list[int]`, *optional*, defaults to `[256, 512, 1024, 2048]`):
Dimensionality (hidden size) at each stage.
depths (`list[int]`, *optional*, defaults to `[3, 4, 6, 3]`):
Depth (number of layers) for each stage.
layer_type (`str`, *optional*, defaults to `"preactivation"`):
The layer to use, it can be either `"preactivation"` or `"bottleneck"`.
hidden_act (`str`, *optional*, defaults to `"relu"`):
The non-linear activation function in each block. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"`
are supported.
global_padding (`str`, *optional*):
Padding strategy to use for the convolutional layers. Can be either `"valid"`, `"same"`, or `None`.
num_groups (`int`, *optional*, defaults to 32):
Number of groups used for the `BitGroupNormActivation` layers.
drop_path_rate (`float`, *optional*, defaults to 0.0):
The drop path rate for the stochastic depth.
embedding_dynamic_padding (`bool`, *optional*, defaults to `False`):
Whether or not to make use of dynamic padding for the embedding layer.
output_stride (`int`, *optional*, defaults to 32):
The output stride of the model.
width_factor (`int`, *optional*, defaults to 1):
The width factor for the model.
out_features (`list[str]`, *optional*):
If used as backbone, list of features to output. Can be any of `"stem"`, `"stage1"`, `"stage2"`, etc.
(depending on how many stages the model has). If unset and `out_indices` is set, will default to the
corresponding stages. If unset and `out_indices` is unset, will default to the last stage. Must be in the
same order as defined in the `stage_names` attribute.
out_indices (`list[int]`, *optional*):
If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how
many stages the model has). If unset and `out_features` is set, will default to the corresponding stages.
If unset and `out_features` is unset, will default to the last stage. Must be in the
same order as defined in the `stage_names` attribute.
Example:
```python
>>> from transformers import BitConfig, BitModel
>>> # Initializing a BiT bit-50 style configuration
>>> configuration = BitConfig()
>>> # Initializing a model (with random weights) from the bit-50 style configuration
>>> model = BitModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```
"""
model_type = "bit"
layer_types = ["preactivation", "bottleneck"]
supported_padding = ["SAME", "VALID"]
def __init__(
self,
num_channels=3,
embedding_size=64,
hidden_sizes=[256, 512, 1024, 2048],
depths=[3, 4, 6, 3],
layer_type="preactivation",
hidden_act="relu",
global_padding=None,
num_groups=32,
drop_path_rate=0.0,
embedding_dynamic_padding=False,
output_stride=32,
width_factor=1,
out_features=None,
out_indices=None,
**kwargs,
):
super().__init__(**kwargs)
if layer_type not in self.layer_types:
raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
if global_padding is not None:
if global_padding.upper() in self.supported_padding:
global_padding = global_padding.upper()
else:
raise ValueError(f"Padding strategy {global_padding} not supported")
self.num_channels = num_channels
self.embedding_size = embedding_size
self.hidden_sizes = hidden_sizes
self.depths = depths
self.layer_type = layer_type
self.hidden_act = hidden_act
self.global_padding = global_padding
self.num_groups = num_groups
self.drop_path_rate = drop_path_rate
self.embedding_dynamic_padding = embedding_dynamic_padding
self.output_stride = output_stride
self.width_factor = width_factor
self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
self._out_features, self._out_indices = get_aligned_output_features_output_indices(
out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
)
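# Hedged example of the backbone feature selection described in the docstring (comment only):
# with the default depths [3, 4, 6, 3], stage_names is ["stem", "stage1", ..., "stage4"], so
#   >>> config = BitConfig(out_features=["stage2", "stage4"])
# would expose the second and last stages when the model is used as a backbone.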
__all__ = ["BitConfig"]
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/bit/image_processing_bit_fast.py | src/transformers/models/bit/image_processing_bit_fast.py | # coding=utf-8
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fast Image processor class for BiT."""
from ...image_processing_utils_fast import BaseImageProcessorFast
from ...image_utils import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, PILImageResampling
from ...utils import auto_docstring
@auto_docstring
class BitImageProcessorFast(BaseImageProcessorFast):
resample = PILImageResampling.BICUBIC
image_mean = OPENAI_CLIP_MEAN
image_std = OPENAI_CLIP_STD
size = {"shortest_edge": 224}
default_to_square = False
crop_size = {"height": 224, "width": 224}
rescale_factor = 1 / 255
do_resize = True
do_center_crop = True
do_rescale = True
do_normalize = True
do_convert_rgb = True
__all__ = ["BitImageProcessorFast"]
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/bit/convert_bit_to_pytorch.py | src/transformers/models/bit/convert_bit_to_pytorch.py | # coding=utf-8
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert BiT checkpoints from the timm library."""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_config(model_name):
repo_id = "huggingface/label-files"
filename = "imagenet-1k-id2label.json"
id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
id2label = {int(k): v for k, v in id2label.items()}
label2id = {v: k for k, v in id2label.items()}
conv_layer = "std_conv" if "bit" in model_name else False
# note that when using BiT as backbone for ViT-hybrid checkpoints,
# one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
# config.conv_layer = "std_conv_same"
config = BitConfig(
conv_layer=conv_layer,
num_labels=1000,
id2label=id2label,
label2id=label2id,
)
return config
def rename_key(name):
if "stem.conv" in name:
name = name.replace("stem.conv", "bit.embedder.convolution")
if "blocks" in name:
name = name.replace("blocks", "layers")
if "head.fc" in name:
name = name.replace("head.fc", "classifier.1")
if name.startswith("norm"):
name = "bit." + name
if "bit" not in name and "classifier" not in name:
name = "bit.encoder." + name
return name
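# Illustrative renamings (comment only; the timm key names are assumptions about the original
# checkpoint layout):
#   "stem.conv.weight"               -> "bit.embedder.convolution.weight"
#   "stages.0.blocks.0.conv1.weight" -> "bit.encoder.stages.0.layers.0.conv1.weight"
#   "head.fc.weight"                 -> "classifier.1.weight"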
# We will verify our results on an image of cute cats
def prepare_img():
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
im = Image.open(requests.get(url, stream=True).raw)
return im
@torch.no_grad()
def convert_bit_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
"""
Copy/paste/tweak model's weights to our BiT structure.
"""
# define default BiT configuration
config = get_config(model_name)
# load original model from timm
timm_model = create_model(model_name, pretrained=True)
timm_model.eval()
# load state_dict of original model
state_dict = timm_model.state_dict()
for key in state_dict.copy():
val = state_dict.pop(key)
state_dict[rename_key(key)] = val.squeeze() if "head" in key else val
# load HuggingFace model
model = BitForImageClassification(config)
model.eval()
model.load_state_dict(state_dict)
# create image processor
transform = create_transform(**resolve_data_config({}, model=timm_model))
timm_transforms = transform.transforms
pillow_resamplings = {
"bilinear": PILImageResampling.BILINEAR,
"bicubic": PILImageResampling.BICUBIC,
"nearest": PILImageResampling.NEAREST,
}
processor = BitImageProcessor(
do_resize=True,
size={"shortest_edge": timm_transforms[0].size},
resample=pillow_resamplings[timm_transforms[0].interpolation.value],
do_center_crop=True,
crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
do_normalize=True,
image_mean=timm_transforms[-1].mean.tolist(),
image_std=timm_transforms[-1].std.tolist(),
)
image = prepare_img()
timm_pixel_values = transform(image).unsqueeze(0)
pixel_values = processor(image, return_tensors="pt").pixel_values
# verify pixel values
assert torch.allclose(timm_pixel_values, pixel_values)
# verify logits
with torch.no_grad():
outputs = model(pixel_values)
logits = outputs.logits
print("Logits:", logits[0, :3])
print("Predicted class:", model.config.id2label[logits.argmax(-1).item()])
timm_logits = timm_model(pixel_values)
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
print("Looks ok!")
if pytorch_dump_folder_path is not None:
Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
print(f"Saving model {model_name} and processor to {pytorch_dump_folder_path}")
model.save_pretrained(pytorch_dump_folder_path)
processor.save_pretrained(pytorch_dump_folder_path)
if push_to_hub:
print(f"Pushing model {model_name} and processor to the hub")
model.push_to_hub(f"ybelkada/{model_name}")
processor.push_to_hub(f"ybelkada/{model_name}")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="resnetv2_50x1_bitm",
type=str,
help="Name of the BiT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model to the hub.",
)
args = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/bit/image_processing_bit.py | src/transformers/models/bit/image_processing_bit.py | # coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Image processor class for BiT."""
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
convert_to_rgb,
get_resize_output_image_size,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
infer_channel_dimension_format,
is_scaled_image,
make_flat_list_of_images,
to_numpy_array,
valid_images,
validate_preprocess_arguments,
)
from ...utils import TensorType, filter_out_non_signature_kwargs, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class BitImageProcessor(BaseImageProcessor):
r"""
Constructs a BiT image processor.
Args:
do_resize (`bool`, *optional*, defaults to `True`):
Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by
`do_resize` in the `preprocess` method.
size (`dict[str, int]`, *optional*, defaults to `{"shortest_edge": 224}`):
Size of the image after resizing. The shortest edge of the image is resized to size["shortest_edge"], with
the longest edge resized to keep the input aspect ratio. Can be overridden by `size` in the `preprocess`
method.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
Resampling filter to use if resizing the image. Can be overridden by `resample` in the `preprocess` method.
do_center_crop (`bool`, *optional*, defaults to `True`):
Whether to center crop the image to the specified `crop_size`. Can be overridden by `do_center_crop` in the
`preprocess` method.
crop_size (`dict[str, int]`, *optional*, defaults to `{"height": 224, "width": 224}`):
Size of the output image after applying `center_crop`. Can be overridden by `crop_size` in the `preprocess`
method.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by `do_rescale` in
the `preprocess` method.
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in the `preprocess`
method.
do_normalize (`bool`, *optional*, defaults to `True`):
Whether to normalize the image. Can be overridden by `do_normalize` in the `preprocess` method.
image_mean (`float` or `list[float]`, *optional*, defaults to `OPENAI_CLIP_MEAN`):
Mean to use if normalizing the image. This is a float or list of floats the length of the number of
channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
image_std (`float` or `list[float]`, *optional*, defaults to `OPENAI_CLIP_STD`):
Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
do_convert_rgb (`bool`, *optional*, defaults to `True`):
Whether to convert the image to RGB.
"""
model_input_names = ["pixel_values"]
def __init__(
self,
do_resize: bool = True,
size: Optional[dict[str, int]] = None,
resample: PILImageResampling = PILImageResampling.BICUBIC,
do_center_crop: bool = True,
crop_size: Optional[dict[str, int]] = None,
do_rescale: bool = True,
rescale_factor: Union[int, float] = 1 / 255,
do_normalize: bool = True,
image_mean: Optional[Union[float, list[float]]] = None,
image_std: Optional[Union[float, list[float]]] = None,
do_convert_rgb: bool = True,
**kwargs,
) -> None:
super().__init__(**kwargs)
size = size if size is not None else {"shortest_edge": 224}
size = get_size_dict(size, default_to_square=False)
crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")
self.do_resize = do_resize
self.size = size
self.resample = resample
self.do_center_crop = do_center_crop
self.crop_size = crop_size
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.do_normalize = do_normalize
self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
self.do_convert_rgb = do_convert_rgb
# Copied from transformers.models.clip.image_processing_clip.CLIPImageProcessor.resize
def resize(
self,
image: np.ndarray,
size: dict[str, int],
resample: PILImageResampling = PILImageResampling.BICUBIC,
data_format: Optional[Union[str, ChannelDimension]] = None,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
**kwargs,
) -> np.ndarray:
"""
Resize an image. The shortest edge of the image is resized to size["shortest_edge"], with the longest edge
resized to keep the input aspect ratio.
Args:
image (`np.ndarray`):
Image to resize.
size (`dict[str, int]`):
Size of the output image.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
Resampling filter to use when resizing the image.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the image. If not provided, it will be the same as the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
"""
default_to_square = True
if "shortest_edge" in size:
size = size["shortest_edge"]
default_to_square = False
elif "height" in size and "width" in size:
size = (size["height"], size["width"])
else:
raise ValueError("Size must contain either 'shortest_edge' or 'height' and 'width'.")
output_size = get_resize_output_image_size(
image,
size=size,
default_to_square=default_to_square,
input_data_format=input_data_format,
)
return resize(
image,
size=output_size,
resample=resample,
data_format=data_format,
input_data_format=input_data_format,
**kwargs,
)
@filter_out_non_signature_kwargs()
def preprocess(
self,
images: ImageInput,
do_resize: Optional[bool] = None,
size: Optional[dict[str, int]] = None,
resample: Optional[PILImageResampling] = None,
do_center_crop: Optional[bool] = None,
crop_size: Optional[int] = None,
do_rescale: Optional[bool] = None,
rescale_factor: Optional[float] = None,
do_normalize: Optional[bool] = None,
image_mean: Optional[Union[float, list[float]]] = None,
image_std: Optional[Union[float, list[float]]] = None,
do_convert_rgb: Optional[bool] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
) -> PIL.Image.Image:
"""
Preprocess an image or batch of images.
Args:
images (`ImageInput`):
Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
passing in images with pixel values between 0 and 1, set `do_rescale=False`.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the image.
size (`dict[str, int]`, *optional*, defaults to `self.size`):
Size of the image after resizing. Shortest edge of the image is resized to size["shortest_edge"], with
the longest edge resized to keep the input aspect ratio.
resample (`int`, *optional*, defaults to `self.resample`):
Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
has an effect if `do_resize` is set to `True`.
do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
Whether to center crop the image.
crop_size (`dict[str, int]`, *optional*, defaults to `self.crop_size`):
Size of the center crop. Only has an effect if `do_center_crop` is set to `True`.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image.
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
Whether to normalize the image.
image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`.
image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to
`True`.
do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
Whether to convert the image to RGB.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- Unset: Use the channel dimension format of the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
"""
do_resize = do_resize if do_resize is not None else self.do_resize
size = size if size is not None else self.size
size = get_size_dict(size, param_name="size", default_to_square=False)
resample = resample if resample is not None else self.resample
do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
crop_size = crop_size if crop_size is not None else self.crop_size
crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
do_rescale = do_rescale if do_rescale is not None else self.do_rescale
rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
do_normalize = do_normalize if do_normalize is not None else self.do_normalize
image_mean = image_mean if image_mean is not None else self.image_mean
image_std = image_std if image_std is not None else self.image_std
do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
images = make_flat_list_of_images(images)
if not valid_images(images):
raise ValueError("Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor")
validate_preprocess_arguments(
do_rescale=do_rescale,
rescale_factor=rescale_factor,
do_normalize=do_normalize,
image_mean=image_mean,
image_std=image_std,
do_center_crop=do_center_crop,
crop_size=crop_size,
do_resize=do_resize,
size=size,
resample=resample,
)
# PIL RGBA images are converted to RGB
if do_convert_rgb:
images = [convert_to_rgb(image) for image in images]
# All transformations expect numpy arrays.
images = [to_numpy_array(image) for image in images]
if do_rescale and is_scaled_image(images[0]):
logger.warning_once(
"It looks like you are trying to rescale already rescaled images. If the input"
" images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
)
if input_data_format is None:
# We assume that all images have the same channel dimension format.
input_data_format = infer_channel_dimension_format(images[0])
all_images = []
for image in images:
if do_resize:
image = self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)
if do_center_crop:
image = self.center_crop(image=image, size=crop_size, input_data_format=input_data_format)
if do_rescale:
image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
if do_normalize:
image = self.normalize(
image=image, mean=image_mean, std=image_std, input_data_format=input_data_format
)
all_images.append(image)
images = [
to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
for image in all_images
]
data = {"pixel_values": images}
return BatchFeature(data=data, tensor_type=return_tensors)
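# Hedged usage sketch (comment only; `image` is assumed to be a PIL image or numpy array):
#   >>> processor = BitImageProcessor()
#   >>> inputs = processor(images=image, return_tensors="pt")
#   >>> inputs["pixel_values"].shape   # (batch_size, 3, 224, 224) with the default crop_size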
__all__ = ["BitImageProcessor"]
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/bit/__init__.py | src/transformers/models/bit/__init__.py | # Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure
if TYPE_CHECKING:
from .configuration_bit import *
from .image_processing_bit import *
from .image_processing_bit_fast import *
from .modeling_bit import *
else:
import sys
_file = globals()["__file__"]
sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/bit/modeling_bit.py | src/transformers/models/bit/modeling_bit.py | # coding=utf-8
# Copyright 2022 Google AI and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BiT model. Also supports backbone for ViT hybrid."""
import collections
import math
from typing import Optional
import numpy as np
import torch
from torch import Tensor, nn
from ... import initialization as init
from ...activations import ACT2FN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import auto_docstring, logging
from ...utils.backbone_utils import BackboneMixin
from .configuration_bit import BitConfig
logger = logging.get_logger(__name__)
def get_padding_value(padding=None, kernel_size=7, stride=1, dilation=1) -> tuple[tuple, bool]:
r"""
Utility function to get the tuple padding value given the kernel_size and padding.
Args:
padding (Union[`str`, `int`], *optional*):
Padding value, can be either `"same"`, `"valid"`. If a different value is provided the default padding from
PyTorch is used.
kernel_size (`int`, *optional*, defaults to 7):
Kernel size of the convolution layers.
stride (`int`, *optional*, defaults to 1):
Stride value of the convolution layers.
dilation (`int`, *optional*, defaults to 1):
Dilation value of the convolution layers.
"""
dynamic = False
if padding is None:
padding = ((stride - 1) + dilation * (kernel_size - 1)) // 2
return padding, dynamic
if isinstance(padding, str):
# for any string padding, the padding will be calculated for you, one of three ways
padding = padding.lower()
if padding == "same":
# TF compatible 'SAME' padding, has a performance and GPU memory allocation impact
if stride == 1 and (dilation * (kernel_size - 1)) % 2 == 0:
# static case, no extra overhead
padding = ((stride - 1) + dilation * (kernel_size - 1)) // 2
else:
# dynamic 'SAME' padding, has runtime/GPU memory overhead
padding = 0
dynamic = True
elif padding == "valid":
# 'VALID' padding, same as padding=0
padding = 0
else:
# Default to PyTorch style 'same'-ish symmetric padding
padding = ((stride - 1) + dilation * (kernel_size - 1)) // 2
return padding, dynamic
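# Worked examples of the helper above (comment only):
#   get_padding_value(None, kernel_size=7, stride=2)   -> (3, False)  # symmetric static padding
#   get_padding_value("same", kernel_size=3, stride=1) -> (1, False)  # static 'SAME' is possible
#   get_padding_value("same", kernel_size=3, stride=2) -> (0, True)   # falls back to dynamic padding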
class WeightStandardizedConv2d(nn.Conv2d):
"""Conv2d with Weight Standardization. Used for ViT Hybrid model.
Paper: [Micro-Batch Training with Batch-Channel Normalization and Weight
Standardization](https://huggingface.co/papers/1903.10520)
"""
def __init__(
self,
in_channel,
out_channels,
kernel_size,
stride=1,
padding="SAME",
dilation=1,
groups=1,
bias=False,
eps=1e-6,
):
padding, is_dynamic = get_padding_value(padding, kernel_size, stride=stride, dilation=dilation)
super().__init__(
in_channel,
out_channels,
kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
groups=groups,
bias=bias,
)
if is_dynamic:
self.pad = DynamicPad2d(kernel_size, stride, dilation)
else:
self.pad = None
self.eps = eps
def forward(self, hidden_state):
if self.pad is not None:
hidden_state = self.pad(hidden_state)
weight = nn.functional.batch_norm(
self.weight.reshape(1, self.out_channels, -1), None, None, training=True, momentum=0.0, eps=self.eps
).reshape_as(self.weight)
hidden_state = nn.functional.conv2d(
hidden_state, weight, self.bias, self.stride, self.padding, self.dilation, self.groups
)
return hidden_state
class BitGroupNormActivation(nn.GroupNorm):
r"""
A module that combines group normalization with an activation function.
"""
def __init__(self, config, num_channels, eps=1e-5, affine=True, apply_activation=True):
super().__init__(config.num_groups, num_channels, eps=eps, affine=affine)
if apply_activation:
self.activation = ACT2FN[config.hidden_act]
else:
self.activation = nn.Identity()
def forward(self, hidden_state):
hidden_state = nn.functional.group_norm(hidden_state, self.num_groups, self.weight, self.bias, self.eps)
hidden_state = self.activation(hidden_state)
return hidden_state
class DynamicPad2d(nn.Module):
r"""
A module that wraps dynamic padding of any input, given the parameters of the convolutional layer and the input
hidden states.
"""
def __init__(self, kernel_size, stride, dilation, value=0):
super().__init__()
# Safety checkers
if isinstance(kernel_size, int):
kernel_size = (kernel_size, kernel_size)
if isinstance(stride, int):
stride = (stride, stride)
if isinstance(dilation, int):
dilation = (dilation, dilation)
self.kernel_size = kernel_size
self.stride = stride
self.dilation = dilation
self.value = value
def compute_padding(x, kernel_size, stride, dilation):
return max((math.ceil(x / stride) - 1) * stride + (kernel_size - 1) * dilation + 1 - x, 0)
self.compute_padding = compute_padding
def forward(self, input):
# Get width and height
input_height, input_width = input.size()[-2:]
# Compute the padding values
padding_height = self.compute_padding(input_height, self.kernel_size[0], self.stride[0], self.dilation[0])
padding_width = self.compute_padding(input_width, self.kernel_size[1], self.stride[1], self.dilation[1])
# apply pad
if padding_height > 0 or padding_width > 0:
input = nn.functional.pad(
input,
[
padding_width // 2,
padding_width - padding_width // 2,
padding_height // 2,
padding_height - padding_height // 2,
],
value=self.value,
)
return input
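# Worked example of the padding formula above (comment only): for an input width of 7 with
# kernel_size=3, stride=2, dilation=1, compute_padding gives
#   max((ceil(7 / 2) - 1) * 2 + (3 - 1) * 1 + 1 - 7, 0) = 2
# so one pixel of padding is added on each side of that dimension.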
class BitMaxPool2d(nn.MaxPool2d):
def __init__(
self,
kernel_size: int,
stride=None,
dilation=1,
ceil_mode=False,
padding=(0, 0),
padding_value=0,
use_dynamic_padding=True,
):
kernel_size = kernel_size if isinstance(kernel_size, collections.abc.Iterable) else (kernel_size, kernel_size)
stride = stride if isinstance(stride, collections.abc.Iterable) else (stride, stride)
dilation = dilation if isinstance(dilation, collections.abc.Iterable) else (dilation, dilation)
super().__init__(kernel_size, stride, padding, dilation, ceil_mode)
if use_dynamic_padding:
self.pad = DynamicPad2d(kernel_size, stride, dilation, padding_value)
else:
self.pad = nn.Identity()
def forward(self, hidden_states):
hidden_states = self.pad(hidden_states)
return nn.functional.max_pool2d(
hidden_states, self.kernel_size, self.stride, self.padding, self.dilation, self.ceil_mode
)
class BitEmbeddings(nn.Module):
"""
BiT Embeddings (stem) composed of a single aggressive convolution.
"""
def __init__(self, config: BitConfig):
super().__init__()
self.convolution = WeightStandardizedConv2d(
config.num_channels,
config.embedding_size,
kernel_size=7,
stride=2,
eps=1e-8,
padding=config.global_padding,
)
self.pooler = BitMaxPool2d(kernel_size=3, stride=2, use_dynamic_padding=config.embedding_dynamic_padding)
# Use the same padding strategy as convolutional layers
if config.global_padding is not None and config.global_padding.upper() == "SAME":
self.pad = nn.Identity()
else:
self.pad = nn.ConstantPad2d(padding=(1, 1, 1, 1), value=0.0)
if config.layer_type != "preactivation":
self.norm = BitGroupNormActivation(config, num_channels=config.embedding_size)
else:
self.norm = nn.Identity()
self.num_channels = config.num_channels
def forward(self, pixel_values: Tensor) -> Tensor:
num_channels = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
"Make sure that the channel dimension of the pixel values match with the one set in the configuration."
)
embedding = self.convolution(pixel_values)
embedding = self.pad(embedding)
embedding = self.norm(embedding)
embedding = self.pooler(embedding)
return embedding
# Copied from transformers.models.convnext.modeling_convnext.drop_path
def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
"""
Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
"""
if drop_prob == 0.0 or not training:
return input
keep_prob = 1 - drop_prob
shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
random_tensor.floor_() # binarize
output = input.div(keep_prob) * random_tensor
return output
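# Usage sketch (illustrative, assuming training mode): with drop_prob=0.1 every sample in the
# batch independently keeps its residual branch with probability 0.9; kept samples are rescaled
# by 1 / 0.9 so the expected value is unchanged, e.g.
#   out = drop_path(torch.ones(4, 8), drop_prob=0.1, training=True)
#   # each row of `out` is either all zeros or all ~1.111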
# Copied from transformers.models.beit.modeling_beit.BeitDropPath with Beit->Bit
class BitDropPath(nn.Module):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
def __init__(self, drop_prob: Optional[float] = None) -> None:
super().__init__()
self.drop_prob = drop_prob
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
return drop_path(hidden_states, self.drop_prob, self.training)
def extra_repr(self) -> str:
return f"p={self.drop_prob}"
def make_div(value, divisor=8):
min_value = divisor
new_value = max(min_value, int(value + divisor / 2) // divisor * divisor)
if new_value < 0.9 * value:
new_value += divisor
return new_value
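# Illustrative values (worked out by hand, not asserted anywhere in the original file):
#   make_div(16)  -> 16   (already a multiple of 8)
#   make_div(20)  -> 24   (rounded to the nearest multiple of 8)
#   make_div(7)   -> 8    (clamped to the divisor so a layer never ends up with 0 channels)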
class BitPreActivationBottleneckLayer(nn.Module):
"""Pre-activation (v2) bottleneck block.
Follows the implementation of "Identity Mappings in Deep Residual Networks":
https://github.com/KaimingHe/resnet-1k-layers/blob/master/resnet-pre-act.lua
Except it puts the stride on 3x3 conv when available.
"""
def __init__(
self,
config,
in_channels,
out_channels=None,
bottle_ratio=0.25,
stride=1,
dilation=1,
first_dilation=None,
groups=1,
drop_path_rate=0.0,
is_first_layer=False,
):
super().__init__()
first_dilation = first_dilation or dilation
out_channels = out_channels or in_channels
mid_channels = make_div(out_channels * bottle_ratio)
if is_first_layer:
self.downsample = BitDownsampleConv(
config,
in_channels,
out_channels,
stride=stride,
preact=True,
)
else:
self.downsample = None
self.norm1 = BitGroupNormActivation(config, in_channels)
self.conv1 = WeightStandardizedConv2d(in_channels, mid_channels, 1, eps=1e-8, padding=config.global_padding)
self.norm2 = BitGroupNormActivation(config, num_channels=mid_channels)
self.conv2 = WeightStandardizedConv2d(
mid_channels, mid_channels, 3, stride=stride, groups=groups, eps=1e-8, padding=config.global_padding
)
self.norm3 = BitGroupNormActivation(config, mid_channels)
self.conv3 = WeightStandardizedConv2d(mid_channels, out_channels, 1, eps=1e-8, padding=config.global_padding)
self.drop_path = BitDropPath(drop_path_rate) if drop_path_rate > 0 else nn.Identity()
def forward(self, hidden_states):
hidden_states_preact = self.norm1(hidden_states)
# shortcut branch
shortcut = hidden_states
if self.downsample is not None:
shortcut = self.downsample(hidden_states_preact)
# residual branch
hidden_states = self.conv1(hidden_states_preact)
hidden_states = self.conv2(self.norm2(hidden_states))
hidden_states = self.conv3(self.norm3(hidden_states))
hidden_states = self.drop_path(hidden_states)
return hidden_states + shortcut
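# Data-flow sketch (illustrative): in this pre-activation variant the GroupNorm + activation run
# before each convolution, so the block computes
#   conv3(norm3(conv2(norm2(conv1(norm1(x)))))) + shortcut
# where the shortcut is the raw input, or a 1x1 downsample of norm1(x) for the first layer of a
# stage, and no extra non-linearity is applied after the addition (unlike BitBottleneckLayer).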
class BitBottleneckLayer(nn.Module):
"""Non Pre-activation bottleneck block, equivalent to V1.5/V1b bottleneck. Used for ViT Hybrid."""
def __init__(
self,
config,
in_channels,
out_channels=None,
bottle_ratio=0.25,
stride=1,
dilation=1,
first_dilation=None,
groups=1,
drop_path_rate=0.0,
is_first_layer=False,
):
super().__init__()
first_dilation = first_dilation or dilation
out_channels = out_channels or in_channels
mid_chs = make_div(out_channels * bottle_ratio)
if is_first_layer:
self.downsample = BitDownsampleConv(
config,
in_channels,
out_channels,
stride=stride,
preact=False,
)
else:
self.downsample = None
self.conv1 = WeightStandardizedConv2d(in_channels, mid_chs, 1, eps=1e-8, padding=config.global_padding)
self.norm1 = BitGroupNormActivation(config, num_channels=mid_chs)
self.conv2 = WeightStandardizedConv2d(
mid_chs,
mid_chs,
3,
stride=stride,
dilation=first_dilation,
groups=groups,
eps=1e-8,
padding=config.global_padding,
)
self.norm2 = BitGroupNormActivation(config, num_channels=mid_chs)
self.conv3 = WeightStandardizedConv2d(mid_chs, out_channels, 1, eps=1e-8, padding=config.global_padding)
self.norm3 = BitGroupNormActivation(config, num_channels=out_channels, apply_activation=False)
self.drop_path = BitDropPath(drop_path_rate) if drop_path_rate > 0 else nn.Identity()
self.activation = ACT2FN[config.hidden_act]
def forward(self, hidden_states):
# shortcut branch
shortcut = hidden_states
if self.downsample is not None:
shortcut = self.downsample(hidden_states)
# residual
hidden_states = self.conv1(hidden_states)
hidden_states = self.norm1(hidden_states)
hidden_states = self.conv2(hidden_states)
hidden_states = self.norm2(hidden_states)
hidden_states = self.conv3(hidden_states)
hidden_states = self.norm3(hidden_states)
hidden_states = self.drop_path(hidden_states)
hidden_states = self.activation(hidden_states + shortcut)
return hidden_states
class BitDownsampleConv(nn.Module):
def __init__(
self,
config,
in_channels,
out_channels,
stride=1,
preact=True,
):
super().__init__()
self.conv = WeightStandardizedConv2d(
in_channels, out_channels, 1, stride=stride, eps=1e-8, padding=config.global_padding
)
self.norm = (
nn.Identity()
if preact
else BitGroupNormActivation(config, num_channels=out_channels, apply_activation=False)
)
def forward(self, x):
return self.norm(self.conv(x))
class BitStage(nn.Module):
"""
    A ResNet v2 stage composed of stacked layers.
"""
def __init__(
self,
config,
in_channels,
out_channels,
stride,
dilation,
depth,
bottle_ratio=0.25,
layer_dropout=None,
):
super().__init__()
first_dilation = 1 if dilation in (1, 2) else 2
# Get the layer type
if config.layer_type == "bottleneck":
layer_cls = BitBottleneckLayer
else:
layer_cls = BitPreActivationBottleneckLayer
prev_chs = in_channels
self.layers = nn.Sequential()
for layer_idx in range(depth):
# Get the current hyper-parameters
stride, drop_path_rate, is_first_layer = self._get_updated_hyperparameters(
layer_idx, stride, layer_dropout
)
self.layers.add_module(
str(layer_idx),
layer_cls(
config,
prev_chs,
out_channels,
stride=stride,
dilation=dilation,
bottle_ratio=bottle_ratio,
first_dilation=first_dilation,
drop_path_rate=drop_path_rate,
is_first_layer=is_first_layer,
),
)
prev_chs = out_channels
first_dilation = dilation
def _get_updated_hyperparameters(self, layer_idx, stride, layer_dropout):
r"""
Get the new hyper-parameters with respect to the previous ones and the index of the current layer.
"""
if layer_dropout:
drop_path_rate = layer_dropout[layer_idx]
else:
drop_path_rate = 0.0
if layer_idx != 0:
stride = 1
is_first_layer = layer_idx == 0
return stride, drop_path_rate, is_first_layer
def forward(self, input: Tensor) -> Tensor:
hidden_state = input
for _, layer in enumerate(self.layers):
hidden_state = layer(hidden_state)
return hidden_state
class BitEncoder(nn.Module):
def __init__(self, config: BitConfig):
super().__init__()
self.stages = nn.ModuleList([])
prev_chs = config.embedding_size
        # These need to stay hardcoded
current_stride = 4
dilation = 1
layer_dropouts = [
x.tolist()
for x in torch.Tensor(np.linspace(0, config.drop_path_rate, sum(config.depths))).split(config.depths)
]
for stage_idx, (current_depth, current_hidden_size, layer_dropout) in enumerate(
zip(config.depths, config.hidden_sizes, layer_dropouts)
):
# Get the updated hyper params
out_channels, stride, dilation = self._get_updated_hyperparameters(
stage_idx, current_stride, current_hidden_size, dilation, config
)
stage = BitStage(
config,
prev_chs,
out_channels,
stride=stride,
dilation=dilation,
depth=current_depth,
layer_dropout=layer_dropout,
)
prev_chs = out_channels
current_stride *= stride
self.stages.add_module(str(stage_idx), stage)
def _get_updated_hyperparameters(self, stage_idx, current_stride, current_hidden_size, dilation, config):
out_channels = make_div(current_hidden_size * config.width_factor)
stride = 1 if stage_idx == 0 else 2
if current_stride >= config.output_stride:
dilation *= stride
stride = 1
return out_channels, stride, dilation
def forward(
self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True
) -> BaseModelOutputWithNoAttention:
hidden_states = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
hidden_states = hidden_states + (hidden_state,)
hidden_state = stage_module(hidden_state)
if output_hidden_states:
hidden_states = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None)
return BaseModelOutputWithNoAttention(
last_hidden_state=hidden_state,
hidden_states=hidden_states,
)
@auto_docstring
class BitPreTrainedModel(PreTrainedModel):
config: BitConfig
base_model_prefix = "bit"
input_modalities = ("image",)
main_input_name = "pixel_values"
_no_split_modules = ["BitEmbeddings"]
@torch.no_grad()
def _init_weights(self, module):
if isinstance(module, nn.Conv2d):
init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
# copied from the `reset_parameters` method of `class Linear(Module)` in `torch`.
elif isinstance(module, nn.Linear):
init.kaiming_uniform_(module.weight, a=math.sqrt(5))
if module.bias is not None:
fan_in, _ = torch.nn.init._calculate_fan_in_and_fan_out(module.weight)
bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0
init.uniform_(module.bias, -bound, bound)
elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
init.constant_(module.weight, 1)
init.constant_(module.bias, 0)
if getattr(module, "running_mean", None) is not None:
init.zeros_(module.running_mean)
init.ones_(module.running_var)
init.zeros_(module.num_batches_tracked)
@auto_docstring
class BitModel(BitPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.config = config
self.embedder = BitEmbeddings(config)
self.encoder = BitEncoder(config)
self.norm = (
BitGroupNormActivation(config, num_channels=config.hidden_sizes[-1])
if config.layer_type == "preactivation"
else nn.Identity()
)
self.pooler = nn.AdaptiveAvgPool2d((1, 1))
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
pixel_values: Tensor,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
**kwargs,
) -> BaseModelOutputWithPoolingAndNoAttention:
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
embedding_output = self.embedder(pixel_values)
encoder_outputs = self.encoder(
embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
)
last_hidden_state = encoder_outputs[0]
last_hidden_state = self.norm(last_hidden_state)
pooled_output = self.pooler(last_hidden_state)
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=last_hidden_state,
pooler_output=pooled_output,
hidden_states=encoder_outputs.hidden_states,
)
@auto_docstring(
custom_intro="""
BiT Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
"""
)
class BitForImageClassification(BitPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.bit = BitModel(config)
# classification head
self.classifier = nn.Sequential(
nn.Flatten(),
nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
)
# initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
pixel_values: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
**kwargs,
) -> ImageClassifierOutputWithNoAttention:
r"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.bit(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
pooled_output = outputs.pooler_output if return_dict else outputs[1]
logits = self.classifier(pooled_output)
loss = None
if labels is not None:
loss = self.loss_function(labels, logits, self.config)
if not return_dict:
output = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
@auto_docstring(
custom_intro="""
BiT backbone, to be used with frameworks like DETR and MaskFormer.
"""
)
class BitBackbone(BitPreTrainedModel, BackboneMixin):
has_attentions = False
def __init__(self, config):
super().__init__(config)
super()._init_backbone(config)
self.bit = BitModel(config)
self.num_features = [config.embedding_size] + config.hidden_sizes
# initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
pixel_values: Tensor,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
**kwargs,
) -> BackboneOutput:
r"""
Examples:
```python
>>> from transformers import AutoImageProcessor, AutoBackbone
>>> import torch
>>> from PIL import Image
>>> import requests
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> processor = AutoImageProcessor.from_pretrained("google/bit-50")
>>> model = AutoBackbone.from_pretrained("google/bit-50")
>>> inputs = processor(image, return_tensors="pt")
>>> outputs = model(**inputs)
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
outputs = self.bit(pixel_values, output_hidden_states=True, return_dict=True)
hidden_states = outputs.hidden_states
feature_maps = ()
for idx, stage in enumerate(self.stage_names):
if stage in self.out_features:
feature_maps += (hidden_states[idx],)
if not return_dict:
output = (feature_maps,)
if output_hidden_states:
output += (outputs.hidden_states,)
return output
return BackboneOutput(
feature_maps=feature_maps,
hidden_states=outputs.hidden_states if output_hidden_states else None,
attentions=None,
)
__all__ = ["BitForImageClassification", "BitModel", "BitPreTrainedModel", "BitBackbone"]
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/dinov2_with_registers/configuration_dinov2_with_registers.py | src/transformers/models/dinov2_with_registers/configuration_dinov2_with_registers.py | # π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨
# This file was automatically generated from src/transformers/models/dinov2_with_registers/modular_dinov2_with_registers.py.
# Do NOT edit this file manually as any edits will be overwritten by the generation of
# the file from the modular. If any change should be done, please apply the change to the
# modular_dinov2_with_registers.py file directly. One of our CI enforces this.
# π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨
# coding=utf-8
# Copyright 2024 Meta Inc. and the HuggingFace Inc. team. All rights reserved.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ...configuration_utils import PreTrainedConfig
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
class Dinov2WithRegistersConfig(BackboneConfigMixin, PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`Dinov2WithRegistersModel`]. It is used to instantiate an
Dinov2WithRegisters model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the DINOv2 with Registers
[facebook/dinov2-with-registers-base](https://huggingface.co/facebook/dinov2-with-registers-base) architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
mlp_ratio (`int`, *optional*, defaults to 4):
Ratio of the hidden size of the MLPs relative to the `hidden_size`.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` are supported.
hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-06):
The epsilon used by the layer normalization layers.
image_size (`int`, *optional*, defaults to 224):
The size (resolution) of each image.
patch_size (`int`, *optional*, defaults to 16):
The size (resolution) of each patch.
num_channels (`int`, *optional*, defaults to 3):
The number of input channels.
qkv_bias (`bool`, *optional*, defaults to `True`):
Whether to add a bias to the queries, keys and values.
layerscale_value (`float`, *optional*, defaults to 1.0):
Initial value to use for layer scale.
drop_path_rate (`float`, *optional*, defaults to 0.0):
Stochastic depth rate per sample (when applied in the main path of residual layers).
use_swiglu_ffn (`bool`, *optional*, defaults to `False`):
Whether to use the SwiGLU feedforward neural network.
num_register_tokens (`int`, *optional*, defaults to 4):
Number of register tokens to use.
out_features (`list[str]`, *optional*):
If used as backbone, list of features to output. Can be any of `"stem"`, `"stage1"`, `"stage2"`, etc.
(depending on how many stages the model has). If unset and `out_indices` is set, will default to the
corresponding stages. If unset and `out_indices` is unset, will default to the last stage. Must be in the
same order as defined in the `stage_names` attribute.
out_indices (`list[int]`, *optional*):
If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how
many stages the model has). If unset and `out_features` is set, will default to the corresponding stages.
If unset and `out_features` is unset, will default to the last stage. Must be in the
same order as defined in the `stage_names` attribute.
apply_layernorm (`bool`, *optional*, defaults to `True`):
Whether to apply layer normalization to the feature maps in case the model is used as backbone.
reshape_hidden_states (`bool`, *optional*, defaults to `True`):
Whether to reshape the feature maps to 4D tensors of shape `(batch_size, hidden_size, height, width)` in
case the model is used as backbone. If `False`, the feature maps will be 3D tensors of shape `(batch_size,
seq_len, hidden_size)`.
Example:
```python
>>> from transformers import Dinov2WithRegistersConfig, Dinov2WithRegistersModel
>>> # Initializing a Dinov2WithRegisters base style configuration
>>> configuration = Dinov2WithRegistersConfig()
>>> # Initializing a model (with random weights) from the base style configuration
>>> model = Dinov2WithRegistersModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "dinov2_with_registers"
def __init__(
self,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
mlp_ratio=4,
hidden_act="gelu",
hidden_dropout_prob=0.0,
attention_probs_dropout_prob=0.0,
initializer_range=0.02,
layer_norm_eps=1e-6,
image_size=224,
patch_size=16,
num_channels=3,
qkv_bias=True,
layerscale_value=1.0,
drop_path_rate=0.0,
use_swiglu_ffn=False,
num_register_tokens=4,
out_features=None,
out_indices=None,
apply_layernorm=True,
reshape_hidden_states=True,
**kwargs,
):
super().__init__(**kwargs)
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.mlp_ratio = mlp_ratio
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.qkv_bias = qkv_bias
self.layerscale_value = layerscale_value
self.drop_path_rate = drop_path_rate
self.use_swiglu_ffn = use_swiglu_ffn
self.num_register_tokens = num_register_tokens
self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, num_hidden_layers + 1)]
self._out_features, self._out_indices = get_aligned_output_features_output_indices(
out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
)
self.apply_layernorm = apply_layernorm
self.reshape_hidden_states = reshape_hidden_states
__all__ = ["Dinov2WithRegistersConfig"]
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/dinov2_with_registers/modeling_dinov2_with_registers.py | src/transformers/models/dinov2_with_registers/modeling_dinov2_with_registers.py | # π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨
# This file was automatically generated from src/transformers/models/dinov2_with_registers/modular_dinov2_with_registers.py.
# Do NOT edit this file manually as any edits will be overwritten by the generation of
# the file from the modular. If any change should be done, please apply the change to the
# modular_dinov2_with_registers.py file directly. One of our CI enforces this.
# π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨
# coding=utf-8
# Copyright 2024 Meta Inc. and the HuggingFace Inc. team. All rights reserved.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections.abc
from collections.abc import Callable
from typing import Optional, Union
import torch
from torch import nn
from ... import initialization as init
from ...activations import ACT2FN
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import BackboneOutput, BaseModelOutput, BaseModelOutputWithPooling, ImageClassifierOutput
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...utils import TransformersKwargs, auto_docstring, torch_int
from ...utils.backbone_utils import BackboneMixin
from ...utils.generic import can_return_tuple, check_model_inputs
from .configuration_dinov2_with_registers import Dinov2WithRegistersConfig
class Dinov2WithRegistersPatchEmbeddings(nn.Module):
"""
This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial
`hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a
Transformer.
"""
def __init__(self, config):
super().__init__()
image_size, patch_size = config.image_size, config.patch_size
num_channels, hidden_size = config.num_channels, config.hidden_size
image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size)
patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.num_patches = num_patches
self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=patch_size)
def forward(self, pixel_values: torch.Tensor) -> torch.Tensor:
num_channels = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
"Make sure that the channel dimension of the pixel values match with the one set in the configuration."
f" Expected {self.num_channels} but got {num_channels}."
)
embeddings = self.projection(pixel_values).flatten(2).transpose(1, 2)
return embeddings
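# Shape walk-through (illustrative, assuming the default 224x224 input and patch_size=16):
#   pixel_values:                        (batch_size, 3, 224, 224)
#   after self.projection (Conv2d):      (batch_size, 768, 14, 14)
#   after .flatten(2).transpose(1, 2):   (batch_size, 196, 768)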
class Dinov2WithRegistersEmbeddings(nn.Module):
"""
Construct the CLS token, mask token, register tokens, position and patch embeddings.
"""
def __init__(self, config: Dinov2WithRegistersConfig) -> None:
super().__init__()
self.cls_token = nn.Parameter(torch.randn(1, 1, config.hidden_size))
self.mask_token = nn.Parameter(torch.zeros(1, config.hidden_size))
self.register_tokens = nn.Parameter(torch.zeros(1, config.num_register_tokens, config.hidden_size))
self.patch_embeddings = Dinov2WithRegistersPatchEmbeddings(config)
num_patches = self.patch_embeddings.num_patches
self.position_embeddings = nn.Parameter(torch.randn(1, num_patches + 1, config.hidden_size))
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.patch_size = config.patch_size
self.config = config
def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
"""
        This method interpolates the pre-trained position encodings so that the model can be used on higher-resolution
        images. This implementation supports torch.jit tracing while maintaining backwards compatibility
with the original implementation.
Adapted from:
- https://github.com/facebookresearch/dino/blob/main/vision_transformer.py
- https://github.com/facebookresearch/dinov2/blob/main/dinov2/models/vision_transformer.py
"""
num_patches = embeddings.shape[1] - 1
num_positions = self.position_embeddings.shape[1] - 1
# Skip interpolation for matching dimensions (unless tracing)
if not torch.jit.is_tracing() and num_patches == num_positions and height == width:
return self.position_embeddings
# Handle class token and patch embeddings separately
class_pos_embed = self.position_embeddings[:, 0]
patch_pos_embed = self.position_embeddings[:, 1:]
dim = embeddings.shape[-1]
# Calculate new dimensions
height = height // self.config.patch_size
width = width // self.config.patch_size
# Reshape for interpolation
sqrt_num_positions = torch_int(num_positions**0.5)
patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim)
patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2)
# Store original dtype for restoration after interpolation
target_dtype = patch_pos_embed.dtype
# Interpolate at float32 precision
patch_pos_embed = nn.functional.interpolate(
patch_pos_embed.to(dtype=torch.float32),
size=(torch_int(height), torch_int(width)), # Explicit size instead of scale_factor
mode="bicubic",
align_corners=False,
antialias=True,
).to(dtype=target_dtype)
# Validate output dimensions if not tracing
if not torch.jit.is_tracing():
if int(height) != patch_pos_embed.shape[-2] or int(width) != patch_pos_embed.shape[-1]:
raise ValueError("Width or height does not match with the interpolated position embeddings")
# Reshape back to original format
patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
# Combine class and patch embeddings
return torch.cat((class_pos_embed.unsqueeze(0), patch_pos_embed), dim=1)
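    # Illustrative example (not part of the original file): with the default config the model is
    # pre-trained with 14x14 = 196 patch positions (224x224 images, patch_size=16). Feeding a
    # 336x336 image yields 21x21 = 441 patches, so the 14x14 positional grid above is bicubically
    # resized to 21x21 and flattened back before being added to the patch embeddings.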
def forward(self, pixel_values: torch.Tensor, bool_masked_pos: Optional[torch.Tensor] = None) -> torch.Tensor:
batch_size, _, height, width = pixel_values.shape
target_dtype = self.patch_embeddings.projection.weight.dtype
embeddings = self.patch_embeddings(pixel_values.to(dtype=target_dtype))
if bool_masked_pos is not None:
embeddings = torch.where(
bool_masked_pos.unsqueeze(-1), self.mask_token.to(embeddings.dtype).unsqueeze(0), embeddings
)
# add the [CLS] token to the embedded patch tokens
cls_tokens = self.cls_token.expand(batch_size, -1, -1)
embeddings = torch.cat((cls_tokens, embeddings), dim=1)
# add positional encoding to each token
embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width)
# add register tokens
embeddings = torch.cat(
(embeddings[:, :1], self.register_tokens.expand(embeddings.shape[0], -1, -1), embeddings[:, 1:]), dim=1
)
embeddings = self.dropout(embeddings)
return embeddings
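# Resulting token layout (illustrative, default config): [CLS] + 4 register tokens + 196 patch
# tokens, i.e. a sequence length of 1 + config.num_register_tokens + (224 // 16) ** 2 = 201.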
def eager_attention_forward(
module: nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: Optional[torch.Tensor],
scaling: Optional[float] = None,
dropout: float = 0.0,
**kwargs: Unpack[TransformersKwargs],
):
if scaling is None:
scaling = query.size(-1) ** -0.5
# Take the dot product between "query" and "key" to get the raw attention scores.
attn_weights = torch.matmul(query, key.transpose(2, 3)) * scaling
if attention_mask is not None:
attention_mask = attention_mask[:, :, :, : key.shape[-2]]
attn_weights = attn_weights + attention_mask
attn_weights = nn.functional.softmax(attn_weights, dim=-1)
attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
attn_output = torch.matmul(attn_weights, value)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, attn_weights
class Dinov2WithRegistersSelfAttention(nn.Module):
def __init__(self, config: Dinov2WithRegistersConfig):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
raise ValueError(
f"The hidden size {config.hidden_size} is not a multiple of the number of attention "
f"heads {config.num_attention_heads}."
)
self.config = config
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.dropout_prob = config.attention_probs_dropout_prob
self.scaling = self.attention_head_size**-0.5
self.is_causal = False
self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
def forward(self, hidden_states: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
batch_size = hidden_states.shape[0]
new_shape = batch_size, -1, self.num_attention_heads, self.attention_head_size
key_layer = self.key(hidden_states).view(*new_shape).transpose(1, 2)
value_layer = self.value(hidden_states).view(*new_shape).transpose(1, 2)
query_layer = self.query(hidden_states).view(*new_shape).transpose(1, 2)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
context_layer, attention_probs = attention_interface(
self,
query_layer,
key_layer,
value_layer,
None,
is_causal=self.is_causal,
scaling=self.scaling,
dropout=0.0 if not self.training else self.dropout_prob,
)
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.reshape(new_context_layer_shape)
return context_layer, attention_probs
class Dinov2WithRegistersSelfOutput(nn.Module):
"""
The residual connection is defined in Dinov2WithRegistersLayer instead of here (as is the case with other models), due to the
layernorm applied before each block.
"""
def __init__(self, config: Dinov2WithRegistersConfig):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
return hidden_states
class Dinov2WithRegistersAttention(nn.Module):
def __init__(self, config: Dinov2WithRegistersConfig):
super().__init__()
self.attention = Dinov2WithRegistersSelfAttention(config)
self.output = Dinov2WithRegistersSelfOutput(config)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
self_attn_output, _ = self.attention(hidden_states)
output = self.output(self_attn_output, hidden_states)
return output
class Dinov2WithRegistersLayerScale(nn.Module):
def __init__(self, config) -> None:
super().__init__()
self.lambda1 = nn.Parameter(config.layerscale_value * torch.ones(config.hidden_size))
def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
return hidden_state * self.lambda1
def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
"""
Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
"""
if drop_prob == 0.0 or not training:
return input
keep_prob = 1 - drop_prob
shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
random_tensor.floor_() # binarize
output = input.div(keep_prob) * random_tensor
return output
class Dinov2WithRegistersDropPath(nn.Module):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
def __init__(self, drop_prob: Optional[float] = None) -> None:
super().__init__()
self.drop_prob = drop_prob
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
return drop_path(hidden_states, self.drop_prob, self.training)
def extra_repr(self) -> str:
return f"p={self.drop_prob}"
class Dinov2WithRegistersMLP(nn.Module):
def __init__(self, config) -> None:
super().__init__()
in_features = out_features = config.hidden_size
hidden_features = int(config.hidden_size * config.mlp_ratio)
self.fc1 = nn.Linear(in_features, hidden_features, bias=True)
if isinstance(config.hidden_act, str):
self.activation = ACT2FN[config.hidden_act]
else:
self.activation = config.hidden_act
self.fc2 = nn.Linear(hidden_features, out_features, bias=True)
def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
hidden_state = self.fc1(hidden_state)
hidden_state = self.activation(hidden_state)
hidden_state = self.fc2(hidden_state)
return hidden_state
class Dinov2WithRegistersSwiGLUFFN(nn.Module):
def __init__(self, config) -> None:
super().__init__()
in_features = out_features = config.hidden_size
hidden_features = int(config.hidden_size * config.mlp_ratio)
hidden_features = (int(hidden_features * 2 / 3) + 7) // 8 * 8
self.weights_in = nn.Linear(in_features, 2 * hidden_features, bias=True)
self.weights_out = nn.Linear(hidden_features, out_features, bias=True)
def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
hidden_state = self.weights_in(hidden_state)
x1, x2 = hidden_state.chunk(2, dim=-1)
hidden = nn.functional.silu(x1) * x2
return self.weights_out(hidden)
class Dinov2WithRegistersLayer(GradientCheckpointingLayer):
"""This corresponds to the Block class in the original implementation."""
def __init__(self, config: Dinov2WithRegistersConfig) -> None:
super().__init__()
self.norm1 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.attention = Dinov2WithRegistersAttention(config)
self.layer_scale1 = Dinov2WithRegistersLayerScale(config)
self.drop_path = (
Dinov2WithRegistersDropPath(config.drop_path_rate) if config.drop_path_rate > 0.0 else nn.Identity()
)
self.norm2 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
if config.use_swiglu_ffn:
self.mlp = Dinov2WithRegistersSwiGLUFFN(config)
else:
self.mlp = Dinov2WithRegistersMLP(config)
self.layer_scale2 = Dinov2WithRegistersLayerScale(config)
def forward(
self,
hidden_states: torch.Tensor,
) -> torch.Tensor:
hidden_states_norm = self.norm1(hidden_states)
self_attention_output = self.attention(hidden_states_norm)
self_attention_output = self.layer_scale1(self_attention_output)
# first residual connection
hidden_states = self.drop_path(self_attention_output) + hidden_states
# in Dinov2WithRegisters, layernorm is also applied after self-attention
layer_output = self.norm2(hidden_states)
layer_output = self.mlp(layer_output)
layer_output = self.layer_scale2(layer_output)
# second residual connection
layer_output = self.drop_path(layer_output) + hidden_states
return layer_output
class Dinov2WithRegistersEncoder(nn.Module):
def __init__(self, config: Dinov2WithRegistersConfig):
super().__init__()
self.config = config
self.layer = nn.ModuleList([Dinov2WithRegistersLayer(config) for _ in range(config.num_hidden_layers)])
self.gradient_checkpointing = False
def forward(self, hidden_states: torch.Tensor, output_hidden_states: bool = False) -> BaseModelOutput:
all_hidden_states = [hidden_states] if output_hidden_states else None
for i, layer_module in enumerate(self.layer):
hidden_states = layer_module(hidden_states)
if all_hidden_states:
all_hidden_states.append(hidden_states)
return BaseModelOutput(
last_hidden_state=hidden_states,
hidden_states=tuple(all_hidden_states) if all_hidden_states else None,
)
@auto_docstring
class Dinov2WithRegistersPreTrainedModel(PreTrainedModel):
config: Dinov2WithRegistersConfig
base_model_prefix = "dinov2_with_registers"
main_input_name = "pixel_values"
input_modalities = ("image",)
supports_gradient_checkpointing = True
_no_split_modules = ["Dinov2WithRegistersLayer"]
_supports_sdpa = True
_supports_flash_attn = True
_supports_flex_attn = True
_supports_attention_backend = True
_can_record_outputs = {
"attentions": Dinov2WithRegistersSelfAttention,
}
@torch.no_grad()
def _init_weights(self, module: Union[nn.Linear, nn.Conv2d, nn.LayerNorm]) -> None:
"""Initialize the weights"""
if isinstance(module, (nn.Linear, nn.Conv2d)):
init.trunc_normal_(module.weight, mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
init.zeros_(module.bias)
elif isinstance(module, nn.LayerNorm):
init.zeros_(module.bias)
init.ones_(module.weight)
elif isinstance(module, Dinov2WithRegistersEmbeddings):
init.trunc_normal_(module.position_embeddings, mean=0.0, std=self.config.initializer_range)
init.trunc_normal_(module.cls_token, mean=0.0, std=self.config.initializer_range)
init.zeros_(module.mask_token)
init.zeros_(module.register_tokens)
elif isinstance(module, Dinov2WithRegistersLayerScale): # noqa: F821
init.constant_(module.lambda1, self.config.layerscale_value)
@auto_docstring
class Dinov2WithRegistersModel(Dinov2WithRegistersPreTrainedModel):
def __init__(self, config: Dinov2WithRegistersConfig):
super().__init__(config)
self.config = config
self.embeddings = Dinov2WithRegistersEmbeddings(config)
self.encoder = Dinov2WithRegistersEncoder(config)
self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self) -> Dinov2WithRegistersPatchEmbeddings:
return self.embeddings.patch_embeddings
@check_model_inputs(tie_last_hidden_states=False)
@auto_docstring
def forward(
self,
pixel_values: Optional[torch.Tensor] = None,
bool_masked_pos: Optional[torch.Tensor] = None,
output_hidden_states: Optional[bool] = None,
**kwargs,
) -> BaseModelOutputWithPooling:
r"""
bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, sequence_length)`):
Boolean masked positions. Indicates which patches are masked (1) and which aren't (0). Only relevant for
pre-training.
"""
if output_hidden_states is None:
output_hidden_states = self.config.output_hidden_states
if pixel_values is None:
raise ValueError("You have to specify pixel_values")
embedding_output = self.embeddings(pixel_values, bool_masked_pos=bool_masked_pos)
encoder_outputs: BaseModelOutput = self.encoder(embedding_output, output_hidden_states=output_hidden_states)
sequence_output = encoder_outputs.last_hidden_state
sequence_output = self.layernorm(sequence_output)
pooled_output = sequence_output[:, 0, :]
return BaseModelOutputWithPooling(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
hidden_states=encoder_outputs.hidden_states,
)
@auto_docstring(
custom_intro="""
Dinov2WithRegisters Model transformer with an image classification head on top (a linear layer on top of the final hidden state
of the [CLS] token) e.g. for ImageNet.
"""
)
class Dinov2WithRegistersForImageClassification(Dinov2WithRegistersPreTrainedModel):
def __init__(self, config: Dinov2WithRegistersConfig) -> None:
super().__init__(config)
self.num_labels = config.num_labels
self.dinov2_with_registers = Dinov2WithRegistersModel(config)
# Classifier head
self.classifier = (
nn.Linear(config.hidden_size * 2, config.num_labels) if config.num_labels > 0 else nn.Identity()
)
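        # The classifier consumes the concatenation [CLS token ; mean of patch tokens],
        # hence the `config.hidden_size * 2` input features above.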
# Initialize weights and apply final processing
self.post_init()
@can_return_tuple
@auto_docstring
def forward(
self,
pixel_values: Optional[torch.Tensor] = None,
labels: Optional[torch.Tensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> ImageClassifierOutput:
r"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
outputs: BaseModelOutputWithPooling = self.dinov2_with_registers(pixel_values, **kwargs)
sequence_output = outputs.last_hidden_state # batch_size, sequence_length, hidden_size
cls_token = sequence_output[:, 0]
# cls and register tokens should not be included in patch tokens variable
patch_tokens = sequence_output[:, 1 + self.config.num_register_tokens :]
linear_input = torch.cat([cls_token, patch_tokens.mean(dim=1)], dim=1)
logits = self.classifier(linear_input)
loss = None
if labels is not None:
loss = self.loss_function(labels, logits, self.config, **kwargs)
return ImageClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@auto_docstring(
custom_intro="""
Dinov2WithRegisters backbone, to be used with frameworks like DETR and MaskFormer.
"""
)
class Dinov2WithRegistersBackbone(Dinov2WithRegistersPreTrainedModel, BackboneMixin):
def __init__(self, config):
super().__init__(config)
super()._init_backbone(config)
self.num_features = [config.hidden_size for _ in range(config.num_hidden_layers + 1)]
self.embeddings = Dinov2WithRegistersEmbeddings(config)
self.encoder = Dinov2WithRegistersEncoder(config)
self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.num_register_tokens = config.num_register_tokens
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self) -> Dinov2WithRegistersPatchEmbeddings:
return self.embeddings.patch_embeddings
@check_model_inputs
@auto_docstring
def forward(
self,
pixel_values: torch.Tensor,
output_hidden_states: Optional[bool] = None,
**kwargs,
) -> BackboneOutput:
r"""
Examples:
```python
>>> from transformers import AutoImageProcessor, AutoBackbone
>>> import torch
>>> from PIL import Image
>>> import requests
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> processor = AutoImageProcessor.from_pretrained("facebook/dinov2-with-registers-base")
>>> model = AutoBackbone.from_pretrained(
... "facebook/dinov2-with-registers-base", out_features=["stage2", "stage5", "stage8", "stage11"]
... )
>>> inputs = processor(image, return_tensors="pt")
>>> outputs = model(**inputs)
>>> feature_maps = outputs.feature_maps
>>> list(feature_maps[-1].shape)
[1, 768, 16, 16]
```"""
if output_hidden_states is None:
output_hidden_states = self.config.output_hidden_states
embedding_output = self.embeddings(pixel_values)
output: BaseModelOutput = self.encoder(embedding_output, output_hidden_states=True)
hidden_states = output.hidden_states
feature_maps = []
for stage, hidden_state in zip(self.stage_names, hidden_states):
if stage in self.out_features:
if self.config.apply_layernorm:
hidden_state = self.layernorm(hidden_state)
if self.config.reshape_hidden_states:
hidden_state = hidden_state[:, 1 + self.num_register_tokens :]
# this was actually a bug in the original implementation that we copied here,
                    # because normally the order is height, width
batch_size, _, height, width = pixel_values.shape
patch_size = self.config.patch_size
hidden_state = hidden_state.reshape(batch_size, height // patch_size, width // patch_size, -1)
hidden_state = hidden_state.permute(0, 3, 1, 2).contiguous()
feature_maps.append(hidden_state)
return BackboneOutput(
feature_maps=tuple(feature_maps),
hidden_states=hidden_states if output_hidden_states else None,
)
__all__ = [
"Dinov2WithRegistersPreTrainedModel",
"Dinov2WithRegistersModel",
"Dinov2WithRegistersForImageClassification",
"Dinov2WithRegistersBackbone",
]
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/dinov2_with_registers/__init__.py | src/transformers/models/dinov2_with_registers/__init__.py | # Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure
if TYPE_CHECKING:
from .configuration_dinov2_with_registers import *
from .modeling_dinov2_with_registers import *
else:
import sys
_file = globals()["__file__"]
sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/dinov2_with_registers/modular_dinov2_with_registers.py | src/transformers/models/dinov2_with_registers/modular_dinov2_with_registers.py | # coding=utf-8
# Copyright 2024 Meta Inc. and the HuggingFace Inc. team. All rights reserved.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional, Union
import torch
from torch import nn
from ....transformers.models.dinov2.modeling_dinov2 import (
Dinov2Backbone,
Dinov2Encoder,
Dinov2ForImageClassification,
Dinov2Model,
Dinov2PatchEmbeddings,
Dinov2PreTrainedModel,
)
from ... import initialization as init
from ...configuration_utils import PreTrainedConfig
from ...modeling_outputs import BackboneOutput, BaseModelOutput, BaseModelOutputWithPooling, ImageClassifierOutput
from ...processing_utils import Unpack
from ...utils import TransformersKwargs, logging, torch_int
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)
class Dinov2WithRegistersConfig(BackboneConfigMixin, PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`Dinov2WithRegistersModel`]. It is used to instantiate an
Dinov2WithRegisters model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the DINOv2 with Registers
[facebook/dinov2-with-registers-base](https://huggingface.co/facebook/dinov2-with-registers-base) architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
mlp_ratio (`int`, *optional*, defaults to 4):
Ratio of the hidden size of the MLPs relative to the `hidden_size`.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` are supported.
hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-06):
The epsilon used by the layer normalization layers.
image_size (`int`, *optional*, defaults to 224):
The size (resolution) of each image.
patch_size (`int`, *optional*, defaults to 16):
The size (resolution) of each patch.
num_channels (`int`, *optional*, defaults to 3):
The number of input channels.
qkv_bias (`bool`, *optional*, defaults to `True`):
Whether to add a bias to the queries, keys and values.
layerscale_value (`float`, *optional*, defaults to 1.0):
Initial value to use for layer scale.
drop_path_rate (`float`, *optional*, defaults to 0.0):
Stochastic depth rate per sample (when applied in the main path of residual layers).
use_swiglu_ffn (`bool`, *optional*, defaults to `False`):
Whether to use the SwiGLU feedforward neural network.
num_register_tokens (`int`, *optional*, defaults to 4):
Number of register tokens to use.
out_features (`list[str]`, *optional*):
If used as backbone, list of features to output. Can be any of `"stem"`, `"stage1"`, `"stage2"`, etc.
(depending on how many stages the model has). If unset and `out_indices` is set, will default to the
corresponding stages. If unset and `out_indices` is unset, will default to the last stage. Must be in the
same order as defined in the `stage_names` attribute.
out_indices (`list[int]`, *optional*):
If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how
many stages the model has). If unset and `out_features` is set, will default to the corresponding stages.
If unset and `out_features` is unset, will default to the last stage. Must be in the
same order as defined in the `stage_names` attribute.
apply_layernorm (`bool`, *optional*, defaults to `True`):
Whether to apply layer normalization to the feature maps in case the model is used as backbone.
reshape_hidden_states (`bool`, *optional*, defaults to `True`):
Whether to reshape the feature maps to 4D tensors of shape `(batch_size, hidden_size, height, width)` in
case the model is used as backbone. If `False`, the feature maps will be 3D tensors of shape `(batch_size,
seq_len, hidden_size)`.
Example:
```python
>>> from transformers import Dinov2WithRegistersConfig, Dinov2WithRegistersModel
>>> # Initializing a Dinov2WithRegisters base style configuration
>>> configuration = Dinov2WithRegistersConfig()
>>> # Initializing a model (with random weights) from the base style configuration
>>> model = Dinov2WithRegistersModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "dinov2_with_registers"
def __init__(
self,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
mlp_ratio=4,
hidden_act="gelu",
hidden_dropout_prob=0.0,
attention_probs_dropout_prob=0.0,
initializer_range=0.02,
layer_norm_eps=1e-6,
image_size=224,
patch_size=16,
num_channels=3,
qkv_bias=True,
layerscale_value=1.0,
drop_path_rate=0.0,
use_swiglu_ffn=False,
num_register_tokens=4,
out_features=None,
out_indices=None,
apply_layernorm=True,
reshape_hidden_states=True,
**kwargs,
):
super().__init__(**kwargs)
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.mlp_ratio = mlp_ratio
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.qkv_bias = qkv_bias
self.layerscale_value = layerscale_value
self.drop_path_rate = drop_path_rate
self.use_swiglu_ffn = use_swiglu_ffn
self.num_register_tokens = num_register_tokens
self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, num_hidden_layers + 1)]
self._out_features, self._out_indices = get_aligned_output_features_output_indices(
out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
)
self.apply_layernorm = apply_layernorm
self.reshape_hidden_states = reshape_hidden_states
class Dinov2WithRegistersPatchEmbeddings(Dinov2PatchEmbeddings):
pass
class Dinov2WithRegistersEmbeddings(nn.Module):
"""
Construct the CLS token, mask token, register tokens, position and patch embeddings.
"""
def __init__(self, config: Dinov2WithRegistersConfig) -> None:
super().__init__()
self.cls_token = nn.Parameter(torch.randn(1, 1, config.hidden_size))
self.mask_token = nn.Parameter(torch.zeros(1, config.hidden_size))
self.register_tokens = nn.Parameter(torch.zeros(1, config.num_register_tokens, config.hidden_size))
self.patch_embeddings = Dinov2WithRegistersPatchEmbeddings(config)
num_patches = self.patch_embeddings.num_patches
self.position_embeddings = nn.Parameter(torch.randn(1, num_patches + 1, config.hidden_size))
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.patch_size = config.patch_size
self.config = config
def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
"""
        This method interpolates the pre-trained position encodings so that the model can be used on higher-resolution
        images. This implementation supports torch.jit tracing while maintaining backwards compatibility
with the original implementation.
Adapted from:
- https://github.com/facebookresearch/dino/blob/main/vision_transformer.py
- https://github.com/facebookresearch/dinov2/blob/main/dinov2/models/vision_transformer.py
"""
num_patches = embeddings.shape[1] - 1
num_positions = self.position_embeddings.shape[1] - 1
# Skip interpolation for matching dimensions (unless tracing)
if not torch.jit.is_tracing() and num_patches == num_positions and height == width:
return self.position_embeddings
# Handle class token and patch embeddings separately
class_pos_embed = self.position_embeddings[:, 0]
patch_pos_embed = self.position_embeddings[:, 1:]
dim = embeddings.shape[-1]
# Calculate new dimensions
height = height // self.config.patch_size
width = width // self.config.patch_size
# Reshape for interpolation
sqrt_num_positions = torch_int(num_positions**0.5)
patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim)
patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2)
# Store original dtype for restoration after interpolation
target_dtype = patch_pos_embed.dtype
# Interpolate at float32 precision
patch_pos_embed = nn.functional.interpolate(
patch_pos_embed.to(dtype=torch.float32),
size=(torch_int(height), torch_int(width)), # Explicit size instead of scale_factor
mode="bicubic",
align_corners=False,
antialias=True,
).to(dtype=target_dtype)
# Validate output dimensions if not tracing
if not torch.jit.is_tracing():
if int(height) != patch_pos_embed.shape[-2] or int(width) != patch_pos_embed.shape[-1]:
raise ValueError("Width or height does not match with the interpolated position embeddings")
# Reshape back to original format
patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
# Combine class and patch embeddings
return torch.cat((class_pos_embed.unsqueeze(0), patch_pos_embed), dim=1)
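    # Shape walk-through (illustrative sketch, sizes assumed, not part of the original file): with
    # patch_size=16 and a pre-training resolution of 224, the stored table holds
    # num_positions = (224 // 16) ** 2 = 196 patch entries plus one CLS entry. For a 336x336 input,
    # `embeddings` carries num_patches = (336 // 16) ** 2 = 441 patch tokens, so the 14x14 grid of
    # position vectors is bicubically resized to 21x21, flattened back to (1, 441, dim), and
    # re-attached to the CLS position embedding.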
def forward(self, pixel_values: torch.Tensor, bool_masked_pos: Optional[torch.Tensor] = None) -> torch.Tensor:
batch_size, _, height, width = pixel_values.shape
target_dtype = self.patch_embeddings.projection.weight.dtype
embeddings = self.patch_embeddings(pixel_values.to(dtype=target_dtype))
if bool_masked_pos is not None:
embeddings = torch.where(
bool_masked_pos.unsqueeze(-1), self.mask_token.to(embeddings.dtype).unsqueeze(0), embeddings
)
# add the [CLS] token to the embedded patch tokens
cls_tokens = self.cls_token.expand(batch_size, -1, -1)
embeddings = torch.cat((cls_tokens, embeddings), dim=1)
# add positional encoding to each token
embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width)
# add register tokens
embeddings = torch.cat(
(embeddings[:, :1], self.register_tokens.expand(embeddings.shape[0], -1, -1), embeddings[:, 1:]), dim=1
)
embeddings = self.dropout(embeddings)
return embeddings
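# Token layout after Dinov2WithRegistersEmbeddings.forward (illustrative note, assuming the default
# num_register_tokens=4): the output sequence is
#   [CLS, reg_1, reg_2, reg_3, reg_4, patch_1, ..., patch_N]
# because the register tokens are spliced in between the CLS token (embeddings[:, :1]) and the patch
# tokens (embeddings[:, 1:]) after the positional encoding has been added; the register tokens
# themselves receive no positional encoding.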
class Dinov2WithRegistersEncoder(Dinov2Encoder):
pass
class Dinov2WithRegistersPreTrainedModel(Dinov2PreTrainedModel):
@torch.no_grad()
def _init_weights(self, module: Union[nn.Linear, nn.Conv2d, nn.LayerNorm]) -> None:
"""Initialize the weights"""
if isinstance(module, (nn.Linear, nn.Conv2d)):
init.trunc_normal_(module.weight, mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
init.zeros_(module.bias)
elif isinstance(module, nn.LayerNorm):
init.zeros_(module.bias)
init.ones_(module.weight)
elif isinstance(module, Dinov2WithRegistersEmbeddings):
init.trunc_normal_(module.position_embeddings, mean=0.0, std=self.config.initializer_range)
init.trunc_normal_(module.cls_token, mean=0.0, std=self.config.initializer_range)
init.zeros_(module.mask_token)
init.zeros_(module.register_tokens)
elif isinstance(module, Dinov2WithRegistersLayerScale): # noqa: F821
init.constant_(module.lambda1, self.config.layerscale_value)
class Dinov2WithRegistersModel(Dinov2Model):
pass
class Dinov2WithRegistersForImageClassification(Dinov2ForImageClassification):
def forward(
self,
pixel_values: Optional[torch.Tensor] = None,
labels: Optional[torch.Tensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> ImageClassifierOutput:
r"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
outputs: BaseModelOutputWithPooling = self.dinov2_with_registers(pixel_values, **kwargs)
sequence_output = outputs.last_hidden_state # batch_size, sequence_length, hidden_size
cls_token = sequence_output[:, 0]
# cls and register tokens should not be included in patch tokens variable
patch_tokens = sequence_output[:, 1 + self.config.num_register_tokens :]
linear_input = torch.cat([cls_token, patch_tokens.mean(dim=1)], dim=1)
logits = self.classifier(linear_input)
loss = None
if labels is not None:
loss = self.loss_function(labels, logits, self.config, **kwargs)
return ImageClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
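# Classifier input layout (illustrative, shapes assumed): with hidden_size=768 and
# num_register_tokens=4, `sequence_output` has shape (batch_size, 1 + 4 + num_patches, 768); the head
# concatenates the CLS token (batch_size, 768) with the mean of the patch tokens (batch_size, 768),
# so `linear_input` is (batch_size, 1536) and `self.classifier` maps 2 * hidden_size to num_labels.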
class Dinov2WithRegistersBackbone(Dinov2Backbone):
def __init__(self, config):
super().__init__(config)
super()._init_backbone(config)
self.num_register_tokens = config.num_register_tokens
self.num_features = [config.hidden_size for _ in range(config.num_hidden_layers + 1)]
self.embeddings = Dinov2WithRegistersEmbeddings(config)
self.encoder = Dinov2WithRegistersEncoder(config)
self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self) -> Dinov2WithRegistersPatchEmbeddings:
return self.embeddings.patch_embeddings
def forward(
self,
pixel_values: torch.Tensor,
output_hidden_states: Optional[bool] = None,
**kwargs,
) -> BackboneOutput:
r"""
Examples:
```python
>>> from transformers import AutoImageProcessor, AutoBackbone
>>> import torch
>>> from PIL import Image
>>> import requests
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> processor = AutoImageProcessor.from_pretrained("facebook/dinov2-with-registers-base")
>>> model = AutoBackbone.from_pretrained(
... "facebook/dinov2-with-registers-base", out_features=["stage2", "stage5", "stage8", "stage11"]
... )
>>> inputs = processor(image, return_tensors="pt")
>>> outputs = model(**inputs)
>>> feature_maps = outputs.feature_maps
>>> list(feature_maps[-1].shape)
[1, 768, 16, 16]
```"""
if output_hidden_states is None:
output_hidden_states = self.config.output_hidden_states
embedding_output = self.embeddings(pixel_values)
output: BaseModelOutput = self.encoder(embedding_output, output_hidden_states=True)
hidden_states = output.hidden_states
feature_maps = []
for stage, hidden_state in zip(self.stage_names, hidden_states):
if stage in self.out_features:
if self.config.apply_layernorm:
hidden_state = self.layernorm(hidden_state)
if self.config.reshape_hidden_states:
hidden_state = hidden_state[:, 1 + self.num_register_tokens :]
                    # this was actually a bug in the original implementation that we copied here,
                    # because normally the order is height, width
batch_size, _, height, width = pixel_values.shape
patch_size = self.config.patch_size
hidden_state = hidden_state.reshape(batch_size, height // patch_size, width // patch_size, -1)
hidden_state = hidden_state.permute(0, 3, 1, 2).contiguous()
feature_maps.append(hidden_state)
return BackboneOutput(
feature_maps=tuple(feature_maps),
hidden_states=hidden_states if output_hidden_states else None,
)
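# Feature-map reshape sketch (illustrative, example sizes assumed): for a 224x224 input with
# patch_size=16 and hidden_size=768, a selected hidden state of shape
# (batch_size, 1 + num_register_tokens + 196, 768) is first stripped of the CLS and register tokens,
# then reshaped to (batch_size, 14, 14, 768) and permuted to the channels-first layout
# (batch_size, 768, 14, 14) expected by downstream detection/segmentation heads.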
__all__ = [
"Dinov2WithRegistersConfig",
"Dinov2WithRegistersPreTrainedModel",
"Dinov2WithRegistersModel",
"Dinov2WithRegistersForImageClassification",
"Dinov2WithRegistersBackbone",
]
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/dinov2_with_registers/convert_dinov2_with_registers_to_hf.py | src/transformers/models/dinov2_with_registers/convert_dinov2_with_registers_to_hf.py | # coding=utf-8
# Copyright 2024 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert DINOv2 with Registers checkpoints from the original repository.
URL: https://github.com/facebookresearch/dinov2/tree/main
"""
import argparse
import json
from pathlib import Path
import requests
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import (
BitImageProcessor,
Dinov2WithRegistersConfig,
Dinov2WithRegistersForImageClassification,
Dinov2WithRegistersModel,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_dinov2_with_registers_config(model_name, image_classifier=False):
config = Dinov2WithRegistersConfig(image_size=518, patch_size=14)
# size of the architecture
if "vits" in model_name:
config.hidden_size = 384
config.num_attention_heads = 6
elif "vitb" in model_name:
pass
elif "vitl" in model_name:
config.hidden_size = 1024
config.num_hidden_layers = 24
config.num_attention_heads = 16
elif "vitg" in model_name:
config.use_swiglu_ffn = True
config.hidden_size = 1536
config.num_hidden_layers = 40
config.num_attention_heads = 24
else:
raise ValueError("Model not supported")
if image_classifier:
repo_id = "huggingface/label-files"
filename = "imagenet-1k-id2label.json"
config.num_labels = 1000
config.id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
config.id2label = {int(k): v for k, v in config.id2label.items()}
return config
def create_rename_keys(config):
rename_keys = []
# fmt: off
# patch embedding layer
rename_keys.append(("cls_token", "embeddings.cls_token"))
rename_keys.append(("mask_token", "embeddings.mask_token"))
rename_keys.append(("pos_embed", "embeddings.position_embeddings"))
rename_keys.append(("register_tokens", "embeddings.register_tokens"))
rename_keys.append(("patch_embed.proj.weight", "embeddings.patch_embeddings.projection.weight"))
rename_keys.append(("patch_embed.proj.bias", "embeddings.patch_embeddings.projection.bias"))
for i in range(config.num_hidden_layers):
# layernorms
rename_keys.append((f"blocks.{i}.norm1.weight", f"encoder.layer.{i}.norm1.weight"))
rename_keys.append((f"blocks.{i}.norm1.bias", f"encoder.layer.{i}.norm1.bias"))
rename_keys.append((f"blocks.{i}.norm2.weight", f"encoder.layer.{i}.norm2.weight"))
rename_keys.append((f"blocks.{i}.norm2.bias", f"encoder.layer.{i}.norm2.bias"))
# MLP
if config.use_swiglu_ffn:
rename_keys.append((f"blocks.{i}.mlp.w12.weight", f"encoder.layer.{i}.mlp.w12.weight"))
rename_keys.append((f"blocks.{i}.mlp.w12.bias", f"encoder.layer.{i}.mlp.w12.bias"))
rename_keys.append((f"blocks.{i}.mlp.w3.weight", f"encoder.layer.{i}.mlp.w3.weight"))
rename_keys.append((f"blocks.{i}.mlp.w3.bias", f"encoder.layer.{i}.mlp.w3.bias"))
else:
rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"encoder.layer.{i}.mlp.fc1.weight"))
rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"encoder.layer.{i}.mlp.fc1.bias"))
rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"encoder.layer.{i}.mlp.fc2.weight"))
rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"encoder.layer.{i}.mlp.fc2.bias"))
# layerscale
rename_keys.append((f"blocks.{i}.ls1.gamma", f"encoder.layer.{i}.layer_scale1.lambda1"))
rename_keys.append((f"blocks.{i}.ls2.gamma", f"encoder.layer.{i}.layer_scale2.lambda1"))
# attention projection layer
rename_keys.append((f"blocks.{i}.attn.proj.weight", f"encoder.layer.{i}.attention.output.dense.weight"))
rename_keys.append((f"blocks.{i}.attn.proj.bias", f"encoder.layer.{i}.attention.output.dense.bias"))
# final layernorm
rename_keys.append(("norm.weight", "layernorm.weight"))
rename_keys.append(("norm.bias", "layernorm.bias"))
# fmt: on
return rename_keys
def rename_key(dct, old, new):
val = dct.pop(old)
dct[new] = val
# we split up the matrix of each encoder layer into queries, keys and values
def read_in_q_k_v(state_dict, config):
for i in range(config.num_hidden_layers):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
# next, add query, keys and values (in that order) to the state dict
state_dict[f"encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
state_dict[f"encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
state_dict[f"encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
state_dict[f"encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
state_dict[f"encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
state_dict[f"encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
# We will verify our results on an image of cute cats
def prepare_img():
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
return image
@torch.no_grad()
def convert_dinov2_with_registers_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
"""
Copy/paste/tweak model's weights to our Dinov2WithRegisters structure.
"""
# define default Dinov2WithRegisters configuration
image_classifier = "1layer" in model_name
config = get_dinov2_with_registers_config(model_name, image_classifier=image_classifier)
# load original model from torch hub
original_model = torch.hub.load("facebookresearch/dinov2", model_name.replace("_1layer", ""))
original_model.eval()
# load state_dict of original model, remove and rename some keys
state_dict = original_model.state_dict()
rename_keys = create_rename_keys(config)
for src, dest in rename_keys:
rename_key(state_dict, src, dest)
read_in_q_k_v(state_dict, config)
for key, val in state_dict.copy().items():
val = state_dict.pop(key)
if "w12" in key:
key = key.replace("w12", "weights_in")
if "w3" in key:
key = key.replace("w3", "weights_out")
state_dict[key] = val
# load HuggingFace model
if image_classifier:
model = Dinov2WithRegistersForImageClassification(config).eval()
model.dinov2_with_registers.load_state_dict(state_dict)
model_name_to_classifier_dict_url = {
"dinov2_vits14_reg_1layer": "https://dl.fbaipublicfiles.com/dinov2/dinov2_vits14/dinov2_vits14_reg4_linear_head.pth",
"dinov2_vitb14_reg_1layer": "https://dl.fbaipublicfiles.com/dinov2/dinov2_vitb14/dinov2_vitb14_reg4_linear_head.pth",
"dinov2_vitl14_reg_1layer": "https://dl.fbaipublicfiles.com/dinov2/dinov2_vitl14/dinov2_vitl14_reg4_linear_head.pth",
"dinov2_vitg14_reg_1layer": "https://dl.fbaipublicfiles.com/dinov2/dinov2_vitg14/dinov2_vitg14_reg4_linear_head.pth",
}
url = model_name_to_classifier_dict_url[model_name]
classifier_state_dict = torch.hub.load_state_dict_from_url(url, map_location="cpu")
model.classifier.weight = nn.Parameter(classifier_state_dict["weight"])
model.classifier.bias = nn.Parameter(classifier_state_dict["bias"])
else:
model = Dinov2WithRegistersModel(config).eval()
model.load_state_dict(state_dict)
# load image
image = prepare_img()
# preprocess image
transformations = transforms.Compose(
[
transforms.Resize(256, interpolation=transforms.InterpolationMode.BICUBIC),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(
mean=IMAGENET_DEFAULT_MEAN, # these are RGB mean+std values
std=IMAGENET_DEFAULT_STD, # across a large photo dataset.
),
]
)
original_pixel_values = transformations(image).unsqueeze(0) # insert batch dimension
processor = BitImageProcessor(
size={"shortest_edge": 256},
resample=PILImageResampling.BICUBIC,
image_mean=IMAGENET_DEFAULT_MEAN,
image_std=IMAGENET_DEFAULT_STD,
)
pixel_values = processor(image, return_tensors="pt").pixel_values
assert torch.allclose(original_pixel_values, pixel_values)
with torch.no_grad():
outputs = model(pixel_values, output_hidden_states=True)
original_outputs = original_model(pixel_values)
# assert values
if image_classifier:
print("Predicted class:")
class_idx = outputs.logits.argmax(-1).item()
print(model.config.id2label[class_idx])
else:
assert outputs.last_hidden_state[:, 0].shape == original_outputs.shape
assert torch.allclose(outputs.last_hidden_state[:, 0], original_outputs, atol=1e-3)
print("Looks ok!")
if pytorch_dump_folder_path is not None:
Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
model.save_pretrained(pytorch_dump_folder_path)
print(f"Saving image processor to {pytorch_dump_folder_path}")
processor.save_pretrained(pytorch_dump_folder_path)
if push_to_hub:
model_name_to_hf_name = {
"dinov2_vits14_reg": "dinov2-with-registers-small",
"dinov2_vitb14_reg": "dinov2-with-registers-base",
"dinov2_vitl14_reg": "dinov2-with-registers-large",
"dinov2_vitg14_reg": "dinov2-with-registers-giant",
"dinov2_vits14_reg_1layer": "dinov2-with-registers-small-imagenet1k-1-layer",
"dinov2_vitb14_reg_1layer": "dinov2-with-registers-base-imagenet1k-1-layer",
"dinov2_vitl14_reg_1layer": "dinov2-with-registers-large-imagenet1k-1-layer",
"dinov2_vitg14_reg_1layer": "dinov2-with-registers-giant-imagenet1k-1-layer",
}
name = model_name_to_hf_name[model_name]
model.push_to_hub(f"nielsr/{name}")
processor.push_to_hub(f"nielsr/{name}")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="dinov2_vits14_reg",
type=str,
choices=[
"dinov2_vits14_reg",
"dinov2_vitb14_reg",
"dinov2_vitl14_reg",
"dinov2_vitg14_reg",
"dinov2_vits14_reg_1layer",
"dinov2_vitb14_reg_1layer",
"dinov2_vitl14_reg_1layer",
"dinov2_vitg14_reg_1layer",
],
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether or not to push the converted model to the Hugging Face hub.",
)
args = parser.parse_args()
convert_dinov2_with_registers_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/umt5/convert_umt5_checkpoint_to_pytorch.py | src/transformers/models/umt5/convert_umt5_checkpoint_to_pytorch.py | # coding=utf-8
# Copyright 2023 Google LLC and HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Convert T5X checkpoint to PyTorch
Steps:
- Install gsutil according to https://cloud.google.com/storage/docs/gsutil_install
- Get a T5X checkpoint at https://github.com/google-research/t5x/blob/main/docs/models.md#t5-11-checkpoints Example:
`gsutil -m cp -r gs://t5-data/pretrained_models/t5x/t5_1_1_small $HOME/`
- Create or download a corresponding config for the downloaded model. E.g. for T5 v1.1 small, you can use
https://huggingface.co/google/t5-v1_1-small/blob/main/config.json
- Convert:
```
python3 convert_umt5_checkpoint_to_pytorch.py --t5x_checkpoint_path=$HOME/t5_1_1_small --config_file=config.json\
--pytorch_dump_path=$HOME/t5_1_1_small_pt
```
"""
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from t5x import checkpoints
from transformers import MT5Config, UMT5EncoderModel, UMT5ForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def t5x_relpos_bias_lookup(params, i, prefix):
"""Returns the Relative Position Bias parameters of a layer. Does not transpose."""
return params[f"{prefix}/{prefix}/relpos_bias/rel_embedding"][:, i, :]
def t5x_attention_lookup(params, i, prefix, layer_name="attention"):
"""Returns the KOQV parameters of (self-)attention. Does not transpose."""
k_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/key/kernel"][:, i, :, :])
k = k_tmp.reshape(k_tmp.shape[0], k_tmp.shape[1] * k_tmp.shape[2])
o_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/out/kernel"][:, i, :, :])
o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1], o_tmp.shape[2])
q_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/query/kernel"][:, i, :, :])
q = q_tmp.reshape(q_tmp.shape[0], q_tmp.shape[1] * q_tmp.shape[2])
v_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/value/kernel"][:, i, :, :])
v = v_tmp.reshape(v_tmp.shape[0], v_tmp.shape[1] * v_tmp.shape[2])
return k, o, q, v
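# Shape note (illustrative, layout inferred from the slicing above rather than from T5X documentation):
# after taking the per-layer slice [:, i, :, :], the key/query/value kernels are 3-D of shape
# (d_model, n_heads, d_kv) and are flattened to (d_model, n_heads * d_kv); the output kernel slice is
# (n_heads, d_kv, d_model) and is flattened to (n_heads * d_kv, d_model). The transposes into PyTorch's
# (out_features, in_features) convention happen later in convert_t5x_to_pytorch via `.T`.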
def t5x_mlp_lookup(params, i, prefix, split_mlp_wi=False):
"""Returns the MLP parameters of a layer. Does not transpose."""
if split_mlp_wi:
wi_0 = params[f"{prefix}/{prefix}/mlp/wi_0/kernel"][:, i, :]
wi_1 = params[f"{prefix}/{prefix}/mlp/wi_1/kernel"][:, i, :]
wi = (wi_0, wi_1)
else:
wi = params[f"{prefix}/{prefix}/mlp/wi/kernel"][:, i, :]
wo = params[f"{prefix}/{prefix}/mlp/wo/kernel"][:, i, :]
return wi, wo
def t5x_layer_norm_lookup(params, i, prefix, layer_name):
"""Returns the layer norm param of a layer."""
return params[f"{prefix}/{prefix}/{layer_name}/scale"][:, i]
def convert_t5x_to_pytorch(
variables: dict, *, num_layers: int, is_encoder_only: bool, scalable_attention: bool = False
):
"""Converts the parameters from T5X-Flax to Transformers-PyTorch."""
old = traverse_util.flatten_dict(variables["target"])
old = {"/".join(k): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
split_mlp_wi = "encoder/encoder/mlp/wi_0/kernel" in old
print("Split MLP:", split_mlp_wi)
new = collections.OrderedDict()
# Shared embeddings.
new["shared.weight"] = old["token_embedder/embedding"]
# Encoder.
for i in range(num_layers):
# Block i, layer 0 (Self Attention).
layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
k, o, q, v = t5x_attention_lookup(old, i, "encoder", "attention")
new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T
# Block i, layer 1 (MLP).
layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
wi, wo = t5x_mlp_lookup(old, i, "encoder", split_mlp_wi)
new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
if split_mlp_wi:
new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
else:
new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
new[f"encoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
old, i, "encoder"
).T
new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]
if not scalable_attention:
new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
old, 0, "encoder"
).T
new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
old, 0, "decoder"
).T
if not is_encoder_only:
# Decoder.
for i in range(num_layers):
# Block i, layer 0 (Self Attention).
layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
k, o, q, v = t5x_attention_lookup(old, i, "decoder", "self_attention")
new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T
# Block i, layer 1 (Cross Attention).
layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
k, o, q, v = t5x_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T
# Block i, layer 2 (MLP).
layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
wi, wo = t5x_mlp_lookup(old, i, "decoder", split_mlp_wi)
new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
if split_mlp_wi:
new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
else:
new[f"encoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
new[f"decoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = (
t5x_relpos_bias_lookup(old, i, "decoder").T
)
new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T
return new
def make_state_dict(converted_params, is_encoder_only: bool):
"""Prepares a state dict for the PyTorch model."""
# Make a state dict with torch tensors.
state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print("Using shared word embeddings as lm_head.")
state_dict["lm_head.weight"] = state_dict["shared.weight"]
return state_dict
def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention):
"""Replaces the params in model with the T5X converted params."""
variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
converted = convert_t5x_to_pytorch(
variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only, scalable_attention=scalable_attention
)
state_dict = make_state_dict(converted, is_encoder_only)
model.load_state_dict(state_dict, strict=True)
def convert_t5x_checkpoint_to_pytorch(
t5x_checkpoint_path,
config_file,
pytorch_dump_path,
is_encoder_only: bool = False,
scalable_attention: bool = False,
):
"""Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
# Initialise PyTorch model
config = MT5Config.from_json_file(config_file)
print(f"Building PyTorch model from configuration: {config}")
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
model = UMT5EncoderModel(config)
else:
model = UMT5ForConditionalGeneration(config)
# Load weights from tf checkpoint
load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention)
# Save pytorch-model
print(f"Save PyTorch model to {pytorch_dump_path}")
model.save_pretrained(pytorch_dump_path)
# Verify that we can load the checkpoint.
model.from_pretrained(pytorch_dump_path)
print("Done")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
# Required parameters
parser.add_argument(
"--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--is_encoder_only", action="store_true", help="Check if the model is encoder-decoder model", default=False
)
parser.add_argument(
"--scalable_attention",
action="store_true",
help="Whether the model uses scaled attention (umt5 model)",
default=False,
)
args = parser.parse_args()
convert_t5x_checkpoint_to_pytorch(
args.t5x_checkpoint_path,
args.config_file,
args.pytorch_dump_path,
args.is_encoder_only,
args.scalable_attention,
)
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/umt5/__init__.py | src/transformers/models/umt5/__init__.py | # Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure
if TYPE_CHECKING:
from .configuration_umt5 import *
from .modeling_umt5 import *
else:
import sys
_file = globals()["__file__"]
sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/umt5/configuration_umt5.py | src/transformers/models/umt5/configuration_umt5.py | # coding=utf-8
# Copyright 2023, The T5 Authors and HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""UMT5 model configuration"""
from ...configuration_utils import PreTrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
class UMT5Config(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`UMT5Model`]. It is used to instantiate a UMT5
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the UMT5
[google/umt5-small](https://huggingface.co/google/umt5-small) architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Arguments:
vocab_size (`int`, *optional*, defaults to 250112):
Vocabulary size of the UMT5 model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`UMT5Model`].
d_model (`int`, *optional*, defaults to 512):
Size of the encoder layers and the pooler layer.
d_kv (`int`, *optional*, defaults to 64):
Size of the key, query, value projections per attention head. `d_kv` has to be equal to `d_model //
num_heads`.
d_ff (`int`, *optional*, defaults to 1024):
Size of the intermediate feed forward layer in each `UMT5Block`.
num_layers (`int`, *optional*, defaults to 8):
Number of hidden layers in the Transformer encoder.
num_decoder_layers (`int`, *optional*):
Number of hidden layers in the Transformer decoder. Will use the same value as `num_layers` if not set.
num_heads (`int`, *optional*, defaults to 6):
Number of attention heads for each attention layer in the Transformer encoder.
relative_attention_num_buckets (`int`, *optional*, defaults to 32):
The number of buckets to use for each attention layer.
relative_attention_max_distance (`int`, *optional*, defaults to 128):
The maximum distance of the longer sequences for the bucket separation.
dropout_rate (`float`, *optional*, defaults to 0.1):
The ratio for all dropout layers.
classifier_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for classifier.
        layer_norm_epsilon (`float`, *optional*, defaults to 1e-6):
The epsilon used by the layer normalization layers.
initializer_factor (`float`, *optional*, defaults to 1):
A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
testing).
feed_forward_proj (`string`, *optional*, defaults to `"gated-gelu"`):
Type of feed forward layer to be used. Should be one of `"relu"` or `"gated-gelu"`.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
"""
model_type = "umt5"
keys_to_ignore_at_inference = ["past_key_values"]
attribute_map = {
"hidden_size": "d_model",
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
"head_dim": "d_kv",
}
def __init__(
self,
vocab_size=250112,
d_model=512,
d_kv=64,
d_ff=1024,
num_layers=8,
num_decoder_layers=None,
num_heads=6,
relative_attention_num_buckets=32,
relative_attention_max_distance=128,
dropout_rate=0.1,
layer_norm_epsilon=1e-6,
initializer_factor=1.0,
feed_forward_proj="gated-gelu",
is_encoder_decoder=True,
use_cache=True,
tokenizer_class="T5Tokenizer",
pad_token_id=0,
eos_token_id=1,
decoder_start_token_id=0,
classifier_dropout=0.0,
**kwargs,
):
self.vocab_size = vocab_size
self.d_model = d_model
self.d_kv = d_kv
self.d_ff = d_ff
self.num_layers = num_layers
self.num_decoder_layers = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
self.num_heads = num_heads
self.relative_attention_num_buckets = relative_attention_num_buckets
self.relative_attention_max_distance = relative_attention_max_distance
self.dropout_rate = dropout_rate
self.classifier_dropout = classifier_dropout
self.layer_norm_epsilon = layer_norm_epsilon
self.initializer_factor = initializer_factor
self.feed_forward_proj = feed_forward_proj
self.use_cache = use_cache
act_info = self.feed_forward_proj.split("-")
self.dense_act_fn = act_info[-1]
self.is_gated_act = act_info[0] == "gated"
if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
raise ValueError(
f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
"Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
"'gated-gelu' or 'relu'"
)
if feed_forward_proj == "gated-gelu":
self.dense_act_fn = "gelu_new"
        # Force tying: the official weights serialize `tie_word_embeddings=False`, but the embeddings must always be tied
kwargs["tie_word_embeddings"] = True
super().__init__(
is_encoder_decoder=is_encoder_decoder,
tokenizer_class=tokenizer_class,
pad_token_id=pad_token_id,
eos_token_id=eos_token_id,
decoder_start_token_id=decoder_start_token_id,
**kwargs,
)
__all__ = ["UMT5Config"]
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/umt5/modeling_umt5.py | src/transformers/models/umt5/modeling_umt5.py | # coding=utf-8
# Copyright 2023 Mesh TensorFlow authors, T5 Authors and HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch UMT5 model."""
import copy
import math
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ... import initialization as init
from ...activations import ACT2FN
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from ...generation import GenerationMixin
from ...modeling_attn_mask_utils import AttentionMaskConverter
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import (
BaseModelOutput,
BaseModelOutputWithPastAndCrossAttentions,
Seq2SeqLMOutput,
Seq2SeqModelOutput,
Seq2SeqQuestionAnsweringModelOutput,
Seq2SeqSequenceClassifierOutput,
TokenClassifierOutput,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
DUMMY_INPUTS,
DUMMY_MASK,
auto_docstring,
is_torch_flex_attn_available,
is_torchdynamo_compiling,
logging,
)
from .configuration_umt5 import UMT5Config
if is_torch_flex_attn_available():
from torch.nn.attention.flex_attention import BlockMask
from ...integrations.flex_attention import make_flex_block_causal_mask
logger = logging.get_logger(__name__)
# Copied from transformers.models.t5.modeling_t5.T5LayerNorm with T5->UMT5
class UMT5LayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-6):
"""
Construct a layernorm module in the UMT5 style. No bias and no subtraction of mean.
"""
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.variance_epsilon = eps
def forward(self, hidden_states):
# UMT5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
# Square Layer Normalization https://huggingface.co/papers/1910.07467 thus variance is calculated
# w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
# half-precision inputs is done in fp32
variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
# convert into half-precision if necessary
if self.weight.dtype in [torch.float16, torch.bfloat16]:
hidden_states = hidden_states.to(self.weight.dtype)
return self.weight * hidden_states
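# Worked example for the RMS-style norm above (illustrative, values assumed): for a single hidden
# vector [3.0, 4.0] with eps ~ 0, the variance term is mean([9, 16]) = 12.5 and rsqrt(12.5) ~ 0.2828,
# so the normalized vector is roughly [0.849, 1.131] before the learned per-channel `weight` scaling;
# no mean is subtracted and no bias is added.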
# Copied from transformers.models.t5.modeling_t5.T5DenseActDense with T5->UMT5
class UMT5DenseActDense(nn.Module):
def __init__(self, config: UMT5Config):
super().__init__()
self.wi = nn.Linear(config.d_model, config.d_ff, bias=False)
self.wo = nn.Linear(config.d_ff, config.d_model, bias=False)
self.dropout = nn.Dropout(config.dropout_rate)
self.act = ACT2FN[config.dense_act_fn]
def forward(self, hidden_states):
hidden_states = self.wi(hidden_states)
hidden_states = self.act(hidden_states)
hidden_states = self.dropout(hidden_states)
if (
isinstance(self.wo.weight, torch.Tensor)
and hidden_states.dtype != self.wo.weight.dtype
and self.wo.weight.dtype != torch.int8
):
hidden_states = hidden_states.to(self.wo.weight.dtype)
hidden_states = self.wo(hidden_states)
return hidden_states
# Copied from transformers.models.t5.modeling_t5.T5DenseGatedActDense with T5->UMT5
class UMT5DenseGatedActDense(nn.Module):
def __init__(self, config: UMT5Config):
super().__init__()
self.wi_0 = nn.Linear(config.d_model, config.d_ff, bias=False)
self.wi_1 = nn.Linear(config.d_model, config.d_ff, bias=False)
self.wo = nn.Linear(config.d_ff, config.d_model, bias=False)
self.dropout = nn.Dropout(config.dropout_rate)
self.act = ACT2FN[config.dense_act_fn]
def forward(self, hidden_states):
hidden_gelu = self.act(self.wi_0(hidden_states))
hidden_linear = self.wi_1(hidden_states)
hidden_states = hidden_gelu * hidden_linear
hidden_states = self.dropout(hidden_states)
# To make 8bit quantization work for google/flan-t5-xxl, self.wo is kept in float32.
# See https://github.com/huggingface/transformers/issues/20287
# we also make sure the weights are not in `int8` in case users will force `_keep_in_fp32_modules` to be `None``
if (
isinstance(self.wo.weight, torch.Tensor)
and hidden_states.dtype != self.wo.weight.dtype
and self.wo.weight.dtype != torch.int8
):
hidden_states = hidden_states.to(self.wo.weight.dtype)
hidden_states = self.wo(hidden_states)
return hidden_states
# Copied from transformers.models.t5.modeling_t5.T5LayerFF with T5->UMT5
class UMT5LayerFF(nn.Module):
def __init__(self, config: UMT5Config):
super().__init__()
if config.is_gated_act:
self.DenseReluDense = UMT5DenseGatedActDense(config)
else:
self.DenseReluDense = UMT5DenseActDense(config)
self.layer_norm = UMT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
self.dropout = nn.Dropout(config.dropout_rate)
def forward(self, hidden_states):
forwarded_states = self.layer_norm(hidden_states)
forwarded_states = self.DenseReluDense(forwarded_states)
hidden_states = hidden_states + self.dropout(forwarded_states)
return hidden_states
class UMT5Attention(nn.Module):
"""
T5's attention using relative_attention_bias.
"""
def __init__(self, config, has_relative_attention_bias=False, layer_idx: Optional[int] = None):
super().__init__()
self.is_decoder = config.is_decoder
self.has_relative_attention_bias = has_relative_attention_bias
self.relative_attention_num_buckets = config.relative_attention_num_buckets
self.relative_attention_max_distance = config.relative_attention_max_distance
self.d_model = config.d_model
self.key_value_proj_dim = config.d_kv
self.n_heads = config.num_heads
self.dropout = config.dropout_rate
self.inner_dim = self.n_heads * self.key_value_proj_dim
self.layer_idx = layer_idx
if layer_idx is None and self.is_decoder:
logger.warning_once(
f"Instantiating a decoder {self.__class__.__name__} without passing `layer_idx` is not recommended and "
"will to errors during the forward call, if caching is used. Please make sure to provide a `layer_idx` "
"when creating this class."
)
self.q = nn.Linear(self.d_model, self.inner_dim, bias=False)
self.k = nn.Linear(self.d_model, self.inner_dim, bias=False)
self.v = nn.Linear(self.d_model, self.inner_dim, bias=False)
self.o = nn.Linear(self.inner_dim, self.d_model, bias=False)
if self.has_relative_attention_bias:
self.relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads)
def _shape(self, projection: torch.Tensor) -> torch.Tensor:
new_projection_shape = projection.size()[:-1] + (self.n_heads, self.key_value_proj_dim)
# move heads to 2nd position (B, T, H * D) -> (B, T, H, D) -> (B, H, T, D)
new_projection = projection.view(new_projection_shape).permute(0, 2, 1, 3)
return new_projection
def _relative_position_bucket(self, relative_position):
"""
Adapted from Mesh Tensorflow:
https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593
Translate relative position to a bucket number for relative attention. The relative position is defined as
memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to
position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for
small absolute relative_position and larger buckets for larger absolute relative_positions. All relative
positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket.
This should allow for more graceful generalization to longer sequences than the model has been trained on
Args:
            relative_position: an int32 Tensor of relative positions (memory_position - query_position)
                The number of buckets, the maximum distance and whether the attention is bidirectional are
                taken from the module itself (`relative_attention_num_buckets`,
                `relative_attention_max_distance`, `is_decoder`).
Returns:
a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets)
"""
relative_buckets = 0
num_buckets = self.relative_attention_num_buckets
max_distance = self.relative_attention_max_distance
if not self.is_decoder:
num_buckets //= 2
relative_buckets += (relative_position > 0).to(torch.long) * num_buckets
relative_position = torch.abs(relative_position)
else:
relative_position = -torch.min(relative_position, torch.zeros_like(relative_position))
# now relative_position is in the range [0, inf)
# half of the buckets are for exact increments in positions
max_exact = num_buckets // 2
is_small = relative_position < max_exact
# The other half of the buckets are for logarithmically bigger bins in positions up to max_distance
log_ratio = torch.log(relative_position.float() / max_exact) / math.log(max_distance / max_exact)
log_ratio = log_ratio * (num_buckets - max_exact)
relative_position_if_large = max_exact + log_ratio.to(torch.long)
relative_position_if_large = torch.min(
relative_position_if_large, torch.full_like(relative_position_if_large, num_buckets - 1)
)
relative_buckets += torch.where(is_small, relative_position, relative_position_if_large)
return relative_buckets
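    # Bucket sketch (illustrative, default hyper-parameters assumed: num_buckets=32, max_distance=128).
    # For the encoder the buckets are split in half by the sign of the relative position; within each
    # half, small distances get one bucket each and larger distances share logarithmically sized
    # buckets, with anything at or beyond max_distance collapsing into the last bucket. For the decoder,
    # future positions are clamped to 0 and all buckets describe how far in the past the attended token is.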
def compute_bias(self, query_length, key_length, device=None, cache_position=None):
"""Compute binned relative position bias"""
if device is None:
device = self.relative_attention_bias.weight.device
if cache_position is None:
context_position = torch.arange(query_length, dtype=torch.long, device=device)[:, None]
else:
context_position = cache_position[:, None]
memory_position = torch.arange(key_length, dtype=torch.long, device=device)[None, :]
relative_position = memory_position - context_position # shape (query_length, key_length)
relative_position_bucket = self._relative_position_bucket(relative_position)
values = self.relative_attention_bias(relative_position_bucket) # shape (query_length, key_length, num_heads)
values = values.permute([2, 0, 1]).unsqueeze(0) # shape (1, num_heads, query_length, key_length)
return values
def forward(
self,
hidden_states: torch.Tensor,
encoder_hidden_states: Optional[torch.Tensor] = None,
past_key_values: Optional[Cache] = None,
attention_mask: Optional[torch.Tensor] = None,
cache_position: Optional[torch.Tensor] = None,
):
batch_size, seq_length = hidden_states.shape[:2]
# if encoder_hidden_states are provided this layer is used as a cross-attention layer for the decoder
is_cross_attention = encoder_hidden_states is not None
query_states = self.q(hidden_states)
query_states = query_states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2)
        # Check if an encoder-decoder model is being used. Otherwise we'll get a `DynamicCache`
is_updated = False
if past_key_values is not None and isinstance(past_key_values, EncoderDecoderCache):
is_updated = past_key_values.is_updated.get(self.layer_idx)
if is_cross_attention:
# after the first generated id, we can subsequently re-use all key/value_states from cache
curr_past_key_values = past_key_values.cross_attention_cache
else:
curr_past_key_values = past_key_values.self_attention_cache
else:
curr_past_key_values = past_key_values
current_states = encoder_hidden_states if is_cross_attention else hidden_states
if is_cross_attention and past_key_values is not None and is_updated:
# reuse k,v, cross_attentions
key_states = curr_past_key_values.layers[self.layer_idx].keys
value_states = curr_past_key_values.layers[self.layer_idx].values
else:
key_states = self.k(current_states)
value_states = self.v(current_states)
key_states = key_states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2)
value_states = value_states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2)
if past_key_values is not None:
# save all key/value_states to cache to be re-used for fast auto-regressive generation
cache_position = cache_position if not is_cross_attention else None
key_states, value_states = curr_past_key_values.update(
key_states, value_states, self.layer_idx, {"cache_position": cache_position}
)
# set flag that curr layer for cross-attn is already updated so we can re-use in subsequent calls
if is_cross_attention and isinstance(past_key_values, EncoderDecoderCache):
past_key_values.is_updated[self.layer_idx] = True
# compute scores, equivalent of torch.einsum("bnqd,bnkd->bnqk", query_states, key_states), compatible with onnx op>9
scores = torch.matmul(query_states, key_states.transpose(3, 2))
# cache position is 0-indexed so we add 1 to get the real length of queries (aka with past)
real_seq_length = seq_length + past_key_values.get_seq_length() if past_key_values is not None else seq_length
key_length = key_states.shape[-2]
if not self.has_relative_attention_bias:
position_bias = torch.zeros(
(1, self.n_heads, seq_length, key_length), device=scores.device, dtype=scores.dtype
)
else:
position_bias = self.compute_bias(
real_seq_length, key_length, device=scores.device, cache_position=cache_position
)
position_bias = position_bias[:, :, -seq_length:, :]
if attention_mask is not None:
causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
position_bias = position_bias + causal_mask
position_bias_masked = position_bias
scores += position_bias_masked
# (batch_size, n_heads, seq_length, key_length)
attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as(scores)
attn_weights = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
attn_output = torch.matmul(attn_weights, value_states)
attn_output = attn_output.transpose(1, 2).contiguous()
attn_output = attn_output.view(batch_size, seq_length, -1)
attn_output = self.o(attn_output)
return attn_output, attn_weights
class UMT5LayerSelfAttention(nn.Module):
def __init__(self, config, layer_idx: Optional[int] = None):
super().__init__()
self.SelfAttention = UMT5Attention(config, has_relative_attention_bias=True, layer_idx=layer_idx)
self.layer_norm = UMT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
self.dropout = nn.Dropout(config.dropout_rate)
def forward(
self,
hidden_states,
attention_mask=None,
past_key_values=None,
cache_position=None,
):
normed_hidden_states = self.layer_norm(hidden_states)
attention_output = self.SelfAttention(
normed_hidden_states,
attention_mask=attention_mask,
past_key_values=past_key_values,
cache_position=cache_position,
)
hidden_states = hidden_states + self.dropout(attention_output[0])
outputs = (hidden_states,) + attention_output[1:] # add attentions if we output them
return outputs
class UMT5LayerCrossAttention(nn.Module):
def __init__(self, config, layer_idx: Optional[int] = None):
super().__init__()
self.EncDecAttention = UMT5Attention(config, has_relative_attention_bias=False, layer_idx=layer_idx)
self.layer_norm = UMT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
self.dropout = nn.Dropout(config.dropout_rate)
def forward(
self,
hidden_states,
encoder_hidden_states=None,
attention_mask=None,
past_key_values=None,
cache_position=None,
):
normed_hidden_states = self.layer_norm(hidden_states)
attention_output = self.EncDecAttention(
normed_hidden_states,
encoder_hidden_states=encoder_hidden_states,
attention_mask=attention_mask,
past_key_values=past_key_values,
cache_position=cache_position,
)
layer_output = hidden_states + self.dropout(attention_output[0])
outputs = (layer_output,) + attention_output[1:] # add attentions if we output them
return outputs
class UMT5Block(GradientCheckpointingLayer):
def __init__(self, config, layer_idx: Optional[int] = None):
super().__init__()
self.is_decoder = config.is_decoder
self.layer = nn.ModuleList()
self.layer.append(UMT5LayerSelfAttention(config, layer_idx=layer_idx))
if self.is_decoder:
self.layer.append(UMT5LayerCrossAttention(config, layer_idx=layer_idx))
self.layer.append(UMT5LayerFF(config))
def forward(
self,
hidden_states,
attention_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_values=None,
use_cache=False,
output_attentions=False,
cache_position=None,
):
hidden_states, self_attn_weights = self.layer[0](
hidden_states,
attention_mask=attention_mask,
past_key_values=past_key_values,
cache_position=cache_position,
)
# clamp inf values to enable fp16 training
if hidden_states.dtype == torch.float16:
max_dtype = torch.finfo(hidden_states.dtype).max
clamp_value = torch.where(torch.isinf(hidden_states).any(), max_dtype - 1000, max_dtype)
hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
# Cross-Attention Block
cross_attn_weights = None
do_cross_attention = self.is_decoder and encoder_hidden_states is not None
if do_cross_attention:
hidden_states, cross_attn_weights = self.layer[1](
hidden_states,
encoder_hidden_states=encoder_hidden_states,
attention_mask=encoder_attention_mask,
past_key_values=past_key_values,
cache_position=cache_position,
)
# clamp inf values to enable fp16 training
if hidden_states.dtype == torch.float16:
max_dtype = torch.finfo(hidden_states.dtype).max
clamp_value = torch.where(torch.isinf(hidden_states).any(), max_dtype - 1000, max_dtype)
hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
# Apply Feed Forward layer
hidden_states = self.layer[-1](hidden_states)
# clamp inf values to enable fp16 training
if hidden_states.dtype == torch.float16:
max_dtype = torch.finfo(hidden_states.dtype).max
clamp_value = torch.where(torch.isinf(hidden_states).any(), max_dtype - 1000, max_dtype)
hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
outputs = (hidden_states,)
if output_attentions:
outputs += (self_attn_weights, cross_attn_weights)
return outputs
# Copied from transformers.models.t5.modeling_t5.T5ClassificationHead with T5->UMT5
class UMT5ClassificationHead(nn.Module):
"""Head for sentence-level classification tasks."""
def __init__(self, config: UMT5Config):
super().__init__()
self.dense = nn.Linear(config.d_model, config.d_model)
self.dropout = nn.Dropout(p=config.classifier_dropout)
self.out_proj = nn.Linear(config.d_model, config.num_labels)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dropout(hidden_states)
hidden_states = self.dense(hidden_states)
hidden_states = torch.tanh(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.out_proj(hidden_states)
return hidden_states
@auto_docstring
class UMT5PreTrainedModel(PreTrainedModel):
config: UMT5Config
base_model_prefix = "transformer"
supports_gradient_checkpointing = True
_can_compile_fullgraph = True
_no_split_modules = ["UMT5Block"]
_keep_in_fp32_modules = ["wo"]
@property
def dummy_inputs(self):
input_ids = torch.tensor(DUMMY_INPUTS)
input_mask = torch.tensor(DUMMY_MASK)
dummy_inputs = {
"decoder_input_ids": input_ids,
"input_ids": input_ids,
"decoder_attention_mask": input_mask,
}
return dummy_inputs
@torch.no_grad()
def _init_weights(self, module):
"""Initialize the weights"""
factor = self.config.initializer_factor # Used for testing weights initialization
if isinstance(module, UMT5LayerNorm):
init.constant_(module.weight, factor * 1.0)
elif isinstance(
module,
(
UMT5Model,
UMT5ForConditionalGeneration,
UMT5EncoderModel,
UMT5ForQuestionAnswering,
),
):
# Mesh TensorFlow embeddings initialization
# See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L1624
init.normal_(module.shared.weight, mean=0.0, std=factor * 1.0)
if hasattr(module, "lm_head") and not self.config.tie_word_embeddings:
init.normal_(module.lm_head.weight, mean=0.0, std=factor * 1.0)
if hasattr(module, "qa_outputs"):
init.normal_(module.qa_outputs.weight, mean=0.0, std=factor * ((self.config.d_model) ** -0.5))
init.zeros_(module.qa_outputs.bias)
elif isinstance(module, UMT5ForTokenClassification):
if hasattr(module, "classifier"):
init.normal_(module.classifier.weight, mean=0.0, std=factor * 1.0)
init.zeros_(module.classifier.bias)
elif isinstance(module, UMT5ClassificationHead):
init.normal_(module.dense.weight, mean=0.0, std=factor * ((self.config.d_model) ** -0.5))
if hasattr(module.dense, "bias") and module.dense.bias is not None:
init.zeros_(module.dense.bias)
init.normal_(module.out_proj.weight, mean=0.0, std=factor * ((self.config.d_model) ** -0.5))
if hasattr(module.out_proj, "bias") and module.out_proj.bias is not None:
init.zeros_(module.out_proj.bias)
elif isinstance(module, UMT5DenseActDense):
# Mesh TensorFlow FF initialization
# See https://github.com/tensorflow/mesh/blob/master/mesh_tensorflow/transformer/transformer_layers.py#L56
# and https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L89
init.normal_(module.wi.weight, mean=0.0, std=factor * ((self.config.d_model) ** -0.5))
if hasattr(module.wi, "bias") and module.wi.bias is not None:
init.zeros_(module.wi.bias)
init.normal_(module.wo.weight, mean=0.0, std=factor * ((self.config.d_ff) ** -0.5))
if hasattr(module.wo, "bias") and module.wo.bias is not None:
init.zeros_(module.wo.bias)
elif isinstance(module, UMT5DenseGatedActDense):
init.normal_(module.wi_0.weight, mean=0.0, std=factor * ((self.config.d_model) ** -0.5))
if hasattr(module.wi_0, "bias") and module.wi_0.bias is not None:
init.zeros_(module.wi_0.bias)
init.normal_(module.wi_1.weight, mean=0.0, std=factor * ((self.config.d_model) ** -0.5))
if hasattr(module.wi_1, "bias") and module.wi_1.bias is not None:
init.zeros_(module.wi_1.bias)
init.normal_(module.wo.weight, mean=0.0, std=factor * ((self.config.d_ff) ** -0.5))
if hasattr(module.wo, "bias") and module.wo.bias is not None:
init.zeros_(module.wo.bias)
elif isinstance(module, UMT5Attention):
# Mesh TensorFlow attention initialization to avoid scaling before softmax
# See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/attention.py#L136
d_model = self.config.d_model
key_value_proj_dim = self.config.d_kv
n_heads = self.config.num_heads
init.normal_(module.q.weight, mean=0.0, std=factor * ((d_model * key_value_proj_dim) ** -0.5))
init.normal_(module.k.weight, mean=0.0, std=factor * (d_model**-0.5))
init.normal_(module.v.weight, mean=0.0, std=factor * (d_model**-0.5))
init.normal_(module.o.weight, mean=0.0, std=factor * ((n_heads * key_value_proj_dim) ** -0.5))
if module.has_relative_attention_bias:
init.normal_(module.relative_attention_bias.weight, mean=0.0, std=factor * ((d_model) ** -0.5))
def _shift_right(self, input_ids):
decoder_start_token_id = self.config.decoder_start_token_id
pad_token_id = self.config.pad_token_id
if decoder_start_token_id is None:
raise ValueError(
"self.model.config.decoder_start_token_id has to be defined. In UMT5 it is usually set to the pad_token_id. "
"See UMT5 docs for more information."
)
shifted_input_ids = input_ids.new_zeros(input_ids.shape)
shifted_input_ids[..., 1:] = input_ids[..., :-1].clone()
shifted_input_ids[..., 0] = decoder_start_token_id
if pad_token_id is None:
raise ValueError("self.model.config.pad_token_id has to be defined.")
# replace possible -100 values in labels by `pad_token_id`
shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)
return shifted_input_ids
class UMT5Stack(UMT5PreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.embed_tokens = nn.Embedding(config.vocab_size, config.d_model)
self.is_decoder = config.is_decoder
self.block = nn.ModuleList([UMT5Block(config, layer_idx=i) for i in range(config.num_layers)])
self.final_layer_norm = UMT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
self.dropout = nn.Dropout(config.dropout_rate)
# Initialize weights and apply final processing
self.gradient_checkpointing = False
self.post_init()
def set_input_embeddings(self, new_embeddings):
self.embed_tokens = new_embeddings
def forward(
self,
input_ids=None,
attention_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
inputs_embeds=None,
past_key_values=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
cache_position=None,
**kwargs,
):
use_cache = use_cache if use_cache is not None else self.config.use_cache
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
err_msg_prefix = "decoder_" if self.is_decoder else ""
raise ValueError(
f"You cannot specify both {err_msg_prefix}input_ids and {err_msg_prefix}inputs_embeds at the same time"
)
elif input_ids is not None:
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
err_msg_prefix = "decoder_" if self.is_decoder else ""
raise ValueError(f"You have to specify either {err_msg_prefix}input_ids or {err_msg_prefix}inputs_embeds")
if self.gradient_checkpointing and self.training:
if use_cache:
logger.warning_once(
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
)
use_cache = False
if inputs_embeds is None:
if self.embed_tokens is None:
raise ValueError("You have to initialize the model with valid token embeddings")
inputs_embeds = self.embed_tokens(input_ids)
batch_size, seq_length = input_shape
if use_cache is True:
if not self.is_decoder:
raise ValueError(f"`use_cache` can only be set to `True` if {self} is used as a decoder")
# initialize past_key_values
if self.is_decoder:
if use_cache and past_key_values is None:
if self.config.is_encoder_decoder:
past_key_values = EncoderDecoderCache(
DynamicCache(config=self.config), DynamicCache(config=self.config)
)
else:
past_key_values = DynamicCache(config=self.config)
elif not self.is_decoder:
# do not pass cache object down the line for encoder stack
# it messes indexing later in decoder-stack because cache object is modified in-place
past_key_values = None
past_key_values_length = past_key_values.get_seq_length() if past_key_values is not None else 0
if cache_position is None:
cache_position = torch.arange(
past_key_values_length, past_key_values_length + seq_length, device=inputs_embeds.device
)
if attention_mask is None and not is_torchdynamo_compiling():
# required mask seq length can be calculated via length of past cache
mask_seq_length = past_key_values_length + seq_length
attention_mask = torch.ones(batch_size, mask_seq_length, device=inputs_embeds.device)
if self.is_decoder:
causal_mask = self._update_causal_mask(
attention_mask,
inputs_embeds,
cache_position,
past_key_values.self_attention_cache
if isinstance(past_key_values, EncoderDecoderCache)
else past_key_values,
output_attentions,
)
elif attention_mask is not None:
causal_mask = attention_mask[:, None, None, :]
causal_mask = causal_mask.to(dtype=inputs_embeds.dtype)
causal_mask = (1.0 - causal_mask) * torch.finfo(inputs_embeds.dtype).min
else:
causal_mask = None
# If a 2D or 3D attention mask is provided for the cross-attention
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | true |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/blenderbot_small/modeling_blenderbot_small.py | src/transformers/models/blenderbot_small/modeling_blenderbot_small.py | # coding=utf-8
# Copyright 2021 The Facebook, Inc. and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BlenderbotSmall model."""
import math
from collections.abc import Callable
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import initialization as init
from ...activations import ACT2FN
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from ...generation import GenerationMixin
from ...masking_utils import create_bidirectional_mask, create_causal_mask
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import (
BaseModelOutput,
BaseModelOutputWithPastAndCrossAttentions,
CausalLMOutputWithCrossAttentions,
Seq2SeqLMOutput,
Seq2SeqModelOutput,
)
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...utils import (
TransformersKwargs,
auto_docstring,
is_torchdynamo_compiling,
logging,
)
from .configuration_blenderbot_small import BlenderbotSmallConfig
logger = logging.get_logger(__name__)
# Copied from transformers.models.bart.modeling_bart.shift_tokens_right
def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int):
"""
Shift input ids one token to the right.
"""
shifted_input_ids = input_ids.new_zeros(input_ids.shape)
shifted_input_ids[:, 1:] = input_ids[:, :-1].clone()
shifted_input_ids[:, 0] = decoder_start_token_id
if pad_token_id is None:
raise ValueError("self.model.config.pad_token_id has to be defined.")
# replace possible -100 values in labels by `pad_token_id`
shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)
return shifted_input_ids
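# Illustrative sketch (added for this write-up, not part of the upstream file): how
# `shift_tokens_right` builds decoder inputs from labels, using this model's defaults
# pad_token_id=0 and decoder_start_token_id=1 and made-up token ids:
#
#   labels         = torch.tensor([[42, 17, -100, -100]])
#   decoder_inputs = shift_tokens_right(labels, pad_token_id=0, decoder_start_token_id=1)
#   # -> tensor([[ 1, 42, 17,  0]]): the start token is prepended, ids shift right by one,
#   #    and the remaining -100 (an ignored label position) is replaced by the pad id.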
# Copied from transformers.models.blenderbot.modeling_blenderbot.BlenderbotLearnedPositionalEmbedding with Blenderbot->BlenderbotSmall
class BlenderbotSmallLearnedPositionalEmbedding(nn.Embedding):
"""
This module learns positional embeddings up to a fixed maximum size.
"""
def __init__(self, num_embeddings: int, embedding_dim: int):
super().__init__(num_embeddings, embedding_dim)
def forward(
self, input_ids_shape: torch.Size, past_key_values_length: int = 0, position_ids: Optional[torch.Tensor] = None
):
"""`input_ids_shape` is expected to be [bsz x seqlen]."""
if position_ids is None:
bsz, seq_len = input_ids_shape[:2]
position_ids = torch.arange(
past_key_values_length, past_key_values_length + seq_len, dtype=torch.long, device=self.weight.device
)
return super().forward(position_ids)
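    # Reading aid (editorial note, not upstream code): during incremental decoding with
    # `input_ids_shape == torch.Size([2, 1])` and `past_key_values_length == 4`, the computed
    # `position_ids` is tensor([4]), so the single new token of each sequence looks up the
    # learned embedding for absolute position 4.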
# Copied from transformers.models.bert.modeling_bert.eager_attention_forward
def eager_attention_forward(
module: nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: Optional[torch.Tensor],
scaling: Optional[float] = None,
dropout: float = 0.0,
**kwargs: Unpack[TransformersKwargs],
):
if scaling is None:
scaling = query.size(-1) ** -0.5
# Take the dot product between "query" and "key" to get the raw attention scores.
attn_weights = torch.matmul(query, key.transpose(2, 3)) * scaling
if attention_mask is not None:
attention_mask = attention_mask[:, :, :, : key.shape[-2]]
attn_weights = attn_weights + attention_mask
attn_weights = nn.functional.softmax(attn_weights, dim=-1)
attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
attn_output = torch.matmul(attn_weights, value)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, attn_weights
# Copied from transformers.models.bart.modeling_bart.BartAttention with Bart->BlenderbotSmall
class BlenderbotSmallAttention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(
self,
embed_dim: int,
num_heads: int,
dropout: float = 0.0,
is_decoder: bool = False,
bias: bool = True,
is_causal: bool = False,
config: Optional[BlenderbotSmallConfig] = None,
layer_idx: Optional[int] = None,
):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
self.config = config
if (self.head_dim * num_heads) != self.embed_dim:
raise ValueError(
f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
f" and `num_heads`: {num_heads})."
)
self.scaling = self.head_dim**-0.5
self.is_decoder = is_decoder
self.is_causal = is_causal
self.layer_idx = layer_idx
if layer_idx is None and self.is_decoder:
logger.warning_once(
f"Instantiating a decoder {self.__class__.__name__} without passing `layer_idx` is not recommended and "
"will lead to errors during the forward call, if caching is used. Please make sure to provide a `layer_idx` "
"when creating this class."
)
self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
def forward(
self,
hidden_states: torch.Tensor,
key_value_states: Optional[torch.Tensor] = None,
past_key_values: Optional[Cache] = None,
attention_mask: Optional[torch.Tensor] = None,
output_attentions: bool = False,
cache_position: Optional[torch.Tensor] = None,
# TODO: we need a refactor so that the different attention modules can get their specific kwargs
        # At the moment, encoder, decoder, and encoder-decoder attention kwargs are mixed together here
**kwargs: Unpack[FlashAttentionKwargs],
) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
"""Input shape: Batch x Time x Channel"""
# if key_value_states are provided this layer is used as a cross-attention layer
# for the decoder
is_cross_attention = key_value_states is not None
# determine input shapes
bsz, tgt_len = hidden_states.shape[:-1]
src_len = key_value_states.shape[1] if is_cross_attention else tgt_len
q_input_shape = (bsz, tgt_len, -1, self.head_dim)
kv_input_shape = (bsz, src_len, -1, self.head_dim)
# get query proj
query_states = self.q_proj(hidden_states).view(*q_input_shape).transpose(1, 2)
is_updated = False
if past_key_values is not None:
if isinstance(past_key_values, EncoderDecoderCache):
is_updated = past_key_values.is_updated.get(self.layer_idx)
if is_cross_attention:
# after the first generated id, we can subsequently re-use all key/value_states from cache
curr_past_key_values = past_key_values.cross_attention_cache
else:
curr_past_key_values = past_key_values.self_attention_cache
else:
curr_past_key_values = past_key_values
current_states = key_value_states if is_cross_attention else hidden_states
if is_cross_attention and past_key_values is not None and is_updated:
# reuse k,v, cross_attentions
key_states = curr_past_key_values.layers[self.layer_idx].keys
value_states = curr_past_key_values.layers[self.layer_idx].values
else:
key_states = self.k_proj(current_states)
value_states = self.v_proj(current_states)
key_states = key_states.view(*kv_input_shape).transpose(1, 2)
value_states = value_states.view(*kv_input_shape).transpose(1, 2)
if past_key_values is not None:
# save all key/value_states to cache to be re-used for fast auto-regressive generation
cache_position = cache_position if not is_cross_attention else None
key_states, value_states = curr_past_key_values.update(
key_states, value_states, self.layer_idx, {"cache_position": cache_position}
)
# set flag that curr layer for cross-attn is already updated so we can re-use in subsequent calls
if is_cross_attention and isinstance(past_key_values, EncoderDecoderCache):
past_key_values.is_updated[self.layer_idx] = True
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
query_states,
key_states,
value_states,
attention_mask,
dropout=0.0 if not self.training else self.dropout,
scaling=self.scaling,
output_attentions=output_attentions,
**kwargs,
)
attn_output = attn_output.reshape(bsz, tgt_len, -1).contiguous()
attn_output = self.out_proj(attn_output)
return attn_output, attn_weights
# Copied from transformers.models.bart.modeling_bart.BartEncoderLayer with Bart->BlenderbotSmall, BART->BLENDERBOT_SMALL
class BlenderbotSmallEncoderLayer(GradientCheckpointingLayer):
def __init__(self, config: BlenderbotSmallConfig, layer_idx: Optional[int] = None):
super().__init__()
self.embed_dim = config.d_model
self.self_attn = BlenderbotSmallAttention(
embed_dim=self.embed_dim,
num_heads=config.encoder_attention_heads,
dropout=config.attention_dropout,
config=config,
layer_idx=layer_idx,
)
self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.dropout = config.dropout
self.activation_fn = ACT2FN[config.activation_function]
self.activation_dropout = config.activation_dropout
self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim)
self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim)
self.final_layer_norm = nn.LayerNorm(self.embed_dim)
def forward(
self,
hidden_states: torch.FloatTensor,
attention_mask: torch.FloatTensor,
output_attentions: Optional[bool] = False,
) -> tuple[torch.FloatTensor, Optional[torch.FloatTensor]]:
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
"""
residual = hidden_states
hidden_states, attn_weights = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
output_attentions=output_attentions,
)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
residual = hidden_states
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
hidden_states = self.fc2(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.final_layer_norm(hidden_states)
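        # Clamp values that overflow in float16 so half-precision runs do not propagate
        # inf/nan through subsequent layers (descriptive comment added for clarity).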
if hidden_states.dtype == torch.float16 and (
torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any()
):
clamp_value = torch.finfo(hidden_states.dtype).max - 1000
hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights,)
return outputs
# Copied from transformers.models.bart.modeling_bart.BartDecoderLayer with Bart->BlenderbotSmall, BART->BLENDERBOT_SMALL
class BlenderbotSmallDecoderLayer(GradientCheckpointingLayer):
def __init__(self, config: BlenderbotSmallConfig, layer_idx: Optional[int] = None):
super().__init__()
self.embed_dim = config.d_model
self.self_attn = BlenderbotSmallAttention(
embed_dim=self.embed_dim,
num_heads=config.decoder_attention_heads,
dropout=config.attention_dropout,
is_decoder=True,
is_causal=True,
config=config,
layer_idx=layer_idx,
)
self.dropout = config.dropout
self.activation_fn = ACT2FN[config.activation_function]
self.activation_dropout = config.activation_dropout
self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.encoder_attn = BlenderbotSmallAttention(
self.embed_dim,
config.decoder_attention_heads,
dropout=config.attention_dropout,
is_decoder=True,
config=config,
layer_idx=layer_idx,
)
self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim)
self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim)
self.final_layer_norm = nn.LayerNorm(self.embed_dim)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
encoder_hidden_states: Optional[torch.Tensor] = None,
encoder_attention_mask: Optional[torch.Tensor] = None,
past_key_values: Optional[Cache] = None,
output_attentions: Optional[bool] = False,
use_cache: Optional[bool] = True,
cache_position: Optional[torch.Tensor] = None,
) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
encoder_hidden_states (`torch.FloatTensor`):
cross attention input to the layer of shape `(batch, seq_len, embed_dim)`
encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
past_key_values (`Cache`): cached past key and value projection states
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
Indices depicting the position of the input sequence tokens in the sequence. It is used to update the
cache in the correct position and to infer the complete sequence length.
"""
residual = hidden_states
# Self Attention
hidden_states, self_attn_weights = self.self_attn(
hidden_states=hidden_states,
past_key_values=past_key_values,
attention_mask=attention_mask,
output_attentions=output_attentions,
cache_position=cache_position,
)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
# Cross-Attention Block
cross_attn_weights = None
if encoder_hidden_states is not None:
residual = hidden_states
hidden_states, cross_attn_weights = self.encoder_attn(
hidden_states=hidden_states,
key_value_states=encoder_hidden_states,
attention_mask=encoder_attention_mask,
past_key_values=past_key_values,
output_attentions=output_attentions,
cache_position=cache_position,
)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.encoder_attn_layer_norm(hidden_states)
# Fully Connected
residual = hidden_states
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
hidden_states = self.fc2(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.final_layer_norm(hidden_states)
outputs = (hidden_states,)
if output_attentions:
outputs += (self_attn_weights, cross_attn_weights)
return outputs
@auto_docstring
class BlenderbotSmallPreTrainedModel(PreTrainedModel):
config: BlenderbotSmallConfig
base_model_prefix = "model"
supports_gradient_checkpointing = True
_supports_flash_attn = True
_supports_sdpa = True
_supports_flex_attn = True
_can_compile_fullgraph = True
def _init_weights(self, module):
super()._init_weights(module)
if isinstance(module, BlenderbotSmallForConditionalGeneration):
init.zeros_(module.final_logits_bias)
@property
def dummy_inputs(self):
pad_token = self.config.pad_token_id
input_ids = torch.tensor([[0, 6, 10, 4, 2], [0, 8, 12, 2, pad_token]], device=self.device)
dummy_inputs = {
"attention_mask": input_ids.ne(pad_token),
"input_ids": input_ids,
"decoder_input_ids": input_ids,
}
return dummy_inputs
class BlenderbotSmallEncoder(BlenderbotSmallPreTrainedModel):
"""
Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
[`BlenderbotSmallEncoderLayer`].
Args:
config: BlenderbotSmallConfig
embed_tokens (nn.Embedding): output embedding
"""
def __init__(self, config: BlenderbotSmallConfig):
super().__init__(config)
self.dropout = config.dropout
self.layerdrop = config.encoder_layerdrop
embed_dim = config.d_model
self.padding_idx = config.pad_token_id
self.max_source_positions = config.max_position_embeddings
self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0
self.embed_tokens = nn.Embedding(config.vocab_size, embed_dim, self.padding_idx)
self.embed_positions = BlenderbotSmallLearnedPositionalEmbedding(
config.max_position_embeddings,
embed_dim,
)
self.layers = nn.ModuleList([BlenderbotSmallEncoderLayer(config) for _ in range(config.encoder_layers)])
self.layernorm_embedding = nn.LayerNorm(embed_dim)
self.gradient_checkpointing = False
# Initialize weights and apply final processing
self.post_init()
def forward(
self,
input_ids=None,
attention_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
**kwargs,
):
r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
provide it.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# retrieve input_ids and inputs_embeds
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
embed_pos = self.embed_positions(input_shape)
hidden_states = inputs_embeds + embed_pos
hidden_states = self.layernorm_embedding(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
attention_mask = create_bidirectional_mask(
config=self.config,
input_embeds=inputs_embeds,
attention_mask=attention_mask,
)
encoder_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
for idx, encoder_layer in enumerate(self.layers):
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
# add LayerDrop (see https://huggingface.co/papers/1909.11556 for description)
to_drop = False
if self.training:
dropout_probability = torch.rand([])
if dropout_probability < self.layerdrop: # skip the layer
to_drop = True
if to_drop:
layer_outputs = (None, None)
else:
layer_outputs = encoder_layer(
hidden_states,
attention_mask,
output_attentions=output_attentions,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
return BaseModelOutput(
last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
)
class BlenderbotSmallDecoder(BlenderbotSmallPreTrainedModel):
"""
Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`BlenderbotSmallDecoderLayer`]
Args:
config: BlenderbotSmallConfig
embed_tokens (nn.Embedding): output embedding
"""
def __init__(self, config: BlenderbotSmallConfig):
super().__init__(config)
self.dropout = config.dropout
self.layerdrop = config.decoder_layerdrop
self.padding_idx = config.pad_token_id
self.max_target_positions = config.max_position_embeddings
self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0
self.embed_tokens = nn.Embedding(config.vocab_size, config.d_model, self.padding_idx)
self.embed_positions = BlenderbotSmallLearnedPositionalEmbedding(
config.max_position_embeddings,
config.d_model,
)
self.layers = nn.ModuleList(
[BlenderbotSmallDecoderLayer(config, layer_idx=i) for i in range(config.decoder_layers)]
)
self.layernorm_embedding = nn.LayerNorm(config.d_model)
self.gradient_checkpointing = False
# Initialize weights and apply final processing
self.post_init()
def forward(
self,
input_ids=None,
attention_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_values=None,
inputs_embeds=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
cache_position=None,
**kwargs,
):
r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
provide it.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
of the decoder.
encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values
selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
Indices depicting the position of the input sequence tokens in the sequence. It is used to update the
cache in the correct position and to infer the complete sequence length.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# retrieve input_ids and inputs_embeds
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
elif input_ids is not None:
input = input_ids
input_shape = input.shape
input_ids = input_ids.view(-1, input_shape[-1])
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
input = inputs_embeds[:, :, -1]
else:
raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input)
if self.gradient_checkpointing and self.training:
if use_cache:
                logger.warning_once(
                    "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
                )
use_cache = False
# initialize `past_key_values`
if use_cache and past_key_values is None:
past_key_values = (
EncoderDecoderCache(DynamicCache(config=self.config), DynamicCache(config=self.config))
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | true |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/blenderbot_small/configuration_blenderbot_small.py | src/transformers/models/blenderbot_small/configuration_blenderbot_small.py | # coding=utf-8
# Copyright 2021 The Facebook, Inc. and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BlenderbotSmall model configuration"""
from ...configuration_utils import PreTrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
class BlenderbotSmallConfig(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`BlenderbotSmallModel`]. It is used to instantiate
    a BlenderbotSmall model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the BlenderbotSmall
[facebook/blenderbot_small-90M](https://huggingface.co/facebook/blenderbot_small-90M) architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 50265):
Vocabulary size of the BlenderbotSmall model. Defines the number of different tokens that can be
represented by the `inputs_ids` passed when calling [`BlenderbotSmallModel`].
d_model (`int`, *optional*, defaults to 512):
Dimensionality of the layers and the pooler layer.
encoder_layers (`int`, *optional*, defaults to 8):
Number of encoder layers.
decoder_layers (`int`, *optional*, defaults to 8):
Number of decoder layers.
encoder_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer encoder.
decoder_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer decoder.
decoder_ffn_dim (`int`, *optional*, defaults to 2048):
Dimensionality of the "intermediate" (often named feed-forward) layer in decoder.
encoder_ffn_dim (`int`, *optional*, defaults to 2048):
            Dimensionality of the "intermediate" (often named feed-forward) layer in encoder.
activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"silu"` and `"gelu_new"` are supported.
dropout (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
activation_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for activations inside the fully connected layer.
max_position_embeddings (`int`, *optional*, defaults to 512):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
init_std (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
encoder_layerdrop (`float`, *optional*, defaults to 0.0):
            The LayerDrop probability for the encoder. See the [LayerDrop paper](https://huggingface.co/papers/1909.11556)
for more details.
decoder_layerdrop (`float`, *optional*, defaults to 0.0):
            The LayerDrop probability for the decoder. See the [LayerDrop paper](https://huggingface.co/papers/1909.11556)
for more details.
scale_embedding (`bool`, *optional*, defaults to `False`):
            Scale embeddings by multiplying them by sqrt(d_model).
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models)
forced_eos_token_id (`int`, *optional*, defaults to 2):
The id of the token to force as the last generated token when `max_length` is reached. Usually set to
`eos_token_id`.
Example:
```python
>>> from transformers import BlenderbotSmallConfig, BlenderbotSmallModel
>>> # Initializing a BlenderbotSmall facebook/blenderbot_small-90M style configuration
>>> configuration = BlenderbotSmallConfig()
>>> # Initializing a model (with random weights) from the facebook/blenderbot_small-90M style configuration
>>> model = BlenderbotSmallModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "blenderbot-small"
keys_to_ignore_at_inference = ["past_key_values"]
attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__(
self,
vocab_size=50265,
max_position_embeddings=512,
encoder_layers=8,
encoder_ffn_dim=2048,
encoder_attention_heads=16,
decoder_layers=8,
decoder_ffn_dim=2048,
decoder_attention_heads=16,
encoder_layerdrop=0.0,
decoder_layerdrop=0.0,
use_cache=True,
is_encoder_decoder=True,
activation_function="gelu",
d_model=512,
dropout=0.1,
attention_dropout=0.0,
activation_dropout=0.0,
init_std=0.02,
decoder_start_token_id=1,
scale_embedding=False,
pad_token_id=0,
bos_token_id=1,
eos_token_id=2,
forced_eos_token_id=2,
**kwargs,
):
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.d_model = d_model
self.encoder_ffn_dim = encoder_ffn_dim
self.encoder_layers = encoder_layers
self.encoder_attention_heads = encoder_attention_heads
self.decoder_ffn_dim = decoder_ffn_dim
self.decoder_layers = decoder_layers
self.decoder_attention_heads = decoder_attention_heads
self.dropout = dropout
self.attention_dropout = attention_dropout
self.activation_dropout = activation_dropout
self.activation_function = activation_function
self.init_std = init_std
self.encoder_layerdrop = encoder_layerdrop
self.decoder_layerdrop = decoder_layerdrop
self.use_cache = use_cache
self.num_hidden_layers = encoder_layers
self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=pad_token_id,
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
is_encoder_decoder=is_encoder_decoder,
decoder_start_token_id=decoder_start_token_id,
forced_eos_token_id=forced_eos_token_id,
**kwargs,
)
__all__ = ["BlenderbotSmallConfig"]
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/blenderbot_small/__init__.py | src/transformers/models/blenderbot_small/__init__.py | # Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure
if TYPE_CHECKING:
from .configuration_blenderbot_small import *
from .modeling_blenderbot_small import *
from .tokenization_blenderbot_small import *
from .tokenization_blenderbot_small_fast import *
else:
import sys
_file = globals()["__file__"]
sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/blenderbot_small/tokenization_blenderbot_small.py | src/transformers/models/blenderbot_small/tokenization_blenderbot_small.py | # coding=utf-8
# Copyright 2021 The Facebook Inc. and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization class for BlenderbotSmall."""
import json
import os
from typing import Optional
import regex as re
from ...tokenization_python import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
def get_pairs(word):
"""
Return set of symbol pairs in a word.
Word is represented as tuple of symbols (symbols being variable-length strings).
"""
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
prev_char = char
pairs = set(pairs)
return pairs
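# Worked example (added for illustration, not in the upstream file):
#   get_pairs(("h", "e", "l", "l", "o</w>"))
#   -> {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o</w>")}
# i.e. every adjacent symbol pair, which the BPE loop in `BlenderbotSmallTokenizer.bpe`
# ranks against `self.bpe_ranks` to pick the next merge.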
class BlenderbotSmallTokenizer(PreTrainedTokenizer):
"""
Constructs a Blenderbot-90M tokenizer based on BPE (Byte-Pair-Encoding)
This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
the superclass for more information regarding methods.
Args:
vocab_file (`str`):
File containing the vocabulary.
merges_file (`str`):
Path to the merges file.
bos_token (`str`, *optional*, defaults to `"__start__"`):
The beginning of sentence token.
eos_token (`str`, *optional*, defaults to `"__end__"`):
The end of sentence token.
unk_token (`str`, *optional*, defaults to `"__unk__"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
pad_token (`str`, *optional*, defaults to `"__null__"`):
The token used for padding, for example when batching sequences of different lengths.
kwargs (*optional*):
Additional keyword arguments passed along to [`PreTrainedTokenizer`]
"""
vocab_files_names = VOCAB_FILES_NAMES
model_input_names = ["input_ids", "attention_mask"]
def __init__(
self,
vocab_file,
merges_file,
bos_token="__start__",
eos_token="__end__",
unk_token="__unk__",
pad_token="__null__",
**kwargs,
):
with open(vocab_file, encoding="utf-8") as vocab_handle:
self.encoder = json.load(vocab_handle)
self.decoder = {v: k for k, v in self.encoder.items()}
with open(merges_file, encoding="utf-8") as merges_handle:
merges = merges_handle.read().split("\n")[1:-1]
merges = [tuple(merge.split()) for merge in merges]
self.bpe_ranks = dict(zip(merges, range(len(merges))))
self.cache = {}
super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)
self.special_tokens_pattern = None
@property
def vocab_size(self) -> int:
return len(self.encoder)
def get_vocab(self) -> dict:
return dict(self.encoder, **self.added_tokens_encoder)
def bpe(self, token: str) -> str:
if token in self.cache:
return self.cache[token]
token = re.sub("([.,!?()])", r" \1", token)
token = re.sub("(')", r" \1 ", token)
token = re.sub(r"\s{2,}", " ", token)
if "\n" in token:
token = token.replace("\n", " __newln__")
tokens = token.split(" ")
words = []
for token in tokens:
if not len(token):
continue
token = token.lower()
word = tuple(token)
word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
pairs = get_pairs(word)
if not pairs:
words.append(token)
continue
while True:
bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
if bigram not in self.bpe_ranks:
break
first, second = bigram
new_word = []
i = 0
while i < len(word):
try:
j = word.index(first, i)
new_word.extend(word[i:j])
i = j
except ValueError:
new_word.extend(word[i:])
break
if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
new_word = tuple(new_word)
word = new_word
if len(word) == 1:
break
else:
pairs = get_pairs(word)
word = "@@ ".join(word)
word = word[:-4]
self.cache[token] = word
words.append(word)
return " ".join(words)
def _tokenize(self, text: str) -> list[str]:
"""Split a string into tokens using BPE."""
split_tokens = []
words = re.findall(r"\S+\n?", text)
for token in words:
split_tokens.extend(list(self.bpe(token).split(" ")))
return split_tokens
def _convert_token_to_id(self, token: str) -> int:
"""Converts a token to an id using the vocab."""
token = token.lower()
return self.encoder.get(token, self.encoder.get(self.unk_token))
def _convert_id_to_token(self, index: int) -> str:
"""Converts an index (integer) in a token (str) using the vocab."""
return self.decoder.get(index, self.unk_token)
def convert_tokens_to_string(self, tokens: list[str]) -> str:
"""Converts a sequence of tokens in a single string."""
out_string = " ".join(tokens).replace("@@ ", "").strip()
return out_string
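    # Hypothetical round trip (illustration only): BPE emits sub-words joined by the "@@ " marker,
    # and `convert_tokens_to_string` strips it back out:
    #   tokens = ["sam@@", "ple", "text"]
    #   " ".join(tokens).replace("@@ ", "").strip()  ->  "sample text"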
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> tuple[str]:
if not os.path.isdir(save_directory):
logger.error(f"Vocabulary path ({save_directory}) should be a directory")
return
vocab_file = os.path.join(
save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
)
merge_file = os.path.join(
save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
)
with open(vocab_file, "w", encoding="utf-8") as f:
f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
index = 0
with open(merge_file, "w", encoding="utf-8") as writer:
writer.write("#version: 0.2\n")
for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
if index != token_index:
logger.warning(
f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
" Please check that the tokenizer is not corrupted!"
)
index = token_index
writer.write(" ".join(bpe_tokens) + "\n")
index += 1
return vocab_file, merge_file
__all__ = ["BlenderbotSmallTokenizer"]
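# Minimal usage sketch (the vocabulary/merge file paths below are hypothetical placeholders,
# shown only for illustration):
#
#   tokenizer = BlenderbotSmallTokenizer("vocab.json", "merges.txt")
#   ids = tokenizer("sample text")["input_ids"]
#   text = tokenizer.decode(ids)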
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/lxmert/convert_lxmert_original_tf_checkpoint_to_pytorch.py | src/transformers/models/lxmert/convert_lxmert_original_tf_checkpoint_to_pytorch.py | # coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert LXMERT checkpoint."""
import argparse
import os
import torch
from transformers import LxmertConfig, LxmertForPreTraining
from transformers.utils import logging
logger = logging.get_logger(__name__)
logging.set_verbosity_info()
def load_tf_weights_in_lxmert(model, config, tf_checkpoint_path):
"""Load tf checkpoints in a pytorch model."""
try:
import re
import numpy as np
import tensorflow as tf
except ImportError:
        logger.error(
            "Loading a TensorFlow model in PyTorch requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
raise
tf_path = os.path.abspath(tf_checkpoint_path)
logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
# Load weights from TF model
init_vars = tf.train.list_variables(tf_path)
names = []
arrays = []
for name, shape in init_vars:
logger.info(f"Loading TF weight {name} with shape {shape}")
array = tf.train.load_variable(tf_path, name)
names.append(name)
arrays.append(array)
for name, array in zip(names, arrays):
name = name.split("/")
        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v
# which are not required for using pretrained model
if any(
n
in [
"adam_v",
"adam_m",
"AdamWeightDecayOptimizer",
"AdamWeightDecayOptimizer_1",
"global_step",
]
for n in name
):
logger.info(f"Skipping {'/'.join(name)}")
continue
pointer = model
for m_name in name:
if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
scope_names = re.split(r"_(\d+)", m_name)
else:
scope_names = [m_name]
if scope_names[0] == "kernel" or scope_names[0] == "gamma":
pointer = getattr(pointer, "weight")
elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
pointer = getattr(pointer, "bias")
elif scope_names[0] == "output_weights":
pointer = getattr(pointer, "weight")
elif scope_names[0] == "squad":
pointer = getattr(pointer, "classifier")
else:
try:
pointer = getattr(pointer, scope_names[0])
except AttributeError:
logger.info(f"Skipping {'/'.join(name)}")
continue
if len(scope_names) >= 2:
num = int(scope_names[1])
pointer = pointer[num]
if m_name[-11:] == "_embeddings":
pointer = getattr(pointer, "weight")
elif m_name == "kernel":
array = np.transpose(array)
try:
assert pointer.shape == array.shape
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
logger.info(f"Initialize PyTorch weight {name}")
pointer.data = torch.from_numpy(array)
return model
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
# Initialise PyTorch model
config = LxmertConfig.from_json_file(config_file)
print(f"Building PyTorch model from configuration: {config}")
model = LxmertForPreTraining(config)
# Load weights from tf checkpoint
load_tf_weights_in_lxmert(model, config, tf_checkpoint_path)
# Save pytorch-model
print(f"Save PyTorch model to {pytorch_dump_path}")
torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
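# Example invocation (the paths are placeholders for illustration; adjust to your checkpoint):
#   python convert_lxmert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/model.ckpt \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/pytorch_model.bin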
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/lxmert/__init__.py | src/transformers/models/lxmert/__init__.py | # Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure
if TYPE_CHECKING:
from ..bert.tokenization_bert import BertTokenizer as LxmertTokenizer
from .configuration_lxmert import *
from .modeling_lxmert import *
else:
import sys
_file = globals()["__file__"]
sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/lxmert/modeling_lxmert.py | src/transformers/models/lxmert/modeling_lxmert.py | # coding=utf-8
# Copyright 2018 Hao Tan, Mohit Bansal, and the HuggingFace team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch LXMERT model."""
import math
import warnings
from dataclasses import dataclass
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, SmoothL1Loss
from ... import initialization as init
from ...activations import ACT2FN, gelu
from ...modeling_utils import PreTrainedModel
from ...utils import ModelOutput, auto_docstring, logging
from .configuration_lxmert import LxmertConfig
logger = logging.get_logger(__name__)
class GeLU(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
return gelu(x)
@dataclass
@auto_docstring(
custom_intro="""
Lxmert's outputs that contain the last hidden states, pooled outputs, and attention probabilities for the language,
    visual, and cross-modality encoders. (note: the visual encoder in Lxmert is referred to as the "relationship"
    encoder)
"""
)
class LxmertModelOutput(ModelOutput):
r"""
language_output (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the language encoder.
vision_output (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the visual encoder.
pooled_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`):
        Last layer hidden-state of the first token of the sequence (classification, CLS, token), further processed
        by a Linear layer and a Tanh activation function.
language_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for input features + one for the output of each cross-modality layer) of
shape `(batch_size, sequence_length, hidden_size)`.
vision_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for input features + one for the output of each cross-modality layer) of
shape `(batch_size, sequence_length, hidden_size)`.
language_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
        sequence_length)`. Attention weights after the attention softmax, used to compute the weighted average in
the self-attention heads.
vision_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
        sequence_length)`. Attention weights after the attention softmax, used to compute the weighted average in
the self-attention heads.
cross_encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
        sequence_length)`. Attention weights after the attention softmax, used to compute the weighted average in
the self-attention heads.
"""
language_output: Optional[torch.FloatTensor] = None
vision_output: Optional[torch.FloatTensor] = None
pooled_output: Optional[torch.FloatTensor] = None
language_hidden_states: Optional[tuple[torch.FloatTensor]] = None
vision_hidden_states: Optional[tuple[torch.FloatTensor]] = None
language_attentions: Optional[tuple[torch.FloatTensor]] = None
vision_attentions: Optional[tuple[torch.FloatTensor]] = None
cross_encoder_attentions: Optional[tuple[torch.FloatTensor]] = None
@dataclass
@auto_docstring(
custom_intro="""
Output type of [`LxmertForQuestionAnswering`].
"""
)
class LxmertForQuestionAnsweringOutput(ModelOutput):
r"""
loss (*optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`):
        The question answering (classification) loss.
question_answering_score (`torch.FloatTensor` of shape `(batch_size, n_qa_answers)`, *optional*):
Prediction scores of question answering objective (classification).
language_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for input features + one for the output of each cross-modality layer) of
shape `(batch_size, sequence_length, hidden_size)`.
vision_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for input features + one for the output of each cross-modality layer) of
shape `(batch_size, sequence_length, hidden_size)`.
language_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
        sequence_length)`. Attention weights after the attention softmax, used to compute the weighted average in
the self-attention heads.
vision_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
        sequence_length)`. Attention weights after the attention softmax, used to compute the weighted average in
the self-attention heads.
cross_encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
        sequence_length)`. Attention weights after the attention softmax, used to compute the weighted average in
the self-attention heads.
"""
loss: Optional[torch.FloatTensor] = None
question_answering_score: Optional[torch.FloatTensor] = None
language_hidden_states: Optional[tuple[torch.FloatTensor]] = None
vision_hidden_states: Optional[tuple[torch.FloatTensor]] = None
language_attentions: Optional[tuple[torch.FloatTensor]] = None
vision_attentions: Optional[tuple[torch.FloatTensor]] = None
cross_encoder_attentions: Optional[tuple[torch.FloatTensor]] = None
@dataclass
@auto_docstring(
custom_intro="""
Output type of [`LxmertForPreTraining`].
"""
)
class LxmertForPreTrainingOutput(ModelOutput):
r"""
loss (*optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`):
        Total loss as the sum of the enabled pretraining objectives (masked language modeling, cross-modality
        matching, object prediction, and question answering).
prediction_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
cross_relationship_score (`torch.FloatTensor` of shape `(batch_size, 2)`):
Prediction scores of the textual matching objective (classification) head (scores of True/False
continuation before SoftMax).
question_answering_score (`torch.FloatTensor` of shape `(batch_size, n_qa_answers)`):
Prediction scores of question answering objective (classification).
language_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for input features + one for the output of each cross-modality layer) of
shape `(batch_size, sequence_length, hidden_size)`.
vision_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for input features + one for the output of each cross-modality layer) of
shape `(batch_size, sequence_length, hidden_size)`.
language_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
        sequence_length)`. Attention weights after the attention softmax, used to compute the weighted average in
the self-attention heads.
vision_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
        sequence_length)`. Attention weights after the attention softmax, used to compute the weighted average in
the self-attention heads.
cross_encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
        sequence_length)`. Attention weights after the attention softmax, used to compute the weighted average in
the self-attention heads.
"""
loss: Optional[torch.FloatTensor] = None
prediction_logits: Optional[torch.FloatTensor] = None
cross_relationship_score: Optional[torch.FloatTensor] = None
question_answering_score: Optional[torch.FloatTensor] = None
language_hidden_states: Optional[tuple[torch.FloatTensor]] = None
vision_hidden_states: Optional[tuple[torch.FloatTensor]] = None
language_attentions: Optional[tuple[torch.FloatTensor]] = None
vision_attentions: Optional[tuple[torch.FloatTensor]] = None
cross_encoder_attentions: Optional[tuple[torch.FloatTensor]] = None
class LxmertEmbeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings."""
def __init__(self, config):
super().__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=0)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size, padding_idx=0)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size, padding_idx=0)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, input_ids, token_type_ids=None, inputs_embeds=None):
if input_ids is not None:
input_shape = input_ids.size()
device = input_ids.device
else:
input_shape = inputs_embeds.size()[:-1]
device = inputs_embeds.device
seq_length = input_shape[1]
position_ids = torch.arange(seq_length, dtype=torch.long, device=device)
position_ids = position_ids.unsqueeze(0).expand(input_shape)
if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
position_embeddings = self.position_embeddings(position_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = inputs_embeds + position_embeddings + token_type_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
class LxmertAttention(nn.Module):
def __init__(self, config, ctx_dim=None):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
f"heads ({config.num_attention_heads})"
)
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.head_size = self.num_attention_heads * self.attention_head_size
# visual_dim = 2048
if ctx_dim is None:
ctx_dim = config.hidden_size
self.query = nn.Linear(config.hidden_size, self.head_size)
self.key = nn.Linear(ctx_dim, self.head_size)
self.value = nn.Linear(ctx_dim, self.head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def forward(self, hidden_states, context, attention_mask=None, output_attentions=False):
batch_size, seq_length, _ = hidden_states.shape
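        # Project hidden states / context into per-head layout: (batch, num_heads, seq_len, head_dim).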
query_layer = (
self.query(hidden_states)
.view(batch_size, -1, self.num_attention_heads, self.attention_head_size)
.transpose(1, 2)
)
key_layer = (
self.key(context).view(batch_size, -1, self.num_attention_heads, self.attention_head_size).transpose(1, 2)
)
value_layer = (
self.value(context)
.view(batch_size, -1, self.num_attention_heads, self.attention_head_size)
.transpose(1, 2)
)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
# Apply the attention mask is (precomputed for all layers in BertModel forward() function)
if attention_mask is not None:
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.functional.softmax(attention_scores, dim=-1)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.head_size,)
context_layer = context_layer.view(new_context_layer_shape)
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
return outputs
class LxmertAttentionOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class LxmertCrossAttentionLayer(nn.Module):
def __init__(self, config):
super().__init__()
self.att = LxmertAttention(config)
self.output = LxmertAttentionOutput(config)
def forward(self, input_tensor, ctx_tensor, ctx_att_mask=None, output_attentions=False):
output = self.att(input_tensor, ctx_tensor, ctx_att_mask, output_attentions=output_attentions)
if output_attentions:
attention_probs = output[1]
attention_output = self.output(output[0], input_tensor)
outputs = (attention_output, attention_probs) if output_attentions else (attention_output,)
return outputs
class LxmertSelfAttentionLayer(nn.Module):
def __init__(self, config):
super().__init__()
self.self = LxmertAttention(config)
self.output = LxmertAttentionOutput(config)
def forward(self, input_tensor, attention_mask, output_attentions=False):
# Self attention attends to itself, thus keys and queries are the same (input_tensor).
output = self.self(
input_tensor,
input_tensor,
attention_mask,
output_attentions=output_attentions,
)
if output_attentions:
attention_probs = output[1]
attention_output = self.output(output[0], input_tensor)
outputs = (attention_output, attention_probs) if output_attentions else (attention_output,)
return outputs
class LxmertIntermediate(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
self.intermediate_act_fn = ACT2FN[config.hidden_act]
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
class LxmertOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class LxmertLayer(nn.Module):
def __init__(self, config):
super().__init__()
self.attention = LxmertSelfAttentionLayer(config)
self.intermediate = LxmertIntermediate(config)
self.output = LxmertOutput(config)
def forward(self, hidden_states, attention_mask=None, output_attentions=False):
outputs = self.attention(hidden_states, attention_mask, output_attentions=output_attentions)
attention_output = outputs[0]
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
outputs = (layer_output,) + outputs[1:] # add attentions if we output them
return outputs
class LxmertXLayer(nn.Module):
def __init__(self, config):
super().__init__()
# The cross-attention Layer
self.visual_attention = LxmertCrossAttentionLayer(config)
# Self-attention Layers
self.lang_self_att = LxmertSelfAttentionLayer(config)
self.visn_self_att = LxmertSelfAttentionLayer(config)
# Intermediate and Output Layers (FFNs)
self.lang_inter = LxmertIntermediate(config)
self.lang_output = LxmertOutput(config)
self.visn_inter = LxmertIntermediate(config)
self.visn_output = LxmertOutput(config)
def cross_att(
self,
lang_input,
lang_attention_mask,
visual_input,
visual_attention_mask,
output_x_attentions=False,
):
# Cross Attention
lang_att_output = self.visual_attention(
lang_input,
visual_input,
ctx_att_mask=visual_attention_mask,
output_attentions=output_x_attentions,
)
visual_att_output = self.visual_attention(
visual_input,
lang_input,
ctx_att_mask=lang_attention_mask,
output_attentions=False,
)
return lang_att_output, visual_att_output
def self_att(self, lang_input, lang_attention_mask, visual_input, visual_attention_mask):
# Self Attention
lang_att_output = self.lang_self_att(lang_input, lang_attention_mask, output_attentions=False)
visual_att_output = self.visn_self_att(visual_input, visual_attention_mask, output_attentions=False)
return lang_att_output[0], visual_att_output[0]
def output_fc(self, lang_input, visual_input):
# FC layers
lang_inter_output = self.lang_inter(lang_input)
visual_inter_output = self.visn_inter(visual_input)
# Layer output
lang_output = self.lang_output(lang_inter_output, lang_input)
visual_output = self.visn_output(visual_inter_output, visual_input)
return lang_output, visual_output
def forward(
self,
lang_feats,
lang_attention_mask,
visual_feats,
visual_attention_mask,
output_attentions=False,
):
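        # Three-step flow: cross-modality attention, then per-modality self-attention, then per-modality feed-forward.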
lang_att_output, visual_att_output = self.cross_att(
lang_input=lang_feats,
lang_attention_mask=lang_attention_mask,
visual_input=visual_feats,
visual_attention_mask=visual_attention_mask,
output_x_attentions=output_attentions,
)
attention_probs = lang_att_output[1:]
lang_att_output, visual_att_output = self.self_att(
lang_att_output[0],
lang_attention_mask,
visual_att_output[0],
visual_attention_mask,
)
lang_output, visual_output = self.output_fc(lang_att_output, visual_att_output)
return (
(
lang_output,
visual_output,
attention_probs[0],
)
if output_attentions
else (lang_output, visual_output)
)
class LxmertVisualFeatureEncoder(nn.Module):
def __init__(self, config):
super().__init__()
feat_dim = config.visual_feat_dim
pos_dim = config.visual_pos_dim
# Object feature encoding
self.visn_fc = nn.Linear(feat_dim, config.hidden_size)
self.visn_layer_norm = nn.LayerNorm(config.hidden_size, eps=1e-12)
# Box position encoding
self.box_fc = nn.Linear(pos_dim, config.hidden_size)
self.box_layer_norm = nn.LayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, visual_feats, visual_pos):
x = self.visn_fc(visual_feats)
x = self.visn_layer_norm(x)
y = self.box_fc(visual_pos)
y = self.box_layer_norm(y)
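        # Fuse object-feature and box-position encodings by simple averaging.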
output = (x + y) / 2
output = self.dropout(output)
return output
class LxmertEncoder(nn.Module):
def __init__(self, config):
super().__init__()
# Obj-level image embedding layer
self.visn_fc = LxmertVisualFeatureEncoder(config)
self.config = config
# Number of layers
self.num_l_layers = config.l_layers
self.num_x_layers = config.x_layers
self.num_r_layers = config.r_layers
# Layers
# Using self.layer instead of self.l_layer to support loading BERT weights.
self.layer = nn.ModuleList([LxmertLayer(config) for _ in range(self.num_l_layers)])
self.x_layers = nn.ModuleList([LxmertXLayer(config) for _ in range(self.num_x_layers)])
self.r_layers = nn.ModuleList([LxmertLayer(config) for _ in range(self.num_r_layers)])
def forward(
self,
lang_feats,
lang_attention_mask,
visual_feats,
visual_pos,
visual_attention_mask=None,
output_attentions=None,
):
vision_hidden_states = ()
language_hidden_states = ()
vision_attentions = () if output_attentions or self.config.output_attentions else None
language_attentions = () if output_attentions or self.config.output_attentions else None
cross_encoder_attentions = () if output_attentions or self.config.output_attentions else None
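        # Hidden states are collected at every layer; attention maps only when attentions are requested (per call or via config).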
visual_feats = self.visn_fc(visual_feats, visual_pos)
# Run language layers
for layer_module in self.layer:
l_outputs = layer_module(lang_feats, lang_attention_mask, output_attentions=output_attentions)
lang_feats = l_outputs[0]
language_hidden_states = language_hidden_states + (lang_feats,)
if language_attentions is not None:
language_attentions = language_attentions + (l_outputs[1],)
# Run relational layers
for layer_module in self.r_layers:
v_outputs = layer_module(visual_feats, visual_attention_mask, output_attentions=output_attentions)
visual_feats = v_outputs[0]
vision_hidden_states = vision_hidden_states + (visual_feats,)
if vision_attentions is not None:
vision_attentions = vision_attentions + (v_outputs[1],)
# Run cross-modality layers
for layer_module in self.x_layers:
x_outputs = layer_module(
lang_feats,
lang_attention_mask,
visual_feats,
visual_attention_mask,
output_attentions=output_attentions,
)
lang_feats, visual_feats = x_outputs[:2]
vision_hidden_states = vision_hidden_states + (visual_feats,)
language_hidden_states = language_hidden_states + (lang_feats,)
if cross_encoder_attentions is not None:
cross_encoder_attentions = cross_encoder_attentions + (x_outputs[2],)
visual_encoder_outputs = (
vision_hidden_states,
vision_attentions if output_attentions else None,
)
lang_encoder_outputs = (
language_hidden_states,
language_attentions if output_attentions else None,
)
return (
visual_encoder_outputs,
lang_encoder_outputs,
cross_encoder_attentions if output_attentions else None,
)
class LxmertPooler(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
class LxmertPredictionHeadTransform(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.transform_act_fn = ACT2FN[config.hidden_act]
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=1e-12)
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
class LxmertLMPredictionHead(nn.Module):
def __init__(self, config):
super().__init__()
self.transform = LxmertPredictionHeadTransform(config)
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states) + self.bias
return hidden_states
class LxmertVisualAnswerHead(nn.Module):
def __init__(self, config, num_labels):
super().__init__()
hid_dim = config.hidden_size
self.logit_fc = nn.Sequential(
nn.Linear(hid_dim, hid_dim * 2),
GeLU(),
nn.LayerNorm(hid_dim * 2, eps=1e-12),
nn.Linear(hid_dim * 2, num_labels),
)
def forward(self, hidden_states):
return self.logit_fc(hidden_states)
class LxmertVisualObjHead(nn.Module):
def __init__(self, config):
super().__init__()
self.transform = LxmertPredictionHeadTransform(config)
# Decide the use of visual losses
visual_losses = {}
if config.visual_obj_loss:
visual_losses["obj"] = {"shape": (-1,), "num": config.num_object_labels}
if config.visual_attr_loss:
visual_losses["attr"] = {"shape": (-1,), "num": config.num_attr_labels}
if config.visual_feat_loss:
visual_losses["feat"] = {
"shape": (-1, config.visual_feat_dim),
"num": config.visual_feat_dim,
}
self.visual_losses = visual_losses
        # One decoder head per enabled visual loss, each projecting hidden states to that loss's output space.
self.decoder_dict = nn.ModuleDict(
{key: nn.Linear(config.hidden_size, self.visual_losses[key]["num"]) for key in self.visual_losses}
)
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
output = {}
for key in self.visual_losses:
output[key] = self.decoder_dict[key](hidden_states)
return output
class LxmertPreTrainingHeads(nn.Module):
def __init__(self, config):
super().__init__()
self.predictions = LxmertLMPredictionHead(config)
self.seq_relationship = nn.Linear(config.hidden_size, 2)
def forward(self, sequence_output, pooled_output):
prediction_scores = self.predictions(sequence_output)
seq_relationship_score = self.seq_relationship(pooled_output)
return prediction_scores, seq_relationship_score
@auto_docstring
class LxmertPreTrainedModel(PreTrainedModel):
config: LxmertConfig
base_model_prefix = "lxmert"
input_modalities = ("image", "text")
@torch.no_grad()
def _init_weights(self, module):
"""Initialize the weights"""
super()._init_weights(module)
if isinstance(module, LxmertLMPredictionHead):
init.zeros_(module.bias)
@auto_docstring
class LxmertModel(LxmertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.embeddings = LxmertEmbeddings(config)
self.encoder = LxmertEncoder(config)
self.pooler = LxmertPooler(config)
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, new_embeddings):
self.embeddings.word_embeddings = new_embeddings
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
visual_feats: Optional[torch.FloatTensor] = None,
visual_pos: Optional[torch.FloatTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
visual_attention_mask: Optional[torch.FloatTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
**kwargs,
) -> Union[LxmertModelOutput, tuple[torch.FloatTensor]]:
r"""
visual_feats (`torch.FloatTensor` of shape `(batch_size, num_visual_features, visual_feat_dim)`):
            This input represents visual features: ROI-pooled object features extracted from bounding boxes by a
            Faster R-CNN model. These features are currently not provided by the transformers library.
visual_pos (`torch.FloatTensor` of shape `(batch_size, num_visual_features, visual_pos_dim)`):
This input represents spatial features corresponding to their relative (via index) visual features. The
pre-trained LXMERT model expects these spatial features to be normalized bounding boxes on a scale of 0 to
1.
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | true |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/lxmert/configuration_lxmert.py | src/transformers/models/lxmert/configuration_lxmert.py | # coding=utf-8
# Copyright 2018, Hao Tan, Mohit Bansal
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""LXMERT model configuration"""
from ...configuration_utils import PreTrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
class LxmertConfig(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`LxmertModel`]. It is used
to instantiate a LXMERT model according to the specified arguments, defining the model architecture. Instantiating
a configuration with the defaults will yield a similar configuration to that of the Lxmert
[unc-nlp/lxmert-base-uncased](https://huggingface.co/unc-nlp/lxmert-base-uncased) architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 30522):
Vocabulary size of the LXMERT model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`LxmertModel`].
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
num_qa_labels (`int`, *optional*, defaults to 9500):
This represents the total number of different question answering (QA) labels there are. If using more than
            one dataset with QA, the user will need to account for the total number of labels across all of the
            datasets.
num_object_labels (`int`, *optional*, defaults to 1600):
            This represents the total number of semantically unique objects that lxmert will be able to classify a
            pooled-object feature as belonging to.
num_attr_labels (`int`, *optional*, defaults to 400):
This represents the total number of semantically unique attributes that lxmert will be able to classify a
pooled-object feature as possessing.
intermediate_size (`int`, *optional*, defaults to 3072):
Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"silu"` and `"gelu_new"` are supported.
hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout ratio for the attention probabilities.
max_position_embeddings (`int`, *optional*, defaults to 512):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
type_vocab_size (`int`, *optional*, defaults to 2):
The vocabulary size of the *token_type_ids* passed into [`BertModel`].
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
l_layers (`int`, *optional*, defaults to 9):
Number of hidden layers in the Transformer language encoder.
x_layers (`int`, *optional*, defaults to 5):
Number of hidden layers in the Transformer cross modality encoder.
r_layers (`int`, *optional*, defaults to 5):
Number of hidden layers in the Transformer visual encoder.
visual_feat_dim (`int`, *optional*, defaults to 2048):
This represents the last dimension of the pooled-object features used as input for the model, representing
the size of each object feature itself.
visual_pos_dim (`int`, *optional*, defaults to 4):
This represents the number of spatial features that are mixed into the visual features. The default is set
            to 4 because most commonly this will represent the location of a bounding box, i.e., (x, y, width, height).
visual_loss_normalizer (`float`, *optional*, defaults to 6.67):
            This represents the scaling factor by which each visual loss is multiplied if, during pretraining, one
            decides to train with multiple vision-based loss objectives.
task_matched (`bool`, *optional*, defaults to `True`):
            This task is used for sentence-image matching. If the sentence correctly describes the image, the label will
be 1. If the sentence does not correctly describe the image, the label will be 0.
task_mask_lm (`bool`, *optional*, defaults to `True`):
Whether or not to add masked language modeling (as used in pretraining models such as BERT) to the loss
objective.
task_obj_predict (`bool`, *optional*, defaults to `True`):
Whether or not to add object prediction, attribute prediction and feature regression to the loss objective.
task_qa (`bool`, *optional*, defaults to `True`):
            Whether or not to add the question-answering loss to the objective.
        visual_obj_loss (`bool`, *optional*, defaults to `True`):
            Whether or not to calculate the object-prediction loss objective.
        visual_attr_loss (`bool`, *optional*, defaults to `True`):
            Whether or not to calculate the attribute-prediction loss objective.
        visual_feat_loss (`bool`, *optional*, defaults to `True`):
            Whether or not to calculate the feature-regression loss objective.
"""
model_type = "lxmert"
attribute_map = {}
def __init__(
self,
vocab_size=30522,
hidden_size=768,
num_attention_heads=12,
num_qa_labels=9500,
num_object_labels=1600,
num_attr_labels=400,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=2,
initializer_range=0.02,
l_layers=9,
x_layers=5,
r_layers=5,
visual_feat_dim=2048,
visual_pos_dim=4,
visual_loss_normalizer=6.67,
task_matched=True,
task_mask_lm=True,
task_obj_predict=True,
task_qa=True,
visual_obj_loss=True,
visual_attr_loss=True,
visual_feat_loss=True,
**kwargs,
):
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.num_qa_labels = num_qa_labels
self.num_object_labels = num_object_labels
self.num_attr_labels = num_attr_labels
self.l_layers = l_layers
self.x_layers = x_layers
self.r_layers = r_layers
self.visual_feat_dim = visual_feat_dim
self.visual_pos_dim = visual_pos_dim
self.visual_loss_normalizer = visual_loss_normalizer
self.task_matched = task_matched
self.task_mask_lm = task_mask_lm
self.task_obj_predict = task_obj_predict
self.task_qa = task_qa
self.visual_obj_loss = visual_obj_loss
self.visual_attr_loss = visual_attr_loss
self.visual_feat_loss = visual_feat_loss
self.num_hidden_layers = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
super().__init__(**kwargs)
__all__ = ["LxmertConfig"]
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/codegen/tokenization_codegen.py | src/transformers/models/codegen/tokenization_codegen.py | # coding=utf-8
# Copyright 2022 The Salesforce authors, The Open AI Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes for CodeGen."""
import re
from typing import TYPE_CHECKING, Optional, Union
import numpy as np
from tokenizers import Tokenizer, decoders, pre_tokenizers, processors
from tokenizers.models import BPE
from ...tokenization_utils_tokenizers import TokenizersBackend
from ...utils import is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
class CodeGenTokenizer(TokenizersBackend):
"""
Construct a CodeGen tokenizer (backed by HuggingFace's *tokenizers* library). Based on byte-level
Byte-Pair-Encoding.
This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will
be encoded differently whether it is at the beginning of the sentence (without space) or not:
```python
>>> from transformers import CodeGenTokenizer
>>> tokenizer = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
>>> tokenizer("Hello world")["input_ids"]
[15496, 995]
>>> tokenizer(" Hello world")["input_ids"]
[18435, 995]
```
You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer, but since
the model was not pretrained this way, it might yield a decrease in performance.
<Tip>
When used with `is_split_into_words=True`, this tokenizer needs to be instantiated with `add_prefix_space=True`.
</Tip>
This tokenizer inherits from [`TokenizersBackend`] which contains most of the main methods. Users should
refer to this superclass for more information regarding those methods.
Args:
vocab (`str` or `dict[str, int]`, *optional*):
Custom vocabulary dictionary. If not provided, vocabulary is loaded from `vocab_file`.
merges (`str` or `list[str]`, *optional*):
Custom merges list. If not provided, merges are loaded from `merges_file`.
unk_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
bos_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
The beginning of sequence token.
eos_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
The end of sequence token.
pad_token (`str`, *optional*):
The token used for padding, for example when batching sequences of different lengths.
add_prefix_space (`bool`, *optional*, defaults to `False`):
            Whether or not to add an initial space to the input. This allows the leading word to be treated just like
            any other word. (The CodeGen tokenizer detects the beginning of words by the preceding space.)
return_token_type_ids (`bool`, *optional*, defaults to `False`):
Whether to return token type IDs.
"""
vocab_files_names = VOCAB_FILES_NAMES
model_input_names = ["input_ids", "attention_mask"]
model = BPE
def __init__(
self,
vocab: Optional[Union[str, dict[str, int]]] = None,
merges: Optional[Union[str, list[str]]] = None,
unk_token: str = "<|endoftext|>",
bos_token: str = "<|endoftext|>",
eos_token: str = "<|endoftext|>",
pad_token=None,
add_prefix_space: bool = False,
return_token_type_ids: bool = False,
**kwargs,
):
self.return_token_type_ids = return_token_type_ids
if self.return_token_type_ids:
self.model_input_names.append("token_type_ids")
self.add_prefix_space = add_prefix_space
self._vocab = vocab if vocab is not None else {}
self._merges = merges or []
self._tokenizer = Tokenizer(
BPE(
vocab=self._vocab,
merges=self._merges,
dropout=None,
continuing_subword_prefix="",
end_of_word_suffix="",
fuse_unk=False,
)
)
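        # Byte-level BPE pipeline: byte-level pre-tokenization, decoding and post-processing, GPT-2 style.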
self._tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=add_prefix_space)
self._tokenizer.decoder = decoders.ByteLevel()
self._tokenizer.post_processor = processors.ByteLevel(
add_prefix_space=True, use_regex=True, trim_offsets=False
)
super().__init__(
unk_token=unk_token,
bos_token=bos_token,
eos_token=eos_token,
pad_token=pad_token,
add_prefix_space=add_prefix_space,
return_token_type_ids=return_token_type_ids,
**kwargs,
)
def decode(
self,
token_ids: Union[int, list[int], np.ndarray, "torch.Tensor"],
skip_special_tokens: bool = False,
clean_up_tokenization_spaces: Optional[bool] = None,
truncate_before_pattern: Optional[list[str]] = None,
**kwargs,
) -> str:
"""
        Converts a sequence of ids into a string, using the tokenizer and vocabulary, with options to remove special
        tokens and clean up tokenization spaces.
Similar to doing `self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))`.
Args:
token_ids (`Union[int, List[int], np.ndarray, torch.Tensor]`):
List of tokenized input ids. Can be obtained using the `__call__` method.
skip_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not to remove special tokens in the decoding.
clean_up_tokenization_spaces (`bool`, *optional*):
Whether or not to clean up the tokenization spaces. If `None`, will default to
`self.clean_up_tokenization_spaces` (available in the `tokenizer_config`).
truncate_before_pattern (`List[str]`, *optional*, defaults to `None`):
A list of regular expression strings that will be used to truncate the returned string. This can be
used to remove extra pieces of code (e.g. truncate if observing a comment symbol "#" at the beginning
of a new line). An example pattern could be `["^#", re.escape("<|endoftext|>"), "^'''", "\n\n\n"]`.
kwargs (additional keyword arguments, *optional*):
Will be passed to the underlying model specific decode method.
Returns:
`str`: The decoded sentence.
"""
decoded_text = super().decode(
token_ids=token_ids,
skip_special_tokens=skip_special_tokens,
clean_up_tokenization_spaces=clean_up_tokenization_spaces,
**kwargs,
)
if truncate_before_pattern is not None and len(truncate_before_pattern) > 0:
decoded_text = self.truncate(decoded_text, truncate_before_pattern)
return decoded_text
def truncate(self, completion, truncate_before_pattern):
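        # Cut the completion at the second top-level `print`/`def` statement and at the earliest match of any truncation pattern.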
def find_re(string, pattern, start_pos):
m = pattern.search(string, start_pos)
return m.start() if m else -1
terminals = [re.compile(pattern, re.MULTILINE) for pattern in truncate_before_pattern]
prints = list(re.finditer("^print", completion, re.MULTILINE))
if len(prints) > 1:
completion = completion[: prints[1].start()]
defs = list(re.finditer("^def", completion, re.MULTILINE))
if len(defs) > 1:
completion = completion[: defs[1].start()]
start_pos = 0
terminals_pos = [
pos for pos in [find_re(completion, terminal, start_pos) for terminal in terminals] if pos != -1
]
if len(terminals_pos) > 0:
return completion[: min(terminals_pos)]
else:
return completion
__all__ = ["CodeGenTokenizer"]
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/codegen/__init__.py | src/transformers/models/codegen/__init__.py | # Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure
if TYPE_CHECKING:
from ..gpt2.tokenization_gpt2 import GPT2Tokenizer as CodeGenTokenizerFast
from .configuration_codegen import *
from .modeling_codegen import *
from .tokenization_codegen import *
else:
import sys
_file = globals()["__file__"]
sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/codegen/modeling_codegen.py | src/transformers/models/codegen/modeling_codegen.py | # coding=utf-8
# Copyright 2022 Salesforce authors, The EleutherAI, and HuggingFace Teams. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch CodeGen model."""
import math
from typing import Optional, Union
import torch
from torch import nn
from ... import initialization as init
from ...activations import ACT2FN
from ...cache_utils import Cache, DynamicCache
from ...generation import GenerationMixin
from ...modeling_attn_mask_utils import AttentionMaskConverter
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from ...modeling_utils import PreTrainedModel
from ...utils import (
auto_docstring,
is_torch_flex_attn_available,
logging,
)
from .configuration_codegen import CodeGenConfig
if is_torch_flex_attn_available():
from torch.nn.attention.flex_attention import BlockMask
from ...integrations.flex_attention import make_flex_block_causal_mask
logger = logging.get_logger(__name__)
# Copied from transformers.models.gptj.modeling_gptj.create_sinusoidal_positions
def create_sinusoidal_positions(num_pos: int, dim: int) -> torch.Tensor:
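    # Standard sinusoidal table: inverse frequencies 1/10000^(2i/dim), with sin in the first half and cos in the second.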
inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2, dtype=torch.int64) / dim))
sinusoid_inp = torch.einsum("i , j -> i j", torch.arange(num_pos, dtype=torch.int64).float(), inv_freq).float()
return torch.cat((torch.sin(sinusoid_inp), torch.cos(sinusoid_inp)), dim=1)
# Copied from transformers.models.gptj.modeling_gptj.rotate_every_two
def rotate_every_two(x: torch.Tensor) -> torch.Tensor:
x1 = x[:, :, :, ::2]
x2 = x[:, :, :, 1::2]
x = torch.stack((-x2, x1), dim=-1)
return x.flatten(-2) # in einsum notation: rearrange(x, '... d j -> ... (d j)')
# Copied from transformers.models.gptj.modeling_gptj.apply_rotary_pos_emb
def apply_rotary_pos_emb(tensor: torch.Tensor, sin: torch.Tensor, cos: torch.Tensor) -> torch.Tensor:
sin = torch.repeat_interleave(sin[:, :, None, :], 2, 3)
cos = torch.repeat_interleave(cos[:, :, None, :], 2, 3)
return (tensor * cos) + (rotate_every_two(tensor) * sin)
class CodeGenAttention(nn.Module):
def __init__(self, config, layer_idx=None):
super().__init__()
self.max_positions = config.max_position_embeddings
self.attn_dropout = nn.Dropout(config.attn_pdrop)
self.resid_dropout = nn.Dropout(config.resid_pdrop)
self.layer_idx = layer_idx
if layer_idx is None:
logger.warning_once(
f"Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will "
"lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` "
"when creating this class."
)
self.embed_dim = config.hidden_size
self.num_attention_heads = config.num_attention_heads
self.head_dim = self.embed_dim // self.num_attention_heads
if self.head_dim * self.num_attention_heads != self.embed_dim:
raise ValueError(
f"embed_dim must be divisible by num_attention_heads (got `embed_dim`: {self.embed_dim} and"
f" `num_attention_heads`: {self.num_attention_heads})."
)
self.scale_attn = math.sqrt(self.head_dim)
self.qkv_proj = nn.Linear(self.embed_dim, self.embed_dim * 3, bias=False)
self.out_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=False)
self.rotary_dim = config.rotary_dim
self.pos_embd_dim = self.rotary_dim or self.embed_dim
self.register_buffer(
"embed_positions", create_sinusoidal_positions(self.max_positions, self.pos_embd_dim), persistent=False
)
def _split_heads(self, x, n_head, dim_head, mp_num):
reshaped = x.reshape(x.shape[:-1] + (n_head // mp_num, dim_head))
reshaped = reshaped.reshape(x.shape[:-2] + (-1,) + reshaped.shape[-1:])
return reshaped
def _merge_heads(self, tensor, num_attention_heads, attn_head_size):
"""
Merges attn_head_size dim and num_attn_heads dim into n_ctx
"""
if len(tensor.shape) == 5:
tensor = tensor.permute(0, 1, 3, 2, 4).contiguous()
elif len(tensor.shape) == 4:
tensor = tensor.permute(0, 2, 1, 3).contiguous()
else:
raise ValueError(f"Input tensor rank should be one of [4, 5], but is: {len(tensor.shape)}")
new_shape = tensor.size()[:-2] + (num_attention_heads * attn_head_size,)
return tensor.view(new_shape)
def _attn(
self,
query,
key,
value,
attention_mask=None,
):
# Keep the attention weights computation in fp32 to avoid overflow issues
query = query.to(torch.float32)
key = key.to(torch.float32)
attn_weights = torch.matmul(query, key.transpose(-1, -2))
if attention_mask is not None:
causal_mask = attention_mask[:, :, :, : key.shape[-2]]
attn_weights += causal_mask
attn_weights = attn_weights / self.scale_attn
attn_weights = nn.Softmax(dim=-1)(attn_weights)
attn_weights = attn_weights.to(value.dtype)
attn_weights = self.attn_dropout(attn_weights)
attn_output = torch.matmul(attn_weights, value)
return attn_output, attn_weights
def forward(
self,
hidden_states: Optional[torch.FloatTensor],
layer_past: Optional[Cache] = None,
attention_mask: Optional[torch.FloatTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = False,
output_attentions: Optional[bool] = False,
cache_position: Optional[torch.LongTensor] = None,
) -> Union[
tuple[torch.Tensor, tuple[torch.Tensor]],
Optional[tuple[torch.Tensor, tuple[torch.Tensor], tuple[torch.Tensor, ...]]],
]:
qkv = self.qkv_proj(hidden_states)
# TODO(enijkamp): factor out number of logical TPU-v4 cores or make forward pass agnostic
mp_num = 4
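        # The fused qkv projection is laid out in mp_num shards; each shard is split into query, value and key chunks.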
qkv_split = qkv.reshape(qkv.shape[:-1] + (mp_num, -1))
local_dim = self.head_dim * self.num_attention_heads // mp_num
query, value, key = torch.split(qkv_split, local_dim, dim=-1)
query = self._split_heads(query, self.num_attention_heads, self.head_dim, mp_num=mp_num)
key = self._split_heads(key, self.num_attention_heads, self.head_dim, mp_num=mp_num)
value = self._split_heads(value, self.num_attention_heads, self.head_dim, mp_num=mp_num)
value = value.permute(0, 2, 1, 3)
embed_positions = self.embed_positions
if embed_positions.device != position_ids.device:
embed_positions = embed_positions.to(position_ids.device)
self.embed_positions = embed_positions
sincos = embed_positions[position_ids]
sin, cos = torch.split(sincos, sincos.shape[-1] // 2, dim=-1)
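        # When rotary_dim is set, rotary embeddings are applied only to the first rotary_dim channels; the rest pass through unchanged.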
if self.rotary_dim is not None:
k_rot = key[:, :, :, : self.rotary_dim]
k_pass = key[:, :, :, self.rotary_dim :]
q_rot = query[:, :, :, : self.rotary_dim]
q_pass = query[:, :, :, self.rotary_dim :]
k_rot = apply_rotary_pos_emb(k_rot, sin, cos)
q_rot = apply_rotary_pos_emb(q_rot, sin, cos)
key = torch.cat([k_rot, k_pass], dim=-1)
query = torch.cat([q_rot, q_pass], dim=-1)
else:
key = apply_rotary_pos_emb(key, sin, cos)
query = apply_rotary_pos_emb(query, sin, cos)
key = key.permute(0, 2, 1, 3)
query = query.permute(0, 2, 1, 3)
# Note that this cast is quite ugly, but is not implemented before ROPE as k_rot in the original codebase is always in fp32.
# Reference: https://github.com/salesforce/CodeGen/blob/f210c3bb1216c975ad858cd4132c0fdeabf4bfc2/codegen1/jaxformer/hf/codegen/modeling_codegen.py#L38
if layer_past is not None:
cache_kwargs = {
"sin": sin,
"cos": cos,
"partial_rotation_size": self.rotary_dim,
"cache_position": cache_position,
}
key, value = layer_past.update(key.to(hidden_states.dtype), value, self.layer_idx, cache_kwargs)
# compute self-attention: V x Softmax(QK^T)
attn_output, attn_weights = self._attn(query, key, value, attention_mask)
attn_output = self._merge_heads(attn_output, self.num_attention_heads, self.head_dim)
attn_output = self.out_proj(attn_output)
attn_output = self.resid_dropout(attn_output)
return attn_output, attn_weights
# Copied from transformers.models.gptj.modeling_gptj.GPTJMLP with GPTJ->CodeGen
class CodeGenMLP(nn.Module):
def __init__(self, intermediate_size, config): # in MLP: intermediate_size= 4 * embed_dim
super().__init__()
embed_dim = config.n_embd
self.fc_in = nn.Linear(embed_dim, intermediate_size)
self.fc_out = nn.Linear(intermediate_size, embed_dim)
self.act = ACT2FN[config.activation_function]
self.dropout = nn.Dropout(config.resid_pdrop)
def forward(self, hidden_states: Optional[torch.FloatTensor]) -> torch.FloatTensor:
hidden_states = self.fc_in(hidden_states)
hidden_states = self.act(hidden_states)
hidden_states = self.fc_out(hidden_states)
hidden_states = self.dropout(hidden_states)
return hidden_states
# Copied from transformers.models.gptj.modeling_gptj.GPTJBlock with GPTJ->CodeGen
class CodeGenBlock(GradientCheckpointingLayer):
# Ignore copy
def __init__(self, config, layer_idx=None):
super().__init__()
inner_dim = config.n_inner if config.n_inner is not None else 4 * config.n_embd
self.ln_1 = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)
self.attn = CodeGenAttention(config, layer_idx)
self.mlp = CodeGenMLP(inner_dim, config)
def forward(
self,
hidden_states: Optional[torch.FloatTensor],
layer_past: Optional[Cache] = None,
attention_mask: Optional[torch.FloatTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = False,
output_attentions: Optional[bool] = False,
cache_position: Optional[torch.LongTensor] = None,
) -> Union[tuple[torch.Tensor], Optional[tuple[torch.Tensor, tuple[torch.FloatTensor, ...]]]]:
residual = hidden_states
hidden_states = self.ln_1(hidden_states)
attn_outputs, attn_weights = self.attn(
hidden_states=hidden_states,
layer_past=layer_past,
attention_mask=attention_mask,
position_ids=position_ids,
use_cache=use_cache,
output_attentions=output_attentions,
cache_position=cache_position,
)
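        # Parallel (GPT-J-style) block: attention and MLP both read the same LayerNorm output and are summed with the residual.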
feed_forward_hidden_states = self.mlp(hidden_states)
hidden_states = attn_outputs + feed_forward_hidden_states + residual
return hidden_states, attn_weights
@auto_docstring
class CodeGenPreTrainedModel(PreTrainedModel):
config: CodeGenConfig
base_model_prefix = "transformer"
supports_gradient_checkpointing = True
_no_split_modules = ["CodeGenBlock"]
_skip_keys_device_placement = "past_key_values"
_can_compile_fullgraph = True
def _init_weights(self, module):
super()._init_weights(module)
if isinstance(module, CodeGenAttention):
init.copy_(module.embed_positions, create_sinusoidal_positions(module.max_positions, module.pos_embd_dim))
@auto_docstring
class CodeGenModel(CodeGenPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.embed_dim = config.n_embd
self.vocab_size = config.vocab_size
self.wte = nn.Embedding(config.vocab_size, self.embed_dim)
self.drop = nn.Dropout(config.embd_pdrop)
self.h = nn.ModuleList([CodeGenBlock(config, layer_idx=i) for i in range(config.n_layer)])
self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon)
self.rotary_dim = min(config.rotary_dim, config.n_ctx // config.num_attention_heads)
self.gradient_checkpointing = False
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.wte
def set_input_embeddings(self, new_embeddings):
self.wte = new_embeddings
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
attention_mask: Optional[torch.FloatTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs, # NOOP kwargs, for now
) -> Union[tuple, BaseModelOutputWithPast]:
r"""
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_dim)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert *input_ids* indices into associated vectors than the
model's internal embedding lookup matrix.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
if self.gradient_checkpointing and self.training:
if use_cache:
logger.warning_once(
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
)
use_cache = False
if inputs_embeds is None:
inputs_embeds = self.wte(input_ids)
if use_cache and past_key_values is None:
past_key_values = DynamicCache(config=self.config)
seq_length = inputs_embeds.shape[1]
if cache_position is None:
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
cache_position = torch.arange(past_seen_tokens, past_seen_tokens + seq_length, device=inputs_embeds.device)
if position_ids is None:
position_ids = cache_position.unsqueeze(0)
causal_mask = self._update_causal_mask(
attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions
)
hidden_states = inputs_embeds
if token_type_ids is not None:
token_type_ids = token_type_ids.view(-1, seq_length)
token_type_embeds = self.wte(token_type_ids)
hidden_states = hidden_states + token_type_embeds
hidden_states = self.drop(hidden_states)
output_shape = (-1, seq_length, hidden_states.size(-1))
all_self_attentions = () if output_attentions else None
all_hidden_states = () if output_hidden_states else None
for i, block in enumerate(self.h):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
outputs = block(
hidden_states,
layer_past=past_key_values,
attention_mask=causal_mask,
position_ids=position_ids,
use_cache=use_cache,
output_attentions=output_attentions,
cache_position=cache_position,
)
hidden_states = outputs[0]
if output_attentions:
all_self_attentions = all_self_attentions + (outputs[1],)
hidden_states = self.ln_f(hidden_states)
hidden_states = hidden_states.view(output_shape)
# Add last hidden state
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(
v for v in [hidden_states, past_key_values, all_hidden_states, all_self_attentions] if v is not None
)
return BaseModelOutputWithPast(
last_hidden_state=hidden_states,
past_key_values=past_key_values,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
)
# Copied from transformers.models.gptj.modeling_gptj.GPTJModel._update_causal_mask
def _update_causal_mask(
self,
attention_mask: Union[torch.Tensor, "BlockMask"],
input_tensor: torch.Tensor,
cache_position: torch.Tensor,
past_key_values: Cache,
output_attentions: bool = False,
):
if self.config._attn_implementation == "flash_attention_2":
if attention_mask is not None and (attention_mask == 0.0).any():
return attention_mask
return None
if self.config._attn_implementation == "flex_attention":
if isinstance(attention_mask, torch.Tensor):
attention_mask = make_flex_block_causal_mask(attention_mask)
return attention_mask
# For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, in
# order to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail
# to infer the attention mask.
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
using_compilable_cache = past_key_values.is_compileable if past_key_values is not None else False
# When output attentions is True, sdpa implementation's forward method calls the eager implementation's forward
if self.config._attn_implementation == "sdpa" and not using_compilable_cache and not output_attentions:
if AttentionMaskConverter._ignore_causal_mask_sdpa(
attention_mask,
inputs_embeds=input_tensor,
past_key_values_length=past_seen_tokens,
is_training=self.training,
):
return None
dtype = input_tensor.dtype
sequence_length = input_tensor.shape[1]
if using_compilable_cache:
target_length = past_key_values.get_max_cache_shape()
else:
target_length = (
attention_mask.shape[-1]
if isinstance(attention_mask, torch.Tensor)
else past_seen_tokens + sequence_length + 1
)
# In case the provided `attention` mask is 2D, we generate a causal mask here (4D).
causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(
attention_mask,
sequence_length=sequence_length,
target_length=target_length,
dtype=dtype,
cache_position=cache_position,
batch_size=input_tensor.shape[0],
)
if (
self.config._attn_implementation == "sdpa"
and attention_mask is not None
and attention_mask.device.type in ["cuda", "xpu", "npu"]
and not output_attentions
):
# Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when
# using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path.
# Details: https://github.com/pytorch/pytorch/issues/110213
min_dtype = torch.finfo(dtype).min
causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)
return causal_mask
@staticmethod
# Copied from transformers.models.gptj.modeling_gptj.GPTJModel._prepare_4d_causal_attention_mask_with_cache_position
def _prepare_4d_causal_attention_mask_with_cache_position(
attention_mask: torch.Tensor,
sequence_length: int,
target_length: int,
dtype: torch.dtype,
cache_position: torch.Tensor,
batch_size: int,
**kwargs,
):
"""
Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
`(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.
Args:
attention_mask (`torch.Tensor`):
A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape
`(batch_size, 1, query_length, key_value_length)`.
sequence_length (`int`):
The sequence length being processed.
target_length (`int`):
The target length: when generating with static cache, the mask should be as long as the static cache,
to account for the 0 padding (the part of the cache that is not yet filled).
dtype (`torch.dtype`):
The dtype to use for the 4D attention mask.
cache_position (`torch.Tensor`):
Indices depicting the position of the input sequence tokens in the sequence.
batch_size (`int`):
Batch size.
"""
if attention_mask is not None and attention_mask.dim() == 4:
# In this case we assume that the mask comes already in inverted form and requires no inversion or slicing.
causal_mask = attention_mask
else:
min_dtype = torch.finfo(dtype).min
causal_mask = torch.full(
(sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=cache_position.device
)
if sequence_length != 1:
causal_mask = torch.triu(causal_mask, diagonal=1)
causal_mask *= torch.arange(target_length, device=cache_position.device) > cache_position.reshape(-1, 1)
causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
if attention_mask is not None:
causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit
mask_length = attention_mask.shape[-1]
padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to(
causal_mask.device
)
padding_mask = padding_mask == 0
causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
padding_mask, min_dtype
)
return causal_mask
@auto_docstring(
custom_intro="""
The CodeGen Model transformer with a language modeling head on top.
"""
)
class CodeGenForCausalLM(CodeGenPreTrainedModel, GenerationMixin):
_tied_weights_keys = {"lm_head.weight": "transformer.wte.weight"}
def __init__(self, config):
super().__init__(config)
self.transformer = CodeGenModel(config)
self.lm_head = nn.Linear(config.n_embd, config.vocab_size)
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
attention_mask: Optional[torch.FloatTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
logits_to_keep: Union[int, torch.Tensor] = 0,
**kwargs,
) -> Union[tuple, CausalLMOutputWithPast]:
r"""
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_dim)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert *input_ids* indices into associated vectors than the
model's internal embedding lookup matrix.
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
`labels = input_ids`. Indices are selected in `[-100, 0, ..., config.vocab_size]`. All labels set to `-100`
are ignored (masked); the loss is only computed for labels in `[0, ..., config.vocab_size]`.
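Example (a minimal usage sketch; `Salesforce/codegen-350M-mono` is used here only as an illustrative public CodeGen checkpoint):
```python
>>> from transformers import AutoTokenizer, CodeGenForCausalLM
>>> tokenizer = AutoTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
>>> model = CodeGenForCausalLM.from_pretrained("Salesforce/codegen-350M-mono")
>>> inputs = tokenizer("def hello_world():", return_tensors="pt")
>>> outputs = model(**inputs, labels=inputs["input_ids"])
>>> loss, logits = outputs.loss, outputs.logits
```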
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
transformer_outputs = self.transformer(
input_ids,
past_key_values=past_key_values,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
cache_position=cache_position,
)
hidden_states = transformer_outputs[0]
# Only compute necessary logits, and do not upcast them to float if we are not computing the loss
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_head(hidden_states[:, slice_indices, :])
loss = None
if labels is not None:
loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)
if not return_dict:
output = (logits,) + transformer_outputs[1:]
return ((loss,) + output) if loss is not None else output
return CausalLMOutputWithPast(
loss=loss,
logits=logits,
past_key_values=transformer_outputs.past_key_values,
hidden_states=transformer_outputs.hidden_states,
attentions=transformer_outputs.attentions,
)
__all__ = ["CodeGenForCausalLM", "CodeGenModel", "CodeGenPreTrainedModel"]
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/codegen/configuration_codegen.py | src/transformers/models/codegen/configuration_codegen.py | # coding=utf-8
# Copyright 2022 Salesforce authors, The EleutherAI, and HuggingFace Teams. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""CodeGen model configuration"""
from ...configuration_utils import PreTrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
class CodeGenConfig(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`CodeGenModel`]. It is used to instantiate a
CodeGen model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the CodeGen
[Salesforce/codegen-2B-mono](https://huggingface.co/Salesforce/codegen-2B-mono) architecture. Configuration objects
inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the documentation from
[`PreTrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 50400):
Vocabulary size of the CodeGen model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`CodeGenModel`].
n_positions (`int`, *optional*, defaults to 2048):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
n_ctx (`int`, *optional*, defaults to 2048):
This attribute is used in `CodeGenModel.__init__` without any real effect.
n_embd (`int`, *optional*, defaults to 4096):
Dimensionality of the embeddings and hidden states.
n_layer (`int`, *optional*, defaults to 28):
Number of hidden layers in the Transformer encoder.
n_head (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer encoder.
rotary_dim (`int`, *optional*, defaults to 64):
Number of dimensions in the embedding that Rotary Position Embedding is applied to.
n_inner (`int`, *optional*):
Dimensionality of the inner feed-forward layers. `None` will set it to 4 times `n_embd`.
activation_function (`str`, *optional*, defaults to `"gelu_new"`):
Activation function, to be selected in the list `["relu", "silu", "gelu", "tanh", "gelu_new"]`.
resid_pdrop (`float`, *optional*, defaults to 0.0):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
embd_pdrop (`float`, *optional*, defaults to 0.0):
The dropout ratio for the embeddings.
attn_pdrop (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention.
layer_norm_epsilon (`float`, *optional*, defaults to 1e-05):
The epsilon to use in the layer normalization layers.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
bos_token_id (`int`, *optional*, defaults to 50256):
Beginning of stream token id.
eos_token_id (`int`, *optional*, defaults to 50256):
End of stream token id.
tie_word_embeddings (`bool`, *optional*, defaults to `False`):
Whether the model's input and output word embeddings should be tied. Note that this is only relevant if the
model has an output word embedding layer.
Example:
```python
>>> from transformers import CodeGenConfig, CodeGenModel
>>> # Initializing a CodeGen 6B configuration
>>> configuration = CodeGenConfig()
>>> # Initializing a model (with random weights) from the configuration
>>> model = CodeGenModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "codegen"
attribute_map = {
"max_position_embeddings": "n_positions",
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__(
self,
vocab_size=50400,
n_positions=2048,
n_ctx=2048,
n_embd=4096,
n_layer=28,
n_head=16,
rotary_dim=64,
n_inner=None,
activation_function="gelu_new",
resid_pdrop=0.0,
embd_pdrop=0.0,
attn_pdrop=0.0,
layer_norm_epsilon=1e-5,
initializer_range=0.02,
use_cache=True,
bos_token_id=50256,
eos_token_id=50256,
tie_word_embeddings=False,
**kwargs,
):
self.vocab_size = vocab_size
self.n_ctx = n_ctx
self.n_positions = n_positions
self.n_embd = n_embd
self.n_layer = n_layer
self.n_head = n_head
self.n_inner = n_inner
self.rotary_dim = rotary_dim
self.activation_function = activation_function
self.resid_pdrop = resid_pdrop
self.embd_pdrop = embd_pdrop
self.attn_pdrop = attn_pdrop
self.layer_norm_epsilon = layer_norm_epsilon
self.initializer_range = initializer_range
self.use_cache = use_cache
self.bos_token_id = bos_token_id
self.eos_token_id = eos_token_id
super().__init__(
bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
)
__all__ = ["CodeGenConfig"]
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/longcat_flash/modeling_longcat_flash.py | src/transformers/models/longcat_flash/modeling_longcat_flash.py | # π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨
# This file was automatically generated from src/transformers/models/longcat_flash/modular_longcat_flash.py.
# Do NOT edit this file manually as any edits will be overwritten by the generation of
# the file from the modular. If any change should be done, please apply the change to the
# modular_longcat_flash.py file directly. One of our CI enforces this.
# π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨
# coding=utf-8
# Copyright 2025 Meituan and the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from collections.abc import Callable
from typing import Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ... import initialization as init
from ...activations import ACT2FN
from ...cache_utils import Cache, DynamicCache
from ...generation import GenerationMixin
from ...integrations import use_kernel_forward_from_hub
from ...masking_utils import create_causal_mask
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple
from ...utils.generic import check_model_inputs, maybe_autocast
from .configuration_longcat_flash import LongcatFlashConfig
@use_kernel_forward_from_hub("RMSNorm")
class LongcatFlashRMSNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-6):
"""
LongcatFlashRMSNorm is equivalent to T5LayerNorm
"""
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.variance_epsilon = eps
def forward(self, hidden_states):
input_dtype = hidden_states.dtype
hidden_states = hidden_states.to(torch.float32)
variance = hidden_states.pow(2).mean(-1, keepdim=True)
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
return self.weight * hidden_states.to(input_dtype)
def extra_repr(self):
return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
class LongcatFlashRotaryEmbedding(nn.Module):
inv_freq: torch.Tensor # fix linting for `register_buffer`
def __init__(self, config: LongcatFlashConfig, device=None):
super().__init__()
self.max_seq_len_cached = config.max_position_embeddings
self.original_max_seq_len = config.max_position_embeddings
self.config = config
self.rope_type = self.config.rope_parameters["rope_type"]
rope_init_fn: Callable = self.compute_default_rope_parameters
if self.rope_type != "default":
rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
inv_freq, self.attention_scaling = rope_init_fn(self.config, device)
self.register_buffer("inv_freq", inv_freq, persistent=False)
self.register_buffer("original_inv_freq", inv_freq.clone(), persistent=False)
@staticmethod
def compute_default_rope_parameters(
config: Optional[LongcatFlashConfig] = None,
device: Optional["torch.device"] = None,
seq_len: Optional[int] = None,
) -> tuple["torch.Tensor", float]:
"""
Computes the inverse frequencies according to the original RoPE implementation
Args:
config ([`~transformers.PreTrainedConfig`]):
The model configuration.
device (`torch.device`):
The device to use for initialization of the inverse frequencies.
seq_len (`int`, *optional*):
The current sequence length. Unused for this type of RoPE.
Returns:
Tuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the
post-processing scaling factor applied to the computed cos/sin (unused in this type of RoPE).
"""
base = config.rope_parameters["rope_theta"]
dim = getattr(config, "head_dim", None) or config.hidden_size // config.num_attention_heads
attention_factor = 1.0 # Unused in this type of RoPE
# Compute the inverse frequencies
inv_freq = 1.0 / (
base ** (torch.arange(0, dim, 2, dtype=torch.int64).to(device=device, dtype=torch.float) / dim)
)
return inv_freq, attention_factor
@torch.no_grad()
@dynamic_rope_update # power user: used with advanced RoPE types (e.g. dynamic rope)
def forward(self, x, position_ids):
inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
position_ids_expanded = position_ids[:, None, :].float()
device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
with maybe_autocast(device_type=device_type, enabled=False): # Force float32
freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
emb = torch.cat((freqs, freqs), dim=-1)
cos = emb.cos() * self.attention_scaling
sin = emb.sin() * self.attention_scaling
return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
class LongcatFlashMLP(nn.Module):
def __init__(self, config, hidden_size=None, intermediate_size=None):
super().__init__()
self.config = config
self.hidden_size = config.hidden_size if hidden_size is None else hidden_size
self.intermediate_size = config.ffn_hidden_size if intermediate_size is None else intermediate_size
self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
self.act_fn = ACT2FN[config.hidden_act]
def forward(self, x):
down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
return down_proj
class LongcatFlashTopkRouter(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.n_routed_experts = config.n_routed_experts + (config.zero_expert_num or 0)
self.register_buffer("e_score_correction_bias", torch.zeros(self.n_routed_experts))
self.top_k = config.moe_topk
self.routed_scaling_factor = config.routed_scaling_factor
self.router_bias = getattr(config, "router_bias", False)
self.classifier = nn.Linear(config.hidden_size, self.n_routed_experts, bias=self.router_bias)
def forward(self, hidden_states):
hidden_states = hidden_states.view(-1, self.config.hidden_size)
router_logits = F.linear(hidden_states.type(torch.float32), self.classifier.weight.type(torch.float32))
scores = router_logits.softmax(dim=-1)
topk_indices = self.get_topk_indices(scores)
topk_weights = scores.gather(1, topk_indices)
topk_weights = topk_weights * self.routed_scaling_factor
return topk_weights.to(router_logits.dtype), topk_indices
@torch.no_grad()
def get_topk_indices(self, scores):
scores_for_choice = scores.view(-1, self.n_routed_experts) + self.e_score_correction_bias.unsqueeze(0)
topk_indices = torch.topk(scores_for_choice, k=self.top_k, dim=-1, sorted=False)[1]
return topk_indices
class LongcatFlashExperts(nn.Module):
def __init__(self, config):
super().__init__()
self.intermediate_size = config.expert_ffn_hidden_size
self.hidden_size = config.hidden_size
self.num_routed_experts = config.n_routed_experts
self.zero_expert_num = config.zero_expert_num or 0
self.total_experts = self.num_routed_experts + self.zero_expert_num
self.act_fn = ACT2FN[config.hidden_act]
if self.num_routed_experts > 0:
self.gate_up_proj = nn.Parameter(
torch.empty(self.total_experts, 2 * self.intermediate_size, self.hidden_size)
)
self.down_proj = nn.Parameter(
torch.empty(self.num_routed_experts, self.hidden_size, self.intermediate_size)
)
else:
self.register_parameter("gate_up_proj", None)
self.register_parameter("down_proj", None)
def forward(self, hidden_states, top_k_index, top_k_weights):
final_hidden_states = torch.zeros_like(hidden_states)
if top_k_index.numel() == 0:
return final_hidden_states
expert_mask = torch.nn.functional.one_hot(top_k_index, num_classes=self.total_experts).permute(2, 1, 0)
expert_hit = torch.greater(expert_mask.sum(dim=(-1, -2)), 0).nonzero(as_tuple=False)
for expert_idx_tensor in expert_hit:
expert_idx = int(expert_idx_tensor.item())
selection_idx, token_idx = torch.where(expert_mask[expert_idx].squeeze(0))
if token_idx.numel() == 0:
continue
current_state = hidden_states[token_idx]
if expert_idx >= self.num_routed_experts or self.gate_up_proj is None:
current_hidden_states = current_state
else:
gate, up = F.linear(current_state, self.gate_up_proj[expert_idx]).chunk(2, dim=-1)
current_hidden_states = self.act_fn(gate) * up
current_hidden_states = F.linear(current_hidden_states, self.down_proj[expert_idx])
current_hidden_states = current_hidden_states * top_k_weights[token_idx, selection_idx, None]
final_hidden_states.index_add_(0, token_idx, current_hidden_states.to(hidden_states.dtype))
return final_hidden_states
# remap config key expert_ffn_hidden_size -> moe_intermediate_size
class LongcatFlashMoE(nn.Module):
"""
A mixed expert module containing zero compute (identity) experts.
"""
def __init__(self, config):
super().__init__()
self.intermediate_size = config.expert_ffn_hidden_size
self.config = config
self.experts = LongcatFlashExperts(config)
self.router = LongcatFlashTopkRouter(config)
def forward(self, hidden_states):
orig_shape = hidden_states.shape
topk_weights, topk_indices = self.router(hidden_states)
hidden_states = hidden_states.view(-1, hidden_states.shape[-1])
hidden_states = self.experts(hidden_states, topk_indices, topk_weights).view(*orig_shape)
return hidden_states
def rotate_half(x):
"""Rotates half the hidden dims of the input."""
x1 = x[..., : x.shape[-1] // 2]
x2 = x[..., x.shape[-1] // 2 :]
return torch.cat((-x2, x1), dim=-1)
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
"""
This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
"""
batch, num_key_value_heads, slen, head_dim = hidden_states.shape
if n_rep == 1:
return hidden_states
hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
def eager_attention_forward(
module: nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: Optional[torch.Tensor],
scaling: float,
dropout: float = 0.0,
**kwargs: Unpack[TransformersKwargs],
):
key_states = repeat_kv(key, module.num_key_value_groups)
value_states = repeat_kv(value, module.num_key_value_groups)
attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
if attention_mask is not None:
causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
attn_weights = attn_weights + causal_mask
attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
attn_output = torch.matmul(attn_weights, value_states)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, attn_weights
def apply_rotary_pos_emb_interleave(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
r"""
TODO: use the original freqs_cis computation to avoid the view + transpose + reshape below; this is not optimized!
Applies Rotary Position Embedding to the query and key tensors.
Args:
q (`torch.Tensor`): The query tensor.
k (`torch.Tensor`): The key tensor.
cos (`torch.Tensor`): The cosine part of the rotary embedding.
sin (`torch.Tensor`): The sine part of the rotary embedding.
position_ids (`torch.Tensor`):
The position indices of the tokens corresponding to the query and key tensors. For example, this can be
used to pass offsetted position ids when working with a KV-cache.
unsqueeze_dim (`int`, *optional*, defaults to 1):
The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
Returns:
`tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
"""
cos = cos.unsqueeze(unsqueeze_dim)
sin = sin.unsqueeze(unsqueeze_dim)
b, h, s, d = q.shape
q = q.view(b, h, s, d // 2, 2).transpose(4, 3).reshape(b, h, s, d)
b, h, s, d = k.shape
k = k.view(b, h, s, d // 2, 2).transpose(4, 3).reshape(b, h, s, d)
q_embed = (q * cos) + (rotate_half(q) * sin)
k_embed = (k * cos) + (rotate_half(k) * sin)
return q_embed, k_embed
def yarn_get_mscale(scale=1, mscale=1):
if scale <= 1:
return 1.0
return 0.1 * mscale * math.log(scale) + 1.0
class LongcatFlashMLA(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, config, layer_idx: int):
super().__init__()
self.config = config
self.layer_idx = layer_idx
self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
self.attention_dropout = config.attention_dropout
self.num_heads = config.num_attention_heads
self.q_lora_rank = config.q_lora_rank
self.qk_rope_head_dim = config.qk_rope_head_dim
self.kv_lora_rank = config.kv_lora_rank
self.v_head_dim = config.v_head_dim
self.qk_nope_head_dim = config.qk_nope_head_dim
self.qk_head_dim = config.qk_head_dim
self.is_causal = True
if self.q_lora_rank is None:
self.q_proj = nn.Linear(config.hidden_size, self.num_heads * self.qk_head_dim, bias=False)
else:
self.q_a_proj = nn.Linear(config.hidden_size, config.q_lora_rank, bias=config.attention_bias)
self.q_a_layernorm = LongcatFlashRMSNorm(config.q_lora_rank)
self.q_b_proj = nn.Linear(config.q_lora_rank, self.num_heads * self.qk_head_dim, bias=False)
self.kv_a_proj_with_mqa = nn.Linear(
config.hidden_size,
self.kv_lora_rank + self.qk_rope_head_dim,
bias=config.attention_bias,
)
self.kv_a_layernorm = LongcatFlashRMSNorm(self.kv_lora_rank)
self.kv_b_proj = nn.Linear(
self.kv_lora_rank,
self.num_heads * (self.qk_nope_head_dim + self.v_head_dim),
bias=False,
)
self.o_proj = nn.Linear(
self.num_heads * self.v_head_dim,
config.hidden_size,
bias=config.attention_bias,
)
self.scaling = self.qk_head_dim ** (-0.5)
if self.config.rope_parameters.get("rope_type", "default") != "default":
mscale_all_dim = self.config.rope_parameters.get("mscale_all_dim", 0)
scaling_factor = self.config.rope_parameters["factor"]
if mscale_all_dim:
mscale = yarn_get_mscale(scaling_factor, mscale_all_dim)
self.scaling = self.scaling * mscale * mscale
self.mla_scale_q_lora = (config.hidden_size / self.q_lora_rank) ** 0.5
self.mla_scale_kv_lora = (config.hidden_size / self.kv_lora_rank) ** 0.5
def forward(
self,
hidden_states: torch.Tensor,
position_embeddings: tuple[torch.Tensor, torch.Tensor],
attention_mask: Optional[torch.Tensor],
past_key_values: Optional[Cache] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs: Unpack[FlashAttentionKwargs],
) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
batch_size, seq_length = hidden_states.shape[:-1]
query_shape = (batch_size, seq_length, -1, self.qk_head_dim)
key_shape = (batch_size, seq_length, -1, self.qk_nope_head_dim + self.v_head_dim)
# we always do a lora for queries as well
q_states = self.q_b_proj(self.q_a_layernorm(self.q_a_proj(hidden_states)))
q_states = q_states.view(query_shape).transpose(1, 2)
q_pass, q_rot = torch.split(q_states, [self.qk_nope_head_dim, self.qk_rope_head_dim], dim=-1)
compressed_kv = self.kv_a_proj_with_mqa(hidden_states)
k_pass, k_rot = torch.split(compressed_kv, [self.kv_lora_rank, self.qk_rope_head_dim], dim=-1)
k_pass = self.kv_a_layernorm(k_pass)
# apply LoRA scaling
q_pass = q_pass * self.mla_scale_q_lora
q_rot = q_rot * self.mla_scale_q_lora
k_pass = k_pass * self.mla_scale_kv_lora
k_pass = self.kv_b_proj(k_pass).view(key_shape).transpose(1, 2)
k_pass, value_states = torch.split(k_pass, [self.qk_nope_head_dim, self.v_head_dim], dim=-1)
k_rot = k_rot.view(batch_size, 1, seq_length, self.qk_rope_head_dim)
cos, sin = position_embeddings
q_rot, k_rot = apply_rotary_pos_emb_interleave(q_rot, k_rot, cos, sin)
k_rot = k_rot.expand(*k_pass.shape[:-1], -1)
query_states = torch.cat((q_pass, q_rot), dim=-1)
key_states = torch.cat((k_pass, k_rot), dim=-1)
if past_key_values is not None:
# sin and cos are specific to RoPE models; cache_position needed for the static cache
cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
if "flash" in self.config._attn_implementation and self.qk_head_dim != self.v_head_dim:
value_states = F.pad(value_states, [0, self.qk_head_dim - self.v_head_dim])
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
query_states,
key_states,
value_states,
attention_mask,
dropout=0.0 if not self.training else self.attention_dropout,
scaling=self.scaling,
**kwargs,
)
if "flash" in self.config._attn_implementation and self.qk_head_dim != self.v_head_dim:
attn_output = attn_output[:, :, :, : self.v_head_dim]
attn_output = attn_output.reshape(batch_size, seq_length, -1).contiguous()
attn_output = self.o_proj(attn_output)
return attn_output, attn_weights
class LongcatFlashDecoderLayer(GradientCheckpointingLayer):
"""
LongCat decoder layer with dual-sublayer + shortcut MoE architecture.
Each logical layer contains:
- 2 attention sublayers (with layer indices: layer_idx*2, layer_idx*2+1)
- 2 MLP sublayers
- 1 shortcut MoE connection
"""
def __init__(self, config, layer_idx: int):
super().__init__()
self.layer_idx = layer_idx
self.hidden_size = config.hidden_size
self.mlp = LongcatFlashMoE(config)
self.self_attn = nn.ModuleList([LongcatFlashMLA(config=config, layer_idx=layer_idx * 2 + i) for i in [0, 1]])
self.mlps = nn.ModuleList([LongcatFlashMLP(config) for _ in [0, 1]])
self.input_layernorm = nn.ModuleList(
[LongcatFlashRMSNorm(config.hidden_size, eps=config.rms_norm_eps) for _ in [0, 1]]
)
self.post_attention_layernorm = nn.ModuleList(
[LongcatFlashRMSNorm(config.hidden_size, eps=config.rms_norm_eps) for _ in [0, 1]]
)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
use_cache: Optional[bool] = False,
cache_position: Optional[torch.LongTensor] = None,
position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
**kwargs: Unpack[FlashAttentionKwargs],
) -> torch.Tensor:
residual = hidden_states
hidden_states = self.input_layernorm[0](hidden_states)
hidden_states, _ = self.self_attn[0](
hidden_states=hidden_states,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
use_cache=use_cache,
cache_position=cache_position,
position_embeddings=position_embeddings,
**kwargs,
)
hidden_states = residual + hidden_states
residual = hidden_states
hidden_states = self.post_attention_layernorm[0](hidden_states)
shortcut_mlp_output = self.mlp(hidden_states)
hidden_states = self.mlps[0](hidden_states)
hidden_states = residual + hidden_states
# shortcut connection after second sublayer
residual = hidden_states
hidden_states = self.input_layernorm[1](hidden_states)
hidden_states, _ = self.self_attn[1](
hidden_states=hidden_states,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
use_cache=use_cache,
cache_position=cache_position,
position_embeddings=position_embeddings,
**kwargs,
)
hidden_states = residual + hidden_states
residual = hidden_states
hidden_states = self.post_attention_layernorm[1](hidden_states)
hidden_states = self.mlps[1](hidden_states)
hidden_states = residual + hidden_states + shortcut_mlp_output
return hidden_states
@auto_docstring
class LongcatFlashPreTrainedModel(PreTrainedModel):
config: LongcatFlashConfig
base_model_prefix = "model"
supports_gradient_checkpointing = True
_no_split_modules = ["LongcatFlashDecoderLayer"]
_skip_keys_device_placement = ["past_key_values"]
_supports_flash_attn = True
_supports_sdpa = True
_supports_flex_attn = True
_can_compile_fullgraph = False
_supports_attention_backend = True
_can_record_outputs = {
"hidden_states": LongcatFlashDecoderLayer,
"attentions": LongcatFlashMLA,
}
@torch.no_grad()
def _init_weights(self, module):
super()._init_weights(module)
if isinstance(module, LongcatFlashTopkRouter):
init.normal_(module.classifier.weight, mean=0.0, std=self.config.initializer_range)
init.zeros_(module.e_score_correction_bias)
if isinstance(module, LongcatFlashExperts):
if module.gate_up_proj is not None:
init.normal_(module.gate_up_proj, mean=0.0, std=self.config.initializer_range)
init.normal_(module.down_proj, mean=0.0, std=self.config.initializer_range)
@auto_docstring
class LongcatFlashModel(LongcatFlashPreTrainedModel):
_keys_to_ignore_on_load_unexpected = [r"model\.mtp.*"]
def __init__(self, config):
super().__init__(config)
self.padding_idx = config.pad_token_id
self.vocab_size = config.vocab_size
self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
self.layers = nn.ModuleList(
[LongcatFlashDecoderLayer(config, layer_idx) for layer_idx in range(config.num_layers)]
)
self.norm = LongcatFlashRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.rotary_emb = LongcatFlashRotaryEmbedding(config=config)
self.gradient_checkpointing = False
# Each layer above has 2 sublayers, config hack to have a correct cache (to avoid a checkpoint change)
self.head_dim = config.head_dim # For CI happiness (we didn't convert so head_dim is not directly used)
self.config.num_hidden_layers = 2 * config.num_layers
# Initialize weights and apply final processing
self.post_init()
@check_model_inputs
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
cache_position: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
**kwargs: Unpack[TransformersKwargs],
) -> BaseModelOutputWithPast:
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
if inputs_embeds is None:
inputs_embeds: torch.Tensor = self.embed_tokens(input_ids)
if use_cache and past_key_values is None:
past_key_values = DynamicCache(config=self.config)
if cache_position is None:
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
cache_position: torch.Tensor = (
torch.arange(inputs_embeds.shape[1], device=inputs_embeds.device) + past_seen_tokens
)
if position_ids is None:
position_ids = cache_position.unsqueeze(0)
causal_mask = create_causal_mask(
config=self.config,
input_embeds=inputs_embeds,
attention_mask=attention_mask,
cache_position=cache_position,
past_key_values=past_key_values,
position_ids=position_ids,
)
hidden_states = inputs_embeds
position_embeddings = self.rotary_emb(hidden_states, position_ids)
for decoder_layer in self.layers[: self.config.num_layers]:
hidden_states = decoder_layer(
hidden_states,
attention_mask=causal_mask,
position_ids=position_ids,
past_key_values=past_key_values,
cache_position=cache_position,
position_embeddings=position_embeddings,
**kwargs,
)
hidden_states = self.norm(hidden_states)
return BaseModelOutputWithPast(
last_hidden_state=hidden_states,
past_key_values=past_key_values,
hidden_states=None,
attentions=None,
)
@auto_docstring
class LongcatFlashForCausalLM(LongcatFlashPreTrainedModel, GenerationMixin):
_tied_weights_keys = {"lm_head.weight": "model.embed_tokens.weight"}
_tp_plan = {"lm_head": "colwise_rep"}
_pp_plan = {"lm_head": (["hidden_states"], ["logits"])}
_keys_to_ignore_on_load_unexpected = [r"model\.mtp.*"]
def __init__(self, config):
super().__init__(config)
self.model = LongcatFlashModel(config)
self.vocab_size = config.vocab_size
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
# Initialize weights and apply final processing
self.post_init()
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
logits_to_keep: Union[int, torch.Tensor] = 0,
**kwargs: Unpack[TransformersKwargs],
) -> CausalLMOutputWithPast:
r"""
Example:
```python
>>> from transformers import AutoTokenizer, LongcatFlashForCausalLM
>>> model = LongcatFlashForCausalLM.from_pretrained("meta-longcat_flash/LongcatFlash-2-7b-hf")
>>> tokenizer = AutoTokenizer.from_pretrained("meta-longcat_flash/LongcatFlash-2-7b-hf")
>>> prompt = "Hey, are you conscious? Can you talk to me?"
>>> inputs = tokenizer(prompt, return_tensors="pt")
>>> # Generate
>>> generate_ids = model.generate(inputs.input_ids, max_length=30)
>>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
"Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
```"""
outputs: BaseModelOutputWithPast = self.model(
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
cache_position=cache_position,
**kwargs,
)
hidden_states = outputs.last_hidden_state
# Only compute necessary logits, and do not upcast them to float if we are not computing the loss
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_head(hidden_states[:, slice_indices, :])
loss = None
if labels is not None:
loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)
return CausalLMOutputWithPast(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
__all__ = ["LongcatFlashPreTrainedModel", "LongcatFlashModel", "LongcatFlashForCausalLM"]
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/longcat_flash/modular_longcat_flash.py | src/transformers/models/longcat_flash/modular_longcat_flash.py | # coding=utf-8
# Copyright 2025 Meituan and the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections.abc import Callable
from typing import Optional
import torch
import torch.nn.functional as F
from torch import nn
from ... import initialization as init
from ...activations import ACT2FN
from ...cache_utils import Cache, DynamicCache
from ...masking_utils import create_causal_mask
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import BaseModelOutputWithPast
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...utils import TransformersKwargs, auto_docstring, logging
from ..deepseek_v3.modeling_deepseek_v3 import (
DeepseekV3Attention,
DeepseekV3ForCausalLM,
DeepseekV3MLP,
DeepseekV3Model,
DeepseekV3RMSNorm,
DeepseekV3RotaryEmbedding,
DeepseekV3TopkRouter,
apply_rotary_pos_emb_interleave,
eager_attention_forward,
)
from .configuration_longcat_flash import LongcatFlashConfig
logger = logging.get_logger(__name__)
class LongcatFlashRMSNorm(DeepseekV3RMSNorm):
pass
class LongcatFlashRotaryEmbedding(DeepseekV3RotaryEmbedding):
pass
# TODO remap config key ffn_hidden_size -> intermediate_size
class LongcatFlashMLP(DeepseekV3MLP):
def __init__(self, config, hidden_size=None, intermediate_size=None):
super().__init__(config)
self.hidden_size = config.hidden_size if hidden_size is None else hidden_size
self.intermediate_size = config.ffn_hidden_size if intermediate_size is None else intermediate_size
# TODO remap config key moe_topk -> num_experts_per_tok
class LongcatFlashTopkRouter(DeepseekV3TopkRouter):
def __init__(self, config):
super().__init__(config)
del self.n_group
del self.topk_group
del self.weight
del self.norm_topk_prob
self.top_k = config.moe_topk
self.n_routed_experts = config.n_routed_experts + (config.zero_expert_num or 0)
self.routed_scaling_factor = config.routed_scaling_factor
self.register_buffer("e_score_correction_bias", torch.zeros(self.n_routed_experts))
self.router_bias = getattr(config, "router_bias", False)
self.classifier = nn.Linear(config.hidden_size, self.n_routed_experts, bias=self.router_bias)
@torch.no_grad()
def get_topk_indices(self, scores):
scores_for_choice = scores.view(-1, self.n_routed_experts) + self.e_score_correction_bias.unsqueeze(0)
topk_indices = torch.topk(scores_for_choice, k=self.top_k, dim=-1, sorted=False)[1]
return topk_indices
def forward(self, hidden_states):
hidden_states = hidden_states.view(-1, self.config.hidden_size)
router_logits = F.linear(hidden_states.type(torch.float32), self.classifier.weight.type(torch.float32))
scores = router_logits.softmax(dim=-1)
topk_indices = self.get_topk_indices(scores)
topk_weights = scores.gather(1, topk_indices)
topk_weights = topk_weights * self.routed_scaling_factor
return topk_weights.to(router_logits.dtype), topk_indices
class LongcatFlashExperts(nn.Module):
def __init__(self, config):
super().__init__()
self.intermediate_size = config.expert_ffn_hidden_size
self.hidden_size = config.hidden_size
self.num_routed_experts = config.n_routed_experts
self.zero_expert_num = config.zero_expert_num or 0
self.total_experts = self.num_routed_experts + self.zero_expert_num
self.act_fn = ACT2FN[config.hidden_act]
if self.num_routed_experts > 0:
self.gate_up_proj = nn.Parameter(
torch.empty(self.total_experts, 2 * self.intermediate_size, self.hidden_size)
)
self.down_proj = nn.Parameter(
torch.empty(self.num_routed_experts, self.hidden_size, self.intermediate_size)
)
else:
self.register_parameter("gate_up_proj", None)
self.register_parameter("down_proj", None)
def forward(self, hidden_states, top_k_index, top_k_weights):
final_hidden_states = torch.zeros_like(hidden_states)
if top_k_index.numel() == 0:
return final_hidden_states
expert_mask = torch.nn.functional.one_hot(top_k_index, num_classes=self.total_experts).permute(2, 1, 0)
expert_hit = torch.greater(expert_mask.sum(dim=(-1, -2)), 0).nonzero(as_tuple=False)
for expert_idx_tensor in expert_hit:
expert_idx = int(expert_idx_tensor.item())
selection_idx, token_idx = torch.where(expert_mask[expert_idx].squeeze(0))
if token_idx.numel() == 0:
continue
current_state = hidden_states[token_idx]
if expert_idx >= self.num_routed_experts or self.gate_up_proj is None:
current_hidden_states = current_state
else:
gate, up = F.linear(current_state, self.gate_up_proj[expert_idx]).chunk(2, dim=-1)
current_hidden_states = self.act_fn(gate) * up
current_hidden_states = F.linear(current_hidden_states, self.down_proj[expert_idx])
current_hidden_states = current_hidden_states * top_k_weights[token_idx, selection_idx, None]
final_hidden_states.index_add_(0, token_idx, current_hidden_states.to(hidden_states.dtype))
return final_hidden_states
# remap config key expert_ffn_hidden_size -> moe_intermediate_size
class LongcatFlashMoE(nn.Module):
"""
A mixed expert module containing zero compute (identity) experts.
"""
def __init__(self, config):
super().__init__()
self.intermediate_size = config.expert_ffn_hidden_size
self.config = config
self.experts = LongcatFlashExperts(config)
self.router = LongcatFlashTopkRouter(config)
def forward(self, hidden_states):
orig_shape = hidden_states.shape
topk_weights, topk_indices = self.router(hidden_states)
hidden_states = hidden_states.view(-1, hidden_states.shape[-1])
hidden_states = self.experts(hidden_states, topk_indices, topk_weights).view(*orig_shape)
return hidden_states
class LongcatFlashMLA(DeepseekV3Attention):
def __init__(self, config, layer_idx: int):
super().__init__(config, layer_idx)
self.mla_scale_q_lora = (config.hidden_size / self.q_lora_rank) ** 0.5
self.mla_scale_kv_lora = (config.hidden_size / self.kv_lora_rank) ** 0.5
def forward(
self,
hidden_states: torch.Tensor,
position_embeddings: tuple[torch.Tensor, torch.Tensor],
attention_mask: Optional[torch.Tensor],
past_key_values: Optional[Cache] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs: Unpack[FlashAttentionKwargs],
) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
batch_size, seq_length = hidden_states.shape[:-1]
query_shape = (batch_size, seq_length, -1, self.qk_head_dim)
key_shape = (batch_size, seq_length, -1, self.qk_nope_head_dim + self.v_head_dim)
# we always do a lora for queries as well
q_states = self.q_b_proj(self.q_a_layernorm(self.q_a_proj(hidden_states)))
q_states = q_states.view(query_shape).transpose(1, 2)
q_pass, q_rot = torch.split(q_states, [self.qk_nope_head_dim, self.qk_rope_head_dim], dim=-1)
compressed_kv = self.kv_a_proj_with_mqa(hidden_states)
k_pass, k_rot = torch.split(compressed_kv, [self.kv_lora_rank, self.qk_rope_head_dim], dim=-1)
k_pass = self.kv_a_layernorm(k_pass)
# apply LoRA scaling
q_pass = q_pass * self.mla_scale_q_lora
q_rot = q_rot * self.mla_scale_q_lora
k_pass = k_pass * self.mla_scale_kv_lora
k_pass = self.kv_b_proj(k_pass).view(key_shape).transpose(1, 2)
k_pass, value_states = torch.split(k_pass, [self.qk_nope_head_dim, self.v_head_dim], dim=-1)
k_rot = k_rot.view(batch_size, 1, seq_length, self.qk_rope_head_dim)
cos, sin = position_embeddings
q_rot, k_rot = apply_rotary_pos_emb_interleave(q_rot, k_rot, cos, sin)
k_rot = k_rot.expand(*k_pass.shape[:-1], -1)
query_states = torch.cat((q_pass, q_rot), dim=-1)
key_states = torch.cat((k_pass, k_rot), dim=-1)
if past_key_values is not None:
# sin and cos are specific to RoPE models; cache_position needed for the static cache
cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
if "flash" in self.config._attn_implementation and self.qk_head_dim != self.v_head_dim:
value_states = F.pad(value_states, [0, self.qk_head_dim - self.v_head_dim])
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
query_states,
key_states,
value_states,
attention_mask,
dropout=0.0 if not self.training else self.attention_dropout,
scaling=self.scaling,
**kwargs,
)
if "flash" in self.config._attn_implementation and self.qk_head_dim != self.v_head_dim:
attn_output = attn_output[:, :, :, : self.v_head_dim]
attn_output = attn_output.reshape(batch_size, seq_length, -1).contiguous()
attn_output = self.o_proj(attn_output)
return attn_output, attn_weights
class LongcatFlashDecoderLayer(GradientCheckpointingLayer):
"""
LongCat decoder layer with dual-sublayer + shortcut MoE architecture.
Each logical layer contains:
- 2 attention sublayers (with layer indices: layer_idx*2, layer_idx*2+1)
- 2 MLP sublayers
- 1 shortcut MoE connection
"""
def __init__(self, config, layer_idx: int):
super().__init__()
self.layer_idx = layer_idx
self.hidden_size = config.hidden_size
self.mlp = LongcatFlashMoE(config)
self.self_attn = nn.ModuleList([LongcatFlashMLA(config=config, layer_idx=layer_idx * 2 + i) for i in [0, 1]])
self.mlps = nn.ModuleList([LongcatFlashMLP(config) for _ in [0, 1]])
self.input_layernorm = nn.ModuleList(
[LongcatFlashRMSNorm(config.hidden_size, eps=config.rms_norm_eps) for _ in [0, 1]]
)
self.post_attention_layernorm = nn.ModuleList(
[LongcatFlashRMSNorm(config.hidden_size, eps=config.rms_norm_eps) for _ in [0, 1]]
)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
use_cache: Optional[bool] = False,
cache_position: Optional[torch.LongTensor] = None,
position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
**kwargs: Unpack[FlashAttentionKwargs],
) -> torch.Tensor:
residual = hidden_states
hidden_states = self.input_layernorm[0](hidden_states)
hidden_states, _ = self.self_attn[0](
hidden_states=hidden_states,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
use_cache=use_cache,
cache_position=cache_position,
position_embeddings=position_embeddings,
**kwargs,
)
hidden_states = residual + hidden_states
residual = hidden_states
hidden_states = self.post_attention_layernorm[0](hidden_states)
shortcut_mlp_output = self.mlp(hidden_states)
hidden_states = self.mlps[0](hidden_states)
hidden_states = residual + hidden_states
# shortcut connection after second sublayer
residual = hidden_states
hidden_states = self.input_layernorm[1](hidden_states)
hidden_states, _ = self.self_attn[1](
hidden_states=hidden_states,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
use_cache=use_cache,
cache_position=cache_position,
position_embeddings=position_embeddings,
**kwargs,
)
hidden_states = residual + hidden_states
residual = hidden_states
hidden_states = self.post_attention_layernorm[1](hidden_states)
hidden_states = self.mlps[1](hidden_states)
hidden_states = residual + hidden_states + shortcut_mlp_output
return hidden_states
@auto_docstring
class LongcatFlashPreTrainedModel(PreTrainedModel):
config: LongcatFlashConfig
base_model_prefix = "model"
supports_gradient_checkpointing = True
_no_split_modules = ["LongcatFlashDecoderLayer"]
_skip_keys_device_placement = ["past_key_values"]
_supports_flash_attn = True
_supports_sdpa = True
_supports_flex_attn = True
_can_compile_fullgraph = False
_supports_attention_backend = True
_can_record_outputs = {
"hidden_states": LongcatFlashDecoderLayer,
"attentions": LongcatFlashMLA,
}
@torch.no_grad()
def _init_weights(self, module):
super()._init_weights(module)
if isinstance(module, LongcatFlashTopkRouter):
init.normal_(module.classifier.weight, mean=0.0, std=self.config.initializer_range)
init.zeros_(module.e_score_correction_bias)
if isinstance(module, LongcatFlashExperts):
if module.gate_up_proj is not None:
init.normal_(module.gate_up_proj, mean=0.0, std=self.config.initializer_range)
init.normal_(module.down_proj, mean=0.0, std=self.config.initializer_range)
class LongcatFlashModel(DeepseekV3Model):
_keys_to_ignore_on_load_unexpected = [r"model\.mtp.*"]
def __init__(self, config):
super().__init__(config)
self.layers = nn.ModuleList(
[LongcatFlashDecoderLayer(config, layer_idx) for layer_idx in range(config.num_layers)]
)
# Each layer above has 2 sublayers, config hack to have a correct cache (to avoid a checkpoint change)
self.head_dim = config.head_dim # For CI happiness (we didn't convert so head_dim is not directly used)
self.config.num_hidden_layers = 2 * config.num_layers
self.norm = LongcatFlashRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.rotary_emb = LongcatFlashRotaryEmbedding(config=config)
self.gradient_checkpointing = False
# Initialize weights and apply final processing
self.post_init()
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
cache_position: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
**kwargs: Unpack[TransformersKwargs],
):
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
if inputs_embeds is None:
inputs_embeds: torch.Tensor = self.embed_tokens(input_ids)
if use_cache and past_key_values is None:
past_key_values = DynamicCache(config=self.config)
if cache_position is None:
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
cache_position: torch.Tensor = (
torch.arange(inputs_embeds.shape[1], device=inputs_embeds.device) + past_seen_tokens
)
if position_ids is None:
position_ids = cache_position.unsqueeze(0)
causal_mask = create_causal_mask(
config=self.config,
input_embeds=inputs_embeds,
attention_mask=attention_mask,
cache_position=cache_position,
past_key_values=past_key_values,
position_ids=position_ids,
)
hidden_states = inputs_embeds
position_embeddings = self.rotary_emb(hidden_states, position_ids)
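# Note: config.num_hidden_layers was doubled in __init__ purely for cache bookkeeping
# (one cache entry per attention sublayer); the module list itself still holds
# config.num_layers decoder layers, which is what the slice below iterates over.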
for decoder_layer in self.layers[: self.config.num_layers]:
hidden_states = decoder_layer(
hidden_states,
attention_mask=causal_mask,
position_ids=position_ids,
past_key_values=past_key_values,
cache_position=cache_position,
position_embeddings=position_embeddings,
**kwargs,
)
hidden_states = self.norm(hidden_states)
return BaseModelOutputWithPast(
last_hidden_state=hidden_states,
past_key_values=past_key_values,
hidden_states=None,
attentions=None,
)
class LongcatFlashForCausalLM(DeepseekV3ForCausalLM):
_keys_to_ignore_on_load_unexpected = [r"model\.mtp.*"]
def __init__(self, config):
super().__init__(config)
self.model = LongcatFlashModel(config)
__all__ = ["LongcatFlashPreTrainedModel", "LongcatFlashModel", "LongcatFlashForCausalLM"]
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/longcat_flash/configuration_longcat_flash.py | src/transformers/models/longcat_flash/configuration_longcat_flash.py | # coding=utf-8
# Copyright 2025 Meituan and the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""LongCat Flash model configuration"""
from typing import Optional
from ...configuration_utils import PreTrainedConfig
from ...modeling_rope_utils import RopeParameters
class LongcatFlashConfig(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`LongcatFlashModel`]. It is used to instantiate
a LongCat Flash model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the LongCat Flash architecture.
e.g. [meituan-longcat/LongCat-Flash-Chat](https://huggingface.co/meituan-longcat/LongCat-Flash-Chat)
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 131072):
Vocabulary size of the LongCat Flash model. Defines the number of different tokens that can be represented by the
`input_ids` passed when calling [`LongcatFlashModel`]
hidden_size (`int`, *optional*, defaults to 6144):
Dimension of the hidden representations.
num_hidden_layers (`int`, *optional*, defaults to 56):
Number of hidden layers in the Transformer decoder.
num_layers (`int`, *optional*, defaults to 28):
Number of decoder layers; each layer contains 2 attention/MLP sublayers.
num_attention_heads (`int`, *optional*, defaults to 64):
Number of attention heads for each attention layer in the Transformer decoder.
num_key_value_heads (`int`, *optional*):
This is the number of key_value heads that should be used to implement Grouped Query Attention. If
`num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
`num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
converting from a multi-head checkpoint to a GQA checkpoint, each group key and value head should be
constructed by meanpooling all the original heads within that group. For more details checkout [this
paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to
`num_attention_heads`.
hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
The non-linear activation function (function or string) in the decoder.
max_position_embeddings (`int`, *optional*, defaults to 131072):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
rms_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon value used by the RMS normalization layers.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
pad_token_id (`int`, *optional*):
Padding token id.
bos_token_id (`int`, *optional*, defaults to 1):
Beginning of stream token id.
eos_token_id (`int`, *optional*, defaults to 2):
End of stream token id.
tie_word_embeddings (`bool`, *optional*, defaults to `False`):
Whether to tie input and output embeddings.
rope_parameters (`RopeParameters`, *optional*):
Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports two scaling
strategies: linear and dynamic. Their scaling factor must be a float greater than 1. The expected format is
`{"type": strategy name, "factor": scaling factor}`. When using this flag, don't update
`max_position_embeddings` to the expected new maximum.
attention_bias (`bool`, *optional*, defaults to `False`):
Whether to use a bias in the query, key, value and output projection layers during self-attention.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
ffn_hidden_size (`int`, *optional*, defaults to 12288):
Dimension of the MLP representations.
q_lora_rank (`int`, *optional*, defaults to 1536):
The rank of the query LoRA projection in MLA (Multi-head Latent Attention).
kv_lora_rank (`int`, *optional*, defaults to 512):
The rank of the key-value LoRA projection in MLA.
qk_nope_head_dim (`int`, *optional*, defaults to 128):
The dimension of the non-position encoding part of query/key heads.
qk_rope_head_dim (`int`, *optional*, defaults to 64):
The dimension of the RoPE part of query/key heads.
head_dim (`int`, *optional*, defaults to 64):
Standard query/key head dimension; kept only for CI compatibility and otherwise unused.
v_head_dim (`int`, *optional*, defaults to 128):
The dimension of value heads.
qk_head_dim (`int`, *optional*):
The total dimension of query/key heads. If not specified, set to `qk_nope_head_dim + qk_rope_head_dim`.
moe_topk (`int`, *optional*, defaults to 12):
Number of experts to route to for each token in the MoE layer.
n_routed_experts (`int`, *optional*, defaults to 512):
Number of routed experts in the MoE layer.
zero_expert_num (`int`, *optional*, defaults to 256):
Number of zero experts (identity function) to add to the expert pool.
expert_ffn_hidden_size (`int`, *optional*, defaults to 2048):
Hidden size of individual expert FFN layers.
routed_scaling_factor (`float`, *optional*, defaults to 6.0):
Scaling factor applied to the routing weights.
```python
>>> from transformers import LongcatFlashModel, LongcatFlashConfig
>>> # Initializing a LongCat Flash style configuration
>>> configuration = LongcatFlashConfig()
>>> # Initializing a model from the configuration
>>> model = LongcatFlashModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "longcat_flash"
keys_to_ignore_at_inference = ["past_key_values"]
default_theta = 10000000.0
base_model_tp_plan = {
"layers.*.self_attn.*.q_b_proj": "colwise",
"layers.*.self_attn.*.kv_b_proj": "colwise",
"layers.*.self_attn.*.o_proj": "rowwise",
"layers.*.mlps.*.gate_proj": "colwise",
"layers.*.mlps.*.up_proj": "colwise",
"layers.*.mlps.*.down_proj": "rowwise",
"layers.*.mlp.experts.gate_up_proj": "local_rowwise",
"layers.*.mlp.experts.down_proj": "local_rowwise",
"layers.*.mlp.experts": "gather",
}
base_model_pp_plan = {
"embed_tokens": (["input_ids"], ["inputs_embeds"]),
"layers": (["hidden_states", "attention_mask"], ["hidden_states"]),
"norm": (["hidden_states"], ["hidden_states"]),
}
def __init__(
self,
vocab_size: Optional[int] = 131072,
hidden_size: Optional[int] = 6144,
num_hidden_layers: Optional[int] = 56,
num_layers: Optional[int] = 28,
num_attention_heads: Optional[int] = 64,
num_key_value_heads: Optional[int] = None,
hidden_act: Optional[str] = "silu",
max_position_embeddings: Optional[int] = 131072,
initializer_range: Optional[float] = 0.02,
rms_norm_eps: Optional[float] = 1e-5,
use_cache: Optional[bool] = True,
pad_token_id: Optional[int] = None,
bos_token_id: Optional[int] = 1,
eos_token_id: Optional[int] = 2,
tie_word_embeddings: Optional[bool] = False,
rope_parameters: Optional[RopeParameters | dict[str, RopeParameters]] = None,
attention_bias: Optional[bool] = False,
attention_dropout: Optional[float] = 0.0,
ffn_hidden_size: Optional[int] = 12288,
q_lora_rank: Optional[int] = 1536,
kv_lora_rank: Optional[int] = 512,
qk_nope_head_dim: Optional[int] = 128,
qk_rope_head_dim: Optional[int] = 64,
head_dim: Optional[int] = 64,
v_head_dim: Optional[int] = 128,
qk_head_dim: Optional[int] = None,
moe_topk: Optional[int] = 12,
n_routed_experts: Optional[int] = 512,
zero_expert_num: Optional[int] = 256,
expert_ffn_hidden_size: Optional[int] = 2048,
routed_scaling_factor: Optional[float] = 6.0,
**kwargs,
):
if num_key_value_heads is None:
num_key_value_heads = num_attention_heads
if qk_head_dim is None:
qk_head_dim = qk_nope_head_dim + qk_rope_head_dim
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.hidden_size = hidden_size
self.num_layers = num_layers
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.num_key_value_heads = num_key_value_heads
self.hidden_act = hidden_act
self.initializer_range = initializer_range
self.rms_norm_eps = rms_norm_eps
self.use_cache = use_cache
self.attention_bias = attention_bias
self.attention_dropout = attention_dropout
self.ffn_hidden_size = ffn_hidden_size
self.q_lora_rank = q_lora_rank
self.kv_lora_rank = kv_lora_rank
self.qk_nope_head_dim = qk_nope_head_dim
self.qk_rope_head_dim = qk_rope_head_dim
self.v_head_dim = v_head_dim
self.qk_head_dim = qk_head_dim
self.head_dim = head_dim
self.moe_topk = moe_topk
self.n_routed_experts = n_routed_experts
self.zero_expert_num = zero_expert_num
self.expert_ffn_hidden_size = expert_ffn_hidden_size
self.routed_scaling_factor = routed_scaling_factor
self.rope_parameters = rope_parameters
super().__init__(
pad_token_id=pad_token_id,
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
tie_word_embeddings=tie_word_embeddings,
**kwargs,
)
def convert_rope_params_to_dict(self, ignore_keys_at_rope_validation: Optional[set] = None, **kwargs):
rope_scaling = kwargs.pop("rope_scaling", None)
self.rope_parameters = rope_scaling or self.rope_parameters
self.rope_parameters = self.rope_parameters if self.rope_parameters is not None else {}
# Standardize and validate the correctness of rotary position embeddings parameters
self.rope_parameters.setdefault("rope_theta", kwargs.pop("rope_theta", self.default_theta))
self.standardize_rope_params()
self.validate_rope(ignore_keys=ignore_keys_at_rope_validation)
# Convert to float because the RoPE functions expect floats; models on the Hub were saved with ints
for key in ["beta_fast", "beta_slow", "factor"]:
if key in self.rope_parameters:
self.rope_parameters[key] = float(self.rope_parameters[key])
return kwargs
__all__ = ["LongcatFlashConfig"]
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/longcat_flash/__init__.py | src/transformers/models/longcat_flash/__init__.py | # coding=utf-8
# Copyright 2025 Meituan and the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure
if TYPE_CHECKING:
from .configuration_longcat_flash import *
from .modeling_longcat_flash import *
else:
import sys
_file = globals()["__file__"]
sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/cohere/modular_cohere.py | src/transformers/models/cohere/modular_cohere.py | # coding=utf-8
# Copyright 2024 Cohere team. All rights reserved.
#
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
# and OPT implementations in this library. It has been modified from its
# original forms to accommodate minor architectural differences compared
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is based on the LLama model definition file in transformers
"""PyTorch Cohere model."""
from collections.abc import Callable
from typing import Optional, Union
import torch
from torch import nn
from ...cache_utils import Cache
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from ...modeling_rope_utils import dynamic_rope_update
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS
from ...processing_utils import Unpack
from ...utils import TransformersKwargs, logging
from ...utils.generic import maybe_autocast
from ..llama.modeling_llama import (
LlamaAttention,
LlamaForCausalLM,
LlamaMLP,
LlamaModel,
LlamaRotaryEmbedding,
eager_attention_forward,
)
from .configuration_cohere import CohereConfig
logger = logging.get_logger(__name__)
class CohereLayerNorm(nn.Module):
def __init__(self, hidden_size=None, eps=1e-5, bias=False):
"""The hidden size can be a tuple or an int. The tuple is used for QKNorm to normalize across head_dim"""
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.variance_epsilon = eps
def forward(self, hidden_states):
input_dtype = hidden_states.dtype
hidden_states = hidden_states.to(torch.float32)
mean = hidden_states.mean(-1, keepdim=True)
variance = (hidden_states - mean).pow(2).mean(-1, keepdim=True)
hidden_states = (hidden_states - mean) * torch.rsqrt(variance + self.variance_epsilon)
hidden_states = self.weight.to(torch.float32) * hidden_states
return hidden_states.to(input_dtype)
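# Note (illustrative): unlike `nn.LayerNorm`, this norm is computed in float32 and
# has no bias term. For QK-norm the weight is given a (num_heads, head_dim) shape so
# each head gets its own scale, while the mean/variance are still taken over the
# last (head_dim) dimension only.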
class CohereRotaryEmbedding(LlamaRotaryEmbedding):
@torch.no_grad()
@dynamic_rope_update # power user: used with advanced RoPE types (e.g. dynamic rope)
def forward(self, x, position_ids):
inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1)
position_ids_expanded = position_ids[:, None, :].float()
device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
with maybe_autocast(device_type=device_type, enabled=False): # Force float32
freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
emb = torch.repeat_interleave(freqs, 2, dim=-1) # diff from Llama: we interleave() instead of cat()
cos = emb.cos() * self.attention_scaling
sin = emb.sin() * self.attention_scaling
return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
def rotate_half(x):
# Split and rotate. Note that this function is different from e.g. Llama.
x1 = x[..., ::2]
x2 = x[..., 1::2]
rot_x = torch.stack([-x2, x1], dim=-1).flatten(-2)
return rot_x
def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
"""Applies Rotary Position Embedding to the query and key tensors.
Args:
q (`torch.Tensor`): The query tensor.
k (`torch.Tensor`): The key tensor.
cos (`torch.Tensor`): The cosine part of the rotary embedding.
sin (`torch.Tensor`): The sine part of the rotary embedding.
position_ids (`torch.Tensor`, *optional*):
Deprecated and unused.
unsqueeze_dim (`int`, *optional*, defaults to 1):
The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
Returns:
`tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
"""
dtype = q.dtype
q = q.float()
k = k.float()
cos = cos.unsqueeze(unsqueeze_dim)
sin = sin.unsqueeze(unsqueeze_dim)
q_embed = (q * cos) + (rotate_half(q) * sin)
k_embed = (k * cos) + (rotate_half(k) * sin)
return q_embed.to(dtype=dtype), k_embed.to(dtype=dtype)
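# Worked example (illustrative): the interleaved rotation above pairs channels as
# (x0, x1), (x2, x3), ... instead of splitting head_dim into two contiguous halves
# as Llama's rotate_half does, e.g.
#   rotate_half(torch.tensor([1., 2., 3., 4.])) -> tensor([-2., 1., -4., 3.])
# which is why CohereRotaryEmbedding builds cos/sin with repeat_interleave rather
# than torch.cat.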
class CohereMLP(LlamaMLP):
def __init__(self, config):
super().__init__(config)
self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
class CohereAttention(LlamaAttention):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, config: CohereConfig, layer_idx: Optional[int] = None):
super().__init__(config, layer_idx)
self.use_qk_norm = config.use_qk_norm
if self.use_qk_norm:
# When sharding the model using Tensor Parallelism, need to be careful to use n_local_heads
self.q_norm = CohereLayerNorm(
hidden_size=(config.num_attention_heads, self.head_dim), eps=config.layer_norm_eps
)
self.k_norm = CohereLayerNorm(
hidden_size=(config.num_key_value_heads, self.head_dim), eps=config.layer_norm_eps
)
def forward(
self,
hidden_states: torch.Tensor,
position_embeddings: tuple[torch.Tensor, torch.Tensor],
attention_mask: Optional[torch.Tensor],
past_key_values: Optional[Cache] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs: Unpack[FlashAttentionKwargs],
) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
input_shape = hidden_states.shape[:-1]
hidden_shape = (*input_shape, -1, self.head_dim)
query_states = self.q_proj(hidden_states).view(hidden_shape)
key_states = self.k_proj(hidden_states).view(hidden_shape)
value_states = self.v_proj(hidden_states).view(hidden_shape)
if self.use_qk_norm: # main diff from Llama
query_states = self.q_norm(query_states)
key_states = self.k_norm(key_states)
query_states = query_states.transpose(1, 2)
key_states = key_states.transpose(1, 2)
value_states = value_states.transpose(1, 2)
cos, sin = position_embeddings
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
if past_key_values is not None:
# sin and cos are specific to RoPE models; position_ids needed for the static cache
cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
query_states,
key_states,
value_states,
attention_mask,
dropout=0.0 if not self.training else self.attention_dropout,
scaling=self.scaling,
**kwargs,
)
attn_output = attn_output.reshape(*input_shape, -1).contiguous()
attn_output = self.o_proj(attn_output)
return attn_output, attn_weights
class CohereDecoderLayer(GradientCheckpointingLayer):
def __init__(self, config: CohereConfig, layer_idx: int):
super().__init__()
self.hidden_size = config.hidden_size
self.self_attn = CohereAttention(config=config, layer_idx=layer_idx)
self.mlp = CohereMLP(config)
self.input_layernorm = CohereLayerNorm(hidden_size=(config.hidden_size), eps=config.layer_norm_eps)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
use_cache: Optional[bool] = False,
cache_position: Optional[torch.LongTensor] = None,
position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
**kwargs: Unpack[FlashAttentionKwargs],
) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`, *optional*):
attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1,
query_sequence_length, key_sequence_length)` if default attention is used.
past_key_values (`Cache`, *optional*): cached past key and value projection states
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
(see `past_key_values`).
cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
Indices depicting the position of the input sequence tokens in the sequence
position_embeddings (`tuple[torch.FloatTensor, torch.FloatTensor]`, *optional*):
Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`,
with `head_dim` being the embedding dimension of each attention head.
"""
residual = hidden_states
hidden_states = self.input_layernorm(hidden_states)
hidden_states_attention, _ = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
use_cache=use_cache,
cache_position=cache_position,
position_embeddings=position_embeddings,
**kwargs,
)
hidden_states_mlp = self.mlp(hidden_states)
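# Parallel residual: the attention and MLP branches both read the same pre-norm
# hidden states and are summed with the residual in a single step below (there is
# no post-attention layernorm), unlike Llama's sequential attention-then-MLP blocks.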
hidden_states = residual + hidden_states_attention + hidden_states_mlp
return hidden_states
class CohereModel(LlamaModel):
def __init__(self, config: CohereConfig):
super().__init__(config)
self.layers = nn.ModuleList(
[CohereDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
)
self.norm = CohereLayerNorm(hidden_size=(config.hidden_size), eps=config.layer_norm_eps)
class CohereForCausalLM(LlamaForCausalLM):
def __init__(self, config):
super().__init__(config)
self.model = CohereModel(config)
self.logit_scale = config.logit_scale
self.tie_word_embeddings = config.tie_word_embeddings
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
logits_to_keep: Union[int, torch.Tensor] = 0,
**kwargs: Unpack[TransformersKwargs],
) -> CausalLMOutputWithPast:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Example:
```python
>> from transformers import AutoTokenizer, CohereForCausalLM
>> model = CohereForCausalLM.from_pretrained("CohereForAI/c4ai-command-r-v01")
>> tokenizer = AutoTokenizer.from_pretrained("CohereForAI/c4ai-command-r-v01")
>> prompt = "Hey, are you conscious? Can you talk to me?"
>> inputs = tokenizer(prompt, return_tensors="pt")
>> # Generate
>> generate_ids = model.generate(inputs.input_ids, max_length=30)
>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
"Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
# decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
outputs: BaseModelOutputWithPast = self.model(
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
cache_position=cache_position,
**kwargs,
)
hidden_states = outputs.last_hidden_state
# Only compute necessary logits, and do not upcast them to float if we are not computing the loss
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_head(hidden_states[:, slice_indices, :])
logits = logits * self.logit_scale # main diff from Llama
loss = None
if labels is not None:
loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)
return CausalLMOutputWithPast(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
__all__ = [
"CohereForCausalLM",
"CohereModel",
"CoherePreTrainedModel", # noqa: F822
]
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/cohere/tokenization_cohere.py | src/transformers/models/cohere/tokenization_cohere.py | # coding=utf-8
# Copyright 2024 Cohere team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is based on the tokenization_llama.py file in transformers
from typing import Literal, Optional, Union
from tokenizers import Tokenizer, decoders, normalizers, pre_tokenizers
from tokenizers.models import BPE
from ...tokenization_utils_tokenizers import TokenizersBackend
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"tokenizer_file": {
"Cohere/Command-nightly": "https://huggingface.co/Cohere/Command-nightly/blob/main/tokenizer.json",
},
}
# fmt: off
DEFAULT_SYSTEM_PROMPT = "You are Command-R, a brilliant, sophisticated, AI-assistant trained to assist human users by providing thorough responses. You are trained by Cohere."
DEFAULT_RAG_PREAMBLE = """## Task and Context
You help people answer their questions and other requests interactively. You will be asked a very wide array of requests on all kinds of topics. You will be equipped with a wide range of search engines or similar tools to help you, which you use to research your answer. You should focus on serving the user's needs as best you can, which will be wide-ranging.
## Style Guide
Unless the user asks for a different style of answer, you should answer in full sentences, using proper grammar and spelling."""
# fmt: on
class CohereTokenizer(TokenizersBackend):
"""
Construct a Cohere tokenizer. Based on byte-level Byte-Pair-Encoding.
This uses notably ByteFallback and NFC normalization.
```python
>>> from transformers import AutoTokenizer
>>> tokenizer = AutoTokenizer.from_pretrained("CohereForAI/c4ai-command-r-v01")
>>> tokenizer.encode("Hello this is a test")
[5, 28339, 2075, 1801, 1671, 3282]
```
If you want to change the `bos_token` or the `eos_token`, make sure to specify them when initializing the model, or
call `tokenizer.update_post_processor()` to make sure that the post-processing is correctly done (otherwise the
values of the first token and final token of an encoded sequence will not be correct). For more details, checkout
[post-processors] (https://huggingface.co/docs/tokenizers/api/post-processors) documentation.
You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer, but since
the model was not pretrained this way, it might yield a decrease in performance.
<Tip>
When used with `is_split_into_words=True`, this tokenizer needs to be instantiated with `add_prefix_space=True`.
</Tip>
This tokenizer inherits from [`TokenizersBackend`] which contains most of the main methods. Users should
refer to this superclass for more information regarding those methods.
Args:
vocab_file (`str`, *optional*):
Path to the vocabulary file.
merges_file (`str`, *optional*):
Path to the merges file.
tokenizer_file (`str`, *optional*):
[tokenizers](https://github.com/huggingface/tokenizers) file (generally has a .json extension) that
contains everything needed to load the tokenizer.
clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):
Whether or not to cleanup spaces after decoding, cleanup consists in removing potential artifacts like
extra spaces.
unk_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"<UNK>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
bos_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"<BOS_TOKEN>"`):
The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
eos_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"<|END_OF_TURN_TOKEN|>"`):
The end of sequence token.
add_bos_token (`bool`, *optional*, defaults to `True`):
Whether or not to add a `bos_token` at the start of sequences.
add_eos_token (`bool`, *optional*, defaults to `False`):
Whether or not to add an `eos_token` at the end of sequences.
use_default_system_prompt (`bool`, *optional*, defaults to `False`):
Whether or not the default system prompt for Cohere tokenizer should be used.
add_prefix_space (`bool`, *optional*, defaults to `False`):
Whether or not the tokenizer should automatically add a prefix space.
vocab (`str`, `dict` or `list`, *optional*):
Custom vocabulary dictionary. If not provided, vocabulary is loaded from vocab_file.
merges (`str` or `list[str]`, *optional*):
Custom merges list. If not provided, merges are loaded from `merges_file`.
"""
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
padding_side = "left"
model_input_names = ["input_ids", "attention_mask"]
model = BPE
# No `max_model_input_sizes`
def __init__(
self,
vocab: Optional[Union[str, dict[str, int]]] = None,
merges: Optional[Union[str, list[str]]] = None,
errors: str = "replace",
unk_token: str = "<UNK>",
bos_token: str = "<BOS_TOKEN>",
eos_token: str = "<|END_OF_TURN_TOKEN|>",
pad_token: str = "<PAD>",
cls_token: str = "<CLS>",
sep_token: str = "<SEP>",
mask_token: str = "<MASK_TOKEN>",
use_default_system_prompt: bool = False,
add_prefix_space: bool = False,
**kwargs,
):
self.use_default_system_prompt = use_default_system_prompt
self.add_prefix_space = add_prefix_space
self.grounded_generation_template = kwargs.pop("grounded_generation_template", None)
self.tool_use_template = kwargs.pop("tool_use_template", None)
self._vocab = (
vocab
if vocab is not None
else {
str(pad_token): 0,
str(unk_token): 1,
str(cls_token): 2,
str(sep_token): 3,
str(mask_token): 4,
str(bos_token): 5,
}
)
self._merges = merges or []
self._tokenizer = Tokenizer(
BPE(
vocab=self._vocab,
merges=self._merges,
dropout=None,
continuing_subword_prefix="",
end_of_word_suffix="",
fuse_unk=False,
)
)
self._tokenizer.normalizer = normalizers.NFC()
self._tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
[
pre_tokenizers.Digits(individual_digits=True),
pre_tokenizers.ByteLevel(add_prefix_space=add_prefix_space, trim_offsets=True),
]
)
self._tokenizer.decoder = decoders.ByteLevel(add_prefix_space=add_prefix_space, trim_offsets=True)
super().__init__(
errors=errors,
unk_token=unk_token,
bos_token=bos_token,
eos_token=eos_token,
pad_token=pad_token,
cls_token=cls_token,
sep_token=sep_token,
mask_token=mask_token,
use_default_system_prompt=use_default_system_prompt,
add_prefix_space=add_prefix_space,
**kwargs,
)
self._post_init()
def apply_tool_use_template(
self,
conversation: list[dict[str, str]],
tools: list[dict],
**kwargs,
) -> Union[str, list[int]]:
"""Create a Command-R tool-use prompt.
Once rendered, the prompt instructs the model to generate a list of actions to perform on a set of user supplied tools
to help carry out the user's requests.
Conceptually, this works in the same way as `apply_chat_template`, but takes an additional `tools` parameter.
Converts a chat in the form of a list of dictionaries with `"role"` and `"content"` keys and a list of available
tools for the model to use into a prompt string, or a list of token ids.
This method will use the tokenizer's `default_tool_use_template` template specified at the class level.
You can override the default template using the `tool_use_template` kwarg but the quality of your results may decrease.
Args:
conversation (list[dict[str, str]]): A list of dicts
with "role" and "content" keys, representing the chat history so far.
tools (list[Dict]): a list of tools to render into the prompt for the model to choose from.
See an example at the bottom of the docstring.
The format should be:
* name (str): The name of the tool to be called. Valid names contain only the characters a-z,
A-Z, 0-9, _ and must not begin with a digit.
* description (str): The description of what the tool does, the model uses the description to
choose when and how to call the function.
* parameter_definitions (list[Dict]): The input parameters of the tool. Accepts a dictionary
where the key is the name of the parameter and the value is the parameter spec.
Valid parameter names contain only the characters a-z, A-Z, 0-9, _ and must not begin with a digit.
Parameter specs are as follows:
* description (str): The description of the parameter.
* type (str): the type of the parameter - most effective for python builtin data types, such as 'str', 'bool'
* required: boolean: Denotes whether the parameter is always present (required) or not. Defaults to not required.
add_generation_prompt (bool, *optional*): Whether to end the prompt with the token(s) that indicate
the start of an assistant message. This is useful when you want to generate a response from the model.
Note that this argument will be passed to the chat template, and so it must be supported in the
template for this argument to have any effect.
tokenize (`bool`, defaults to `True`):
Whether to tokenize the output. If `False`, the output will be a string.
padding (`bool`, defaults to `False`):
Whether to pad sequences to the maximum length. Has no effect if tokenize is `False`.
truncation (`bool`, defaults to `False`):
Whether to truncate sequences at the maximum length. Has no effect if tokenize is `False`.
max_length (`int`, *optional*):
Maximum length (in tokens) to use for padding or truncation. Has no effect if tokenize is `False`. If
not specified, the tokenizer's `max_length` attribute will be used as a default.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors of a particular framework. Has no effect if tokenize is `False`. Acceptable
values are:
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return NumPy `np.ndarray` objects.
return_dict (`bool`, *optional*, defaults to `False`):
Whether to return a dictionary with named outputs. Has no effect if tokenize is `False`.
**tokenizer_kwargs: Additional kwargs to pass to the tokenizer.
Returns:
`str`: A rendered prompt string.
or if tokenize=True:
`list[int]`: A list of token ids representing the tokenized chat so far, including control tokens. This
output is ready to pass to the model, either directly or via methods like `generate()`.
Examples:
```python
>> tokenizer = CohereTokenizer.from_pretrained("CohereForAI/c4ai-command-r-v01")
>> tools = [
{
"name": "internet_search",
"description": "Returns a list of relevant document snippets for a textual query retrieved from the internet",
"parameter_definitions": {
"query": {
"description": "Query to search the internet with",
"type": "str",
"required": True,
}
},
},
{
"name": "directly_answer",
"description": "Calls a standard (un-augmented) AI chatbot to generate a response given the conversation history",
"parameter_definitions": {},
},
]
>> conversation = [
{"role": "user", "content": "Whats the biggest penguin in the world?"},
]
>> # Render the prompt, ready for user to inspect, or for input into the model
>> prompt = tokenizer.apply_tool_use_template(conversation, tools=tools, tokenize=False, add_generation_prompt=True)
>> print(prompt)
>> inputs = tokenizer.encode(prompt, add_special_tokens=False, return_tensors='pt')
>> outputs = model.generate(inputs, max_new_tokens=128)
>> print(tokenizer.decode(outputs[0]))
[
{
"tool_name": "internet_search",
"parameters": {
"query": "biggest penguin in the world"
}
}
]
```
"""
return self.apply_chat_template(
conversation,
chat_template="tool_use",
tools=tools,
**kwargs,
)
def apply_grounded_generation_template(
self,
conversation: list[dict[str, str]],
documents: list[dict],
citation_mode: Literal["fast", "accurate"] = "accurate",
**kwargs,
) -> Union[str, list[int]]:
"""Create a Command-R grounded generation (aka RAG) prompt.
Once rendered, the prompt instructs the model to generate a response with citations in, based on supplied documents.
Conceptually, this works in the same way as `apply_chat_template`, but takes additional `documents` and
`citation_mode` parameters.
Converts a list of dictionaries with `"role"` and `"content"` keys and a list of
documents for the model to ground its response on into a prompt string, or a list of token ids.
This method will use the tokenizer's `grounded_generation_template` template specified at the class level.
You can override the default template using the `grounded_generation_template` kwarg but the quality of your results may decrease.
Args:
conversation (list[dict[str, str]]): A list of dicts
with "role" and "content" keys, representing the chat history so far.
documents (list[dict[str, str]]): A list of dicts, representing documents or tool outputs to ground your
generation on. A document is a semistructured dict with a string-to-string mapping. Common fields are
`url`, `title`, `snippet` etc., but the keys should be descriptive. They will get rendered into the prompt.
citation_mode: either "accurate" (prompt the model to generate an answer first, then rewrite it with citation
spans inserted) or "fast" (prompt the model to generate an answer with inline citations directly).
The former yields higher-quality citations; the latter requires fewer tokens to be generated.
add_generation_prompt (bool, *optional*): Whether to end the prompt with the token(s) that indicate
the start of an assistant message. This is useful when you want to generate a response from the model.
Note that this argument will be passed to the chat template, and so it must be supported in the
template for this argument to have any effect.
tokenize (`bool`, defaults to `True`):
Whether to tokenize the output. If `False`, the output will be a string.
padding (`bool`, defaults to `False`):
Whether to pad sequences to the maximum length. Has no effect if tokenize is `False`.
truncation (`bool`, defaults to `False`):
Whether to truncate sequences at the maximum length. Has no effect if tokenize is `False`.
max_length (`int`, *optional*):
Maximum length (in tokens) to use for padding or truncation. Has no effect if tokenize is `False`. If
not specified, the tokenizer's `max_length` attribute will be used as a default.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors of a particular framework. Has no effect if tokenize is `False`. Acceptable
values are:
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return NumPy `np.ndarray` objects.
return_dict (`bool`, *optional*, defaults to `False`):
Whether to return a dictionary with named outputs. Has no effect if tokenize is `False`.
**tokenizer_kwargs: Additional kwargs to pass to the tokenizer.
Returns:
`str`: A rendered prompt string.
or if tokenize=True:
`list[int]`: A list of token ids representing the tokenized chat so far, including control tokens. This
output is ready to pass to the model, either directly or via methods like `generate()`.
Examples:
```python
>> tokenizer = CohereTokenizer.from_pretrained('CohereForAI/c4ai-command-r-v01')
>> # define documents:
>> documents = [
{ "title": "Tall penguins", "text": "Emperor penguins are the tallest." },
{ "title": "Penguin habitats", "text": "Emperor penguins only live in Antarctica."}
]
>> # define a conversation:
>> conversation = [
{"role": "user", "content": "Whats the biggest penguin in the world?"}
]
>> # render the prompt, ready for user to inspect, or for input into the model:
>> grounded_generation_prompt = tokenizer.apply_grounded_generation_template(conversation, documents=documents, tokenize=False, add_generation_prompt=True)
>> print(grounded_generation_prompt)
>> inputs = tokenizer.encode(grounded_generation_prompt, add_special_tokens=False, return_tensors='pt')
>> outputs = model.generate(inputs, max_new_tokens=128)
>> print(tokenizer.decode(outputs[0]))
```
"""
return self.apply_chat_template(
conversation,
chat_template="rag",
documents=documents,
citation_mode=citation_mode,
**kwargs,
)
__all__ = ["CohereTokenizer"]
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/cohere/__init__.py | src/transformers/models/cohere/__init__.py | # Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure
if TYPE_CHECKING:
from .configuration_cohere import *
from .modeling_cohere import *
from .tokenization_cohere import *
else:
import sys
_file = globals()["__file__"]
sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/cohere/modeling_cohere.py | src/transformers/models/cohere/modeling_cohere.py | # π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨
# This file was automatically generated from src/transformers/models/cohere/modular_cohere.py.
# Do NOT edit this file manually as any edits will be overwritten by the generation of
# the file from the modular. If any change should be done, please apply the change to the
# modular_cohere.py file directly. One of our CI enforces this.
# π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨
# coding=utf-8
# Copyright 2024 Cohere team. All rights reserved.
#
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
# and OPT implementations in this library. It has been modified from its
# original forms to accommodate minor architectural differences compared
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is based on the LLama model definition file in transformers
from collections.abc import Callable
from typing import Optional, Union
import torch
from torch import nn
from ...activations import ACT2FN
from ...cache_utils import Cache, DynamicCache
from ...generation import GenerationMixin
from ...integrations import use_kernelized_func
from ...masking_utils import create_causal_mask
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple
from ...utils.generic import check_model_inputs, maybe_autocast
from .configuration_cohere import CohereConfig
class CohereLayerNorm(nn.Module):
def __init__(self, hidden_size=None, eps=1e-5, bias=False):
"""The hidden size can be a tuple or an int. The tuple is used for QKNorm to normalize across head_dim"""
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.variance_epsilon = eps
def forward(self, hidden_states):
input_dtype = hidden_states.dtype
hidden_states = hidden_states.to(torch.float32)
mean = hidden_states.mean(-1, keepdim=True)
variance = (hidden_states - mean).pow(2).mean(-1, keepdim=True)
hidden_states = (hidden_states - mean) * torch.rsqrt(variance + self.variance_epsilon)
hidden_states = self.weight.to(torch.float32) * hidden_states
return hidden_states.to(input_dtype)
class CohereRotaryEmbedding(nn.Module):
inv_freq: torch.Tensor # fix linting for `register_buffer`
def __init__(self, config: CohereConfig, device=None):
super().__init__()
self.max_seq_len_cached = config.max_position_embeddings
self.original_max_seq_len = config.max_position_embeddings
self.config = config
self.rope_type = self.config.rope_parameters["rope_type"]
rope_init_fn: Callable = self.compute_default_rope_parameters
if self.rope_type != "default":
rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
inv_freq, self.attention_scaling = rope_init_fn(self.config, device)
self.register_buffer("inv_freq", inv_freq, persistent=False)
self.register_buffer("original_inv_freq", inv_freq.clone(), persistent=False)
@staticmethod
def compute_default_rope_parameters(
config: Optional[CohereConfig] = None,
device: Optional["torch.device"] = None,
seq_len: Optional[int] = None,
) -> tuple["torch.Tensor", float]:
"""
Computes the inverse frequencies according to the original RoPE implementation
Args:
config ([`~transformers.PreTrainedConfig`]):
The model configuration.
device (`torch.device`):
The device to use for initialization of the inverse frequencies.
seq_len (`int`, *optional*):
The current sequence length. Unused for this type of RoPE.
Returns:
Tuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the
post-processing scaling factor applied to the computed cos/sin (unused in this type of RoPE).
"""
base = config.rope_parameters["rope_theta"]
dim = getattr(config, "head_dim", None) or config.hidden_size // config.num_attention_heads
attention_factor = 1.0 # Unused in this type of RoPE
# Compute the inverse frequencies
inv_freq = 1.0 / (
base ** (torch.arange(0, dim, 2, dtype=torch.int64).to(device=device, dtype=torch.float) / dim)
)
return inv_freq, attention_factor
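# For example, with rope_theta=10000 and dim=4 this yields inv_freq = [1.0, 0.01]
# (illustrative numbers only; real Cohere head dimensions are much larger).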
@torch.no_grad()
@dynamic_rope_update # power user: used with advanced RoPE types (e.g. dynamic rope)
def forward(self, x, position_ids):
inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1)
position_ids_expanded = position_ids[:, None, :].float()
device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
with maybe_autocast(device_type=device_type, enabled=False): # Force float32
freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
emb = torch.repeat_interleave(freqs, 2, dim=-1) # diff from Llama: we interleave() instead of cat()
cos = emb.cos() * self.attention_scaling
sin = emb.sin() * self.attention_scaling
return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
class CohereMLP(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.hidden_size = config.hidden_size
self.intermediate_size = config.intermediate_size
self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
self.act_fn = ACT2FN[config.hidden_act]
def forward(self, x):
down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
return down_proj
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
"""
This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
"""
batch, num_key_value_heads, slen, head_dim = hidden_states.shape
if n_rep == 1:
return hidden_states
hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
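# Shape sketch (illustrative): repeat_kv(x, 4) on x of shape (1, 2, 3, 8) -- i.e.
# 2 key/value heads, sequence length 3, head_dim 8 -- returns (1, 8, 3, 8), matching
# 8 query heads for grouped-query attention.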
def eager_attention_forward(
module: nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: Optional[torch.Tensor],
scaling: float,
dropout: float = 0.0,
**kwargs: Unpack[TransformersKwargs],
):
key_states = repeat_kv(key, module.num_key_value_groups)
value_states = repeat_kv(value, module.num_key_value_groups)
attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
if attention_mask is not None:
causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
attn_weights = attn_weights + causal_mask
attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
attn_output = torch.matmul(attn_weights, value_states)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, attn_weights
def rotate_half(x):
# Split and rotate. Note that this function is different from e.g. Llama.
x1 = x[..., ::2]
x2 = x[..., 1::2]
rot_x = torch.stack([-x2, x1], dim=-1).flatten(-2)
return rot_x
def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
"""Applies Rotary Position Embedding to the query and key tensors.
Args:
q (`torch.Tensor`): The query tensor.
k (`torch.Tensor`): The key tensor.
cos (`torch.Tensor`): The cosine part of the rotary embedding.
sin (`torch.Tensor`): The sine part of the rotary embedding.
position_ids (`torch.Tensor`, *optional*):
Deprecated and unused.
unsqueeze_dim (`int`, *optional*, defaults to 1):
The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
Returns:
`tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
"""
dtype = q.dtype
q = q.float()
k = k.float()
cos = cos.unsqueeze(unsqueeze_dim)
sin = sin.unsqueeze(unsqueeze_dim)
q_embed = (q * cos) + (rotate_half(q) * sin)
k_embed = (k * cos) + (rotate_half(k) * sin)
return q_embed.to(dtype=dtype), k_embed.to(dtype=dtype)
@use_kernelized_func(apply_rotary_pos_emb)
class CohereAttention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, config: CohereConfig, layer_idx: Optional[int] = None):
super().__init__()
self.config = config
self.layer_idx = layer_idx
self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
self.scaling = self.head_dim**-0.5
self.attention_dropout = config.attention_dropout
self.is_causal = True
self.q_proj = nn.Linear(
config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias
)
self.k_proj = nn.Linear(
config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
)
self.v_proj = nn.Linear(
config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
)
self.o_proj = nn.Linear(
config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.attention_bias
)
self.use_qk_norm = config.use_qk_norm
if self.use_qk_norm:
# When sharding the model using Tensor Parallelism, need to be careful to use n_local_heads
self.q_norm = CohereLayerNorm(
hidden_size=(config.num_attention_heads, self.head_dim), eps=config.layer_norm_eps
)
self.k_norm = CohereLayerNorm(
hidden_size=(config.num_key_value_heads, self.head_dim), eps=config.layer_norm_eps
)
def forward(
self,
hidden_states: torch.Tensor,
position_embeddings: tuple[torch.Tensor, torch.Tensor],
attention_mask: Optional[torch.Tensor],
past_key_values: Optional[Cache] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs: Unpack[FlashAttentionKwargs],
) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
input_shape = hidden_states.shape[:-1]
hidden_shape = (*input_shape, -1, self.head_dim)
query_states = self.q_proj(hidden_states).view(hidden_shape)
key_states = self.k_proj(hidden_states).view(hidden_shape)
value_states = self.v_proj(hidden_states).view(hidden_shape)
if self.use_qk_norm: # main diff from Llama
query_states = self.q_norm(query_states)
key_states = self.k_norm(key_states)
query_states = query_states.transpose(1, 2)
key_states = key_states.transpose(1, 2)
value_states = value_states.transpose(1, 2)
cos, sin = position_embeddings
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
if past_key_values is not None:
# sin and cos are specific to RoPE models; position_ids needed for the static cache
cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
query_states,
key_states,
value_states,
attention_mask,
dropout=0.0 if not self.training else self.attention_dropout,
scaling=self.scaling,
**kwargs,
)
attn_output = attn_output.reshape(*input_shape, -1).contiguous()
attn_output = self.o_proj(attn_output)
return attn_output, attn_weights
class CohereDecoderLayer(GradientCheckpointingLayer):
def __init__(self, config: CohereConfig, layer_idx: int):
super().__init__()
self.hidden_size = config.hidden_size
self.self_attn = CohereAttention(config=config, layer_idx=layer_idx)
self.mlp = CohereMLP(config)
self.input_layernorm = CohereLayerNorm(hidden_size=(config.hidden_size), eps=config.layer_norm_eps)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
use_cache: Optional[bool] = False,
cache_position: Optional[torch.LongTensor] = None,
position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
**kwargs: Unpack[FlashAttentionKwargs],
) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`, *optional*):
attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1,
query_sequence_length, key_sequence_length)` if default attention is used.
past_key_values (`Cache`, *optional*): cached past key and value projection states
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
(see `past_key_values`).
cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
Indices depicting the position of the input sequence tokens in the sequence
position_embeddings (`tuple[torch.FloatTensor, torch.FloatTensor]`, *optional*):
Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`,
with `head_dim` being the embedding dimension of each attention head.
"""
residual = hidden_states
hidden_states = self.input_layernorm(hidden_states)
hidden_states_attention, _ = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
use_cache=use_cache,
cache_position=cache_position,
position_embeddings=position_embeddings,
**kwargs,
)
hidden_states_mlp = self.mlp(hidden_states)
hidden_states = residual + hidden_states_attention + hidden_states_mlp
return hidden_states
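# Illustrative sketch (not part of the model): the decoder layer above uses a *parallel*
# residual, i.e. attention and MLP both read the same layer-normed input and their outputs
# are summed into the residual in one step, unlike the sequential Llama-style block.
# The stand-in modules below are made up for the example.
def _example_parallel_residual_block():
    import torch
    import torch.nn as nn

    hidden = torch.randn(1, 4, 8)
    norm = nn.LayerNorm(8)
    attn_like = nn.Linear(8, 8)  # stand-in for self-attention
    mlp_like = nn.Linear(8, 8)  # stand-in for the MLP
    normed = norm(hidden)
    return hidden + attn_like(normed) + mlp_like(normed)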
@auto_docstring
class CoherePreTrainedModel(PreTrainedModel):
config: CohereConfig
base_model_prefix = "model"
supports_gradient_checkpointing = True
_no_split_modules = ["CohereDecoderLayer"]
_skip_keys_device_placement = ["past_key_values"]
_supports_flash_attn = True
_supports_sdpa = True
_supports_flex_attn = True
_can_compile_fullgraph = True
_supports_attention_backend = True
_can_record_outputs = {
"hidden_states": CohereDecoderLayer,
"attentions": CohereAttention,
}
@auto_docstring
class CohereModel(CoherePreTrainedModel):
def __init__(self, config: CohereConfig):
super().__init__(config)
self.padding_idx = config.pad_token_id
self.vocab_size = config.vocab_size
self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
self.layers = nn.ModuleList(
[CohereDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
)
self.norm = CohereLayerNorm(hidden_size=(config.hidden_size), eps=config.layer_norm_eps)
self.rotary_emb = CohereRotaryEmbedding(config=config)
self.gradient_checkpointing = False
# Initialize weights and apply final processing
self.post_init()
@check_model_inputs
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
cache_position: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
**kwargs: Unpack[TransformersKwargs],
) -> BaseModelOutputWithPast:
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
if inputs_embeds is None:
inputs_embeds: torch.Tensor = self.embed_tokens(input_ids)
if use_cache and past_key_values is None:
past_key_values = DynamicCache(config=self.config)
if cache_position is None:
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
cache_position: torch.Tensor = (
torch.arange(inputs_embeds.shape[1], device=inputs_embeds.device) + past_seen_tokens
)
if position_ids is None:
position_ids = cache_position.unsqueeze(0)
causal_mask = create_causal_mask(
config=self.config,
input_embeds=inputs_embeds,
attention_mask=attention_mask,
cache_position=cache_position,
past_key_values=past_key_values,
position_ids=position_ids,
)
hidden_states = inputs_embeds
position_embeddings = self.rotary_emb(hidden_states, position_ids=position_ids)
for decoder_layer in self.layers[: self.config.num_hidden_layers]:
hidden_states = decoder_layer(
hidden_states,
attention_mask=causal_mask,
position_embeddings=position_embeddings,
position_ids=position_ids,
past_key_values=past_key_values,
use_cache=use_cache,
cache_position=cache_position,
**kwargs,
)
hidden_states = self.norm(hidden_states)
return BaseModelOutputWithPast(
last_hidden_state=hidden_states,
past_key_values=past_key_values,
)
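# Minimal usage sketch (illustrative only, hyper-parameters are made up): run a tiny,
# randomly initialized CohereModel end to end and check the hidden-state shape.
def _example_tiny_cohere_model_forward():
    import torch

    config = CohereConfig(
        vocab_size=128,
        hidden_size=32,
        intermediate_size=64,
        num_hidden_layers=2,
        num_attention_heads=4,
        num_key_value_heads=2,
        max_position_embeddings=64,
    )
    model = CohereModel(config).eval()
    input_ids = torch.randint(0, config.vocab_size, (1, 6))
    with torch.no_grad():
        outputs = model(input_ids=input_ids)
    assert outputs.last_hidden_state.shape == (1, 6, config.hidden_size)
    return outputs.last_hidden_state.shape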
@auto_docstring
class CohereForCausalLM(CoherePreTrainedModel, GenerationMixin):
_tied_weights_keys = {"lm_head.weight": "model.embed_tokens.weight"}
_tp_plan = {"lm_head": "colwise_rep"}
_pp_plan = {"lm_head": (["hidden_states"], ["logits"])}
def __init__(self, config):
super().__init__(config)
self.model = CohereModel(config)
self.vocab_size = config.vocab_size
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.logit_scale = config.logit_scale
self.tie_word_embeddings = config.tie_word_embeddings
# Initialize weights and apply final processing
self.post_init()
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
logits_to_keep: Union[int, torch.Tensor] = 0,
**kwargs: Unpack[TransformersKwargs],
) -> CausalLMOutputWithPast:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Example:
```python
        >>> from transformers import AutoTokenizer, CohereForCausalLM
        >>> model = CohereForCausalLM.from_pretrained("CohereForAI/c4ai-command-r-v01")
        >>> tokenizer = AutoTokenizer.from_pretrained("CohereForAI/c4ai-command-r-v01")
        >>> prompt = "Hey, are you conscious? Can you talk to me?"
        >>> inputs = tokenizer(prompt, return_tensors="pt")
        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
"Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
# decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
outputs: BaseModelOutputWithPast = self.model(
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
cache_position=cache_position,
**kwargs,
)
hidden_states = outputs.last_hidden_state
# Only compute necessary logits, and do not upcast them to float if we are not computing the loss
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_head(hidden_states[:, slice_indices, :])
logits = logits * self.logit_scale # main diff from Llama
loss = None
if labels is not None:
loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)
return CausalLMOutputWithPast(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
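# Illustrative sketch of the two Cohere-specific details above (not part of the model):
# logits are multiplied by `config.logit_scale` after the LM head, and an integer
# `logits_to_keep` (e.g. 1) projects only the last positions, which is all greedy
# decoding needs. The sizes below are made up.
def _example_logit_scale_and_slicing():
    import torch

    hidden_states = torch.randn(1, 6, 32)  # [batch, seq_len, hidden]
    lm_head = torch.nn.Linear(32, 128, bias=False)
    logit_scale = 0.0625
    logits_last = lm_head(hidden_states[:, slice(-1, None), :]) * logit_scale
    assert logits_last.shape == (1, 1, 128)
    return logits_last.shape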
__all__ = ["CohereForCausalLM", "CohereModel", "CoherePreTrainedModel"]
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/cohere/configuration_cohere.py | src/transformers/models/cohere/configuration_cohere.py | # coding=utf-8
# Copyright 2024 Cohere team. All rights reserved.
#
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
# and OPT implementations in this library. It has been modified from its
# original forms to accommodate minor architectural differences compared
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Cohere model configuration"""
from typing import Optional
from ...configuration_utils import PreTrainedConfig
from ...modeling_rope_utils import RopeParameters
from ...utils import logging
logger = logging.get_logger(__name__)
class CohereConfig(PreTrainedConfig):
r"""
    This is the configuration class to store the configuration of a [`CohereModel`]. It is used to instantiate a Cohere
model according to the specified arguments, defining the model architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information. Instantiating a configuration
with the defaults will yield a similar configuration to that of the [CohereForAI/c4ai-command-r-v01](https://huggingface.co/CohereForAI/c4ai-command-r-v01) model.
Args:
vocab_size (`int`, *optional*, defaults to 256000):
Vocabulary size of the Cohere model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`CohereModel`]
hidden_size (`int`, *optional*, defaults to 8192):
Dimension of the hidden representations.
intermediate_size (`int`, *optional*, defaults to 22528):
Dimension of the MLP representations.
logit_scale (`float`, *optional*, defaults to 0.0625):
The scaling factor for the output logits.
num_hidden_layers (`int`, *optional*, defaults to 40):
Number of hidden layers in the Transformer decoder.
num_attention_heads (`int`, *optional*, defaults to 64):
Number of attention heads for each attention layer in the Transformer decoder.
num_key_value_heads (`int`, *optional*):
This is the number of key_value heads that should be used to implement Grouped Query Attention. If
`num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
`num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
by meanpooling all the original heads within that group. For more details, check out [this
paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to
`num_attention_heads`.
hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
The non-linear activation function (function or string) in the decoder.
max_position_embeddings (`int`, *optional*, defaults to 8192):
The maximum sequence length that this model might ever be used with.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the layer normalization.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
pad_token_id (`int`, *optional*, defaults to 0):
Padding token id.
bos_token_id (`int`, *optional*, defaults to 5):
Beginning of stream token id.
eos_token_id (`int`, *optional*, defaults to 255001):
End of stream token id.
tie_word_embeddings (`bool`, *optional*, defaults to `True`):
Whether to tie weight embeddings
rope_parameters (`RopeParameters`, *optional*):
Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain
a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE
with longer `max_position_embeddings`.
        attention_bias (`bool`, *optional*, defaults to `False`):
Whether to use a bias in the query, key, value and output projection layers during self-attention.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
use_qk_norm (`bool`, *optional*, defaults to `False`):
            Whether to use query-key normalization in the attention layers.
```python
>>> from transformers import CohereModel, CohereConfig
>>> # Initializing a Cohere model configuration
>>> configuration = CohereConfig()
>>> # Initializing a model from the Cohere configuration
>>> model = CohereModel(configuration) # doctest: +SKIP
>>> # Accessing the model configuration
>>> configuration = model.config # doctest: +SKIP
```"""
model_type = "cohere"
keys_to_ignore_at_inference = ["past_key_values"]
default_theta = 500000.0
base_model_tp_plan = {
"layers.*.self_attn.q_proj": "colwise",
"layers.*.self_attn.k_proj": "colwise",
"layers.*.self_attn.v_proj": "colwise",
"layers.*.self_attn.o_proj": "rowwise",
"layers.*.mlp.gate_proj": "colwise",
"layers.*.mlp.up_proj": "colwise",
"layers.*.mlp.down_proj": "rowwise",
}
base_model_pp_plan = {
"embed_tokens": (["input_ids"], ["inputs_embeds"]),
"layers": (["hidden_states", "attention_mask"], ["hidden_states"]),
"norm": (["hidden_states"], ["hidden_states"]),
}
def __init__(
self,
vocab_size: Optional[int] = 256000,
hidden_size: Optional[int] = 8192,
intermediate_size: Optional[int] = 22528,
logit_scale: Optional[float] = 0.0625,
num_hidden_layers: Optional[int] = 40,
num_attention_heads: Optional[int] = 64,
num_key_value_heads: Optional[int] = None,
hidden_act: Optional[str] = "silu",
max_position_embeddings: Optional[int] = 8192,
initializer_range: Optional[float] = 0.02,
        layer_norm_eps: Optional[float] = 1e-5,
use_cache: Optional[bool] = True,
pad_token_id: Optional[int] = 0,
bos_token_id: Optional[int] = 5,
eos_token_id: Optional[int] = 255001,
tie_word_embeddings: Optional[bool] = True,
rope_parameters: Optional[RopeParameters | dict[str, RopeParameters]] = None,
attention_bias: Optional[bool] = False,
attention_dropout: Optional[float] = 0.0,
use_qk_norm: Optional[bool] = False,
**kwargs,
):
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.hidden_size = hidden_size
self.logit_scale = logit_scale
self.intermediate_size = intermediate_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
# for backward compatibility
if num_key_value_heads is None:
num_key_value_heads = num_attention_heads
self.num_key_value_heads = num_key_value_heads
self.hidden_act = hidden_act
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.use_cache = use_cache
self.attention_bias = attention_bias
self.attention_dropout = attention_dropout
self.use_qk_norm = use_qk_norm
self.rope_parameters = rope_parameters
super().__init__(
pad_token_id=pad_token_id,
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
tie_word_embeddings=tie_word_embeddings,
**kwargs,
)
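# Illustrative sketch (not part of the library): a small grouped-query-attention
# configuration. With num_key_value_heads < num_attention_heads, each key/value head
# is shared by num_attention_heads // num_key_value_heads query heads.
def _example_small_gqa_config():
    config = CohereConfig(
        vocab_size=1024,
        hidden_size=256,
        intermediate_size=512,
        num_hidden_layers=4,
        num_attention_heads=8,
        num_key_value_heads=2,
        use_qk_norm=True,
    )
    assert config.num_attention_heads // config.num_key_value_heads == 4
    return config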
__all__ = ["CohereConfig"]
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/llava_next/image_processing_llava_next.py | src/transformers/models/llava_next/image_processing_llava_next.py | # coding=utf-8
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Image processor class for LLaVa-NeXT."""
from collections.abc import Iterable
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import (
BaseImageProcessor,
BatchFeature,
get_patch_output_size,
get_size_dict,
select_best_resolution,
)
from ...image_transforms import (
PaddingMode,
convert_to_rgb,
get_resize_output_image_size,
pad,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
infer_channel_dimension_format,
is_scaled_image,
make_flat_list_of_images,
to_numpy_array,
valid_images,
validate_preprocess_arguments,
)
from ...processing_utils import ImagesKwargs
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
from PIL import Image
class LlavaNextImageProcessorKwargs(ImagesKwargs, total=False):
r"""
image_grid_pinpoints (`list[list[int]]`, *optional*):
A list of possible resolutions to use for processing high resolution images. The best resolution is selected
based on the original size of the image. Can be overridden by `image_grid_pinpoints` in the `preprocess`
method.
"""
image_grid_pinpoints: list[list[int]]
def divide_to_patches(image: np.ndarray, patch_size: int, input_data_format) -> list[np.ndarray]:
"""
Divides an image into patches of a specified size.
Args:
image (`np.ndarray`):
The input image.
patch_size (`int`):
The size of each patch.
input_data_format (`ChannelDimension` or `str`):
The channel dimension format of the input image.
Returns:
list: A list of np.ndarray representing the patches.
"""
patches = []
height, width = get_image_size(image, channel_dim=input_data_format)
for i in range(0, height, patch_size):
for j in range(0, width, patch_size):
if input_data_format == ChannelDimension.LAST:
patch = image[i : i + patch_size, j : j + patch_size]
else:
patch = image[:, i : i + patch_size, j : j + patch_size]
patches.append(patch)
return patches
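# Illustrative sketch (not part of the processor): a 672x672 channels-last image
# divided with patch_size=336 yields a 2x2 grid, i.e. four 336x336 patches.
def _example_divide_to_patches_count():
    image = np.zeros((672, 672, 3), dtype=np.uint8)
    patches = divide_to_patches(image, patch_size=336, input_data_format=ChannelDimension.LAST)
    assert len(patches) == 4 and patches[0].shape == (336, 336, 3)
    return len(patches)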
def expand_to_square(image: np.ndarray, background_color, input_data_format) -> np.ndarray:
"""
Expands an image to a square by adding a background color.
"""
height, width = get_image_size(image, channel_dim=input_data_format)
if width == height:
return image
elif width > height:
result = np.ones((width, width, image.shape[2]), dtype=image.dtype) * background_color
result[(width - height) // 2 : (width - height) // 2 + height, :] = image
return result
else:
result = np.ones((height, height, image.shape[2]), dtype=image.dtype) * background_color
result[:, (height - width) // 2 : (height - width) // 2 + width] = image
return result
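# Illustrative sketch (not part of the processor): a wide 2x4 RGB image is centered
# inside a 4x4 square filled with the background color.
def _example_expand_to_square():
    image = np.full((2, 4, 3), 255, dtype=np.uint8)
    squared = expand_to_square(image, background_color=0, input_data_format=ChannelDimension.LAST)
    assert squared.shape == (4, 4, 3)
    assert squared[0].max() == 0 and squared[1].min() == 255
    return squared.shape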
class LlavaNextImageProcessor(BaseImageProcessor):
r"""
Constructs a LLaVa-NeXT image processor. Based on [`CLIPImageProcessor`] with incorporation of additional techniques
for processing high resolution images as explained in the [LLaVa paper](https://huggingface.co/papers/2310.03744).
Args:
do_resize (`bool`, *optional*, defaults to `True`):
Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by
`do_resize` in the `preprocess` method.
size (`dict[str, int]` *optional*, defaults to `{"shortest_edge": 224}`):
Size of the image after resizing. The shortest edge of the image is resized to size["shortest_edge"], with
the longest edge resized to keep the input aspect ratio. Can be overridden by `size` in the `preprocess`
method.
image_grid_pinpoints (`List` *optional*, defaults to `[[672, 336], [336, 672], [672, 672], [336, 1008], [1008, 336]]`):
A list of possible resolutions to use for processing high resolution images. The best resolution is selected
based on the original size of the image. Can be overridden by `image_grid_pinpoints` in the `preprocess`
method.
resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`):
Resampling filter to use if resizing the image. Can be overridden by `resample` in the `preprocess` method.
do_center_crop (`bool`, *optional*, defaults to `True`):
Whether to center crop the image to the specified `crop_size`. Can be overridden by `do_center_crop` in the
`preprocess` method.
crop_size (`dict[str, int]` *optional*, defaults to 224):
Size of the output image after applying `center_crop`. Can be overridden by `crop_size` in the `preprocess`
method.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by `do_rescale` in
the `preprocess` method.
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in the `preprocess`
method.
do_normalize (`bool`, *optional*, defaults to `True`):
Whether to normalize the image. Can be overridden by `do_normalize` in the `preprocess` method.
image_mean (`float` or `list[float]`, *optional*, defaults to `[0.48145466, 0.4578275, 0.40821073]`):
Mean to use if normalizing the image. This is a float or list of floats the length of the number of
channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
image_std (`float` or `list[float]`, *optional*, defaults to `[0.26862954, 0.26130258, 0.27577711]`):
Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
do_pad (`bool`, *optional*, defaults to `True`):
Whether to pad the image. If `True`, will pad the patch dimension of the images in the batch to the largest
number of patches in the batch. Padding will be applied to the bottom and right with zeros.
do_convert_rgb (`bool`, *optional*, defaults to `True`):
Whether to convert the image to RGB.
"""
model_input_names = ["pixel_values", "image_sizes"]
valid_kwargs = LlavaNextImageProcessorKwargs
def __init__(
self,
do_resize: bool = True,
size: Optional[dict[str, int]] = None,
image_grid_pinpoints: Optional[list] = None,
resample: PILImageResampling = PILImageResampling.BICUBIC,
do_center_crop: bool = True,
crop_size: Optional[dict[str, int]] = None,
do_rescale: bool = True,
rescale_factor: Union[int, float] = 1 / 255,
do_normalize: bool = True,
image_mean: Optional[Union[float, list[float]]] = None,
image_std: Optional[Union[float, list[float]]] = None,
do_pad: Optional[bool] = True,
do_convert_rgb: bool = True,
**kwargs,
) -> None:
super().__init__(**kwargs)
size = size if size is not None else {"shortest_edge": 224}
size = get_size_dict(size, default_to_square=False)
image_grid_pinpoints = (
image_grid_pinpoints
if image_grid_pinpoints is not None
else [[336, 672], [672, 336], [672, 672], [1008, 336], [336, 1008]]
)
crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")
self.do_resize = do_resize
self.size = size
self.image_grid_pinpoints = image_grid_pinpoints
self.resample = resample
self.do_center_crop = do_center_crop
self.crop_size = crop_size
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.do_normalize = do_normalize
self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
self.do_pad = do_pad
self.do_convert_rgb = do_convert_rgb
# Copied from transformers.models.clip.image_processing_clip.CLIPImageProcessor.resize with CLIP->LLaVa
def resize(
self,
image: np.ndarray,
size: dict[str, int],
resample: PILImageResampling = PILImageResampling.BICUBIC,
data_format: Optional[Union[str, ChannelDimension]] = None,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
**kwargs,
) -> np.ndarray:
"""
Resize an image. The shortest edge of the image is resized to size["shortest_edge"], with the longest edge
resized to keep the input aspect ratio.
Args:
image (`np.ndarray`):
Image to resize.
size (`dict[str, int]`):
Size of the output image.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
                Resampling filter to use when resizing the image.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the image. If not provided, it will be the same as the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
"""
default_to_square = True
if "shortest_edge" in size:
size = size["shortest_edge"]
default_to_square = False
elif "height" in size and "width" in size:
size = (size["height"], size["width"])
else:
raise ValueError("Size must contain either 'shortest_edge' or 'height' and 'width'.")
output_size = get_resize_output_image_size(
image,
size=size,
default_to_square=default_to_square,
input_data_format=input_data_format,
)
return resize(
image,
size=output_size,
resample=resample,
data_format=data_format,
input_data_format=input_data_format,
**kwargs,
)
def pad(
self,
image: np.ndarray,
padding: Union[int, tuple[int, int], Iterable[tuple[int, int]]],
mode: PaddingMode = PaddingMode.CONSTANT,
constant_values: Union[float, Iterable[float]] = 0.0,
data_format: Optional[Union[str, ChannelDimension]] = None,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
) -> np.ndarray:
"""
        Pads the `image` with the specified `padding` and `mode`. Padding can be applied in the (`height`, `width`)
        dimensions or in the (`num_patches`) dimension. In the second case an iterable of tuples is expected
        as input.
Args:
image (`np.ndarray`):
The image to pad.
padding (`int` or `tuple[int, int]` or `Iterable[tuple[int, int]]`):
Padding to apply to the edges of the height, width axes. Can be one of three formats:
- `((before_height, after_height), (before_width, after_width))` unique pad widths for each axis.
- `((before, after),)` yields same before and after pad for height and width.
- `(pad,)` or int is a shortcut for before = after = pad width for all axes.
mode (`PaddingMode`):
The padding mode to use. Can be one of:
- `"constant"`: pads with a constant value.
- `"reflect"`: pads with the reflection of the vector mirrored on the first and last values of the
vector along each axis.
- `"replicate"`: pads with the replication of the last value on the edge of the array along each axis.
- `"symmetric"`: pads with the reflection of the vector mirrored along the edge of the array.
constant_values (`float` or `Iterable[float]`, *optional*):
The value to use for the padding if `mode` is `"constant"`.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format for the output image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
If unset, will use same as the input image.
input_data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format for the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
If unset, will use the inferred format of the input image.
Returns:
`np.ndarray`: The padded image.
"""
        # call the general `pad` if padding on `height/width`, otherwise it's the `num_patches` dim
if isinstance(padding, int) or len(padding) != 4:
return pad(image, padding, mode, constant_values, data_format, input_data_format)
if input_data_format is None:
input_data_format = infer_channel_dimension_format(image)
if mode == PaddingMode.CONSTANT:
image = np.pad(image, padding, mode="constant", constant_values=constant_values)
elif mode == PaddingMode.REFLECT:
image = np.pad(image, padding, mode="reflect")
elif mode == PaddingMode.REPLICATE:
image = np.pad(image, padding, mode="edge")
elif mode == PaddingMode.SYMMETRIC:
image = np.pad(image, padding, mode="symmetric")
else:
raise ValueError(f"Invalid padding mode: {mode}")
image = (
to_channel_dimension_format(image, data_format, input_data_format) if data_format is not None else image
)
return image
def _preprocess(
self,
images: ImageInput,
do_resize: Optional[bool] = None,
size: Optional[dict[str, int]] = None,
resample: Optional[PILImageResampling] = None,
do_center_crop: Optional[bool] = None,
crop_size: Optional[int] = None,
do_rescale: Optional[bool] = None,
rescale_factor: Optional[float] = None,
do_normalize: Optional[bool] = None,
image_mean: Optional[Union[float, list[float]]] = None,
image_std: Optional[Union[float, list[float]]] = None,
data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
    ) -> list[np.ndarray]:
"""
Preprocess an image or batch of images. Copy of the `preprocess` method from `CLIPImageProcessor`.
Args:
images (`ImageInput`):
Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
passing in images with pixel values between 0 and 1, set `do_rescale=False`.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the image.
size (`dict[str, int]`, *optional*, defaults to `self.size`):
Size of the image after resizing. Shortest edge of the image is resized to size["shortest_edge"], with
the longest edge resized to keep the input aspect ratio.
resample (`int`, *optional*, defaults to `self.resample`):
Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
has an effect if `do_resize` is set to `True`.
do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
Whether to center crop the image.
crop_size (`dict[str, int]`, *optional*, defaults to `self.crop_size`):
Size of the center crop. Only has an effect if `do_center_crop` is set to `True`.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image.
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
Whether to normalize the image.
image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`.
image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to
`True`.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- Unset: Use the channel dimension format of the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
"""
images = make_flat_list_of_images(images)
all_images = []
for image in images:
if do_resize:
image = self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)
if do_center_crop:
image = self.center_crop(image=image, size=crop_size, input_data_format=input_data_format)
if do_rescale:
image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
if do_normalize:
image = self.normalize(
image=image, mean=image_mean, std=image_std, input_data_format=input_data_format
)
all_images.append(image)
images = [
to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
for image in all_images
]
return images
def _resize_for_patching(
self, image: np.ndarray, target_resolution: tuple, resample, input_data_format: ChannelDimension
) -> np.ndarray:
"""
Resizes an image to a target resolution while maintaining aspect ratio.
Args:
image (np.ndarray):
The input image.
target_resolution (tuple):
The target resolution (height, width) of the image.
resample (`PILImageResampling`):
Resampling filter to use if resizing the image.
input_data_format (`ChannelDimension` or `str`):
The channel dimension format of the input image.
Returns:
            np.ndarray: The resized image.
"""
new_height, new_width = get_patch_output_size(image, target_resolution, input_data_format)
# Resize the image
resized_image = resize(image, (new_height, new_width), resample=resample, input_data_format=input_data_format)
return resized_image
def _get_padding_size(self, original_resolution: tuple, target_resolution: tuple):
original_height, original_width = original_resolution
target_height, target_width = target_resolution
paste_x, r_x = divmod(target_width - original_width, 2)
paste_y, r_y = divmod(target_height - original_height, 2)
return (paste_y, paste_y + r_y), (paste_x, paste_x + r_x)
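    # Worked example of the centering arithmetic above (illustrative only, not used by
    # the processor): fitting a 299x400 resize into a 336x672 target leaves 37 rows and
    # 272 columns of slack; the slack is split evenly and the odd row goes to the bottom.
    def _example_get_padding_size(self):
        padding = self._get_padding_size((299, 400), (336, 672))
        assert padding == ((18, 19), (136, 136))
        return padding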
def _pad_for_patching(
self, image: np.ndarray, target_resolution: tuple, input_data_format: ChannelDimension
) -> np.ndarray:
"""
Pad an image to a target resolution while maintaining aspect ratio.
"""
new_resolution = get_patch_output_size(image, target_resolution, input_data_format)
padding = self._get_padding_size(new_resolution, target_resolution)
padded_image = self.pad(image, padding=padding)
return padded_image
def get_image_patches(
self,
image: np.ndarray,
grid_pinpoints,
size: tuple,
patch_size: int,
resample: PILImageResampling,
data_format: ChannelDimension,
input_data_format: ChannelDimension,
) -> list[np.ndarray]:
"""
Process an image with variable resolutions by dividing it into patches.
Args:
image (np.ndarray):
The input image to be processed.
grid_pinpoints (List):
                A list of possible resolutions from which the best one is selected.
size (`tuple`):
Size to resize the original image to.
patch_size (`int`):
Size of the patches to divide the image into.
resample (`PILImageResampling`):
Resampling filter to use if resizing the image.
data_format (`ChannelDimension` or `str`):
The channel dimension format for the output image.
input_data_format (`ChannelDimension` or `str`):
The channel dimension format of the input image.
Returns:
list[np.ndarray]: A list of NumPy arrays containing the processed image patches.
"""
if not isinstance(grid_pinpoints, list):
raise TypeError("grid_pinpoints must be a list of possible resolutions.")
possible_resolutions = grid_pinpoints
image_size = get_image_size(image, channel_dim=input_data_format)
best_resolution = select_best_resolution(image_size, possible_resolutions)
resized_image = self._resize_for_patching(
image, best_resolution, resample=resample, input_data_format=input_data_format
)
padded_image = self._pad_for_patching(resized_image, best_resolution, input_data_format=input_data_format)
patches = divide_to_patches(padded_image, patch_size=patch_size, input_data_format=input_data_format)
# make sure that all patches are in the input data format
patches = [
to_channel_dimension_format(patch, channel_dim=data_format, input_channel_dim=input_data_format)
for patch in patches
]
resized_original_image = resize(
image,
size=size,
resample=resample,
data_format=data_format,
input_data_format=input_data_format,
)
image_patches = [resized_original_image] + patches
return image_patches
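    # Illustrative sketch (not part of the processor): for a 500x800 image with the
    # default pinpoints, the best resolution is 672x672, which divides into four
    # 336x336 patches; together with the resized original that gives five arrays.
    def _example_get_image_patches_count(self):
        image = np.zeros((500, 800, 3), dtype=np.uint8)
        patches = self.get_image_patches(
            image,
            grid_pinpoints=[[336, 672], [672, 336], [672, 672], [1008, 336], [336, 1008]],
            size=(336, 336),
            patch_size=336,
            resample=PILImageResampling.BICUBIC,
            data_format=ChannelDimension.LAST,
            input_data_format=ChannelDimension.LAST,
        )
        assert len(patches) == 5 and patches[0].shape == (336, 336, 3)
        return len(patches)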
def _pad_for_batching(
self,
pixel_values: list[np.ndarray],
data_format: Optional[Union[str, ChannelDimension]] = None,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
):
"""
        Pads images on the `num_of_patches` dimension with zeros to form a batch with the same number of patches.
Args:
pixel_values (`list[np.ndarray]`):
                An array of pixel values for each image, of shape (`batch_size`, `num_patches`, `image_in_3D`)
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format for the output image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
If unset, will use same as the input image.
input_data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format for the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
If unset, will use the inferred format of the input image.
Returns:
list[`np.ndarray`]: The padded images.
"""
max_patch = max(len(x) for x in pixel_values)
pixel_values = [
self.pad(
image,
padding=((0, max_patch - image.shape[0]), (0, 0), (0, 0), (0, 0)),
data_format=data_format,
input_data_format=input_data_format,
)
for image in pixel_values
]
return pixel_values
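    # Illustrative sketch (not part of the processor): two "images" with 5 and 3 patches
    # are padded on the patch axis so that both end up with 5 patches; the spatial sizes
    # are kept tiny on purpose.
    def _example_pad_for_batching_shapes(self):
        pixel_values = [
            np.zeros((5, 3, 8, 8), dtype=np.float32),
            np.zeros((3, 3, 8, 8), dtype=np.float32),
        ]
        padded = self._pad_for_batching(pixel_values)
        assert all(x.shape == (5, 3, 8, 8) for x in padded)
        return [x.shape for x in padded]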
def preprocess(
self,
images: ImageInput,
do_resize: Optional[bool] = None,
size: Optional[dict[str, int]] = None,
image_grid_pinpoints: Optional[list] = None,
resample: Optional[PILImageResampling] = None,
do_center_crop: Optional[bool] = None,
crop_size: Optional[int] = None,
do_rescale: Optional[bool] = None,
rescale_factor: Optional[float] = None,
do_normalize: Optional[bool] = None,
image_mean: Optional[Union[float, list[float]]] = None,
image_std: Optional[Union[float, list[float]]] = None,
do_pad: Optional[bool] = None,
do_convert_rgb: Optional[bool] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
):
"""
Args:
images (`ImageInput`):
Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
passing in images with pixel values between 0 and 1, set `do_rescale=False`.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the image.
size (`dict[str, int]`, *optional*, defaults to `self.size`):
Size of the image after resizing. Shortest edge of the image is resized to size["shortest_edge"], with
the longest edge resized to keep the input aspect ratio.
image_grid_pinpoints (`List` *optional*, defaults to `self.image_grid_pinpoints`):
A list of possible resolutions to use for processing high resolution images. The best resolution is
selected based on the original size of the image.
resample (`int`, *optional*, defaults to `self.resample`):
Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
has an effect if `do_resize` is set to `True`.
do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
Whether to center crop the image.
crop_size (`dict[str, int]`, *optional*, defaults to `self.crop_size`):
Size of the center crop. Only has an effect if `do_center_crop` is set to `True`.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image.
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
Whether to normalize the image.
image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`.
image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to
`True`.
do_pad (`bool`, *optional*, defaults to `self.do_pad`):
Whether to pad the image. If `True`, will pad the patch dimension of the images in the batch to the largest
number of patches in the batch. Padding will be applied to the bottom and right with zeros.
do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
Whether to convert the image to RGB.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- Unset: Use the channel dimension format of the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
"""
do_resize = do_resize if do_resize is not None else self.do_resize
size = size if size is not None else self.size
size = get_size_dict(size, param_name="size", default_to_square=False)
image_grid_pinpoints = image_grid_pinpoints if image_grid_pinpoints is not None else self.image_grid_pinpoints
resample = resample if resample is not None else self.resample
do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
crop_size = crop_size if crop_size is not None else self.crop_size
crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
do_rescale = do_rescale if do_rescale is not None else self.do_rescale
rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
do_normalize = do_normalize if do_normalize is not None else self.do_normalize
image_mean = image_mean if image_mean is not None else self.image_mean
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | true |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/llava_next/convert_llava_next_weights_to_hf.py | src/transformers/models/llava_next/convert_llava_next_weights_to_hf.py | # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert LLaVa-NeXT (LLaVa-1.6) checkpoints from the original repository.
URL: https://github.com/haotian-liu/LLaVA/tree/main.
The command used to obtain original logits is the following:
python llava/eval/run_llava.py --model-path "liuhaotian/llava-v1.6-mistral-7b" --image-file "images/llava_v1_5_radar.jpg" --query "What is shown in this image?" --max_new_tokens 100 --temperature 0
Note: logits are tested with torch==2.1.2.
"""
import argparse
import gc
import glob
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download, snapshot_download
from PIL import Image
from safetensors import safe_open
from transformers import (
AddedToken,
AutoConfig,
AutoTokenizer,
LlavaNextConfig,
LlavaNextForConditionalGeneration,
LlavaNextImageProcessor,
LlavaNextProcessor,
)
KEYS_TO_MODIFY_MAPPING = {
"model.vision_tower.": "",
"model.mm_projector": "multi_modal_projector",
"model": "model.model",
"vision_model.model": "vision_model",
"lm_head": "language_model.lm_head",
"model.model": "language_model.model",
"multi_modal_projector.0": "multi_modal_projector.linear_1",
"multi_modal_projector.2": "multi_modal_projector.linear_2",
"language_model.model.image_newline": "image_newline",
}
def load_original_state_dict(model_id):
directory_path = snapshot_download(repo_id=model_id, allow_patterns=["*.safetensors"])
original_state_dict = {}
for path in glob.glob(f"{directory_path}/*"):
if path.endswith(".safetensors"):
with safe_open(path, framework="pt", device="cpu") as f:
for key in f.keys():
original_state_dict[key] = f.get_tensor(key)
return original_state_dict
def convert_state_dict_to_hf(state_dict):
new_state_dict = {}
for key, value in state_dict.items():
if key.endswith(".inv_freq"):
continue
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
key = key.replace(key_to_modify, new_key)
new_state_dict[key] = value.to(torch.float16)
return new_state_dict
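# Illustrative sketch (not part of the conversion script): how a couple of original
# checkpoint keys come out of convert_state_dict_to_hf. The dummy tensors are made up.
def _example_key_renaming():
    dummy = {
        "model.mm_projector.0.weight": torch.zeros(1),
        "model.layers.0.self_attn.rotary_emb.inv_freq": torch.zeros(1),
    }
    converted = convert_state_dict_to_hf(dummy)
    assert "multi_modal_projector.linear_1.weight" in converted  # renamed
    assert not any(k.endswith(".inv_freq") for k in converted)  # inv_freq buffers are dropped
    return sorted(converted)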
def load_image():
url = "https://github.com/haotian-liu/LLaVA/blob/1a91fc274d7c35a9b50b3cb29c4247ae5837ce39/images/llava_v1_5_radar.jpg?raw=true"
image = Image.open(requests.get(url, stream=True).raw)
return image
def convert_llava_to_hf(model_id, pytorch_dump_folder_path, push_to_hub=False):
# load original config
filepath = hf_hub_download(repo_id=model_id, filename="config.json", repo_type="model")
# read json
with open(filepath) as f:
data = json.load(f)
print(data)
if model_id == "liuhaotian/llava-v1.6-mistral-7b":
text_model_id = "mistralai/Mistral-7B-Instruct-v0.2"
image_token_id = 32000
elif model_id == "liuhaotian/llava-v1.6-vicuna-7b":
text_model_id = "lmsys/vicuna-7b-v1.5"
image_token_id = 32000
elif model_id == "liuhaotian/llava-v1.6-vicuna-13b":
text_model_id = "lmsys/vicuna-13b-v1.5"
image_token_id = 32000
elif model_id == "liuhaotian/llava-v1.6-34b":
text_model_id = "NousResearch/Nous-Hermes-2-Yi-34B"
image_token_id = 64000
elif model_id == "lmms-lab/llama3-llava-next-8b":
text_model_id = "meta-llama/Meta-Llama-3-8B-Instruct"
image_token_id = 128256
elif model_id == "lmms-lab/llava-next-72b":
text_model_id = "Qwen/Qwen1.5-72B-Chat"
image_token_id = 151646
elif model_id == "lmms-lab/llava-next-110b":
text_model_id = "Qwen/Qwen1.5-110B-Chat"
image_token_id = 151646
vision_model_id = data["mm_vision_tower"]
torch.set_default_dtype(torch.float16)
text_config = AutoConfig.from_pretrained(text_model_id)
use_fast = model_id != "liuhaotian/llava-v1.6-34b"
tokenizer = AutoTokenizer.from_pretrained(text_model_id, use_fast=use_fast)
tokenizer.add_tokens(AddedToken("<image>", special=True, normalized=False), special_tokens=True)
if model_id in ("liuhaotian/llava-v1.6-mistral-7b", "lmms-lab/llama3-llava-next-8b"):
# Mistral-7B doesn't have a padding token set yet
tokenizer.add_special_tokens({"pad_token": "<pad>"})
image_processor = LlavaNextImageProcessor.from_pretrained(vision_model_id)
processor = LlavaNextProcessor(tokenizer=tokenizer, image_processor=image_processor)
config = LlavaNextConfig(
text_config=text_config.to_dict(),
image_grid_pinpoints=image_processor.image_grid_pinpoints,
use_image_newline_parameter=True,
image_token_id=image_token_id,
)
with torch.device("meta"):
model = LlavaNextForConditionalGeneration(config)
# load original state dict
state_dict = load_original_state_dict(model_id)
state_dict = convert_state_dict_to_hf(state_dict)
model.load_state_dict(state_dict, assign=True)
model.eval()
pre_expansion_embeddings = model.language_model.model.embed_tokens.weight.data
mu = torch.mean(pre_expansion_embeddings, dim=0).float()
n = pre_expansion_embeddings.size()[0]
sigma = ((pre_expansion_embeddings - mu).T @ (pre_expansion_embeddings - mu)) / n
dist = torch.distributions.multivariate_normal.MultivariateNormal(mu, covariance_matrix=1e-5 * sigma)
# We add an image token so we resize the model
# Pad to 64 for performance reasons
# Qwen-based models have extra unused space in the vocab size already, so no need to resize
if model_id not in ["lmms-lab/llava-next-72b", "lmms-lab/llava-next-110b"]:
pad_shape = 64
vocab_size = config.text_config.vocab_size
if model_id == "liuhaotian/llava-v1.6-34b":
# this one has 3 additional tokens, namely <|startoftext|>, <|endoftext|> and <image>
num_tokens = vocab_size + 3
else:
# this one has 2 additional tokens, namely <image> and <pad>
num_tokens = vocab_size + 2
model.resize_token_embeddings(num_tokens, pad_to_multiple_of=pad_shape)
model.language_model.model.embed_tokens.weight.data[vocab_size:] = torch.stack(
tuple(
dist.sample() for _ in range(model.language_model.model.embed_tokens.weight.data[vocab_size:].shape[0])
),
dim=0,
)
model.language_model.lm_head.weight.data[vocab_size:] = torch.stack(
tuple(dist.sample() for _ in range(model.language_model.lm_head.weight.data[vocab_size:].shape[0])),
dim=0,
)
print(f"Saving model and processor for {model_id} to {pytorch_dump_folder_path}")
Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
model.save_pretrained(pytorch_dump_folder_path)
processor.save_pretrained(pytorch_dump_folder_path)
# Make space so we can load the model properly now.
del state_dict
gc.collect()
    # Load everything back for the inference tests in float32 because the previous script was written that way,
    # though the weights are mostly loaded in fp16 since the original weights are in fp16
model = LlavaNextForConditionalGeneration.from_pretrained(pytorch_dump_folder_path, device_map="auto")
processor = LlavaNextProcessor.from_pretrained(pytorch_dump_folder_path)
device = model.device
# prepare inputs
image = load_image()
if model_id == "liuhaotian/llava-v1.6-mistral-7b":
prompt = "[INST] <image>\nWhat is shown in this image? [/INST]"
elif model_id in ["liuhaotian/llava-v1.6-vicuna-7b", "liuhaotian/llava-v1.6-vicuna-13b"]:
prompt = "A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions. USER: <image>\nWhat is shown in this image? ASSISTANT:"
elif model_id == "liuhaotian/llava-v1.6-34b":
prompt = "<|im_start|>system\nAnswer the questions.<|im_end|><|im_start|>user\n<image>\nWhat is shown in this image?<|im_end|><|im_start|>assistant\n"
elif model_id == "lmms-lab/llama3-llava-next-8b":
prompt = "<|start_header_id|>system<|end_header_id|>\n\nYou are a helpful language and vision assistant. You are able to understand the visual content that the user provides, and assist the user with a variety of tasks using natural language.<|eot_id|><|start_header_id|><|start_header_id|>user<|end_header_id|>\n\n<image>\nWhat is shown in this image?<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n"
elif model_id in ["lmms-lab/llava-next-72b", "lmms-lab/llava-next-110b"]:
prompt = "<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n<|im_start|>user\n<image>\nWhat is shown in this image?<|im_end|>\n<|im_start|>assistant\n"
inputs = processor(images=image, text=prompt, return_tensors="pt")
# verify inputs
filepath = hf_hub_download(repo_id="nielsr/test-image", filename="llava_1_6_pixel_values.pt", repo_type="dataset")
original_pixel_values = torch.load(filepath, map_location="cpu", weights_only=True)
assert torch.allclose(original_pixel_values, inputs.pixel_values.half())
if model_id == "liuhaotian/llava-v1.6-mistral-7b":
filepath = hf_hub_download(repo_id="nielsr/test-image", filename="llava_1_6_input_ids.pt", repo_type="dataset")
original_input_ids = torch.load(filepath, map_location="cpu", weights_only=True)
# replace -200 by image_token_id (since we use token ID = 32000 for the image token)
original_input_ids[original_input_ids == -200] = image_token_id
assert original_input_ids[0].tolist() == inputs.input_ids[0].tolist()
elif model_id == "liuhaotian/llava-v1.6-34b":
filepath = hf_hub_download(
repo_id="nielsr/test-image", filename="llava_1_6_34b_input_ids.pt", repo_type="dataset"
)
original_input_ids = torch.load(filepath, map_location="cpu", weights_only=True)
# replace -200 by image_token_id
original_input_ids[original_input_ids == -200] = image_token_id
assert original_input_ids[0].tolist() == inputs.input_ids[0].tolist()
image_sizes = torch.tensor([[899, 1024]])
assert image_sizes[0].tolist() == inputs.image_sizes[0].tolist()
# verify single forward pass
print("Single forward pass")
with torch.inference_mode():
inputs = inputs.to(device)
outputs = model(**inputs)
print("Shape of logits:", outputs.logits.shape)
print("First values of logits:", outputs.logits[0, :3, :3])
if model_id == "liuhaotian/llava-v1.6-mistral-7b":
expected_slice = torch.tensor(
[[-4.8555, -4.6992, -0.1996], [-10.5703, -10.7344, -2.7246], [-7.0391, -7.3672, -0.2634]],
dtype=torch.float32,
device=device,
)
elif model_id == "liuhaotian/llava-v1.6-vicuna-7b":
expected_slice = torch.tensor(
[[1.4883, 0.9976, -0.6992], [-9.7031, -5.7031, -1.5557], [-5.1328, -5.5586, 8.8281]],
dtype=torch.float32,
device=device,
)
elif model_id == "liuhaotian/llava-v1.6-vicuna-13b":
expected_slice = torch.tensor(
[[-0.9614, 7.3125, 0.2106], [-7.2695, -8.5469, 3.6211], [-6.3750, -8.1875, 5.4688]],
dtype=torch.float32,
device=device,
)
elif model_id == "liuhaotian/llava-v1.6-34b":
expected_slice = torch.tensor(
[[-9.0859, -9.1406, 5.9453], [-5.9570, -5.9766, 2.2754], [-5.7305, -5.7539, 4.0000]],
dtype=torch.float32,
device=device,
)
elif model_id == "lmms-lab/llama3-llava-next-8b":
expected_slice = torch.tensor(
[[-3.9648, 1.1396, 3.3145], [-5.3594, -1.5654, -1.9619], [-12.3750, -10.6797, -9.3125]],
dtype=torch.float32,
device=device,
)
elif model_id == "lmms-lab/llava-next-72b":
# Not yet checked against reference
expected_slice = torch.tensor(
[[3.7148, 3.9277, 3.4395], [-0.4341, 1.1387, 6.5117], [3.2324, 3.4688, 4.1133]],
dtype=torch.float32,
device=device,
)
elif model_id == "lmms-lab/llava-next-110b":
# Not yet checked against reference
expected_slice = torch.tensor(
[[-2.5449, -1.6738, -2.0371], [1.0811, 3.4961, 5.0312], [1.7803, 2.5137, 2.4277]],
dtype=torch.float32,
device=device,
)
else:
raise ValueError(f"Model {model_id} not supported")
assert torch.allclose(outputs.logits[0, :3, :3], expected_slice, atol=1e-4)
print("Logits are ok!")
# verify generation
output_ids = model.generate(
**inputs,
max_new_tokens=100,
use_cache=True,
)
generated_text = processor.batch_decode(output_ids, skip_special_tokens=True)[0].strip()
print("Generated text:", repr(generated_text))
if model_id == "liuhaotian/llava-v1.6-mistral-7b":
expected_text = '[INST] \nWhat is shown in this image? [/INST] The image appears to be a radar chart, which is a type of multi-dimensional plot that displays data in the form of a two-dimensional chart of three or more quantitative variables represented on axes starting from the same point.\n\nIn this particular radar chart, there are several axes labeled with different metrics or benchmarks, such as "MMM-Vet," "MMM-Bench," "LLaVA-Bench," "SLED-Bench," "'
elif model_id == "liuhaotian/llava-v1.6-vicuna-7b":
expected_text = """A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human\'s questions. USER: \nWhat is shown in this image? ASSISTANT: The image appears to be a graphical representation of a benchmarking study comparing the performance of various models or systems. It\'s a scatter plot with a circular layout, where each point represents a different model or system, and the axes represent different metrics or dimensions of comparison.\n\nThe metrics are likely related to machine learning or artificial intelligence performance, as indicated by the terms like "BLIP-2," "Instruct BLIP," "POE," "QWA," "V"""
elif model_id == "liuhaotian/llava-v1.6-vicuna-13b":
expected_text = "A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions. USER: \nWhat is shown in this image? ASSISTANT: The image appears to be a radar chart, also known as a spider chart or star chart, which is a graphical method of displaying multivariate data in the form of a two-dimensional chart of three or more quantitative variables represented on axes starting from the same point.\n\nIn this particular radar chart, there are several variables represented:\n\n- MM-Vet\n- LLa-Va-Bench\n- SEED-Bench\n- MM"
elif model_id == "liuhaotian/llava-v1.6-34b":
expected_text = "<|im_start|> system\nAnswer the questions. <|im_start|> user\n\nWhat is shown in this image? <|im_start|> assistant\nThe image appears to be a radar chart, also known as a spider chart, which is a graphical method of displaying multivariate data in the form of a two-dimensional chart of three or more quantitative variables represented on axes starting from the same point.\n\nIn this particular chart, there are several datasets represented by different colors and labeled with various acronyms such as MM-Vet, LLaVA-Bench, SEED-Bench, MM-Bench-CN, MM-"
elif model_id == "lmms-lab/llama3-llava-next-8b":
expected_text = 'system\n\nYou are a helpful language and vision assistant. You are able to understand the visual content that the user provides, and assist the user with a variety of tasks using natural language.user\n\n\nWhat is shown in this image?assistant\n\n\nThe image shows a radar chart, also known as a spider chart or a web chart, which is a type of graph used to display multivariate data in the form of a two-dimensional chart of three or more quantitative variables represented on axes starting from the same point. Each axis represents a different variable, and the values are plotted along each axis and connected to form a polygon.\n\nIn this particular radar chart, there are several axes labeled with different variables, such as "MM-Vet," "LL'
elif model_id == "lmms-lab/llava-next-72b":
expected_text = "system\nYou are a helpful assistant.\nuser\n\nWhat is shown in this image?\nassistant\nThe image displays a radar chart, also known as a spider chart or a star chart, which is a graphical method of displaying multivariate data in the form of a two-dimensional chart of three or more quantitative variables represented on axes starting from the same point. Each axis represents a different variable, and the value of each variable is represented by the distance from the center of the chart to the point where the axis intersects with the line representing that variable's value.\n\nIn this particular chart, there are several axes"
elif model_id == "lmms-lab/llava-next-110b":
expected_text = "system\nYou are a helpful assistant.\nuser\n\nWhat is shown in this image?\nassistant\nThe image shows a radar chart comparing the performance of different models on various visual question answering (VQA) benchmarks. Each colored line represents a different model, and the distance from the center of the chart indicates the score or performance level of the model on a particular benchmark. The benchmarks are labeled around the edges of the chart, and include VQA v2, GQA, VizWiz, TextVQA, MMBench-CN, MME, and others. The chart allows for a"
else:
raise ValueError(f"Model {model_id} not supported")
assert generated_text == expected_text
print("Generated text is ok!")
# verify batched generation
print("Batched generation...")
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
cats_image = Image.open(requests.get(url, stream=True).raw)
inputs = processor(
images=[image, cats_image],
text=[prompt, prompt],
padding=True,
return_tensors="pt",
).to(device)
for k, v in inputs.items():
print(k, v.shape)
print("Image sizes:", inputs.image_sizes)
# make sure image_sizes are the same
# as otherwise batched generation doesn't work
inputs.image_sizes[1] = inputs.image_sizes[0]
print("Batched generation...")
output_ids = model.generate(
**inputs,
max_new_tokens=20,
use_cache=True,
)
outputs = tokenizer.batch_decode(output_ids, skip_special_tokens=True)
print(outputs)
if push_to_hub:
checkpoint_name = model_id.split("/")[-1]
print(f"Pushing to repo llava-hf/{checkpoint_name}-hf")
model.push_to_hub(f"llava-hf/{checkpoint_name}-hf")
processor.push_to_hub(f"llava-hf/{checkpoint_name}-hf")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--model_id",
help="Hub location of the model to convert",
default="liuhaotian/llava-v1.6-mistral-7b",
choices=[
"liuhaotian/llava-v1.6-mistral-7b",
"liuhaotian/llava-v1.6-vicuna-7b",
"liuhaotian/llava-v1.6-vicuna-13b",
"liuhaotian/llava-v1.6-34b",
"lmms-lab/llama3-llava-next-8b",
"lmms-lab/llava-next-72b",
"lmms-lab/llava-next-110b",
],
required=False,
)
parser.add_argument(
"--pytorch_dump_folder_path", type=str, required=True, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether or not to push the converted model to the Hugging Face hub.",
)
args = parser.parse_args()
convert_llava_to_hf(args.model_id, args.pytorch_dump_folder_path, args.push_to_hub)
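# --- Illustrative note (not part of the original script): a typical invocation, assuming this
# file is saved as the LLaVA-NeXT conversion script and the Hub checkpoints are reachable:
#   python convert_llava_next_weights_to_hf.py \
#       --model_id liuhaotian/llava-v1.6-mistral-7b \
#       --pytorch_dump_folder_path /tmp/llava_next_hf
# Add --push_to_hub to upload the converted checkpoint after the sanity checks above pass.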
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/llava_next/modeling_llava_next.py | src/transformers/models/llava_next/modeling_llava_next.py | # coding=utf-8
# Copyright 2024 the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch Llava-NeXT model."""
import math
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
import torch
from torch import nn
from ... import initialization as init
from ...activations import ACT2FN
from ...cache_utils import Cache
from ...generation import GenerationMixin
from ...image_processing_utils import select_best_resolution
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from ...modeling_outputs import BaseModelOutputWithPast, ModelOutput
from ...modeling_utils import PreTrainedModel
from ...processing_utils import Unpack
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, logging
from ..auto import AutoModel
from .configuration_llava_next import LlavaNextConfig
logger = logging.get_logger(__name__)
def get_anyres_image_grid_shape(image_size, grid_pinpoints, patch_size):
"""
Calculate the shape of the image patch grid after the preprocessing for images of any resolution.
Args:
image_size (`tuple`):
The size of the input image in the format (height, width).
grid_pinpoints (`List`):
A list containing possible resolutions. Each item in the list should be a tuple or list
of the form `(height, width)`.
patch_size (`int`):
The size of each image patch.
Returns:
tuple: The shape of the image patch grid in the format (height, width).
"""
if not isinstance(grid_pinpoints, list):
raise TypeError("grid_pinpoints should be a list of tuples or lists")
# ! VERY IMPORTANT: if image_size is a tensor, it must be converted to a list/tuple first, otherwise the calculation below will be wrong
if not isinstance(image_size, (list, tuple)):
if not isinstance(image_size, (torch.Tensor, np.ndarray)):
raise TypeError(
f"image_size invalid type: {type(image_size)} not valid, should be either list, tuple, np.ndarray or tensor"
)
image_size = image_size.tolist()
height, width = select_best_resolution(image_size, grid_pinpoints)
return height // patch_size, width // patch_size
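# --- Illustrative sketch (not part of the original file): a minimal sanity check of the helper
# above, assuming the default LLaVA-NeXT pinpoints and a 336px vision tower. For an image of
# (height, width) = (899, 1024), select_best_resolution picks (672, 672), so the grid is 2 x 2:
#   get_anyres_image_grid_shape(
#       image_size=(899, 1024),
#       grid_pinpoints=[[336, 672], [672, 336], [672, 672], [1008, 336], [336, 1008]],
#       patch_size=336,
#   )  # -> (2, 2)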
def image_size_to_num_patches(image_size, grid_pinpoints, patch_size: int):
"""
Calculate the number of patches after the preprocessing for images of any resolution.
Args:
image_size (`torch.LongTensor` or `np.ndarray` or `tuple[int, int]`):
The size of the input image in the format (height, width).
grid_pinpoints (`List`):
A list containing possible resolutions. Each item in the list should be a tuple or list
of the form `(height, width)`.
patch_size (`int`):
The size of each image patch.
Returns:
int: the number of patches
"""
if not isinstance(grid_pinpoints, list):
raise TypeError("grid_pinpoints should be a list of tuples or lists")
# ! VERY IMPORTANT: if image_size is a tensor, it must be converted to a list/tuple first, otherwise the calculation below will be wrong
if not isinstance(image_size, (list, tuple)):
if not isinstance(image_size, (torch.Tensor, np.ndarray)):
raise TypeError(f"image_size invalid type {type(image_size)} with value {image_size}")
image_size = image_size.tolist()
best_resolution = select_best_resolution(image_size, grid_pinpoints)
height, width = best_resolution
num_patches = 0
# consider changing this to ceil(height / patch_size) * ceil(width / patch_size) + 1
for i in range(0, height, patch_size):
for j in range(0, width, patch_size):
num_patches += 1
# add the base patch
num_patches += 1
return num_patches
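# --- Illustrative sketch (not part of the original file): for the same (899, 1024) image and the
# default pinpoints, the best resolution (672, 672) yields a 2 x 2 tiling plus the base patch:
#   image_size_to_num_patches(
#       image_size=(899, 1024),
#       grid_pinpoints=[[336, 672], [672, 336], [672, 672], [1008, 336], [336, 1008]],
#       patch_size=336,
#   )  # -> 5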
def unpad_image(tensor, original_size):
"""
Unpads a PyTorch tensor of a padded and resized image.
Args:
tensor (`torch.Tensor`):
The image tensor, assumed to be of shape (num_channels, height, width).
original_size (`tuple`):
The original size of the image (height, width).
Returns:
`torch.Tensor`: The unpadded image tensor.
"""
if not isinstance(original_size, (list, tuple)):
if not isinstance(original_size, (torch.Tensor, np.ndarray)):
raise TypeError(
f"image_size invalid type: {type(original_size)} not valid, should be either list, tuple, np.ndarray or tensor"
)
original_size = original_size.tolist()
original_height, original_width = original_size
current_height, current_width = tensor.shape[1:]
original_aspect_ratio = original_width / original_height
current_aspect_ratio = current_width / current_height
if original_aspect_ratio > current_aspect_ratio:
scale_factor = current_width / original_width
new_height = int(round(original_height * scale_factor, 7))
padding = (current_height - new_height) // 2
unpadded_tensor = tensor[:, padding : current_height - padding, :]
else:
scale_factor = current_height / original_height
new_width = int(round(original_width * scale_factor, 7))
padding = (current_width - new_width) // 2
unpadded_tensor = tensor[:, :, padding : current_width - padding]
return unpadded_tensor
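# --- Illustrative sketch (not part of the original file): unpadding a square 48 x 48 feature map
# that came from a (height, width) = (899, 1024) image. Width is the limiting side, so the valid
# height is int(round(899 * 48 / 1024, 7)) = 42 rows, and 3 padded rows are cut from top and bottom:
#   feats = torch.zeros(8, 48, 48)            # (num_channels, height, width); channel count arbitrary
#   unpad_image(feats, (899, 1024)).shape     # -> torch.Size([8, 42, 48])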
@dataclass
@auto_docstring(
custom_intro="""
Base class for Llava outputs, with hidden states and attentions.
"""
)
class LlavaNextModelOutputWithPast(BaseModelOutputWithPast):
r"""
past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
`past_key_values` input) to speed up sequential decoding.
image_hidden_states (`torch.FloatTensor`, *optional*):
A `torch.FloatTensor` of size `(batch_size, num_images, sequence_length, hidden_size)`.
image_hidden_states of the model produced by the vision encoder and after projecting the last hidden state.
"""
image_hidden_states: Optional[torch.FloatTensor] = None
@dataclass
@auto_docstring(
custom_intro="""
Base class for LlavaNext causal language model (or autoregressive) outputs.
"""
)
class LlavaNextCausalLMOutputWithPast(ModelOutput):
r"""
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Language modeling loss (for next-token prediction).
logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
`past_key_values` input) to speed up sequential decoding.
image_hidden_states (`torch.FloatTensor`, *optional*):
A `torch.FloatTensor` of size `(batch_size * num_patches, num_images, sequence_length, hidden_size)`.
image_hidden_states of the model produced by the vision encoder and after projecting the last hidden state.
"""
loss: Optional[torch.FloatTensor] = None
logits: Optional[torch.FloatTensor] = None
past_key_values: Optional[Cache] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
image_hidden_states: Optional[torch.FloatTensor] = None
# Copied from transformers.models.llava.modeling_llava.LlavaMultiModalProjector with Llava->LlavaNext
class LlavaNextMultiModalProjector(nn.Module):
def __init__(self, config: LlavaNextConfig):
super().__init__()
# We have hidden_size * the number of vision feature layers
num_feature_layers = 1 if isinstance(config.vision_feature_layer, int) else len(config.vision_feature_layer)
self.linear_1 = nn.Linear(
config.vision_config.hidden_size * num_feature_layers,
config.text_config.hidden_size,
bias=config.multimodal_projector_bias,
)
self.act = ACT2FN[config.projector_hidden_act]
self.linear_2 = nn.Linear(
config.text_config.hidden_size, config.text_config.hidden_size, bias=config.multimodal_projector_bias
)
def forward(self, image_features):
hidden_states = self.linear_1(image_features)
hidden_states = self.act(hidden_states)
hidden_states = self.linear_2(hidden_states)
return hidden_states
@auto_docstring
class LlavaNextPreTrainedModel(PreTrainedModel):
config: LlavaNextConfig
base_model_prefix = "model"
input_modalities = ("image", "text")
supports_gradient_checkpointing = True
_no_split_modules = ["LlamaDecoderLayer"]
_skip_keys_device_placement = "past_key_values"
_supports_flash_attn = True
_supports_sdpa = True
_can_compile_fullgraph = True
_supports_flex_attn = True
_supports_attention_backend = True
@torch.no_grad()
def _init_weights(self, module):
std = getattr(self.config, "initializer_range", self.config.get_text_config().initializer_range)
if isinstance(module, nn.Linear):
init.normal_(module.weight, mean=0.0, std=std)
if module.bias is not None:
init.zeros_(module.bias)
elif isinstance(module, LlavaNextModel):
embed_std = 1 / math.sqrt(self.config.text_config.hidden_size)
init.normal_(module.image_newline, mean=0.0, std=embed_std)
@auto_docstring(
custom_intro="""
The Llava-Next model which consists of a vision backbone and a language model, without a language modeling head.
"""
)
class LlavaNextModel(LlavaNextPreTrainedModel):
_checkpoint_conversion_mapping = {
r"^language_model.model": "language_model",
}
base_model_prefix = "model"
def __init__(self, config: LlavaNextConfig):
super().__init__(config)
self.vision_tower = AutoModel.from_config(config.vision_config)
self.multi_modal_projector = LlavaNextMultiModalProjector(config)
embed_std = 1 / math.sqrt(config.text_config.hidden_size)
self.image_newline = nn.Parameter(torch.randn(config.text_config.hidden_size, dtype=self.dtype) * embed_std)
self.vocab_size = config.text_config.vocab_size
self.language_model = AutoModel.from_config(config.text_config)
self.pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else -1
self.post_init()
def get_input_embeddings(self):
return self.language_model.get_input_embeddings()
def set_input_embeddings(self, value):
self.language_model.set_input_embeddings(value)
def pack_image_features(self, image_features, image_sizes, vision_feature_select_strategy, image_newline=None):
"""
Reshape, unpad and then pack each image_feature into a single image_features tensor containing all visual vectors.
Args:
image_features (`list[torch.Tensor]` of length num_images, each of shape `(num_patches, image_length, embed_dim)`)
List of image feature tensors; each contains all the visual features of all patches.
image_sizes (`torch.Tensor` of shape `(num_images, 2)`)
Actual image size of each image (H, W).
vision_feature_select_strategy (`str`)
The feature selection strategy used to select the vision feature from the vision backbone.
image_newline (`torch.Tensor` of shape `(embed_dim)`)
New line embedding vector.
Returns:
image_features (`torch.Tensor` of shape `(all_feat_len, embed_dim)`)
feature_lens (`list[int]`)
token length of each image in image_features
"""
new_image_features = []
feature_lens = []
for image_idx, image_feature in enumerate(image_features):
if image_feature.shape[0] > 1:
base_image_feature = image_feature[0]
image_feature = image_feature[1:]
height = width = self.config.vision_config.image_size // self.config.vision_config.patch_size
num_patch_height, num_patch_width = get_anyres_image_grid_shape(
image_sizes[image_idx],
self.config.image_grid_pinpoints,
self.config.vision_config.image_size,
)
if (
np.prod(image_feature.shape) % (num_patch_height * num_patch_width * height * width) != 0
and vision_feature_select_strategy == "default"
):
logger.warning_once(
"Image feature shape does not line up with the provided patch size. "
"You may be using the `default` vision_feature_select_strategy with a"
" visual encoder that does not have CLS."
)
image_feature = image_feature.view(num_patch_height, num_patch_width, height, width, -1)
image_feature = image_feature.permute(4, 0, 2, 1, 3).contiguous()
image_feature = image_feature.flatten(1, 2).flatten(2, 3)
image_feature = unpad_image(image_feature, image_sizes[image_idx])
if image_newline is not None:
image_feature = torch.cat(
(
image_feature,
image_newline[:, None, None]
.expand(*image_feature.shape[:-1], 1)
.to(image_feature.device, image_feature.dtype),
),
dim=-1,
)
image_feature = image_feature.flatten(1, 2).transpose(0, 1)
image_feature = torch.cat((base_image_feature, image_feature), dim=0)
else:
image_feature = image_feature[0]
if image_newline is not None:
image_feature = torch.cat((image_feature, image_newline[None].to(image_feature)), dim=0)
new_image_features.append(image_feature)
feature_lens.append(image_feature.size(0))
feature_lens = torch.tensor(feature_lens, dtype=torch.long, device=image_features[0].device)
return new_image_features, feature_lens
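# --- Illustrative shape trace (not part of the original file), assuming a CLIP-ViT-L/14-336 tower
# (24 x 24 = 576 tokens per 336px tile) and the (899, 1024) example above with 5 patches:
#   - base patch:             (576, embed_dim)
#   - 4 grid tiles reshaped:  (embed_dim, 48, 48) -> unpadded to (embed_dim, 42, 48)
#   - + image_newline column: (embed_dim, 42, 49) -> flattened to (2058, embed_dim)
#   - concatenated output:    (576 + 2058, embed_dim) = (2634, embed_dim), so feature_lens = [2634]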
def get_image_features(
self,
pixel_values: torch.FloatTensor,
image_sizes: torch.Tensor,
vision_feature_layer: Optional[Union[int, list[int]]] = None,
vision_feature_select_strategy: Optional[str] = None,
):
"""
Obtains image last hidden states from the vision tower and apply multimodal projection.
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_patches, channels, height, width)`)
The tensors corresponding to the input images.
image_sizes (`torch.Tensor` of shape `(num_images, 2)`)
Actual image size of each image (H, W).
vision_feature_layer (`Union[int, list[int]]`, *optional*):
The index of the layer to select the vision feature. If multiple indices are provided,
the vision feature of the corresponding indices will be concatenated to form the
vision features.
vision_feature_select_strategy (`str`, *optional*):
The feature selection strategy used to select the vision feature from the vision backbone.
Can be one of `"default"` or `"full"`
Returns:
image_features (`list[torch.Tensor]`): List of image feature tensors; each contains all the visual features of all patches
and is of shape `(num_patches, image_length, embed_dim)`.
"""
vision_feature_layer = (
vision_feature_layer if vision_feature_layer is not None else self.config.vision_feature_layer
)
vision_feature_select_strategy = (
vision_feature_select_strategy
if vision_feature_select_strategy is not None
else self.config.vision_feature_select_strategy
)
# ! infer image_num_patches from image_sizes
image_num_patches = [
image_size_to_num_patches(
image_size=imsize,
grid_pinpoints=self.config.image_grid_pinpoints,
patch_size=self.config.vision_config.image_size,
)
for imsize in image_sizes
]
if pixel_values.dim() == 5:
# stacked if input is (batch_size, num_patches, num_channels, height, width)
_pixel_values_list = [pix_val[:num_patch] for pix_val, num_patch in zip(pixel_values, image_num_patches)]
pixel_values = torch.cat(_pixel_values_list, dim=0)
elif pixel_values.dim() != 4:
# otherwise has to be stacked from list of (num_patches, num_channels, height, width)
raise ValueError(f"pixel_values of shape {pixel_values.shape}, expect to be of 4 or 5 dimensions")
image_features = self.vision_tower(pixel_values, output_hidden_states=True)
# If we have one vision feature layer, return the corresponding hidden states,
# otherwise, select the hidden states of each feature layer and concatenate them
if isinstance(vision_feature_layer, int):
selected_image_feature = image_features.hidden_states[vision_feature_layer]
else:
hs_pool = [image_features.hidden_states[layer_idx] for layer_idx in vision_feature_layer]
selected_image_feature = torch.cat(hs_pool, dim=-1)
if vision_feature_select_strategy == "default":
selected_image_feature = selected_image_feature[:, 1:]
image_features = self.multi_modal_projector(selected_image_feature)
image_features = torch.split(image_features, image_num_patches, dim=0)
# NOTE we only support multimodal_patch_merge_type == "spatial_unpad"
image_features, feature_lens = self.pack_image_features(
image_features,
image_sizes,
vision_feature_select_strategy=vision_feature_select_strategy,
image_newline=self.image_newline,
)
return image_features
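# --- Illustrative shape trace (not part of the original file) for a single (899, 1024) image,
# assuming the llava-v1.6-mistral-7b configuration (5 patches, CLIP hidden size 1024, text hidden size 4096):
#   pixel_values (1, 5, 3, 336, 336) -> vision tower hidden states (5, 577, 1024)
#   -> "default" strategy drops the CLS token -> (5, 576, 1024)
#   -> multi_modal_projector -> (5, 576, 4096) -> split per image and packed as described above.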
def get_placeholder_mask(
self, input_ids: torch.LongTensor, inputs_embeds: torch.FloatTensor, image_features: torch.FloatTensor
):
"""
Obtains multimodal placeholder mask from `input_ids` or `inputs_embeds`, and checks that the placeholder token count is
equal to the length of multimodal features. If the lengths are different, an error is raised.
"""
if input_ids is None:
special_image_mask = inputs_embeds == self.get_input_embeddings()(
torch.tensor(self.config.image_token_id, dtype=torch.long, device=inputs_embeds.device)
)
special_image_mask = special_image_mask.all(-1)
else:
special_image_mask = input_ids == self.config.image_token_id
n_image_tokens = special_image_mask.sum()
special_image_mask = special_image_mask.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device)
if inputs_embeds[special_image_mask].numel() != image_features.numel():
raise ValueError(
f"Image features and image tokens do not match: tokens: {n_image_tokens}, features {image_features.shape[0]}"
)
return special_image_mask
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
pixel_values: Optional[torch.FloatTensor] = None,
image_sizes: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
vision_feature_layer: Optional[Union[int, list[int]]] = None,
vision_feature_select_strategy: Optional[str] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs: Unpack[FlashAttentionKwargs],
) -> Union[tuple, LlavaNextModelOutputWithPast]:
r"""
vision_feature_select_strategy (`str`, *optional*, defaults to `"default"`):
The feature selection strategy used to select the vision feature from the vision backbone.
Can be one of `"default"` or `"full"`. If `"default"`, the CLS token is removed from the vision features.
If `"full"`, the full vision features are used.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
vision_feature_layer = (
vision_feature_layer if vision_feature_layer is not None else self.config.vision_feature_layer
)
vision_feature_select_strategy = (
vision_feature_select_strategy
if vision_feature_select_strategy is not None
else self.config.vision_feature_select_strategy
)
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
if inputs_embeds is None:
inputs_embeds = self.get_input_embeddings()(input_ids)
if pixel_values is not None and pixel_values.size(0) > 0:
image_features = self.get_image_features(
pixel_values,
image_sizes,
vision_feature_layer=vision_feature_layer,
vision_feature_select_strategy=vision_feature_select_strategy,
)
image_features = torch.cat(image_features, dim=0).to(inputs_embeds.device, inputs_embeds.dtype)
special_image_mask = self.get_placeholder_mask(
input_ids, inputs_embeds=inputs_embeds, image_features=image_features
)
inputs_embeds = inputs_embeds.masked_scatter(special_image_mask, image_features)
outputs = self.language_model(
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=True,
cache_position=cache_position,
**kwargs,
)
return LlavaNextModelOutputWithPast(
last_hidden_state=outputs.last_hidden_state,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
image_hidden_states=image_features if pixel_values is not None else None,
)
@auto_docstring(
custom_intro="""
The LLAVA-NeXT model which consists of a vision backbone and a language model.
"""
)
class LlavaNextForConditionalGeneration(LlavaNextPreTrainedModel, GenerationMixin):
_checkpoint_conversion_mapping = {
r"^language_model.model": "model.language_model",
r"^vision_tower": "model.vision_tower",
r"^multi_modal_projector": "model.multi_modal_projector",
r"^image_newline": "model.image_newline",
r"^language_model.lm_head": "lm_head",
}
_tied_weights_keys = {"lm_head.weight": "model.language_model.embed_tokens.weight"}
def __init__(self, config: LlavaNextConfig):
super().__init__(config)
self.model = LlavaNextModel(config)
self.lm_head = nn.Linear(config.text_config.hidden_size, config.text_config.vocab_size, bias=False)
self.post_init()
def get_input_embeddings(self):
return self.model.get_input_embeddings()
def set_input_embeddings(self, value):
self.model.set_input_embeddings(value)
def get_output_embeddings(self) -> nn.Module:
return self.lm_head
def pack_image_features(self, image_features, image_sizes, vision_feature_select_strategy, image_newline=None):
return self.model.pack_image_features(
image_features=image_features,
image_sizes=image_sizes,
vision_feature_select_strategy=vision_feature_select_strategy,
image_newline=image_newline,
)
def get_image_features(
self,
pixel_values: torch.FloatTensor,
image_sizes: torch.Tensor,
vision_feature_layer: Optional[Union[int, list[int]]] = None,
vision_feature_select_strategy: Optional[str] = None,
):
return self.model.get_image_features(
pixel_values=pixel_values,
image_sizes=image_sizes,
vision_feature_layer=vision_feature_layer,
vision_feature_select_strategy=vision_feature_select_strategy,
)
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
pixel_values: Optional[torch.FloatTensor] = None,
image_sizes: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
vision_feature_layer: Optional[Union[int, list[int]]] = None,
vision_feature_select_strategy: Optional[str] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
logits_to_keep: Union[int, torch.Tensor] = 0,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple, LlavaNextCausalLMOutputWithPast]:
r"""
vision_feature_select_strategy (`str`, *optional*, defaults to `"default"`):
The feature selection strategy used to select the vision feature from the vision backbone.
Can be one of `"default"` or `"full"`. If `"default"`, the CLS token is removed from the vision features.
If `"full"`, the full vision features are used.
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Example:
```python
>>> from PIL import Image
>>> import requests
>>> from transformers import AutoProcessor, LlavaNextForConditionalGeneration
>>> model = LlavaNextForConditionalGeneration.from_pretrained("llava-hf/llava-v1.6-mistral-7b-hf")
>>> processor = AutoProcessor.from_pretrained("llava-hf/llava-v1.6-mistral-7b-hf")
>>> prompt = "[INST] <image>\nWhat is shown in this image? [/INST]"
>>> url = "https://www.ilankelman.org/stopsigns/australia.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> inputs = processor(images=image, text=prompt, return_tensors="pt")
>>> # Generate
>>> generate_ids = model.generate(**inputs, max_length=30)
>>> processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
"[INST] \nWhat is shown in this image? [/INST] The image appears to be a radar chart, which is a type of multi-dimensional plot (...)"
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
vision_feature_layer = (
vision_feature_layer if vision_feature_layer is not None else self.config.vision_feature_layer
)
vision_feature_select_strategy = (
vision_feature_select_strategy
if vision_feature_select_strategy is not None
else self.config.vision_feature_select_strategy
)
outputs = self.model(
input_ids,
pixel_values=pixel_values,
image_sizes=image_sizes,
vision_feature_layer=vision_feature_layer,
vision_feature_select_strategy=vision_feature_select_strategy,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=True,
cache_position=cache_position,
**kwargs,
)
hidden_states = outputs[0]
# Only compute necessary logits, and do not upcast them to float if we are not computing the loss
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_head(hidden_states[:, slice_indices, :])
loss = None
if labels is not None:
loss = self.loss_function(
logits=logits, labels=labels, vocab_size=self.config.text_config.vocab_size, **kwargs
)
return LlavaNextCausalLMOutputWithPast(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
image_hidden_states=outputs.image_hidden_states,
)
def prepare_inputs_for_generation(
self,
input_ids,
past_key_values=None,
inputs_embeds=None,
pixel_values=None,
image_sizes=None,
attention_mask=None,
cache_position=None,
logits_to_keep=None,
is_first_iteration=False,
**kwargs,
):
# Overwritten -- in specific circumstances we don't want to forward image inputs to the model
model_inputs = super().prepare_inputs_for_generation(
input_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
attention_mask=attention_mask,
cache_position=cache_position,
logits_to_keep=logits_to_keep,
is_first_iteration=is_first_iteration,
**kwargs,
)
# Pixel values are used only in the first iteration if available
# In subsequent iterations, they are already merged with the text and cached
# NOTE: the first iteration doesn't have to be prefill; it can be the first
# iteration with a question and a cached system prompt (continuing generation from the cache)
if is_first_iteration or not kwargs.get("use_cache", True):
model_inputs["pixel_values"] = pixel_values
model_inputs["image_sizes"] = image_sizes
return model_inputs
@staticmethod
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | true |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/llava_next/__init__.py | src/transformers/models/llava_next/__init__.py | # Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure
if TYPE_CHECKING:
from .configuration_llava_next import *
from .image_processing_llava_next import *
from .image_processing_llava_next_fast import *
from .modeling_llava_next import *
from .processing_llava_next import *
else:
import sys
_file = globals()["__file__"]
sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/llava_next/image_processing_llava_next_fast.py | src/transformers/models/llava_next/image_processing_llava_next_fast.py | # coding=utf-8
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fast Image processor class for LLaVa-NeXT."""
from typing import Optional, Union
import torch
from torchvision.transforms.v2 import functional as F
from ...image_processing_utils import BatchFeature, get_patch_output_size, select_best_resolution
from ...image_processing_utils_fast import (
BaseImageProcessorFast,
divide_to_patches,
group_images_by_shape,
reorder_images,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
SizeDict,
get_image_size,
)
from ...processing_utils import Unpack
from ...utils import (
TensorType,
auto_docstring,
)
from .image_processing_llava_next import LlavaNextImageProcessorKwargs
@auto_docstring
class LlavaNextImageProcessorFast(BaseImageProcessorFast):
# To be checked against the slow image processor
# None values left after checking can be removed
model_input_names = ["pixel_values", "image_sizes"]
resample = PILImageResampling.BICUBIC
image_mean = OPENAI_CLIP_MEAN
image_std = OPENAI_CLIP_STD
size = {"shortest_edge": 224}
default_to_square = False
crop_size = {"height": 224, "width": 224}
do_resize = True
do_center_crop = True
do_rescale = True
do_normalize = True
do_convert_rgb = True
do_pad = True
image_grid_pinpoints = [[336, 672], [672, 336], [672, 672], [1008, 336], [336, 1008]]
valid_kwargs = LlavaNextImageProcessorKwargs
def __init__(self, **kwargs: Unpack[LlavaNextImageProcessorKwargs]):
super().__init__(**kwargs)
@auto_docstring
def preprocess(self, images: ImageInput, **kwargs: Unpack[LlavaNextImageProcessorKwargs]) -> BatchFeature:
return super().preprocess(images, **kwargs)
def _resize_for_patching(
self,
image: "torch.Tensor",
target_resolution: tuple,
interpolation: "F.InterpolationMode",
input_data_format: ChannelDimension,
) -> "torch.Tensor":
"""
Resizes an image to a target resolution while maintaining aspect ratio.
Args:
image ("torch.Tensor"):
The input image.
target_resolution (tuple):
The target resolution (height, width) of the image.
interpolation (`InterpolationMode`):
Resampling filter to use if resizing the image.
input_data_format (`ChannelDimension` or `str`):
The channel dimension format of the input image.
Returns:
"torch.Tensor": The resized and padded image.
"""
new_height, new_width = get_patch_output_size(image, target_resolution, input_data_format)
# Resize the image
resized_image = self.resize(
image=image,
size=SizeDict(height=new_height, width=new_width),
interpolation=interpolation,
)
return resized_image
def _get_padding_size(self, original_resolution: tuple, target_resolution: tuple):
original_height, original_width = original_resolution
target_height, target_width = target_resolution
paste_x, r_x = divmod(target_width - original_width, 2)
paste_y, r_y = divmod(target_height - original_height, 2)
return [paste_x, paste_y, paste_x + r_x, paste_y + r_y]
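# --- Illustrative sketch (not part of the original file): padding a hypothetical 589 x 672 resize
# up to a 672 x 672 target. divmod(83, 2) = (41, 1), so 41 pixels go on top and 42 on the bottom,
# in torchvision's [left, top, right, bottom] order:
#   self._get_padding_size((589, 672), (672, 672))  # -> [0, 41, 0, 42]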
def _pad_for_patching(
self, image: "torch.Tensor", target_resolution: tuple, input_data_format: ChannelDimension
) -> "torch.Tensor":
"""
Pad an image to a target resolution while maintaining aspect ratio.
"""
new_resolution = get_patch_output_size(image, target_resolution, input_data_format)
padding = self._get_padding_size(new_resolution, target_resolution)
padded_image = F.pad(image, padding=padding)
return padded_image
def _get_image_patches(
self,
image: "torch.Tensor",
grid_pinpoints,
size: tuple,
patch_size: int,
interpolation: "F.InterpolationMode",
) -> list["torch.Tensor"]:
"""
Process an image with variable resolutions by dividing it into patches.
Args:
image ("torch.Tensor"):
The input image to be processed.
grid_pinpoints (List):
A list of possible resolutions; each item should be a `(height, width)` pair.
size (`tuple`):
Size to resize the original image to.
patch_size (`int`):
Size of the patches to divide the image into.
interpolation (`"InterpolationMode"`):
Resampling filter to use if resizing the image.
Returns:
list["torch.Tensor"]: A list of NumPy arrays containing the processed image patches.
"""
if not isinstance(grid_pinpoints, list):
raise TypeError("grid_pinpoints must be a list of possible resolutions.")
possible_resolutions = grid_pinpoints
image_size = get_image_size(image, channel_dim=ChannelDimension.FIRST)
best_resolution = select_best_resolution(image_size, possible_resolutions)
resized_image = self._resize_for_patching(
image, best_resolution, interpolation=interpolation, input_data_format=ChannelDimension.FIRST
)
padded_image = self._pad_for_patching(resized_image, best_resolution, input_data_format=ChannelDimension.FIRST)
patches = divide_to_patches(padded_image, patch_size=patch_size)
resized_original_image = F.resize(image, size=size, interpolation=interpolation)
image_patches = [resized_original_image] + patches
return image_patches
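# --- Illustrative sketch (not part of the original file), assuming the 336px configuration used
# by the llava-v1.6 checkpoints: a (3, 899, 1024) image is matched to the (672, 672) pinpoint,
# resized and padded to (3, 672, 672), cut into four 336px tiles, and the 336px resize of the
# full image is prepended:
#   patches = self._get_image_patches(image, grid_pinpoints, size=(336, 336), patch_size=336,
#                                     interpolation=F.InterpolationMode.BICUBIC)
#   len(patches)        # -> 5
#   patches[0].shape    # -> torch.Size([3, 336, 336])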
def _pad_for_batching(
self,
pixel_values: list["torch.Tensor"],
) -> list["torch.Tensor"]:
"""
Pads images along the patch dimension with zeros so that every image in the batch has the same number of patches.
Args:
pixel_values (`list[torch.Tensor]`):
A list of pixel-value tensors, one per image, each of shape (`num_patches`, `num_channels`, `height`, `width`)
Returns:
list[`torch.Tensor`]: The padded images.
"""
max_patch = max(len(x) for x in pixel_values)
pixel_values = [
torch.nn.functional.pad(image, pad=[0, 0, 0, 0, 0, 0, 0, max_patch - image.shape[0]])
for image in pixel_values
]
return pixel_values
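# --- Illustrative sketch (not part of the original file): batching two images that produced 5 and
# 3 patches respectively; the second is zero-padded along its patch dimension to match the first:
#   a = torch.randn(5, 3, 336, 336)
#   b = torch.randn(3, 3, 336, 336)
#   [p.shape for p in self._pad_for_batching([a, b])]
#   # -> [torch.Size([5, 3, 336, 336]), torch.Size([5, 3, 336, 336])]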
def _preprocess(
self,
images: list["torch.Tensor"],
do_resize: bool,
size: SizeDict,
image_grid_pinpoints: list[list[int]],
interpolation: Optional["F.InterpolationMode"],
do_center_crop: bool,
crop_size: SizeDict,
do_rescale: bool,
rescale_factor: float,
do_normalize: bool,
image_mean: Optional[Union[float, list[float]]],
image_std: Optional[Union[float, list[float]]],
do_pad: bool,
disable_grouping: Optional[bool],
return_tensors: Optional[Union[str, TensorType]],
**kwargs,
) -> BatchFeature:
processed_images = []
image_sizes = []
# Determine the size tuple
if size and size.height and size.width:
size_tuple = (size.height, size.width)
else:
size_tuple = (size.shortest_edge, size.shortest_edge)
# Determine the patch size
if crop_size and crop_size.height:
patch_size = crop_size.height
elif size and size.height:
patch_size = size.height
else:
patch_size = size.shortest_edge
for image in images:
image_patches = self._get_image_patches(
image,
image_grid_pinpoints,
size=size_tuple,
patch_size=patch_size,
interpolation=interpolation,
)
# Group images by size for batched processing
processed_image_patches_grouped = {}
grouped_image_patches, grouped_image_patches_index = group_images_by_shape(
image_patches, disable_grouping=disable_grouping
)
for shape, stacked_image_patches in grouped_image_patches.items():
if do_resize:
stacked_image_patches = self.resize(
image=stacked_image_patches,
size=size,
interpolation=interpolation,
)
if do_center_crop:
stacked_image_patches = self.center_crop(stacked_image_patches, crop_size)
# Fused rescale and normalize
stacked_image_patches = self.rescale_and_normalize(
stacked_image_patches, do_rescale, rescale_factor, do_normalize, image_mean, image_std
)
processed_image_patches_grouped[shape] = stacked_image_patches
processed_image_patches = reorder_images(processed_image_patches_grouped, grouped_image_patches_index)
processed_image_patches = torch.stack(processed_image_patches, dim=0)
processed_images.append(processed_image_patches)
image_sizes.append(get_image_size(image, ChannelDimension.FIRST))
if do_pad:
processed_images = self._pad_for_batching(processed_images)
return BatchFeature(
data={"pixel_values": processed_images, "image_sizes": image_sizes}, tensor_type=return_tensors
)
__all__ = ["LlavaNextImageProcessorFast"]
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/llava_next/configuration_llava_next.py | src/transformers/models/llava_next/configuration_llava_next.py | # coding=utf-8
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Llava-NeXT model configuration"""
from ...configuration_utils import PreTrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING, AutoConfig
logger = logging.get_logger(__name__)
class LlavaNextConfig(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`LlavaNextForConditionalGeneration`]. It is used to instantiate an
Llava-NeXT model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the [llava-hf/llava-v1.6-mistral-7b-hf](https://huggingface.co/llava-hf/llava-v1.6-mistral-7b-hf)
model.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vision_config (`Union[AutoConfig, dict]`, *optional*, defaults to `CLIPVisionConfig`):
The config object or dictionary of the vision backbone.
text_config (`Union[AutoConfig, dict]`, *optional*, defaults to `LlamaConfig`):
The config object or dictionary of the text backbone.
image_token_index (`int`, *optional*, defaults to 32000):
The image token index to encode the image prompt.
projector_hidden_act (`str`, *optional*, defaults to `"gelu"`):
The activation function used by the multimodal projector.
vision_feature_select_strategy (`str`, *optional*, defaults to `"default"`):
The feature selection strategy used to select the vision feature from the vision backbone.
Can be one of `"default"` or `"full"`. If `"default"`, the CLS token is removed from the vision features.
If `"full"`, the full vision features are used.
vision_feature_layer (`Union[int, list[int]]`, *optional*, defaults to -2):
The index of the layer to select the vision feature. If multiple indices are provided,
the vision feature of the corresponding indices will be concatenated to form the
vision features.
image_grid_pinpoints (`List`, *optional*, defaults to `[[336, 672], [672, 336], [672, 672], [1008, 336], [336, 1008]]`):
A list of possible resolutions to use for processing high resolution images. Each item in the list should be a tuple or list
of the form `(height, width)`.
tie_word_embeddings (`bool`, *optional*, defaults to `False`):
Whether the model's input and output word embeddings should be tied.
image_seq_length (`int`, *optional*, defaults to 576):
Sequence length of one image embedding.
multimodal_projector_bias (`bool`, *optional*, defaults to `True`):
Whether to use bias in the multimodal projector.
Example:
```python
>>> from transformers import LlavaNextForConditionalGeneration, LlavaNextConfig, CLIPVisionConfig, LlamaConfig
>>> # Initializing a CLIP-vision config
>>> vision_config = CLIPVisionConfig()
>>> # Initializing a Llama config
>>> text_config = LlamaConfig()
>>> # Initializing a Llava-Next llava-hf/llava-v1.6-mistral-7b-hf style configuration
>>> configuration = LlavaNextConfig(vision_config, text_config)
>>> # Initializing a model from the llava-hf/llava-v1.6-mistral-7b-hf style configuration
>>> model = LlavaNextForConditionalGeneration(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "llava_next"
attribute_map = {
"image_token_id": "image_token_index",
}
sub_configs = {"text_config": AutoConfig, "vision_config": AutoConfig}
def __init__(
self,
vision_config=None,
text_config=None,
image_token_index=32000,
projector_hidden_act="gelu",
vision_feature_select_strategy="default",
vision_feature_layer=-2,
image_grid_pinpoints=None,
tie_word_embeddings=False,
image_seq_length=576,
multimodal_projector_bias=True,
**kwargs,
):
self.image_token_index = image_token_index
self.projector_hidden_act = projector_hidden_act
self.image_seq_length = image_seq_length
self.multimodal_projector_bias = multimodal_projector_bias
if vision_feature_select_strategy not in ["default", "full"]:
raise ValueError(
"vision_feature_select_strategy should be one of 'default', 'full'."
f"Got: {vision_feature_select_strategy}"
)
self.vision_feature_select_strategy = vision_feature_select_strategy
self.vision_feature_layer = vision_feature_layer
image_grid_pinpoints = (
image_grid_pinpoints
if image_grid_pinpoints is not None
else [[336, 672], [672, 336], [672, 672], [1008, 336], [336, 1008]]
)
self.image_grid_pinpoints = image_grid_pinpoints
if isinstance(vision_config, dict):
vision_config["model_type"] = vision_config.get("model_type", "clip_vision_model")
vision_config = CONFIG_MAPPING[vision_config["model_type"]](**vision_config)
elif vision_config is None:
vision_config = CONFIG_MAPPING["clip_vision_model"](
intermediate_size=4096,
hidden_size=1024,
patch_size=14,
image_size=336,
num_hidden_layers=24,
num_attention_heads=16,
vocab_size=32000,
projection_dim=768,
)
self.vision_config = vision_config
if isinstance(text_config, dict):
text_config["model_type"] = text_config.get("model_type", "llama")
text_config = CONFIG_MAPPING[text_config["model_type"]](**text_config)
elif text_config is None:
text_config = CONFIG_MAPPING["llama"]()
self.text_config = text_config
super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)
__all__ = ["LlavaNextConfig"]
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/llava_next/processing_llava_next.py | src/transformers/models/llava_next/processing_llava_next.py | # coding=utf-8
# Copyright 2024 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Processor class for LLaVa-NeXT.
"""
from typing import Optional, Union
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...image_processing_utils import select_best_resolution
from ...image_utils import ImageInput, get_image_size, to_numpy_array
from ...processing_utils import (
MultiModalData,
ProcessingKwargs,
ProcessorMixin,
Unpack,
)
from ...tokenization_utils_base import PreTokenizedInput, TextInput
from ...utils import logging
logger = logging.get_logger(__name__)
class LlavaNextProcessorKwargs(ProcessingKwargs, total=False):
_defaults = {
"text_kwargs": {
"padding": False,
"return_mm_token_type_ids": False,
},
"images_kwargs": {
"do_pad": True,
},
}
class LlavaNextProcessor(ProcessorMixin):
r"""
Constructs a LLaVa-NeXT processor which wraps a LLaVa-NeXT image processor and a LLaMa tokenizer into a single processor.
[`LlavaNextProcessor`] offers all the functionalities of [`LlavaNextImageProcessor`] and [`LlamaTokenizerFast`]. See the
[`~LlavaNextProcessor.__call__`] and [`~LlavaNextProcessor.decode`] for more information.
Args:
image_processor ([`LlavaNextImageProcessor`], *optional*):
The image processor is a required input.
tokenizer ([`LlamaTokenizerFast`], *optional*):
The tokenizer is a required input.
patch_size (`int`, *optional*):
Patch size from the vision tower.
vision_feature_select_strategy (`str`, *optional*):
The feature selection strategy used to select the vision feature from the vision backbone.
Should be the same as in the model's config.
chat_template (`str`, *optional*): A Jinja template which will be used to convert lists of messages
in a chat into a tokenizable string.
image_token (`str`, *optional*, defaults to `"<image>"`):
Special token used to denote image location.
num_additional_image_tokens (`int`, *optional*, defaults to 0):
Number of additional tokens added to the image embeddings, such as CLS (+1). If the backbone has no CLS or other
extra tokens appended, no need to set this arg.
"""
def __init__(
self,
image_processor=None,
tokenizer=None,
patch_size=None,
vision_feature_select_strategy=None,
chat_template=None,
image_token="<image>", # set the default and let users change if they have peculiar special tokens in rare cases
num_additional_image_tokens=0,
**kwargs,
):
self.patch_size = patch_size
self.num_additional_image_tokens = num_additional_image_tokens
self.vision_feature_select_strategy = vision_feature_select_strategy
self.image_token = tokenizer.image_token if hasattr(tokenizer, "image_token") else image_token
self.image_token_id = (
tokenizer.image_token_id
if getattr(tokenizer, "image_token_id", None)
else tokenizer.convert_tokens_to_ids(self.image_token)
)
super().__init__(image_processor, tokenizer, chat_template=chat_template)
def __call__(
self,
images: Optional[ImageInput] = None,
text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]] = None,
**kwargs: Unpack[LlavaNextProcessorKwargs],
) -> BatchFeature:
"""
Main method to prepare one or several sequence(s) and image(s) for the model. This method forwards the `text`
and `kwargs` arguments to LlamaTokenizerFast's [`~LlamaTokenizerFast.__call__`] if `text` is not `None` to encode
the text. To prepare the image(s), this method forwards the `images` and `kwargs` arguments to
LlavaNextImageProcessor's [`~LlavaNextImageProcessor.__call__`] if `images` is not `None`. Please refer to the docstring
of the above two methods for more information.
Args:
images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `list[PIL.Image.Image]`, `list[np.ndarray]`, `list[torch.Tensor]`):
The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
tensor. Both channels-first and channels-last formats are supported.
text (`str`, `list[str]`, `list[list[str]]`):
The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
(pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
`is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
Returns:
[`BatchFeature`]: A [`BatchFeature`] with the following fields:
- **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
- **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
`return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
`None`).
- **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
"""
if images is None and text is None:
raise ValueError("You have to specify at least images or text.")
output_kwargs = self._merge_kwargs(
LlavaNextProcessorKwargs,
tokenizer_init_kwargs=self.tokenizer.init_kwargs,
**kwargs,
)
if images is not None:
image_inputs = self.image_processor(images, **output_kwargs["images_kwargs"])
else:
image_inputs = {}
if isinstance(text, str):
text = [text]
elif not isinstance(text, list) and not isinstance(text[0], str):
raise TypeError("Invalid input text. Please provide a string, or a list of strings")
prompt_strings = text
if image_inputs:
image_sizes = iter(image_inputs["image_sizes"])
height, width = get_image_size(to_numpy_array(image_inputs["pixel_values"][0][0]))
prompt_strings = []
for sample in text:
while self.image_token in sample:
image_size = next(image_sizes)
if not isinstance(image_size, (list, tuple)):
# cast to list to avoid numerical precision errors when calculating unpadding
image_size = image_size.tolist()
orig_height, orig_width = image_size
num_image_tokens = self._get_number_of_features(orig_height, orig_width, height, width)
if self.vision_feature_select_strategy == "default":
num_image_tokens -= 1
sample = sample.replace(self.image_token, "<placeholder>" * num_image_tokens, 1)
prompt_strings.append(sample)
prompt_strings = [sample.replace("<placeholder>", self.image_token) for sample in prompt_strings]
return_tensors = output_kwargs["text_kwargs"].pop("return_tensors", None)
return_mm_token_type_ids = output_kwargs["text_kwargs"].pop("return_mm_token_type_ids", None)
text_inputs = self.tokenizer(prompt_strings, **output_kwargs["text_kwargs"])
self._check_special_mm_tokens(prompt_strings, text_inputs, modalities=["image"])
if return_mm_token_type_ids:
array_ids = np.array(text_inputs["input_ids"])
mm_token_type_ids = np.zeros_like(text_inputs["input_ids"])
mm_token_type_ids[array_ids == self.image_token_id] = 1
text_inputs["mm_token_type_ids"] = mm_token_type_ids.tolist()
return BatchFeature(data={**text_inputs, **image_inputs}, tensor_type=return_tensors)
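# A minimal usage sketch (illustrative, not part of the original file). The checkpoint name below is
# an assumption; any LLaVA-NeXT checkpoint that ships this processor config would work the same way:
#
#     from PIL import Image
#     from transformers import LlavaNextProcessor
#
#     processor = LlavaNextProcessor.from_pretrained("llava-hf/llava-v1.6-mistral-7b-hf")
#     image = Image.open("example.jpg")
#     inputs = processor(images=image, text="<image>\nDescribe this picture.", return_tensors="pt")
#     # `inputs` holds input_ids, attention_mask, pixel_values and image_sizes, with the single
#     # <image> token expanded to one placeholder per image feature.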
def _get_number_of_features(self, orig_height: int, orig_width: int, height: int, width: int) -> int:
image_grid_pinpoints = self.image_processor.image_grid_pinpoints
height_best_resolution, width_best_resolution = select_best_resolution(
[orig_height, orig_width], image_grid_pinpoints
)
scale_height, scale_width = height_best_resolution // height, width_best_resolution // width
patches_height = height // self.patch_size
patches_width = width // self.patch_size
unpadded_features, newline_features = self._get_unpadded_features(
orig_height, orig_width, patches_height, patches_width, scale_height, scale_width
)
# The base patch covers the entire image (+1 for the CLS)
base_features = patches_height * patches_width + self.num_additional_image_tokens
num_image_tokens = unpadded_features + newline_features + base_features
return num_image_tokens
def _get_unpadded_features(self, height, width, patches_height, patches_width, scale_height, scale_width):
"""
Get the number of features for a given image with height/width. LLaVA-NeXT differs from LLaVA
because it divides each image into patches depending on its resolution. Therefore we need to calculate how many
patches an image is divided into and derive the number of features from that.
"""
current_height = patches_height * scale_height
current_width = patches_width * scale_width
original_aspect_ratio = width / height
current_aspect_ratio = current_width / current_height
if original_aspect_ratio > current_aspect_ratio:
new_height = int(round(height * (current_width / width), 7))
padding = (current_height - new_height) // 2
current_height -= padding * 2
else:
new_width = int(round(width * (current_height / height), 7))
padding = (current_width - new_width) // 2
current_width -= padding * 2
unpadded_features = current_height * current_width
newline_features = current_height
return (unpadded_features, newline_features)
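# Worked example (illustrative, not part of the original file): for an original image with
# height=300, width=500, a 24x24 patch grid per tile and a 2x2 tile grid
# (patches_height = patches_width = 24, scale_height = scale_width = 2):
#     current_height, current_width = 48, 48
#     original_aspect_ratio = 500 / 300 ≈ 1.67 > current_aspect_ratio = 1.0
#     new_height = int(round(300 * (48 / 500), 7)) = 28, padding = (48 - 28) // 2 = 10
#     current_height = 48 - 2 * 10 = 28
#     unpadded_features = 28 * 48 = 1344, newline_features = 28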
def _get_num_multimodal_tokens(self, image_sizes=None, **kwargs):
"""
Computes the number of placeholder tokens needed for multimodal inputs with the given sizes.
Args:
image_sizes (`list[list[int]]`, *optional*):
The input sizes formatted as (height, width) for each image.
Returns:
`MultiModalData`: A `MultiModalData` object holding the number of tokens for each of the provided
input modalities, along with other useful data.
"""
vision_data = {}
if image_sizes is not None:
images_kwargs = LlavaNextProcessorKwargs._defaults.get("images_kwargs", {})
images_kwargs.update(kwargs)
size = images_kwargs.get("size", None) or self.image_processor.size
size = (
(size["shortest_edge"], size["shortest_edge"])
if "shortest_edge" in size
else (min(size["height"], size["width"]), min(size["height"], size["width"]))
)
processed_height, processed_width = size
batch_num_image_tokens = []
num_image_patches = [1] * len(image_sizes)  # llava-next doesn't batch pixels the way Idefics does, thus 1 patch per image
for image_size in image_sizes:
orig_height, orig_width = image_size
num_image_tokens = self._get_number_of_features(
orig_height, orig_width, processed_height, processed_width
)
if self.vision_feature_select_strategy == "default":
num_image_tokens -= 1
batch_num_image_tokens.append(num_image_tokens)
vision_data.update({"num_image_tokens": batch_num_image_tokens, "num_image_patches": num_image_patches})
return MultiModalData(**vision_data)
__all__ = ["LlavaNextProcessor"]
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/swin/modeling_swin.py | src/transformers/models/swin/modeling_swin.py | # coding=utf-8
# Copyright 2022 Microsoft Research and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch Swin Transformer model."""
import collections.abc
import math
import warnings
from dataclasses import dataclass
from typing import Optional, Union
import torch
from torch import nn
from ... import initialization as init
from ...activations import ACT2FN
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...pytorch_utils import meshgrid
from ...utils import ModelOutput, auto_docstring, logging, torch_int
from ...utils.backbone_utils import BackboneMixin
from .configuration_swin import SwinConfig
logger = logging.get_logger(__name__)
# drop_path, SwinPatchEmbeddings, SwinPatchMerging and SwinDropPath are from the timm library.
@dataclass
@auto_docstring(
custom_intro="""
Swin encoder's outputs, with potential hidden states and attentions.
"""
)
class SwinEncoderOutput(ModelOutput):
r"""
reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, hidden_size, height, width)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
include the spatial dimensions.
"""
last_hidden_state: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
attentions: Optional[tuple[torch.FloatTensor, ...]] = None
reshaped_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
@dataclass
@auto_docstring(
custom_intro="""
Swin model's outputs that also contains a pooling of the last hidden states.
"""
)
class SwinModelOutput(ModelOutput):
r"""
pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`, *optional*, returned when `add_pooling_layer=True` is passed):
Average pooling of the last layer hidden-state.
reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, hidden_size, height, width)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
include the spatial dimensions.
"""
last_hidden_state: Optional[torch.FloatTensor] = None
pooler_output: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
attentions: Optional[tuple[torch.FloatTensor, ...]] = None
reshaped_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
@dataclass
@auto_docstring(
custom_intro="""
Swin masked image model outputs.
"""
)
class SwinMaskedImageModelingOutput(ModelOutput):
r"""
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `bool_masked_pos` is provided):
Masked image modeling (MLM) loss.
reconstruction (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Reconstructed pixel values.
reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, hidden_size, height, width)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
include the spatial dimensions.
"""
loss: Optional[torch.FloatTensor] = None
reconstruction: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
attentions: Optional[tuple[torch.FloatTensor, ...]] = None
reshaped_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
@property
def logits(self):
warnings.warn(
"logits attribute is deprecated and will be removed in version 5 of Transformers."
" Please use the reconstruction attribute to retrieve the final output instead.",
FutureWarning,
)
return self.reconstruction
@dataclass
@auto_docstring(
custom_intro="""
Swin outputs for image classification.
"""
)
class SwinImageClassifierOutput(ModelOutput):
r"""
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Classification (or regression if config.num_labels==1) loss.
logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):
Classification (or regression if config.num_labels==1) scores (before SoftMax).
reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, hidden_size, height, width)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
include the spatial dimensions.
"""
loss: Optional[torch.FloatTensor] = None
logits: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
attentions: Optional[tuple[torch.FloatTensor, ...]] = None
reshaped_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
def window_partition(input_feature, window_size):
"""
Partitions the given input into windows.
"""
batch_size, height, width, num_channels = input_feature.shape
input_feature = input_feature.view(
batch_size, height // window_size, window_size, width // window_size, window_size, num_channels
)
windows = input_feature.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, num_channels)
return windows
def window_reverse(windows, window_size, height, width):
"""
Merges windows to produce higher resolution features.
"""
num_channels = windows.shape[-1]
windows = windows.view(-1, height // window_size, width // window_size, window_size, window_size, num_channels)
windows = windows.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, height, width, num_channels)
return windows
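# Shape sketch (illustrative, not part of the original file): for a (1, 56, 56, 96) feature map and
# window_size=7, window_partition returns (64, 7, 7, 96), i.e. an 8x8 grid of 7x7-token windows;
# window_reverse(windows, 7, 56, 56) restores the (1, 56, 56, 96) layout exactly.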
class SwinEmbeddings(nn.Module):
"""
Construct the patch and position embeddings. Optionally, also the mask token.
"""
def __init__(self, config, use_mask_token=False):
super().__init__()
self.patch_embeddings = SwinPatchEmbeddings(config)
num_patches = self.patch_embeddings.num_patches
self.patch_grid = self.patch_embeddings.grid_size
self.mask_token = nn.Parameter(torch.zeros(1, 1, config.embed_dim)) if use_mask_token else None
if config.use_absolute_embeddings:
self.position_embeddings = nn.Parameter(torch.zeros(1, num_patches + 1, config.embed_dim))
else:
self.position_embeddings = None
self.norm = nn.LayerNorm(config.embed_dim)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.patch_size = config.patch_size
self.config = config
# Copied from transformers.models.vit.modeling_vit.ViTEmbeddings.interpolate_pos_encoding
def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
"""
This method interpolates the pre-trained position encodings so that the model can be used on higher-resolution
images. It is also adapted to support torch.jit tracing.
Adapted from:
- https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and
- https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211
"""
num_patches = embeddings.shape[1] - 1
num_positions = self.position_embeddings.shape[1] - 1
# always interpolate when tracing to ensure the exported model works for dynamic input shapes
if not torch.jit.is_tracing() and num_patches == num_positions and height == width:
return self.position_embeddings
class_pos_embed = self.position_embeddings[:, :1]
patch_pos_embed = self.position_embeddings[:, 1:]
dim = embeddings.shape[-1]
new_height = height // self.patch_size
new_width = width // self.patch_size
sqrt_num_positions = torch_int(num_positions**0.5)
patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim)
patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2)
patch_pos_embed = nn.functional.interpolate(
patch_pos_embed,
size=(new_height, new_width),
mode="bicubic",
align_corners=False,
)
patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
return torch.cat((class_pos_embed, patch_pos_embed), dim=1)
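# Shape sketch (illustrative, not part of the original file): if the absolute position embeddings were
# learned for a 56x56 patch grid (3136 patch positions + 1 leading position) and the current input yields
# a 112x112 grid, the 3136 patch positions are reshaped to (1, 56, 56, dim), resized bicubically to
# (1, 112, 112, dim), flattened back to (1, 12544, dim) and re-concatenated with the leading position.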
def forward(
self,
pixel_values: Optional[torch.FloatTensor],
bool_masked_pos: Optional[torch.BoolTensor] = None,
interpolate_pos_encoding: bool = False,
) -> tuple[torch.Tensor]:
_, num_channels, height, width = pixel_values.shape
embeddings, output_dimensions = self.patch_embeddings(pixel_values)
embeddings = self.norm(embeddings)
batch_size, seq_len, _ = embeddings.size()
if bool_masked_pos is not None:
mask_tokens = self.mask_token.expand(batch_size, seq_len, -1)
# replace the masked visual tokens by mask_tokens
mask = bool_masked_pos.unsqueeze(-1).type_as(mask_tokens)
embeddings = embeddings * (1.0 - mask) + mask_tokens * mask
if self.position_embeddings is not None:
if interpolate_pos_encoding:
embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width)
else:
embeddings = embeddings + self.position_embeddings
embeddings = self.dropout(embeddings)
return embeddings, output_dimensions
class SwinPatchEmbeddings(nn.Module):
"""
This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial
`hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a
Transformer.
"""
def __init__(self, config):
super().__init__()
image_size, patch_size = config.image_size, config.patch_size
num_channels, hidden_size = config.num_channels, config.embed_dim
image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size)
patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.num_patches = num_patches
self.grid_size = (image_size[0] // patch_size[0], image_size[1] // patch_size[1])
self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=patch_size)
def maybe_pad(self, pixel_values, height, width):
if width % self.patch_size[1] != 0:
pad_values = (0, self.patch_size[1] - width % self.patch_size[1])
pixel_values = nn.functional.pad(pixel_values, pad_values)
if height % self.patch_size[0] != 0:
pad_values = (0, 0, 0, self.patch_size[0] - height % self.patch_size[0])
pixel_values = nn.functional.pad(pixel_values, pad_values)
return pixel_values
def forward(self, pixel_values: Optional[torch.FloatTensor]) -> tuple[torch.Tensor, tuple[int]]:
_, num_channels, height, width = pixel_values.shape
# pad the input to be divisible by self.patch_size, if needed
pixel_values = self.maybe_pad(pixel_values, height, width)
embeddings = self.projection(pixel_values)
_, _, height, width = embeddings.shape
output_dimensions = (height, width)
embeddings = embeddings.flatten(2).transpose(1, 2)
return embeddings, output_dimensions
class SwinPatchMerging(nn.Module):
"""
Patch Merging Layer.
Args:
input_resolution (`tuple[int]`):
Resolution of input feature.
dim (`int`):
Number of input channels.
norm_layer (`nn.Module`, *optional*, defaults to `nn.LayerNorm`):
Normalization layer class.
"""
def __init__(self, input_resolution: tuple[int], dim: int, norm_layer: nn.Module = nn.LayerNorm) -> None:
super().__init__()
self.input_resolution = input_resolution
self.dim = dim
self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)
self.norm = norm_layer(4 * dim)
def maybe_pad(self, input_feature, height, width):
should_pad = (height % 2 == 1) or (width % 2 == 1)
if should_pad:
pad_values = (0, 0, 0, width % 2, 0, height % 2)
input_feature = nn.functional.pad(input_feature, pad_values)
return input_feature
def forward(self, input_feature: torch.Tensor, input_dimensions: tuple[int, int]) -> torch.Tensor:
height, width = input_dimensions
# `dim` is height * width
batch_size, dim, num_channels = input_feature.shape
input_feature = input_feature.view(batch_size, height, width, num_channels)
# pad input so that height and width are even, if needed
input_feature = self.maybe_pad(input_feature, height, width)
# [batch_size, height/2, width/2, num_channels]
input_feature_0 = input_feature[:, 0::2, 0::2, :]
# [batch_size, height/2, width/2, num_channels]
input_feature_1 = input_feature[:, 1::2, 0::2, :]
# [batch_size, height/2, width/2, num_channels]
input_feature_2 = input_feature[:, 0::2, 1::2, :]
# [batch_size, height/2, width/2, num_channels]
input_feature_3 = input_feature[:, 1::2, 1::2, :]
# batch_size height/2 width/2 4*num_channels
input_feature = torch.cat([input_feature_0, input_feature_1, input_feature_2, input_feature_3], -1)
input_feature = input_feature.view(batch_size, -1, 4 * num_channels) # batch_size height/2*width/2 4*C
input_feature = self.norm(input_feature)
input_feature = self.reduction(input_feature)
return input_feature
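# Shape sketch (illustrative, not part of the original file): for input of shape (batch_size, 56*56, 96)
# with input_dimensions=(56, 56), each of the four strided views is (batch_size, 28, 28, 96); after
# concatenation along the channel axis and the linear reduction, the output is (batch_size, 28*28, 192),
# i.e. spatial resolution halved and channel dimension doubled.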
# Copied from transformers.models.beit.modeling_beit.drop_path
def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
"""
Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
"""
if drop_prob == 0.0 or not training:
return input
keep_prob = 1 - drop_prob
shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
random_tensor.floor_() # binarize
output = input.div(keep_prob) * random_tensor
return output
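# Behaviour sketch (illustrative, not part of the original file): with drop_prob=0.25 and training=True,
# each sample in the batch is zeroed with probability 0.25 (the (batch, 1, ..., 1) random tensor
# broadcasts over the remaining dimensions) and surviving samples are rescaled by 1 / 0.75, so the
# expected value of the output equals the input; in eval mode or with drop_prob=0.0 the input is
# returned unchanged.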
# Copied from transformers.models.beit.modeling_beit.BeitDropPath with Beit->Swin
class SwinDropPath(nn.Module):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
def __init__(self, drop_prob: Optional[float] = None) -> None:
super().__init__()
self.drop_prob = drop_prob
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
return drop_path(hidden_states, self.drop_prob, self.training)
def extra_repr(self) -> str:
return f"p={self.drop_prob}"
class SwinSelfAttention(nn.Module):
def __init__(self, config, dim, num_heads, window_size):
super().__init__()
if dim % num_heads != 0:
raise ValueError(
f"The hidden size ({dim}) is not a multiple of the number of attention heads ({num_heads})"
)
self.num_attention_heads = num_heads
self.attention_head_size = int(dim / num_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.window_size = (
window_size if isinstance(window_size, collections.abc.Iterable) else (window_size, window_size)
)
self.relative_position_bias_table = nn.Parameter(
torch.zeros((2 * self.window_size[0] - 1) * (2 * self.window_size[1] - 1), num_heads)
)
self.register_buffer("relative_position_index", self.create_relative_position_index())
self.query = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias)
self.key = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias)
self.value = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = False,
) -> tuple[torch.Tensor]:
batch_size, dim, num_channels = hidden_states.shape
hidden_shape = (batch_size, dim, -1, self.attention_head_size)
query_layer = self.query(hidden_states).view(hidden_shape).transpose(1, 2)
key_layer = self.key(hidden_states).view(hidden_shape).transpose(1, 2)
value_layer = self.value(hidden_states).view(hidden_shape).transpose(1, 2)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)]
relative_position_bias = relative_position_bias.view(
self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1
)
relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous()
attention_scores = attention_scores + relative_position_bias.unsqueeze(0)
if attention_mask is not None:
# Apply the attention mask (precomputed for all layers in the SwinModel forward() function)
mask_shape = attention_mask.shape[0]
attention_scores = attention_scores.view(
batch_size // mask_shape, mask_shape, self.num_attention_heads, dim, dim
)
attention_scores = attention_scores + attention_mask.unsqueeze(1).unsqueeze(0)
attention_scores = attention_scores.view(-1, self.num_attention_heads, dim, dim)
# Normalize the attention scores to probabilities.
attention_probs = nn.functional.softmax(attention_scores, dim=-1)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(new_context_layer_shape)
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
return outputs
def create_relative_position_index(self):
# get pair-wise relative position index for each token inside the window
coords_h = torch.arange(self.window_size[0])
coords_w = torch.arange(self.window_size[1])
coords = torch.stack(meshgrid([coords_h, coords_w], indexing="ij"))
coords_flatten = torch.flatten(coords, 1)
relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]
relative_coords = relative_coords.permute(1, 2, 0).contiguous()
relative_coords[:, :, 0] += self.window_size[0] - 1
relative_coords[:, :, 1] += self.window_size[1] - 1
relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
relative_position_index = relative_coords.sum(-1)
return relative_position_index
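# Worked example (illustrative, not part of the original file): for a 2x2 window the flattened
# coordinates are h = [0, 0, 1, 1] and w = [0, 1, 0, 1]; after shifting both offsets by
# window_size - 1 = 1 and scaling the row offset by 2 * 2 - 1 = 3, the 4x4 relative_position_index is
#     [[4, 3, 1, 0],
#      [5, 4, 2, 1],
#      [7, 6, 4, 3],
#      [8, 7, 5, 4]]
# covering the 9 = (2*2 - 1) * (2*2 - 1) rows of relative_position_bias_table.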
class SwinSelfOutput(nn.Module):
def __init__(self, config, dim):
super().__init__()
self.dense = nn.Linear(dim, dim)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
return hidden_states
class SwinAttention(nn.Module):
def __init__(self, config, dim, num_heads, window_size):
super().__init__()
self.self = SwinSelfAttention(config, dim, num_heads, window_size)
self.output = SwinSelfOutput(config, dim)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = False,
) -> tuple[torch.Tensor]:
self_outputs = self.self(hidden_states, attention_mask, output_attentions)
attention_output = self.output(self_outputs[0], hidden_states)
outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
return outputs
class SwinIntermediate(nn.Module):
def __init__(self, config, dim):
super().__init__()
self.dense = nn.Linear(dim, int(config.mlp_ratio * dim))
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
class SwinOutput(nn.Module):
def __init__(self, config, dim):
super().__init__()
self.dense = nn.Linear(int(config.mlp_ratio * dim), dim)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
return hidden_states
class SwinLayer(nn.Module):
def __init__(self, config, dim, input_resolution, num_heads, drop_path_rate=0.0, shift_size=0):
super().__init__()
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.shift_size = shift_size
self.window_size = config.window_size
self.input_resolution = input_resolution
self.layernorm_before = nn.LayerNorm(dim, eps=config.layer_norm_eps)
self.attention = SwinAttention(config, dim, num_heads, window_size=self.window_size)
self.drop_path = SwinDropPath(drop_path_rate) if drop_path_rate > 0.0 else nn.Identity()
self.layernorm_after = nn.LayerNorm(dim, eps=config.layer_norm_eps)
self.intermediate = SwinIntermediate(config, dim)
self.output = SwinOutput(config, dim)
def set_shift_and_window_size(self, input_resolution):
if min(input_resolution) <= self.window_size:
# if window size is larger than input resolution, we don't partition windows
self.shift_size = torch_int(0)
self.window_size = (
torch.min(torch.tensor(input_resolution)) if torch.jit.is_tracing() else min(input_resolution)
)
def get_attn_mask(self, height, width, dtype, device):
if self.shift_size > 0:
# calculate attention mask for SW-MSA
img_mask = torch.zeros((1, height, width, 1), dtype=dtype, device=device)
height_slices = (
slice(0, -self.window_size),
slice(-self.window_size, -self.shift_size),
slice(-self.shift_size, None),
)
width_slices = (
slice(0, -self.window_size),
slice(-self.window_size, -self.shift_size),
slice(-self.shift_size, None),
)
count = 0
for height_slice in height_slices:
for width_slice in width_slices:
img_mask[:, height_slice, width_slice, :] = count
count += 1
mask_windows = window_partition(img_mask, self.window_size)
mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
attn_mask = attn_mask.masked_fill(attn_mask != 0, -100.0).masked_fill(attn_mask == 0, 0.0)
else:
attn_mask = None
return attn_mask
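# Mask sketch (illustrative, not part of the original file): for a shifted layer (shift_size > 0) the
# height/width slices split the padded image into up to nine regions; after window partitioning, token
# pairs that fall into the same window but come from different regions receive a -100.0 bias, pushing
# their post-softmax attention weights towards zero, while same-region pairs keep a 0.0 bias.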
def maybe_pad(self, hidden_states, height, width):
pad_right = (self.window_size - width % self.window_size) % self.window_size
pad_bottom = (self.window_size - height % self.window_size) % self.window_size
pad_values = (0, 0, 0, pad_right, 0, pad_bottom)
hidden_states = nn.functional.pad(hidden_states, pad_values)
return hidden_states, pad_values
def forward(
self,
hidden_states: torch.Tensor,
input_dimensions: tuple[int, int],
output_attentions: Optional[bool] = False,
always_partition: Optional[bool] = False,
) -> tuple[torch.Tensor, torch.Tensor]:
if not always_partition:
self.set_shift_and_window_size(input_dimensions)
height, width = input_dimensions
batch_size, _, channels = hidden_states.size()
shortcut = hidden_states
hidden_states = self.layernorm_before(hidden_states)
hidden_states = hidden_states.view(batch_size, height, width, channels)
# pad hidden_states to multiples of window size
hidden_states, pad_values = self.maybe_pad(hidden_states, height, width)
_, height_pad, width_pad, _ = hidden_states.shape
# cyclic shift
if self.shift_size > 0:
shifted_hidden_states = torch.roll(hidden_states, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
else:
shifted_hidden_states = hidden_states
# partition windows
hidden_states_windows = window_partition(shifted_hidden_states, self.window_size)
hidden_states_windows = hidden_states_windows.view(-1, self.window_size * self.window_size, channels)
attn_mask = self.get_attn_mask(
height_pad, width_pad, dtype=hidden_states.dtype, device=hidden_states_windows.device
)
attention_outputs = self.attention(hidden_states_windows, attn_mask, output_attentions=output_attentions)
attention_output = attention_outputs[0]
attention_windows = attention_output.view(-1, self.window_size, self.window_size, channels)
shifted_windows = window_reverse(attention_windows, self.window_size, height_pad, width_pad)
# reverse cyclic shift
if self.shift_size > 0:
attention_windows = torch.roll(shifted_windows, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
else:
attention_windows = shifted_windows
was_padded = pad_values[3] > 0 or pad_values[5] > 0
if was_padded:
attention_windows = attention_windows[:, :height, :width, :].contiguous()
attention_windows = attention_windows.view(batch_size, height * width, channels)
hidden_states = shortcut + self.drop_path(attention_windows)
layer_output = self.layernorm_after(hidden_states)
layer_output = self.intermediate(layer_output)
layer_output = hidden_states + self.output(layer_output)
layer_outputs = (layer_output, attention_outputs[1]) if output_attentions else (layer_output,)
return layer_outputs
class SwinStage(GradientCheckpointingLayer):
def __init__(self, config, dim, input_resolution, depth, num_heads, drop_path, downsample):
super().__init__()
self.config = config
self.dim = dim
self.blocks = nn.ModuleList(
[
SwinLayer(
config=config,
dim=dim,
input_resolution=input_resolution,
num_heads=num_heads,
drop_path_rate=drop_path[i],
shift_size=0 if (i % 2 == 0) else config.window_size // 2,
)
for i in range(depth)
]
)
# patch merging layer
if downsample is not None:
self.downsample = downsample(input_resolution, dim=dim, norm_layer=nn.LayerNorm)
else:
self.downsample = None
self.pointing = False
def forward(
self,
hidden_states: torch.Tensor,
input_dimensions: tuple[int, int],
output_attentions: Optional[bool] = False,
always_partition: Optional[bool] = False,
) -> tuple[torch.Tensor]:
height, width = input_dimensions
for i, layer_module in enumerate(self.blocks):
layer_outputs = layer_module(hidden_states, input_dimensions, output_attentions, always_partition)
hidden_states = layer_outputs[0]
hidden_states_before_downsampling = hidden_states
if self.downsample is not None:
height_downsampled, width_downsampled = (height + 1) // 2, (width + 1) // 2
output_dimensions = (height, width, height_downsampled, width_downsampled)
hidden_states = self.downsample(hidden_states_before_downsampling, input_dimensions)
else:
output_dimensions = (height, width, height, width)
stage_outputs = (hidden_states, hidden_states_before_downsampling, output_dimensions)
if output_attentions:
stage_outputs += layer_outputs[1:]
return stage_outputs
class SwinEncoder(nn.Module):
def __init__(self, config, grid_size):
super().__init__()
self.num_layers = len(config.depths)
self.config = config
dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths), device="cpu")]
self.layers = nn.ModuleList(
[
SwinStage(
config=config,
dim=int(config.embed_dim * 2**i_layer),
input_resolution=(grid_size[0] // (2**i_layer), grid_size[1] // (2**i_layer)),
depth=config.depths[i_layer],
num_heads=config.num_heads[i_layer],
drop_path=dpr[sum(config.depths[:i_layer]) : sum(config.depths[: i_layer + 1])],
downsample=SwinPatchMerging if (i_layer < self.num_layers - 1) else None,
)
for i_layer in range(self.num_layers)
]
)
self.gradient_checkpointing = False
def forward(
self,
hidden_states: torch.Tensor,
input_dimensions: tuple[int, int],
output_attentions: Optional[bool] = False,
output_hidden_states: Optional[bool] = False,
output_hidden_states_before_downsampling: Optional[bool] = False,
always_partition: Optional[bool] = False,
return_dict: Optional[bool] = True,
) -> Union[tuple, SwinEncoderOutput]:
all_hidden_states = () if output_hidden_states else None
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | true |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/swin/convert_swin_simmim_to_pytorch.py | src/transformers/models/swin/convert_swin_simmim_to_pytorch.py | # coding=utf-8
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert Swin SimMIM checkpoints from the original repository.
URL: https://github.com/microsoft/Swin-Transformer/blob/main/MODELHUB.md#simmim-pretrained-swin-v1-models"""
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def get_swin_config(model_name):
config = SwinConfig(image_size=192)
if "base" in model_name:
window_size = 6
embed_dim = 128
depths = (2, 2, 18, 2)
num_heads = (4, 8, 16, 32)
elif "large" in model_name:
window_size = 12
embed_dim = 192
depths = (2, 2, 18, 2)
num_heads = (6, 12, 24, 48)
else:
raise ValueError("Model not supported, only supports base and large variants")
config.window_size = window_size
config.embed_dim = embed_dim
config.depths = depths
config.num_heads = num_heads
return config
def rename_key(name):
if "encoder.mask_token" in name:
name = name.replace("encoder.mask_token", "embeddings.mask_token")
if "encoder.patch_embed.proj" in name:
name = name.replace("encoder.patch_embed.proj", "embeddings.patch_embeddings.projection")
if "encoder.patch_embed.norm" in name:
name = name.replace("encoder.patch_embed.norm", "embeddings.norm")
if "attn.proj" in name:
name = name.replace("attn.proj", "attention.output.dense")
if "attn" in name:
name = name.replace("attn", "attention.self")
if "norm1" in name:
name = name.replace("norm1", "layernorm_before")
if "norm2" in name:
name = name.replace("norm2", "layernorm_after")
if "mlp.fc1" in name:
name = name.replace("mlp.fc1", "intermediate.dense")
if "mlp.fc2" in name:
name = name.replace("mlp.fc2", "output.dense")
if name == "encoder.norm.weight":
name = "layernorm.weight"
if name == "encoder.norm.bias":
name = "layernorm.bias"
if "decoder" in name:
pass
else:
name = "swin." + name
return name
def convert_state_dict(orig_state_dict, model):
for key in orig_state_dict.copy():
val = orig_state_dict.pop(key)
if "attn_mask" in key:
pass
elif "qkv" in key:
key_split = key.split(".")
layer_num = int(key_split[2])
block_num = int(key_split[4])
dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"] = (
val[:dim, :]
)
orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"] = val[
dim : dim * 2, :
]
orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"] = (
val[-dim:, :]
)
else:
orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"] = val[
:dim
]
orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"] = val[
dim : dim * 2
]
orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"] = val[
-dim:
]
else:
orig_state_dict[rename_key(key)] = val
return orig_state_dict
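# Layout sketch (illustrative, not part of the original file): SimMIM checkpoints store the fused
# attention projection as a single qkv weight of shape (3*dim, dim) and bias of shape (3*dim,);
# rows [0:dim], [dim:2*dim] and [2*dim:3*dim] become the query, key and value weights of the matching
# SwinSelfAttention module, whose all_head_size equals dim.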
def convert_swin_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub):
state_dict = torch.load(checkpoint_path, map_location="cpu", weights_only=True)["model"]
config = get_swin_config(model_name)
model = SwinForMaskedImageModeling(config)
model.eval()
new_state_dict = convert_state_dict(state_dict, model)
model.load_state_dict(new_state_dict)
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image_processor = ViTImageProcessor(size={"height": 192, "width": 192})
image = Image.open(requests.get(url, stream=True).raw)
inputs = image_processor(images=image, return_tensors="pt")
with torch.no_grad():
outputs = model(**inputs).logits
print(outputs.shape)
print("Looks ok!")
if pytorch_dump_folder_path is not None:
print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
model.save_pretrained(pytorch_dump_folder_path)
print(f"Saving image processor to {pytorch_dump_folder_path}")
image_processor.save_pretrained(pytorch_dump_folder_path)
if push_to_hub:
print(f"Pushing model and image processor for {model_name} to hub")
model.push_to_hub(f"microsoft/{model_name}")
image_processor.push_to_hub(f"microsoft/{model_name}")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="swin-base-simmim-window6-192",
type=str,
choices=["swin-base-simmim-window6-192", "swin-large-simmim-window12-192"],
help="Name of the Swin SimMIM model you'd like to convert.",
)
parser.add_argument(
"--checkpoint_path",
default="/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth",
type=str,
help="Path to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether or not to push the converted model to the Hugging Face hub.",
)
args = parser.parse_args()
convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
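# Example invocation (illustrative, not part of the original file); the checkpoint path is an assumption
# and must point to a locally downloaded SimMIM .pth file:
#
#     python convert_swin_simmim_to_pytorch.py \
#         --model_name swin-base-simmim-window6-192 \
#         --checkpoint_path ./simmim_pretrain__swin_base__img192_window6__100ep.pth \
#         --pytorch_dump_folder_path ./swin-base-simmim-window6-192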
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/swin/configuration_swin.py | src/transformers/models/swin/configuration_swin.py | # coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Swin Transformer model configuration"""
from ...configuration_utils import PreTrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)
class SwinConfig(BackboneConfigMixin, PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`SwinModel`]. It is used to instantiate a Swin
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the Swin
[microsoft/swin-tiny-patch4-window7-224](https://huggingface.co/microsoft/swin-tiny-patch4-window7-224)
architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
image_size (`int`, *optional*, defaults to 224):
The size (resolution) of each image.
patch_size (`int`, *optional*, defaults to 4):
The size (resolution) of each patch.
num_channels (`int`, *optional*, defaults to 3):
The number of input channels.
embed_dim (`int`, *optional*, defaults to 96):
Dimensionality of patch embedding.
depths (`list(int)`, *optional*, defaults to `[2, 2, 6, 2]`):
Depth of each layer in the Transformer encoder.
num_heads (`list(int)`, *optional*, defaults to `[3, 6, 12, 24]`):
Number of attention heads in each layer of the Transformer encoder.
window_size (`int`, *optional*, defaults to 7):
Size of windows.
mlp_ratio (`float`, *optional*, defaults to 4.0):
Ratio of MLP hidden dimensionality to embedding dimensionality.
qkv_bias (`bool`, *optional*, defaults to `True`):
Whether or not a learnable bias should be added to the queries, keys and values.
hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout probability for all fully connected layers in the embeddings and encoder.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
drop_path_rate (`float`, *optional*, defaults to 0.1):
Stochastic depth rate.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder. If string, `"gelu"`, `"relu"`,
`"selu"` and `"gelu_new"` are supported.
use_absolute_embeddings (`bool`, *optional*, defaults to `False`):
Whether or not to add absolute position embeddings to the patch embeddings.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the layer normalization layers.
encoder_stride (`int`, *optional*, defaults to 32):
Factor to increase the spatial resolution by in the decoder head for masked image modeling.
out_features (`list[str]`, *optional*):
If used as backbone, list of features to output. Can be any of `"stem"`, `"stage1"`, `"stage2"`, etc.
(depending on how many stages the model has). If unset and `out_indices` is set, will default to the
corresponding stages. If unset and `out_indices` is unset, will default to the last stage. Must be in the
same order as defined in the `stage_names` attribute.
out_indices (`list[int]`, *optional*):
If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how
many stages the model has). If unset and `out_features` is set, will default to the corresponding stages.
If unset and `out_features` is unset, will default to the last stage. Must be in the
same order as defined in the `stage_names` attribute.
Example:
```python
>>> from transformers import SwinConfig, SwinModel
>>> # Initializing a Swin microsoft/swin-tiny-patch4-window7-224 style configuration
>>> configuration = SwinConfig()
>>> # Initializing a model (with random weights) from the microsoft/swin-tiny-patch4-window7-224 style configuration
>>> model = SwinModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "swin"
attribute_map = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__(
self,
image_size=224,
patch_size=4,
num_channels=3,
embed_dim=96,
depths=[2, 2, 6, 2],
num_heads=[3, 6, 12, 24],
window_size=7,
mlp_ratio=4.0,
qkv_bias=True,
hidden_dropout_prob=0.0,
attention_probs_dropout_prob=0.0,
drop_path_rate=0.1,
hidden_act="gelu",
use_absolute_embeddings=False,
initializer_range=0.02,
layer_norm_eps=1e-5,
encoder_stride=32,
out_features=None,
out_indices=None,
**kwargs,
):
super().__init__(**kwargs)
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.embed_dim = embed_dim
self.depths = depths
self.num_layers = len(depths)
self.num_heads = num_heads
self.window_size = window_size
self.mlp_ratio = mlp_ratio
self.qkv_bias = qkv_bias
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.drop_path_rate = drop_path_rate
self.hidden_act = hidden_act
self.use_absolute_embeddings = use_absolute_embeddings
self.layer_norm_eps = layer_norm_eps
self.initializer_range = initializer_range
self.encoder_stride = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
self._out_features, self._out_indices = get_aligned_output_features_output_indices(
out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
)
__all__ = ["SwinConfig"]
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/swin/convert_swin_timm_to_pytorch.py | src/transformers/models/swin/convert_swin_timm_to_pytorch.py | import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def get_swin_config(swin_name):
config = SwinConfig()
name_split = swin_name.split("_")
model_size = name_split[1]
img_size = int(name_split[4])
window_size = int(name_split[3].replace("window", ""))  # handles both "window7" and "window12"
if model_size == "tiny":
embed_dim = 96
depths = (2, 2, 6, 2)
num_heads = (3, 6, 12, 24)
elif model_size == "small":
embed_dim = 96
depths = (2, 2, 18, 2)
num_heads = (3, 6, 12, 24)
elif model_size == "base":
embed_dim = 128
depths = (2, 2, 18, 2)
num_heads = (4, 8, 16, 32)
else:
embed_dim = 192
depths = (2, 2, 18, 2)
num_heads = (6, 12, 24, 48)
if "in22k" in swin_name:
num_classes = 21841
else:
num_classes = 1000
repo_id = "huggingface/label-files"
filename = "imagenet-1k-id2label.json"
id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
id2label = {int(k): v for k, v in id2label.items()}
config.id2label = id2label
config.label2id = {v: k for k, v in id2label.items()}
config.image_size = img_size
config.num_labels = num_classes
config.embed_dim = embed_dim
config.depths = depths
config.num_heads = num_heads
config.window_size = window_size
return config
def rename_key(name):
if "patch_embed.proj" in name:
name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
if "patch_embed.norm" in name:
name = name.replace("patch_embed.norm", "embeddings.norm")
if "layers" in name:
name = "encoder." + name
if "attn.proj" in name:
name = name.replace("attn.proj", "attention.output.dense")
if "attn" in name:
name = name.replace("attn", "attention.self")
if "norm1" in name:
name = name.replace("norm1", "layernorm_before")
if "norm2" in name:
name = name.replace("norm2", "layernorm_after")
if "mlp.fc1" in name:
name = name.replace("mlp.fc1", "intermediate.dense")
if "mlp.fc2" in name:
name = name.replace("mlp.fc2", "output.dense")
if name == "norm.weight":
name = "layernorm.weight"
if name == "norm.bias":
name = "layernorm.bias"
if "head" in name:
name = name.replace("head", "classifier")
else:
name = "swin." + name
return name
def convert_state_dict(orig_state_dict, model):
for key in orig_state_dict.copy():
val = orig_state_dict.pop(key)
if "mask" in key:
continue
elif "qkv" in key:
key_split = key.split(".")
layer_num = int(key_split[1])
block_num = int(key_split[3])
dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"] = (
val[:dim, :]
)
orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"] = val[
dim : dim * 2, :
]
orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"] = (
val[-dim:, :]
)
else:
orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"] = val[
:dim
]
orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"] = val[
dim : dim * 2
]
orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"] = val[
-dim:
]
else:
orig_state_dict[rename_key(key)] = val
return orig_state_dict
def convert_swin_checkpoint(swin_name, pytorch_dump_folder_path):
timm_model = timm.create_model(swin_name, pretrained=True)
timm_model.eval()
config = get_swin_config(swin_name)
model = SwinForImageClassification(config)
model.eval()
new_state_dict = convert_state_dict(timm_model.state_dict(), model)
model.load_state_dict(new_state_dict)
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swin_name.replace("_", "-")))
image = Image.open(requests.get(url, stream=True).raw)
inputs = image_processor(images=image, return_tensors="pt")
timm_outs = timm_model(inputs["pixel_values"])
hf_outs = model(**inputs).logits
assert torch.allclose(timm_outs, hf_outs, atol=1e-3)
print(f"Saving model {swin_name} to {pytorch_dump_folder_path}")
model.save_pretrained(pytorch_dump_folder_path)
print(f"Saving image processor to {pytorch_dump_folder_path}")
image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--swin_name",
default="swin_tiny_patch4_window7_224",
type=str,
help="Name of the Swin timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
args = parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/swin/__init__.py | src/transformers/models/swin/__init__.py | # Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure
if TYPE_CHECKING:
from .configuration_swin import *
from .modeling_swin import *
else:
import sys
_file = globals()["__file__"]
sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/dinat/configuration_dinat.py | src/transformers/models/dinat/configuration_dinat.py | # coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dilated Neighborhood Attention Transformer model configuration"""
from ...configuration_utils import PreTrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)
class DinatConfig(BackboneConfigMixin, PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`DinatModel`]. It is used to instantiate a Dinat
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the Dinat
[shi-labs/dinat-mini-in1k-224](https://huggingface.co/shi-labs/dinat-mini-in1k-224) architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
patch_size (`int`, *optional*, defaults to 4):
The size (resolution) of each patch. NOTE: Only patch size of 4 is supported at the moment.
num_channels (`int`, *optional*, defaults to 3):
The number of input channels.
embed_dim (`int`, *optional*, defaults to 64):
Dimensionality of patch embedding.
depths (`list[int]`, *optional*, defaults to `[3, 4, 6, 5]`):
Number of layers in each level of the encoder.
num_heads (`list[int]`, *optional*, defaults to `[2, 4, 8, 16]`):
Number of attention heads in each layer of the Transformer encoder.
kernel_size (`int`, *optional*, defaults to 7):
Neighborhood Attention kernel size.
dilations (`list[list[int]]`, *optional*, defaults to `[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]]`):
Dilation value of each NA layer in the Transformer encoder.
mlp_ratio (`float`, *optional*, defaults to 3.0):
Ratio of MLP hidden dimensionality to embedding dimensionality.
qkv_bias (`bool`, *optional*, defaults to `True`):
Whether or not a learnable bias should be added to the queries, keys and values.
hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout probability for all fully connected layers in the embeddings and encoder.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
drop_path_rate (`float`, *optional*, defaults to 0.1):
Stochastic depth rate.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder. If string, `"gelu"`, `"relu"`,
`"selu"` and `"gelu_new"` are supported.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the layer normalization layers.
layer_scale_init_value (`float`, *optional*, defaults to 0.0):
The initial value for the layer scale. Disabled if <=0.
out_features (`list[str]`, *optional*):
If used as backbone, list of features to output. Can be any of `"stem"`, `"stage1"`, `"stage2"`, etc.
(depending on how many stages the model has). If unset and `out_indices` is set, will default to the
corresponding stages. If unset and `out_indices` is unset, will default to the last stage. Must be in the
same order as defined in the `stage_names` attribute.
out_indices (`list[int]`, *optional*):
If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how
many stages the model has). If unset and `out_features` is set, will default to the corresponding stages.
If unset and `out_features` is unset, will default to the last stage. Must be in the
same order as defined in the `stage_names` attribute.
Example:
```python
>>> from transformers import DinatConfig, DinatModel
>>> # Initializing a Dinat shi-labs/dinat-mini-in1k-224 style configuration
>>> configuration = DinatConfig()
>>> # Initializing a model (with random weights) from the shi-labs/dinat-mini-in1k-224 style configuration
>>> model = DinatModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "dinat"
attribute_map = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__(
self,
patch_size=4,
num_channels=3,
embed_dim=64,
depths=[3, 4, 6, 5],
num_heads=[2, 4, 8, 16],
kernel_size=7,
dilations=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]],
mlp_ratio=3.0,
qkv_bias=True,
hidden_dropout_prob=0.0,
attention_probs_dropout_prob=0.0,
drop_path_rate=0.1,
hidden_act="gelu",
initializer_range=0.02,
layer_norm_eps=1e-5,
layer_scale_init_value=0.0,
out_features=None,
out_indices=None,
**kwargs,
):
super().__init__(**kwargs)
self.patch_size = patch_size
self.num_channels = num_channels
self.embed_dim = embed_dim
self.depths = depths
self.num_layers = len(depths)
self.num_heads = num_heads
self.kernel_size = kernel_size
self.dilations = dilations
self.mlp_ratio = mlp_ratio
self.qkv_bias = qkv_bias
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.drop_path_rate = drop_path_rate
self.hidden_act = hidden_act
self.layer_norm_eps = layer_norm_eps
self.initializer_range = initializer_range
# we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
self.layer_scale_init_value = layer_scale_init_value
self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
self._out_features, self._out_indices = get_aligned_output_features_output_indices(
out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
)
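# Illustrative sketch (comment only, not part of the original file): with the defaults above, the
# derived attributes work out as follows.
#
#     config = DinatConfig()
#     assert config.hidden_size == 64 * 2 ** 3  # 512, the channel dim after the last of the 4 stages
#     assert config.stage_names == ["stem", "stage1", "stage2", "stage3", "stage4"]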
__all__ = ["DinatConfig"]
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/dinat/__init__.py | src/transformers/models/dinat/__init__.py | # Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure
if TYPE_CHECKING:
from .configuration_dinat import *
from .modeling_dinat import *
else:
import sys
_file = globals()["__file__"]
sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/dinat/modeling_dinat.py | src/transformers/models/dinat/modeling_dinat.py | # coding=utf-8
# Copyright 2022 SHI Labs and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch Dilated Neighborhood Attention Transformer model."""
import math
from dataclasses import dataclass
from typing import Optional, Union
import torch
from torch import nn
from ...activations import ACT2FN
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import (
ModelOutput,
OptionalDependencyNotAvailable,
auto_docstring,
is_natten_available,
logging,
requires_backends,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_dinat import DinatConfig
if is_natten_available():
from natten.functional import natten2dav, natten2dqkrpb
else:
def natten2dqkrpb(*args, **kwargs):
raise OptionalDependencyNotAvailable()
def natten2dav(*args, **kwargs):
raise OptionalDependencyNotAvailable()
logger = logging.get_logger(__name__)
# drop_path and DinatDropPath are from the timm library.
@dataclass
@auto_docstring(
custom_intro="""
Dinat encoder's outputs, with potential hidden states and attentions.
"""
)
class DinatEncoderOutput(ModelOutput):
r"""
reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, hidden_size, height, width)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
include the spatial dimensions.
"""
last_hidden_state: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
attentions: Optional[tuple[torch.FloatTensor, ...]] = None
reshaped_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
@dataclass
@auto_docstring(
custom_intro="""
Dinat model's outputs that also contains a pooling of the last hidden states.
"""
)
class DinatModelOutput(ModelOutput):
r"""
pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`, *optional*, returned when `add_pooling_layer=True` is passed):
Average pooling of the last layer hidden-state.
reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, hidden_size, height, width)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
include the spatial dimensions.
"""
last_hidden_state: Optional[torch.FloatTensor] = None
pooler_output: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
attentions: Optional[tuple[torch.FloatTensor, ...]] = None
reshaped_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
@dataclass
@auto_docstring(
custom_intro="""
Dinat outputs for image classification.
"""
)
class DinatImageClassifierOutput(ModelOutput):
r"""
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Classification (or regression if config.num_labels==1) loss.
logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):
Classification (or regression if config.num_labels==1) scores (before SoftMax).
reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, hidden_size, height, width)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
include the spatial dimensions.
"""
loss: Optional[torch.FloatTensor] = None
logits: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
attentions: Optional[tuple[torch.FloatTensor, ...]] = None
reshaped_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
class DinatEmbeddings(nn.Module):
"""
Construct the patch embeddings (patch projection followed by layer norm and dropout). Dinat uses
relative position biases inside attention rather than absolute position embeddings.
"""
def __init__(self, config):
super().__init__()
self.patch_embeddings = DinatPatchEmbeddings(config)
self.norm = nn.LayerNorm(config.embed_dim)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, pixel_values: Optional[torch.FloatTensor]) -> tuple[torch.Tensor]:
embeddings = self.patch_embeddings(pixel_values)
embeddings = self.norm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
class DinatPatchEmbeddings(nn.Module):
"""
This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial
`hidden_states` (patch embeddings) of shape `(batch_size, height, width, hidden_size)` to be consumed by a
Transformer.
"""
def __init__(self, config):
super().__init__()
patch_size = config.patch_size
num_channels, hidden_size = config.num_channels, config.embed_dim
self.num_channels = num_channels
if patch_size == 4:
pass
else:
# TODO: Support arbitrary patch sizes.
raise ValueError("Dinat only supports patch size of 4 at the moment.")
self.projection = nn.Sequential(
nn.Conv2d(self.num_channels, hidden_size // 2, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1)),
nn.Conv2d(hidden_size // 2, hidden_size, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1)),
)
def forward(self, pixel_values: Optional[torch.FloatTensor]) -> torch.Tensor:
_, num_channels, height, width = pixel_values.shape
if num_channels != self.num_channels:
raise ValueError(
"Make sure that the channel dimension of the pixel values match with the one set in the configuration."
)
embeddings = self.projection(pixel_values)
embeddings = embeddings.permute(0, 2, 3, 1)
return embeddings
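# Shape sketch (comment only, illustrative sizes): with the default config (num_channels=3,
# embed_dim=64) and a 224x224 image, the two stride-2 convolutions above implement the
# patch_size=4 embedding:
#
#     pixel_values (1, 3, 224, 224) -> projection -> (1, 64, 56, 56) -> permute -> (1, 56, 56, 64)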
class DinatDownsampler(nn.Module):
"""
Convolutional Downsampling Layer.
Args:
dim (`int`):
Number of input channels.
norm_layer (`nn.Module`, *optional*, defaults to `nn.LayerNorm`):
Normalization layer class.
"""
def __init__(self, dim: int, norm_layer: nn.Module = nn.LayerNorm) -> None:
super().__init__()
self.dim = dim
self.reduction = nn.Conv2d(dim, 2 * dim, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
self.norm = norm_layer(2 * dim)
def forward(self, input_feature: torch.Tensor) -> torch.Tensor:
input_feature = self.reduction(input_feature.permute(0, 3, 1, 2)).permute(0, 2, 3, 1)
input_feature = self.norm(input_feature)
return input_feature
# Copied from transformers.models.beit.modeling_beit.drop_path
def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
"""
Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
"""
if drop_prob == 0.0 or not training:
return input
keep_prob = 1 - drop_prob
shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
random_tensor.floor_() # binarize
output = input.div(keep_prob) * random_tensor
return output
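# Worked example (comment only): with drop_prob=0.1 and training=True, each sample in the batch is
# zeroed with probability 0.1 and the survivors are rescaled by 1 / 0.9, so the expected value of
# the output matches the input:
#
#     x = torch.ones(4, 8)
#     y = drop_path(x, drop_prob=0.1, training=True)
#     # each row of y is either all zeros or all 1 / 0.9
#
# With training=False the input is returned unchanged.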
# Copied from transformers.models.beit.modeling_beit.BeitDropPath with Beit->Dinat
class DinatDropPath(nn.Module):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
def __init__(self, drop_prob: Optional[float] = None) -> None:
super().__init__()
self.drop_prob = drop_prob
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
return drop_path(hidden_states, self.drop_prob, self.training)
def extra_repr(self) -> str:
return f"p={self.drop_prob}"
class NeighborhoodAttention(nn.Module):
def __init__(self, config, dim, num_heads, kernel_size, dilation):
super().__init__()
if dim % num_heads != 0:
raise ValueError(
f"The hidden size ({dim}) is not a multiple of the number of attention heads ({num_heads})"
)
self.num_attention_heads = num_heads
self.attention_head_size = int(dim / num_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.kernel_size = kernel_size
self.dilation = dilation
# rpb is learnable relative positional biases; the same concept is used in Swin.
self.rpb = nn.Parameter(torch.zeros(num_heads, (2 * self.kernel_size - 1), (2 * self.kernel_size - 1)))
self.query = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias)
self.key = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias)
self.value = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def forward(
self,
hidden_states: torch.Tensor,
output_attentions: Optional[bool] = False,
) -> tuple[torch.Tensor]:
# hidden_states comes in as (batch_size, height, width, channels); the natten 2D kernels used below
# expect query/key/value laid out as (batch_size, num_heads, height, width, head_dim), so keep the
# spatial dimensions instead of flattening them into a sequence length.
batch_size, height, width, _ = hidden_states.shape
query_layer = (
self.query(hidden_states)
.view(batch_size, height, width, self.num_attention_heads, self.attention_head_size)
.permute(0, 3, 1, 2, 4)
)
key_layer = (
self.key(hidden_states)
.view(batch_size, height, width, self.num_attention_heads, self.attention_head_size)
.permute(0, 3, 1, 2, 4)
)
value_layer = (
self.value(hidden_states)
.view(batch_size, height, width, self.num_attention_heads, self.attention_head_size)
.permute(0, 3, 1, 2, 4)
)
# Apply the scale factor before computing attention weights. It's usually more efficient because
# attention weights are typically a bigger tensor compared to query.
# It gives identical results because scalar multiplication commutes with matrix multiplication.
query_layer = query_layer / math.sqrt(self.attention_head_size)
# Compute NA between "query" and "key" to get the raw attention scores, and add relative positional biases.
attention_scores = natten2dqkrpb(query_layer, key_layer, self.rpb, self.kernel_size, self.dilation)
# Normalize the attention scores to probabilities.
attention_probs = nn.functional.softmax(attention_scores, dim=-1)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
context_layer = natten2dav(attention_probs, value_layer, self.kernel_size, self.dilation)
context_layer = context_layer.permute(0, 2, 3, 1, 4).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(new_context_layer_shape)
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
return outputs
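# Layout sketch (comment only, a hedged summary of natten's expected layout): natten2dqkrpb takes
# query/key of shape (batch, heads, height, width, head_dim) plus the relative position bias and
# returns attention scores over each pixel's dilated kernel_size x kernel_size neighborhood;
# natten2dav maps those scores and the values back to (batch, heads, height, width, head_dim),
# which the permute/reshape above flattens into (batch, height, width, all_head_size).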
class NeighborhoodAttentionOutput(nn.Module):
def __init__(self, config, dim):
super().__init__()
self.dense = nn.Linear(dim, dim)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
return hidden_states
class NeighborhoodAttentionModule(nn.Module):
def __init__(self, config, dim, num_heads, kernel_size, dilation):
super().__init__()
self.self = NeighborhoodAttention(config, dim, num_heads, kernel_size, dilation)
self.output = NeighborhoodAttentionOutput(config, dim)
def forward(
self,
hidden_states: torch.Tensor,
output_attentions: Optional[bool] = False,
) -> tuple[torch.Tensor]:
self_outputs = self.self(hidden_states, output_attentions)
attention_output = self.output(self_outputs[0], hidden_states)
outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
return outputs
class DinatIntermediate(nn.Module):
def __init__(self, config, dim):
super().__init__()
self.dense = nn.Linear(dim, int(config.mlp_ratio * dim))
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
class DinatOutput(nn.Module):
def __init__(self, config, dim):
super().__init__()
self.dense = nn.Linear(int(config.mlp_ratio * dim), dim)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
return hidden_states
class DinatLayer(nn.Module):
def __init__(self, config, dim, num_heads, dilation, drop_path_rate=0.0):
super().__init__()
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.kernel_size = config.kernel_size
self.dilation = dilation
self.window_size = self.kernel_size * self.dilation
self.layernorm_before = nn.LayerNorm(dim, eps=config.layer_norm_eps)
self.attention = NeighborhoodAttentionModule(
config, dim, num_heads, kernel_size=self.kernel_size, dilation=self.dilation
)
self.drop_path = DinatDropPath(drop_path_rate) if drop_path_rate > 0.0 else nn.Identity()
self.layernorm_after = nn.LayerNorm(dim, eps=config.layer_norm_eps)
self.intermediate = DinatIntermediate(config, dim)
self.output = DinatOutput(config, dim)
self.layer_scale_parameters = (
nn.Parameter(config.layer_scale_init_value * torch.ones((2, dim)), requires_grad=True)
if config.layer_scale_init_value > 0
else None
)
def maybe_pad(self, hidden_states, height, width):
window_size = self.window_size
pad_values = (0, 0, 0, 0, 0, 0)
if height < window_size or width < window_size:
pad_l = pad_t = 0
pad_r = max(0, window_size - width)
pad_b = max(0, window_size - height)
pad_values = (0, 0, pad_l, pad_r, pad_t, pad_b)
hidden_states = nn.functional.pad(hidden_states, pad_values)
return hidden_states, pad_values
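# Padding sketch (comment only, illustrative sizes): with kernel_size=7 and dilation=4 the
# effective window is 7 * 4 = 28, so a 20x20 feature map is padded on the bottom/right to 28x28
# before attention and cropped back to 20x20 afterwards (see `was_padded` below in `forward`).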
def forward(
self,
hidden_states: torch.Tensor,
output_attentions: Optional[bool] = False,
) -> tuple[torch.Tensor, torch.Tensor]:
batch_size, height, width, channels = hidden_states.size()
shortcut = hidden_states
hidden_states = self.layernorm_before(hidden_states)
# pad hidden_states if they are smaller than kernel size x dilation
hidden_states, pad_values = self.maybe_pad(hidden_states, height, width)
_, height_pad, width_pad, _ = hidden_states.shape
attention_outputs = self.attention(hidden_states, output_attentions=output_attentions)
attention_output = attention_outputs[0]
was_padded = pad_values[3] > 0 or pad_values[5] > 0
if was_padded:
attention_output = attention_output[:, :height, :width, :].contiguous()
if self.layer_scale_parameters is not None:
attention_output = self.layer_scale_parameters[0] * attention_output
hidden_states = shortcut + self.drop_path(attention_output)
layer_output = self.layernorm_after(hidden_states)
layer_output = self.output(self.intermediate(layer_output))
if self.layer_scale_parameters is not None:
layer_output = self.layer_scale_parameters[1] * layer_output
layer_output = hidden_states + self.drop_path(layer_output)
layer_outputs = (layer_output, attention_outputs[1]) if output_attentions else (layer_output,)
return layer_outputs
class DinatStage(nn.Module):
def __init__(self, config, dim, depth, num_heads, dilations, drop_path_rate, downsample):
super().__init__()
self.config = config
self.dim = dim
self.layers = nn.ModuleList(
[
DinatLayer(
config=config,
dim=dim,
num_heads=num_heads,
dilation=dilations[i],
drop_path_rate=drop_path_rate[i],
)
for i in range(depth)
]
)
# patch merging layer
if downsample is not None:
self.downsample = downsample(dim=dim, norm_layer=nn.LayerNorm)
else:
self.downsample = None
self.pointing = False
def forward(
self,
hidden_states: torch.Tensor,
output_attentions: Optional[bool] = False,
) -> tuple[torch.Tensor]:
_, height, width, _ = hidden_states.size()
for i, layer_module in enumerate(self.layers):
layer_outputs = layer_module(hidden_states, output_attentions)
hidden_states = layer_outputs[0]
hidden_states_before_downsampling = hidden_states
if self.downsample is not None:
hidden_states = self.downsample(hidden_states_before_downsampling)
stage_outputs = (hidden_states, hidden_states_before_downsampling)
if output_attentions:
stage_outputs += layer_outputs[1:]
return stage_outputs
class DinatEncoder(nn.Module):
def __init__(self, config):
super().__init__()
self.num_levels = len(config.depths)
self.config = config
dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths), device="cpu")]
self.levels = nn.ModuleList(
[
DinatStage(
config=config,
dim=int(config.embed_dim * 2**i_layer),
depth=config.depths[i_layer],
num_heads=config.num_heads[i_layer],
dilations=config.dilations[i_layer],
drop_path_rate=dpr[sum(config.depths[:i_layer]) : sum(config.depths[: i_layer + 1])],
downsample=DinatDownsampler if (i_layer < self.num_levels - 1) else None,
)
for i_layer in range(self.num_levels)
]
)
def forward(
self,
hidden_states: torch.Tensor,
output_attentions: Optional[bool] = False,
output_hidden_states: Optional[bool] = False,
output_hidden_states_before_downsampling: Optional[bool] = False,
return_dict: Optional[bool] = True,
) -> Union[tuple, DinatEncoderOutput]:
all_hidden_states = () if output_hidden_states else None
all_reshaped_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
if output_hidden_states:
# rearrange b h w c -> b c h w
reshaped_hidden_state = hidden_states.permute(0, 3, 1, 2)
all_hidden_states += (hidden_states,)
all_reshaped_hidden_states += (reshaped_hidden_state,)
for i, layer_module in enumerate(self.levels):
layer_outputs = layer_module(hidden_states, output_attentions)
hidden_states = layer_outputs[0]
hidden_states_before_downsampling = layer_outputs[1]
if output_hidden_states and output_hidden_states_before_downsampling:
# rearrange b h w c -> b c h w
reshaped_hidden_state = hidden_states_before_downsampling.permute(0, 3, 1, 2)
all_hidden_states += (hidden_states_before_downsampling,)
all_reshaped_hidden_states += (reshaped_hidden_state,)
elif output_hidden_states and not output_hidden_states_before_downsampling:
# rearrange b h w c -> b c h w
reshaped_hidden_state = hidden_states.permute(0, 3, 1, 2)
all_hidden_states += (hidden_states,)
all_reshaped_hidden_states += (reshaped_hidden_state,)
if output_attentions:
all_self_attentions += layer_outputs[2:]
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
return DinatEncoderOutput(
last_hidden_state=hidden_states,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
reshaped_hidden_states=all_reshaped_hidden_states,
)
@auto_docstring
class DinatPreTrainedModel(PreTrainedModel):
config: DinatConfig
base_model_prefix = "dinat"
main_input_name = "pixel_values"
input_modalities = ("image",)
@auto_docstring
class DinatModel(DinatPreTrainedModel):
def __init__(self, config, add_pooling_layer=True):
r"""
add_pooling_layer (bool, *optional*, defaults to `True`):
Whether to add a pooling layer
"""
super().__init__(config)
requires_backends(self, ["natten"])
self.config = config
self.num_levels = len(config.depths)
self.num_features = int(config.embed_dim * 2 ** (self.num_levels - 1))
self.embeddings = DinatEmbeddings(config)
self.encoder = DinatEncoder(config)
self.layernorm = nn.LayerNorm(self.num_features, eps=config.layer_norm_eps)
self.pooler = nn.AdaptiveAvgPool1d(1) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.embeddings.patch_embeddings
@auto_docstring
def forward(
self,
pixel_values: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
**kwargs,
) -> Union[tuple, DinatModelOutput]:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError("You have to specify pixel_values")
embedding_output = self.embeddings(pixel_values)
encoder_outputs = self.encoder(
embedding_output,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = encoder_outputs[0]
sequence_output = self.layernorm(sequence_output)
pooled_output = None
if self.pooler is not None:
pooled_output = self.pooler(sequence_output.flatten(1, 2).transpose(1, 2))
pooled_output = torch.flatten(pooled_output, 1)
if not return_dict:
output = (sequence_output, pooled_output) + encoder_outputs[1:]
return output
return DinatModelOutput(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
reshaped_hidden_states=encoder_outputs.reshaped_hidden_states,
)
@auto_docstring(
custom_intro="""
Dinat Model transformer with an image classification head on top (a linear layer on top of the pooled
final hidden state), e.g. for ImageNet.
"""
)
class DinatForImageClassification(DinatPreTrainedModel):
def __init__(self, config):
super().__init__(config)
requires_backends(self, ["natten"])
self.num_labels = config.num_labels
self.dinat = DinatModel(config)
# Classifier head
self.classifier = (
nn.Linear(self.dinat.num_features, config.num_labels) if config.num_labels > 0 else nn.Identity()
)
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
pixel_values: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
**kwargs,
) -> Union[tuple, DinatImageClassifierOutput]:
r"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.dinat(
pixel_values,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
pooled_output = outputs[1]
logits = self.classifier(pooled_output)
loss = None
if labels is not None:
loss = self.loss_function(labels, logits, self.config)
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return DinatImageClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
reshaped_hidden_states=outputs.reshaped_hidden_states,
)
@auto_docstring(
custom_intro="""
Dinat backbone, to be used with frameworks like DETR and MaskFormer.
"""
)
class DinatBackbone(DinatPreTrainedModel, BackboneMixin):
def __init__(self, config):
super().__init__(config)
super()._init_backbone(config)
requires_backends(self, ["natten"])
self.embeddings = DinatEmbeddings(config)
self.encoder = DinatEncoder(config)
self.num_features = [config.embed_dim] + [int(config.embed_dim * 2**i) for i in range(len(config.depths))]
# Add layer norms to hidden states of out_features
hidden_states_norms = {}
for stage, num_channels in zip(self._out_features, self.channels):
hidden_states_norms[stage] = nn.LayerNorm(num_channels)
self.hidden_states_norms = nn.ModuleDict(hidden_states_norms)
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.embeddings.patch_embeddings
@auto_docstring
def forward(
self,
pixel_values: torch.Tensor,
output_hidden_states: Optional[bool] = None,
output_attentions: Optional[bool] = None,
return_dict: Optional[bool] = None,
**kwargs,
) -> BackboneOutput:
r"""
Examples:
```python
>>> from transformers import AutoImageProcessor, AutoBackbone
>>> import torch
>>> from PIL import Image
>>> import requests
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> processor = AutoImageProcessor.from_pretrained("shi-labs/nat-mini-in1k-224")
>>> model = AutoBackbone.from_pretrained(
... "shi-labs/nat-mini-in1k-224", out_features=["stage1", "stage2", "stage3", "stage4"]
... )
>>> inputs = processor(image, return_tensors="pt")
>>> outputs = model(**inputs)
>>> feature_maps = outputs.feature_maps
>>> list(feature_maps[-1].shape)
[1, 512, 7, 7]
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
embedding_output = self.embeddings(pixel_values)
outputs = self.encoder(
embedding_output,
output_attentions=output_attentions,
output_hidden_states=True,
output_hidden_states_before_downsampling=True,
return_dict=True,
)
hidden_states = outputs.reshaped_hidden_states
feature_maps = ()
for stage, hidden_state in zip(self.stage_names, hidden_states):
if stage in self.out_features:
batch_size, num_channels, height, width = hidden_state.shape
hidden_state = hidden_state.permute(0, 2, 3, 1).contiguous()
hidden_state = hidden_state.view(batch_size, height * width, num_channels)
hidden_state = self.hidden_states_norms[stage](hidden_state)
hidden_state = hidden_state.view(batch_size, height, width, num_channels)
hidden_state = hidden_state.permute(0, 3, 1, 2).contiguous()
feature_maps += (hidden_state,)
if not return_dict:
output = (feature_maps,)
if output_hidden_states:
output += (outputs.hidden_states,)
return output
return BackboneOutput(
feature_maps=feature_maps,
hidden_states=outputs.hidden_states if output_hidden_states else None,
attentions=outputs.attentions,
)
__all__ = ["DinatForImageClassification", "DinatModel", "DinatPreTrainedModel", "DinatBackbone"]
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/lfm2_vl/modeling_lfm2_vl.py | src/transformers/models/lfm2_vl/modeling_lfm2_vl.py | # π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨
# This file was automatically generated from src/transformers/models/lfm2_vl/modular_lfm2_vl.py.
# Do NOT edit this file manually as any edits will be overwritten by the generation of
# the file from the modular. If any change should be done, please apply the change to the
# modular_lfm2_vl.py file directly. One of our CI enforces this.
# π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨
# coding=utf-8
# Copyright 2025 the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import Optional, Union
import torch
from torch import nn
from ...activations import ACT2FN
from ...cache_utils import Cache
from ...generation import GenerationMixin
from ...modeling_outputs import BaseModelOutputWithPast, ModelOutput
from ...modeling_utils import PreTrainedModel
from ...processing_utils import Unpack
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple
from ..auto import AutoModel
from .configuration_lfm2_vl import Lfm2VlConfig
class Lfm2VlMultiModalProjector(nn.Module):
def __init__(self, config: Lfm2VlConfig):
super().__init__()
in_channels = config.vision_config.hidden_size * (config.downsample_factor**2)
self.factor = config.downsample_factor
self.layer_norm = nn.LayerNorm(in_channels)
self.linear_1 = nn.Linear(
in_channels,
config.projector_hidden_size,
bias=config.projector_bias,
)
self.act = ACT2FN[config.projector_hidden_act]
self.linear_2 = nn.Linear(
config.projector_hidden_size,
config.text_config.hidden_size,
bias=config.projector_bias,
)
def forward(self, image_features: torch.Tensor):
image_features = self.pixel_unshuffle(image_features)
image_features = self.layer_norm(image_features)
hidden_states = self.linear_1(image_features)
hidden_states = self.act(hidden_states)
hidden_states = self.linear_2(hidden_states)
return hidden_states
def pixel_unshuffle(self, hidden_states: torch.Tensor):
batch_size, width, height, channels = hidden_states.size()
hidden_states = hidden_states.reshape(batch_size, width, height // self.factor, channels * self.factor)
hidden_states = hidden_states.permute(0, 2, 1, 3)
hidden_states = hidden_states.reshape(
batch_size, height // self.factor, width // self.factor, channels * self.factor**2
)
hidden_states = hidden_states.permute(0, 2, 1, 3)
return hidden_states
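# Shape sketch (comment only, hypothetical sizes): with downsample_factor=2 and a vision hidden
# size of 1024, pixel_unshuffle folds each 2x2 spatial block into the channel dimension:
#
#     (1, 32, 32, 1024) -> reshape -> (1, 32, 16, 2048) -> permute -> (1, 16, 32, 2048)
#                       -> reshape -> (1, 16, 16, 4096) -> permute -> (1, 16, 16, 4096)
#
# which matches the `in_channels = hidden_size * downsample_factor ** 2` expected by `layer_norm`.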
@auto_docstring
class Lfm2VlPreTrainedModel(PreTrainedModel):
config: Lfm2VlConfig
base_model_prefix = "model"
input_modalities = ("image", "text")
supports_gradient_checkpointing = True
_skip_keys_device_placement = "past_key_values"
_supports_flash_attn = True
_supports_sdpa = True
_can_compile_fullgraph = False
_supports_flex_attn = True
_supports_attention_backend = True
@dataclass
@auto_docstring(
custom_intro="""
Base class for Lfm2Vl causal language model (or autoregressive) outputs.
"""
)
class Lfm2VlCausalLMOutputWithPast(ModelOutput):
r"""
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Language modeling loss (for next-token prediction).
logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
`past_key_values` input) to speed up sequential decoding.
image_hidden_states (`torch.FloatTensor`, *optional*):
A `torch.FloatTensor` of size `(batch_size, num_images, sequence_length, hidden_size)`.
image_hidden_states of the model produced by the vision encoder and after projecting the last hidden state.
"""
loss: Optional[torch.FloatTensor] = None
logits: Optional[torch.FloatTensor] = None
past_key_values: Optional[Cache] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
image_hidden_states: Optional[torch.FloatTensor] = None
@dataclass
@auto_docstring(
custom_intro="""
Base class for Lfm2Vl outputs, with hidden states and attentions.
"""
)
class Lfm2VlModelOutputWithPast(BaseModelOutputWithPast):
r"""
past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
`past_key_values` input) to speed up sequential decoding.
image_hidden_states (`torch.FloatTensor`, *optional*):
A `torch.FloatTensor` of size `(batch_size, num_images, sequence_length, hidden_size)`.
image_hidden_states of the model produced by the vision encoder and after projecting the last hidden state.
"""
image_hidden_states: Optional[torch.FloatTensor] = None
@auto_docstring(
custom_intro="""
The Lfm2Vl model which consists of a vision backbone and a language model, without a language modeling head.
"""
)
class Lfm2VlModel(Lfm2VlPreTrainedModel):
_checkpoint_conversion_mapping = {}
def __init__(self, config: Lfm2VlConfig):
super().__init__(config)
self.vision_tower = AutoModel.from_config(config.vision_config)
self.multi_modal_projector = Lfm2VlMultiModalProjector(config)
self.language_model = AutoModel.from_config(config.text_config)
self.post_init()
def get_input_embeddings(self):
return self.language_model.get_input_embeddings()
def set_input_embeddings(self, value):
self.language_model.set_input_embeddings(value)
def get_image_features(
self,
pixel_values: torch.FloatTensor,
spatial_shapes: torch.Tensor,
pixel_attention_mask: torch.Tensor,
**kwargs,
) -> list[torch.Tensor]:
"""
Obtains the image last hidden states from the vision tower and applies the multimodal projection.
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, channels, height, width)`):
The tensors corresponding to the input images.
spatial_shapes (`torch.Tensor` of shape `(batch_size, 2)`):
The spatial shapes of the input images.
pixel_attention_mask (`torch.Tensor` of shape `(batch_size, height, width)`):
The pixel attention mask of the input images.
Returns:
image_features (`list[torch.Tensor]`): List of image feature tensors, one per image, each of shape `(image_length, embed_dim)`.
"""
image_outputs = self.vision_tower(
pixel_values=pixel_values,
spatial_shapes=spatial_shapes,
pixel_attention_mask=pixel_attention_mask,
).last_hidden_state
img_feature_lengths = pixel_attention_mask.sum(dim=1)
image_features = []
for img_idx in range(image_outputs.size(0)):
feature = image_outputs[img_idx]
# unpad the image representation
feature = feature[: img_feature_lengths[img_idx], :].unsqueeze(0)
# reshape to original height and width
feature_org_h, feature_org_w = spatial_shapes[img_idx]
feature = feature.reshape(1, feature_org_h, feature_org_w, -1)
# project the image representation
img_embedding = self.multi_modal_projector(feature)
# flatten here to handle variable length in naflex
img_embedding = img_embedding.reshape(-1, img_embedding.size(-1))
image_features.append(img_embedding)
return image_features
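# Flow sketch (comment only, hypothetical sizes): for an image whose valid patch grid is 16 x 24,
# the per-image sum of `pixel_attention_mask` gives 384 valid vision tokens; they are unpadded,
# reshaped to (1, 16, 24, vision_hidden), passed through the factor-2 projector, and flattened to
# (96, text_hidden) image embeddings, i.e. (16 / 2) * (24 / 2) tokens for that image.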
def get_placeholder_mask(
self, input_ids: torch.LongTensor, inputs_embeds: torch.FloatTensor, image_features: torch.FloatTensor
):
"""
Obtains multimodal placeholder mask from `input_ids` or `inputs_embeds`, and checks that the placeholder token count is
equal to the length of multimodal features. If the lengths are different, an error is raised.
"""
if input_ids is None:
special_image_mask = inputs_embeds == self.get_input_embeddings()(
torch.tensor(self.config.image_token_id, dtype=torch.long, device=inputs_embeds.device)
)
special_image_mask = special_image_mask.all(-1)
else:
special_image_mask = input_ids == self.config.image_token_id
n_image_tokens = special_image_mask.sum()
special_image_mask = special_image_mask.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device)
n_image_features = image_features.shape[0]
if inputs_embeds[special_image_mask].numel() != image_features.numel():
raise ValueError(
f"Image features and image tokens do not match: tokens: {n_image_tokens}, features {n_image_features}"
)
return special_image_mask
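# Usage sketch (comment only): if `input_ids` contains N positions equal to `config.image_token_id`,
# the returned mask selects exactly those N rows of `inputs_embeds`, and `forward` scatters the N
# projected image features into them via `masked_scatter`; a mismatch between N and the number of
# image features raises the ValueError above.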
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
pixel_values: Optional[torch.FloatTensor] = None,
spatial_shapes: Optional[torch.Tensor] = None,
pixel_attention_mask: Optional[torch.Tensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
use_cache: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple, Lfm2VlModelOutputWithPast]:
r"""
spatial_shapes (`torch.Tensor` of shape `(batch_size, 2)`, *optional*):
The spatial shapes of the input images.
pixel_attention_mask (`torch.Tensor` of shape `(batch_size, height, width)`, *optional*):
The pixel attention mask of the input images.
"""
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
if inputs_embeds is None:
inputs_embeds = self.get_input_embeddings()(input_ids)
if pixel_values is not None:
image_features = self.get_image_features(
pixel_values=pixel_values,
spatial_shapes=spatial_shapes,
pixel_attention_mask=pixel_attention_mask,
)
image_features = torch.cat(image_features, dim=0).to(inputs_embeds.device, inputs_embeds.dtype)
special_image_mask = self.get_placeholder_mask(
input_ids=input_ids,
inputs_embeds=inputs_embeds,
image_features=image_features,
)
inputs_embeds = inputs_embeds.masked_scatter(special_image_mask, image_features)
outputs = self.language_model(
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
cache_position=cache_position,
**kwargs,
)
return Lfm2VlModelOutputWithPast(
last_hidden_state=outputs.last_hidden_state,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
image_hidden_states=image_features if pixel_values is not None else None,
)
@auto_docstring(
custom_intro="""
The LFM2_VL model which consists of a vision backbone and a language model.
"""
)
class Lfm2VlForConditionalGeneration(Lfm2VlPreTrainedModel, GenerationMixin):
_checkpoint_conversion_mapping = {}
_tied_weights_keys = {"lm_head.weight": "model.language_model.embed_tokens.weight"}
def __init__(self, config: Lfm2VlConfig):
super().__init__(config)
self.model = Lfm2VlModel(config)
self.lm_head = nn.Linear(config.text_config.hidden_size, config.text_config.vocab_size, bias=False)
self.post_init()
def get_input_embeddings(self):
return self.model.get_input_embeddings()
def set_input_embeddings(self, value):
self.model.set_input_embeddings(value)
def get_output_embeddings(self) -> nn.Module:
return self.lm_head
def get_image_features(
self,
pixel_values: torch.FloatTensor,
spatial_shapes: torch.Tensor,
pixel_attention_mask: torch.Tensor,
**kwargs,
):
return self.model.get_image_features(
pixel_values=pixel_values,
spatial_shapes=spatial_shapes,
pixel_attention_mask=pixel_attention_mask,
**kwargs,
)
@can_return_tuple
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
pixel_values: Optional[torch.FloatTensor] = None,
spatial_shapes: Optional[torch.Tensor] = None,
pixel_attention_mask: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
logits_to_keep: Union[int, torch.Tensor] = 0,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple, Lfm2VlCausalLMOutputWithPast]:
r"""
pixel_values (`torch.FloatTensor` of shape `(batch_size, channels, height, width)`, *optional*):
The input image tensors.
spatial_shapes (`torch.Tensor` of shape `(batch_size, 2)`, *optional*):
The spatial shapes of the input images.
pixel_attention_mask (`torch.Tensor` of shape `(batch_size, height, width)`, *optional*):
The pixel attention mask of the input images.
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Example:
```python
>>> from PIL import Image
>>> import requests
>>> from transformers import AutoProcessor, AutoModelForImageTextToText
>>> from transformers.image_utils import load_image
>>> model = AutoModelForImageTextToText.from_pretrained(
... "LiquidAI/LFM2-VL-1.6B",
... )
>>> processor = AutoProcessor.from_pretrained(
... "LiquidAI/LFM2-VL-1.6B",
... )
>>> url = "https://www.ilankelman.org/stopsigns/australia.jpg"
>>> image = load_image(url)
>>> conversation = [
... {
... "role": "user",
... "content": [
... {"type": "image", "image": image},
... {"type": "text", "text": "What is in this image?"},
... ],
... },
... ]
>>> inputs = processor.apply_chat_template(
... conversation,
... add_generation_prompt=True,
... tokenize=True,
... return_dict=True,
... return_tensors="pt"
... )
>>> # Generate
>>> outputs = model.generate(**inputs, max_new_tokens=45)
>>> processor.batch_decode(outputs, skip_special_tokens=True)[0]
'This image depicts a vibrant street scene in what appears to be a Chinatown or similar cultural area. The focal point is a large red stop sign with white lettering, mounted on a pole.'
```"""
outputs = self.model(
input_ids=input_ids,
pixel_values=pixel_values,
spatial_shapes=spatial_shapes,
pixel_attention_mask=pixel_attention_mask,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
cache_position=cache_position,
**kwargs,
)
hidden_states = outputs[0]
# Only compute necessary logits, and do not upcast them to float if we are not computing the loss
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_head(hidden_states[:, slice_indices, :])
loss = None
if labels is not None:
loss = self.loss_function(
logits=logits,
labels=labels,
vocab_size=self.config.text_config.vocab_size,
**kwargs,
)
return Lfm2VlCausalLMOutputWithPast(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
image_hidden_states=outputs.image_hidden_states,
)
def prepare_inputs_for_generation(
self,
input_ids,
past_key_values=None,
inputs_embeds=None,
pixel_values=None,
attention_mask=None,
cache_position=None,
logits_to_keep=None,
is_first_iteration=False,
**kwargs,
):
# Overwritten -- in specific circumstances we don't want to forward image inputs to the model
model_inputs = super().prepare_inputs_for_generation(
input_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
attention_mask=attention_mask,
cache_position=cache_position,
logits_to_keep=logits_to_keep,
is_first_iteration=is_first_iteration,
**kwargs,
)
if is_first_iteration or not kwargs.get("use_cache", True):
# Pixel values are used only in the first iteration if available
# In subsequent iterations, they are already merged with text and cached
# NOTE: first iteration doesn't have to be prefill, it can be the first
# iteration with a question and a cached system prompt (continuing generation from the cache)
model_inputs["pixel_values"] = pixel_values
return model_inputs
__all__ = ["Lfm2VlForConditionalGeneration", "Lfm2VlPreTrainedModel", "Lfm2VlModel"]
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/lfm2_vl/modular_lfm2_vl.py | src/transformers/models/lfm2_vl/modular_lfm2_vl.py | # coding=utf-8
# Copyright 2025 the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch Lfm2-VL model."""
from typing import Optional, Union
import torch
from torch import nn
from ...activations import ACT2FN
from ...cache_utils import Cache
from ...processing_utils import Unpack
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, logging
from ..llava.modeling_llava import (
LlavaCausalLMOutputWithPast,
LlavaForConditionalGeneration,
LlavaModel,
LlavaModelOutputWithPast,
LlavaPreTrainedModel,
)
from .configuration_lfm2_vl import Lfm2VlConfig
logger = logging.get_logger(__name__)
class Lfm2VlMultiModalProjector(nn.Module):
def __init__(self, config: Lfm2VlConfig):
super().__init__()
in_channels = config.vision_config.hidden_size * (config.downsample_factor**2)
self.factor = config.downsample_factor
self.layer_norm = nn.LayerNorm(in_channels)
self.linear_1 = nn.Linear(
in_channels,
config.projector_hidden_size,
bias=config.projector_bias,
)
self.act = ACT2FN[config.projector_hidden_act]
self.linear_2 = nn.Linear(
config.projector_hidden_size,
config.text_config.hidden_size,
bias=config.projector_bias,
)
def forward(self, image_features: torch.Tensor):
image_features = self.pixel_unshuffle(image_features)
image_features = self.layer_norm(image_features)
hidden_states = self.linear_1(image_features)
hidden_states = self.act(hidden_states)
hidden_states = self.linear_2(hidden_states)
return hidden_states
def pixel_unshuffle(self, hidden_states: torch.Tensor):
batch_size, width, height, channels = hidden_states.size()
hidden_states = hidden_states.reshape(batch_size, width, height // self.factor, channels * self.factor)
hidden_states = hidden_states.permute(0, 2, 1, 3)
hidden_states = hidden_states.reshape(
batch_size, height // self.factor, width // self.factor, channels * self.factor**2
)
hidden_states = hidden_states.permute(0, 2, 1, 3)
return hidden_states
class Lfm2VlPreTrainedModel(LlavaPreTrainedModel):
_can_compile_fullgraph = False
base_model_prefix = "model"
class Lfm2VlCausalLMOutputWithPast(LlavaCausalLMOutputWithPast):
pass
class Lfm2VlModelOutputWithPast(LlavaModelOutputWithPast):
pass
class Lfm2VlModel(LlavaModel):
_checkpoint_conversion_mapping = {}
def __init__(self, config: Lfm2VlConfig):
super().__init__(config)
def get_image_features(
self,
pixel_values: torch.FloatTensor,
spatial_shapes: torch.Tensor,
pixel_attention_mask: torch.Tensor,
**kwargs,
) -> list[torch.Tensor]:
"""
Obtains the image last hidden states from the vision tower and applies the multimodal projection.
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, channels, height, width)`):
The tensors corresponding to the input images.
spatial_shapes (`torch.Tensor` of shape `(batch_size, 2)`):
The spatial shapes of the input images.
pixel_attention_mask (`torch.Tensor` of shape `(batch_size, height, width)`):
The pixel attention mask of the input images.
Returns:
image_features (`list[torch.Tensor]`): List of image feature tensors, one per image, each of shape `(image_length, embed_dim)`.
"""
image_outputs = self.vision_tower(
pixel_values=pixel_values,
spatial_shapes=spatial_shapes,
pixel_attention_mask=pixel_attention_mask,
).last_hidden_state
img_feature_lengths = pixel_attention_mask.sum(dim=1)
image_features = []
for img_idx in range(image_outputs.size(0)):
feature = image_outputs[img_idx]
# unpad the image representation
feature = feature[: img_feature_lengths[img_idx], :].unsqueeze(0)
# reshape to original height and width
feature_org_h, feature_org_w = spatial_shapes[img_idx]
feature = feature.reshape(1, feature_org_h, feature_org_w, -1)
# project the image representation
img_embedding = self.multi_modal_projector(feature)
# flatten here to handle variable length in naflex
img_embedding = img_embedding.reshape(-1, img_embedding.size(-1))
image_features.append(img_embedding)
return image_features
def get_placeholder_mask(
self, input_ids: torch.LongTensor, inputs_embeds: torch.FloatTensor, image_features: torch.FloatTensor
):
"""
Obtains multimodal placeholder mask from `input_ids` or `inputs_embeds`, and checks that the placeholder token count is
equal to the length of multimodal features. If the lengths are different, an error is raised.
"""
if input_ids is None:
special_image_mask = inputs_embeds == self.get_input_embeddings()(
torch.tensor(self.config.image_token_id, dtype=torch.long, device=inputs_embeds.device)
)
special_image_mask = special_image_mask.all(-1)
else:
special_image_mask = input_ids == self.config.image_token_id
n_image_tokens = special_image_mask.sum()
special_image_mask = special_image_mask.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device)
n_image_features = image_features.shape[0]
if inputs_embeds[special_image_mask].numel() != image_features.numel():
raise ValueError(
f"Image features and image tokens do not match: tokens: {n_image_tokens}, features {n_image_features}"
)
return special_image_mask
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
pixel_values: Optional[torch.FloatTensor] = None,
spatial_shapes: Optional[torch.Tensor] = None,
pixel_attention_mask: Optional[torch.Tensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
use_cache: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple, Lfm2VlModelOutputWithPast]:
r"""
spatial_shapes (`torch.Tensor` of shape `(batch_size, 2)`, *optional*):
The spatial shapes of the input images.
pixel_attention_mask (`torch.Tensor` of shape `(batch_size, height, width)`, *optional*):
The pixel attention mask of the input images.
"""
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
if inputs_embeds is None:
inputs_embeds = self.get_input_embeddings()(input_ids)
if pixel_values is not None:
image_features = self.get_image_features(
pixel_values=pixel_values,
spatial_shapes=spatial_shapes,
pixel_attention_mask=pixel_attention_mask,
)
image_features = torch.cat(image_features, dim=0).to(inputs_embeds.device, inputs_embeds.dtype)
special_image_mask = self.get_placeholder_mask(
input_ids=input_ids,
inputs_embeds=inputs_embeds,
image_features=image_features,
)
inputs_embeds = inputs_embeds.masked_scatter(special_image_mask, image_features)
outputs = self.language_model(
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
cache_position=cache_position,
**kwargs,
)
return Lfm2VlModelOutputWithPast(
last_hidden_state=outputs.last_hidden_state,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
image_hidden_states=image_features if pixel_values is not None else None,
)
class Lfm2VlForConditionalGeneration(LlavaForConditionalGeneration):
_checkpoint_conversion_mapping = {}
def get_image_features(
self,
pixel_values: torch.FloatTensor,
spatial_shapes: torch.Tensor,
pixel_attention_mask: torch.Tensor,
**kwargs,
):
return self.model.get_image_features(
pixel_values=pixel_values,
spatial_shapes=spatial_shapes,
pixel_attention_mask=pixel_attention_mask,
**kwargs,
)
@can_return_tuple
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
pixel_values: Optional[torch.FloatTensor] = None,
spatial_shapes: Optional[torch.Tensor] = None,
pixel_attention_mask: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
logits_to_keep: Union[int, torch.Tensor] = 0,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple, Lfm2VlCausalLMOutputWithPast]:
r"""
pixel_values (`torch.FloatTensor` of shape `(batch_size, channels, height, width)`, *optional*):
The input image tensors.
spatial_shapes (`torch.Tensor` of shape `(batch_size, 2)`, *optional*):
The spatial shapes of the input images.
pixel_attention_mask (`torch.Tensor` of shape `(batch_size, height, width)`, *optional*):
The pixel attention mask of the input images.
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Example:
```python
>>> from PIL import Image
>>> import requests
>>> from transformers import AutoProcessor, AutoModelForImageTextToText
>>> from transformers.image_utils import load_image
>>> model = AutoModelForImageTextToText.from_pretrained(
... "LiquidAI/LFM2-VL-1.6B",
... )
>>> processor = AutoProcessor.from_pretrained(
... "LiquidAI/LFM2-VL-1.6B",
... )
>>> url = "https://www.ilankelman.org/stopsigns/australia.jpg"
>>> image = load_image(url)
>>> conversation = [
... {
... "role": "user",
... "content": [
... {"type": "image", "image": image},
... {"type": "text", "text": "What is in this image?"},
... ],
... },
... ]
>>> inputs = processor.apply_chat_template(
... conversation,
... add_generation_prompt=True,
... tokenize=True,
... return_dict=True,
... return_tensors="pt"
... )
>>> # Generate
>>> outputs = model.generate(**inputs, max_new_tokens=45)
>>> processor.batch_decode(outputs, skip_special_tokens=True)[0]
'This image depicts a vibrant street scene in what appears to be a Chinatown or similar cultural area. The focal point is a large red stop sign with white lettering, mounted on a pole.'
```"""
outputs = self.model(
input_ids=input_ids,
pixel_values=pixel_values,
spatial_shapes=spatial_shapes,
pixel_attention_mask=pixel_attention_mask,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
cache_position=cache_position,
**kwargs,
)
hidden_states = outputs[0]
# Only compute necessary logits, and do not upcast them to float if we are not computing the loss
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_head(hidden_states[:, slice_indices, :])
loss = None
if labels is not None:
loss = self.loss_function(
logits=logits,
labels=labels,
vocab_size=self.config.text_config.vocab_size,
**kwargs,
)
return Lfm2VlCausalLMOutputWithPast(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
image_hidden_states=outputs.image_hidden_states,
)
__all__ = ["Lfm2VlForConditionalGeneration", "Lfm2VlPreTrainedModel", "Lfm2VlModel"]
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/lfm2_vl/image_processing_lfm2_vl_fast.py | src/transformers/models/lfm2_vl/image_processing_lfm2_vl_fast.py | # coding=utf-8
# Copyright 2025 the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from functools import lru_cache
from typing import Union
import torch
from torchvision.transforms.v2 import functional as F
from ...image_processing_utils import BatchFeature
from ...image_processing_utils_fast import (
BaseImageProcessorFast,
group_images_by_shape,
reorder_images,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ImageInput,
PILImageResampling,
SizeDict,
)
from ...processing_utils import ImagesKwargs, Unpack
from ...utils import (
TensorType,
auto_docstring,
logging,
)
logger = logging.get_logger(__name__)
def round_by_factor(number: float, factor: int) -> int:
"""Returns the closest integer to 'number' that is divisible by 'factor'."""
return round(number / factor) * factor
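# Editor's illustration (not part of the original file): round_by_factor snaps a dimension to
# the nearest multiple of the given factor, e.g. the encoder patch size.
#   >>> round_by_factor(517, 16), round_by_factor(525, 16)
#   (512, 528)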
def find_closest_aspect_ratio(
aspect_ratio: float,
target_ratios: list[tuple[int, int]],
width: int,
height: int,
image_size: int,
) -> tuple[int, int]:
"""Find the closest aspect ratio from target_ratios to match the input aspect ratio.
Args:
aspect_ratio: The aspect ratio to match (width/height).
target_ratios: List of possible aspect ratios as tuples of (width, height) integers.
width: Original image width in pixels.
height: Original image height in pixels.
image_size: Base size for calculating target area.
Returns:
tuple[int, int]: The best matching ratio as (width, height) integers.
"""
best_ratio_diff = float("inf")
best_ratio = (1, 1)
area = width * height
for ratio in target_ratios:
target_aspect_ratio = ratio[0] / ratio[1]
ratio_diff = abs(aspect_ratio - target_aspect_ratio)
# update best ratio if we found a closer match
if ratio_diff < best_ratio_diff:
best_ratio_diff = ratio_diff
best_ratio = ratio
# if equally close, prefer the ratio that better matches the original image area
elif ratio_diff == best_ratio_diff:
target_area = image_size * image_size * ratio[0] * ratio[1]
if area > 0.5 * target_area:
best_ratio = ratio
return best_ratio
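# Editor's illustration (not part of the original file): for a 1920x1080 image
# (aspect ratio ~1.78) and a small hand-picked set of candidate grids, the closest grid is 2x1.
#   >>> find_closest_aspect_ratio(1920 / 1080, [(1, 1), (2, 1), (2, 2), (3, 2)], width=1920, height=1080, image_size=512)
#   (2, 1)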
# copied from Siglip2ImageProcessor
@lru_cache(maxsize=256)
def get_image_size_for_max_num_patches(
image_height: int, image_width: int, patch_size: int, max_num_patches: int, eps: float = 1e-5
) -> tuple[int, int]:
"""
Determine the target image size for a maximum number of patches, ensuring both dimensions are divisible by the patch size and the image covers at least one patch.
Args:
image_height (`int`):
Original image height.
image_width (`int`):
Original image width.
patch_size (`int`):
Patch size for processing.
max_num_patches (`int`):
Maximum number of patches.
eps (`float`):
Small threshold for binary search.
Returns:
Tuple: (target_height, target_width)
"""
def get_scaled_image_size(scale: float, size: int, patch_size: int) -> int:
scaled_size = size * scale
scaled_size = math.ceil(scaled_size / patch_size) * patch_size # make divisible by patch_size
scaled_size = max(patch_size, scaled_size) # ensure at least 1 patch
return int(scaled_size)
# Binary search for optimal scale
scale_min, scale_max = eps / 10, 100.0
while (scale_max - scale_min) >= eps:
scale = (scale_min + scale_max) / 2
target_height = get_scaled_image_size(scale, image_height, patch_size)
target_width = get_scaled_image_size(scale, image_width, patch_size)
num_patches = (target_height / patch_size) * (target_width / patch_size)
if num_patches <= max_num_patches:
scale_min = scale
else:
scale_max = scale
scale = scale_min
target_height = get_scaled_image_size(scale, image_height, patch_size)
target_width = get_scaled_image_size(scale, image_width, patch_size)
return target_height, target_width
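# Editor's illustration (not part of the original file): the binary search scales a 224x224
# image until its 16x16 patch grid fills, but does not exceed, the patch budget
# (1024 patches = a 32x32 grid = 512x512 pixels).
#   >>> get_image_size_for_max_num_patches(image_height=224, image_width=224, patch_size=16, max_num_patches=1024)
#   (512, 512)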
def convert_image_to_patches(images: "torch.Tensor", patch_size: int) -> "torch.Tensor":
"""
Convert a batched image tensor of shape (batch_size, num_channels, image_height, image_width) into a tensor of patches of shape
(batch_size, num_patches_height * num_patches_width, patch_size * patch_size * num_channels).
"""
batch_size, num_channels, image_height, image_width = images.shape
num_patches_height = image_height // patch_size
num_patches_width = image_width // patch_size
patched_image = images.reshape(
batch_size, num_channels, num_patches_height, patch_size, num_patches_width, patch_size
)
patched_image = patched_image.permute(0, 2, 4, 3, 5, 1)
patched_image = patched_image.reshape(batch_size, num_patches_height * num_patches_width, -1)
return patched_image
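# Editor's illustration (not part of the original file): one 3-channel 32x32 image with a
# patch size of 16 yields 2x2 = 4 patches, each flattened to 16 * 16 * 3 = 768 values.
#   >>> import torch
#   >>> convert_image_to_patches(torch.zeros(1, 3, 32, 32), patch_size=16).shape
#   torch.Size([1, 4, 768])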
def pad_along_first_dim(
images: "torch.Tensor", target_length: int, pad_value: int = 0
) -> tuple["torch.Tensor", "torch.Tensor"]:
"""
Pad the patch dimension (dim 1) of the batched patch tensor to `target_length`, returning the padded tensor and a pixel mask that marks real patches with 1 and padded slots with 0.
"""
current_length = images.shape[1]
padding_length = target_length - current_length
pixel_mask = torch.ones((target_length,), dtype=torch.int32)
if padding_length > 0:
paddings = (0, 0, 0, padding_length, 0, 0)
images = torch.nn.functional.pad(images, paddings, mode="constant", value=pad_value)
pixel_mask[-padding_length:] = 0
return images, pixel_mask
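# Editor's illustration (not part of the original file): padding 4 real patches up to a
# 9-patch budget also returns a pixel mask that marks the padded slots with 0.
#   >>> import torch
#   >>> patches, mask = pad_along_first_dim(torch.ones(1, 4, 768), target_length=9)
#   >>> patches.shape, mask.tolist()
#   (torch.Size([1, 9, 768]), [1, 1, 1, 1, 0, 0, 0, 0, 0])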
class Lfm2VlImageProcessorKwargs(ImagesKwargs, total=False):
"""
downsample_factor (`int`, *optional*, defaults to `2`):
The downsampling factor for images used when resizing the image.
"""
downsample_factor: int
do_image_splitting: bool
min_tiles: int
max_tiles: int
use_thumbnail: bool
min_image_tokens: int
max_image_tokens: int
encoder_patch_size: int
tile_size: int
max_pixels_tolerance: float
do_pad: bool
return_row_col_info: bool
@auto_docstring
class Lfm2VlImageProcessorFast(BaseImageProcessorFast):
downsample_factor = 2
do_image_splitting = True
min_tiles = 2
max_tiles = 10
use_thumbnail = True
min_image_tokens = 64
max_image_tokens = 256
encoder_patch_size = 16
tile_size = 512
max_pixels_tolerance = 2.0
do_resize = True
size = {"height": 512, "width": 512}
resample = PILImageResampling.BILINEAR
do_rescale = True
rescale_factor = 1 / 255
do_normalize = True
do_pad = True
return_row_col_info = False
image_mean = IMAGENET_STANDARD_MEAN
image_std = IMAGENET_STANDARD_STD
valid_kwargs = Lfm2VlImageProcessorKwargs
model_input_names = ["pixel_values", "pixel_attention_mask", "spatial_shapes"]
def __init__(self, **kwargs: Unpack[Lfm2VlImageProcessorKwargs]):
super().__init__(**kwargs)
max_thumbnail_image_patches = self.max_image_tokens * self.downsample_factor**2
tile_size_patches = (self.tile_size // self.encoder_patch_size) ** 2 if self.do_image_splitting else 0
self.max_num_patches = max(
max_thumbnail_image_patches,
tile_size_patches,
)
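# Editor's note (illustrative, not part of the original file): with the class defaults the two
# terms above are equal, so max_num_patches == 1024
# (max_image_tokens * downsample_factor**2 = 256 * 4, and (tile_size // encoder_patch_size)**2 = 32**2).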
@lru_cache(maxsize=256)
def _target_ratios(self, min_tiles: int, max_tiles: int) -> list[tuple[int, int]]:
ratios = [
(w, h)
for n in range(min_tiles, max_tiles + 1)
for w in range(1, n + 1)
for h in range(1, n + 1)
if min_tiles <= w * h <= max_tiles
]
return sorted(set(ratios), key=lambda x: x[0] * x[1])
def _get_grid_layout(
self,
height: int,
width: int,
min_tiles: int,
max_tiles: int,
tile_size: int,
) -> tuple[int, int]:
aspect_ratio = width / height
target_ratios = self._target_ratios(min_tiles, max_tiles)
# find best matching grid configuration
grid_width, grid_height = find_closest_aspect_ratio(aspect_ratio, target_ratios, width, height, tile_size)
target_width = tile_size * grid_width
target_height = tile_size * grid_height
total_patches = grid_width * grid_height
return grid_width, grid_height, target_width, target_height, total_patches
def crop_image_to_patches(
self,
image: "torch.Tensor",
min_tiles: int,
max_tiles: int,
tile_size: int,
use_thumbnail: bool,
thumbnail_size: tuple[int, int],
interpolation: "F.InterpolationMode" = None,
antialias: bool = True,
**kwargs,
) -> "torch.Tensor":
"""
Processes a high resolution image into patches.
This method splits a high resolution image into a grid of smaller patches while trying to maintain
the original aspect ratio. It finds the optimal grid configuration within the specified tile constraints.
"""
batch_size, num_channels, height, width = image.shape
grid_width, grid_height, target_width, target_height, total_patches = self._get_grid_layout(
height, width, min_tiles=min_tiles, max_tiles=max_tiles, tile_size=tile_size
)
resized_image = F.resize(
image, (target_height, target_width), interpolation=interpolation, antialias=antialias
)
# split the image into patches
processed_images = (
resized_image.unfold(2, size=tile_size, step=tile_size)
.unfold(3, size=tile_size, step=tile_size)
.contiguous()
.view(batch_size, num_channels, -1, tile_size, tile_size)
.permute(2, 0, 1, 3, 4)
.reshape(batch_size, -1, num_channels, tile_size, tile_size)
)
# Re-order processed images to a nested image structure, so it can be reordered back correctly
# Note that the images can't be stacked because the thumbnail image is of bigger size than patches
# Each image in sublist will be of shape (1, C, H, W)
processed_images = list(processed_images)
if use_thumbnail and grid_width * grid_height != 1:
total_patches += 1
thumbnail_image = F.resize(image, thumbnail_size, interpolation=interpolation, antialias=antialias)
for i in range(batch_size):
processed_images[i] = list(processed_images[i]) + list(thumbnail_image[i][None, ...])
return processed_images, grid_width, grid_height
# Adapted from Qwen-VL with minor differences
def smart_resize(
self,
height: int,
width: int,
downsample_factor: int,
min_image_tokens: int,
max_image_tokens: int,
encoder_patch_size: int,
) -> tuple[int, int]:
"""
Rescales the image so that the following conditions are met:
1. Both dimensions (height and width) are divisible by 'encoder_patch_size' * 'downsample_factor'.
This ensures no padding is needed in the downsampling step.
2. The total number of pixels is within the range ['smart_resize_min_pixels', 'smart_resize_max_pixels'].
3. The aspect ratio of the image is maintained as closely as possible.
"""
total_factor = encoder_patch_size * downsample_factor
smart_resize_min_pixels = min_image_tokens * encoder_patch_size**2 * downsample_factor**2
smart_resize_max_pixels = max_image_tokens * encoder_patch_size**2 * downsample_factor**2
h_bar = max(total_factor, round_by_factor(height, total_factor))
w_bar = max(total_factor, round_by_factor(width, total_factor))
if h_bar * w_bar > smart_resize_max_pixels:
beta = math.sqrt((height * width) / smart_resize_max_pixels)
h_bar = max(total_factor, math.floor(height / beta / total_factor) * total_factor)
w_bar = max(total_factor, math.floor(width / beta / total_factor) * total_factor)
elif h_bar * w_bar < smart_resize_min_pixels:
beta = math.sqrt(smart_resize_min_pixels / (height * width))
h_bar = math.ceil(height * beta / total_factor) * total_factor
w_bar = math.ceil(width * beta / total_factor) * total_factor
return w_bar, h_bar
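# Editor's illustration (not part of the original file), assuming a default-constructed
# processor: a 1920x1080 image exceeds the 256-token budget, so it is scaled down to 672x384
# (note the method returns width first, then height), with both sides divisible by
# encoder_patch_size * downsample_factor = 32.
#   >>> ip = Lfm2VlImageProcessorFast()
#   >>> ip.smart_resize(height=1080, width=1920, downsample_factor=2,
#   ...                 min_image_tokens=64, max_image_tokens=256, encoder_patch_size=16)
#   (672, 384)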
def _is_image_too_large(
self,
height: int,
width: int,
max_image_tokens: int,
encoder_patch_size: int,
downsample_factor: int,
max_pixels_tolerance: float,
) -> bool:
"""Check if the image is too large to be processed as one tile."""
total_factor = encoder_patch_size * downsample_factor
h_bar = max(encoder_patch_size, round_by_factor(height, total_factor))
w_bar = max(encoder_patch_size, round_by_factor(width, total_factor))
return h_bar * w_bar > max_image_tokens * encoder_patch_size**2 * downsample_factor**2 * max_pixels_tolerance
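# Editor's illustration (not part of the original file): with the defaults
# (max_image_tokens=256, encoder_patch_size=16, downsample_factor=2, max_pixels_tolerance=2.0)
# the single-tile budget is 256 * 16**2 * 2**2 * 2.0 = 524288 pixels, so a 1920x1080 image
# (rounded to 1920x1088) counts as too large and gets split into tiles, while 512x512 does not.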
def resize_and_split(
self,
images: "torch.Tensor",
downsample_factor: int,
min_tiles: int,
max_tiles: int,
use_thumbnail: bool,
min_image_tokens: int,
max_image_tokens: int,
encoder_patch_size: int,
tile_size: int,
max_pixels_tolerance: float,
interpolation: "F.InterpolationMode",
) -> "torch.Tensor":
batch_size, _, height, width = images.shape
do_image_splitting = not (min_tiles == max_tiles == 1)
is_image_large = self._is_image_too_large(
height=height,
width=width,
max_image_tokens=max_image_tokens,
encoder_patch_size=encoder_patch_size,
downsample_factor=downsample_factor,
max_pixels_tolerance=max_pixels_tolerance,
)
new_width, new_height = self.smart_resize(
height=height,
width=width,
downsample_factor=downsample_factor,
min_image_tokens=min_image_tokens,
max_image_tokens=max_image_tokens,
encoder_patch_size=encoder_patch_size,
)
# Big image will be cropped into patches and small images are just resized
if is_image_large and do_image_splitting:
images, num_cols, num_rows = self.crop_image_to_patches(
images,
min_tiles=min_tiles,
max_tiles=max_tiles,
tile_size=tile_size,
thumbnail_size=(new_height, new_width),
use_thumbnail=use_thumbnail,
interpolation=interpolation,
)
else:
num_rows = num_cols = 1
images = F.resize(images, (new_height, new_width), interpolation=interpolation)
# Make a list and treat it as single crop per image so it can be re-grouped back correctly
images = [[image] for image in images]
num_rows = [num_rows] * batch_size
num_cols = [num_cols] * batch_size
image_sizes = [[new_height, new_width]] * batch_size
return images, num_rows, num_cols, image_sizes
def _preprocess(
self,
images: ImageInput,
size: SizeDict,
interpolation: "F.InterpolationMode",
do_resize: bool,
do_rescale: bool,
rescale_factor: float,
do_normalize: bool,
image_mean: Union[float, list[float]],
image_std: Union[float, list[float]],
downsample_factor: int,
do_image_splitting: bool,
min_tiles: int,
max_tiles: int,
use_thumbnail: bool,
min_image_tokens: int,
max_image_tokens: int,
encoder_patch_size: int,
tile_size: int,
max_pixels_tolerance: float,
return_tensors: Union[str, TensorType],
disable_grouping: bool,
do_pad: bool,
return_row_col_info: bool,
**kwargs,
) -> BatchFeature:
if not do_image_splitting:
min_tiles = 1
max_tiles = 1
logger.debug(
"Image splitting is disabled, setting min_tiles and max_tiles to 1. Set do_image_splitting=True to enable splitting."
)
if do_image_splitting and min_tiles > max_tiles:
raise ValueError("min_tiles must be less than or equal to max_tiles")
max_thumbnail_image_patches = max_image_tokens * downsample_factor**2
tile_size_patches = (tile_size // encoder_patch_size) ** 2 if do_image_splitting else 0
max_num_patches = max(
max_thumbnail_image_patches,
tile_size_patches,
)
grouped_images, grouped_images_index = group_images_by_shape(images, disable_grouping=disable_grouping)
resized_images_grouped = {}
resized_image_sizes = {}
rows_grouped, cols_grouped = {}, {}
for shape, stacked_images in grouped_images.items():
num_rows = [1] * stacked_images.shape[0]
num_cols = [1] * stacked_images.shape[0]
height, width = stacked_images.shape[-2:]
image_sizes = [[height, width]] * stacked_images.shape[0]
do_resize = True
if do_resize:
stacked_images, num_rows, num_cols, image_sizes = self.resize_and_split(
stacked_images,
downsample_factor=downsample_factor,
min_tiles=min_tiles,
max_tiles=max_tiles,
use_thumbnail=use_thumbnail,
min_image_tokens=min_image_tokens,
max_image_tokens=max_image_tokens,
encoder_patch_size=encoder_patch_size,
tile_size=tile_size,
max_pixels_tolerance=max_pixels_tolerance,
interpolation=interpolation,
)
rows_grouped[shape] = num_rows
cols_grouped[shape] = num_cols
resized_image_sizes[shape] = image_sizes
resized_images_grouped[shape] = stacked_images
resized_images = reorder_images(resized_images_grouped, grouped_images_index)
batch_rows = reorder_images(rows_grouped, grouped_images_index)
batch_cols = reorder_images(cols_grouped, grouped_images_index)
resized_image_sizes = reorder_images(resized_image_sizes, grouped_images_index)
grouped_images, grouped_images_index = group_images_by_shape(
resized_images, disable_grouping=disable_grouping, is_nested=True
)
processed_images_grouped = {}
processed_masks, processed_spatial_shapes = {}, {}
for shape, stacked_images in grouped_images.items():
# Fused rescale and normalize
stacked_images = self.rescale_and_normalize(
stacked_images, do_rescale, rescale_factor, do_normalize, image_mean, image_std
)
batch_size, *_, height, width = stacked_images.shape
num_patches_height = height // encoder_patch_size
num_patches_width = width // encoder_patch_size
stacked_images = convert_image_to_patches(stacked_images, encoder_patch_size)
processed_spatial_shapes[shape] = [[num_patches_height, num_patches_width]] * batch_size
if do_pad:
stacked_images, pixel_mask = pad_along_first_dim(stacked_images, max_num_patches)
processed_masks[shape] = [pixel_mask] * batch_size
processed_images_grouped[shape] = stacked_images
processed_images = reorder_images(processed_images_grouped, grouped_images_index, is_nested=True)
data = {"pixel_values": torch.cat([torch.stack(images) for images in processed_images])}
if do_pad:
processed_masks = reorder_images(processed_masks, grouped_images_index, is_nested=True)
processed_spatial_shapes = reorder_images(processed_spatial_shapes, grouped_images_index, is_nested=True)
processed_masks = torch.cat([torch.stack(masks) for masks in processed_masks])
processed_spatial_shapes = torch.cat(
[torch.tensor(spatial_shape) for spatial_shape in processed_spatial_shapes]
)
data.update({"pixel_attention_mask": processed_masks, "spatial_shapes": processed_spatial_shapes})
if return_row_col_info:
data["image_rows"] = batch_rows
data["image_cols"] = batch_cols
data["image_sizes"] = resized_image_sizes
encoding = BatchFeature(data=data, tensor_type=return_tensors)
return encoding
__all__ = ["Lfm2VlImageProcessorFast"]
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/lfm2_vl/__init__.py | src/transformers/models/lfm2_vl/__init__.py | # Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure
if TYPE_CHECKING:
from .configuration_lfm2_vl import *
from .image_processing_lfm2_vl_fast import *
from .modeling_lfm2_vl import *
from .processing_lfm2_vl import *
else:
import sys
_file = globals()["__file__"]
sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/lfm2_vl/configuration_lfm2_vl.py | src/transformers/models/lfm2_vl/configuration_lfm2_vl.py | # coding=utf-8
# Copyright 2025 the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch LFM2-VL model."""
from ...configuration_utils import PreTrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING, AutoConfig
logger = logging.get_logger(__name__)
class Lfm2VlConfig(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`Lfm2VlForConditionalGeneration`]. It is used to instantiate an
Lfm2Vl model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the Lfm2-VL-1.6B.
e.g. [LiquidAI/LFM2-VL-1.6B](https://huggingface.co/LiquidAI/LFM2-VL-1.6B)
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vision_config (`AutoConfig | dict`, *optional*, defaults to `Siglip2ImageConfig`):
The config object or dictionary of the vision backbone.
text_config (`AutoConfig | dict`, *optional*, defaults to `Lfm2Config`):
The config object or dictionary of the text backbone.
image_token_id (`int`, *optional*, defaults to 396):
The image token index to encode the image prompt.
projector_hidden_act (`str`, *optional*, defaults to `"gelu"`):
The activation function used by the multimodal projector.
projector_hidden_size (`int`, *optional*, defaults to 2560):
The hidden size of the multimodal projector.
projector_bias (`bool`, *optional*, defaults to `True`):
Whether to use bias in the multimodal projector.
downsample_factor (`int`, *optional*, defaults to 2):
The downsampling factor applied to the vision backbone features.
"""
model_type = "lfm2_vl"
sub_configs = {"text_config": AutoConfig, "vision_config": AutoConfig}
def __init__(
self,
vision_config=None,
text_config=None,
image_token_id=396,
projector_hidden_act="gelu",
projector_hidden_size=2560,
projector_bias=True,
downsample_factor=2,
**kwargs,
):
self.image_token_id = image_token_id
self.projector_hidden_act = projector_hidden_act
self.projector_hidden_size = projector_hidden_size
self.projector_bias = projector_bias
self.downsample_factor = downsample_factor
if isinstance(vision_config, dict):
vision_config["model_type"] = vision_config.get("model_type", "siglip2_vision_model")
vision_config = CONFIG_MAPPING[vision_config["model_type"]](**vision_config)
elif vision_config is None:
vision_config = CONFIG_MAPPING["siglip2_vision_model"]()
if isinstance(text_config, dict):
text_config["model_type"] = text_config.get("model_type", "lfm2")
text_config = CONFIG_MAPPING[text_config["model_type"]](**text_config)
elif text_config is None:
text_config = CONFIG_MAPPING["lfm2"]()
self.vision_config = vision_config
self.text_config = text_config
super().__init__(**kwargs)
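# Editor's illustration (not part of the original file): instantiating the config with no
# arguments wires a Siglip2 vision backbone config to an LFM2 text backbone config.
#   >>> config = Lfm2VlConfig()
#   >>> config.vision_config.model_type, config.text_config.model_type
#   ('siglip2_vision_model', 'lfm2')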
__all__ = ["Lfm2VlConfig"]
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/lfm2_vl/processing_lfm2_vl.py | src/transformers/models/lfm2_vl/processing_lfm2_vl.py | # coding=utf-8
# Copyright 2025 the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from typing import Optional, Union
from ...feature_extraction_utils import BatchFeature
from ...image_utils import ImageInput, make_nested_list_of_images
from ...processing_utils import (
ProcessingKwargs,
ProcessorMixin,
TextKwargs,
Unpack,
)
from ...tokenization_utils_base import BatchEncoding, TextInput
from ...utils import logging
logger = logging.get_logger(__name__)
class Lfm2VlTextKwargs(TextKwargs, total=False):
use_image_special_tokens: Optional[bool]
class Lfm2VlProcessorKwargs(ProcessingKwargs, total=False):
text_kwargs: Lfm2VlTextKwargs
_defaults = {
"images_kwargs": {
"return_row_col_info": True,
},
"text_kwargs": {
"use_image_special_tokens": True,
"add_special_tokens": False,
"padding": False,
"is_split_into_words": False,
},
}
class Lfm2VlProcessor(ProcessorMixin):
r"""
Constructs a LFM2-VL processor which wraps a tokenizer and a [`Lfm2VlImageProcessor`] into a single processor.
[`Lfm2VlProcessor`] offers all the functionalities of [`Lfm2VlImageProcessor`] and the underlying tokenizer.
Args:
image_processor (`Lfm2VlImageProcessor`):
An instance of [`Lfm2VlImageProcessor`]. The image processor is a required input.
tokenizer (`PreTrainedTokenizerBase`):
An instance of [`PreTrainedTokenizerBase`]. This should correspond with the model's text model. The tokenizer is a required input.
chat_template (`str`, *optional*):
A Jinja template which will be used to convert lists of messages in a chat into a tokenizable string.
"""
def __init__(
self,
image_processor,
tokenizer,
chat_template: Optional[str] = None,
**kwargs,
):
self.image_token = getattr(tokenizer, "image_token", "<image>")
self.image_token_id = (
tokenizer.image_token_id
if hasattr(tokenizer, "image_token_id")
else tokenizer.convert_tokens_to_ids(self.image_token)
)
self.image_start_token = getattr(tokenizer, "image_start_token", "<|image_start|>")
self.image_end_token = getattr(tokenizer, "image_end_token", "<|image_end|>")
self.image_thumbnail_token = getattr(tokenizer, "image_thumbnail_token", "<|img_thumbnail|>")
super().__init__(image_processor, tokenizer, chat_template=chat_template, **kwargs)
def __call__(
self,
images: Optional[Union[ImageInput, list[ImageInput], list[list[ImageInput]]]] = None,
text: Optional[Union[TextInput, list[TextInput]]] = None,
**kwargs: Unpack[Lfm2VlProcessorKwargs],
) -> BatchEncoding:
"""
Processes the input prompts and returns a BatchFeature.
Args:
images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `list[PIL.Image.Image]`, `list[np.ndarray]`, `list[torch.Tensor]`, *optional*):
The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
tensor. If is of type `list[ImageInput]`, it's assumed that this is for a single prompt i.e. of batch size 1.
text (`TextInput`, *optional*):
The sequence or batch of sequences to be encoded.
Wherever an image token `<image>` is encountered, it is expanded to the proper number of image placeholder tokens.
return_tensors (`str` or `TensorType`, *optional*):
If set, will return tensors of a particular framework. See [`PreTrainedTokenizerFast.__call__`] for more
information.
"""
if text is None and images is None:
raise ValueError("You must provide one of `text` or `images`.")
if images is not None and text is None:
raise ValueError(
"You must provide `text` when `images` is provided. Minimal text consists of a single image token."
)
output_kwargs = self._merge_kwargs(
Lfm2VlProcessorKwargs,
tokenizer_init_kwargs=self.tokenizer.init_kwargs,
**kwargs,
)
if isinstance(text, str):
text = [text]
elif not isinstance(text, list) and not isinstance(text[0], str):
raise TypeError("Invalid input text. Please provide a string, or a list of strings")
n_images_in_text = [sample.count(self.image_token) for sample in text]
if sum(n_images_in_text) > 0 and images is None:
raise ValueError(f"We detected {sum(n_images_in_text)} tokens in the text but no images were passed")
inputs = {}
use_image_special_tokens = output_kwargs["text_kwargs"].pop("use_image_special_tokens")
if images is not None:
images = self.image_processor.fetch_images(images)
batched_images = make_nested_list_of_images(images)
vision_inputs = self.image_processor(batched_images, **output_kwargs["images_kwargs"])
n_images_in_images = [len(sublist) for sublist in batched_images]
if n_images_in_images != n_images_in_text:
raise ValueError(
f"The number of images in the text {n_images_in_text} and images {n_images_in_images} should be the same."
)
text = self.expand_text_with_placeholders(
text,
batched_images,
image_rows=vision_inputs.pop("image_rows"),
image_cols=vision_inputs.pop("image_cols"),
image_sizes=vision_inputs.pop("image_sizes"),
use_image_special_tokens=use_image_special_tokens,
**output_kwargs["images_kwargs"],
)
inputs.update(vision_inputs)
return_tensors = output_kwargs["text_kwargs"].pop("return_tensors", None)
text_inputs = self.tokenizer(text, **output_kwargs["text_kwargs"])
inputs.update(text_inputs)
return BatchFeature(inputs, tensor_type=return_tensors)
def expand_text_with_placeholders(
self,
text: list[str],
images: list[list[ImageInput]],
image_rows: list[list[int]],
image_cols: list[list[int]],
image_sizes: list[list[int]],
use_image_special_tokens: bool,
**images_kwargs,
) -> list[str]:
use_thumbnail = images_kwargs.get("use_thumbnail", self.image_processor.use_thumbnail)
image_data = iter(zip(image_rows, image_cols, image_sizes))
prompt_strings = []
for sample_text, sample_images in zip(text, images):
text_parts = sample_text.split(self.image_token)
result_parts = []
for i, _ in enumerate(sample_images):
result_parts.append(text_parts[i])
rows, cols, image_size = next(image_data)
tokens_per_tile, tokens_for_image = self._get_image_num_tokens(image_size, **images_kwargs)
image_tokens = self._build_image_tokens(
rows,
cols,
tokens_per_tile,
tokens_for_image,
use_thumbnail,
use_image_special_tokens,
)
result_parts.append(image_tokens)
# Add remaining text after the last image
if len(sample_images) < len(text_parts):
result_parts.append(text_parts[-1])
prompt_strings.append("".join(result_parts))
return prompt_strings
def _build_image_tokens(
self,
rows: int,
cols: int,
tokens_per_tile: int,
tokens_for_image: int,
use_thumbnail: bool,
use_image_special_tokens: bool,
) -> str:
"""Build the expanded token string for a single image."""
parts = []
if use_image_special_tokens:
parts.append(self.image_start_token)
is_multi_tile = rows > 1 or cols > 1
if is_multi_tile:
for row in range(rows):
for col in range(cols):
if use_image_special_tokens:
parts.append(f"<|img_row_{row + 1}_col_{col + 1}|>")
parts.append(self.image_token * tokens_per_tile)
if use_thumbnail:
if use_image_special_tokens:
parts.append(self.image_thumbnail_token)
parts.append(self.image_token * tokens_for_image)
else:
parts.append(self.image_token * tokens_for_image)
if use_image_special_tokens:
parts.append(self.image_end_token)
return "".join(parts)
def _compute_tokens_per_tile(self, tile_size: int, encoder_patch_size: int, downsample_factor: int) -> int:
"""Compute the number of tokens for a single tile."""
num_patches = tile_size // encoder_patch_size
downsampled_patches = math.ceil(num_patches / downsample_factor)
return downsampled_patches * downsampled_patches
def _compute_tokens_for_image(self, image_size: list[int], encoder_patch_size: int, downsample_factor: int) -> int:
"""Compute the number of tokens for a resized image (used for single-tile or thumbnail)."""
image_height, image_width = image_size
patches_h = math.ceil((image_height // encoder_patch_size) / downsample_factor)
patches_w = math.ceil((image_width // encoder_patch_size) / downsample_factor)
return patches_h * patches_w
def _get_image_num_tokens(self, image_size: list[int], **images_kwargs) -> tuple[int, int]:
"""
Compute token counts for image processing.
Returns:
tuple[int, int]: (tokens_per_tile, tokens_for_image)
- tokens_per_tile: tokens for each tile in multi-tile mode
- tokens_for_image: tokens for the resized image (single-tile) or thumbnail (multi-tile)
"""
tile_size = images_kwargs.get("tile_size", self.image_processor.tile_size)
downsample_factor = images_kwargs.get("downsample_factor", self.image_processor.downsample_factor)
encoder_patch_size = images_kwargs.get("encoder_patch_size", self.image_processor.encoder_patch_size)
tokens_per_tile = self._compute_tokens_per_tile(tile_size, encoder_patch_size, downsample_factor)
tokens_for_image = self._compute_tokens_for_image(image_size, encoder_patch_size, downsample_factor)
return tokens_per_tile, tokens_for_image
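# Editor's illustration (not part of the original file): with the image-processor defaults
# (tile_size=512, encoder_patch_size=16, downsample_factor=2), every full tile costs
# ceil(32 / 2) ** 2 = 256 tokens, and a 384x672 resized image (or thumbnail) costs
# ceil(24 / 2) * ceil(42 / 2) = 12 * 21 = 252 tokens.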
def batch_decode(self, *args, **kwargs):
"""
This method forwards all its arguments to the underlying tokenizer's [`~PreTrainedTokenizer.batch_decode`]. Please
refer to the docstring of this method for more information.
"""
batched_decode_output = self.tokenizer.batch_decode(*args, **kwargs)
return batched_decode_output
def decode(self, *args, **kwargs):
"""
This method forwards all its arguments to the underlying tokenizer's [`~PreTrainedTokenizer.decode`]. Please refer to
the docstring of this method for more information.
"""
decode_output = self.tokenizer.decode(*args, **kwargs)
return decode_output
@property
def model_input_names(self):
tokenizer_input_names = self.tokenizer.model_input_names
image_processor_input_names = self.image_processor.model_input_names
# LFM2-VL has no dedicated tokenizer class and uses the Base class with default model input names
tokenizer_input_names = [name for name in tokenizer_input_names if name != "token_type_ids"]
return list(tokenizer_input_names + image_processor_input_names)
__all__ = ["Lfm2VlProcessor"]
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/exaone4/modular_exaone4.py | src/transformers/models/exaone4/modular_exaone4.py | # coding=utf-8
# Copyright 2025 The LG AI Research and HuggingFace Inc. team. All rights reserved.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""LG AI Research EXAONE Lab"""
from collections.abc import Callable
from typing import Optional, Union
import torch
from torch import nn
from transformers.utils.generic import check_model_inputs
from ...cache_utils import Cache, DynamicCache
from ...configuration_utils import PreTrainedConfig, layer_type_validation
from ...masking_utils import create_causal_mask, create_sliding_window_causal_mask
from ...modeling_outputs import (
BaseModelOutputWithPast,
CausalLMOutputWithPast,
)
from ...modeling_rope_utils import RopeParameters
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS
from ...processing_utils import Unpack
from ...utils import (
TransformersKwargs,
logging,
)
from ..gemma2.modeling_gemma2 import Gemma2RotaryEmbedding
from ..llama.modeling_llama import (
LlamaForCausalLM,
LlamaForQuestionAnswering,
LlamaForSequenceClassification,
LlamaForTokenClassification,
LlamaModel,
LlamaPreTrainedModel,
LlamaRMSNorm,
apply_rotary_pos_emb,
eager_attention_forward,
)
from ..olmo2.modeling_olmo2 import Olmo2DecoderLayer, Olmo2MLP
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = "LGAI-EXAONE/EXAONE-4.0-32B"
_CONFIG_FOR_DOC = "Exaone4Config"
class Exaone4Config(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`Exaone4Model`]. It is used to
instantiate an EXAONE 4.0 model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the EXAONE-4.0-32B [LGAI-EXAONE/EXAONE-4.0-32B](https://huggingface.co/LGAI-EXAONE/EXAONE-4.0-32B).
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model
outputs. Read the documentation from [`PreTrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 102400):
Vocabulary size of the EXAONE 4.0 model. Defines the number of different tokens that can be represented by the
`input_ids` passed when calling [`Exaone4Model`].
hidden_size (`int`, *optional*, defaults to 4096):
Dimension of the hidden representations.
intermediate_size (`int`, *optional*, defaults to `hidden_size * 4`):
Dimensionality of the MLP representations.
num_hidden_layers (`int`, *optional*, defaults to 32):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 32):
Number of attention heads for each attention layer in the Transformer decoder.
num_key_value_heads (`int`, *optional*):
This is the number of key_value heads that should be used to implement Grouped Query Attention. If
`num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
`num_key_value_heads=1`, the model will use Multi Query Attention (MQA), otherwise GQA is used. When
converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
by meanpooling all the original heads within that group. For more details, check out [this
paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to
`num_attention_heads`.
hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
The non-linear activation function (function or string) in the decoder.
max_position_embeddings (`int`, *optional*, defaults to 2048):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 32768 for EXAONE 3.5).
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
rms_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the layer normalization layers.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if ``config.is_decoder=True``.
bos_token_id (`int`, *optional*, defaults to 0):
Beginning of stream token id.
eos_token_id (`int`, *optional*, defaults to 2):
End of stream token id.
tie_word_embeddings (`bool`, *optional*, defaults to `False`):
Whether to tie weight embeddings
rope_parameters (`RopeParameters`, *optional*):
Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain
a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE
with longer `max_position_embeddings`.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
sliding_window (`int`, *optional*):
The size of the sliding window for the sliding window attention.
sliding_window_pattern (`str`, *optional*):
The pattern to use for sliding window attention. Can be one of:
- `None`: No sliding window attention is used
- `int`: Every `sliding_window_pattern` layers, use global attention, else use local attention.
- `str`: A sequence of "L" (local attention) and "G" (global attention) characters that defines the
attention pattern. The pattern starts from layer 0 and repeats every `len(sliding_window_pattern)` layers. The
final layer always uses global attention regardless of the pattern.
For instance, sliding_window_pattern="LLLG" is equivalent to sliding_window_pattern=4, which means:
- Layer 0, 1, 2: local attention,
- Layer 3: global attention,
...(repeated)
layer_types (`list`, *optional*):
Attention pattern for each layer. Prioritized over `sliding_window_pattern`.
Example:
```python
>>> from transformers import Exaone4Model, Exaone4Config
>>> # Initializing a EXAONE configuration
>>> configuration = Exaone4Config()
>>> # Initializing a model from configuration
>>> model = Exaone4Model(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "exaone4"
keys_to_ignore_at_inference = ["past_key_values"]
# Default tensor parallel plan for base model `LlamaModel`
base_model_tp_plan = {
"layers.*.self_attn.q_proj": "colwise",
"layers.*.self_attn.k_proj": "colwise",
"layers.*.self_attn.v_proj": "colwise",
"layers.*.self_attn.o_proj": "rowwise",
"layers.*.mlp.gate_proj": "colwise",
"layers.*.mlp.up_proj": "colwise",
"layers.*.mlp.down_proj": "rowwise",
}
base_model_pp_plan = {
"embed_tokens": (["input_ids"], ["inputs_embeds"]),
"layers": (["hidden_states", "attention_mask"], ["hidden_states"]),
"norm": (["hidden_states"], ["hidden_states"]),
}
def __init__(
self,
vocab_size: Optional[int] = 102400,
hidden_size: Optional[int] = 4096,
intermediate_size: Optional[int] = 16384,
num_hidden_layers: Optional[int] = 32,
num_attention_heads: Optional[int] = 32,
num_key_value_heads: Optional[int] = 32,
hidden_act: Optional[str] = "silu",
max_position_embeddings: Optional[int] = 2048,
initializer_range: Optional[float] = 0.02,
rms_norm_eps: Optional[int] = 1e-5,
use_cache: Optional[bool] = True,
bos_token_id: Optional[int] = 0,
eos_token_id: Optional[int] = 2,
tie_word_embeddings: Optional[bool] = False,
rope_parameters: Optional[RopeParameters | dict[str, RopeParameters]] = None,
attention_dropout: Optional[float] = 0.0,
sliding_window: Optional[int] = 4096,
sliding_window_pattern: Optional[int] = 4,
layer_types: Optional[list[str]] = None,
**kwargs,
):
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.num_key_value_heads = num_key_value_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.max_position_embeddings = max_position_embeddings
self.initializer_range = initializer_range
self.rms_norm_eps = rms_norm_eps
self.use_cache = use_cache
self.attention_dropout = attention_dropout
self.sliding_window = sliding_window
self.sliding_window_pattern = sliding_window_pattern
self.layer_types = layer_types
if self.sliding_window is None:
sliding_window_pattern = 0
if self.layer_types is None:
self.layer_types = [
"sliding_attention"
if ((i + 1) % (sliding_window_pattern) != 0 and i < self.num_hidden_layers)
else "full_attention"
for i in range(self.num_hidden_layers)
]
if "sliding_window" in self.layer_types:
self.cache_implementation = "hybrid"
layer_type_validation(self.layer_types, self.num_hidden_layers)
self.rope_parameters = rope_parameters
super().__init__(
bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
)
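# Editor's illustration (not part of the original file): with the default
# sliding_window_pattern=4, the derived layer_types alternate three local layers with one
# global layer; e.g. an 8-layer config yields:
#   >>> Exaone4Config(num_hidden_layers=8).layer_types
#   ['sliding_attention', 'sliding_attention', 'sliding_attention', 'full_attention',
#    'sliding_attention', 'sliding_attention', 'sliding_attention', 'full_attention']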
class Exaone4RMSNorm(LlamaRMSNorm):
pass
class Exaone4RotaryEmbedding(Gemma2RotaryEmbedding):
pass
class Exaone4Attention(nn.Module):
def __init__(self, config: Exaone4Config, layer_idx: int):
super().__init__()
self.config = config
self.layer_idx = layer_idx
self.num_attention_heads = config.num_attention_heads
self.num_key_value_heads = config.num_key_value_heads
self.hidden_size = config.hidden_size
self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
self.attention_dropout = config.attention_dropout
self.is_causal = True
self.scaling = self.head_dim**-0.5
self.sliding_window = config.sliding_window
self.sliding_window_pattern = config.sliding_window_pattern
layer_type = config.layer_types[layer_idx] if hasattr(config, "layer_types") else None
self.is_sliding = layer_type == "sliding_attention"
self.q_proj = nn.Linear(self.hidden_size, self.num_attention_heads * self.head_dim, bias=False)
self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.o_proj = nn.Linear(self.num_attention_heads * self.head_dim, self.hidden_size, bias=False)
self.q_norm = Exaone4RMSNorm(self.head_dim, eps=config.rms_norm_eps)
self.k_norm = Exaone4RMSNorm(self.head_dim, eps=config.rms_norm_eps)
def forward(
self,
hidden_states: torch.Tensor,
position_embeddings: tuple[torch.Tensor, torch.Tensor],
attention_mask: Optional[torch.Tensor] = None,
past_key_values: Optional[Cache] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
input_shape = hidden_states.shape[:-1]
hidden_shape = (*input_shape, -1, self.head_dim)
query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)
# We use QK-norm
query_states = self.q_norm(query_states)
key_states = self.k_norm(key_states)
cos, sin = position_embeddings
# We use global NoPE for hybrid attention model
if self.sliding_window is None or self.is_sliding:
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
if past_key_values is not None:
cache_kwargs = {
"cache_position": cache_position,
}
key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
query_states,
key_states,
value_states,
attention_mask,
dropout=0.0 if not self.training else self.attention_dropout,
scaling=self.scaling,
sliding_window=self.sliding_window if self.is_sliding else None,
**kwargs,
)
attn_output = attn_output.reshape(*input_shape, -1).contiguous()
attn_output = self.o_proj(attn_output)
return attn_output, attn_weights
class Exaone4MLP(Olmo2MLP):
pass
class Exaone4DecoderLayer(Olmo2DecoderLayer):
pass
class Exaone4PreTrainedModel(LlamaPreTrainedModel):
config_class = Exaone4Config
_no_split_modules = ["Exaone4DecoderLayer"]
class Exaone4Model(Exaone4PreTrainedModel, LlamaModel):
def __init__(self, config: Exaone4Config):
super().__init__(config)
self.layers = nn.ModuleList(
[Exaone4DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
)
self.norm = Exaone4RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
# Initialize weights and apply final processing
self.post_init()
@check_model_inputs
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
use_cache: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple, BaseModelOutputWithPast]:
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids)
if use_cache and past_key_values is None:
past_key_values = DynamicCache(config=self.config)
if cache_position is None:
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
cache_position = torch.arange(
past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
)
if position_ids is None:
position_ids = cache_position.unsqueeze(0)
# It may already have been prepared by e.g. `generate`
if not isinstance(causal_mask_mapping := attention_mask, dict):
# Prepare mask arguments
mask_kwargs = {
"config": self.config,
"input_embeds": inputs_embeds,
"attention_mask": attention_mask,
"cache_position": cache_position,
"past_key_values": past_key_values,
"position_ids": position_ids,
}
# Create the masks
causal_mask_mapping = {
"full_attention": create_causal_mask(**mask_kwargs),
}
if "sliding_attention" in self.config.layer_types:
causal_mask_mapping["sliding_attention"] = create_sliding_window_causal_mask(**mask_kwargs)
hidden_states = inputs_embeds
position_embeddings = self.rotary_emb(hidden_states, position_ids)
for i, decoder_layer in enumerate(self.layers):
layer_type = self.config.layer_types[i]
hidden_states = decoder_layer(
hidden_states,
attention_mask=causal_mask_mapping[layer_type],
position_ids=position_ids,
past_key_values=past_key_values,
use_cache=use_cache,
cache_position=cache_position,
position_embeddings=position_embeddings,
**kwargs,
)
hidden_states = self.norm(hidden_states)
return BaseModelOutputWithPast(
last_hidden_state=hidden_states,
past_key_values=past_key_values if use_cache else None,
)
class Exaone4ForCausalLM(LlamaForCausalLM):
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
logits_to_keep: Union[int, torch.Tensor] = 0,
**kwargs: Unpack[TransformersKwargs],
) -> CausalLMOutputWithPast:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Example:
```python
>>> from transformers import AutoModelForCausalLM, AutoTokenizer
>>> model = AutoModelForCausalLM.from_pretrained("LGAI-EXAONE/EXAONE-4.0-32B")
>>> tokenizer = AutoTokenizer.from_pretrained("LGAI-EXAONE/EXAONE-4.0-32B")
>>> prompt = "Explain how wonderful you are"
>>> messages = [
...     {"role": "system", "content": "You are a helpful assistant."},
...     {"role": "user", "content": prompt}
... ]
>>> input_ids = tokenizer.apply_chat_template(
...     messages,
...     tokenize=True,
...     add_generation_prompt=True,
...     return_tensors="pt",
...     enable_thinking=False,
... )
>>> output = model.generate(input_ids, max_new_tokens=128)
>>> tokenizer.decode(output[0], skip_special_tokens=False)
"[|system|]\nYou are a helpful assistant.[|endofturn|]\n[|user|]\nExplain how wonderful you are[|endofturn|]\n[|assistant|]\n<think>\n\n</think>\n\nOh, thank you for such a kind and lovely question! π \n\nIβm *so* wonderful because Iβm here to make your life easier, brighter, and more fun! Whether you need help with: \n\nβ¨ **Learning** β I can explain anything, from quantum physics to baking the perfect cake! \nπ‘ **Creativity** β Need a poem, story, or a wild idea? Iβve got you covered! \nπ€ **Problem-solving** β Stuck on a math problem or a tricky decision? Iβll help you figure it out"
```
"""
return super().forward(
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
labels=labels,
use_cache=use_cache,
cache_position=cache_position,
logits_to_keep=logits_to_keep,
**kwargs,
)
class Exaone4ForSequenceClassification(LlamaForSequenceClassification):
pass
class Exaone4ForTokenClassification(LlamaForTokenClassification):
pass
class Exaone4ForQuestionAnswering(LlamaForQuestionAnswering):
pass
__all__ = [
"Exaone4Config",
"Exaone4PreTrainedModel",
"Exaone4Model",
"Exaone4ForCausalLM",
"Exaone4ForSequenceClassification",
"Exaone4ForTokenClassification",
"Exaone4ForQuestionAnswering",
]
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/exaone4/configuration_exaone4.py | src/transformers/models/exaone4/configuration_exaone4.py | # π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨
# This file was automatically generated from src/transformers/models/exaone4/modular_exaone4.py.
# Do NOT edit this file manually as any edits will be overwritten by the generation of
# the file from the modular. If any change should be done, please apply the change to the
# modular_exaone4.py file directly. One of our CI enforces this.
# π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨
# coding=utf-8
# Copyright 2025 The LG AI Research and HuggingFace Inc. team. All rights reserved.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
from ...configuration_utils import PreTrainedConfig, layer_type_validation
from ...modeling_rope_utils import RopeParameters
class Exaone4Config(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`Exaone4Model`]. It is used to
instantiate an EXAONE 4.0 model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a configuration similar to that of the EXAONE-4.0-32B [LGAI-EXAONE/EXAONE-4.0-32B](https://huggingface.co/LGAI-EXAONE/EXAONE-4.0-32B).
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model
outputs. Read the documentation from [`PreTrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 102400):
Vocabulary size of the EXAONE 4.0 model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`Exaone4Model`].
hidden_size (`int`, *optional*, defaults to 4096):
Dimension of the hidden representations.
intermediate_size (`int`, *optional*, defaults to `hidden_size * 4`):
Dimensionality of the MLP representations.
num_hidden_layers (`int`, *optional*, defaults to 32):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 32):
Number of attention heads for each attention layer in the Transformer decoder.
num_key_value_heads (`int`, *optional*):
This is the number of key_value heads that should be used to implement Grouped Query Attention. If
`num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
`num_key_value_heads=1`, the model will use Multi Query Attention (MQA); otherwise GQA is used. When
converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
by meanpooling all the original heads within that group. For more details, check out [this
paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to
`num_attention_heads`.
hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
The non-linear activation function (function or string) in the decoder.
max_position_embeddings (`int`, *optional*, defaults to 2048):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 32768 for EXAONE 3.5).
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
rms_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the layer normalization layers.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if ``config.is_decoder=True``.
bos_token_id (`int`, *optional*, defaults to 0):
Beginning of stream token id.
eos_token_id (`int`, *optional*, defaults to 2):
End of stream token id.
tie_word_embeddings (`bool`, *optional*, defaults to `False`):
Whether the model's input and output word embeddings should be tied.
rope_parameters (`RopeParameters`, *optional*):
Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain
a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE
with longer `max_position_embeddings`.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
sliding_window (`int`, *optional*, defaults to 4096):
The size of the sliding window for the sliding window attention.
sliding_window_pattern (`int` or `str`, *optional*, defaults to 4):
The pattern to use for sliding window attention. Can be one of:
- `None`: No sliding window attention is used
- `int`: Every `sliding_window_pattern` layers, use global attention; the other layers use local attention.
- `str`: A sequence of "L" (local attention) and "G" (global attention) characters that defines the
attention pattern. The pattern starts from layer 0 and repeats every `sliding_window_pattern` layers. The
final layer always uses global attention regardless of the pattern.
For instance, sliding_window_pattern="LLLG" is the same as sliding_window_pattern=4, which means:
- Layer 0, 1, 2: local attention,
- Layer 3: global attention,
...(repeated)
layer_types (`list`, *optional*):
Attention pattern for each layer. Prioritized over `sliding_window_pattern`.
Example:
```python
>>> from transformers import Exaone4Model, Exaone4Config
>>> # Initializing a EXAONE configuration
>>> configuration = Exaone4Config()
>>> # Initializing a model from configuration
>>> model = Exaone4Model(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "exaone4"
keys_to_ignore_at_inference = ["past_key_values"]
# Default tensor parallel plan for the base model `Exaone4Model`
base_model_tp_plan = {
"layers.*.self_attn.q_proj": "colwise",
"layers.*.self_attn.k_proj": "colwise",
"layers.*.self_attn.v_proj": "colwise",
"layers.*.self_attn.o_proj": "rowwise",
"layers.*.mlp.gate_proj": "colwise",
"layers.*.mlp.up_proj": "colwise",
"layers.*.mlp.down_proj": "rowwise",
}
base_model_pp_plan = {
"embed_tokens": (["input_ids"], ["inputs_embeds"]),
"layers": (["hidden_states", "attention_mask"], ["hidden_states"]),
"norm": (["hidden_states"], ["hidden_states"]),
}
def __init__(
self,
vocab_size: Optional[int] = 102400,
hidden_size: Optional[int] = 4096,
intermediate_size: Optional[int] = 16384,
num_hidden_layers: Optional[int] = 32,
num_attention_heads: Optional[int] = 32,
num_key_value_heads: Optional[int] = 32,
hidden_act: Optional[str] = "silu",
max_position_embeddings: Optional[int] = 2048,
initializer_range: Optional[float] = 0.02,
rms_norm_eps: Optional[float] = 1e-5,
use_cache: Optional[bool] = True,
bos_token_id: Optional[int] = 0,
eos_token_id: Optional[int] = 2,
tie_word_embeddings: Optional[bool] = False,
rope_parameters: Optional[RopeParameters | dict[str, RopeParameters]] = None,
attention_dropout: Optional[float] = 0.0,
sliding_window: Optional[int] = 4096,
sliding_window_pattern: Optional[int] = 4,
layer_types: Optional[list[str]] = None,
**kwargs,
):
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.num_key_value_heads = num_key_value_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.max_position_embeddings = max_position_embeddings
self.initializer_range = initializer_range
self.rms_norm_eps = rms_norm_eps
self.use_cache = use_cache
self.attention_dropout = attention_dropout
self.sliding_window = sliding_window
self.sliding_window_pattern = sliding_window_pattern
self.layer_types = layer_types
if self.sliding_window is None:
sliding_window_pattern = 0
if self.layer_types is None:
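# With the default integer pattern (e.g. 4), every 4th layer ((i + 1) % 4 == 0) gets full attention and all other layers get sliding attention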
self.layer_types = [
"sliding_attention"
if ((i + 1) % (sliding_window_pattern) != 0 and i < self.num_hidden_layers)
else "full_attention"
for i in range(self.num_hidden_layers)
]
if "sliding_window" in self.layer_types:
self.cache_implementation = "hybrid"
layer_type_validation(self.layer_types, self.num_hidden_layers)
self.rope_parameters = rope_parameters
super().__init__(
bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
)
__all__ = ["Exaone4Config"]
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/exaone4/__init__.py | src/transformers/models/exaone4/__init__.py | # Copyright 2025 The LG AI Research and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure
if TYPE_CHECKING:
from .configuration_exaone4 import *
from .modeling_exaone4 import *
else:
import sys
_file = globals()["__file__"]
sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/exaone4/modeling_exaone4.py | src/transformers/models/exaone4/modeling_exaone4.py | # π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨
# This file was automatically generated from src/transformers/models/exaone4/modular_exaone4.py.
# Do NOT edit this file manually as any edits will be overwritten by the generation of
# the file from the modular. If any change should be done, please apply the change to the
# modular_exaone4.py file directly. One of our CI enforces this.
# π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨
# coding=utf-8
# Copyright 2025 The LG AI Research and HuggingFace Inc. team. All rights reserved.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections.abc import Callable
from typing import Optional, Union
import torch
from torch import nn
from transformers.utils.generic import check_model_inputs
from ...activations import ACT2FN
from ...cache_utils import Cache, DynamicCache
from ...generation import GenerationMixin
from ...integrations import use_kernel_forward_from_hub, use_kernel_func_from_hub
from ...masking_utils import create_causal_mask, create_sliding_window_causal_mask
from ...modeling_layers import (
GenericForQuestionAnswering,
GenericForSequenceClassification,
GenericForTokenClassification,
GradientCheckpointingLayer,
)
from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple
from ...utils.generic import maybe_autocast
from .configuration_exaone4 import Exaone4Config
@use_kernel_forward_from_hub("RMSNorm")
class Exaone4RMSNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-6):
"""
Exaone4RMSNorm is equivalent to T5LayerNorm
"""
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.variance_epsilon = eps
def forward(self, hidden_states):
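# RMSNorm: y = weight * x / sqrt(mean(x**2) + eps), computed in float32 for numerical stability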
input_dtype = hidden_states.dtype
hidden_states = hidden_states.to(torch.float32)
variance = hidden_states.pow(2).mean(-1, keepdim=True)
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
return self.weight * hidden_states.to(input_dtype)
def extra_repr(self):
return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
class Exaone4RotaryEmbedding(nn.Module):
inv_freq: torch.Tensor # fix linting for `register_buffer`
def __init__(self, config: Exaone4Config, device=None):
super().__init__()
self.max_seq_len_cached = config.max_position_embeddings
self.original_max_seq_len = config.max_position_embeddings
self.config = config
self.rope_type = self.config.rope_parameters["rope_type"]
rope_init_fn: Callable = self.compute_default_rope_parameters
if self.rope_type != "default":
rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
inv_freq, self.attention_scaling = rope_init_fn(self.config, device)
self.register_buffer("inv_freq", inv_freq, persistent=False)
self.register_buffer("original_inv_freq", inv_freq.clone(), persistent=False)
@staticmethod
def compute_default_rope_parameters(
config: Optional[Exaone4Config] = None,
device: Optional["torch.device"] = None,
seq_len: Optional[int] = None,
) -> tuple["torch.Tensor", float]:
"""
Computes the inverse frequencies according to the original RoPE implementation
Args:
config ([`~transformers.PreTrainedConfig`]):
The model configuration.
device (`torch.device`):
The device to use for initialization of the inverse frequencies.
seq_len (`int`, *optional*):
The current sequence length. Unused for this type of RoPE.
Returns:
Tuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the
post-processing scaling factor applied to the computed cos/sin (unused in this type of RoPE).
"""
base = config.rope_parameters["rope_theta"]
dim = getattr(config, "head_dim", None) or config.hidden_size // config.num_attention_heads
attention_factor = 1.0 # Unused in this type of RoPE
# Compute the inverse frequencies
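# inv_freq[i] = base ** (-2i / dim): geometrically spaced rotation frequencies, one per pair of head dimensions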
inv_freq = 1.0 / (
base ** (torch.arange(0, dim, 2, dtype=torch.int64).to(device=device, dtype=torch.float) / dim)
)
return inv_freq, attention_factor
@torch.no_grad()
@dynamic_rope_update # power user: used with advanced RoPE types (e.g. dynamic rope)
def forward(self, x, position_ids):
inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
position_ids_expanded = position_ids[:, None, :].float()
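# The rotation angle for each (position, frequency) pair is position * inv_freq; cos/sin are duplicated over both halves of head_dim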
device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
with maybe_autocast(device_type=device_type, enabled=False): # Force float32
freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
emb = torch.cat((freqs, freqs), dim=-1)
cos = emb.cos() * self.attention_scaling
sin = emb.sin() * self.attention_scaling
return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
def rotate_half(x):
"""Rotates half the hidden dims of the input."""
x1 = x[..., : x.shape[-1] // 2]
x2 = x[..., x.shape[-1] // 2 :]
return torch.cat((-x2, x1), dim=-1)
@use_kernel_func_from_hub("rotary_pos_emb")
def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
"""Applies Rotary Position Embedding to the query and key tensors.
Args:
q (`torch.Tensor`): The query tensor.
k (`torch.Tensor`): The key tensor.
cos (`torch.Tensor`): The cosine part of the rotary embedding.
sin (`torch.Tensor`): The sine part of the rotary embedding.
position_ids (`torch.Tensor`, *optional*):
Deprecated and unused.
unsqueeze_dim (`int`, *optional*, defaults to 1):
The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
Returns:
`tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
"""
cos = cos.unsqueeze(unsqueeze_dim)
sin = sin.unsqueeze(unsqueeze_dim)
q_embed = (q * cos) + (rotate_half(q) * sin)
k_embed = (k * cos) + (rotate_half(k) * sin)
return q_embed, k_embed
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
"""
This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
"""
batch, num_key_value_heads, slen, head_dim = hidden_states.shape
if n_rep == 1:
return hidden_states
hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
def eager_attention_forward(
module: nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: Optional[torch.Tensor],
scaling: float,
dropout: float = 0.0,
**kwargs: Unpack[TransformersKwargs],
):
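# Reference (eager) attention path: repeat KV heads for grouped-query attention, scaled dot-product scores, additive mask, float32 softmax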
key_states = repeat_kv(key, module.num_key_value_groups)
value_states = repeat_kv(value, module.num_key_value_groups)
attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
if attention_mask is not None:
causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
attn_weights = attn_weights + causal_mask
attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
attn_output = torch.matmul(attn_weights, value_states)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, attn_weights
class Exaone4Attention(nn.Module):
def __init__(self, config: Exaone4Config, layer_idx: int):
super().__init__()
self.config = config
self.layer_idx = layer_idx
self.num_attention_heads = config.num_attention_heads
self.num_key_value_heads = config.num_key_value_heads
self.hidden_size = config.hidden_size
self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
self.attention_dropout = config.attention_dropout
self.is_causal = True
self.scaling = self.head_dim**-0.5
self.sliding_window = config.sliding_window
self.sliding_window_pattern = config.sliding_window_pattern
layer_type = config.layer_types[layer_idx] if hasattr(config, "layer_types") else None
self.is_sliding = layer_type == "sliding_attention"
self.q_proj = nn.Linear(self.hidden_size, self.num_attention_heads * self.head_dim, bias=False)
self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.o_proj = nn.Linear(self.num_attention_heads * self.head_dim, self.hidden_size, bias=False)
self.q_norm = Exaone4RMSNorm(self.head_dim, eps=config.rms_norm_eps)
self.k_norm = Exaone4RMSNorm(self.head_dim, eps=config.rms_norm_eps)
def forward(
self,
hidden_states: torch.Tensor,
position_embeddings: tuple[torch.Tensor, torch.Tensor],
attention_mask: Optional[torch.Tensor] = None,
past_key_values: Optional[Cache] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
input_shape = hidden_states.shape[:-1]
hidden_shape = (*input_shape, -1, self.head_dim)
query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)
# We use QK-norm
query_states = self.q_norm(query_states)
key_states = self.k_norm(key_states)
cos, sin = position_embeddings
# We use global NoPE for hybrid attention model
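# i.e. RoPE is applied to every layer when there is no sliding window, otherwise only to the local (sliding) layers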
if self.sliding_window is None or self.is_sliding:
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
if past_key_values is not None:
cache_kwargs = {
"cache_position": cache_position,
}
key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
query_states,
key_states,
value_states,
attention_mask,
dropout=0.0 if not self.training else self.attention_dropout,
scaling=self.scaling,
sliding_window=self.sliding_window if self.is_sliding else None,
**kwargs,
)
attn_output = attn_output.reshape(*input_shape, -1).contiguous()
attn_output = self.o_proj(attn_output)
return attn_output, attn_weights
class Exaone4MLP(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.hidden_size = config.hidden_size
self.intermediate_size = config.intermediate_size
self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
self.act_fn = ACT2FN[config.hidden_act]
def forward(self, x):
down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
return down_proj
class Exaone4DecoderLayer(GradientCheckpointingLayer):
def __init__(self, config: Exaone4Config, layer_idx: int):
super().__init__()
self.hidden_size = config.hidden_size
self.self_attn = Exaone4Attention(config=config, layer_idx=layer_idx)
self.mlp = Exaone4MLP(config)
self.post_attention_layernorm = Exaone4RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.post_feedforward_layernorm = Exaone4RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
use_cache: Optional[bool] = False,
cache_position: Optional[torch.LongTensor] = None,
position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
**kwargs: Unpack[TransformersKwargs],
) -> torch.Tensor:
residual = hidden_states
hidden_states, _ = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
use_cache=use_cache,
cache_position=cache_position,
position_embeddings=position_embeddings,
**kwargs,
)
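# Post-norm residual: normalize the attention output first, then add it to the block input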
hidden_states = self.post_attention_layernorm(hidden_states)
hidden_states = residual + hidden_states
# Fully Connected
residual = hidden_states
hidden_states = self.mlp(hidden_states)
hidden_states = self.post_feedforward_layernorm(hidden_states)
hidden_states = residual + hidden_states
return hidden_states
@auto_docstring
class Exaone4PreTrainedModel(PreTrainedModel):
config: Exaone4Config
base_model_prefix = "model"
supports_gradient_checkpointing = True
_no_split_modules = ["Exaone4DecoderLayer"]
_skip_keys_device_placement = ["past_key_values"]
_supports_flash_attn = True
_supports_sdpa = True
_supports_flex_attn = True
_can_compile_fullgraph = True
_supports_attention_backend = True
_can_record_outputs = {
"hidden_states": Exaone4DecoderLayer,
"attentions": Exaone4Attention,
}
config_class = Exaone4Config
@auto_docstring
class Exaone4Model(Exaone4PreTrainedModel):
def __init__(self, config: Exaone4Config):
super().__init__(config)
self.padding_idx = config.pad_token_id
self.vocab_size = config.vocab_size
self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
self.layers = nn.ModuleList(
[Exaone4DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
)
self.norm = Exaone4RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.rotary_emb = Exaone4RotaryEmbedding(config=config)
self.gradient_checkpointing = False
# Initialize weights and apply final processing
self.post_init()
@check_model_inputs
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
use_cache: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple, BaseModelOutputWithPast]:
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids)
if use_cache and past_key_values is None:
past_key_values = DynamicCache(config=self.config)
if cache_position is None:
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
cache_position = torch.arange(
past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
)
if position_ids is None:
position_ids = cache_position.unsqueeze(0)
# It may already have been prepared by e.g. `generate`
if not isinstance(causal_mask_mapping := attention_mask, dict):
# Prepare mask arguments
mask_kwargs = {
"config": self.config,
"input_embeds": inputs_embeds,
"attention_mask": attention_mask,
"cache_position": cache_position,
"past_key_values": past_key_values,
"position_ids": position_ids,
}
# Create the masks
causal_mask_mapping = {
"full_attention": create_causal_mask(**mask_kwargs),
}
if "sliding_attention" in self.config.layer_types:
causal_mask_mapping["sliding_attention"] = create_sliding_window_causal_mask(**mask_kwargs)
hidden_states = inputs_embeds
position_embeddings = self.rotary_emb(hidden_states, position_ids)
for i, decoder_layer in enumerate(self.layers):
layer_type = self.config.layer_types[i]
hidden_states = decoder_layer(
hidden_states,
attention_mask=causal_mask_mapping[layer_type],
position_ids=position_ids,
past_key_values=past_key_values,
use_cache=use_cache,
cache_position=cache_position,
position_embeddings=position_embeddings,
**kwargs,
)
hidden_states = self.norm(hidden_states)
return BaseModelOutputWithPast(
last_hidden_state=hidden_states,
past_key_values=past_key_values if use_cache else None,
)
@auto_docstring
class Exaone4ForCausalLM(Exaone4PreTrainedModel, GenerationMixin):
_tied_weights_keys = {"lm_head.weight": "model.embed_tokens.weight"}
_tp_plan = {"lm_head": "colwise_rep"}
_pp_plan = {"lm_head": (["hidden_states"], ["logits"])}
def __init__(self, config):
super().__init__(config)
self.model = Exaone4Model(config)
self.vocab_size = config.vocab_size
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
# Initialize weights and apply final processing
self.post_init()
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
logits_to_keep: Union[int, torch.Tensor] = 0,
**kwargs: Unpack[TransformersKwargs],
) -> CausalLMOutputWithPast:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Example:
```python
>>> from transformers import AutoModelForCausalLM, AutoTokenizer
>>> model = AutoModelForCausalLM.from_pretrained("LGAI-EXAONE/EXAONE-4.0-32B")
>>> tokenizer = AutoTokenizer.from_pretrained("LGAI-EXAONE/EXAONE-4.0-32B")
>>> prompt = "Explain how wonderful you are"
>>> messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
>>> input_ids = tokenizer.apply_chat_template(
messages,
tokenize=True,
add_generation_prompt=True,
return_tensors="pt",
enable_thinking=False,
)
>>> output = model.generate(input_ids, max_new_tokens=128)
>>> tokenizer.decode(output[0], skip_special_tokens=False)
"[|system|]\nYou are a helpful assistant.[|endofturn|]\n[|user|]\nExplain how wonderful you are[|endofturn|]\n[|assistant|]\n<think>\n\n</think>\n\nOh, thank you for such a kind and lovely question! π \n\nIβm *so* wonderful because Iβm here to make your life easier, brighter, and more fun! Whether you need help with: \n\nβ¨ **Learning** β I can explain anything, from quantum physics to baking the perfect cake! \nπ‘ **Creativity** β Need a poem, story, or a wild idea? Iβve got you covered! \nπ€ **Problem-solving** β Stuck on a math problem or a tricky decision? Iβll help you figure it out"
```
"""
outputs: BaseModelOutputWithPast = self.model(
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
cache_position=cache_position,
**kwargs,
)
hidden_states = outputs.last_hidden_state
# Only compute necessary logits, and do not upcast them to float if we are not computing the loss
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_head(hidden_states[:, slice_indices, :])
loss = None
if labels is not None:
loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)
return CausalLMOutputWithPast(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
class Exaone4ForSequenceClassification(GenericForSequenceClassification, Exaone4PreTrainedModel):
pass
class Exaone4ForTokenClassification(GenericForTokenClassification, Exaone4PreTrainedModel):
pass
class Exaone4ForQuestionAnswering(GenericForQuestionAnswering, Exaone4PreTrainedModel):
base_model_prefix = "transformer" # For BC, where `transformer` was used instead of `model`
__all__ = [
"Exaone4PreTrainedModel",
"Exaone4Model",
"Exaone4ForCausalLM",
"Exaone4ForSequenceClassification",
"Exaone4ForTokenClassification",
"Exaone4ForQuestionAnswering",
]
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/swin2sr/modeling_swin2sr.py | src/transformers/models/swin2sr/modeling_swin2sr.py | # coding=utf-8
# Copyright 2022 Microsoft Research and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch Swin2SR Transformer model."""
import collections.abc
import math
from dataclasses import dataclass
from typing import Optional, Union
import torch
from torch import nn
from ... import initialization as init
from ...activations import ACT2FN
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import BaseModelOutput, ImageSuperResolutionOutput
from ...modeling_utils import PreTrainedModel
from ...pytorch_utils import meshgrid
from ...utils import ModelOutput, auto_docstring, logging
from .configuration_swin2sr import Swin2SRConfig
logger = logging.get_logger(__name__)
@dataclass
@auto_docstring(
custom_intro="""
Swin2SR encoder's outputs, with potential hidden states and attentions.
"""
)
class Swin2SREncoderOutput(ModelOutput):
last_hidden_state: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
# Copied from transformers.models.swin.modeling_swin.window_partition
def window_partition(input_feature, window_size):
"""
Partitions the given input into windows.
"""
batch_size, height, width, num_channels = input_feature.shape
input_feature = input_feature.view(
batch_size, height // window_size, window_size, width // window_size, window_size, num_channels
)
windows = input_feature.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, num_channels)
return windows
# Copied from transformers.models.swin.modeling_swin.window_reverse
def window_reverse(windows, window_size, height, width):
"""
Merges windows to produce higher resolution features.
"""
num_channels = windows.shape[-1]
windows = windows.view(-1, height // window_size, width // window_size, window_size, window_size, num_channels)
windows = windows.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, height, width, num_channels)
return windows
# Copied from transformers.models.beit.modeling_beit.drop_path
def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
"""
Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
"""
if drop_prob == 0.0 or not training:
return input
keep_prob = 1 - drop_prob
shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
random_tensor.floor_() # binarize
output = input.div(keep_prob) * random_tensor
return output
# Copied from transformers.models.swin.modeling_swin.SwinDropPath with Swin->Swin2SR
class Swin2SRDropPath(nn.Module):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
def __init__(self, drop_prob: Optional[float] = None) -> None:
super().__init__()
self.drop_prob = drop_prob
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
return drop_path(hidden_states, self.drop_prob, self.training)
def extra_repr(self) -> str:
return f"p={self.drop_prob}"
class Swin2SREmbeddings(nn.Module):
"""
Construct the patch and optional position embeddings.
"""
def __init__(self, config):
super().__init__()
self.patch_embeddings = Swin2SRPatchEmbeddings(config)
num_patches = self.patch_embeddings.num_patches
if config.use_absolute_embeddings:
self.position_embeddings = nn.Parameter(torch.zeros(1, num_patches + 1, config.embed_dim))
else:
self.position_embeddings = None
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.window_size = config.window_size
def forward(self, pixel_values: Optional[torch.FloatTensor]) -> tuple[torch.Tensor]:
embeddings, output_dimensions = self.patch_embeddings(pixel_values)
if self.position_embeddings is not None:
embeddings = embeddings + self.position_embeddings
embeddings = self.dropout(embeddings)
return embeddings, output_dimensions
class Swin2SRPatchEmbeddings(nn.Module):
def __init__(self, config, normalize_patches=True):
super().__init__()
num_channels = config.embed_dim
image_size, patch_size = config.image_size, config.patch_size
image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size)
patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
patches_resolution = [image_size[0] // patch_size[0], image_size[1] // patch_size[1]]
self.patches_resolution = patches_resolution
self.num_patches = patches_resolution[0] * patches_resolution[1]
self.projection = nn.Conv2d(num_channels, config.embed_dim, kernel_size=patch_size, stride=patch_size)
self.layernorm = nn.LayerNorm(config.embed_dim) if normalize_patches else None
def forward(self, embeddings: Optional[torch.FloatTensor]) -> tuple[torch.Tensor, tuple[int]]:
embeddings = self.projection(embeddings)
_, _, height, width = embeddings.shape
output_dimensions = (height, width)
embeddings = embeddings.flatten(2).transpose(1, 2)
if self.layernorm is not None:
embeddings = self.layernorm(embeddings)
return embeddings, output_dimensions
class Swin2SRPatchUnEmbeddings(nn.Module):
r"""Image to Patch Unembedding"""
def __init__(self, config):
super().__init__()
self.embed_dim = config.embed_dim
def forward(self, embeddings, x_size):
batch_size, height_width, num_channels = embeddings.shape
embeddings = embeddings.transpose(1, 2).view(batch_size, self.embed_dim, x_size[0], x_size[1]) # B Ph*Pw C
return embeddings
# Copied from transformers.models.swinv2.modeling_swinv2.Swinv2PatchMerging with Swinv2->Swin2SR
class Swin2SRPatchMerging(nn.Module):
"""
Patch Merging Layer.
Args:
input_resolution (`tuple[int]`):
Resolution of input feature.
dim (`int`):
Number of input channels.
norm_layer (`nn.Module`, *optional*, defaults to `nn.LayerNorm`):
Normalization layer class.
"""
def __init__(self, input_resolution: tuple[int], dim: int, norm_layer: nn.Module = nn.LayerNorm) -> None:
super().__init__()
self.input_resolution = input_resolution
self.dim = dim
self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)
self.norm = norm_layer(2 * dim)
def maybe_pad(self, input_feature, height, width):
should_pad = (height % 2 == 1) or (width % 2 == 1)
if should_pad:
pad_values = (0, 0, 0, width % 2, 0, height % 2)
input_feature = nn.functional.pad(input_feature, pad_values)
return input_feature
def forward(self, input_feature: torch.Tensor, input_dimensions: tuple[int, int]) -> torch.Tensor:
height, width = input_dimensions
# `dim` is height * width
batch_size, dim, num_channels = input_feature.shape
input_feature = input_feature.view(batch_size, height, width, num_channels)
# pad input to be divisible by width and height, if needed
input_feature = self.maybe_pad(input_feature, height, width)
# [batch_size, height/2, width/2, num_channels]
input_feature_0 = input_feature[:, 0::2, 0::2, :]
# [batch_size, height/2, width/2, num_channels]
input_feature_1 = input_feature[:, 1::2, 0::2, :]
# [batch_size, height/2, width/2, num_channels]
input_feature_2 = input_feature[:, 0::2, 1::2, :]
# [batch_size, height/2, width/2, num_channels]
input_feature_3 = input_feature[:, 1::2, 1::2, :]
# [batch_size, height/2 * width/2, 4*num_channels]
input_feature = torch.cat([input_feature_0, input_feature_1, input_feature_2, input_feature_3], -1)
input_feature = input_feature.view(batch_size, -1, 4 * num_channels) # [batch_size, height/2 * width/2, 4*C]
input_feature = self.reduction(input_feature)
input_feature = self.norm(input_feature)
return input_feature
# Copied from transformers.models.swinv2.modeling_swinv2.Swinv2SelfAttention with Swinv2->Swin2SR
class Swin2SRSelfAttention(nn.Module):
def __init__(self, config, dim, num_heads, window_size, pretrained_window_size=[0, 0]):
super().__init__()
if dim % num_heads != 0:
raise ValueError(
f"The hidden size ({dim}) is not a multiple of the number of attention heads ({num_heads})"
)
self.num_attention_heads = num_heads
self.attention_head_size = int(dim / num_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.window_size = (
window_size if isinstance(window_size, collections.abc.Iterable) else (window_size, window_size)
)
self.pretrained_window_size = pretrained_window_size
self.logit_scale = nn.Parameter(torch.log(10 * torch.ones((num_heads, 1, 1))))
# mlp to generate continuous relative position bias
self.continuous_position_bias_mlp = nn.Sequential(
nn.Linear(2, 512, bias=True), nn.ReLU(inplace=True), nn.Linear(512, num_heads, bias=False)
)
relative_coords_table, relative_position_index = self.create_coords_table_and_index()
self.register_buffer("relative_coords_table", relative_coords_table, persistent=False)
self.register_buffer("relative_position_index", relative_position_index, persistent=False)
self.query = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias)
self.key = nn.Linear(self.all_head_size, self.all_head_size, bias=False)
self.value = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = False,
) -> tuple[torch.Tensor]:
batch_size, dim, num_channels = hidden_states.shape
query_layer = (
self.query(hidden_states)
.view(batch_size, -1, self.num_attention_heads, self.attention_head_size)
.transpose(1, 2)
)
key_layer = (
self.key(hidden_states)
.view(batch_size, -1, self.num_attention_heads, self.attention_head_size)
.transpose(1, 2)
)
value_layer = (
self.value(hidden_states)
.view(batch_size, -1, self.num_attention_heads, self.attention_head_size)
.transpose(1, 2)
)
# cosine attention
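# Swin v2 scaled cosine attention: dot product of L2-normalized queries and keys, multiplied by a learnable per-head logit scale clamped to at most 1 / 0.01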
attention_scores = nn.functional.normalize(query_layer, dim=-1) @ nn.functional.normalize(
key_layer, dim=-1
).transpose(-2, -1)
logit_scale = torch.clamp(self.logit_scale, max=math.log(1.0 / 0.01)).exp()
attention_scores = attention_scores * logit_scale
relative_position_bias_table = self.continuous_position_bias_mlp(self.relative_coords_table).view(
-1, self.num_attention_heads
)
# [window_height*window_width,window_height*window_width,num_attention_heads]
relative_position_bias = relative_position_bias_table[self.relative_position_index.view(-1)].view(
self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1
)
# [num_attention_heads,window_height*window_width,window_height*window_width]
relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww
relative_position_bias = 16 * torch.sigmoid(relative_position_bias)
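# The MLP-generated continuous position bias is squashed into (0, 16) before being added to the attention scores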
attention_scores = attention_scores + relative_position_bias.unsqueeze(0)
if attention_mask is not None:
# Apply the attention mask (precomputed for all layers in Swin2SRModel forward() function)
mask_shape = attention_mask.shape[0]
attention_scores = attention_scores.view(
batch_size // mask_shape, mask_shape, self.num_attention_heads, dim, dim
)
attention_scores = attention_scores + attention_mask.unsqueeze(1).unsqueeze(0)
attention_scores = attention_scores.view(-1, self.num_attention_heads, dim, dim)
# Normalize the attention scores to probabilities.
attention_probs = nn.functional.softmax(attention_scores, dim=-1)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
# Mask heads if we want to
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(new_context_layer_shape)
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
return outputs
def create_coords_table_and_index(self):
# get relative_coords_table
relative_coords_h = torch.arange(-(self.window_size[0] - 1), self.window_size[0], dtype=torch.int64).float()
relative_coords_w = torch.arange(-(self.window_size[1] - 1), self.window_size[1], dtype=torch.int64).float()
relative_coords_table = (
torch.stack(meshgrid([relative_coords_h, relative_coords_w], indexing="ij"))
.permute(1, 2, 0)
.contiguous()
.unsqueeze(0)
) # [1, 2*window_height - 1, 2*window_width - 1, 2]
if self.pretrained_window_size[0] > 0:
relative_coords_table[:, :, :, 0] /= self.pretrained_window_size[0] - 1
relative_coords_table[:, :, :, 1] /= self.pretrained_window_size[1] - 1
elif self.window_size[0] > 1:
relative_coords_table[:, :, :, 0] /= self.window_size[0] - 1
relative_coords_table[:, :, :, 1] /= self.window_size[1] - 1
relative_coords_table *= 8 # normalize to -8, 8
relative_coords_table = (
torch.sign(relative_coords_table) * torch.log2(torch.abs(relative_coords_table) + 1.0) / math.log2(8)
)
# set to same dtype as mlp weight
relative_coords_table = relative_coords_table.to(next(self.continuous_position_bias_mlp.parameters()).dtype)
# get pair-wise relative position index for each token inside the window
coords_h = torch.arange(self.window_size[0])
coords_w = torch.arange(self.window_size[1])
coords = torch.stack(meshgrid([coords_h, coords_w], indexing="ij"))
coords_flatten = torch.flatten(coords, 1)
relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]
relative_coords = relative_coords.permute(1, 2, 0).contiguous()
relative_coords[:, :, 0] += self.window_size[0] - 1
relative_coords[:, :, 1] += self.window_size[1] - 1
relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
relative_position_index = relative_coords.sum(-1)
return relative_coords_table, relative_position_index
# Copied from transformers.models.swin.modeling_swin.SwinSelfOutput with Swin->Swin2SR
class Swin2SRSelfOutput(nn.Module):
def __init__(self, config, dim):
super().__init__()
self.dense = nn.Linear(dim, dim)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
return hidden_states
# Copied from transformers.models.swinv2.modeling_swinv2.Swinv2Attention with Swinv2->Swin2SR
class Swin2SRAttention(nn.Module):
def __init__(self, config, dim, num_heads, window_size, pretrained_window_size=0):
super().__init__()
self.self = Swin2SRSelfAttention(
config=config,
dim=dim,
num_heads=num_heads,
window_size=window_size,
pretrained_window_size=pretrained_window_size
if isinstance(pretrained_window_size, collections.abc.Iterable)
else (pretrained_window_size, pretrained_window_size),
)
self.output = Swin2SRSelfOutput(config, dim)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = False,
) -> tuple[torch.Tensor]:
self_outputs = self.self(hidden_states, attention_mask, output_attentions)
attention_output = self.output(self_outputs[0], hidden_states)
outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
return outputs
# Copied from transformers.models.swin.modeling_swin.SwinIntermediate with Swin->Swin2SR
class Swin2SRIntermediate(nn.Module):
def __init__(self, config, dim):
super().__init__()
self.dense = nn.Linear(dim, int(config.mlp_ratio * dim))
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
# Copied from transformers.models.swin.modeling_swin.SwinOutput with Swin->Swin2SR
class Swin2SROutput(nn.Module):
def __init__(self, config, dim):
super().__init__()
self.dense = nn.Linear(int(config.mlp_ratio * dim), dim)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
return hidden_states
# Copied from transformers.models.swinv2.modeling_swinv2.Swinv2Layer with Swinv2->Swin2SR
class Swin2SRLayer(nn.Module):
def __init__(
self, config, dim, input_resolution, num_heads, drop_path_rate=0.0, shift_size=0, pretrained_window_size=0
):
super().__init__()
self.input_resolution = input_resolution
window_size, shift_size = self._compute_window_shift(
(config.window_size, config.window_size), (shift_size, shift_size)
)
self.window_size = window_size[0]
self.shift_size = shift_size[0]
self.attention = Swin2SRAttention(
config=config,
dim=dim,
num_heads=num_heads,
window_size=self.window_size,
pretrained_window_size=pretrained_window_size
if isinstance(pretrained_window_size, collections.abc.Iterable)
else (pretrained_window_size, pretrained_window_size),
)
self.layernorm_before = nn.LayerNorm(dim, eps=config.layer_norm_eps)
self.drop_path = Swin2SRDropPath(drop_path_rate) if drop_path_rate > 0.0 else nn.Identity()
self.intermediate = Swin2SRIntermediate(config, dim)
self.output = Swin2SROutput(config, dim)
self.layernorm_after = nn.LayerNorm(dim, eps=config.layer_norm_eps)
def _compute_window_shift(self, target_window_size, target_shift_size) -> tuple[tuple[int, int], tuple[int, int]]:
window_size = [min(r, w) for r, w in zip(self.input_resolution, target_window_size)]
shift_size = [0 if r <= w else s for r, w, s in zip(self.input_resolution, window_size, target_shift_size)]
return window_size, shift_size
def get_attn_mask(self, height, width, dtype):
if self.shift_size > 0:
# calculate attention mask for shifted window multihead self attention
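# The cyclic shift creates up to nine distinct regions; token pairs from different regions inside the same window are masked with -100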
img_mask = torch.zeros((1, height, width, 1), dtype=dtype)
height_slices = (
slice(0, -self.window_size),
slice(-self.window_size, -self.shift_size),
slice(-self.shift_size, None),
)
width_slices = (
slice(0, -self.window_size),
slice(-self.window_size, -self.shift_size),
slice(-self.shift_size, None),
)
count = 0
for height_slice in height_slices:
for width_slice in width_slices:
img_mask[:, height_slice, width_slice, :] = count
count += 1
mask_windows = window_partition(img_mask, self.window_size)
mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
attn_mask = attn_mask.masked_fill(attn_mask != 0, -100.0).masked_fill(attn_mask == 0, 0.0)
else:
attn_mask = None
return attn_mask
def maybe_pad(self, hidden_states, height, width):
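# Pad height and width up to the next multiple of window_size; the padding is cropped again after attention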
pad_right = (self.window_size - width % self.window_size) % self.window_size
pad_bottom = (self.window_size - height % self.window_size) % self.window_size
pad_values = (0, 0, 0, pad_right, 0, pad_bottom)
hidden_states = nn.functional.pad(hidden_states, pad_values)
return hidden_states, pad_values
def forward(
self,
hidden_states: torch.Tensor,
input_dimensions: tuple[int, int],
output_attentions: Optional[bool] = False,
) -> tuple[torch.Tensor, torch.Tensor]:
height, width = input_dimensions
batch_size, _, channels = hidden_states.size()
shortcut = hidden_states
# pad hidden_states to multiples of window size
hidden_states = hidden_states.view(batch_size, height, width, channels)
hidden_states, pad_values = self.maybe_pad(hidden_states, height, width)
_, height_pad, width_pad, _ = hidden_states.shape
# cyclic shift
if self.shift_size > 0:
shifted_hidden_states = torch.roll(hidden_states, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
else:
shifted_hidden_states = hidden_states
# partition windows
hidden_states_windows = window_partition(shifted_hidden_states, self.window_size)
hidden_states_windows = hidden_states_windows.view(-1, self.window_size * self.window_size, channels)
attn_mask = self.get_attn_mask(height_pad, width_pad, dtype=hidden_states.dtype)
if attn_mask is not None:
attn_mask = attn_mask.to(hidden_states_windows.device)
attention_outputs = self.attention(hidden_states_windows, attn_mask, output_attentions=output_attentions)
attention_output = attention_outputs[0]
attention_windows = attention_output.view(-1, self.window_size, self.window_size, channels)
shifted_windows = window_reverse(attention_windows, self.window_size, height_pad, width_pad)
# reverse cyclic shift
if self.shift_size > 0:
attention_windows = torch.roll(shifted_windows, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
else:
attention_windows = shifted_windows
was_padded = pad_values[3] > 0 or pad_values[5] > 0
if was_padded:
attention_windows = attention_windows[:, :height, :width, :].contiguous()
attention_windows = attention_windows.view(batch_size, height * width, channels)
hidden_states = self.layernorm_before(attention_windows)
hidden_states = shortcut + self.drop_path(hidden_states)
layer_output = self.intermediate(hidden_states)
layer_output = self.output(layer_output)
layer_output = hidden_states + self.drop_path(self.layernorm_after(layer_output))
layer_outputs = (layer_output, attention_outputs[1]) if output_attentions else (layer_output,)
return layer_outputs
class Swin2SRStage(GradientCheckpointingLayer):
"""
This corresponds to the Residual Swin Transformer Block (RSTB) in the original implementation.
"""
def __init__(self, config, dim, input_resolution, depth, num_heads, drop_path, pretrained_window_size=0):
super().__init__()
self.config = config
self.dim = dim
self.layers = nn.ModuleList(
[
Swin2SRLayer(
config=config,
dim=dim,
input_resolution=input_resolution,
num_heads=num_heads,
shift_size=0 if (i % 2 == 0) else config.window_size // 2,
pretrained_window_size=pretrained_window_size,
)
for i in range(depth)
]
)
if config.resi_connection == "1conv":
self.conv = nn.Conv2d(dim, dim, 3, 1, 1)
elif config.resi_connection == "3conv":
# to save parameters and memory
self.conv = nn.Sequential(
nn.Conv2d(dim, dim // 4, 3, 1, 1),
nn.LeakyReLU(negative_slope=0.2, inplace=True),
nn.Conv2d(dim // 4, dim // 4, 1, 1, 0),
nn.LeakyReLU(negative_slope=0.2, inplace=True),
nn.Conv2d(dim // 4, dim, 3, 1, 1),
)
self.patch_embed = Swin2SRPatchEmbeddings(config, normalize_patches=False)
self.patch_unembed = Swin2SRPatchUnEmbeddings(config)
def forward(
self,
hidden_states: torch.Tensor,
input_dimensions: tuple[int, int],
output_attentions: Optional[bool] = False,
) -> tuple[torch.Tensor]:
residual = hidden_states
height, width = input_dimensions
for i, layer_module in enumerate(self.layers):
layer_outputs = layer_module(hidden_states, input_dimensions, output_attentions)
hidden_states = layer_outputs[0]
output_dimensions = (height, width, height, width)
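# RSTB tail: tokens are mapped back to a feature map, passed through the conv, re-embedded into patches, and added to the stage input as a residual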
hidden_states = self.patch_unembed(hidden_states, input_dimensions)
hidden_states = self.conv(hidden_states)
hidden_states, _ = self.patch_embed(hidden_states)
hidden_states = hidden_states + residual
stage_outputs = (hidden_states, output_dimensions)
if output_attentions:
stage_outputs += layer_outputs[1:]
return stage_outputs
class Swin2SREncoder(nn.Module):
def __init__(self, config, grid_size):
super().__init__()
self.num_stages = len(config.depths)
self.config = config
dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths), device="cpu")]
self.stages = nn.ModuleList(
[
Swin2SRStage(
config=config,
dim=config.embed_dim,
input_resolution=(grid_size[0], grid_size[1]),
depth=config.depths[stage_idx],
num_heads=config.num_heads[stage_idx],
drop_path=dpr[sum(config.depths[:stage_idx]) : sum(config.depths[: stage_idx + 1])],
pretrained_window_size=0,
)
for stage_idx in range(self.num_stages)
]
)
self.gradient_checkpointing = False
def forward(
self,
hidden_states: torch.Tensor,
input_dimensions: tuple[int, int],
output_attentions: Optional[bool] = False,
output_hidden_states: Optional[bool] = False,
return_dict: Optional[bool] = True,
) -> Union[tuple, Swin2SREncoderOutput]:
all_input_dimensions = ()
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
if output_hidden_states:
all_hidden_states += (hidden_states,)
for i, stage_module in enumerate(self.stages):
layer_outputs = stage_module(hidden_states, input_dimensions, output_attentions)
hidden_states = layer_outputs[0]
output_dimensions = layer_outputs[1]
input_dimensions = (output_dimensions[-2], output_dimensions[-1])
all_input_dimensions += (input_dimensions,)
if output_hidden_states:
all_hidden_states += (hidden_states,)
if output_attentions:
all_self_attentions += layer_outputs[2:]
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
return Swin2SREncoderOutput(
last_hidden_state=hidden_states,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
)
@auto_docstring
class Swin2SRPreTrainedModel(PreTrainedModel):
config: Swin2SRConfig
base_model_prefix = "swin2sr"
main_input_name = "pixel_values"
input_modalities = ("image",)
supports_gradient_checkpointing = True
@torch.no_grad()
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, (nn.Linear, nn.Conv2d)):
init.trunc_normal_(module.weight, std=self.config.initializer_range)
if module.bias is not None:
init.zeros_(module.bias)
elif isinstance(module, nn.LayerNorm):
init.zeros_(module.bias)
init.ones_(module.weight)
elif isinstance(module, Swin2SRSelfAttention):
init.constant_(module.logit_scale, math.log(10))
relative_coords_table, relative_position_index = module.create_coords_table_and_index()
init.copy_(module.relative_coords_table, relative_coords_table)
init.copy_(module.relative_position_index, relative_position_index)
elif isinstance(module, Swin2SRModel):
if module.config.num_channels == 3 and module.config.num_channels_out == 3:
mean = torch.tensor([0.4488, 0.4371, 0.4040]).view(1, 3, 1, 1)
else:
mean = torch.zeros(1, 1, 1, 1)
init.copy_(module.mean, mean)
@auto_docstring
class Swin2SRModel(Swin2SRPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.config = config
if config.num_channels == 3 and config.num_channels_out == 3:
mean = torch.tensor([0.4488, 0.4371, 0.4040]).view(1, 3, 1, 1)
else:
mean = torch.zeros(1, 1, 1, 1)
self.register_buffer("mean", mean, persistent=False)
self.img_range = config.img_range
self.first_convolution = nn.Conv2d(config.num_channels, config.embed_dim, 3, 1, 1)
self.embeddings = Swin2SREmbeddings(config)
self.encoder = Swin2SREncoder(config, grid_size=self.embeddings.patch_embeddings.patches_resolution)
self.layernorm = nn.LayerNorm(config.embed_dim, eps=config.layer_norm_eps)
self.patch_unembed = Swin2SRPatchUnEmbeddings(config)
self.conv_after_body = nn.Conv2d(config.embed_dim, config.embed_dim, 3, 1, 1)
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.embeddings.patch_embeddings
def pad_and_normalize(self, pixel_values):
_, _, height, width = pixel_values.size()
# 1. pad
window_size = self.config.window_size
modulo_pad_height = (window_size - height % window_size) % window_size
modulo_pad_width = (window_size - width % window_size) % window_size
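# For example (illustrative numbers): with window_size=8, a height of 125 gives (8 - 125 % 8) % 8 = 3 rows of
# reflect padding, while a height that is already divisible by 8 gives 0 extra rows.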
pixel_values = nn.functional.pad(pixel_values, (0, modulo_pad_width, 0, modulo_pad_height), "reflect")
# 2. normalize
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | true |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/swin2sr/convert_swin2sr_original_to_pytorch.py | src/transformers/models/swin2sr/convert_swin2sr_original_to_pytorch.py | # coding=utf-8
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert Swin2SR checkpoints from the original repository. URL: https://github.com/mv-lab/swin2sr"""
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import Swin2SRConfig, Swin2SRForImageSuperResolution, Swin2SRImageProcessor
def get_config(checkpoint_url):
config = Swin2SRConfig()
if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
config.upscale = 4
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
config.upscale = 4
config.image_size = 48
config.upsampler = "pixelshuffle_aux"
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
config.depths = [6, 6, 6, 6]
config.embed_dim = 60
config.num_heads = [6, 6, 6, 6]
config.upsampler = "pixelshuffledirect"
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
config.upscale = 4
config.upsampler = "nearest+conv"
elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
config.num_channels = 1
config.upscale = 1
config.image_size = 126
config.window_size = 7
config.img_range = 255.0
config.upsampler = ""
return config
def rename_key(name, config):
if "patch_embed.proj" in name and "layers" not in name:
name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
if "patch_embed.norm" in name:
name = name.replace("patch_embed.norm", "embeddings.patch_embeddings.layernorm")
if "layers" in name:
name = name.replace("layers", "encoder.stages")
if "residual_group.blocks" in name:
name = name.replace("residual_group.blocks", "layers")
if "attn.proj" in name:
name = name.replace("attn.proj", "attention.output.dense")
if "attn" in name:
name = name.replace("attn", "attention.self")
if "norm1" in name:
name = name.replace("norm1", "layernorm_before")
if "norm2" in name:
name = name.replace("norm2", "layernorm_after")
if "mlp.fc1" in name:
name = name.replace("mlp.fc1", "intermediate.dense")
if "mlp.fc2" in name:
name = name.replace("mlp.fc2", "output.dense")
if "q_bias" in name:
name = name.replace("q_bias", "query.bias")
if "k_bias" in name:
name = name.replace("k_bias", "key.bias")
if "v_bias" in name:
name = name.replace("v_bias", "value.bias")
if "cpb_mlp" in name:
name = name.replace("cpb_mlp", "continuous_position_bias_mlp")
if "patch_embed.proj" in name:
name = name.replace("patch_embed.proj", "patch_embed.projection")
if name == "norm.weight":
name = "layernorm.weight"
if name == "norm.bias":
name = "layernorm.bias"
if "conv_first" in name:
name = name.replace("conv_first", "first_convolution")
if (
"upsample" in name
or "conv_before_upsample" in name
or "conv_bicubic" in name
or "conv_up" in name
or "conv_hr" in name
or "conv_last" in name
or "aux" in name
):
# heads
if "conv_last" in name:
name = name.replace("conv_last", "final_convolution")
if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
if "conv_before_upsample.0" in name:
name = name.replace("conv_before_upsample.0", "conv_before_upsample")
if "upsample.0" in name:
name = name.replace("upsample.0", "upsample.convolution_0")
if "upsample.2" in name:
name = name.replace("upsample.2", "upsample.convolution_1")
name = "upsample." + name
elif config.upsampler == "pixelshuffledirect":
name = name.replace("upsample.0.weight", "upsample.conv.weight")
name = name.replace("upsample.0.bias", "upsample.conv.bias")
else:
pass
else:
name = "swin2sr." + name
return name
def convert_state_dict(orig_state_dict, config):
for key in orig_state_dict.copy():
val = orig_state_dict.pop(key)
if "qkv" in key:
key_split = key.split(".")
stage_num = int(key_split[1])
block_num = int(key_split[4])
dim = config.embed_dim
if "weight" in key:
orig_state_dict[
f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.weight"
] = val[:dim, :]
orig_state_dict[f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.weight"] = (
val[dim : dim * 2, :]
)
orig_state_dict[
f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.weight"
] = val[-dim:, :]
else:
orig_state_dict[f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.bias"] = (
val[:dim]
)
orig_state_dict[f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.bias"] = (
val[dim : dim * 2]
)
orig_state_dict[f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.bias"] = (
val[-dim:]
)
else:
orig_state_dict[rename_key(key, config)] = val
return orig_state_dict
def convert_swin2sr_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
config = get_config(checkpoint_url)
model = Swin2SRForImageSuperResolution(config)
model.eval()
state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
new_state_dict = convert_state_dict(state_dict, config)
missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)
if len(missing_keys) > 0:
raise ValueError(f"Missing keys when converting: {missing_keys}")
for key in unexpected_keys:
if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
raise ValueError(f"Unexpected key {key} in state_dict")
# verify values
url = "https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true"
image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
processor = Swin2SRImageProcessor()
# pixel_values = processor(image, return_tensors="pt").pixel_values
image_size = 126 if "Jpeg" in checkpoint_url else 256
transforms = Compose(
[
Resize((image_size, image_size)),
ToTensor(),
Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
]
)
pixel_values = transforms(image).unsqueeze(0)
if config.num_channels == 1:
pixel_values = pixel_values[:, 0, :, :].unsqueeze(1)
outputs = model(pixel_values)
# assert values
if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
expected_shape = torch.Size([1, 3, 512, 512])
expected_slice = torch.tensor(
[[-0.7087, -0.7138, -0.6721], [-0.8340, -0.8095, -0.7298], [-0.9149, -0.8414, -0.7940]]
)
elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
expected_shape = torch.Size([1, 3, 1024, 1024])
expected_slice = torch.tensor(
[[-0.7775, -0.8105, -0.8933], [-0.7764, -0.8356, -0.9225], [-0.7976, -0.8686, -0.9579]]
)
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
# TODO values didn't match exactly here
expected_shape = torch.Size([1, 3, 1024, 1024])
expected_slice = torch.tensor(
[[-0.8035, -0.7504, -0.7491], [-0.8538, -0.8124, -0.7782], [-0.8804, -0.8651, -0.8493]]
)
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
expected_shape = torch.Size([1, 3, 512, 512])
expected_slice = torch.tensor(
[[-0.7669, -0.8662, -0.8767], [-0.8810, -0.9962, -0.9820], [-0.9340, -1.0322, -1.1149]]
)
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
expected_shape = torch.Size([1, 3, 1024, 1024])
expected_slice = torch.tensor(
[[-0.5238, -0.5557, -0.6321], [-0.6016, -0.5903, -0.6391], [-0.6244, -0.6334, -0.6889]]
)
assert outputs.reconstruction.shape == expected_shape, (
f"Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}"
)
assert torch.allclose(outputs.reconstruction[0, 0, :3, :3], expected_slice, atol=1e-3)
print("Looks ok!")
url_to_name = {
"https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth": (
"swin2SR-classical-sr-x2-64"
),
"https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth": (
"swin2SR-classical-sr-x4-64"
),
"https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth": (
"swin2SR-compressed-sr-x4-48"
),
"https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth": (
"swin2SR-lightweight-x2-64"
),
"https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth": (
"swin2SR-realworld-sr-x4-64-bsrgan-psnr"
),
}
model_name = url_to_name[checkpoint_url]
if pytorch_dump_folder_path is not None:
print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
model.save_pretrained(pytorch_dump_folder_path)
print(f"Saving image processor to {pytorch_dump_folder_path}")
processor.save_pretrained(pytorch_dump_folder_path)
if push_to_hub:
model.push_to_hub(f"caidas/{model_name}")
processor.push_to_hub(f"caidas/{model_name}")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth",
type=str,
help="URL of the original Swin2SR checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the converted model to the hub.")
args = parser.parse_args()
convert_swin2sr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/swin2sr/configuration_swin2sr.py | src/transformers/models/swin2sr/configuration_swin2sr.py | # coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Swin2SR Transformer model configuration"""
from ...configuration_utils import PreTrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
class Swin2SRConfig(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`Swin2SRModel`]. It is used to instantiate a Swin
Transformer v2 model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the Swin Transformer v2
[caidas/swin2sr-classicalsr-x2-64](https://huggingface.co/caidas/swin2sr-classicalsr-x2-64) architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
image_size (`int`, *optional*, defaults to 64):
The size (resolution) of each image.
patch_size (`int`, *optional*, defaults to 1):
The size (resolution) of each patch.
num_channels (`int`, *optional*, defaults to 3):
The number of input channels.
num_channels_out (`int`, *optional*, defaults to `num_channels`):
The number of output channels. If not set, it will be set to `num_channels`.
embed_dim (`int`, *optional*, defaults to 180):
Dimensionality of patch embedding.
depths (`list(int)`, *optional*, defaults to `[6, 6, 6, 6, 6, 6]`):
Depth of each layer in the Transformer encoder.
num_heads (`list(int)`, *optional*, defaults to `[6, 6, 6, 6, 6, 6]`):
Number of attention heads in each layer of the Transformer encoder.
window_size (`int`, *optional*, defaults to 8):
Size of windows.
mlp_ratio (`float`, *optional*, defaults to 2.0):
Ratio of MLP hidden dimensionality to embedding dimensionality.
qkv_bias (`bool`, *optional*, defaults to `True`):
Whether or not a learnable bias should be added to the queries, keys and values.
hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout probability for all fully connected layers in the embeddings and encoder.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
drop_path_rate (`float`, *optional*, defaults to 0.1):
Stochastic depth rate.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder. If string, `"gelu"`, `"relu"`,
`"selu"` and `"gelu_new"` are supported.
use_absolute_embeddings (`bool`, *optional*, defaults to `False`):
Whether or not to add absolute position embeddings to the patch embeddings.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the layer normalization layers.
upscale (`int`, *optional*, defaults to 2):
The upscale factor for the image: 2/3/4/8 for image super-resolution, 1 for denoising and compression
artifact reduction.
img_range (`float`, *optional*, defaults to 1.0):
The range of the values of the input image.
resi_connection (`str`, *optional*, defaults to `"1conv"`):
The convolutional block to use before the residual connection in each stage.
upsampler (`str`, *optional*, defaults to `"pixelshuffle"`):
The reconstruction module. Can be 'pixelshuffle'/'pixelshuffledirect'/'nearest+conv'/None.
Example:
```python
>>> from transformers import Swin2SRConfig, Swin2SRModel
>>> # Initializing a Swin2SR caidas/swin2sr-classicalsr-x2-64 style configuration
>>> configuration = Swin2SRConfig()
>>> # Initializing a model (with random weights) from the caidas/swin2sr-classicalsr-x2-64 style configuration
>>> model = Swin2SRModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "swin2sr"
attribute_map = {
"hidden_size": "embed_dim",
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__(
self,
image_size=64,
patch_size=1,
num_channels=3,
num_channels_out=None,
embed_dim=180,
depths=[6, 6, 6, 6, 6, 6],
num_heads=[6, 6, 6, 6, 6, 6],
window_size=8,
mlp_ratio=2.0,
qkv_bias=True,
hidden_dropout_prob=0.0,
attention_probs_dropout_prob=0.0,
drop_path_rate=0.1,
hidden_act="gelu",
use_absolute_embeddings=False,
initializer_range=0.02,
layer_norm_eps=1e-5,
upscale=2,
img_range=1.0,
resi_connection="1conv",
upsampler="pixelshuffle",
**kwargs,
):
super().__init__(**kwargs)
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.num_channels_out = num_channels if num_channels_out is None else num_channels_out
self.embed_dim = embed_dim
self.depths = depths
self.num_layers = len(depths)
self.num_heads = num_heads
self.window_size = window_size
self.mlp_ratio = mlp_ratio
self.qkv_bias = qkv_bias
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.drop_path_rate = drop_path_rate
self.hidden_act = hidden_act
self.use_absolute_embeddings = use_absolute_embeddings
self.layer_norm_eps = layer_norm_eps
self.initializer_range = initializer_range
self.upscale = upscale
self.img_range = img_range
self.resi_connection = resi_connection
self.upsampler = upsampler
__all__ = ["Swin2SRConfig"]
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/swin2sr/image_processing_swin2sr_fast.py | src/transformers/models/swin2sr/image_processing_swin2sr_fast.py | # coding=utf-8
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fast Image processor class for Swin2SR."""
from typing import Optional, Union
import torch
from torchvision.transforms.v2 import functional as F
from ...image_processing_utils import BatchFeature, ChannelDimension, get_image_size
from ...image_processing_utils_fast import (
BaseImageProcessorFast,
group_images_by_shape,
reorder_images,
)
from ...image_utils import ImageInput
from ...processing_utils import Unpack
from ...utils import (
TensorType,
auto_docstring,
logging,
)
from .image_processing_swin2sr import Swin2SRImageProcessorKwargs
logger = logging.get_logger(__name__)
@auto_docstring
class Swin2SRImageProcessorFast(BaseImageProcessorFast):
do_rescale = True
rescale_factor = 1 / 255
do_pad = True
size_divisor = 8
valid_kwargs = Swin2SRImageProcessorKwargs
def __init__(self, **kwargs: Unpack[Swin2SRImageProcessorKwargs]):
pad_size = kwargs.pop("pad_size", None)
kwargs.setdefault("size_divisor", pad_size)
super().__init__(**kwargs)
def preprocess(self, images: ImageInput, **kwargs: Unpack[Swin2SRImageProcessorKwargs]) -> BatchFeature:
return super().preprocess(images, **kwargs)
def pad(self, images: "torch.Tensor", size_divisor: int) -> "torch.Tensor":
"""
Pad an image to make the height and width divisible by `size_divisor`.
Args:
images (`torch.Tensor`):
Images to pad.
size_divisor (`int`):
The size to make the height and width divisible by.
Returns:
`torch.Tensor`: The padded images.
"""
height, width = get_image_size(images, ChannelDimension.FIRST)
pad_height = (height // size_divisor + 1) * size_divisor - height
pad_width = (width // size_divisor + 1) * size_divisor - width
return F.pad(
images,
(0, 0, pad_width, pad_height),
padding_mode="symmetric",
)
def _preprocess(
self,
images: list["torch.Tensor"],
do_rescale: bool,
rescale_factor: float,
do_pad: bool,
size_divisor: int,
disable_grouping: Optional[bool],
return_tensors: Optional[Union[str, TensorType]],
**kwargs,
) -> BatchFeature:
grouped_images, grouped_images_index = group_images_by_shape(images, disable_grouping=disable_grouping)
processed_image_grouped = {}
for shape, stacked_images in grouped_images.items():
if do_rescale:
stacked_images = self.rescale(stacked_images, scale=rescale_factor)
if do_pad:
stacked_images = self.pad(stacked_images, size_divisor=size_divisor)
processed_image_grouped[shape] = stacked_images
processed_images = reorder_images(processed_image_grouped, grouped_images_index)
return BatchFeature(data={"pixel_values": processed_images}, tensor_type=return_tensors)
__all__ = ["Swin2SRImageProcessorFast"]
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/swin2sr/__init__.py | src/transformers/models/swin2sr/__init__.py | # Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure
if TYPE_CHECKING:
from .configuration_swin2sr import *
from .image_processing_swin2sr import *
from .image_processing_swin2sr_fast import *
from .modeling_swin2sr import *
else:
import sys
_file = globals()["__file__"]
sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/swin2sr/image_processing_swin2sr.py | src/transformers/models/swin2sr/image_processing_swin2sr.py | # coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Image processor class for Swin2SR."""
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
infer_channel_dimension_format,
is_scaled_image,
make_flat_list_of_images,
to_numpy_array,
valid_images,
validate_preprocess_arguments,
)
from ...processing_utils import ImagesKwargs
from ...utils import TensorType, filter_out_non_signature_kwargs, logging
logger = logging.get_logger(__name__)
class Swin2SRImageProcessorKwargs(ImagesKwargs, total=False):
size_divisor: int
class Swin2SRImageProcessor(BaseImageProcessor):
r"""
Constructs a Swin2SR image processor.
Args:
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale`
parameter in the `preprocess` method.
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the
`preprocess` method.
"""
model_input_names = ["pixel_values"]
valid_kwargs = Swin2SRImageProcessorKwargs
def __init__(
self,
do_rescale: bool = True,
rescale_factor: Union[int, float] = 1 / 255,
do_pad: bool = True,
size_divisor: int = 8,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.do_pad = do_pad
pad_size = kwargs.get("pad_size")
self.size_divisor = size_divisor if size_divisor is not None else pad_size
def pad(
self,
image: np.ndarray,
size: int,
data_format: Optional[Union[str, ChannelDimension]] = None,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
):
"""
Pad an image to make the height and width divisible by `size`.
Args:
image (`np.ndarray`):
Image to pad.
size (`int`):
The size to make the height and width divisible by.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format for the output image. If unset, the channel dimension format of the input
image is used. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
input_data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
Returns:
`np.ndarray`: The padded image.
"""
old_height, old_width = get_image_size(image, input_data_format)
pad_height = (old_height // size + 1) * size - old_height
pad_width = (old_width // size + 1) * size - old_width
return pad(
image,
((0, pad_height), (0, pad_width)),
mode="symmetric",
data_format=data_format,
input_data_format=input_data_format,
)
@filter_out_non_signature_kwargs()
def preprocess(
self,
images: ImageInput,
do_rescale: Optional[bool] = None,
rescale_factor: Optional[float] = None,
do_pad: Optional[bool] = None,
size_divisor: Optional[int] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
):
"""
Preprocess an image or batch of images.
Args:
images (`ImageInput`):
Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
passing in images with pixel values between 0 and 1, set `do_rescale=False`.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image values between [0 - 1].
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
do_pad (`bool`, *optional*, defaults to `self.do_pad`):
Whether to pad the image to make the height and width divisible by `size_divisor`.
size_divisor (`int`, *optional*, defaults to `self.size_divisor`):
The integer that the padded height and width should be divisible by.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- Unset: Use the channel dimension format of the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
"""
do_rescale = do_rescale if do_rescale is not None else self.do_rescale
rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
do_pad = do_pad if do_pad is not None else self.do_pad
size_divisor = size_divisor if size_divisor is not None else self.size_divisor
images = make_flat_list_of_images(images)
if not valid_images(images):
raise ValueError("Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor")
validate_preprocess_arguments(
do_rescale=do_rescale,
rescale_factor=rescale_factor,
)
# All transformations expect numpy arrays.
images = [to_numpy_array(image) for image in images]
if do_rescale and is_scaled_image(images[0]):
logger.warning_once(
"It looks like you are trying to rescale already rescaled images. If the input"
" images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
)
if input_data_format is None:
# We assume that all images have the same channel dimension format.
input_data_format = infer_channel_dimension_format(images[0])
if do_rescale:
images = [
self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
for image in images
]
if do_pad:
images = [self.pad(image, size=size_divisor, input_data_format=input_data_format) for image in images]
images = [
to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images
]
data = {"pixel_values": images}
return BatchFeature(data=data, tensor_type=return_tensors)
__all__ = ["Swin2SRImageProcessor"]
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/xlstm/modeling_xlstm.py | src/transformers/models/xlstm/modeling_xlstm.py | # Copyright 2025 NXAI GmbH. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch xLSTM Model."""
from dataclasses import dataclass
from typing import Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import initialization as init
from ...generation import GenerationMixin
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_utils import PreTrainedModel
from ...utils import ModelOutput, auto_docstring, can_return_tuple, is_xlstm_available
from .configuration_xlstm import xLSTMConfig
if is_xlstm_available():
from xlstm.xlstm_large.model import RMSNorm as xLSTMRMSNorm
from xlstm.xlstm_large.model import mLSTMBlock, mLSTMStateType, soft_cap
external_xlstm = True
class xLSTMBlock(GradientCheckpointingLayer, mLSTMBlock):
pass
else:
from collections.abc import Callable
from functools import partial
from typing import Literal
from .configuration_xlstm import round_up_to_next_multiple_of
mLSTMLayerStateType = tuple[torch.Tensor, torch.Tensor, torch.Tensor]
mLSTMStateType = dict[int, mLSTMLayerStateType]
external_xlstm = False
def soft_cap(values: torch.Tensor, cap_value: Optional[Union[float, torch.Tensor]] = None) -> torch.Tensor:
"""
Soft caps a tensor to a value.
Performs a tanh operation on the logits and scales the result to the cap value. Common technique in attention
and output language heads to prevent large logits from dominating the softmax. See for example Gemma2:
https://huggingface.co/papers/2408.00118
Args:
values: The tensor to cap.
cap_value: The value to cap the values to. If None, no cap is applied.
Returns:
The capped values.
"""
if cap_value is None:
return values
return cap_value * torch.tanh(values / cap_value)
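# Illustrative usage of soft_cap (values are made up): soft_cap(torch.tensor([100.0]), cap_value=30.0)
# returns roughly 30 * tanh(100 / 30) ~= 29.9, so large logits saturate smoothly just below the cap value.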
def mlstm_chunkwise_recurrent_fw_C(
matK: torch.Tensor,
matV: torch.Tensor,
vecB: torch.Tensor,
vecI: torch.Tensor,
matC_states: Optional[torch.Tensor] = None,
vecN_states: Optional[torch.Tensor] = None,
scaMinter_states: Optional[torch.Tensor] = None,
matC_initial: Optional[torch.Tensor] = None,
vecN_initial: Optional[torch.Tensor] = None,
scaMinter_initial: Optional[torch.Tensor] = None,
qk_scale: Optional[float] = None,
chunk_size: int = 64,
num_chunks: int = 1,
) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
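# Overview of the loop below: for every chunk k this materializes the inter-chunk mLSTM states with the
# stabilized recurrence m_k = max(g_k + m_{k-1}, max(a_k)),
# C_k = exp(g_k + m_{k-1} - m_k) * C_{k-1} + (exp(a_k - m_k) * K_k)^T V_k and
# n_k = exp(g_k + m_{k-1} - m_k) * n_{k-1} + (exp(a_k - m_k) * K_k)^T 1,
# where g_k is the chunk's summed forget-gate log-sigmoid and a_k combines forget and input gates.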
batch_size, nh, _, dhqk, dhhv = *matK.shape, matV.shape[-1]
nc = num_chunks
_dtype, _device = matK.dtype, matK.device
if qk_scale is None:
qk_scale = dhqk**-0.5
# initialize the states tensors
if matC_states is None:
matC_states = torch.zeros((batch_size, nh, (nc + 1) * dhqk, dhhv), dtype=_dtype, device=_device)
if vecN_states is None:
vecN_states = torch.zeros((batch_size, nh, (nc + 1) * dhqk), dtype=_dtype, device=_device)
if scaMinter_states is None:
scaMinter_states = torch.zeros((batch_size, nh, (nc + 1)), dtype=_dtype, device=_device)
# assign the initial states to the running states
matC_k = (
torch.zeros((batch_size, nh, dhqk, dhhv), dtype=_dtype, device=_device)
if matC_initial is None
else matC_initial
)
vecN_k = (
torch.zeros((batch_size, nh, dhqk), dtype=_dtype, device=_device) if vecN_initial is None else vecN_initial
)
scaM_inter_k = (
torch.zeros((batch_size, nh, 1), dtype=_dtype, device=_device)
if scaMinter_initial is None
else scaMinter_initial
)
vecA = vecB[..., -1, None] - vecB + vecI
scaG = vecB[..., -1]
scaA_max = vecA.max(-1).values
scaM_inter_k = scaM_inter_k.squeeze(-1)
for key in range(0, num_chunks):
# store the states from the previous iteration before updating them
# in the first iteration, these are the initial states
matC_states[:, :, key * dhqk : (key + 1) * dhqk, :] = matC_k
vecN_states[:, :, key * dhqk : (key + 1) * dhqk] = vecN_k
scaMinter_states[:, :, key] = scaM_inter_k
# m_k update
scaA_max_k = scaA_max[:, :, key]
scaG_k = scaG[:, :, key]
scaM_inter_k_next = torch.max(scaG_k + scaM_inter_k, scaA_max_k)
# C_k update
matK_chunk = matK[:, :, key * chunk_size : (key + 1) * chunk_size, :] # * qk_scale
matV_chunk = matV[:, :, key * chunk_size : (key + 1) * chunk_size, :]
vecA_k = vecA[:, :, key, :]
vecAbar_k = torch.exp(vecA_k - scaM_inter_k_next[..., None])[:, :, :, None]
matK_chunk_gated = matK_chunk * vecAbar_k
scaGbar_k = torch.exp(scaG_k + scaM_inter_k - scaM_inter_k_next)[:, :, None]
# NOTE: no update in-place (i.e. +=) as this gives error for autograd backward
matC_k_next = scaGbar_k[..., None] * matC_k + matK_chunk_gated.transpose(-2, -1) @ (matV_chunk)
# n_k update
vecN_k_next = scaGbar_k * vecN_k + matK_chunk_gated.transpose(-2, -1).sum(-1)
# move to the next iteration
scaM_inter_k = scaM_inter_k_next
matC_k = matC_k_next
vecN_k = vecN_k_next
# store the states from the last iteration
matC_states[:, :, -dhqk:, :] = matC_k
vecN_states[:, :, -dhqk:] = vecN_k
scaMinter_states[:, :, -1] = scaM_inter_k
return matC_states, vecN_states, scaMinter_states
def mlstm_chunkwise_parallel_fw_H(
matQ: torch.Tensor,
matK: torch.Tensor,
matV: torch.Tensor,
# these states must be all states up to the last chunk, i.e. :-1
matC_states: torch.Tensor,
vecN_states: torch.Tensor,
scaMinter_states: torch.Tensor,
vecI: torch.Tensor,
vecB: torch.Tensor,
qk_scale: float,
chunk_size: int = 64,
num_chunks: int = 1,
eps: float = 1e-6,
) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
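# Overview: the code below combines the intra-chunk contribution (a masked, attention-like matmul inside each
# chunk) with the inter-chunk contribution carried by the pre-computed states C_k, n_k and m_k, re-scaling both
# parts with a shared running max so the exponentials stay numerically stable.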
_device = matQ.device
nc = num_chunks
batch_size, nh, dqk, dhv = matC_states.shape
matC_k_states = matC_states.view(batch_size, nh, nc, dqk // nc, dhv)
vecN_k_states = vecN_states.view(batch_size, nh, nc, dqk // nc)
scaMinter_k_states = scaMinter_states
matQ = matQ.view(batch_size, nh, nc, chunk_size, dqk)
matK = matK.view(batch_size, nh, nc, chunk_size, dqk)
matV = matV.view(batch_size, nh, nc, chunk_size, dhv)
ltr = torch.tril(
torch.ones(
(chunk_size, chunk_size),
dtype=torch.bool,
device=_device,
)
)
# Compute intra chunk contribution: H_intra
matF_logsig_chunk = vecB[:, :, :, :, None] - vecB[:, :, :, None, :]
matF_logsig_mask_chunk = torch.where(ltr, matF_logsig_chunk, -float("inf"))
matLogD_chunk = matF_logsig_mask_chunk + vecI[:, :, :, None, :]
# max_state intra
vecMintra_k = torch.max(matLogD_chunk, dim=-1, keepdim=False).values
# max_state combined
vecM_b_inter = vecB + scaMinter_k_states[:, :, :, None]
vecM_k_combine = torch.maximum(vecM_b_inter, vecMintra_k)
vecM_k_combine = vecM_k_combine[:, :, :, :, None]
vecM_b_inter = vecM_b_inter[:, :, :, :, None]
matLogD_stabilized_chunk = matLogD_chunk - vecM_k_combine
matD_chunk = torch.exp(matLogD_stabilized_chunk)
matS_chunk = (matQ @ matK.transpose(-2, -1)) * qk_scale
matM_chunk = matS_chunk * matD_chunk
# ? Combine H_intra with H_inter
vecBbar = torch.exp(vecM_b_inter - vecM_k_combine)
matQ_chunk_gated = matQ * vecBbar * qk_scale
matNumerator_common = matQ_chunk_gated @ matC_k_states + matM_chunk @ matV
vecDenom_l_common = matQ_chunk_gated @ vecN_k_states.unsqueeze(-1) + matM_chunk.sum(dim=-1, keepdim=True)
vecDenom_max_common = torch.maximum(torch.abs(vecDenom_l_common), torch.exp(-vecM_k_combine))
matH_k_chunk = matNumerator_common / (vecDenom_max_common + eps)
matH_out = matH_k_chunk.view(batch_size, nh, nc * chunk_size, dhv)
# we need the denominator and the overall max state for the backward pass
vecN_out = vecDenom_max_common.reshape(batch_size, nh, nc * chunk_size)
vecM_out = vecM_k_combine.reshape(batch_size, nh, nc * chunk_size)
return matH_out, vecN_out, vecM_out
def mlstm_chunkwise_fw(
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
igate: torch.Tensor,
fgate: torch.Tensor,
cstate: Optional[torch.Tensor] = None,
nstate: Optional[torch.Tensor] = None,
mstate: Optional[torch.Tensor] = None,
qk_scale: Optional[float] = None,
return_last_states: bool = False,
return_all_states: bool = False,
chunk_size: int = 64,
eps: float = 1e-6,
) -> tuple[
torch.Tensor,
torch.Tensor,
torch.Tensor,
Optional[tuple[torch.Tensor, torch.Tensor, torch.Tensor]],
Optional[tuple[torch.Tensor, torch.Tensor, torch.Tensor]],
]:
batch_size, nh, sequence_length, dhqk = query.shape
if sequence_length % chunk_size != 0:
raise ValueError(f"Sequence length {sequence_length} is not divisible by chunk size {chunk_size}.")
nc = sequence_length // chunk_size
vecI = igate.view(batch_size, nh, nc, chunk_size)
vecF = fgate.view(batch_size, nh, nc, chunk_size)
# compute the gates, the g and the a and b vectors
vecF_logsig = F.logsigmoid(vecF)
vecB = vecF_logsig.cumsum(-1)
if qk_scale is None:
qk_scale = dhqk**-0.5
#! materialize the C_k, n_k, m_k states for each chunk
matC_k_states, vecN_k_states, scaMinter_k_states = mlstm_chunkwise_recurrent_fw_C(
matK=key,
matV=value,
vecB=vecB,
vecI=vecI,
matC_initial=cstate,
vecN_initial=nstate,
scaMinter_initial=mstate,
qk_scale=qk_scale,
chunk_size=chunk_size,
num_chunks=nc,
)
#! compute the outputs within each chunk
matH_out, vecN_out, vecM_out = mlstm_chunkwise_parallel_fw_H(
matQ=query,
matK=key,
matV=value,
matC_states=matC_k_states[:, :, :-dhqk, :],
vecN_states=vecN_k_states[:, :, :-dhqk],
scaMinter_states=scaMinter_k_states[:, :, :-1],
vecI=vecI,
vecB=vecB,
qk_scale=qk_scale,
chunk_size=chunk_size,
num_chunks=nc,
eps=eps,
)
ret_tuple = (matH_out, vecN_out, vecM_out)
if return_last_states:
ret_tuple += (
(matC_k_states[:, :, -dhqk:, :], vecN_k_states[:, :, -dhqk:], scaMinter_k_states[:, :, -1:]),
)
else:
ret_tuple += (None,)
if return_all_states:
ret_tuple += ((matC_k_states, vecN_k_states, scaMinter_k_states),)
else:
ret_tuple += (None,)
return ret_tuple
def mlstm_chunkwise_native_autograd(
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
igate: torch.Tensor,
fgate: torch.Tensor,
c_initial: Optional[torch.Tensor] = None,
n_initial: Optional[torch.Tensor] = None,
m_initial: Optional[torch.Tensor] = None,
return_last_states: bool = False,
eps: float = 1e-6,
chunk_size: int = 64,
**kwargs,
) -> Union[torch.Tensor, tuple[torch.Tensor, tuple[torch.Tensor, torch.Tensor, torch.Tensor]]]:
batch_size, nh, sequence_length, dhqk = query.shape
if sequence_length % chunk_size != 0:
raise ValueError(f"Sequence length {sequence_length} is not divisible by chunk size {chunk_size}.")
nc = sequence_length // chunk_size
vecI = igate.view(batch_size, nh, nc, chunk_size)
vecF = fgate.view(batch_size, nh, nc, chunk_size)
# compute the gates, the g and the a and b vectors
vecF_logsig = F.logsigmoid(vecF)
vecB = vecF_logsig.cumsum(-1)
qk_scale = dhqk**-0.5
#! materialize the C_k, n_k, m_k states for each chunk
matC_k_states, vecN_k_states, scaMinter_k_states = mlstm_chunkwise_recurrent_fw_C(
matK=key,
matV=value,
vecB=vecB,
vecI=vecI,
matC_initial=c_initial,
vecN_initial=n_initial,
scaMinter_initial=m_initial,
qk_scale=qk_scale,
chunk_size=chunk_size,
num_chunks=nc,
)
#! compute the outputs within each chunk
matH_out, vecN_out, vecM_out = mlstm_chunkwise_parallel_fw_H(
matQ=query,
matK=key,
matV=value,
matC_states=matC_k_states[:, :, :-dhqk, :],
vecN_states=vecN_k_states[:, :, :-dhqk],
scaMinter_states=scaMinter_k_states[:, :, :-1],
vecI=vecI,
vecB=vecB,
qk_scale=qk_scale,
chunk_size=chunk_size,
num_chunks=nc,
eps=eps,
)
last_states = (matC_k_states[:, :, -dhqk:, :], vecN_k_states[:, :, -dhqk:], scaMinter_k_states[:, :, -1:])
if return_last_states:
return matH_out, last_states
else:
return matH_out
def mlstm_recurrent_step_native(
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
igate: torch.Tensor,
fgate: torch.Tensor,
cstate: torch.Tensor,
nstate: torch.Tensor,
mstate: torch.Tensor,
eps: float = 1e-6,
dtype_state: torch.dtype = torch.float32,
**kwargs,
) -> tuple[torch.Tensor, tuple[torch.Tensor, torch.Tensor, torch.Tensor]]:
"""This is a single step of the mLSTM operation in recurrent form."""
dtype_qkv = query.dtype
matC_old = cstate.to(dtype=dtype_state)
vecN_old = nstate.to(dtype=dtype_state)
scaM_old = mstate.to(dtype=dtype_state)
batch_size, nh, dhqk = query.shape
_, _, dhhv = value.shape
if query.shape != key.shape:
raise ValueError("query and key must have the same shape")
if matC_old.shape != (batch_size, nh, dhqk, dhhv):
raise ValueError(f"matC_old has wrong shape, got {matC_old.shape}")
if vecN_old.shape != (batch_size, nh, dhqk):
raise ValueError(f"vecN_old has wrong shape, got {vecN_old.shape}")
if scaM_old.shape != (batch_size, nh, 1):
raise ValueError(f"scaM_old has wrong shape, got {scaM_old.shape}")
if igate.shape != (batch_size, nh, 1):
raise ValueError(f"scaI has wrong shape, got {igate.shape}")
if fgate.shape != (batch_size, nh, 1):
raise ValueError(f"scaF has wrong shape, got {fgate.shape}")
# gates
scaF_log = torch.nn.functional.logsigmoid(fgate)
# update rule
scaM_state_new = torch.max(scaF_log + scaM_old, igate)
scaF_act = torch.exp(scaF_log + scaM_old - scaM_state_new)
scaI_act = torch.exp(igate - scaM_state_new)
vecQ_scaled = query * (dhqk ** (-0.5))
matC_state_new = scaF_act[:, :, :, None] * matC_old + scaI_act[:, :, :, None] * (
key[:, :, :, None] @ value[:, :, None, :]
)
vecN_state_new = scaF_act * vecN_old + scaI_act * key
h_num = vecQ_scaled[:, :, None, :] @ matC_state_new.to(dtype=dtype_qkv)
h_num = h_num.squeeze(2).to(dtype=dtype_state)
qn_dotproduct = vecQ_scaled[:, :, None, :] @ vecN_state_new[:, :, :, None].to(dtype=dtype_qkv)
qn_dotproduct = qn_dotproduct.squeeze(2)
max_val = torch.exp(-scaM_state_new)
h_denom = (torch.maximum(qn_dotproduct.abs(), max_val) + eps).to(dtype=dtype_state)
h = h_num / h_denom
h = h.to(dtype=dtype_qkv)
matC_state_new = matC_state_new.to(dtype=dtype_state)
vecN_state_new = vecN_state_new.to(dtype=dtype_state)
scaM_state_new = scaM_state_new.to(dtype=dtype_state)
return h, (matC_state_new, vecN_state_new, scaM_state_new)
def mlstm_recurrent_sequence_native(
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
igate: torch.Tensor,
fgate: torch.Tensor,
c_initial: Optional[torch.Tensor] = None,
n_initial: Optional[torch.Tensor] = None,
m_initial: Optional[torch.Tensor] = None,
return_last_states: bool = False,
eps: float = 1e-6,
dtype_state: torch.dtype = torch.float32,
**kwargs,
) -> tuple[
torch.Tensor,
torch.Tensor,
torch.Tensor,
Optional[tuple[torch.Tensor, torch.Tensor, torch.Tensor]],
Optional[tuple[torch.Tensor, torch.Tensor, torch.Tensor]],
]:
batch_size, nh, sequence_length, dhqk = query.shape
dhv = value.shape[-1]
device = query.device
if c_initial is not None:
if n_initial is None or m_initial is None:
raise ValueError("Initial states must be provided together.")
matC_state, vecN_state, vecM_state = (
c_initial.to(dtype=dtype_state),
n_initial.to(dtype=dtype_state),
m_initial.to(dtype=dtype_state),
)
else:
# memory state
matC_state = torch.zeros((batch_size, nh, dhqk, dhv), dtype=dtype_state, device=device)
# normalizer state
vecN_state = torch.zeros((batch_size, nh, dhqk), dtype=dtype_state, device=device)
# max state
vecM_state = torch.zeros((batch_size, nh, 1), dtype=dtype_state, device=device)
vecH_list = []
for t in range(sequence_length):
# gates
vecF_t, vecI_t = fgate[:, :, t, None], igate[:, :, t, None]
# projections
vecQ_t, vecK_t, vecV_t = query[:, :, t, :], key[:, :, t, :], value[:, :, t, :]
# step
vecH, (matC_state, vecN_state, vecM_state) = mlstm_recurrent_step_native(
cstate=matC_state,
nstate=vecN_state,
mstate=vecM_state,
query=vecQ_t,
key=vecK_t,
value=vecV_t,
igate=vecI_t,
fgate=vecF_t,
eps=eps,
dtype_state=dtype_state,
**kwargs,
)
vecH_list.append(vecH)
matH = torch.stack(vecH_list, dim=-2)
if return_last_states:
return matH, (matC_state, vecN_state, vecM_state)
else:
return matH
def wrap_chunkwise_pad_zeros(
mlstm_chunkwise_kernel: Callable,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
fgate: torch.Tensor,
igate: torch.Tensor,
c_initial: Optional[torch.Tensor] = None,
n_initial: Optional[torch.Tensor] = None,
m_initial: Optional[torch.Tensor] = None,
return_last_states: bool = False,
eps: float = 1e-6,
autocast_kernel_dtype: torch.dtype = torch.bfloat16,
chunk_size: int = 64,
**kwargs,
) -> Union[torch.Tensor, tuple[torch.Tensor, tuple[torch.Tensor, torch.Tensor, torch.Tensor]]]:
if return_last_states:
raise ValueError(
"We are padding zeros, so we cannot return last states,",
"as they would be not the true last states.",
)
batch_size, nh, sequence_length, dhqk = query.shape
S_unpadded = sequence_length
# padding to chunk size for kernels
if sequence_length % chunk_size != 0:
S_padded = ((sequence_length + chunk_size - 1) // chunk_size) * chunk_size
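# e.g. (illustrative) sequence_length=100 with chunk_size=64 pads to S_padded=128; the extra zero positions
# are sliced off again after the kernel call below.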
q_pad = query.new_zeros(batch_size, nh, S_padded, query.shape[3])
k_pad = key.new_zeros(batch_size, nh, S_padded, key.shape[3])
v_pad = value.new_zeros(batch_size, nh, S_padded, value.shape[3])
i_pad = igate.new_zeros(batch_size, nh, S_padded)
f_pad = fgate.new_zeros(batch_size, nh, S_padded)
q_pad[:, :, :S_unpadded, :] = query
k_pad[:, :, :S_unpadded, :] = key
v_pad[:, :, :S_unpadded, :] = value
i_pad[:, :, :S_unpadded] = igate
f_pad[:, :, :S_unpadded] = fgate
else:
q_pad = query
k_pad = key
v_pad = value
i_pad = igate
f_pad = fgate
matH = mlstm_chunkwise_kernel(
query=q_pad,
key=k_pad,
value=v_pad,
igate=i_pad,
fgate=f_pad,
c_initial=c_initial,
n_initial=n_initial,
m_initial=m_initial,
return_last_states=return_last_states,
eps=eps,
autocast_kernel_dtype=autocast_kernel_dtype,
chunk_size=chunk_size,
**kwargs,
)
matH = matH[:, :, :S_unpadded, :]
return matH
def wrap_chunkwise_arbitrary_sequence_length(
mlstm_chunkwise_kernel: Callable,
mlstm_sequence_kernel: Callable,
mlstm_step_kernel: Callable,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
fgate: torch.Tensor,
igate: torch.Tensor,
c_initial: Optional[torch.Tensor] = None,
n_initial: Optional[torch.Tensor] = None,
m_initial: Optional[torch.Tensor] = None,
return_last_states: bool = True,
eps: float = 1e-6,
autocast_kernel_dtype: torch.dtype = torch.bfloat16,
chunk_size: int = 64,
enable_logging: bool = False,
) -> Union[torch.Tensor, tuple[torch.Tensor, tuple[torch.Tensor, torch.Tensor, torch.Tensor]]]:
"""This function computes the last hidden state and matH outputs of the mLSTM, independently of the sequence length.
For this it uses three kernels:
- mlstm_chunkwise_kernel: mlstm chunkwise kernels that processes chunks of a given chunk size in parallel.
- mlstm_sequence_kernel: mlstm kernel that processes the remaining sequence length in a single step recurrence.
- mlstm_step_kernel: mlstm kernel that processes a sequence length of 1 in a single step.
It tries to maximize the chunksizes to improve performance.
It will start with the given chunk size and then divide the chunk size by 2 until the chunk size is smaller than 16.
At every chunksize it will process the maximal number of chunks that fit into the remaining sequence length.
E.g. for chunk_size = 64, this function will try the chunksizes [64, 32, 16] if necessary.
For the remaining sequence length, which is smaller than 16, we use a different kernel that computes the mLSTM
in a single step and loop over this in pytorch.
Args:
mlstm_chunkwise_kernel: The mLSTM chunkwise kernel that processes chunks of a given chunk size in parallel
mlstm_sequence_kernel: The mLSTM kernel that processes the remaining sequence length in a single step recurrence
query: The query tensor (batch_size, nh, sequence_length, dhqk)
key: The key tensor (batch_size, nh, sequence_length, dhqk)
value: The value tensor (batch_size, nh, sequence_length, dhhv)
fgate: The forget gate tensor (batch_size, nh, sequence_length)
igate: The input gate tensor (batch_size, nh, sequence_length)
c_initial: The initial cell state tensor (batch_size, nh, dhqk, dhhv)
n_initial: The initial hidden state tensor (batch_size, nh, dhqk)
m_initial: The initial memory state tensor (batch_size, nh, 1)
return_last_states: If True, the function will return the last states of the mLSTM
eps: The epsilon value used for numerical stability
autocast_kernel_dtype: The dtype used for the kernel computation
chunk_size: The chunk size used for the chunkwise kernel
enable_logging: If True, the function will log debug information. Default is False.
Returns:
The last hidden state tensor (batch_size, nh, sequence_length, dhhv) or a tuple containing the last hidden state tensor and the last states of the mLSTM
Last states are (cstate (batch_size, nh, dhqk, dhhv), nstate (batch_size, nh, dhqk), mstate (batch_size, nh, 1)).
"""
batch_size, nh, sequence_length, dhqk = key.shape
dhhv = value.shape[-1]
c_state = (
c_initial
if c_initial is not None
else torch.zeros(batch_size, nh, dhqk, dhhv, device=key.device, dtype=torch.float32)
)
n_state = (
n_initial
if n_initial is not None
else torch.zeros(batch_size, nh, dhqk, device=key.device, dtype=torch.float32)
)
m_state = (
m_initial
if m_initial is not None
else torch.zeros(batch_size, nh, 1, device=key.device, dtype=torch.float32)
)
if sequence_length > 1:
# process the sequence length in chunks
h_outs = []
seq_len_start_idx = 0
remaining_seq_len = sequence_length - seq_len_start_idx
num_chunks = remaining_seq_len // chunk_size
if num_chunks > 0:
iter_seq_len = chunk_size * num_chunks
seq_len_idx = seq_len_start_idx + iter_seq_len
h_out, (c_state, n_state, m_state) = mlstm_chunkwise_kernel(
query=query[..., seq_len_start_idx:seq_len_idx, :].contiguous(),
key=key[..., seq_len_start_idx:seq_len_idx, :].contiguous(),
value=value[..., seq_len_start_idx:seq_len_idx, :].contiguous(),
fgate=fgate[..., seq_len_start_idx:seq_len_idx].contiguous(),
igate=igate[..., seq_len_start_idx:seq_len_idx].contiguous(),
c_initial=c_state,
n_initial=n_state,
m_initial=m_state,
chunk_size=chunk_size,
return_last_states=True,
autocast_kernel_dtype=autocast_kernel_dtype,
eps=eps,
)
seq_len_start_idx += iter_seq_len
h_outs.append(h_out)
remaining_seq_len = sequence_length - seq_len_start_idx
if remaining_seq_len > 0:
# process the remaining tail (shorter than one chunk) with the recurrent sequence kernel; it returns both the tail outputs and the updated states
h_out, (c_state, n_state, m_state) = mlstm_sequence_kernel(
query=query[..., seq_len_start_idx:sequence_length, :].contiguous(),
key=key[..., seq_len_start_idx:sequence_length, :].contiguous(),
value=value[..., seq_len_start_idx:sequence_length, :].contiguous(),
igate=igate[..., seq_len_start_idx:sequence_length].contiguous(),
fgate=fgate[..., seq_len_start_idx:sequence_length].contiguous(),
c_initial=c_state,
n_initial=n_state,
m_initial=m_state,
return_last_states=True,
eps=eps,
)
h_outs.append(h_out)
h_out = torch.concatenate(h_outs, dim=2)
else:
if sequence_length != 1:
raise ValueError(
f"Received empty sequence (sequence_length={sequence_length}), require at least single element in the sequence."
)
# process the sequence length in a single step
# while this case is also captured by the regular mode above,
# it avoids the overhead of the loop and calls the step kernel directly
# The step function does not want a sequence dimension
# qkv shape is (batch_size, nh, dhqk/dhv)
# igate, fgate shape is (batch_size, nh, 1)
h_out, (c_state, n_state, m_state) = mlstm_step_kernel(
query=query.squeeze(2),
key=key.squeeze(2),
value=value.squeeze(2),
igate=igate,
fgate=fgate,
cstate=c_state,
nstate=n_state,
mstate=m_state,
eps=eps,
)
h_out = h_out[:, :, None, :]
if return_last_states:
return h_out, (c_state, n_state, m_state)
else:
return h_out
class xLSTMBackend(nn.Module):
"""xLSTM Backend Module for PyTorch.
This module wraps the xLSTM kernels and provides a high-level interface for training and inference.
"""
config_class = xLSTMConfig
def __init__(self, config: xLSTMConfig):
super().__init__()
self.config = config
self.chunkwise_kernel_fn = mlstm_chunkwise_native_autograd
self.sequence_kernel_fn = mlstm_recurrent_sequence_native
self.step_kernel_fn = mlstm_recurrent_step_native
self._inference_fn = partial(
wrap_chunkwise_arbitrary_sequence_length,
mlstm_chunkwise_kernel=self.chunkwise_kernel_fn,
mlstm_sequence_kernel=partial(
self.sequence_kernel_fn,
dtype_state=getattr(torch, config.inference_state_dtype),
),
mlstm_step_kernel=partial(
self.step_kernel_fn,
dtype_state=getattr(torch, config.inference_state_dtype),
),
chunk_size=config.chunk_size,
eps=config.eps,
autocast_kernel_dtype=getattr(torch, config.autocast_kernel_dtype),
return_last_states=True,
)
train_kernel_fn = partial(
self.chunkwise_kernel_fn,
autocast_kernel_dtype=getattr(torch, config.autocast_kernel_dtype),
eps=config.eps,
chunk_size=config.chunk_size,
)
if "with_padding" in config.mode:
train_kernel_fn = partial(wrap_chunkwise_pad_zeros, mlstm_chunkwise_kernel=train_kernel_fn)
self._train_fn = train_kernel_fn
def forward(
self,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
igate: torch.Tensor,
fgate: torch.Tensor,
c_initial: Optional[torch.Tensor] = None,
n_initial: Optional[torch.Tensor] = None,
m_initial: Optional[torch.Tensor] = None,
return_last_states: bool = False,
mode: Optional[Literal["train", "inference"]] = None,
) -> Union[torch.Tensor, tuple[torch.Tensor, tuple[torch.Tensor, torch.Tensor, torch.Tensor]]]:
"""Forward pass of the mLSTM backend.
Depending on the configured mode, this method will call the appropriate kernel function.
Args:
query: The query tensor of shape (batch_size, nh, sequence_length, dhqk).
key: The key tensor of shape (batch_size, nh, sequence_length, dhqk).
value: The value tensor of shape (batch_size, nh, sequence_length, dhhv).
igate: The input gate preactivation tensor of shape (batch_size, nh, sequence_length).
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | true |