sample_id stringlengths 21 196 | text stringlengths 105 936k | metadata dict | category stringclasses 6
values |
|---|---|---|---|
huggingface/transformers:src/transformers/models/evolla/modular_evolla.py | # Copyright 2025 Westlake Representational Learning Lab (Fajie Yuan Lab) team and the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
import torch
from torch import nn
from ... import initialization as init
from ...cache_utils import Cache, DynamicCache
from ...generation import GenerationMixin
from ...masking_utils import create_bidirectional_mask, create_causal_mask
from ...modeling_outputs import (
BaseModelOutputWithPast,
BaseModelOutputWithPoolingAndCrossAttentions,
CausalLMOutputWithPast,
ModelOutput,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
auto_docstring,
can_return_tuple,
logging,
)
from ...utils.generic import merge_with_config_defaults
from ...utils.output_capturing import OutputRecorder, capture_outputs
from ..esm.modeling_esm import (
EsmAttention,
EsmEmbeddings,
EsmEncoder,
EsmIntermediate,
EsmLayer,
EsmOutput,
EsmPooler,
EsmSelfAttention,
EsmSelfOutput,
)
from ..llama.modeling_llama import (
LlamaAttention,
LlamaDecoderLayer,
LlamaMLP,
LlamaPreTrainedModel,
LlamaRMSNorm,
LlamaRotaryEmbedding,
)
from .configuration_evolla import EvollaConfig, SaProtConfig
logger = logging.get_logger(__name__)
class EvollaSaProtEmbeddings(EsmEmbeddings):
    """ESM embeddings with the persistent `position_ids` buffer disabled.

    SaProt relies on rotary embeddings downstream, so the absolute position id
    table inherited from `EsmEmbeddings` is not needed.
    """

    def __init__(self, config):
        super().__init__(config)
        # remove the position_ids in EsmEmbeddings
        self.position_ids = None
def rotate_half_esm(x):
    """Split the last dimension in two and rotate: (a, b) -> (-b, a)."""
    front, back = torch.chunk(x, 2, dim=-1)
    return torch.cat((-back, front), dim=-1)
def apply_rotary_pos_emb_esm(x, cos, sin):
    """Apply rotary position embeddings to `x`.

    The cos/sin tables are truncated to `x`'s sequence length (dim -2) before
    the rotation is applied.
    """
    seq_len = x.shape[-2]
    cos = cos[:, :, :seq_len, :]
    sin = sin[:, :, :seq_len, :]
    return (x * cos) + (rotate_half_esm(x) * sin)
class EvollaSaProtRotaryEmbedding(nn.Module):
    """
    Rotary position embeddings based on those in
    [RoFormer](https://huggingface.co/docs/transformers/model_doc/roformer). Query and keys are transformed by rotation
    matrices which depend on their relative positions.
    """

    inv_freq: torch.Tensor  # fix linting for `register_buffer`

    def __init__(self, dim: int):
        super().__init__()
        self.dim = dim
        # Inverse frequency for every even channel index (non-trainable buffer).
        channel_idx = torch.arange(0, dim, 2, dtype=torch.int64).float()
        self.register_buffer("inv_freq", 1.0 / (10000 ** (channel_idx / dim)))

        # Lazily built cos/sin caches, refreshed on length or device change.
        self._seq_len_cached = None
        self._cos_cached = None
        self._sin_cached = None

    def _update_cos_sin_tables(self, x, seq_dimension=2):
        seq_len = x.shape[seq_dimension]

        # Serve from cache when neither the sequence length nor the device changed
        # (a device change can happen e.g. under tracing).
        if seq_len == self._seq_len_cached and self._cos_cached.device == x.device:
            return self._cos_cached, self._sin_cached

        self._seq_len_cached = seq_len
        positions = torch.arange(seq_len, device=x.device).type_as(self.inv_freq)
        freqs = torch.outer(positions, self.inv_freq)
        table = torch.cat((freqs, freqs), dim=-1).to(x.device)
        self._cos_cached = table.cos()[None, None, :, :]
        self._sin_cached = table.sin()[None, None, :, :]
        return self._cos_cached, self._sin_cached

    def forward(self, q: torch.Tensor, k: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
        self._cos_cached, self._sin_cached = self._update_cos_sin_tables(k, seq_dimension=-2)

        rotated_q = apply_rotary_pos_emb_esm(q, self._cos_cached, self._sin_cached).to(dtype=q.dtype)
        rotated_k = apply_rotary_pos_emb_esm(k, self._cos_cached, self._sin_cached).to(dtype=k.dtype)
        return (rotated_q, rotated_k)
class EvollaSaProtSelfAttention(EsmSelfAttention):
    def __init__(self, config, position_embedding_type=None, layer_idx=None, is_cross_attention=False):
        """Self-attention projections for SaProt.

        Deliberately calls `nn.Module.__init__` instead of the parent's `__init__`
        so every submodule is created exactly once, here.
        """
        nn.Module.__init__(self)
        self.config = config

        if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
            raise ValueError(
                f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
                f"heads ({config.num_attention_heads})"
            )

        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size

        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        self.key = nn.Linear(config.hidden_size, self.all_head_size)
        self.value = nn.Linear(config.hidden_size, self.all_head_size)

        # Stored as a probability (float), not an nn.Dropout module.
        self.dropout = config.attention_probs_dropout_prob
        self.rotary_embeddings = None
        self.position_embedding_type = position_embedding_type or getattr(
            config, "position_embedding_type", "absolute"
        )
        if self.position_embedding_type == "rotary":
            self.rotary_embeddings = EvollaSaProtRotaryEmbedding(dim=self.attention_head_size)

        self.is_decoder = config.is_decoder
        self.layer_idx = layer_idx
        # NOTE(review): scaling fixed at 1.0 — presumably the inherited attention
        # implementation applies its own scaling; confirm against EsmSelfAttention.
        self.scaling = 1.0
        self.is_causal = self.is_decoder and not is_cross_attention
# Thin aliases over the ESM building blocks: only the class name (and therefore
# the checkpoint/module prefix) changes; all behavior is inherited unchanged.
class EvollaSaProtSelfOutput(EsmSelfOutput):
    pass


class EvollaSaProtAttention(EsmAttention):
    pass


class EvollaSaProtIntermediate(EsmIntermediate):
    pass


class EvollaSaProtOutput(EsmOutput):
    pass


class EvollaSaProtLayer(EsmLayer):
    pass


class EvollaSaProtEncoder(EsmEncoder):
    pass


class EvollaSaProtPooler(EsmPooler):
    pass
@auto_docstring
class EvollaSaProtPreTrainedModel(PreTrainedModel):
    config: SaProtConfig
    _no_split_modules = ["EvollaSaProtLayer"]
    _supports_flash_attn = True
    _supports_sdpa = True
    _supports_flex_attn = True
    _supports_attention_backend = True

    # Tells `capture_outputs` which submodule outputs to record into the
    # returned ModelOutput (hidden states per layer, attention probabilities).
    _can_record_outputs = {
        "hidden_states": EvollaSaProtLayer,
        "attentions": [OutputRecorder(EvollaSaProtSelfAttention, index=1, layer_name="attention")],
        "cross_attentions": [
            OutputRecorder(EvollaSaProtSelfAttention, index=1, layer_name="crossattention"),
        ],
    }

    def _init_weights(self, module):
        super()._init_weights(module)
        if isinstance(module, EvollaSaProtRotaryEmbedding):
            # Re-materialize the deterministic inverse-frequency buffer (same
            # formula as in EvollaSaProtRotaryEmbedding.__init__).
            inv_freq = 1.0 / (10000 ** (torch.arange(0, module.dim, 2, dtype=torch.int64).float() / module.dim))
            init.copy_(module.inv_freq, inv_freq)
class EvollaSaProtProteinEncoder(EvollaSaProtPreTrainedModel):
    """Bidirectional SaProt encoder: embeds structure-aware protein token ids and
    runs them through the ESM-style encoder stack."""

    def __init__(self, config: SaProtConfig):
        super().__init__(config)
        self.embeddings = EvollaSaProtEmbeddings(config)
        self.encoder = EvollaSaProtEncoder(config)

        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    @merge_with_config_defaults
    @capture_outputs
    def forward(
        self,
        input_ids: torch.Tensor | None,
        attention_mask: torch.Tensor | None = None,
        **kwargs,
    ) -> tuple[torch.Tensor] | BaseModelOutputWithPoolingAndCrossAttentions:
        # NOTE(review): despite the Optional annotation, `input_ids` is required
        # here — it is dereferenced unconditionally below.
        input_shape = input_ids.size()
        batch_size, seq_length = input_shape
        device = input_ids.device

        # Default: attend over every position.
        if attention_mask is None:
            attention_mask = torch.ones(((batch_size, seq_length)), device=device)

        inputs_embeds = self.embeddings(input_ids=input_ids, attention_mask=attention_mask)
        # The protein encoder is non-causal: expand the padding mask bidirectionally.
        attention_mask = create_bidirectional_mask(
            config=self.config,
            inputs_embeds=inputs_embeds,
            attention_mask=attention_mask,
        )

        encoder_outputs = self.encoder(inputs_embeds, attention_mask=attention_mask, **kwargs)
        sequence_output = encoder_outputs[0]

        return BaseModelOutputWithPoolingAndCrossAttentions(
            last_hidden_state=sequence_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
            cross_attentions=encoder_outputs.cross_attentions,
        )
class EvollaSequenceCompressorAttention(nn.Module):
    """Perceiver-style cross attention: a small set of latent queries attends over
    the sequence features concatenated with the latents themselves."""

    def __init__(self, dim, dim_head=64, heads=8):
        super().__init__()
        self.scale = dim_head**-0.5
        self.heads = heads
        inner_dim = dim_head * heads

        self.norm_media = nn.LayerNorm(dim)
        self.norm_latents = nn.LayerNorm(dim)

        self.to_q = nn.Linear(dim, inner_dim, bias=False)
        self.to_kv = nn.Linear(dim, inner_dim * 2, bias=False)
        self.to_out = nn.Linear(inner_dim, dim, bias=False)

    def forward(self, x, latents, mask):
        """
        Args:
            x (torch.Tensor): sequence features, shape (b, n1, D)
            latents (torch.Tensor): latent features, shape (b, n2, D)
            mask (torch.Tensor): key validity mask, shape (b, n1 + n2)
        """
        x = self.norm_media(x)
        latents = self.norm_latents(latents)

        num_heads = self.heads
        queries = self.to_q(latents)
        # Keys/values span the sequence features plus the latents.
        keys, values = self.to_kv(torch.cat((x, latents), dim=-2)).chunk(
            2, dim=-1
        )  # each: batch_size, max_protein_length+num_latents, dim_head*num_heads

        def split_heads(t):
            # [b, seq, h*d] -> [b, h, seq, d]
            return t.view(t.size(0), t.size(1), num_heads, -1).permute(0, 2, 1, 3)

        queries = split_heads(queries) * self.scale  # batch_size, num_heads, num_latents, dim_head
        keys = split_heads(keys)
        values = split_heads(values)

        # attention
        scores = torch.matmul(queries, keys.transpose(-1, -2))
        # Subtract the row-wise max (detached) for numerical stability.
        scores = scores - scores.amax(dim=-1, keepdim=True).detach()

        batch, heads, n_queries, n_keys = scores.shape
        # Broadcast the (b, n_keys) mask across heads and query positions.
        ones = torch.ones(heads, n_queries).to(mask.device)
        expanded = mask[:, None, None, :] * ones[None, :, :, None]
        scores = scores.masked_fill((1 - expanded).bool(), -1e4)

        probs = scores.softmax(dim=-1)
        out = torch.matmul(probs, values)
        out = out.permute(0, 2, 1, 3)
        # [batch, seq, head, features] -> [batch, seq, head*features]
        out = out.reshape(out.size(0), out.size(1), -1)
        return self.to_out(out)
class EvollaFeedForward(nn.Module):
    """Pre-norm two-layer MLP (width expanded by `mult`) with GELU activation."""

    def __init__(self, dim, mult=4):
        super().__init__()
        expanded = int(dim * mult)
        self.norm = nn.LayerNorm(dim)
        self.fc1 = nn.Linear(dim, expanded, bias=False)
        self.activation = nn.GELU()
        self.fc2 = nn.Linear(expanded, dim, bias=False)

    def forward(self, x):
        hidden = self.norm(x)
        hidden = self.fc1(hidden)
        hidden = self.activation(hidden)
        return self.fc2(hidden)
class EvollaSequenceCompressorResampler(nn.Module):
    """Compresses a variable-length protein representation into a fixed number of
    learned latent tokens, then projects them to the language-model width."""

    def __init__(self, config: EvollaConfig):
        super().__init__()
        protein_repr_dim = config.protein_encoder_config.hidden_size
        self.num_latents = config.resampler_num_latents
        # Learned latent queries, shared across the batch.
        self.latents = nn.Parameter(torch.randn(self.num_latents, protein_repr_dim), requires_grad=True)
        self.layers = nn.ModuleList([])
        for _ in range(config.resampler_depth):
            attention = EvollaSequenceCompressorAttention(
                dim=protein_repr_dim, dim_head=config.resampler_dim_head, heads=config.resampler_heads
            )
            feed_forward = EvollaFeedForward(dim=protein_repr_dim, mult=config.resampler_ff_mult)
            self.layers.append(nn.ModuleList([attention, feed_forward]))

        self.norm = nn.LayerNorm(config.hidden_size)
        self.protein_projector = nn.Linear(protein_repr_dim, config.hidden_size)

    def forward(self, embeds, mask):
        b = embeds.shape[0]
        bs, _ = mask.shape  # bs, max_protein_length
        # Latent key positions are always valid; extend the mask to cover them.
        latent_mask = torch.ones(bs, self.num_latents).to(mask.device)
        mask = torch.cat((mask, latent_mask), dim=1)  # bs, max_protein_length + num_latents

        # Tile the learned latents over the batch dimension.
        ones = torch.ones(b).to(self.latents.device)
        latents = self.latents[None] * ones.view(-1, 1, 1)  # [b,n,d]
        latents = latents.to(embeds.dtype)

        for attention, feed_forward in self.layers:
            latents = attention(embeds, latents, mask) + latents
            latents = feed_forward(latents) + latents

        transformed_feature = self.protein_projector(latents)
        return self.norm(transformed_feature)
@dataclass
@auto_docstring
class EvollaProteinEncoderModelOutput(ModelOutput):
    # Fixed-size latent summary produced by the sequence-compressor resampler.
    sequence_compressor_output: torch.FloatTensor | None = None
    # Per-residue hidden states from the SaProt encoder.
    last_hidden_state: torch.FloatTensor | None = None
    hidden_states: tuple[torch.FloatTensor, ...] | None = None
    attentions: tuple[torch.FloatTensor, ...] | None = None
class EvollaProteinEncoder(nn.Module):
    """SaProt protein encoder followed by the sequence-compressor resampler."""

    def __init__(self, config: EvollaConfig):
        super().__init__()
        self.model = EvollaSaProtProteinEncoder(config=config.protein_encoder_config)
        self.sequence_compressor_resampler = EvollaSequenceCompressorResampler(config=config)

    @can_return_tuple
    def forward(self, input_ids: torch.LongTensor, attention_mask: torch.FloatTensor, **kwargs):
        """Encode protein tokens, then compress them into a fixed latent set."""
        protein_output = self.model(input_ids=input_ids, attention_mask=attention_mask)
        protein_embeds = protein_output.last_hidden_state
        sequence_repr = self.sequence_compressor_resampler(protein_embeds, attention_mask)
        return EvollaProteinEncoderModelOutput(
            sequence_compressor_output=sequence_repr,
            last_hidden_state=protein_output.last_hidden_state,
        )
class EvollaSequenceAlignerCrossAttention(nn.Module):
    """Gated cross-attention adapter: text hidden states (queries) attend over
    protein / structure / MSA encoder features (keys/values).

    Both residual branches are tanh-gated; the gates start at 0 (see
    `EvollaPreTrainedModel._init_weights`), so the adapter is an identity
    mapping at initialization.
    """

    def __init__(
        self,
        config,
        protein_encoder_dim: int | None = None,
        structure_encoder_dim: int | None = None,
        msa_encoder_dim: int | None = None,
    ):
        super().__init__()
        self.hidden_size = config.hidden_size
        self.num_attention_heads = config.num_attention_heads
        # NOTE(review): scale is num_attention_heads**-0.5, not the usual
        # attention_head_size**-0.5 — confirm this matches the released weights.
        self.scale = self.num_attention_heads**-0.5
        self.attention_head_size = int(self.hidden_size / self.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        attention_probs_dropout_prob = config.aligner_attention_probs_dropout_prob
        enable_bias = config.aligner_enable_bias
        ffn_mult = config.aligner_ffn_mult

        self.query = nn.Linear(self.hidden_size, self.all_head_size)

        # Per-modality key/value projections exist only when that modality's
        # encoder dimension is provided; absent modalities stay None.
        if protein_encoder_dim is not None:
            self.key_protein = nn.Linear(protein_encoder_dim, self.all_head_size)
            self.value_protein = nn.Linear(protein_encoder_dim, self.all_head_size)
        else:
            self.key_protein = None
            self.value_protein = None

        if structure_encoder_dim is not None:
            self.key_structure = nn.Linear(structure_encoder_dim, self.all_head_size)
            self.value_structure = nn.Linear(structure_encoder_dim, self.all_head_size)
        else:
            self.key_structure = None
            self.value_structure = None

        if msa_encoder_dim is not None:
            self.key_msa = nn.Linear(msa_encoder_dim, self.all_head_size)
            self.value_msa = nn.Linear(msa_encoder_dim, self.all_head_size)
        else:
            self.key_msa = None
            self.value_msa = None

        self.attention_norm = EvollaRMSNorm(self.hidden_size)

        self.dropout = nn.Dropout(attention_probs_dropout_prob)
        self.out_proj = nn.Linear(self.hidden_size, self.hidden_size, bias=enable_bias)

        self.ff = EvollaFeedForward(self.hidden_size, ffn_mult)

        # Scalar gates for the attention and feed-forward residual branches.
        self.gate_attention = nn.Parameter(torch.tensor([0.0]))
        self.gate_ffw = nn.Parameter(torch.tensor([0.0]))

    def cross_attention(
        self,
        query_states,
        protein_key_value_states,
        structure_key_value_states,
        msa_key_value_states,
        query_attn_mask,
        protein_kv_attn_mask,
        structure_kv_attn_mask,
        msa_kv_attn_mask,
    ):
        """
        query_states: text
        key_value_states: protein
        query_states: [bs, query_seq_len, dim]
        key_value_states: [bs, kv_seq_len, dim]
        query_attn_mask: [bs, query_seq_len]
        kv_attn_mask: [bs, kv_seq_len]
        """
        # Concatenate the key masks of whichever modalities are present.
        kv_attn_mask = [protein_kv_attn_mask, structure_kv_attn_mask, msa_kv_attn_mask]
        kv_attn_mask = [_ for _ in kv_attn_mask if _ is not None]
        if not kv_attn_mask:
            raise ValueError("At least one modality should be provided for cross attention.")
        kv_attn_mask = torch.cat(kv_attn_mask, dim=1)

        # Pre-norm the queries before projection.
        query_layer = self.attention_norm(query_states)

        # Warning: This place might cause issues, refers to
        # https://discuss.pytorch.org/t/cuda-error-cublas-status-not-supported-when-calling-cublasltmatmul-from-torch-nn-functional-linear/170214/13
        # Solution: add `DISABLE_ADDMM_CUDA_LT=1` as environment variable
        # Apply linear transformation to input_query, input_key, and input_value
        query_layer = self.query(query_layer)  # [bs, querylength, dim]

        # Project each available modality to keys/values (cast to the query dtype/device).
        if self.key_protein is not None and self.value_protein is not None:
            protein_key_value_states = protein_key_value_states.to(query_states)
            key_layer_protein = self.key_protein(protein_key_value_states)  # [bs, keylength, dim]
            value_layer_protein = self.value_protein(protein_key_value_states)  # [bs, keylength, dim]
        else:
            key_layer_protein = None
            value_layer_protein = None

        if self.key_structure is not None and self.value_structure is not None:
            structure_key_value_states = structure_key_value_states.to(query_states)
            key_layer_structure = self.key_structure(structure_key_value_states)  # [bs, keylength, dim]
            value_layer_structure = self.value_structure(structure_key_value_states)  # [bs, keylength, dim]
        else:
            key_layer_structure = None
            value_layer_structure = None

        if self.key_msa is not None and self.value_msa is not None:
            msa_key_value_states = msa_key_value_states.to(query_states)
            key_layer_msa = self.key_msa(msa_key_value_states)  # [bs, keylength, dim]
            value_layer_msa = self.value_msa(msa_key_value_states)  # [bs, keylength, dim]
        else:
            key_layer_msa = None
            value_layer_msa = None

        # Concatenate modalities along the key dimension (same order as the mask above).
        key_layer = [key_layer_protein, key_layer_structure, key_layer_msa]
        key_layer = [_ for _ in key_layer if _ is not None]
        key_layer = torch.cat(key_layer, dim=1)

        value_layer = [value_layer_protein, value_layer_structure, value_layer_msa]
        value_layer = [_ for _ in value_layer if _ is not None]
        value_layer = torch.cat(value_layer, dim=1)

        # Reshape [bs, len, dim] -> [bs, heads, len, head_dim].
        new_query_layer_shape = query_layer.size()[:-1] + (
            self.num_attention_heads,
            self.attention_head_size,
        )
        query_layer = query_layer.view(*new_query_layer_shape).permute(0, 2, 1, 3)

        new_key_layer_shape = key_layer.size()[:-1] + (
            self.num_attention_heads,
            self.attention_head_size,
        )
        key_layer = key_layer.view(*new_key_layer_shape).permute(0, 2, 1, 3)

        new_value_layer_shape = value_layer.size()[:-1] + (
            self.num_attention_heads,
            self.attention_head_size,
        )
        value_layer = value_layer.view(*new_value_layer_shape).permute(0, 2, 1, 3)

        query_layer = query_layer * self.scale

        # attention_mask: [bs, 1, querylength, keylength]
        if query_attn_mask is None:
            query_attn_mask = torch.ones(query_states.size(0), query_states.size(1)).to(query_states.device)
        attention_mask = query_attn_mask[:, None, :, None] * kv_attn_mask[:, None, None, :]

        # Compute the scaled dot-product attention scores
        attn_weights = torch.matmul(query_layer, key_layer.transpose(-1, -2))  # [bs, numheads, querylength, keylength]
        attn_weights = attn_weights - attn_weights.amax(dim=-1, keepdim=True).detach()  # To stabilize score

        attention_scores = attn_weights.masked_fill(
            (1 - attention_mask).bool(), torch.finfo(attn_weights.dtype).min
        )  # [bs, numheads, querylength, keylength]

        attention_probs = nn.Softmax(dim=-1)(attention_scores)
        # NOTE(review): dropout is constructed in __init__ but intentionally(?) not applied here.
        # attention_probs_dropped = self.dropout(attention_probs)

        context_layer = torch.matmul(attention_probs, value_layer)  # [bs, numheads, querylength, dim/numheads]

        # Merge heads back: [bs, heads, qlen, head_dim] -> [bs, qlen, dim].
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(*new_context_layer_shape)

        context_layer = self.out_proj(context_layer)
        return context_layer

    def forward(
        self,
        query_states,
        protein_kv_states,
        structure_kv_states,
        msa_kv_states,
        query_attn_mask,
        protein_kv_attn_mask=None,
        structure_kv_attn_mask=None,
        msa_kv_attn_mask=None,
        protein_batch_mask=None,
        structure_batch_mask=None,
        msa_batch_mask=None,
        past_key_values=None,
    ):
        # Derive a per-token key mask from the per-sample batch mask when the
        # caller did not supply one explicitly.
        if protein_kv_states is not None:
            bs, protein_kv_seq_len, dim = protein_kv_states.shape
            if protein_kv_attn_mask is None:
                protein_kv_attn_mask = (
                    torch.ones(bs, protein_kv_seq_len).to(protein_batch_mask.device)
                    * protein_batch_mask.expand(size=(protein_kv_seq_len, bs)).T
                ).to(protein_kv_states.device)
        else:
            protein_kv_attn_mask = None

        if structure_kv_states is not None:
            bs, structure_kv_seq_len, dim = structure_kv_states.shape
            if structure_kv_attn_mask is None:
                # NOTE(review): uses `protein_batch_mask.device` here — fails with
                # AttributeError when structure features are given without protein
                # features; should presumably be `structure_batch_mask.device`.
                structure_kv_attn_mask = (
                    torch.ones(bs, structure_kv_seq_len).to(protein_batch_mask.device)
                    * structure_batch_mask.expand(size=(structure_kv_seq_len, bs)).T
                ).to(structure_kv_states.device)
        else:
            structure_kv_attn_mask = None

        if msa_kv_states is not None:
            bs, msa_kv_seq_len, dim = msa_kv_states.shape
            if msa_kv_attn_mask is None:
                # NOTE(review): same device concern as above — `msa_batch_mask.device`
                # would be the self-consistent choice; confirm intended behavior.
                msa_kv_attn_mask = (
                    torch.ones(bs, msa_kv_seq_len).to(protein_batch_mask.device)
                    * msa_batch_mask.expand(size=(msa_kv_seq_len, bs)).T
                ).to(msa_kv_states.device)
        else:
            msa_kv_attn_mask = None

        hidden_states = query_states

        # only when there's at least one valid modality, crossattention will be performed
        if (
            (protein_kv_states is not None and protein_kv_attn_mask.any())
            or (structure_kv_states is not None and structure_kv_attn_mask.any())
            or (msa_kv_states is not None and msa_kv_attn_mask.any())
        ):
            residual = hidden_states

            hidden_states = self.cross_attention(
                query_states=hidden_states,
                protein_key_value_states=protein_kv_states,
                structure_key_value_states=structure_kv_states,
                msa_key_value_states=msa_kv_states,
                query_attn_mask=query_attn_mask,
                protein_kv_attn_mask=protein_kv_attn_mask,
                structure_kv_attn_mask=structure_kv_attn_mask,
                msa_kv_attn_mask=msa_kv_attn_mask,
            )  # [bs, query_seq_len, dim]

            # tanh gate
            hidden_states = torch.tanh(self.gate_attention) * hidden_states
            hidden_states = residual + hidden_states  # input_query

            residual = hidden_states
            hidden_states = self.ff(hidden_states) * torch.tanh(self.gate_ffw)
            hidden_states = residual + hidden_states

        return hidden_states
# Thin aliases over the Llama building blocks: only the class name changes;
# behavior is inherited unchanged.
class EvollaRMSNorm(LlamaRMSNorm):
    pass


class EvollaRotaryEmbedding(LlamaRotaryEmbedding):
    pass


class EvollaMLP(LlamaMLP):
    pass


class EvollaAttention(LlamaAttention):
    pass
class EvollaDecoderLayer(LlamaDecoderLayer):
    """Llama decoder layer that, on every k-th layer
    (k = num_hidden_layers // aligner_num_add_layers), appends a gated protein
    cross-attention adapter after the MLP block."""

    def __init__(self, config: EvollaConfig, layer_idx: int):
        super().__init__(config, layer_idx)
        # `max(..., 1)` guards against aligner_num_add_layers > num_hidden_layers.
        if (layer_idx + 1) % max(config.num_hidden_layers // config.aligner_num_add_layers, 1) == 0:
            self.adapter = EvollaSequenceAlignerCrossAttention(
                config,
                protein_encoder_dim=config.hidden_size,
            )

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: tuple[torch.Tensor, torch.Tensor] | None = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: Cache | None = None,
        use_cache: bool | None = False,
        cache_position: torch.LongTensor | None = None,
        protein_kv_states: torch.Tensor | None = None,
        structure_kv_states: torch.Tensor | None = None,
        msa_kv_states: torch.Tensor | None = None,
        protein_batch_mask: torch.Tensor | None = None,
        structure_batch_mask: torch.Tensor | None = None,
        msa_batch_mask: torch.Tensor | None = None,
        query_attn_mask: torch.Tensor | None = None,
        **kwargs,
    ):
        residual = hidden_states

        hidden_states = self.input_layernorm(hidden_states)

        # Self Attention
        hidden_states, _ = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            use_cache=use_cache,
            cache_position=cache_position,
            position_embeddings=position_embeddings,
            **kwargs,
        )
        hidden_states = residual + hidden_states

        # Fully Connected
        residual = hidden_states
        hidden_states = self.post_attention_layernorm(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states

        # Cross-attend to protein/structure/MSA features on adapter-equipped layers.
        if hasattr(self, "adapter"):
            hidden_states = self.adapter(
                query_states=hidden_states,
                protein_kv_states=protein_kv_states,
                structure_kv_states=structure_kv_states,
                msa_kv_states=msa_kv_states,
                query_attn_mask=query_attn_mask,
                protein_batch_mask=protein_batch_mask,
                structure_batch_mask=structure_batch_mask,
                msa_batch_mask=msa_batch_mask,
            )

        return hidden_states
class EvollaPreTrainedModel(LlamaPreTrainedModel):
    _supports_flash_attn = False  # see dependency on `EvollaSequenceCompressorResampler`
    _supports_flex_attn = False  # see dependency on `EvollaSequenceCompressorResampler`
    _supports_attention_backend = False

    _no_split_modules = [
        "EvollaDecoderLayer",
        "EvollaSequenceCompressorResampler",
        "EvollaSequenceAlignerCrossAttention",
    ]

    @torch.no_grad()
    def _init_weights(self, module):
        """Base initialization plus Evolla-specific parameters."""
        std = self.config.initializer_range
        PreTrainedModel._init_weights(self, module)
        if isinstance(module, EvollaSequenceAlignerCrossAttention):
            # Gates start closed (tanh(0) == 0), so adapters are a no-op at init.
            init.zeros_(module.gate_attention)
            init.zeros_(module.gate_ffw)
            init.ones_(module.attention_norm.weight)
        elif isinstance(module, EvollaSequenceCompressorResampler):
            init.normal_(module.latents, mean=0.0, std=std)
class EvollaModel(EvollaPreTrainedModel):
    """Evolla backbone: a Llama-style decoder whose adapter-equipped layers
    cross-attend to compressed protein representations."""

    def __init__(self, config: EvollaConfig):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size
        self.embed_tokens = nn.Embedding(self.vocab_size, config.hidden_size, self.padding_idx)
        self.protein_encoder = EvollaProteinEncoder(config=config)
        self.layers = nn.ModuleList(
            [
                EvollaDecoderLayer(
                    config=config,
                    layer_idx=layer_idx,
                )
                for layer_idx in range(config.num_hidden_layers)
            ]
        )
        self.norm = EvollaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.gradient_checkpointing = getattr(config, "gradient_checkpointing", False)
        self.rotary_emb = EvollaRotaryEmbedding(config=config)

        self.post_init()

    def get_input_embeddings(self):
        return self.embed_tokens

    def set_input_embeddings(self, value):
        self.embed_tokens = value

    @auto_docstring
    @merge_with_config_defaults
    @capture_outputs
    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: Cache | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        use_cache: bool | None = None,
        cache_position: torch.LongTensor | None = None,
        protein_input_ids: torch.LongTensor | None = None,
        protein_attention_mask: torch.Tensor | None = None,
        structure_feats: torch.FloatTensor | None = None,
        msa_feats: torch.FloatTensor | None = None,
        structure_batch_mask: torch.Tensor | None = None,
        msa_batch_mask: torch.Tensor | None = None,
        **kwargs,
    ) -> tuple | BaseModelOutputWithPast:
        r"""
        protein_input_ids (torch.LongTensor):
            The input IDs for the protein sequence in structure-aware tokens. Should be of shape `(batch_size, protein_seq_length)` and type `torch.LongTensor`.
        protein_attention_mask (torch.Tensor):
            The attention mask for the protein sequence. Should be of shape `(batch_size, protein_seq_length)` and type `torch.Tensor`.
        structure_feats (torch.FloatTensor):
            The input IDs for purely structure-based features. Should be of shape `(batch_size, structure_seq_length, structure_feat_dim)` and type `torch.FloatTensor`. Dummy input for now.
        msa_feats (torch.FloatTensor):
            The input IDs for purely MSA-based features. Should be of shape `(batch_size, msa_seq_length, msa_feat_dim)` and type `torch.FloatTensor`. Dummy input for now.
        structure_batch_mask (torch.Tensor):
            The batch mask to decide which protein sequences are purely structure-based. Should be of shape `(batch_size)` and type `torch.Tensor`. Should be paired with `structure_feats`. Dummy input for now.
        msa_batch_mask (torch.Tensor):
            The batch mask to decide which protein sequences are purely MSA-based. Should be of shape `(batch_size)` and type `torch.Tensor`. Should be paired with `msa_feats`. Dummy input for now.
        """
        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)

        if use_cache and past_key_values is None:
            past_key_values = DynamicCache(config=self.config)

        if cache_position is None:
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position = torch.arange(
                past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
            )

        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)

        protein_feats = None
        protein_batch_mask = None
        # If provided, actually compute them
        if protein_input_ids is not None and protein_attention_mask is not None:
            protein_outputs = self.protein_encoder(
                input_ids=protein_input_ids,
                attention_mask=protein_attention_mask,
            )
            protein_feats = protein_outputs.sequence_compressor_output
            # All proteins passed in are considered valid for cross attention.
            protein_batch_mask = torch.ones(
                protein_input_ids.shape[0],
                device=protein_input_ids.device,
                dtype=torch.bool,
            )

        causal_mask = create_causal_mask(
            config=self.config,
            inputs_embeds=inputs_embeds,
            attention_mask=attention_mask,
            cache_position=cache_position,
            past_key_values=past_key_values,
        )

        hidden_states = inputs_embeds
        # Rotary cos/sin shared by every decoder layer.
        position_embeddings = self.rotary_emb(hidden_states, position_ids=position_ids)

        for decoder_layer in self.layers:
            hidden_states = decoder_layer(
                hidden_states,
                attention_mask=causal_mask,
                position_ids=position_ids,
                past_key_values=past_key_values,
                use_cache=use_cache,
                cache_position=cache_position,
                protein_kv_states=protein_feats,
                structure_kv_states=structure_feats,
                msa_kv_states=msa_feats,
                protein_batch_mask=protein_batch_mask,
                structure_batch_mask=structure_batch_mask,
                msa_batch_mask=msa_batch_mask,
                query_attn_mask=attention_mask,
                position_embeddings=position_embeddings,
                **kwargs,
            )

        hidden_states = self.norm(hidden_states)

        output = BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values,
        )
        return output
class EvollaForProteinText2Text(EvollaPreTrainedModel, GenerationMixin):
    """Evolla backbone with a tied language-modeling head for protein-conditioned
    text generation."""

    def __init__(self, config):
        super().__init__(config)
        self.model = EvollaModel(config)
        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size, self.vocab_size, bias=False)

        self.post_init()

    def get_input_embeddings(self):
        return self.model.get_input_embeddings()

    def set_input_embeddings(self, value):
        return self.model.set_input_embeddings(value)

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: torch.LongTensor | None = None,  # text input ids
        attention_mask: torch.Tensor | None = None,  # text attention mask
        inputs_embeds: torch.FloatTensor | None = None,  # text input embeddings
        labels: torch.LongTensor | None = None,
        protein_input_ids: torch.LongTensor | None = None,
        protein_attention_mask: torch.Tensor | None = None,
        use_cache: bool | None = None,
        logits_to_keep: int | torch.Tensor = 0,
        **kwargs,
    ):
        r"""
        protein_input_ids (torch.LongTensor):
            The input IDs for the protein sequence. Should be of shape `(batch_size, protein_seq_length)` and type `torch.LongTensor`.
        protein_attention_mask (torch.Tensor):
            The attention mask for the protein sequence. Should be of shape `(batch_size, protein_seq_length)` and type `torch.Tensor`.

        Example:

        ```python
        >>> from transformers import EvollaProcessor, EvollaForProteinText2Text
        >>> model = EvollaForProteinText2Text.from_pretrained("westlake/Evolla-10B-hf")
        >>> processor = EvollaProcessor.from_pretrained("westlake/Evolla-10B-hf")

        >>> protein_information = {
            "aa_seq": "your amino acid sequence",
            "foldseek": "your foldseek sequence",
        }
        >>> question = "What is the function of this protein?"
        >>> message = [
            {"role": "system", "content": "You are an AI expert that can answer any questions about protein."},
            {"role": "user", "content": question},
        ]

        >>> inputs = processor(proteins=[protein_information], messages_list=[message], return_tensors="pt", padding="longest")
        >>> outputs = model.generate(**inputs)

        >>> print(processor.batch_decode(outputs, skip_special_tokens=True))
        ```"""
        outputs: BaseModelOutputWithPast = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            inputs_embeds=inputs_embeds,
            protein_input_ids=protein_input_ids,
            protein_attention_mask=protein_attention_mask,
            use_cache=use_cache,
            **kwargs,
        )
        hidden_states = outputs.last_hidden_state
        # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
        slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
        logits = self.lm_head(hidden_states[:, slice_indices, :])

        loss = None
        if labels is not None:
            loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.vocab_size, **kwargs)

        # NOTE(review): `outputs.hidden_states` / `outputs.attentions` are not set by
        # EvollaModel directly — presumably populated via `capture_outputs`; confirm.
        lm_outputs = CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

        return lm_outputs
# Public symbols exported from this modular definition file.
__all__ = ["EvollaForProteinText2Text", "EvollaModel", "EvollaPreTrainedModel"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/evolla/modular_evolla.py",
"license": "Apache License 2.0",
"lines": 785,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/evolla/processing_evolla.py | # Copyright 2025 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Processor class for EVOLLA.
"""
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import (
ProcessorMixin,
)
from ...utils import auto_docstring
# Keys a caller may supply in each protein dict handed to `EvollaProcessor.__call__`.
# NOTE(review): "msa" passes validation but is never consumed by `process_proteins` — confirm intended.
PROTEIN_VALID_KEYS = ["aa_seq", "foldseek", "msa"]


@auto_docstring
class EvollaProcessor(ProcessorMixin):
    # Wraps two tokenizers — a protein (SaProt/ESM-style) tokenizer and a text chat
    # tokenizer — into a single processor producing the joint inputs Evolla expects.

    def __init__(self, protein_tokenizer, tokenizer=None, protein_max_length=1024, text_max_length=512, **kwargs):
        r"""
        protein_tokenizer (`EsmTokenizer`):
            An instance of [`EsmTokenizer`]. The protein tokenizer is a required input.
        protein_max_length (`int`, *optional*, defaults to 1024):
            The maximum length of the sequence to be generated.
        text_max_length (`int`, *optional*, defaults to 512):
            The maximum length of the text to be generated.
        """
        # NOTE(review): extra `**kwargs` are accepted but silently ignored — confirm intended.
        if protein_tokenizer is None:
            raise ValueError("You need to specify an `protein_tokenizer`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(protein_tokenizer, tokenizer)
        # The text tokenizer may ship without a pad token; reuse a reserved special token
        # so `padding="longest"` in `process_text` works.
        self.tokenizer.pad_token = "<|reserved_special_token_0|>"
        self.protein_max_length = protein_max_length
        self.text_max_length = text_max_length

    def process_proteins(self, proteins, protein_max_length=1024):
        """Build structure-aware (SA) sequences from each protein and tokenize them.

        Each residue contributes its uppercase amino-acid letter followed by the
        lowercase foldseek structure letter, e.g. ("ML", "dv") -> "MdLv".
        Returns the protein tokenizer's padded/truncated pt-tensor batch.
        """
        sa_sequences = []
        for protein in proteins:
            aa_seq = protein.get("aa_seq")
            foldseek = protein.get("foldseek")
            # NOTE(review): zip truncates to the shorter of aa_seq/foldseek; a length
            # mismatch is silently accepted — confirm upstream guarantees equal lengths.
            sa_sequence = "".join([s.upper() + f.lower() for s, f in zip(aa_seq, foldseek)])
            sa_sequences.append(sa_sequence)
        sa_tokens = self.protein_tokenizer(
            sa_sequences, return_tensors="pt", truncation=True, max_length=protein_max_length, padding=True
        )
        return sa_tokens

    def process_text(
        self,
        texts,
        text_max_length: int = 512,
    ):
        """Render each chat `messages` list through the chat template and tokenize.

        `add_generation_prompt=True` appends the assistant prompt; special tokens are
        not re-added by the tokenizer since the template already includes them.
        """
        prompts = []
        for messages in texts:
            prompt = self.tokenizer.apply_chat_template(
                messages,
                tokenize=False,
                add_generation_prompt=True,
            )
            prompts.append(prompt)
        prompt_inputs = self.tokenizer(
            prompts,
            add_special_tokens=False,
            return_tensors="pt",
            padding="longest",
            truncation=True,
            max_length=text_max_length,
        )
        return prompt_inputs

    @auto_docstring
    def __call__(
        self,
        proteins: list[dict] | dict | None = None,
        messages_list: list[list[dict]] | list[dict] | None = None,
        protein_max_length: int | None = None,
        text_max_length: int | None = None,
        **kwargs,
    ):
        r"""
        proteins (`Union[List[dict], dict]`):
            A list of dictionaries or a single dictionary containing the following keys:
                - `"aa_seq"` (`str`) -- The amino acid sequence of the protein.
                - `"foldseek"` (`str`) -- The foldseek string of the protein.
        messages_list (`Union[List[List[dict]], List[dict]]`):
            A list of lists of dictionaries or a list of dictionaries containing the following keys:
                - `"role"` (`str`) -- The role of the message.
                - `"content"` (`str`) -- The content of the message.
        protein_max_length (`int`, *optional*, defaults to 1024):
            The maximum length of the sequence to be generated.
        text_max_length (`int`, *optional*, defaults to 512):
            The maximum length of the text.
        Return:
            a dict with following keys:
                - `protein_input_ids` (`torch.Tensor` of shape `(batch_size, sequence_length)`) -- The input IDs for the protein sequence.
                - `protein_attention_mask` (`torch.Tensor` of shape `(batch_size, sequence_length)`) -- The attention mask for the protein sequence.
                - `text_input_ids` (`torch.Tensor` of shape `(batch_size, sequence_length)`) -- The input IDs for the text sequence.
                - `text_attention_mask` (`torch.Tensor` of shape `(batch_size, sequence_length)`) -- The attention mask for the text sequence.
        """
        # NOTE(review): `**kwargs` (e.g. return_tensors) are accepted but unused — confirm intended.
        # proteins and messages_list should be provided
        if proteins is None or messages_list is None:
            raise ValueError("You need to specify `messages_list` and `proteins`.")
        protein_max_length = protein_max_length if protein_max_length is not None else self.protein_max_length
        text_max_length = text_max_length if text_max_length is not None else self.text_max_length
        # proteins should be List[dict]
        if isinstance(proteins, dict):
            proteins = [proteins]
        # messages_list should be List[List[dict]]
        if isinstance(messages_list, (list, tuple)) and not isinstance(messages_list[0], (list, tuple)):
            messages_list = [messages_list]
        # Check if batched proteins are in the correct format
        if isinstance(proteins, (list, tuple)) and not all(isinstance(p, dict) for p in proteins):
            raise ValueError("The proteins should be a list of dictionaries, but not all elements are dictionaries.")
        # Every key of every protein dict must be one of PROTEIN_VALID_KEYS.
        if isinstance(proteins, (list, tuple)) and not all(
            all(k in PROTEIN_VALID_KEYS for k in p.keys()) for p in proteins
        ):
            raise ValueError(
                "There should be a list of dictionaries with keys: "
                f"{', '.join(PROTEIN_VALID_KEYS)} for each protein."
                f"But got: {proteins}"
            )
        # Check if batched messages_list is in the correct format
        if isinstance(messages_list, (list, tuple)):
            for messages in messages_list:
                if not isinstance(messages, (list, tuple)):
                    raise TypeError(f"Each messages in messages_list should be a list instead of {type(messages)}.")
                if not all(isinstance(m, dict) for m in messages):
                    raise ValueError(
                        "Each message in messages_list should be a list of dictionaries, but not all elements are dictionaries."
                    )
                # Each message must have exactly the two keys "role" and "content".
                if any(len(m.keys()) != 2 for m in messages) or any(
                    set(m.keys()) != {"role", "content"} for m in messages
                ):
                    raise ValueError(
                        "Each message in messages_list should be a list of dictionaries with two keys: 'role' and 'content'."
                        f"But got: {messages}"
                    )
        else:
            raise ValueError(
                f"The messages_list should be a list of lists of dictionaries, but it's {type(messages_list)}."
            )
        sa_tokens = self.process_proteins(proteins, protein_max_length)
        text_tokens = self.process_text(messages_list, text_max_length)
        # Protein and text features are returned side by side in one BatchFeature.
        return BatchFeature(
            data={
                "protein_input_ids": sa_tokens["input_ids"],
                "protein_attention_mask": sa_tokens["attention_mask"],
                "input_ids": text_tokens["input_ids"],
                "attention_mask": text_tokens["attention_mask"],
            }
        )

    def batch_decode(self, *args, **kwargs):
        """Forward to the text tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the text tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    def protein_batch_decode(self, *args, **kwargs):
        """Forward to the protein tokenizer's `batch_decode`."""
        return self.protein_tokenizer.batch_decode(*args, **kwargs)

    def protein_decode(self, *args, **kwargs):
        """Forward to the protein tokenizer's `decode`."""
        return self.protein_tokenizer.decode(*args, **kwargs)


__all__ = ["EvollaProcessor"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/evolla/processing_evolla.py",
"license": "Apache License 2.0",
"lines": 164,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:tests/models/evolla/test_modeling_evolla.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch Evolla model."""
import unittest
from functools import cached_property
from parameterized import parameterized
from transformers import BitsAndBytesConfig, EvollaConfig, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
require_bitsandbytes,
require_torch,
slow,
torch_device,
)
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import (
TEST_EAGER_MATCHES_SDPA_INFERENCE_PARAMETERIZATION,
ModelTesterMixin,
ids_tensor,
random_attention_mask,
)
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EvollaForProteinText2Text, EvollaModel, EvollaProcessor
class EvollaModelTester:
    """
    Test helper that builds a deliberately tiny `EvollaConfig` plus random
    text/protein inputs so the model tests run quickly on CPU.
    """

    def __init__(
        self,
        parent,
        batch_size=1,
        is_training=False,
        text_seq_length=20,
        text_vocab_size=100,
        protein_seq_length=10,
        protein_vocab_size=20,
        hidden_size=4,  # llama hidden size
        intermediate_size=7,  # llama intermediate size
        num_hidden_layers=1,  # llama hidden layers
        num_attention_heads=2,  # llama attention heads
        num_key_value_heads=2,  # llama key value heads
        protein_hidden_size=8,  # protein encoder hidden size
        protein_num_hidden_layers=1,  # protein encoder hidden layers
        protein_num_attention_heads=4,  # protein encoder attention heads
        protein_intermediate_size=11,  # protein encoder intermediate size
        resampler_num_latents=7,  # sequence compressor num latents
        resampler_ff_mult=1,  # sequence compressor ff mult
        resampler_depth=2,  # sequence compressor depth
        resampler_dim_head=4,  # sequence compressor dim head
        resampler_heads=2,  # sequence compressor heads
        aligner_num_add_layers=1,  # sequence aligner num add layers
        aligner_ffn_mult=1,  # sequence aligner ffn mult
        use_input_mask=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.protein_seq_length = protein_seq_length
        self.protein_vocab_size = protein_vocab_size
        self.text_seq_length = text_seq_length
        self.text_vocab_size = text_vocab_size
        # `seq_length` is what the common test mixins read; for Evolla it is the text length.
        self.seq_length = text_seq_length
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.protein_hidden_size = protein_hidden_size
        self.protein_num_hidden_layers = protein_num_hidden_layers
        self.protein_num_attention_heads = protein_num_attention_heads
        self.protein_intermediate_size = protein_intermediate_size
        self.resampler_num_latents = resampler_num_latents
        self.resampler_ff_mult = resampler_ff_mult
        self.resampler_depth = resampler_depth
        self.resampler_dim_head = resampler_dim_head
        self.resampler_heads = resampler_heads
        self.aligner_num_add_layers = aligner_num_add_layers
        self.aligner_ffn_mult = aligner_ffn_mult
        self.use_input_mask = use_input_mask
        self.is_training = is_training

    @property
    def is_encoder_decoder(self):
        # Evolla is a decoder-only (text) model with a protein encoder bolted on.
        return False

    def prepare_config_and_inputs(self, num_proteins=None):
        """Return (config, text ids, text mask, protein ids, protein mask); `num_proteins` overrides batch size."""
        batch_size = num_proteins if num_proteins is not None else self.batch_size
        text_input_ids = ids_tensor([batch_size, self.text_seq_length], self.text_vocab_size)
        protein_input_ids = ids_tensor([batch_size, self.protein_seq_length], self.protein_vocab_size)
        # NOTE(review): if use_input_mask is False the two mask variables are never
        # bound and the return below raises NameError — confirm False is never used.
        if self.use_input_mask:
            text_input_mask = random_attention_mask([batch_size, self.text_seq_length])
            protein_input_mask = random_attention_mask([batch_size, self.protein_seq_length])
        config = self.get_config()
        return (config, text_input_ids, text_input_mask, protein_input_ids, protein_input_mask)

    def get_config(self):
        """Build the tiny `EvollaConfig` from the dimensions configured on this tester."""
        return EvollaConfig(
            protein_encoder_config={
                "vocab_size": self.protein_vocab_size,
                "hidden_size": self.protein_hidden_size,
                "num_hidden_layers": self.protein_num_hidden_layers,
                "num_attention_heads": self.protein_num_attention_heads,
                "intermediate_size": self.protein_intermediate_size,
            },
            vocab_size=self.text_vocab_size,
            hidden_size=self.hidden_size,
            intermediate_size=self.intermediate_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            num_key_value_heads=self.num_key_value_heads,
            aligner_ffn_mult=self.aligner_ffn_mult,
            aligner_num_add_layers=self.aligner_num_add_layers,
            resampler_depth=self.resampler_depth,
            resampler_dim_head=self.resampler_dim_head,
            resampler_heads=self.resampler_heads,
            resampler_num_latents=self.resampler_num_latents,
            resampler_ff_mult=self.resampler_ff_mult,
        )

    def create_and_check_model(
        self,
        config,
        input_ids,
        input_mask,
        protein_input_ids,
        protein_input_mask,
        batch_size=None,
    ):
        """Forward an `EvollaModel` in eval mode and check the output hidden-state shape."""
        batch_size = batch_size if batch_size is not None else self.batch_size
        model = EvollaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            protein_input_ids=protein_input_ids,
            protein_attention_mask=protein_input_mask,
        )
        self.parent.assertEqual(result.last_hidden_state.shape, (batch_size, input_ids.shape[1], self.hidden_size))

    def create_and_check_model_gen(
        self,
        config,
        input_ids,
        input_mask,
        protein_input_ids,
        protein_input_mask,
    ):
        """Smoke-test generation: two new tokens on top of the prompt, no output checks."""
        model = EvollaForProteinText2Text(config)
        model.to(torch_device)
        model.eval()
        model.generate(
            input_ids,
            attention_mask=input_mask,
            protein_input_ids=protein_input_ids,
            protein_attention_mask=protein_input_mask,
            max_length=self.seq_length + 2,
        )

    def prepare_config_and_inputs_for_common(self):
        """Adapt `prepare_config_and_inputs` to the (config, inputs_dict) shape the common mixins expect."""
        config_and_inputs = self.prepare_config_and_inputs()
        (config, text_input_ids, text_input_mask, protein_input_ids, protein_input_mask) = config_and_inputs
        inputs_dict = {
            "input_ids": text_input_ids,
            "attention_mask": text_input_mask,
            "protein_input_ids": protein_input_ids,
            "protein_attention_mask": protein_input_mask,
        }
        return config, inputs_dict
@require_torch
class EvollaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-mixin test suite for the tiny Evolla models built by `EvollaModelTester`."""

    all_model_classes = (EvollaModel, EvollaForProteinText2Text) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": EvollaModel} if is_torch_available() else {}
    test_resize_embeddings = False
    maxDiff = None
    test_torch_exportable = False

    def setUp(self):
        self.model_tester = EvollaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EvollaConfig, hidden_size=37)

    @property
    def is_encoder_decoder(self):
        return self.model_tester.is_encoder_decoder

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        # XXX: EvollaForProteinText2Text has no MODEL_FOR group yet, but it should be the same
        # as MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, so for now manually changing to do the right thing
        # as super won't do it
        if return_labels:
            inputs_dict["labels"] = torch.zeros(
                (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
            )
        return inputs_dict

    def test_model_outputs_equivalence(self):
        # Temporarily restrict the classes under test, restoring them even on failure.
        try:
            orig = self.all_model_classes
            # EvollaModel.forward doesn't have labels input arg - only EvollaForProteinText2Text does
            self.all_model_classes = (EvollaForProteinText2Text,) if is_torch_available() else ()
            super().test_model_outputs_equivalence()
        finally:
            self.all_model_classes = orig

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model_single_protein(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs(num_proteins=1)
        self.model_tester.create_and_check_model(*config_and_inputs, batch_size=1)

    def test_model_multiple_proteins(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs(num_proteins=2)
        self.model_tester.create_and_check_model(*config_and_inputs, batch_size=2)

    def test_generate_single_protein(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs(num_proteins=1)
        self.model_tester.create_and_check_model_gen(*config_and_inputs)

    def test_generate_multiple_proteins(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs(num_proteins=2)
        self.model_tester.create_and_check_model_gen(*config_and_inputs)

    def test_saprot_output(self):
        # Smoke-test: runs the inner SaProt model and prints; no assertions are made.
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        protein_information = {
            "input_ids": inputs_dict["protein_input_ids"],
            "attention_mask": inputs_dict["protein_attention_mask"],
        }
        for model_class in self.all_model_classes:
            if model_class is not EvollaModel:
                continue
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            protein_encoder_outputs = model.protein_encoder.model(**protein_information, return_dict=True)
            print(model_class, protein_encoder_outputs)

    def test_protein_encoder_output(self):
        # Smoke-test: runs the full protein encoder (SaProt + resampler) and prints; no assertions.
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        protein_information = {
            "input_ids": inputs_dict["protein_input_ids"],
            "attention_mask": inputs_dict["protein_attention_mask"],
        }
        for model_class in self.all_model_classes:
            if model_class is not EvollaModel:
                continue
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            protein_encoder_outputs = model.protein_encoder(**protein_information, return_dict=True)
            print(model_class, protein_encoder_outputs)

    def test_single_forward(self):
        # Smoke-test: one forward pass per model class with attentions requested; no assertions.
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            print(outputs)

    @parameterized.expand(TEST_EAGER_MATCHES_SDPA_INFERENCE_PARAMETERIZATION)
    @unittest.skip("Evolla requires both text and protein inputs which is currently not done in this test.")
    def test_eager_matches_sdpa_inference(self):
        pass

    @unittest.skip("Evolla does not support eager attention implementation.")
    def test_eager_padding_matches_padding_free_with_position_ids(self):
        pass

    @unittest.skip(
        "Evolla has a separate test runner for generation tests with complex inheritance, causing this check to fail."
    )
    def test_generation_tester_mixin_inheritance(self):
        pass

    @unittest.skip("Evolla requires both text and protein inputs which is currently not done in this test.")
    def test_flex_attention_with_grads(self):
        pass
@require_torch
class EvollaModelIntegrationTest(TestCasePlus):
    """Slow integration test against the released westlake-repl/Evolla-10B-hf checkpoint."""

    def _prepare_for_inputs(self):
        """Return a fixed (protein_information, messages) pair used by the inference test."""
        # Hard-coded real protein: amino-acid sequence plus its foldseek structure string
        # ('#' marks positions without structure assignment).
        aa_seq = "MLLEETLKSCPIVKRGKYHYFIHPISDGVPLVEPKLLREVATRIIKIGNFEGVNKIVTAEAMGIPLVTTLSLYTDIPYVIMRKREYKLPGEVPVFQSTGYSKGQLYLNGIEKGDKVIIIDDVISTGGTMIAIINALERAGAEIKDIICVIERGDGKKIVEEKTGYKIKTLVKIDVVDGEVVIL"
        foldseek = "dvvvvqqqpfawdddppdtdgcgclapvpdpddpvvlvvllvlcvvpadpvqaqeeeeeddscpsnvvsncvvpvhyydywylddppdppkdwqwf######gitidpdqaaaheyeyeeaeqdqlrvvlsvvvrcvvrnyhhrayeyaeyhycnqvvccvvpvghyhynwywdqdpsgidtd"
        question = "What is the function of this protein?"
        protein_information = {
            "aa_seq": aa_seq,
            "foldseek": foldseek,
        }
        messages = [
            {"role": "system", "content": "You are an AI expert that can answer any questions about protein."},
            {"role": "user", "content": question},
        ]
        return protein_information, messages

    @cached_property
    def default_processor(self):
        return EvollaProcessor.from_pretrained("westlake-repl/Evolla-10B-hf")

    @require_bitsandbytes
    @slow
    def test_inference_natural_language_protein_reasoning(self):
        protein_information, messages = self._prepare_for_inputs()
        processor = self.default_processor
        inputs = processor(
            messages_list=[messages], proteins=[protein_information], return_tensors="pt", padding="longest"
        ).to(torch_device)
        # the CI gpu is small so using quantization to fit
        quantization_config = BitsAndBytesConfig(
            load_in_4bit=True,
            bnb_4bit_compute_dtype="float16",
        )
        model = EvollaForProteinText2Text.from_pretrained(
            "westlake-repl/Evolla-10B-hf",
            quantization_config=quantization_config,
            device_map=torch_device,
        )
        # Greedy decoding so the answer is deterministic; assert on key phrases only.
        generated_ids = model.generate(**inputs, max_new_tokens=100, do_sample=False)
        generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)
        self.assertIn("This protein", generated_text[0])
        self.assertIn("purine", generated_text[0])
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/evolla/test_modeling_evolla.py",
"license": "Apache License 2.0",
"lines": 315,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:src/transformers/models/owlv2/modular_owlv2.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fast Image processor class for OWLv2."""
import warnings
from typing import Optional
import torch
import torchvision.transforms.v2.functional as tvF
from ...image_processing_utils_fast import (
BatchFeature,
)
from ...image_transforms import group_images_by_shape, reorder_images
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
PILImageResampling,
SizeDict,
)
from ...utils import (
TensorType,
auto_docstring,
)
from ..owlvit.image_processing_owlvit_fast import OwlViTImageProcessorFast
@auto_docstring
class Owlv2ImageProcessorFast(OwlViTImageProcessorFast):
    """Fast (torchvision-based) image processor for OWLv2.

    Differs from the OWL-ViT base in two ways: images are padded to a square
    before resizing, and rescaling happens *before* padding/resizing to match
    the original OWLv2 implementation.
    """

    resample = PILImageResampling.BILINEAR
    image_mean = OPENAI_CLIP_MEAN
    image_std = OPENAI_CLIP_STD
    size = {"height": 960, "width": 960}
    rescale_factor = 1 / 255
    do_resize = True
    do_rescale = True
    do_normalize = True
    do_pad = True
    crop_size = None
    do_center_crop = None

    def _pad_images(self, images: "torch.Tensor", constant_value: float = 0.0) -> "torch.Tensor":
        """
        Pad a batch of images to a square of side max(height, width), filling with
        `constant_value`. Padding is applied on the bottom/right only, keeping the
        image content anchored at the top-left.
        """
        height, width = images.shape[-2:]
        size = max(height, width)
        pad_bottom = size - height
        pad_right = size - width
        padding = (0, 0, pad_right, pad_bottom)  # (left, top, right, bottom)
        padded_image = tvF.pad(images, padding, fill=constant_value)
        return padded_image

    def pad(
        self,
        images: list["torch.Tensor"],
        disable_grouping: bool | None,
        constant_value: float = 0.0,
        **kwargs,
    ) -> list["torch.Tensor"]:
        """
        Unlike the Base class `self.pad` where all images are padded to the maximum image size,
        Owlv2 pads an image to square.
        """
        grouped_images, grouped_images_index = group_images_by_shape(images, disable_grouping=disable_grouping)
        processed_images_grouped = {}
        for shape, stacked_images in grouped_images.items():
            stacked_images = self._pad_images(
                stacked_images,
                constant_value=constant_value,
            )
            processed_images_grouped[shape] = stacked_images
        processed_images = reorder_images(processed_images_grouped, grouped_images_index)
        return processed_images

    def resize(
        self,
        image: "torch.Tensor",
        size: SizeDict,
        anti_aliasing: bool = True,
        anti_aliasing_sigma=None,
        **kwargs,
    ) -> "torch.Tensor":
        """
        Resize an image as per the original implementation.

        Args:
            image (`Tensor`):
                Image to resize.
            size (`dict[str, int]`):
                Dictionary containing the height and width to resize the image to.
            anti_aliasing (`bool`, *optional*, defaults to `True`):
                Whether to apply anti-aliasing when downsampling the image.
            anti_aliasing_sigma (`float`, *optional*, defaults to `None`):
                Standard deviation for Gaussian kernel when downsampling the image. If `None`, it will be calculated
                automatically.
        """
        output_shape = (size.height, size.width)
        input_shape = image.shape
        # Per-axis downscale factors; >1 means we are downsampling along that axis.
        # select height and width from input tensor
        factors = torch.tensor(input_shape[2:]).to(image.device) / torch.tensor(output_shape).to(image.device)
        if anti_aliasing:
            if anti_aliasing_sigma is None:
                # Default sigma from the downscale factor; presumably mirrors
                # skimage.transform.resize's anti-aliasing recipe — TODO confirm.
                anti_aliasing_sigma = ((factors - 1) / 2).clamp(min=0)
            else:
                anti_aliasing_sigma = torch.atleast_1d(anti_aliasing_sigma) * torch.ones_like(factors)
                if torch.any(anti_aliasing_sigma < 0):
                    raise ValueError("Anti-aliasing standard deviation must be greater than or equal to zero")
                elif torch.any((anti_aliasing_sigma > 0) & (factors <= 1)):
                    warnings.warn(
                        "Anti-aliasing standard deviation greater than zero but not down-sampling along all axes"
                    )
            if torch.any(anti_aliasing_sigma == 0):
                filtered = image
            else:
                # Kernel size covers ±3 sigma, forced odd.
                kernel_sizes = 2 * torch.ceil(3 * anti_aliasing_sigma).int() + 1
                filtered = tvF.gaussian_blur(
                    image, (kernel_sizes[0], kernel_sizes[1]), sigma=anti_aliasing_sigma.tolist()
                )
        else:
            filtered = image
        # Blur already provided the anti-aliasing, so resize without torchvision's own antialias.
        out = tvF.resize(filtered, size=(size.height, size.width), antialias=False)
        return out

    def _preprocess(
        self,
        images: list["torch.Tensor"],
        do_resize: bool,
        size: SizeDict,
        interpolation: Optional["tvF.InterpolationMode"],
        do_pad: bool,
        do_rescale: bool,
        rescale_factor: float,
        do_normalize: bool,
        image_mean: float | list[float] | None,
        image_std: float | list[float] | None,
        disable_grouping: bool | None,
        return_tensors: str | TensorType | None,
        **kwargs,
    ) -> BatchFeature:
        """Pipeline: rescale -> pad-to-square -> resize -> normalize (order matches the original OWLv2 code)."""
        # Group images by size for batched resizing
        grouped_images, grouped_images_index = group_images_by_shape(images, disable_grouping=disable_grouping)
        processed_images_grouped = {}
        for shape, stacked_images in grouped_images.items():
            # Rescale images before other operations as done in original implementation
            # (normalization is deferred to the last stage, hence do_normalize=False here).
            stacked_images = self.rescale_and_normalize(
                stacked_images, do_rescale, rescale_factor, False, image_mean, image_std
            )
            processed_images_grouped[shape] = stacked_images
        processed_images = reorder_images(processed_images_grouped, grouped_images_index)
        if do_pad:
            processed_images = self.pad(processed_images, constant_value=0.0, disable_grouping=disable_grouping)
        grouped_images, grouped_images_index = group_images_by_shape(
            processed_images, disable_grouping=disable_grouping
        )
        resized_images_grouped = {}
        for shape, stacked_images in grouped_images.items():
            if do_resize:
                stacked_images = self.resize(
                    image=stacked_images,
                    size=size,
                    interpolation=interpolation,
                    input_data_format=ChannelDimension.FIRST,
                )
            # Fix: the previous code assigned `resized_stack` only inside the `if do_resize:`
            # branch but stored it unconditionally, raising NameError when do_resize=False.
            resized_images_grouped[shape] = stacked_images
        resized_images = reorder_images(resized_images_grouped, grouped_images_index)
        # Group images by size for further processing
        # Needed in case do_resize is False, or resize returns images with different sizes
        grouped_images, grouped_images_index = group_images_by_shape(resized_images, disable_grouping=disable_grouping)
        processed_images_grouped = {}
        for shape, stacked_images in grouped_images.items():
            # Fused rescale and normalize (rescaling already happened in the first stage).
            stacked_images = self.rescale_and_normalize(
                stacked_images, False, rescale_factor, do_normalize, image_mean, image_std
            )
            processed_images_grouped[shape] = stacked_images
        processed_images = reorder_images(processed_images_grouped, grouped_images_index)
        return BatchFeature(data={"pixel_values": processed_images}, tensor_type=return_tensors)


__all__ = ["Owlv2ImageProcessorFast"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/owlv2/modular_owlv2.py",
"license": "Apache License 2.0",
"lines": 181,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/integrations/fp_quant.py | # Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"FP-Quant integration file"
import torch
from ..utils import (
is_fp_quant_available,
)
if is_fp_quant_available():
from fp_quant import FPQuantConfig as FPQuantLinearConfig
from fp_quant import FPQuantDtype
from transformers.utils.quantization_config import FPQuantConfig
from ..core_model_loading import ConversionOps
from ..quantizers.quantizers_utils import get_module_from_name
class FpQuantQuantize(ConversionOps):
    """Conversion op that loads full-precision weights into an FPQuant module and
    lets the module quantize them in place via its `pre_forward` hook."""

    def __init__(self, hf_quantizer):
        # The owning HfQuantizer; kept for parity with other ConversionOps (not read here).
        self.hf_quantizer = hf_quantizer

    def convert(
        self,
        # NOTE(review): annotation corrected from `torch.Tensor` — the code below calls
        # `.items()` and indexes the value, so this is a single-entry {name: [tensor]} dict.
        input_dict: dict[str, list[torch.Tensor]],
        model: torch.nn.Module | None = None,
        # Annotation corrected from `list[str]`: `.discard` below is a `set` method.
        missing_keys: set[str] | None = None,
        **kwargs,
    ) -> dict[str, torch.Tensor]:
        """Attach the master weight, trigger in-module quantization, and clear the
        quantization parameters from `missing_keys`. Returns {} since the module
        itself holds the resulting tensors."""
        target_key, value = tuple(input_dict.items())[0]
        value = value[0]
        # Loading master weights or an unquantized checkpoint
        weight = torch.nn.Parameter(value)
        module, _ = get_module_from_name(model, target_key)
        module.weight = weight
        # Let pre-forward handle the quantization and set None where necessary
        # This operation will quantize the weights internally
        with torch.cuda.device(value.device):
            module.pre_forward()
        prefix_target_key = target_key.rsplit(".", 1)[0]
        # keys are set inside the module.pre_forward() method, we don't need remove them from the missing keys list
        missing_keys.discard(target_key)
        missing_keys.discard(f"{prefix_target_key}.backward_hadamard_matrix")
        missing_keys.discard(f"{prefix_target_key}.forward_hadamard_matrix")
        missing_keys.discard(f"{prefix_target_key}.act_global_scale")
        missing_keys.discard(f"{prefix_target_key}.weight_global_scale")
        missing_keys.discard(f"{prefix_target_key}.qweight")
        missing_keys.discard(f"{prefix_target_key}.scales")
        missing_keys.discard(f"{prefix_target_key}.dqweight")
        return {}
class FpQuantDeserialize(ConversionOps):
    """Conversion op that loads an already-quantized FPQuant checkpoint (real or
    pseudo-quantized) into the module's expected parameter slots."""

    def __init__(self, hf_quantizer):
        # The owning HfQuantizer; kept for parity with other ConversionOps (not read here).
        self.hf_quantizer = hf_quantizer

    def convert(
        self,
        # NOTE(review): annotation corrected from `torch.Tensor` — the code below calls
        # `.items()`; values may be a bare tensor or a one-element list.
        input_dict: dict[str, torch.Tensor | list[torch.Tensor]],
        model: torch.nn.Module | None = None,
        full_layer_name: str | None = None,  # NOTE(review): accepted but unused — confirm intended.
        missing_keys: set[str] | None = None,
        **kwargs,
    ) -> dict[str, torch.Tensor] | None:
        """Map a checkpoint entry (".qweight" or ".dqweight") to the parameter dict
        expected by FPQuantLinear, zero-filling the unused slots.

        NOTE(review): return annotation includes `None` because any other
        `target_key` falls through and implicitly returns None — confirm callers
        only ever pass ".qweight"/".dqweight" here.
        """
        target_key, value = tuple(input_dict.items())[0]
        value = value[0] if isinstance(value, list) else value
        module, _ = get_module_from_name(model, target_key)
        # The module holds either:
        # * `weight` when `store_master_weights=True`
        # * `qweight` and `scales` when `store_master_weights=False` and `pseudoquantization=False`
        # * `dqweight` when `store_master_weights=False` and `pseudoquantization=True`
        if target_key == ".qweight":
            # Loading a real quantized checkpoint without master weights
            qweight = torch.nn.Parameter(
                value,
                requires_grad=False,
            )
            return {
                ".qweight": qweight,
                # the way the FPQuantLinear module is designed, these parameters are expected in the model
                # even though they are not used so we need to set them to zeros
                ".weight": torch.nn.Parameter(torch.zeros(0)),
                ".dqweight": torch.nn.Parameter(torch.zeros(0)),
            }
        if target_key == ".dqweight":
            # Loading a pseudo-quantized checkpoint without master weights
            dqweight = torch.nn.Parameter(value)
            return {
                ".dqweight": dqweight,
                # the way the FPQuantLinear module is designed, these parameters are expected in the model
                # even though they are not used so we need to set them to zeros
                ".weight": torch.nn.Parameter(torch.zeros(0)),
                ".qweight": torch.nn.Parameter(torch.zeros(0)),
                ".scales": torch.nn.Parameter(torch.zeros(0)),
            }
def adapt_fp_quant_config(config: FPQuantConfig):
    """Translate a transformers `FPQuantConfig` into an `fp_quant` `FPQuantLinearConfig`.

    The string dtype names on the HF config are mapped to `FPQuantDtype` members;
    all other fields are passed through unchanged.

    Raises:
        ValueError: if `forward_dtype` or `backward_dtype` names an unsupported dtype.
    """
    forward_dtypes = {"mxfp4": FPQuantDtype.MXFP4, "nvfp4": FPQuantDtype.NVFP4}
    backward_dtypes = {
        "bf16": FPQuantDtype.BF16,
        "mxfp8": FPQuantDtype.MXFP8,
        "mxfp4": FPQuantDtype.MXFP4,
    }
    if config.forward_dtype not in forward_dtypes:
        raise ValueError(f"Unsupported forward dtype: {config.forward_dtype}")
    if config.backward_dtype not in backward_dtypes:
        raise ValueError(f"Unsupported backward dtype: {config.backward_dtype}")
    return FPQuantLinearConfig(
        forward_dtype=forward_dtypes[config.forward_dtype],
        forward_method=config.forward_method,
        backward_dtype=backward_dtypes[config.backward_dtype],
        store_master_weights=config.store_master_weights,
        hadamard_group_size=config.hadamard_group_size,
        pseudoquantization=config.pseudoquantization,
        transform_init=config.transform_init,
        modules_to_not_convert=config.modules_to_not_convert,
    )
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/integrations/fp_quant.py",
"license": "Apache License 2.0",
"lines": 122,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/quantizers/quantizer_fp_quant.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING, Optional
from .base import HfQuantizer
from .quantizers_utils import get_module_from_name
if TYPE_CHECKING:
from ..modeling_utils import PreTrainedModel
from ..utils import is_fp_quant_available, is_qutlass_available, is_torch_available, is_torch_xpu_available, logging
from ..utils.quantization_config import QuantizationConfigMixin
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class FPQuantHfQuantizer(HfQuantizer):
    """
    Quantizer for the FP-Quant method. Enables the loading of prequantized models and in-flight quantization of full-precision models.
    """

    requires_calibration = False
    is_qat_trainable = True

    def __init__(self, quantization_config: QuantizationConfigMixin, **kwargs):
        super().__init__(quantization_config, **kwargs)

    def validate_environment(self, device_map, **kwargs):
        """Check hardware and library availability, and reject unsupported device maps."""
        if not torch.cuda.is_available() and not is_torch_xpu_available():
            raise NotImplementedError(
                "FPQuant quantization is only supported on GPU or Intel XPU. Please use a different quantizer."
            )
        if not is_qutlass_available() and not self.quantization_config.pseudoquantization:
            raise ImportError(
                "Using `fp_quant` with real quantization requires a **Blackwell GPU** and qutlass: `git clone https://github.com/IST-DASLab/qutlass.git && cd qutlass && pip install --no-build-isolation .`. You can use `FPQuantConfig(pseudoquantization=True, ...)` to use Triton-based pseudo-quantization. It doesn't provide any speedups but emulates the quantization behavior of the real quantization."
            )
        if self.quantization_config.pseudoquantization:
            logger.warning(
                "Using pseudo-quantization for FP-Quant. This doesn't provide any speedups but emulates the quantization behavior of the real quantization."
            )
        if not is_fp_quant_available():
            raise ImportError("Using `fp_quant` quantization requires fp_quant: `pip install fp_quant`")
        if device_map is None and not self.quantization_config.pseudoquantization:
            raise ValueError(
                "You are attempting to load a FPQuant model without setting device_map."
                " Please set device_map comprised of 'cuda' devices."
            )
        elif isinstance(device_map, dict):
            # Fix: parenthesized the cpu/disk check. The original condition parsed as
            # `(not pseudo and len > 1 and "cpu" in values) or "disk" in values`, so a
            # disk entry raised even for pseudo-quantization or single-device maps,
            # contrary to the error message's intent.
            if (
                not self.quantization_config.pseudoquantization
                and len(device_map) > 1
                and ("cpu" in device_map.values() or "disk" in device_map.values())
            ):
                raise ValueError(
                    "You are attempting to load a FPQuant model with a device_map that contains a CPU or disk device."
                    " This is not supported. Please remove the CPU or disk device from the device_map."
                )

    def update_dtype(self, dtype: "torch.dtype") -> "torch.dtype":
        """Force bfloat16, the only dtype FP-Quant currently supports, warning once otherwise."""
        if dtype != torch.bfloat16:
            logger.warning_once(
                f"Setting dtype to {dtype}, but only bfloat16 is supported right now. Overwriting torch_dtype to bfloat16."
            )
            dtype = torch.bfloat16
        return dtype

    def param_needs_quantization(self, model: "PreTrainedModel", param_name: str, **kwargs) -> bool:
        """Return True for FPQuantLinear weight-like params that this quantizer must handle."""
        from fp_quant import FPQuantLinear

        module, tensor_name = get_module_from_name(model, param_name)
        # Weight-like tensors of FPQuantLinear modules go through the quantizer's
        # load path ("weight" is full precision; "qweight"/"dqweight" are the
        # pre-quantized real/pseudo forms).
        return isinstance(module, FPQuantLinear) and tensor_name in ["weight", "qweight", "dqweight"]

    def _process_model_before_weight_loading(
        self,
        model: "PreTrainedModel",
        **kwargs,
    ):
        """Swap eligible nn.Linear modules for FPQuantLinear before weights are loaded."""
        from fp_quant import replace_with_fp_quant_linear

        from ..integrations.fp_quant import adapt_fp_quant_config

        replace_with_fp_quant_linear(
            model,
            fp_quant_linear_config=adapt_fp_quant_config(self.quantization_config),
        )

    @property
    def is_trainable(self):
        # Removed the unused `model` parameter: as a property this method can never
        # receive extra arguments.
        trainable = self.quantization_config.store_master_weights
        if not trainable:
            logger.warning(
                "You are attempting to train a model with FPQuant quantization. This is only supported when `store_master_weights=True`. Please set `store_master_weights=True` to train the model."
            )
        return trainable

    def is_serializable(self):
        return True

    def get_quantize_ops(self):
        from ..integrations.fp_quant import FpQuantQuantize

        return FpQuantQuantize(self)

    def get_weight_conversions(self):
        """Return the WeightConverter list for loading pre-quantized checkpoints."""
        from ..core_model_loading import WeightConverter
        from ..integrations.fp_quant import FpQuantDeserialize

        if not self.pre_quantized:
            return []
        # Pseudo-quantized checkpoints store ".dqweight", real ones ".qweight";
        # the conversion is otherwise identical, so build it once.
        pattern = ".dqweight" if self.quantization_config.pseudoquantization else ".qweight"
        return [
            WeightConverter(
                source_patterns=[pattern],
                target_patterns=pattern,
                operations=[FpQuantDeserialize(self)],
            ),
        ]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/quantizers/quantizer_fp_quant.py",
"license": "Apache License 2.0",
"lines": 122,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:tests/quantization/fp_quant_integration/test_fp_quant.py | # Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import tempfile
import unittest
from transformers import AutoModelForCausalLM, AutoTokenizer, FPQuantConfig
from transformers.testing_utils import (
backend_empty_cache,
require_accelerate,
require_fp_quant,
require_qutlass,
require_torch_accelerator,
require_torch_multi_accelerator,
slow,
torch_device,
)
@require_torch_accelerator
class FPQuantConfigTest(unittest.TestCase):
    def test_to_dict(self):
        """
        Simple test that checks if one uses a config and converts it to a dict, the dict is the same as the config object
        """
        quantization_config = FPQuantConfig()
        config_to_dict = quantization_config.to_dict()
        for key in config_to_dict:
            self.assertEqual(getattr(quantization_config, key), config_to_dict[key])

    def test_from_dict(self):
        """
        Simple test that checks if one uses a dict and converts it to a config object, the config object is the same as the dict
        """
        # Renamed from `dict`, which shadowed the builtin.
        config_dict = {"modules_to_not_convert": ["embed_tokens", "lm_head"], "quant_method": "fp_quant"}
        quantization_config = FPQuantConfig.from_dict(config_dict)
        self.assertEqual(config_dict["modules_to_not_convert"], quantization_config.modules_to_not_convert)
        self.assertEqual(config_dict["quant_method"], quantization_config.quant_method)
@slow
@require_torch_accelerator
@require_fp_quant
@require_accelerate
class FPQuantBaseTest(unittest.TestCase):
    """
    Shared FP-Quant integration test suite. Subclasses supply the quantization
    configuration via `getQuantizationConfig`; the quantized model is loaded once
    per class in `setUpClass`.
    """

    model_name = "unsloth/Llama-3.2-1B"
    input_text = "1 2 3 4"
    max_new_tokens = 4
    EXPECTED_OUTPUT = "1 2 3 4 5 6"
    device_map = torch_device

    @classmethod
    def getQuantizationConfig(cls):
        # Fix: the original body was `unittest.skip("...")` as a bare statement — a
        # no-op that builds a decorator and implicitly returns None, so the base
        # class would load an *unquantized* model (quantization_config=None).
        # Raising SkipTest makes the base class skip while subclasses override.
        raise unittest.SkipTest("Subclass must implement this method")

    # called only once for all test in this class
    @classmethod
    def setUpClass(cls):
        """
        Setup quantized model
        """
        cls.quantization_config = cls.getQuantizationConfig()
        cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name)
        cls.quantized_model = AutoModelForCausalLM.from_pretrained(
            cls.model_name, device_map=cls.device_map, quantization_config=cls.quantization_config
        )

    def tearDown(self):
        # Release accelerator memory between tests.
        gc.collect()
        backend_empty_cache(torch_device)
        gc.collect()

    def test_quantized_model(self):
        """
        Simple test that checks if the quantized model is working properly
        """
        input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device)
        output = self.quantized_model.generate(**input_ids, max_new_tokens=self.max_new_tokens)
        self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT)

    def test_save_pretrained(self):
        """
        Simple test that checks if the quantized model is working properly after being saved and loaded
        """
        with tempfile.TemporaryDirectory() as tmpdirname:
            self.quantized_model.save_pretrained(tmpdirname)
            model = AutoModelForCausalLM.from_pretrained(tmpdirname, device_map=self.device_map)
            input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device)
            output = model.generate(**input_ids, max_new_tokens=self.max_new_tokens)
            self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT)

    @require_torch_multi_accelerator
    def test_quantized_model_multi_accelerator(self):
        """
        Simple test that checks if the quantized model is working properly with multiple accelerators.
        Set CUDA_VISIBLE_DEVICES=0,1 if you have more than 2 CUDA GPUs. Or set ZE_AFFINITY_MASK=0,1
        if you have more than 2 Intel XPUs.
        """
        input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device)
        quantized_model = AutoModelForCausalLM.from_pretrained(
            self.model_name, device_map="auto", quantization_config=self.quantization_config
        )
        self.assertTrue(set(quantized_model.hf_device_map.values()) == {0, 1})
        output = quantized_model.generate(**input_ids, max_new_tokens=self.max_new_tokens)
        self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT)

    @require_torch_multi_accelerator
    def test_save_pretrained_multi_accelerator(self):
        """
        Simple test that checks if the quantized model is working properly after being saved and loaded
        """
        with tempfile.TemporaryDirectory() as tmpdirname:
            self.quantized_model.save_pretrained(tmpdirname)
            model = AutoModelForCausalLM.from_pretrained(tmpdirname, device_map="auto")
            self.assertTrue(set(model.hf_device_map.values()) == {0, 1})
            input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device)
            output = model.generate(**input_ids, max_new_tokens=self.max_new_tokens)
            self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT)
class FPQuantMXFP4PseudoquantTest(FPQuantBaseTest):
    """Runs the shared suite with MXFP4 Triton-based pseudo-quantization."""

    @classmethod
    def getQuantizationConfig(cls):
        config = FPQuantConfig(forward_dtype="mxfp4", pseudoquantization=True)
        return config


class FPQuantNVFP4PseudoquantTest(FPQuantBaseTest):
    """Runs the shared suite with NVFP4 Triton-based pseudo-quantization."""

    @classmethod
    def getQuantizationConfig(cls):
        config = FPQuantConfig(forward_dtype="nvfp4", pseudoquantization=True)
        return config


@require_qutlass
class FPQuantMXFP4Test(FPQuantBaseTest):
    """Runs the shared suite with real MXFP4 quantization (requires qutlass)."""

    @classmethod
    def getQuantizationConfig(cls):
        config = FPQuantConfig(forward_dtype="mxfp4", pseudoquantization=False)
        return config


@require_qutlass
class FPQuantNVFP4Test(FPQuantBaseTest):
    """Runs the shared suite with real NVFP4 quantization (requires qutlass)."""

    @classmethod
    def getQuantizationConfig(cls):
        config = FPQuantConfig(forward_dtype="nvfp4", pseudoquantization=False)
        return config


@require_qutlass
class FPQuantMXFP4GS128Test(FPQuantBaseTest):
    """Real MXFP4 quantization with a Hadamard group size of 128."""

    @classmethod
    def getQuantizationConfig(cls):
        config = FPQuantConfig(forward_dtype="mxfp4", pseudoquantization=False, hadamard_group_size=128)
        return config


@require_qutlass
class FPQuantNVFP4GS128Test(FPQuantBaseTest):
    """Real NVFP4 quantization with a Hadamard group size of 128."""

    @classmethod
    def getQuantizationConfig(cls):
        config = FPQuantConfig(forward_dtype="nvfp4", pseudoquantization=False, hadamard_group_size=128)
        return config
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/quantization/fp_quant_integration/test_fp_quant.py",
"license": "Apache License 2.0",
"lines": 144,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:src/transformers/models/mask2former/modular_mask2former.py | # Copyright 2025 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from torch import nn
from transformers.models.maskformer.image_processing_maskformer_fast import MaskFormerImageProcessorFast
from ...utils import (
TensorType,
logging,
)
from .image_processing_mask2former import (
compute_segments,
convert_segmentation_to_rle,
remove_low_and_no_objects,
)
logger = logging.get_logger(__name__)
class Mask2FormerImageProcessorFast(MaskFormerImageProcessorFast):
    """
    Fast image processor for Mask2Former. Inherits the preprocessing pipeline from
    `MaskFormerImageProcessorFast` and overrides only the post-processing methods,
    which first rescale the predicted mask logits back to the fixed (384, 384)
    preprocessed resolution before resizing to any requested target sizes.
    """

    def post_process_semantic_segmentation(
        self, outputs, target_sizes: list[tuple[int, int]] | None = None
    ) -> "torch.Tensor":
        """
        Converts the output of [`Mask2FormerForUniversalSegmentation`] into semantic segmentation maps. Only supports
        PyTorch.
        Args:
            outputs ([`Mask2FormerForUniversalSegmentation`]):
                Raw outputs of the model.
            target_sizes (`List[Tuple[int, int]]`, *optional*):
                List of length (batch_size), where each list item (`Tuple[int, int]]`) corresponds to the requested
                final size (height, width) of each prediction. If left to None, predictions will not be resized.
        Returns:
            `List[torch.Tensor]`:
                A list of length `batch_size`, where each item is a semantic segmentation map of shape (height, width)
                corresponding to the target_sizes entry (if `target_sizes` is specified). Each entry of each
                `torch.Tensor` correspond to a semantic class id.
        """
        class_queries_logits = outputs.class_queries_logits  # [batch_size, num_queries, num_classes+1]
        masks_queries_logits = outputs.masks_queries_logits  # [batch_size, num_queries, height, width]
        # Scale back to preprocessed image size - (384, 384) for all models
        masks_queries_logits = torch.nn.functional.interpolate(
            masks_queries_logits, size=(384, 384), mode="bilinear", align_corners=False
        )
        # Remove the null class `[..., :-1]`
        masks_classes = class_queries_logits.softmax(dim=-1)[..., :-1]
        masks_probs = masks_queries_logits.sigmoid()  # [batch_size, num_queries, height, width]
        # Semantic segmentation logits of shape (batch_size, num_classes, height, width):
        # each query's mask probabilities are weighted by its class probabilities and summed.
        segmentation = torch.einsum("bqc, bqhw -> bchw", masks_classes, masks_probs)
        batch_size = class_queries_logits.shape[0]
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if batch_size != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )
            semantic_segmentation = []
            for idx in range(batch_size):
                resized_logits = torch.nn.functional.interpolate(
                    segmentation[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = segmentation.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
        return semantic_segmentation

    def post_process_instance_segmentation(
        self,
        outputs,
        threshold: float = 0.5,
        mask_threshold: float = 0.5,
        overlap_mask_area_threshold: float = 0.8,
        target_sizes: list[tuple[int, int]] | None = None,
        return_coco_annotation: bool | None = False,
        return_binary_maps: bool | None = False,
    ) -> list[dict]:
        """
        Converts the output of [`Mask2FormerForUniversalSegmentationOutput`] into instance segmentation predictions.
        Only supports PyTorch. If instances could overlap, set either return_coco_annotation or return_binary_maps
        to `True` to get the correct segmentation result.
        Args:
            outputs ([`Mask2FormerForUniversalSegmentation`]):
                Raw outputs of the model.
            threshold (`float`, *optional*, defaults to 0.5):
                The probability score threshold to keep predicted instance masks.
            mask_threshold (`float`, *optional*, defaults to 0.5):
                Threshold to use when turning the predicted masks into binary values.
            overlap_mask_area_threshold (`float`, *optional*, defaults to 0.8):
                The overlap mask area threshold to merge or discard small disconnected parts within each binary
                instance mask.
            target_sizes (`List[Tuple]`, *optional*):
                List of length (batch_size), where each list item (`Tuple[int, int]]`) corresponds to the requested
                final size (height, width) of each prediction. If left to None, predictions will not be resized.
            return_coco_annotation (`bool`, *optional*, defaults to `False`):
                If set to `True`, segmentation maps are returned in COCO run-length encoding (RLE) format.
            return_binary_maps (`bool`, *optional*, defaults to `False`):
                If set to `True`, segmentation maps are returned as a concatenated tensor of binary segmentation maps
                (one per detected instance).
        Returns:
            `List[Dict]`: A list of dictionaries, one per image, each dictionary containing two keys:
            - **segmentation** -- A tensor of shape `(height, width)` where each pixel represents a `segment_id`, or
              `List[List]` run-length encoding (RLE) of the segmentation map if return_coco_annotation is set to
              `True`, or a tensor of shape `(num_instances, height, width)` if return_binary_maps is set to `True`.
              Set to `None` if no mask if found above `threshold`.
            - **segments_info** -- A dictionary that contains additional information on each segment.
                - **id** -- An integer representing the `segment_id`.
                - **label_id** -- An integer representing the label / semantic class id corresponding to `segment_id`.
                - **score** -- Prediction score of segment with `segment_id`.
        """
        if return_coco_annotation and return_binary_maps:
            raise ValueError("return_coco_annotation and return_binary_maps can not be both set to True.")
        # [batch_size, num_queries, num_classes+1]
        class_queries_logits = outputs.class_queries_logits
        # [batch_size, num_queries, height, width]
        masks_queries_logits = outputs.masks_queries_logits
        # Scale back to preprocessed image size - (384, 384) for all models
        masks_queries_logits = torch.nn.functional.interpolate(
            masks_queries_logits, size=(384, 384), mode="bilinear", align_corners=False
        )
        device = masks_queries_logits.device
        num_classes = class_queries_logits.shape[-1] - 1
        num_queries = class_queries_logits.shape[-2]
        # NOTE(review): `mask_threshold` and `overlap_mask_area_threshold` are accepted
        # but unused in this body — binarization below is `mask_pred > 0`. Confirm
        # against the slow processor whether they should be applied here.
        # Loop over items in batch size
        results: list[dict[str, TensorType]] = []
        for i in range(class_queries_logits.shape[0]):
            mask_pred = masks_queries_logits[i]
            mask_cls = class_queries_logits[i]
            # Per-query class probabilities, dropping the null class.
            scores = torch.nn.functional.softmax(mask_cls, dim=-1)[:, :-1]
            labels = torch.arange(num_classes, device=device).unsqueeze(0).repeat(num_queries, 1).flatten(0, 1)
            # Top-k over the flattened (query, class) grid keeps the best query/class pairs;
            # dividing by num_classes recovers the originating query index.
            scores_per_image, topk_indices = scores.flatten(0, 1).topk(num_queries, sorted=False)
            labels_per_image = labels[topk_indices]
            topk_indices = torch.div(topk_indices, num_classes, rounding_mode="floor")
            mask_pred = mask_pred[topk_indices]
            pred_masks = (mask_pred > 0).float()
            # Calculate average mask prob
            mask_scores_per_image = (mask_pred.sigmoid().flatten(1) * pred_masks.flatten(1)).sum(1) / (
                pred_masks.flatten(1).sum(1) + 1e-6
            )
            pred_scores = scores_per_image * mask_scores_per_image
            pred_classes = labels_per_image
            # -1 marks pixels not assigned to any instance.
            segmentation = torch.zeros((384, 384)) - 1
            if target_sizes is not None:
                segmentation = torch.zeros(target_sizes[i]) - 1
                pred_masks = torch.nn.functional.interpolate(
                    pred_masks.unsqueeze(0), size=target_sizes[i], mode="nearest"
                )[0]
            instance_maps, segments = [], []
            current_segment_id = 0
            for j in range(num_queries):
                score = pred_scores[j].item()
                if not torch.all(pred_masks[j] == 0) and score >= threshold:
                    segmentation[pred_masks[j] == 1] = current_segment_id
                    segments.append(
                        {
                            "id": current_segment_id,
                            "label_id": pred_classes[j].item(),
                            "was_fused": False,
                            "score": round(score, 6),
                        }
                    )
                    current_segment_id += 1
                    instance_maps.append(pred_masks[j])
            # Return segmentation map in run-length encoding (RLE) format
            if return_coco_annotation:
                segmentation = convert_segmentation_to_rle(segmentation)
            # Return a concatenated tensor of binary instance maps
            if return_binary_maps and len(instance_maps) != 0:
                segmentation = torch.stack(instance_maps, dim=0)
            results.append({"segmentation": segmentation, "segments_info": segments})
        return results

    def post_process_panoptic_segmentation(
        self,
        outputs,
        threshold: float = 0.5,
        mask_threshold: float = 0.5,
        overlap_mask_area_threshold: float = 0.8,
        label_ids_to_fuse: set[int] | None = None,
        target_sizes: list[tuple[int, int]] | None = None,
    ) -> list[dict]:
        """
        Converts the output of [`Mask2FormerForUniversalSegmentationOutput`] into image panoptic segmentation
        predictions. Only supports PyTorch.
        Args:
            outputs ([`Mask2FormerForUniversalSegmentationOutput`]):
                The outputs from [`Mask2FormerForUniversalSegmentation`].
            threshold (`float`, *optional*, defaults to 0.5):
                The probability score threshold to keep predicted instance masks.
            mask_threshold (`float`, *optional*, defaults to 0.5):
                Threshold to use when turning the predicted masks into binary values.
            overlap_mask_area_threshold (`float`, *optional*, defaults to 0.8):
                The overlap mask area threshold to merge or discard small disconnected parts within each binary
                instance mask.
            label_ids_to_fuse (`Set[int]`, *optional*):
                The labels in this state will have all their instances be fused together. For instance we could say
                there can only be one sky in an image, but several persons, so the label ID for sky would be in that
                set, but not the one for person.
            target_sizes (`List[Tuple]`, *optional*):
                List of length (batch_size), where each list item (`Tuple[int, int]]`) corresponds to the requested
                final size (height, width) of each prediction in batch. If left to None, predictions will not be
                resized.
        Returns:
            `List[Dict]`: A list of dictionaries, one per image, each dictionary containing two keys:
            - **segmentation** -- a tensor of shape `(height, width)` where each pixel represents a `segment_id`, set
              to `None` if no mask if found above `threshold`. If `target_sizes` is specified, segmentation is resized
              to the corresponding `target_sizes` entry.
            - **segments_info** -- A dictionary that contains additional information on each segment.
                - **id** -- an integer representing the `segment_id`.
                - **label_id** -- An integer representing the label / semantic class id corresponding to `segment_id`.
                - **was_fused** -- a boolean, `True` if `label_id` was in `label_ids_to_fuse`, `False` otherwise.
                  Multiple instances of the same class / label were fused and assigned a single `segment_id`.
                - **score** -- Prediction score of segment with `segment_id`.
        """
        if label_ids_to_fuse is None:
            logger.warning("`label_ids_to_fuse` unset. No instance will be fused.")
            label_ids_to_fuse = set()
        class_queries_logits = outputs.class_queries_logits  # [batch_size, num_queries, num_classes+1]
        masks_queries_logits = outputs.masks_queries_logits  # [batch_size, num_queries, height, width]
        # Scale back to preprocessed image size - (384, 384) for all models
        masks_queries_logits = torch.nn.functional.interpolate(
            masks_queries_logits, size=(384, 384), mode="bilinear", align_corners=False
        )
        batch_size = class_queries_logits.shape[0]
        num_labels = class_queries_logits.shape[-1] - 1
        mask_probs = masks_queries_logits.sigmoid()  # [batch_size, num_queries, height, width]
        # Predicted label and score of each query (batch_size, num_queries)
        pred_scores, pred_labels = nn.functional.softmax(class_queries_logits, dim=-1).max(-1)
        # Loop over items in batch size
        results: list[dict[str, TensorType]] = []
        for i in range(batch_size):
            # Filter out null-class and low-confidence queries before segment assembly.
            mask_probs_item, pred_scores_item, pred_labels_item = remove_low_and_no_objects(
                mask_probs[i], pred_scores[i], pred_labels[i], threshold, num_labels
            )
            # No mask found
            if mask_probs_item.shape[0] <= 0:
                height, width = target_sizes[i] if target_sizes is not None else mask_probs_item.shape[1:]
                segmentation = torch.zeros((height, width)) - 1
                results.append({"segmentation": segmentation, "segments_info": []})
                continue
            # Get segmentation map and segment information of batch item
            target_size = target_sizes[i] if target_sizes is not None else None
            segmentation, segments = compute_segments(
                mask_probs=mask_probs_item,
                pred_scores=pred_scores_item,
                pred_labels=pred_labels_item,
                mask_threshold=mask_threshold,
                overlap_mask_area_threshold=overlap_mask_area_threshold,
                label_ids_to_fuse=label_ids_to_fuse,
                target_size=target_size,
            )
            results.append({"segmentation": segmentation, "segments_info": segments})
        return results
__all__ = ["Mask2FormerImageProcessorFast"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/mask2former/modular_mask2former.py",
"license": "Apache License 2.0",
"lines": 260,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/maskformer/image_processing_maskformer_fast.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fast Image processor class for MaskFormer."""
import math
from typing import TYPE_CHECKING, Any, Optional, Union
import torch
import torchvision.transforms.v2.functional as tvF
from torch import nn
from transformers.image_transforms import get_size_with_aspect_ratio
from ...image_processing_utils import BatchFeature, get_size_dict
from ...image_processing_utils_fast import (
BaseImageProcessorFast,
SizeDict,
get_image_size_for_max_height_width,
get_max_height_width,
group_images_by_shape,
reorder_images,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
)
from ...processing_utils import Unpack
from ...utils import (
TensorType,
auto_docstring,
logging,
)
from .image_processing_maskformer import (
MaskFormerImageProcessorKwargs,
compute_segments,
convert_segmentation_to_rle,
remove_low_and_no_objects,
)
logger = logging.get_logger(__name__)
if TYPE_CHECKING:
pass
def convert_segmentation_map_to_binary_masks_fast(
segmentation_map: "torch.Tensor",
instance_id_to_semantic_id: dict[int, int] | None = None,
ignore_index: int | None = None,
do_reduce_labels: bool = False,
):
if do_reduce_labels and ignore_index is None:
raise ValueError("If `do_reduce_labels` is True, `ignore_index` must be provided.")
if do_reduce_labels:
segmentation_map = torch.where(segmentation_map == 0, ignore_index, segmentation_map - 1)
all_labels = torch.unique(segmentation_map)
if ignore_index is not None:
all_labels = all_labels[all_labels != ignore_index] # drop background label if applicable
binary_masks = [(segmentation_map == i) for i in all_labels]
if binary_masks:
binary_masks = torch.stack(binary_masks, dim=0)
else:
binary_masks = torch.zeros((0, *segmentation_map.shape), device=segmentation_map.device)
# Convert instance ids to class ids
if instance_id_to_semantic_id is not None:
labels = torch.zeros(all_labels.shape[0], device=segmentation_map.device)
for i, label in enumerate(all_labels):
class_id = instance_id_to_semantic_id[(label.item() + 1 if do_reduce_labels else label.item())]
labels[i] = class_id - 1 if do_reduce_labels else class_id
else:
labels = all_labels
return binary_masks.float(), labels.long()
@auto_docstring
class MaskFormerImageProcessorFast(BaseImageProcessorFast):
    # Default preprocessing configuration for MaskFormer.
    resample = PILImageResampling.BILINEAR
    image_mean = IMAGENET_DEFAULT_MEAN
    image_std = IMAGENET_DEFAULT_STD
    # DETR-style shortest/longest-edge resizing.
    size = {"shortest_edge": 800, "longest_edge": 1333}
    default_to_square = False
    do_resize = True
    do_rescale = True
    rescale_factor = 1 / 255
    do_normalize = True
    do_pad = True
    model_input_names = ["pixel_values", "pixel_mask"]
    # Resized dimensions are rounded up to a multiple of this value (see `resize`).
    size_divisor = 32
    do_reduce_labels = False
    valid_kwargs = MaskFormerImageProcessorKwargs

    def __init__(self, **kwargs: Unpack[MaskFormerImageProcessorKwargs]) -> None:
        """
        Initialize the processor, folding the legacy `max_size` kwarg into
        `size["longest_edge"]` before delegating to the base class.
        """
        size = kwargs.pop("size", None)
        max_size = kwargs.pop("max_size", None)
        if size is None and max_size is not None:
            # NOTE(review): `size = self.size` aliases the *class-level* default dict;
            # the mutation below changes the default for later instances — confirm intended.
            size = self.size
            size["longest_edge"] = max_size
        elif size is None:
            size = self.size
        # Normalize `size` before the base __init__ runs so it sees the final dict.
        self.size = get_size_dict(size, max_size=max_size, default_to_square=False)
        super().__init__(**kwargs)
def to_dict(self) -> dict[str, Any]:
"""
Serializes this instance to a Python dictionary. This method calls the superclass method and then removes the
`_max_size` attribute from the dictionary.
"""
image_processor_dict = super().to_dict()
image_processor_dict.pop("_max_size", None)
return image_processor_dict
def reduce_label(self, labels: list["torch.Tensor"]):
for idx in range(len(labels)):
label = labels[idx]
label = torch.where(label == 0, torch.tensor(255, dtype=label.dtype), label)
label = label - 1
label = torch.where(label == 254, torch.tensor(255, dtype=label.dtype), label)
labels[idx] = label
    def resize(
        self,
        image: torch.Tensor,
        size: SizeDict,
        size_divisor: int = 0,
        interpolation: Optional["tvF.InterpolationMode"] = None,
        **kwargs,
    ) -> torch.Tensor:
        """
        Resize the image to the given size. Size can be `min_size` (scalar) or `(height, width)` tuple. If size is an
        int, smaller edge of the image will be matched to this number.
        Args:
            image (`torch.Tensor`):
                Image to resize.
            size (`SizeDict`):
                Size of the image's `(height, width)` dimensions after resizing. Available options are:
                - `{"height": int, "width": int}`: The image will be resized to the exact size `(height, width)`.
                    Do NOT keep the aspect ratio.
                - `{"shortest_edge": int, "longest_edge": int}`: The image will be resized to a maximum size respecting
                    the aspect ratio and keeping the shortest edge less or equal to `shortest_edge` and the longest edge
                    less or equal to `longest_edge`.
                - `{"max_height": int, "max_width": int}`: The image will be resized to the maximum size respecting the
                    aspect ratio and keeping the height less or equal to `max_height` and the width less or equal to
                    `max_width`.
            size_divisor (`int`, *optional*, defaults to 0):
                If `size_divisor` is given, the output image size will be divisible by the number.
            interpolation (`InterpolationMode`, *optional*, defaults to `InterpolationMode.BILINEAR`):
                Resampling filter to use if resizing the image.
        """
        interpolation = interpolation if interpolation is not None else tvF.InterpolationMode.BILINEAR
        # `SizeDict` supports both attribute access (falsy when the key is absent,
        # used for branching) and item access (used to read the values).
        if size.shortest_edge and size.longest_edge:
            # Resize the image so that the shortest edge or the longest edge is of the given size
            # while maintaining the aspect ratio of the original image.
            new_size = get_size_with_aspect_ratio(
                image.size()[-2:],
                size["shortest_edge"],
                size["longest_edge"],
            )
        elif size.max_height and size.max_width:
            new_size = get_image_size_for_max_height_width(image.size()[-2:], size["max_height"], size["max_width"])
        elif size.height and size.width:
            new_size = (size["height"], size["width"])
        else:
            raise ValueError(
                "Size must contain 'height' and 'width' keys or 'shortest_edge' and 'longest_edge' keys. Got"
                f" {size.keys()}."
            )
        if size_divisor > 0:
            # Round both dimensions *up* to the nearest multiple of `size_divisor`.
            height, width = new_size
            height = int(math.ceil(height / size_divisor) * size_divisor)
            width = int(math.ceil(width / size_divisor) * size_divisor)
            new_size = (height, width)
        image = tvF.resize(
            image,
            size=new_size,
            interpolation=interpolation,
            **kwargs,
        )
        return image
    def pad(
        self,
        images: torch.Tensor,
        padded_size: tuple[int, int],
        segmentation_maps: torch.Tensor | None = None,
        fill: int = 0,
        ignore_index: int = 255,
    ) -> tuple[torch.Tensor, torch.Tensor, Any]:
        """
        Pad a batch of images (bottom/right only) to `padded_size` and build the matching pixel mask.

        Note: the return annotation previously claimed `BatchFeature`, but the method
        returns a 3-tuple of (padded images, pixel mask, padded segmentation maps or
        the `segmentation_maps` argument unchanged).

        Args:
            images (`torch.Tensor`):
                Batch of images of shape `(batch, channels, height, width)`.
            padded_size (`tuple[int, int]`):
                Target `(height, width)` after padding.
            segmentation_maps (`torch.Tensor`, *optional*):
                Segmentation maps to pad with `ignore_index` alongside the images.
            fill (`int`, *optional*, defaults to 0):
                Pad value for the images.
            ignore_index (`int`, *optional*, defaults to 255):
                Pad value for the segmentation maps.

        Raises:
            ValueError: If `padded_size` is smaller than the input size in either dimension.
        """
        original_size = images.size()[-2:]
        padding_bottom = padded_size[0] - original_size[0]
        padding_right = padded_size[1] - original_size[1]
        if padding_bottom < 0 or padding_right < 0:
            raise ValueError(
                f"Padding dimensions are negative. Please make sure that the padded size is larger than the "
                f"original size. Got padded size: {padded_size}, original size: {original_size}."
            )
        if original_size != padded_size:
            # Pad only bottom/right so existing pixel coordinates stay valid.
            padding = [0, 0, padding_right, padding_bottom]
            images = tvF.pad(images, padding, fill=fill)
            if segmentation_maps is not None:
                segmentation_maps = [tvF.pad(mask, padding, fill=ignore_index) for mask in segmentation_maps]
        # Make a pixel mask for the image, where 1 indicates a valid pixel and 0 indicates padding.
        pixel_mask = torch.zeros((images.shape[0], *padded_size), dtype=torch.int64, device=images.device)
        pixel_mask[:, : original_size[0], : original_size[1]] = 1
        return images, pixel_mask, segmentation_maps
    @auto_docstring
    def preprocess(
        self,
        images: ImageInput,
        segmentation_maps: ImageInput | None = None,
        instance_id_to_semantic_id: list[dict[int, int]] | dict[int, int] | None = None,
        **kwargs: Unpack[MaskFormerImageProcessorKwargs],
    ) -> BatchFeature:
        r"""
        segmentation_maps (`ImageInput`, *optional*):
            The segmentation maps to preprocess alongside the images.
        instance_id_to_semantic_id (`Union[list[dict[int, int]], dict[int, int]]`, *optional*):
            A mapping from instance IDs to semantic IDs, either a single dict shared by all images or one dict
            per image. Used when converting segmentation maps into binary masks and class labels.
        """
        # Thin override whose only purpose is to expose the extra segmentation arguments; the actual work is
        # done by the base-class `preprocess`, which dispatches to `_preprocess_image_like_inputs` below.
        return super().preprocess(
            images,
            segmentation_maps,
            instance_id_to_semantic_id,
            **kwargs,
        )
def _preprocess_image_like_inputs(
self,
images: ImageInput,
segmentation_maps: ImageInput,
instance_id_to_semantic_id: list[dict[int, int]] | dict[int, int] | None,
do_convert_rgb: bool,
input_data_format: ChannelDimension,
device: Union[str, "torch.device"] | None = None,
**kwargs: Unpack[MaskFormerImageProcessorKwargs],
) -> BatchFeature:
"""
Preprocess image-like inputs.
To be overridden by subclasses when image-like inputs other than images should be processed.
It can be used for segmentation maps, depth maps, etc.
"""
# Prepare input images
images = self._prepare_image_like_inputs(
images=images, do_convert_rgb=do_convert_rgb, input_data_format=input_data_format, device=device
)
if segmentation_maps is not None:
segmentation_maps = self._prepare_image_like_inputs(
images=segmentation_maps,
expected_ndims=2,
do_convert_rgb=False,
input_data_format=ChannelDimension.FIRST,
)
return self._preprocess(images, segmentation_maps, instance_id_to_semantic_id, **kwargs)
    def _preprocess(
        self,
        images: list["torch.Tensor"],
        segmentation_maps: Optional["torch.Tensor"],
        instance_id_to_semantic_id: dict[int, int] | None,
        do_resize: bool | None,
        size: SizeDict | None,
        pad_size: SizeDict | None,
        size_divisor: int | None,
        interpolation: Union["PILImageResampling", "tvF.InterpolationMode"] | None,
        do_rescale: bool | None,
        rescale_factor: float | None,
        do_normalize: bool | None,
        image_mean: float | list[float] | None,
        image_std: float | list[float] | None,
        ignore_index: int | None,
        do_reduce_labels: bool | None,
        disable_grouping: bool | None,
        return_tensors: str | TensorType | None,
        **kwargs,
    ) -> BatchFeature:
        """
        Resize, rescale/normalize and pad `images` (and optional `segmentation_maps`) into a batch.

        Images are grouped by shape so each group can be processed as one stacked tensor. Segmentation maps, when
        given, follow the same resize as their image (but with nearest-exact interpolation to keep label values
        intact) and are converted to per-instance binary masks (`mask_labels`) plus class ids (`class_labels`)
        before padding.

        Returns:
            `BatchFeature` with `pixel_values` and `pixel_mask`, plus `mask_labels` / `class_labels` when
            segmentation maps were provided.
        """
        if segmentation_maps is not None and len(images) != len(segmentation_maps):
            raise ValueError("Images and segmentation maps must have the same length.")
        # Group images by size for batched resizing
        grouped_images, grouped_images_index = group_images_by_shape(images, disable_grouping=disable_grouping)
        resized_images_grouped = {}
        if segmentation_maps is not None:
            # Maps share their image's shape, so the groups line up with the image groups.
            grouped_segmentation_maps, grouped_segmentation_maps_index = group_images_by_shape(
                segmentation_maps, disable_grouping=disable_grouping
            )
            resized_segmentation_maps_grouped = {}
        for shape, stacked_images in grouped_images.items():
            if do_resize:
                stacked_images = self.resize(
                    image=stacked_images, size=size, size_divisor=size_divisor, interpolation=interpolation
                )
            if segmentation_maps is not None:
                stacked_segmentation_maps = grouped_segmentation_maps[shape]
                if do_resize:
                    # Nearest-exact keeps segmentation labels discrete (no interpolation between class ids).
                    stacked_segmentation_maps = self.resize(
                        image=stacked_segmentation_maps,
                        size=size,
                        size_divisor=size_divisor,
                        interpolation=tvF.InterpolationMode.NEAREST_EXACT,
                    )
            resized_images_grouped[shape] = stacked_images
            if segmentation_maps is not None:
                resized_segmentation_maps_grouped[shape] = stacked_segmentation_maps
        # Restore the original per-image ordering after grouped processing.
        resized_images = reorder_images(resized_images_grouped, grouped_images_index)
        if segmentation_maps is not None:
            resized_segmentation_maps = reorder_images(
                resized_segmentation_maps_grouped, grouped_segmentation_maps_index
            )
        # Padding target: explicit `pad_size` if given, otherwise the max height/width across the batch.
        if pad_size is not None:
            padded_size = (pad_size.height, pad_size.width)
        else:
            padded_size = get_max_height_width(resized_images)
        if segmentation_maps is not None:
            mask_labels = []
            class_labels = []
            # Convert to list of binary masks and labels
            for idx, segmentation_map in enumerate(resized_segmentation_maps):
                if isinstance(instance_id_to_semantic_id, list):
                    instance_id = instance_id_to_semantic_id[idx]
                else:
                    instance_id = instance_id_to_semantic_id
                # Use instance2class_id mapping per image
                masks, classes = convert_segmentation_map_to_binary_masks_fast(
                    segmentation_map.squeeze(0),
                    instance_id,
                    ignore_index=ignore_index,
                    do_reduce_labels=do_reduce_labels,
                )
                mask_labels.append(masks)
                class_labels.append(classes)
        if segmentation_maps is not None:
            # group mask_labels as paired inputs and not images so as not to stack them
            grouped_images, grouped_segmentation_maps, grouped_images_index = group_images_by_shape(
                resized_images, mask_labels, disable_grouping=disable_grouping
            )
            processed_segmentation_maps_grouped = {}
        else:
            grouped_images, grouped_images_index = group_images_by_shape(
                resized_images, disable_grouping=disable_grouping
            )
            processed_images_grouped = {}
        processed_pixel_masks_grouped = {}
        for shape, stacked_images in grouped_images.items():
            # Fused rescale and normalize
            stacked_images = self.rescale_and_normalize(
                stacked_images, do_rescale, rescale_factor, do_normalize, image_mean, image_std
            )
            # Padding happens after normalization, so padded pixels are exactly 0 in normalized space.
            padded_images, pixel_masks, padded_segmentation_maps = self.pad(
                images=stacked_images,
                segmentation_maps=grouped_segmentation_maps[shape] if segmentation_maps is not None else None,
                padded_size=padded_size,
                ignore_index=ignore_index,
            )
            processed_images_grouped[shape] = padded_images
            processed_pixel_masks_grouped[shape] = pixel_masks
            if segmentation_maps is not None:
                processed_segmentation_maps_grouped[shape] = padded_segmentation_maps
        processed_images = reorder_images(processed_images_grouped, grouped_images_index)
        processed_pixel_masks = reorder_images(processed_pixel_masks_grouped, grouped_images_index)
        encoded_inputs = BatchFeature(
            data={"pixel_values": processed_images, "pixel_mask": processed_pixel_masks},
            tensor_type=return_tensors,
        )
        if segmentation_maps is not None:
            mask_labels = reorder_images(processed_segmentation_maps_grouped, grouped_images_index)
            # we cannot batch them since they don't share a common class size
            encoded_inputs["mask_labels"] = mask_labels
            encoded_inputs["class_labels"] = class_labels
        return encoded_inputs
# Copied from transformers.models.maskformer.image_processing_maskformer.MaskFormerImageProcessor.post_process_semantic_segmentation
def post_process_semantic_segmentation(
self, outputs, target_sizes: list[tuple[int, int]] | None = None
) -> "torch.Tensor":
"""
Converts the output of [`MaskFormerForInstanceSegmentation`] into semantic segmentation maps. Only supports
PyTorch.
Args:
outputs ([`MaskFormerForInstanceSegmentation`]):
Raw outputs of the model.
target_sizes (`list[tuple[int, int]]`, *optional*):
List of length (batch_size), where each list item (`tuple[int, int]]`) corresponds to the requested
final size (height, width) of each prediction. If left to None, predictions will not be resized.
Returns:
`list[torch.Tensor]`:
A list of length `batch_size`, where each item is a semantic segmentation map of shape (height, width)
corresponding to the target_sizes entry (if `target_sizes` is specified). Each entry of each
`torch.Tensor` correspond to a semantic class id.
"""
class_queries_logits = outputs.class_queries_logits # [batch_size, num_queries, num_classes+1]
masks_queries_logits = outputs.masks_queries_logits # [batch_size, num_queries, height, width]
# Remove the null class `[..., :-1]`
masks_classes = class_queries_logits.softmax(dim=-1)[..., :-1]
masks_probs = masks_queries_logits.sigmoid() # [batch_size, num_queries, height, width]
# Semantic segmentation logits of shape (batch_size, num_classes, height, width)
segmentation = torch.einsum("bqc, bqhw -> bchw", masks_classes, masks_probs)
batch_size = class_queries_logits.shape[0]
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if batch_size != len(target_sizes):
raise ValueError(
"Make sure that you pass in as many target sizes as the batch dimension of the logits"
)
semantic_segmentation = []
for idx in range(batch_size):
resized_logits = torch.nn.functional.interpolate(
segmentation[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
)
semantic_map = resized_logits[0].argmax(dim=0)
semantic_segmentation.append(semantic_map)
else:
semantic_segmentation = segmentation.argmax(dim=1)
semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
return semantic_segmentation
    # Copied from transformers.models.maskformer.image_processing_maskformer.MaskFormerImageProcessor.post_process_instance_segmentation
    def post_process_instance_segmentation(
        self,
        outputs,
        threshold: float = 0.5,
        mask_threshold: float = 0.5,
        overlap_mask_area_threshold: float = 0.8,
        target_sizes: list[tuple[int, int]] | None = None,
        return_coco_annotation: bool | None = False,
        return_binary_maps: bool | None = False,
    ) -> list[dict]:
        """
        Converts the output of [`MaskFormerForInstanceSegmentationOutput`] into instance segmentation predictions. Only
        supports PyTorch. If instances could overlap, set either return_coco_annotation or return_binary_maps
        to `True` to get the correct segmentation result.
        Args:
            outputs ([`MaskFormerForInstanceSegmentation`]):
                Raw outputs of the model.
            threshold (`float`, *optional*, defaults to 0.5):
                The probability score threshold to keep predicted instance masks.
            mask_threshold (`float`, *optional*, defaults to 0.5):
                Threshold to use when turning the predicted masks into binary values.
            overlap_mask_area_threshold (`float`, *optional*, defaults to 0.8):
                The overlap mask area threshold to merge or discard small disconnected parts within each binary
                instance mask.
            target_sizes (`list[Tuple]`, *optional*):
                List of length (batch_size), where each list item (`tuple[int, int]]`) corresponds to the requested
                final size (height, width) of each prediction. If left to None, predictions will not be resized.
            return_coco_annotation (`bool`, *optional*, defaults to `False`):
                If set to `True`, segmentation maps are returned in COCO run-length encoding (RLE) format.
            return_binary_maps (`bool`, *optional*, defaults to `False`):
                If set to `True`, segmentation maps are returned as a concatenated tensor of binary segmentation maps
                (one per detected instance).
        Returns:
            `list[Dict]`: A list of dictionaries, one per image, each dictionary containing two keys:
            - **segmentation** -- A tensor of shape `(height, width)` where each pixel represents a `segment_id`, or
              `list[List]` run-length encoding (RLE) of the segmentation map if return_coco_annotation is set to
              `True`, or a tensor of shape `(num_instances, height, width)` if return_binary_maps is set to `True`.
              Set to `None` if no mask if found above `threshold`.
            - **segments_info** -- A dictionary that contains additional information on each segment.
                - **id** -- An integer representing the `segment_id`.
                - **label_id** -- An integer representing the label / semantic class id corresponding to `segment_id`.
                - **score** -- Prediction score of segment with `segment_id`.
        """
        if return_coco_annotation and return_binary_maps:
            raise ValueError("return_coco_annotation and return_binary_maps can not be both set to True.")
        # [batch_size, num_queries, num_classes+1]
        class_queries_logits = outputs.class_queries_logits
        # [batch_size, num_queries, height, width]
        masks_queries_logits = outputs.masks_queries_logits
        device = masks_queries_logits.device
        num_classes = class_queries_logits.shape[-1] - 1
        num_queries = class_queries_logits.shape[-2]
        # Loop over items in batch size
        results: list[dict[str, TensorType]] = []
        for i in range(class_queries_logits.shape[0]):
            mask_pred = masks_queries_logits[i]
            mask_cls = class_queries_logits[i]
            # Class probabilities without the trailing "null" (no-object) class.
            scores = torch.nn.functional.softmax(mask_cls, dim=-1)[:, :-1]
            # Enumerate every (query, class) pair; the flatten aligns with scores.flatten(0, 1) below.
            labels = torch.arange(num_classes, device=device).unsqueeze(0).repeat(num_queries, 1).flatten(0, 1)
            # Keep the num_queries best (query, class) pairs overall; sorted=False for speed.
            scores_per_image, topk_indices = scores.flatten(0, 1).topk(num_queries, sorted=False)
            labels_per_image = labels[topk_indices]
            # Recover the query index from the flattened (query, class) index.
            topk_indices = torch.div(topk_indices, num_classes, rounding_mode="floor")
            mask_pred = mask_pred[topk_indices]
            # Binarize at logit 0. NOTE(review): `mask_threshold` and `overlap_mask_area_threshold` are
            # accepted for API compatibility but never used in this implementation.
            pred_masks = (mask_pred > 0).float()
            # Calculate average mask prob
            mask_scores_per_image = (mask_pred.sigmoid().flatten(1) * pred_masks.flatten(1)).sum(1) / (
                pred_masks.flatten(1).sum(1) + 1e-6
            )
            pred_scores = scores_per_image * mask_scores_per_image
            pred_classes = labels_per_image
            # -1 marks pixels not assigned to any instance.
            segmentation = torch.zeros(masks_queries_logits.shape[2:]) - 1
            if target_sizes is not None:
                segmentation = torch.zeros(target_sizes[i]) - 1
                pred_masks = torch.nn.functional.interpolate(
                    pred_masks.unsqueeze(0), size=target_sizes[i], mode="nearest"
                )[0]
            instance_maps, segments = [], []
            current_segment_id = 0
            # Later instances overwrite earlier ones on overlapping pixels of `segmentation`.
            for j in range(num_queries):
                score = pred_scores[j].item()
                if not torch.all(pred_masks[j] == 0) and score >= threshold:
                    segmentation[pred_masks[j] == 1] = current_segment_id
                    segments.append(
                        {
                            "id": current_segment_id,
                            "label_id": pred_classes[j].item(),
                            "was_fused": False,
                            "score": round(score, 6),
                        }
                    )
                    current_segment_id += 1
                    instance_maps.append(pred_masks[j])
            # Return segmentation map in run-length encoding (RLE) format
            if return_coco_annotation:
                segmentation = convert_segmentation_to_rle(segmentation)
            # Return a concatenated tensor of binary instance maps
            if return_binary_maps and len(instance_maps) != 0:
                segmentation = torch.stack(instance_maps, dim=0)
            results.append({"segmentation": segmentation, "segments_info": segments})
        return results
# Copied from transformers.models.maskformer.image_processing_maskformer.MaskFormerImageProcessor.post_process_panoptic_segmentation
def post_process_panoptic_segmentation(
self,
outputs,
threshold: float = 0.5,
mask_threshold: float = 0.5,
overlap_mask_area_threshold: float = 0.8,
label_ids_to_fuse: set[int] | None = None,
target_sizes: list[tuple[int, int]] | None = None,
) -> list[dict]:
"""
Converts the output of [`MaskFormerForInstanceSegmentationOutput`] into image panoptic segmentation
predictions. Only supports PyTorch.
Args:
outputs ([`MaskFormerForInstanceSegmentationOutput`]):
The outputs from [`MaskFormerForInstanceSegmentation`].
threshold (`float`, *optional*, defaults to 0.5):
The probability score threshold to keep predicted instance masks.
mask_threshold (`float`, *optional*, defaults to 0.5):
Threshold to use when turning the predicted masks into binary values.
overlap_mask_area_threshold (`float`, *optional*, defaults to 0.8):
The overlap mask area threshold to merge or discard small disconnected parts within each binary
instance mask.
label_ids_to_fuse (`Set[int]`, *optional*):
The labels in this state will have all their instances be fused together. For instance we could say
there can only be one sky in an image, but several persons, so the label ID for sky would be in that
set, but not the one for person.
target_sizes (`list[Tuple]`, *optional*):
List of length (batch_size), where each list item (`tuple[int, int]]`) corresponds to the requested
final size (height, width) of each prediction in batch. If left to None, predictions will not be
resized.
Returns:
`list[Dict]`: A list of dictionaries, one per image, each dictionary containing two keys:
- **segmentation** -- a tensor of shape `(height, width)` where each pixel represents a `segment_id`, set
to `None` if no mask if found above `threshold`. If `target_sizes` is specified, segmentation is resized
to the corresponding `target_sizes` entry.
- **segments_info** -- A dictionary that contains additional information on each segment.
- **id** -- an integer representing the `segment_id`.
- **label_id** -- An integer representing the label / semantic class id corresponding to `segment_id`.
- **was_fused** -- a boolean, `True` if `label_id` was in `label_ids_to_fuse`, `False` otherwise.
Multiple instances of the same class / label were fused and assigned a single `segment_id`.
- **score** -- Prediction score of segment with `segment_id`.
"""
if label_ids_to_fuse is None:
logger.warning("`label_ids_to_fuse` unset. No instance will be fused.")
label_ids_to_fuse = set()
class_queries_logits = outputs.class_queries_logits # [batch_size, num_queries, num_classes+1]
masks_queries_logits = outputs.masks_queries_logits # [batch_size, num_queries, height, width]
batch_size = class_queries_logits.shape[0]
num_labels = class_queries_logits.shape[-1] - 1
mask_probs = masks_queries_logits.sigmoid() # [batch_size, num_queries, height, width]
# Predicted label and score of each query (batch_size, num_queries)
pred_scores, pred_labels = nn.functional.softmax(class_queries_logits, dim=-1).max(-1)
# Loop over items in batch size
results: list[dict[str, TensorType]] = []
for i in range(batch_size):
mask_probs_item, pred_scores_item, pred_labels_item = remove_low_and_no_objects(
mask_probs[i], pred_scores[i], pred_labels[i], threshold, num_labels
)
# No mask found
if mask_probs_item.shape[0] <= 0:
height, width = target_sizes[i] if target_sizes is not None else mask_probs_item.shape[1:]
segmentation = torch.zeros((height, width)) - 1
results.append({"segmentation": segmentation, "segments_info": []})
continue
# Get segmentation map and segment information of batch item
target_size = target_sizes[i] if target_sizes is not None else None
segmentation, segments = compute_segments(
mask_probs=mask_probs_item,
pred_scores=pred_scores_item,
pred_labels=pred_labels_item,
mask_threshold=mask_threshold,
overlap_mask_area_threshold=overlap_mask_area_threshold,
label_ids_to_fuse=label_ids_to_fuse,
target_size=target_size,
)
results.append({"segmentation": segmentation, "segments_info": segments})
return results
__all__ = ["MaskFormerImageProcessorFast"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/maskformer/image_processing_maskformer_fast.py",
"license": "Apache License 2.0",
"lines": 588,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/efficientloftr/configuration_efficientloftr.py | # Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ...configuration_utils import PreTrainedConfig
class EfficientLoFTRConfig(PreTrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`EfficientLoFTRFromKeypointMatching`].
    It is used to instantiate a EfficientLoFTR model according to the specified arguments, defining the model
    architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the
    EfficientLoFTR [zju-community/efficientloftr](https://huggingface.co/zju-community/efficientloftr) architecture.
    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.
    Args:
        stage_num_blocks (`List`, *optional*, defaults to [1, 2, 4, 14]):
            The number of blocks in each stage
        out_features (`List`, *optional*, defaults to [64, 64, 128, 256]):
            The number of channels in each stage
        stage_stride (`List`, *optional*, defaults to [2, 1, 2, 2]):
            The stride used in each stage
        hidden_size (`int`, *optional*, defaults to 256):
            The dimension of the descriptors. Must equal the last value of `out_features`.
        activation_function (`str`, *optional*, defaults to `"relu"`):
            The activation function used in the backbone
        q_aggregation_kernel_size (`int`, *optional*, defaults to 4):
            The kernel size of the aggregation of query states in the fusion network
        kv_aggregation_kernel_size (`int`, *optional*, defaults to 4):
            The kernel size of the aggregation of key and value states in the fusion network
        q_aggregation_stride (`int`, *optional*, defaults to 4):
            The stride of the aggregation of query states in the fusion network
        kv_aggregation_stride (`int`, *optional*, defaults to 4):
            The stride of the aggregation of key and value states in the fusion network
        num_attention_layers (`int`, *optional*, defaults to 4):
            Number of attention layers in the LocalFeatureTransformer
        num_attention_heads (`int`, *optional*, defaults to 8):
            The number of heads in the GNN layers.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        attention_bias (`bool`, *optional*, defaults to `False`):
            Whether to use a bias in the query, key, value and output projection layers during attention.
        mlp_activation_function (`str`, *optional*, defaults to `"leaky_relu"`):
            Activation function used in the attention mlp layer.
        coarse_matching_skip_softmax (`bool`, *optional*, defaults to `False`):
            Whether to skip softmax or not at the coarse matching step.
        coarse_matching_threshold (`float`, *optional*, defaults to 0.2):
            The threshold for the minimum score required for a match.
        coarse_matching_temperature (`float`, *optional*, defaults to 0.1):
            The temperature to apply to the coarse similarity matrix
        coarse_matching_border_removal (`int`, *optional*, defaults to 2):
            The size of the border to remove during coarse matching
        fine_kernel_size (`int`, *optional*, defaults to 8):
            Kernel size used for the fine feature matching
        batch_norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon used by the batch normalization layers
        rope_parameters (`RopeParameters`, *optional*):
            Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain
            a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE
            with longer `max_position_embeddings`.
        fine_matching_slice_dim (`int`, *optional*, defaults to 8):
            The size of the slice used to divide the fine features for the first and second fine matching stages.
        fine_matching_regress_temperature (`float`, *optional*, defaults to 10.0):
            The temperature to apply to the fine similarity matrix
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
    Examples:
        ```python
        >>> from transformers import EfficientLoFTRConfig, EfficientLoFTRForKeypointMatching
        >>> # Initializing a EfficientLoFTR configuration
        >>> configuration = EfficientLoFTRConfig()
        >>> # Initializing a model from the EfficientLoFTR configuration
        >>> model = EfficientLoFTRForKeypointMatching(configuration)
        >>> # Accessing the model configuration
        >>> configuration = model.config
        ```
    """

    model_type = "efficientloftr"

    def __init__(
        self,
        stage_num_blocks: list[int] | None = None,
        out_features: list[int] | None = None,
        stage_stride: list[int] | None = None,
        hidden_size: int = 256,
        activation_function: str = "relu",
        q_aggregation_kernel_size: int = 4,
        kv_aggregation_kernel_size: int = 4,
        q_aggregation_stride: int = 4,
        kv_aggregation_stride: int = 4,
        num_attention_layers: int = 4,
        num_attention_heads: int = 8,
        attention_dropout: float = 0.0,
        attention_bias: bool = False,
        mlp_activation_function: str = "leaky_relu",
        coarse_matching_skip_softmax: bool = False,
        coarse_matching_threshold: float = 0.2,
        coarse_matching_temperature: float = 0.1,
        coarse_matching_border_removal: int = 2,
        fine_kernel_size: int = 8,
        batch_norm_eps: float = 1e-5,
        rope_parameters: dict | None = None,
        fine_matching_slice_dim: int = 8,
        fine_matching_regress_temperature: float = 10.0,
        initializer_range: float = 0.02,
        **kwargs,
    ):
        # Stage level of RepVGG
        self.stage_num_blocks = stage_num_blocks if stage_num_blocks is not None else [1, 2, 4, 14]
        self.stage_stride = stage_stride if stage_stride is not None else [2, 1, 2, 2]
        self.out_features = out_features if out_features is not None else [64, 64, 128, 256]
        # Each stage consumes the previous stage's channels; the first stage takes a 1-channel (grayscale) input.
        self.stage_in_channels = [1] + self.out_features[:-1]
        # Block level of RepVGG: only the first block of each stage applies the stage stride.
        # NOTE: `stage_num_blocks`, `stage_stride` and `out_features` are assumed to have equal lengths.
        self.stage_block_stride = [
            [stride] + [1] * (num_blocks - 1) for stride, num_blocks in zip(self.stage_stride, self.stage_num_blocks)
        ]
        self.stage_block_out_channels = [
            [self.out_features[stage_idx]] * num_blocks for stage_idx, num_blocks in enumerate(self.stage_num_blocks)
        ]
        self.stage_block_in_channels = [
            [self.stage_in_channels[stage_idx]] + self.stage_block_out_channels[stage_idx][:-1]
            for stage_idx in range(len(self.stage_num_blocks))
        ]
        # Fine matching level of EfficientLoFTR
        self.fine_fusion_dims = list(reversed(self.out_features))[:-1]
        self.hidden_size = hidden_size
        if self.hidden_size != self.out_features[-1]:
            # The descriptor dimension must match the backbone's final stage width.
            raise ValueError(
                f"hidden_size should be equal to the last value in out_features. hidden_size = {self.hidden_size}, "
                f"last value in out_features = {self.out_features[-1]}"
            )
        self.activation_function = activation_function
        self.q_aggregation_kernel_size = q_aggregation_kernel_size
        self.kv_aggregation_kernel_size = kv_aggregation_kernel_size
        self.q_aggregation_stride = q_aggregation_stride
        self.kv_aggregation_stride = kv_aggregation_stride
        self.num_attention_layers = num_attention_layers
        self.num_attention_heads = num_attention_heads
        self.attention_dropout = attention_dropout
        self.attention_bias = attention_bias
        self.intermediate_size = self.hidden_size * 2
        self.mlp_activation_function = mlp_activation_function
        self.coarse_matching_skip_softmax = coarse_matching_skip_softmax
        self.coarse_matching_threshold = coarse_matching_threshold
        self.coarse_matching_temperature = coarse_matching_temperature
        self.coarse_matching_border_removal = coarse_matching_border_removal
        self.fine_kernel_size = fine_kernel_size
        self.batch_norm_eps = batch_norm_eps
        self.fine_matching_slice_dim = fine_matching_slice_dim
        self.fine_matching_regress_temperature = fine_matching_regress_temperature
        # No grouped-query attention: key/value heads mirror the attention heads.
        self.num_key_value_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.rope_parameters = rope_parameters
        kwargs.setdefault("partial_rotary_factor", 4.0)  # assign default for BC
        super().__init__(**kwargs)
__all__ = ["EfficientLoFTRConfig"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/efficientloftr/configuration_efficientloftr.py",
"license": "Apache License 2.0",
"lines": 161,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/efficientloftr/convert_efficientloftr_to_hf.py | # Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import gc
import os
import re
import torch
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers.models.efficientloftr.image_processing_efficientloftr import EfficientLoFTRImageProcessor
from transformers.models.efficientloftr.modeling_efficientloftr import (
EfficientLoFTRConfig,
EfficientLoFTRForKeypointMatching,
)
DEFAULT_MODEL_REPO = "stevenbucaille/efficient_loftr_pth"
DEFAULT_FILE = "eloftr.pth"
def prepare_imgs():
    """Return the reference image pair (as one batch of one pair) used to validate the conversion."""
    dataset = load_dataset("hf-internal-testing/image-matching-test-dataset", split="train")
    return [[dataset[2]["image"], dataset[0]["image"]]]
def verify_model_outputs(model, device):
    """Run the converted model on the reference image pair and assert it reproduces known-good outputs."""
    image_pairs = prepare_imgs()
    processor = EfficientLoFTRImageProcessor()
    model_inputs = processor(images=image_pairs, return_tensors="pt").to(device)

    model.to(device)
    model.eval()
    with torch.no_grad():
        outputs = model(**model_inputs, output_hidden_states=True, output_attentions=True)

    # Reference values recorded from the original checkpoint.
    expected_number_of_matches = 4800
    expected_matches_shape = torch.Size((len(image_pairs), 2, expected_number_of_matches))
    expected_matching_scores_shape = torch.Size((len(image_pairs), 2, expected_number_of_matches))
    expected_top10_matches_indices = torch.tensor(
        [1798, 1639, 1401, 1559, 2596, 2362, 2441, 2605, 1643, 2607], dtype=torch.int64
    ).to(device)
    expected_top10_matching_scores = torch.tensor(
        [0.9563, 0.9355, 0.9265, 0.9091, 0.9071, 0.9062, 0.9000, 0.8978, 0.8908, 0.8853]
    ).to(device)

    # Compare the ten strongest matches of the first image pair against the reference.
    top10 = torch.topk(outputs.matching_scores[0, 0], k=10)

    assert outputs.matches.shape == expected_matches_shape
    assert outputs.matching_scores.shape == expected_matching_scores_shape
    torch.testing.assert_close(top10.indices, expected_top10_matches_indices, rtol=5e-3, atol=5e-3)
    torch.testing.assert_close(top10.values, expected_top10_matching_scores, rtol=5e-3, atol=5e-3)
    assert outputs.matches.shape[-1] == expected_number_of_matches
# Maps original checkpoint parameter names (regex patterns) to HF model parameter names.
# Values are either replacement strings (with \1-style backrefs) or callables receiving the
# `re.Match`; callables are used where the target index must be computed — the original
# transformer interleaves self (even) / cross (odd) attention layers, so layer i maps to
# HF layer i // 2.
ORIGINAL_TO_CONVERTED_KEY_MAPPING = {
    # RepVGG backbone blocks (dense 3x3 branch, 1x1 branch and identity branch).
    r"matcher.backbone.layer(\d+).rbr_dense.conv": r"efficientloftr.backbone.stages.\1.blocks.0.conv1.conv",
    r"matcher.backbone.layer(\d+).rbr_dense.bn": r"efficientloftr.backbone.stages.\1.blocks.0.conv1.norm",
    r"matcher.backbone.layer(\d+).rbr_1x1.conv": r"efficientloftr.backbone.stages.\1.blocks.0.conv2.conv",
    r"matcher.backbone.layer(\d+).rbr_1x1.bn": r"efficientloftr.backbone.stages.\1.blocks.0.conv2.norm",
    r"matcher.backbone.layer(\d+).(\d+).rbr_dense.conv": r"efficientloftr.backbone.stages.\1.blocks.\2.conv1.conv",
    r"matcher.backbone.layer(\d+).(\d+).rbr_dense.bn": r"efficientloftr.backbone.stages.\1.blocks.\2.conv1.norm",
    r"matcher.backbone.layer(\d+).(\d+).rbr_1x1.conv": r"efficientloftr.backbone.stages.\1.blocks.\2.conv2.conv",
    r"matcher.backbone.layer(\d+).(\d+).rbr_1x1.bn": r"efficientloftr.backbone.stages.\1.blocks.\2.conv2.norm",
    r"matcher.backbone.layer(\d+).(\d+).rbr_identity": r"efficientloftr.backbone.stages.\1.blocks.\2.identity",
    # Coarse transformer, even original layers -> self-attention of HF layer i // 2.
    r"matcher.loftr_coarse.layers.(\d*[02468]).aggregate": lambda m: f"efficientloftr.local_feature_transformer.layers.{int(m.group(1)) // 2}.self_attention.aggregation.q_aggregation",
    r"matcher.loftr_coarse.layers.(\d*[02468]).norm1": lambda m: f"efficientloftr.local_feature_transformer.layers.{int(m.group(1)) // 2}.self_attention.aggregation.norm",
    r"matcher.loftr_coarse.layers.(\d*[02468]).q_proj": lambda m: f"efficientloftr.local_feature_transformer.layers.{int(m.group(1)) // 2}.self_attention.attention.q_proj",
    r"matcher.loftr_coarse.layers.(\d*[02468]).k_proj": lambda m: f"efficientloftr.local_feature_transformer.layers.{int(m.group(1)) // 2}.self_attention.attention.k_proj",
    r"matcher.loftr_coarse.layers.(\d*[02468]).v_proj": lambda m: f"efficientloftr.local_feature_transformer.layers.{int(m.group(1)) // 2}.self_attention.attention.v_proj",
    r"matcher.loftr_coarse.layers.(\d*[02468]).merge": lambda m: f"efficientloftr.local_feature_transformer.layers.{int(m.group(1)) // 2}.self_attention.attention.o_proj",
    r"matcher.loftr_coarse.layers.(\d*[02468]).mlp.(\d+)": lambda m: f"efficientloftr.local_feature_transformer.layers.{int(m.group(1)) // 2}.self_attention.mlp.fc{1 if m.group(2) == '0' else 2}",
    r"matcher.loftr_coarse.layers.(\d*[02468]).norm2": lambda m: f"efficientloftr.local_feature_transformer.layers.{int(m.group(1)) // 2}.self_attention.mlp.layer_norm",
    # Coarse transformer, odd original layers -> cross-attention of HF layer i // 2.
    r"matcher.loftr_coarse.layers.(\d*[13579]).aggregate": lambda m: f"efficientloftr.local_feature_transformer.layers.{int(m.group(1)) // 2}.cross_attention.aggregation.q_aggregation",
    r"matcher.loftr_coarse.layers.(\d*[13579]).norm1": lambda m: f"efficientloftr.local_feature_transformer.layers.{int(m.group(1)) // 2}.cross_attention.aggregation.norm",
    r"matcher.loftr_coarse.layers.(\d*[13579]).q_proj": lambda m: f"efficientloftr.local_feature_transformer.layers.{int(m.group(1)) // 2}.cross_attention.attention.q_proj",
    r"matcher.loftr_coarse.layers.(\d*[13579]).k_proj": lambda m: f"efficientloftr.local_feature_transformer.layers.{int(m.group(1)) // 2}.cross_attention.attention.k_proj",
    r"matcher.loftr_coarse.layers.(\d*[13579]).v_proj": lambda m: f"efficientloftr.local_feature_transformer.layers.{int(m.group(1)) // 2}.cross_attention.attention.v_proj",
    r"matcher.loftr_coarse.layers.(\d*[13579]).merge": lambda m: f"efficientloftr.local_feature_transformer.layers.{int(m.group(1)) // 2}.cross_attention.attention.o_proj",
    r"matcher.loftr_coarse.layers.(\d*[13579]).mlp.(\d+)": lambda m: f"efficientloftr.local_feature_transformer.layers.{int(m.group(1)) // 2}.cross_attention.mlp.fc{1 if m.group(2) == '0' else 2}",
    r"matcher.loftr_coarse.layers.(\d*[13579]).norm2": lambda m: f"efficientloftr.local_feature_transformer.layers.{int(m.group(1)) // 2}.cross_attention.mlp.layer_norm",
    # Fine-feature refinement head; original layer 2 is remapped to index 0.
    r"matcher.fine_preprocess.layer3_outconv": "refinement_layer.out_conv",
    r"matcher.fine_preprocess.layer(\d+)_outconv.weight": lambda m: f"refinement_layer.out_conv_layers.{0 if int(m.group(1)) == 2 else m.group(1)}.out_conv1.weight",
    r"matcher.fine_preprocess.layer(\d+)_outconv2\.0": lambda m: f"refinement_layer.out_conv_layers.{0 if int(m.group(1)) == 2 else m.group(1)}.out_conv2",
    r"matcher.fine_preprocess.layer(\d+)_outconv2\.1": lambda m: f"refinement_layer.out_conv_layers.{0 if int(m.group(1)) == 2 else m.group(1)}.batch_norm",
    r"matcher.fine_preprocess.layer(\d+)_outconv2\.3": lambda m: f"refinement_layer.out_conv_layers.{0 if int(m.group(1)) == 2 else m.group(1)}.out_conv3",
}
def convert_old_keys_to_new_keys(state_dict_keys: list[str]):
    """
    Build a mapping from original checkpoint key names to their converted names.

    All keys are joined into one newline-separated string so each regex in
    ``ORIGINAL_TO_CONVERTED_KEY_MAPPING`` runs a single time over the whole
    text instead of once per key. Apply only once on the full key list.
    """
    if state_dict_keys is None:
        return {}
    old_text = "\n".join(state_dict_keys)
    new_text = old_text
    for pattern, replacement in ORIGINAL_TO_CONVERTED_KEY_MAPPING.items():
        # A None replacement deletes the matched key name (maps it to an empty line).
        new_text = re.sub(pattern, "" if replacement is None else replacement, new_text)
    return dict(zip(old_text.split("\n"), new_text.split("\n")))
@torch.no_grad()
def write_model(
    model_path,
    model_repo,
    file_name,
    organization,
    push_to_hub=False,
):
    """
    Convert an original EfficientLoFTR checkpoint into the HF format and save it.

    Args:
        model_path: Local directory where the converted config/model are written.
        model_repo: Hub repo id hosting the original checkpoint.
        file_name: Checkpoint file name inside `model_repo`.
        organization: Hub organization used as upload target when pushing.
        push_to_hub: If True, also upload the converted model and config to the Hub.
    """
    os.makedirs(model_path, exist_ok=True)
    # ------------------------------------------------------------
    # EfficientLoFTR config
    # ------------------------------------------------------------
    config = EfficientLoFTRConfig()
    config.architectures = ["EfficientLoFTRForKeypointMatching"]
    config.save_pretrained(model_path)
    print("Model config saved successfully...")
    # ------------------------------------------------------------
    # Convert weights
    # ------------------------------------------------------------
    print(f"Fetching all parameters from the checkpoint at {model_repo}/{file_name}...")
    checkpoint_path = hf_hub_download(repo_id=model_repo, filename=file_name)
    original_state_dict = torch.load(checkpoint_path, weights_only=True, map_location="cpu")["state_dict"]
    print("Converting model...")
    all_keys = list(original_state_dict.keys())
    new_keys = convert_old_keys_to_new_keys(all_keys)
    state_dict = {}
    for key in all_keys:
        new_key = new_keys[key]
        # `.contiguous().clone()` detaches each tensor from the original storage
        # so the old state dict can be freed right after the loop.
        state_dict[new_key] = original_state_dict.pop(key).contiguous().clone()
    del original_state_dict
    gc.collect()
    print("Loading the checkpoint in a EfficientLoFTR model...")
    device = "cuda" if torch.cuda.is_available() else "cpu"
    with torch.device(device):
        model = EfficientLoFTRForKeypointMatching(config)
    model.load_state_dict(state_dict)
    print("Checkpoint loaded successfully...")
    # Drop the transient path so it is not persisted in the saved config.
    del model.config._name_or_path
    print("Saving the model...")
    model.save_pretrained(model_path)
    del state_dict, model
    # Safety check: reload the converted model
    gc.collect()
    print("Reloading the model to check if it's saved correctly.")
    model = EfficientLoFTRForKeypointMatching.from_pretrained(model_path)
    print("Model reloaded successfully.")
    model_name = "efficientloftr"
    if model_repo == DEFAULT_MODEL_REPO:
        # Output verification only makes sense against the reference checkpoint.
        print("Checking the model outputs...")
        verify_model_outputs(model, device)
        print("Model outputs verified successfully.")
    if push_to_hub:
        print("Pushing model to the hub...")
        model.push_to_hub(
            repo_id=f"{organization}/{model_name}",
            commit_message="Add model",
        )
        config.push_to_hub(repo_id=f"{organization}/{model_name}", commit_message="Add config")
    write_image_processor(model_path, model_name, organization, push_to_hub=push_to_hub)
def write_image_processor(save_dir, model_name, organization, push_to_hub=False):
    """Create the default EfficientLoFTR image processor, save it locally, and optionally push it to the Hub."""
    processor = EfficientLoFTRImageProcessor()
    processor.save_pretrained(save_dir)
    if not push_to_hub:
        return
    print("Pushing image processor to the hub...")
    processor.push_to_hub(repo_id=f"{organization}/{model_name}", commit_message="Add image processor")
if __name__ == "__main__":
    # CLI entry point: parse conversion options and run the full conversion.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--repo_id",
        default=DEFAULT_MODEL_REPO,
        type=str,
        help="Model repo ID of the original EfficientLoFTR checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--file_name",
        default=DEFAULT_FILE,
        type=str,
        help="File name of the original EfficientLoFTR checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the output PyTorch model directory.",
    )
    # NOTE(review): `--save_model` is parsed but never read below — confirm whether it is still needed.
    parser.add_argument("--save_model", action="store_true", help="Save model to local")
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Push model and image preprocessor to the hub",
    )
    parser.add_argument(
        "--organization",
        default="zju-community",
        type=str,
        help="Hub organization in which you want the model to be uploaded.",
    )
    args = parser.parse_args()
    write_model(
        args.pytorch_dump_folder_path,
        args.repo_id,
        args.file_name,
        args.organization,
        push_to_hub=args.push_to_hub,
    )
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/efficientloftr/convert_efficientloftr_to_hf.py",
"license": "Apache License 2.0",
"lines": 216,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/efficientloftr/image_processing_efficientloftr.py | # Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Image processor class for SuperPoint."""
import numpy as np
from ... import is_torch_available, is_vision_available
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
ImageType,
PILImageResampling,
get_image_type,
infer_channel_dimension_format,
is_pil_image,
is_scaled_image,
is_valid_image,
to_numpy_array,
valid_images,
validate_preprocess_arguments,
)
from ...processing_utils import ImagesKwargs
from ...utils import TensorType, logging, requires_backends
if is_torch_available():
import torch
if is_vision_available():
import PIL
from PIL import Image, ImageDraw
from .modeling_efficientloftr import EfficientLoFTRKeypointMatchingOutput
logger = logging.get_logger(__name__)
class EfficientLoFTRImageProcessorKwargs(ImagesKwargs, total=False):
    r"""
    do_grayscale (`bool`, *optional*, defaults to `True`):
        Whether to convert the image to grayscale. Can be overridden by `do_grayscale` in the `preprocess` method.
    """

    # Extra `preprocess` keyword accepted on top of the base `ImagesKwargs` set.
    do_grayscale: bool
# Copied from transformers.models.superpoint.image_processing_superpoint.is_grayscale
def is_grayscale(
    image: np.ndarray,
    input_data_format: str | ChannelDimension | None = None,
):
    """
    Check whether an image is effectively grayscale: a single channel, or three
    channels that all carry identical values.
    """
    if input_data_format == ChannelDimension.FIRST:
        if image.shape[0] == 1:
            return True
        red, green, blue = image[0, ...], image[1, ...], image[2, ...]
    elif input_data_format == ChannelDimension.LAST:
        if image.shape[-1] == 1:
            return True
        red, green, blue = image[..., 0], image[..., 1], image[..., 2]
    else:
        # Mirrors the original behavior: unknown formats yield None.
        return None
    return np.all(red == green) and np.all(green == blue)
# Copied from transformers.models.superpoint.image_processing_superpoint.convert_to_grayscale
def convert_to_grayscale(
    image: ImageInput,
    input_data_format: str | ChannelDimension | None = None,
) -> ImageInput:
    """
    Converts an image to grayscale format using the NTSC formula. Only support numpy and PIL Image.

    This function is supposed to return a 1-channel image, but it returns a 3-channel image with the same value in each
    channel, because of an issue that is discussed in :
    https://github.com/huggingface/transformers/pull/25786#issuecomment-1730176446

    Args:
        image (Image):
            The image to convert.
        input_data_format (`ChannelDimension` or `str`, *optional*):
            The channel dimension format for the input image.
    """
    requires_backends(convert_to_grayscale, ["vision"])
    if isinstance(image, np.ndarray):
        if is_grayscale(image, input_data_format=input_data_format):
            return image
        # NTSC luma weights applied per channel, then replicated onto 3 channels.
        if input_data_format == ChannelDimension.FIRST:
            luma = image[0, ...] * 0.2989 + image[1, ...] * 0.5870 + image[2, ...] * 0.1140
            gray_image = np.stack([luma] * 3, axis=0)
        elif input_data_format == ChannelDimension.LAST:
            luma = image[..., 0] * 0.2989 + image[..., 1] * 0.5870 + image[..., 2] * 0.1140
            gray_image = np.stack([luma] * 3, axis=-1)
        return gray_image
    if isinstance(image, PIL.Image.Image):
        return image.convert("L")
    return image
# Copied from transformers.models.superglue.image_processing_superglue.validate_and_format_image_pairs
def validate_and_format_image_pairs(images: ImageInput):
    """Validate that `images` is one pair or a batch of pairs, and return a flat list of images."""
    error_message = (
        "Input images must be a one of the following :",
        " - A pair of PIL images.",
        " - A pair of 3D arrays.",
        " - A list of pairs of PIL images.",
        " - A list of pairs of 3D arrays.",
    )

    def _is_valid_image(image):
        """images is a PIL Image or a 3D array."""
        if is_pil_image(image):
            return True
        return is_valid_image(image) and get_image_type(image) != ImageType.PIL and len(image.shape) == 3

    if isinstance(images, list):
        # Case 1: a single pair [image0, image1].
        if len(images) == 2 and all(_is_valid_image(image) for image in images):
            return images
        # Case 2: a batch of pairs [[image0, image1], ...] — flatten into one list.
        if all(
            isinstance(image_pair, list)
            and len(image_pair) == 2
            and all(_is_valid_image(image) for image in image_pair)
            for image_pair in images
        ):
            return [image for image_pair in images for image in image_pair]
    raise ValueError(error_message)
class EfficientLoFTRImageProcessor(BaseImageProcessor):
    r"""
    Constructs a EfficientLoFTR image processor.

    Args:
        do_resize (`bool`, *optional*, defaults to `True`):
            Controls whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden
            by `do_resize` in the `preprocess` method.
        size (`Dict[str, int]` *optional*, defaults to `{"height": 480, "width": 640}`):
            Resolution of the output image after `resize` is applied. Only has an effect if `do_resize` is set to
            `True`. Can be overridden by `size` in the `preprocess` method.
        resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`):
            Resampling filter to use if resizing the image. Can be overridden by `resample` in the `preprocess` method.
        do_rescale (`bool`, *optional*, defaults to `True`):
            Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by `do_rescale` in
            the `preprocess` method.
        rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
            Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in the `preprocess`
            method.
        do_grayscale (`bool`, *optional*, defaults to `True`):
            Whether to convert the image to grayscale. Can be overridden by `do_grayscale` in the `preprocess` method.
    """

    model_input_names = ["pixel_values"]
    valid_kwargs = EfficientLoFTRImageProcessorKwargs

    def __init__(
        self,
        do_resize: bool = True,
        size: dict[str, int] | None = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: float = 1 / 255,
        do_grayscale: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        # Default matches the 480x640 resolution used by the original EfficientLoFTR.
        size = size if size is not None else {"height": 480, "width": 640}
        size = get_size_dict(size, default_to_square=False)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_grayscale = do_grayscale

    # Copied from transformers.models.superpoint.image_processing_superpoint.SuperPointImageProcessor.resize
    def resize(
        self,
        image: np.ndarray,
        size: dict[str, int],
        data_format: str | ChannelDimension | None = None,
        input_data_format: str | ChannelDimension | None = None,
        **kwargs,
    ):
        """
        Resize an image.

        Args:
            image (`np.ndarray`):
                Image to resize.
            size (`dict[str, int]`):
                Dictionary of the form `{"height": int, "width": int}`, specifying the size of the output image.
            data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format of the output image. If not provided, it will be inferred from the input
                image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format for the input image. If unset, the channel dimension format is inferred
                from the input image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
        """
        size = get_size_dict(size, default_to_square=False)
        return resize(
            image,
            size=(size["height"], size["width"]),
            data_format=data_format,
            input_data_format=input_data_format,
            **kwargs,
        )

    # Copied from transformers.models.superglue.image_processing_superglue.SuperGlueImageProcessor.preprocess
    def preprocess(
        self,
        images,
        do_resize: bool | None = None,
        size: dict[str, int] | None = None,
        resample: PILImageResampling | None = None,
        do_rescale: bool | None = None,
        rescale_factor: float | None = None,
        do_grayscale: bool | None = None,
        return_tensors: str | TensorType | None = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        input_data_format: str | ChannelDimension | None = None,
        **kwargs,
    ) -> BatchFeature:
        """
        Preprocess an image or batch of images.

        Args:
            images (`ImageInput`):
                Image pairs to preprocess. Expects either a list of 2 images or a list of list of 2 images list with
                pixel values ranging from 0 to 255. If passing in images with pixel values between 0 and 1, set
                `do_rescale=False`.
            do_resize (`bool`, *optional*, defaults to `self.do_resize`):
                Whether to resize the image.
            size (`dict[str, int]`, *optional*, defaults to `self.size`):
                Size of the output image after `resize` has been applied. If `size["shortest_edge"]` >= 384, the image
                is resized to `(size["shortest_edge"], size["shortest_edge"])`. Otherwise, the smaller edge of the
                image will be matched to `int(size["shortest_edge"]/ crop_pct)`, after which the image is cropped to
                `(size["shortest_edge"], size["shortest_edge"])`. Only has an effect if `do_resize` is set to `True`.
            resample (`PILImageResampling`, *optional*, defaults to `self.resample`):
                Resampling filter to use if resizing the image. This can be one of `PILImageResampling`, filters. Only
                has an effect if `do_resize` is set to `True`.
            do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
                Whether to rescale the image values between [0 - 1].
            rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
                Rescale factor to rescale the image by if `do_rescale` is set to `True`.
            do_grayscale (`bool`, *optional*, defaults to `self.do_grayscale`):
                Whether to convert the image to grayscale.
            return_tensors (`str` or `TensorType`, *optional*):
                The type of tensors to return. Can be one of:
                - Unset: Return a list of `np.ndarray`.
                - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
                - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
            data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
                The channel dimension format for the output image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - Unset: Use the channel dimension format of the input image.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format for the input image. If unset, the channel dimension format is inferred
                from the input image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_grayscale = do_grayscale if do_grayscale is not None else self.do_grayscale
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        # Validate and convert the input images into a flattened list of images for all subsequent processing steps.
        images = validate_and_format_image_pairs(images)
        if not valid_images(images):
            raise ValueError("Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor")
        validate_preprocess_arguments(
            do_resize=do_resize,
            size=size,
            resample=resample,
            do_rescale=do_rescale,
            rescale_factor=rescale_factor,
        )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if is_scaled_image(images[0]) and do_rescale:
            logger.warning_once(
                "It looks like you are trying to rescale already rescaled images. If the input"
                " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
            )
        if input_data_format is None:
            # We assume that all images have the same channel dimension format.
            input_data_format = infer_channel_dimension_format(images[0])
        all_images = []
        for image in images:
            if do_resize:
                image = self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)
            if do_rescale:
                image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
            if do_grayscale:
                image = convert_to_grayscale(image, input_data_format=input_data_format)
            image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
            all_images.append(image)
        # Convert back the flattened list of images into a list of pairs of images.
        image_pairs = [all_images[i : i + 2] for i in range(0, len(all_images), 2)]
        data = {"pixel_values": image_pairs}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_keypoint_matching(
        self,
        outputs: "EfficientLoFTRKeypointMatchingOutput",
        target_sizes: TensorType | list[tuple],
        threshold: float = 0.0,
    ) -> list[dict[str, torch.Tensor]]:
        """
        Converts the raw output of [`EfficientLoFTRKeypointMatchingOutput`] into lists of keypoints, scores and descriptors
        with coordinates absolute to the original image sizes.

        Args:
            outputs ([`EfficientLoFTRKeypointMatchingOutput`]):
                Raw outputs of the model.
            target_sizes (`torch.Tensor` or `List[Tuple[Tuple[int, int]]]`, *optional*):
                Tensor of shape `(batch_size, 2, 2)` or list of tuples of tuples (`Tuple[int, int]`) containing the
                target size `(height, width)` of each image in the batch. This must be the original image size (before
                any processing).
            threshold (`float`, *optional*, defaults to 0.0):
                Threshold to filter out the matches with low scores.

        Returns:
            `List[Dict]`: A list of dictionaries, each dictionary containing the keypoints in the first and second image
            of the pair, the matching scores and the matching indices.
        """
        if outputs.matches.shape[0] != len(target_sizes):
            raise ValueError("Make sure that you pass in as many target sizes as the batch dimension of the mask")
        if not all(len(target_size) == 2 for target_size in target_sizes):
            raise ValueError("Each element of target_sizes must contain the size (h, w) of each image of the batch")
        if isinstance(target_sizes, list):
            image_pair_sizes = torch.tensor(target_sizes, device=outputs.matches.device)
        else:
            if target_sizes.shape[1] != 2 or target_sizes.shape[2] != 2:
                raise ValueError(
                    "Each element of target_sizes must contain the size (h, w) of each image of the batch"
                )
            image_pair_sizes = target_sizes
        keypoints = outputs.keypoints.clone()
        # Sizes are (h, w); `flip(-1)` turns them into (w, h) so they scale the (x, y) keypoint coordinates.
        keypoints = keypoints * image_pair_sizes.flip(-1).reshape(-1, 2, 1, 2)
        keypoints = keypoints.to(torch.int32)
        results = []
        for keypoints_pair, matches, scores in zip(keypoints, outputs.matches, outputs.matching_scores):
            # Filter out matches with low scores
            valid_matches = torch.logical_and(scores > threshold, matches > -1)
            matched_keypoints0 = keypoints_pair[0][valid_matches[0]]
            matched_keypoints1 = keypoints_pair[1][valid_matches[1]]
            matching_scores = scores[0][valid_matches[0]]
            results.append(
                {
                    "keypoints0": matched_keypoints0,
                    "keypoints1": matched_keypoints1,
                    "matching_scores": matching_scores,
                }
            )
        return results

    def visualize_keypoint_matching(
        self,
        images: ImageInput,
        keypoint_matching_output: list[dict[str, torch.Tensor]],
    ) -> list["Image.Image"]:
        """
        Plots the image pairs side by side with the detected keypoints as well as the matching between them.

        Args:
            images (`ImageInput`):
                Image pairs to plot. Same as `EfficientLoFTRImageProcessor.preprocess`. Expects either a list of 2
                images or a list of list of 2 images list with pixel values ranging from 0 to 255.
            keypoint_matching_output (`List[Dict[str, torch.Tensor]]`):
                A post processed keypoint matching output
        Returns:
            `List[PIL.Image.Image]`: A list of PIL images, each containing the image pairs side by side with the detected
            keypoints as well as the matching between them.
        """
        images = validate_and_format_image_pairs(images)
        images = [to_numpy_array(image) for image in images]
        image_pairs = [images[i : i + 2] for i in range(0, len(images), 2)]
        results = []
        for image_pair, pair_output in zip(image_pairs, keypoint_matching_output):
            # Lay both images of the pair side by side on one canvas.
            height0, width0 = image_pair[0].shape[:2]
            height1, width1 = image_pair[1].shape[:2]
            plot_image = np.zeros((max(height0, height1), width0 + width1, 3), dtype=np.uint8)
            plot_image[:height0, :width0] = image_pair[0]
            plot_image[:height1, width0:] = image_pair[1]
            plot_image_pil = Image.fromarray(plot_image)
            draw = ImageDraw.Draw(plot_image_pil)
            keypoints0_x, keypoints0_y = pair_output["keypoints0"].unbind(1)
            keypoints1_x, keypoints1_y = pair_output["keypoints1"].unbind(1)
            for keypoint0_x, keypoint0_y, keypoint1_x, keypoint1_y, matching_score in zip(
                keypoints0_x, keypoints0_y, keypoints1_x, keypoints1_y, pair_output["matching_scores"]
            ):
                # Line color encodes the matching score (red = low, green = high).
                color = self._get_color(matching_score)
                draw.line(
                    (keypoint0_x, keypoint0_y, keypoint1_x + width0, keypoint1_y),
                    fill=color,
                    width=3,
                )
                # Second-image keypoints are shifted by width0 on the shared canvas.
                draw.ellipse((keypoint0_x - 2, keypoint0_y - 2, keypoint0_x + 2, keypoint0_y + 2), fill="black")
                draw.ellipse(
                    (keypoint1_x + width0 - 2, keypoint1_y - 2, keypoint1_x + width0 + 2, keypoint1_y + 2),
                    fill="black",
                )
            results.append(plot_image_pil)
        return results

    def _get_color(self, score):
        """Maps a score to a color."""
        # Linear red->green gradient: score 0 is pure red, score 1 pure green.
        r = int(255 * (1 - score))
        g = int(255 * score)
        b = 0
        return (r, g, b)
__all__ = ["EfficientLoFTRImageProcessor"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/efficientloftr/image_processing_efficientloftr.py",
"license": "Apache License 2.0",
"lines": 398,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/efficientloftr/modeling_efficientloftr.py | # Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections.abc import Callable
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ... import initialization as init
from ...activations import ACT2CLS, ACT2FN
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import BackboneOutput
from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...pytorch_utils import compile_compatible_method_lru_cache
from ...utils import (
ModelOutput,
TransformersKwargs,
auto_docstring,
can_return_tuple,
torch_int,
)
from ...utils.generic import maybe_autocast, merge_with_config_defaults
from ...utils.output_capturing import capture_outputs
from .configuration_efficientloftr import EfficientLoFTRConfig
@dataclass
@auto_docstring(
    custom_intro="""
    Base class for outputs of EfficientLoFTR keypoint matching models. Due to the nature of keypoint detection and matching, the number
    of keypoints is not fixed and can vary from image to image, which makes batching non-trivial. In the batch of
    images, the maximum number of matches is set as the dimension of the matches and matching scores.
    """
)
class EfficientLoFTRKeypointMatchingOutput(ModelOutput):
    r"""
    loss (`torch.FloatTensor` of shape `(1,)`, *optional*):
        Loss computed during training.
    matches (`torch.FloatTensor` of shape `(batch_size, 2, num_matches)`):
        Index of keypoint matched in the other image.
    matching_scores (`torch.FloatTensor` of shape `(batch_size, 2, num_matches)`):
        Scores of predicted matches.
    keypoints (`torch.FloatTensor` of shape `(batch_size, num_keypoints, 2)`):
        Absolute (x, y) coordinates of predicted keypoints in a given image.
    hidden_states (`tuple[torch.FloatTensor, ...]`, *optional*):
        Tuple of `torch.FloatTensor` (one for the output of each stage) of shape `(batch_size, 2, num_channels,
        num_keypoints)`, returned when `output_hidden_states=True` is passed or when
        `config.output_hidden_states=True`)
    attentions (`tuple[torch.FloatTensor, ...]`, *optional*):
        Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, 2, num_heads, num_keypoints,
        num_keypoints)`, returned when `output_attentions=True` is passed or when `config.output_attentions=True`)
    """

    # Every field defaults to None so partially-populated outputs can be built.
    loss: torch.FloatTensor | None = None
    matches: torch.FloatTensor | None = None
    matching_scores: torch.FloatTensor | None = None
    keypoints: torch.FloatTensor | None = None
    hidden_states: tuple[torch.FloatTensor] | None = None
    attentions: tuple[torch.FloatTensor] | None = None
@compile_compatible_method_lru_cache(maxsize=32)
def compute_embeddings(inv_freq: torch.Tensor, embed_height: int, embed_width: int, hidden_size: int) -> torch.Tensor:
    """
    Build the 2D rotary position angles for an (embed_height, embed_width) grid.

    Row positions (1..H) fill the even channels and column positions (1..W) the
    odd channels, each scaled by `inv_freq`. Returns a tensor of shape
    `(1, embed_height, embed_width, hidden_size // 2)`.
    """
    ones = torch.ones(embed_height, embed_width, dtype=inv_freq.dtype, device=inv_freq.device)
    row_positions = ones.cumsum(0).unsqueeze(-1)  # 1..H along the height axis
    col_positions = ones.cumsum(1).unsqueeze(-1)  # 1..W along the width axis
    emb = torch.zeros(1, embed_height, embed_width, hidden_size // 2, dtype=inv_freq.dtype, device=inv_freq.device)
    emb[..., 0::2] = row_positions * inv_freq
    emb[..., 1::2] = col_positions * inv_freq
    return emb
# Copied from transformers.models.llama.modeling_llama.LlamaRotaryEmbedding with Llama->EfficientLoFTR
class EfficientLoFTRRotaryEmbedding(nn.Module):
    inv_freq: torch.Tensor  # fix linting for `register_buffer`

    # Ignore copy
    def __init__(self, config: EfficientLoFTRConfig, device=None):
        super().__init__()
        self.config = config
        self.rope_type = self.config.rope_parameters["rope_type"]
        # Default RoPE uses the local static method; other types come from the registry.
        rope_init_fn: Callable = self.compute_default_rope_parameters
        if self.rope_type != "default":
            rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
        inv_freq, self.attention_scaling = rope_init_fn(self.config, device)
        self.register_buffer("inv_freq", inv_freq, persistent=False)
        # Kept so the original frequencies can be restored after scaling tricks.
        self.register_buffer("original_inv_freq", inv_freq.clone(), persistent=False)

    @staticmethod
    # Ignore copy
    def compute_default_rope_parameters(
        config: EfficientLoFTRConfig | None = None,
        device: Optional["torch.device"] = None,
        seq_len: int | None = None,
    ) -> tuple["torch.Tensor", float]:
        """
        Computes the inverse frequencies according to the original RoPE implementation

        Args:
            config ([`~transformers.PreTrainedConfig`]):
                The model configuration.
            device (`torch.device`):
                The device to use for initialization of the inverse frequencies.
            seq_len (`int`, *optional*):
                The current sequence length. Unused for this type of RoPE.
        Returns:
            Tuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the
            post-processing scaling factor applied to the computed cos/sin (unused in this type of RoPE).
        """
        base = config.rope_parameters["rope_theta"]
        partial_rotary_factor = config.rope_parameters.get("partial_rotary_factor", 1.0)
        head_dim = getattr(config, "head_dim", None) or config.hidden_size // config.num_attention_heads
        dim = int(head_dim * partial_rotary_factor)
        attention_factor = 1.0  # Unused in this type of RoPE
        # Compute the inverse frequencies
        inv_freq = 1.0 / (
            base ** (torch.arange(0, dim, 2, dtype=torch.int64).to(device=device, dtype=torch.float) / dim)
        )
        return inv_freq, attention_factor

    # Ignore copy
    @torch.no_grad()
    def forward(
        self, x: torch.Tensor, position_ids: torch.LongTensor | None = None, layer_type=None
    ) -> tuple[torch.Tensor, torch.Tensor]:
        feats_height, feats_width = x.shape[-2:]
        # The embedding grid matches the output resolution of the query-aggregation conv.
        embed_height = (feats_height - self.config.q_aggregation_kernel_size) // self.config.q_aggregation_stride + 1
        embed_width = (feats_width - self.config.q_aggregation_kernel_size) // self.config.q_aggregation_stride + 1
        device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
        with maybe_autocast(device_type=device_type, enabled=False):  # Force float32
            emb = compute_embeddings(self.inv_freq, embed_height, embed_width, self.config.hidden_size)
            sin = emb.sin()
            cos = emb.cos()
        # Duplicate each angle so sin/cos line up with interleaved channel pairs.
        sin = sin.repeat_interleave(2, dim=-1)
        cos = cos.repeat_interleave(2, dim=-1)
        sin = sin.to(device=x.device, dtype=x.dtype)
        cos = cos.to(device=x.device, dtype=x.dtype)
        return cos, sin
# Copied from transformers.models.rt_detr_v2.modeling_rt_detr_v2.RTDetrV2ConvNormLayer with RTDetrV2->EfficientLoFTR
class EfficientLoFTRConvNormLayer(nn.Module):
    """Convolution followed by batch normalization and an optional activation."""

    def __init__(self, config, in_channels, out_channels, kernel_size, stride, padding=None, activation=None):
        super().__init__()
        # Default to "same"-style padding for odd kernel sizes.
        if padding is None:
            padding = (kernel_size - 1) // 2
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding=padding, bias=False)
        self.norm = nn.BatchNorm2d(out_channels, config.batch_norm_eps)
        self.activation = ACT2CLS[activation]() if activation is not None else nn.Identity()

    def forward(self, hidden_state):
        return self.activation(self.norm(self.conv(hidden_state)))
class EfficientLoFTRRepVGGBlock(GradientCheckpointingLayer):
    """
    RepVGG architecture block introduced by the work "RepVGG: Making VGG-style ConvNets Great Again".
    """

    def __init__(self, config: EfficientLoFTRConfig, stage_idx: int, block_idx: int):
        super().__init__()
        in_channels = config.stage_block_in_channels[stage_idx][block_idx]
        out_channels = config.stage_block_out_channels[stage_idx][block_idx]
        stride = config.stage_block_stride[stage_idx][block_idx]
        activation = config.activation_function
        # 3x3 and 1x1 branches run in parallel and are summed in forward().
        self.conv1 = EfficientLoFTRConvNormLayer(
            config, in_channels, out_channels, kernel_size=3, stride=stride, padding=1
        )
        self.conv2 = EfficientLoFTRConvNormLayer(
            config, in_channels, out_channels, kernel_size=1, stride=stride, padding=0
        )
        # The identity shortcut only exists when the block preserves shape.
        preserves_shape = in_channels == out_channels and stride == 1
        self.identity = nn.BatchNorm2d(in_channels) if preserves_shape else None
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        shortcut = self.identity(hidden_states) if self.identity is not None else 0
        hidden_states = self.conv1(hidden_states) + self.conv2(hidden_states) + shortcut
        return self.activation(hidden_states)
class EfficientLoFTRRepVGGStage(nn.Module):
    """A sequence of RepVGG blocks forming one backbone stage."""

    def __init__(self, config: EfficientLoFTRConfig, stage_idx: int):
        super().__init__()
        num_blocks = config.stage_num_blocks[stage_idx]
        self.blocks = nn.ModuleList(
            [EfficientLoFTRRepVGGBlock(config, stage_idx, block_idx) for block_idx in range(num_blocks)]
        )

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        # Blocks are applied strictly in order.
        for block in self.blocks:
            hidden_states = block(hidden_states)
        return hidden_states
class EfficientLoFTRepVGG(nn.Module):
    """RepVGG backbone made of sequential stages; returns the outputs of every stage but the first."""

    def __init__(self, config: EfficientLoFTRConfig):
        super().__init__()
        num_stages = len(config.stage_stride)
        self.stages = nn.ModuleList([EfficientLoFTRRepVGGStage(config, idx) for idx in range(num_stages)])

    def forward(self, hidden_states: torch.Tensor) -> list[torch.Tensor]:
        """Run all stages sequentially and collect each stage's feature map (first stage excluded)."""
        stage_outputs = []
        for stage in self.stages:
            hidden_states = stage(hidden_states)
            stage_outputs.append(hidden_states)
        # Exclude first stage in outputs
        return stage_outputs[1:]
class EfficientLoFTRAggregationLayer(nn.Module):
    """Downsample query and key/value feature maps before attention and normalize them with a shared LayerNorm."""

    def __init__(self, config: EfficientLoFTRConfig):
        super().__init__()
        hidden_size = config.hidden_size
        # Depth-wise convolution (groups == channels) aggregating the query features
        self.q_aggregation = nn.Conv2d(
            hidden_size,
            hidden_size,
            kernel_size=config.q_aggregation_kernel_size,
            padding=0,
            stride=config.q_aggregation_stride,
            bias=False,
            groups=hidden_size,
        )
        # Parameter-free max-pooling aggregating the key/value features
        self.kv_aggregation = torch.nn.MaxPool2d(
            kernel_size=config.kv_aggregation_kernel_size, stride=config.kv_aggregation_stride
        )
        self.norm = nn.LayerNorm(hidden_size)

    def forward(
        self,
        hidden_states: torch.Tensor,
        encoder_hidden_states: torch.Tensor | None = None,
    ) -> tuple[torch.Tensor, torch.Tensor]:
        """Return aggregated, channel-last, normalized (query, key/value) feature maps.

        Keys/values come from `encoder_hidden_states` when given (cross-attention),
        otherwise from `hidden_states` (self-attention).
        """
        kv_source = hidden_states if encoder_hidden_states is None else encoder_hidden_states
        aggregated_queries = self.q_aggregation(hidden_states)
        aggregated_kv = self.kv_aggregation(kv_source)
        # (batch, channels, height, width) -> (batch, height, width, channels) so LayerNorm
        # normalizes over the channel dimension
        aggregated_queries = aggregated_queries.permute(0, 2, 3, 1)
        aggregated_kv = aggregated_kv.permute(0, 2, 3, 1)
        return self.norm(aggregated_queries), self.norm(aggregated_kv)
# Copied from transformers.models.cohere.modeling_cohere.rotate_half
def rotate_half(x):
    """Rotate pairs of adjacent channels: (x0, x1) -> (-x1, x0).

    This interleaved variant differs from e.g. Llama, which rotates the two halves of
    the last dimension instead.
    """
    even_channels = x[..., 0::2]
    odd_channels = x[..., 1::2]
    return torch.stack((-odd_channels, even_channels), dim=-1).flatten(-2)
# Copied from transformers.models.cohere.modeling_cohere.apply_rotary_pos_emb
def apply_rotary_pos_emb(q, k, cos, sin, unsqueeze_dim=1):
    """Applies Rotary Position Embedding to the query and key tensors.

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            Dimension along which `cos` and `sin` are unsqueezed so they broadcast against
            q and k. Use 1 when q/k are shaped [batch_size, heads, seq_len, head_dim] and
            2 when they are shaped [batch_size, seq_len, heads, head_dim].

    Returns:
        `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
    """
    input_dtype = q.dtype
    # The rotation is computed in float32 for numerical stability, then cast back
    q_fp32 = q.float()
    k_fp32 = k.float()
    cos = cos.unsqueeze(unsqueeze_dim)
    sin = sin.unsqueeze(unsqueeze_dim)
    rotated_q = q_fp32 * cos + rotate_half(q_fp32) * sin
    rotated_k = k_fp32 * cos + rotate_half(k_fp32) * sin
    return rotated_q.to(dtype=input_dtype), rotated_k.to(dtype=input_dtype)
# Copied from transformers.models.cohere.modeling_cohere.repeat_kv
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    """
    Equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep): expands the key/value
    heads from (batch, num_key_value_heads, seq_len, head_dim) to
    (batch, num_key_value_heads * n_rep, seq_len, head_dim).
    """
    # Fast path: nothing to repeat, return the input unchanged (same object)
    if n_rep == 1:
        return hidden_states
    batch, num_kv_heads, seq_len, head_dim = hidden_states.shape
    expanded = hidden_states[:, :, None, :, :].expand(batch, num_kv_heads, n_rep, seq_len, head_dim)
    return expanded.reshape(batch, num_kv_heads * n_rep, seq_len, head_dim)
# Copied from transformers.models.llama.modeling_llama.eager_attention_forward
def eager_attention_forward(
    module: nn.Module,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attention_mask: torch.Tensor | None,
    scaling: float,
    dropout: float = 0.0,
    **kwargs: Unpack[TransformersKwargs],
):
    """Reference (eager) scaled dot-product attention with grouped-query support.

    Returns the attention output transposed back to (batch, seq, heads, head_dim)
    together with the (post-dropout) attention weights.
    """
    # Expand key/value heads so they line up with the query heads (grouped-query attention)
    expanded_keys = repeat_kv(key, module.num_key_value_groups)
    expanded_values = repeat_kv(value, module.num_key_value_groups)

    scores = torch.matmul(query, expanded_keys.transpose(2, 3)) * scaling
    if attention_mask is not None:
        scores = scores + attention_mask

    # Softmax in float32 for numerical stability, then cast back to the query dtype
    probabilities = nn.functional.softmax(scores, dim=-1, dtype=torch.float32).to(query.dtype)
    probabilities = nn.functional.dropout(probabilities, p=dropout, training=module.training)

    context = torch.matmul(probabilities, expanded_values)
    context = context.transpose(1, 2).contiguous()
    return context, probabilities
class EfficientLoFTRAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: EfficientLoFTRConfig, layer_idx: int):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        # Fall back to hidden_size // num_attention_heads when the config has no `head_dim`
        self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
        # Grouped-query attention: each key/value head serves this many query heads
        self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
        self.scaling = self.head_dim**-0.5
        self.attention_dropout = config.attention_dropout
        # Attention is over spatial positions of an image, so it is never causal
        self.is_causal = False
        self.q_proj = nn.Linear(
            config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias
        )
        self.k_proj = nn.Linear(
            config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
        )
        self.v_proj = nn.Linear(
            config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
        )
        self.o_proj = nn.Linear(
            config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.attention_bias
        )

    def forward(
        self,
        hidden_states: torch.Tensor,
        encoder_hidden_states: torch.Tensor | None = None,
        position_embeddings: tuple[torch.Tensor, torch.Tensor] | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple[torch.Tensor, torch.Tensor | None]:
        """
        Compute self- or cross-attention over flattened spatial features.

        When `encoder_hidden_states` is provided, keys and values are derived from it
        (cross-attention); otherwise they come from `hidden_states` (self-attention).
        Returns the projected attention output and the attention weights (or None,
        depending on the selected attention implementation).
        """
        batch_size, seq_len, dim = hidden_states.shape
        input_shape = hidden_states.shape[:-1]
        # Queries/keys are first viewed with the full hidden size `dim` as the last axis so the
        # rotary embedding below is applied across the whole channel dimension; they are only
        # split into `head_dim`-sized heads afterwards. Values skip this and go straight to heads.
        query_states = self.q_proj(hidden_states).view(batch_size, seq_len, -1, dim)
        current_states = encoder_hidden_states if encoder_hidden_states is not None else hidden_states
        key_states = self.k_proj(current_states).view(batch_size, seq_len, -1, dim)
        value_states = self.v_proj(current_states).view(batch_size, seq_len, -1, self.head_dim).transpose(1, 2)
        if position_embeddings is not None:
            cos, sin = position_embeddings
            # unsqueeze_dim=2 because q/k are currently (batch, seq, 1, dim)
            query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, unsqueeze_dim=2)
        # Split into heads: (batch, seq, num_heads, head_dim) -> (batch, num_heads, seq, head_dim)
        query_states = query_states.view(batch_size, seq_len, -1, self.head_dim).transpose(1, 2)
        key_states = key_states.view(batch_size, seq_len, -1, self.head_dim).transpose(1, 2)
        # Dispatch to the configured attention kernel (eager/SDPA/flash), defaulting to eager
        attention_interface: Callable = ALL_ATTENTION_FUNCTIONS.get_interface(
            self.config._attn_implementation, eager_attention_forward
        )
        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask=None,
            dropout=0.0 if not self.training else self.attention_dropout,
            scaling=self.scaling,
            **kwargs,
        )
        attn_output = attn_output.reshape(*input_shape, -1).contiguous()
        attn_output = self.o_proj(attn_output)
        return attn_output, attn_weights
class EfficientLoFTRMLP(nn.Module):
    """Two-layer feed-forward network fusing concatenated features back down to hidden_size."""

    def __init__(self, config: EfficientLoFTRConfig):
        super().__init__()
        # The input is a concatenation of two feature maps, hence the doubled input width
        self.fc1 = nn.Linear(config.hidden_size * 2, config.intermediate_size, bias=False)
        self.activation = ACT2FN[config.mlp_activation_function]
        self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size, bias=False)
        self.layer_norm = nn.LayerNorm(config.hidden_size)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        """Project, activate, project back and normalize."""
        projected = self.fc1(hidden_states)
        projected = self.activation(projected)
        projected = self.fc2(projected)
        return self.layer_norm(projected)
class EfficientLoFTRAggregatedAttention(nn.Module):
    """Attention over aggregated (downsampled) features, followed by upsampling and an MLP fusion."""

    def __init__(self, config: EfficientLoFTRConfig, layer_idx: int):
        super().__init__()
        self.q_aggregation_kernel_size = config.q_aggregation_kernel_size
        self.aggregation = EfficientLoFTRAggregationLayer(config)
        self.attention = EfficientLoFTRAttention(config, layer_idx)
        self.mlp = EfficientLoFTRMLP(config)

    def forward(
        self,
        hidden_states: torch.Tensor,
        encoder_hidden_states: torch.Tensor | None = None,
        position_embeddings: tuple[torch.Tensor, torch.Tensor] | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> torch.Tensor:
        """Aggregate, attend, upsample, then fuse with the input through a residual MLP."""
        batch_size, embed_dim, _, _ = hidden_states.shape

        # Aggregate (downsample) features before running attention
        aggregated_queries, aggregated_kv = self.aggregation(hidden_states, encoder_hidden_states)
        _, aggregated_height, aggregated_width, _ = aggregated_queries.shape

        # Flatten spatial dimensions into a sequence for multi-head attention
        aggregated_queries = aggregated_queries.reshape(batch_size, -1, embed_dim)
        aggregated_kv = aggregated_kv.reshape(batch_size, -1, embed_dim)
        attn_output, _ = self.attention(
            aggregated_queries,
            aggregated_kv,
            position_embeddings=position_embeddings,
            **kwargs,
        )

        # (batch_size, seq_len, embed_dim) -> (batch_size, embed_dim, h, w) with seq_len = h * w,
        # then upsample back to the resolution of the non-aggregated input
        attn_output = attn_output.permute(0, 2, 1)
        attn_output = attn_output.reshape(batch_size, embed_dim, aggregated_height, aggregated_width)
        attn_output = torch.nn.functional.interpolate(
            attn_output, scale_factor=self.q_aggregation_kernel_size, mode="bilinear", align_corners=False
        )

        # Fuse the upsampled attention output with the input features through the MLP (channel-last)
        fused = torch.cat([hidden_states, attn_output], dim=1)
        fused = fused.permute(0, 2, 3, 1)
        fused = self.mlp(fused)
        fused = fused.permute(0, 3, 1, 2)
        return hidden_states + fused
class EfficientLoFTRLocalFeatureTransformerLayer(GradientCheckpointingLayer):
    """One transformer layer applying aggregated self-attention, then cross-attention between the two images."""

    def __init__(self, config: EfficientLoFTRConfig, layer_idx: int):
        super().__init__()
        self.self_attention = EfficientLoFTRAggregatedAttention(config, layer_idx)
        self.cross_attention = EfficientLoFTRAggregatedAttention(config, layer_idx)

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: tuple[torch.Tensor, torch.Tensor] | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> torch.Tensor:
        """Input/output shape: (batch, 2, embed_dim, height, width), one slice per image of the pair."""
        _, _, embed_dim, height, width = hidden_states.shape

        # Fold the image-pair dimension into the batch for self-attention
        hidden_states = hidden_states.reshape(-1, embed_dim, height, width)
        hidden_states = self.self_attention(hidden_states, position_embeddings=position_embeddings, **kwargs)

        ###
        # Implementation of a bug in the original implementation regarding the cross-attention
        # See : https://github.com/zju3dv/MatchAnything/issues/26
        hidden_states = hidden_states.reshape(-1, 2, embed_dim, height, width)
        features_0 = hidden_states[:, 0]
        features_1 = hidden_states[:, 1]
        # NOTE: the already-updated features_0 is used as context for features_1, reproducing
        # the asymmetry of the reference implementation (see the issue linked above)
        features_0 = self.cross_attention(features_0, features_1, **kwargs)
        features_1 = self.cross_attention(features_1, features_0, **kwargs)
        hidden_states = torch.stack((features_0, features_1), dim=1)
        ###
        return hidden_states
class EfficientLoFTRLocalFeatureTransformer(nn.Module):
    """Stack of local feature transformer layers applied sequentially to the coarse features."""

    def __init__(self, config: EfficientLoFTRConfig):
        super().__init__()
        self.layers = nn.ModuleList(
            [
                EfficientLoFTRLocalFeatureTransformerLayer(config, layer_idx=layer_idx)
                for layer_idx in range(config.num_attention_layers)
            ]
        )

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: tuple[torch.Tensor, torch.Tensor] | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> torch.Tensor:
        """Run every layer in order, feeding each layer's output into the next."""
        for transformer_layer in self.layers:
            hidden_states = transformer_layer(hidden_states, position_embeddings=position_embeddings, **kwargs)
        return hidden_states
class EfficientLoFTROutConvBlock(nn.Module):
    """Merges a residual feature map into the hidden states and doubles their spatial resolution."""

    def __init__(self, config: EfficientLoFTRConfig, hidden_size: int, intermediate_size: int):
        super().__init__()
        # 1x1 projection aligning the residual channels with the hidden states
        self.out_conv1 = nn.Conv2d(hidden_size, intermediate_size, kernel_size=1, stride=1, padding=0, bias=False)
        self.out_conv2 = nn.Conv2d(
            intermediate_size, intermediate_size, kernel_size=3, stride=1, padding=1, bias=False
        )
        self.batch_norm = nn.BatchNorm2d(intermediate_size)
        self.activation = ACT2CLS[config.mlp_activation_function]()
        self.out_conv3 = nn.Conv2d(intermediate_size, hidden_size, kernel_size=3, stride=1, padding=1, bias=False)

    def forward(self, hidden_states: torch.Tensor, residual_states: torch.Tensor) -> torch.Tensor:
        """Project the residual, add the hidden states, refine, then upsample by a factor of 2."""
        merged = self.out_conv1(residual_states)
        merged = merged + hidden_states
        merged = self.out_conv2(merged)
        merged = self.batch_norm(merged)
        merged = self.activation(merged)
        merged = self.out_conv3(merged)
        # Double the spatial resolution so the next pyramid level can be fused
        return nn.functional.interpolate(merged, scale_factor=2.0, mode="bilinear", align_corners=False)
class EfficientLoFTRFineFusionLayer(nn.Module):
    """
    Feature-pyramid fusion: merges the transformed coarse features with the backbone's residual
    features to produce full-resolution fine feature patches used for match refinement.
    """

    def __init__(self, config: EfficientLoFTRConfig):
        super().__init__()
        self.fine_kernel_size = config.fine_kernel_size
        fine_fusion_dims = config.fine_fusion_dims
        self.out_conv = nn.Conv2d(
            fine_fusion_dims[0], fine_fusion_dims[0], kernel_size=1, stride=1, padding=0, bias=False
        )
        # One fusion block per remaining pyramid level, each consuming one residual feature map
        self.out_conv_layers = nn.ModuleList()
        for i in range(1, len(fine_fusion_dims)):
            out_conv = EfficientLoFTROutConvBlock(config, fine_fusion_dims[i], fine_fusion_dims[i - 1])
            self.out_conv_layers.append(out_conv)

    def forward_pyramid(
        self,
        hidden_states: torch.Tensor,
        residual_states: list[torch.Tensor],
    ) -> torch.Tensor:
        """Upsample `hidden_states` through the pyramid, fusing residual_states[i] at level i."""
        hidden_states = self.out_conv(hidden_states)
        hidden_states = nn.functional.interpolate(
            hidden_states, scale_factor=2.0, mode="bilinear", align_corners=False
        )
        # Each out_conv layer fuses one residual map and doubles the resolution again
        for i, layer in enumerate(self.out_conv_layers):
            hidden_states = layer(hidden_states, residual_states[i])
        return hidden_states

    def forward(
        self,
        coarse_features: torch.Tensor,
        residual_features: list[torch.Tensor] | tuple[torch.Tensor],
    ) -> tuple[torch.Tensor, torch.Tensor]:
        """
        For each image pair, compute the fine features of pixels.
        In both images, compute a patch of fine features center cropped around each coarse pixel.
        In the first image, the feature patch is kernel_size large and long.
        In the second image, it is (kernel_size + 2) large and long.
        """
        batch_size, _, embed_dim, coarse_height, coarse_width = coarse_features.shape
        # Fold the image-pair dimension into the batch dimension
        coarse_features = coarse_features.reshape(-1, embed_dim, coarse_height, coarse_width)
        # Reversed so the pyramid consumes the residuals from coarsest to finest
        residual_features = list(reversed(residual_features))

        # 1. Fine feature extraction
        fine_features = self.forward_pyramid(coarse_features, residual_features)
        _, fine_embed_dim, fine_height, fine_width = fine_features.shape
        # Un-fold the image-pair dimension back out of the batch
        fine_features = fine_features.reshape(batch_size, 2, fine_embed_dim, fine_height, fine_width)
        fine_features_0 = fine_features[:, 0]
        fine_features_1 = fine_features[:, 1]

        # 2. Unfold all local windows in crops
        # One window per coarse pixel: the stride is the fine/coarse resolution ratio
        stride = int(fine_height // coarse_height)
        fine_features_0 = nn.functional.unfold(
            fine_features_0, kernel_size=self.fine_kernel_size, stride=stride, padding=0
        )
        _, _, seq_len = fine_features_0.shape
        # (batch, channels * k^2, seq_len) -> (batch, seq_len, k^2, channels)
        fine_features_0 = fine_features_0.reshape(batch_size, -1, self.fine_kernel_size**2, seq_len)
        fine_features_0 = fine_features_0.permute(0, 3, 2, 1)
        # Second-image windows are 2 pixels larger; padding=1 keeps the window count identical
        fine_features_1 = nn.functional.unfold(
            fine_features_1, kernel_size=self.fine_kernel_size + 2, stride=stride, padding=1
        )
        fine_features_1 = fine_features_1.reshape(batch_size, -1, (self.fine_kernel_size + 2) ** 2, seq_len)
        fine_features_1 = fine_features_1.permute(0, 3, 2, 1)
        return fine_features_0, fine_features_1
@auto_docstring
class EfficientLoFTRPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = EfficientLoFTRConfig
    base_model_prefix = "efficientloftr"
    main_input_name = "pixel_values"
    input_modalities = ("image",)
    supports_gradient_checkpointing = True
    _supports_flash_attn = True
    _supports_sdpa = True
    # Modules whose outputs are recorded when hidden states / attentions are requested
    _can_record_outputs = {
        "hidden_states": EfficientLoFTRRepVGGBlock,
        "attentions": EfficientLoFTRAttention,
    }

    @torch.no_grad()
    def _init_weights(self, module: nn.Module) -> None:
        """Initialize the weights"""
        if isinstance(module, (nn.Linear, nn.Conv2d, nn.Conv1d, nn.BatchNorm2d)):
            init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                init.zeros_(module.bias)
            # Reset BatchNorm running statistics when the module tracks them
            if getattr(module, "running_mean", None) is not None:
                init.zeros_(module.running_mean)
                init.ones_(module.running_var)
                init.zeros_(module.num_batches_tracked)
        elif isinstance(module, nn.LayerNorm):
            init.zeros_(module.bias)
            init.ones_(module.weight)
        elif isinstance(module, EfficientLoFTRRotaryEmbedding):
            # Recompute the rotary inverse-frequency buffers from the config
            rope_fn = (
                ROPE_INIT_FUNCTIONS[module.rope_type]
                if module.rope_type != "default"
                else module.compute_default_rope_parameters
            )
            buffer_value, _ = rope_fn(module.config)
            init.copy_(module.inv_freq, buffer_value)
            init.copy_(module.original_inv_freq, buffer_value)

    # Copied from transformers.models.superpoint.modeling_superpoint.SuperPointPreTrainedModel.extract_one_channel_pixel_values with SuperPoint->EfficientLoFTR
    def extract_one_channel_pixel_values(self, pixel_values: torch.FloatTensor) -> torch.FloatTensor:
        """
        Assuming pixel_values has shape (batch_size, 3, height, width), and that all channels values are the same,
        extract the first channel value to get a tensor of shape (batch_size, 1, height, width) for EfficientLoFTR. This is
        a workaround for the issue discussed in :
        https://github.com/huggingface/transformers/pull/25786#issuecomment-1730176446

        Args:
            pixel_values: torch.FloatTensor of shape (batch_size, 3, height, width)

        Returns:
            pixel_values: torch.FloatTensor of shape (batch_size, 1, height, width)
        """
        return pixel_values[:, 0, :, :][:, None, :, :]
@auto_docstring(
    custom_intro="""
    EfficientLoFTR model taking images as inputs and outputting the features of the images.
    """
)
class EfficientLoFTRModel(EfficientLoFTRPreTrainedModel):
    def __init__(self, config: EfficientLoFTRConfig):
        super().__init__(config)
        self.config = config
        # RepVGG backbone extracting multi-scale feature maps
        self.backbone = EfficientLoFTRepVGG(config)
        # Transformer applied to the coarsest feature maps
        self.local_feature_transformer = EfficientLoFTRLocalFeatureTransformer(config)
        self.rotary_emb = EfficientLoFTRRotaryEmbedding(config=config)
        self.post_init()

    @merge_with_config_defaults
    @capture_outputs
    @auto_docstring
    def forward(
        self,
        pixel_values: torch.FloatTensor,
        labels: torch.LongTensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> BackboneOutput:
        r"""
        Examples:

        ```python
        >>> from transformers import AutoImageProcessor, AutoModel
        >>> import torch
        >>> from PIL import Image
        >>> import httpx
        >>> from io import BytesIO

        >>> url = "https://github.com/magicleap/SuperGluePretrainedNetwork/blob/master/assets/phototourism_sample_images/london_bridge_78916675_4568141288.jpg?raw=true"
        >>> with httpx.stream("GET", url) as response:
        ...     image1 = Image.open(BytesIO(response.read()))

        >>> url = "https://github.com/magicleap/SuperGluePretrainedNetwork/blob/master/assets/phototourism_sample_images/london_bridge_19481797_2295892421.jpg?raw=true"
        >>> with httpx.stream("GET", url) as response:
        ...     image2 = Image.open(BytesIO(response.read()))

        >>> images = [image1, image2]

        >>> processor = AutoImageProcessor.from_pretrained("zju-community/efficient_loftr")
        >>> model = AutoModel.from_pretrained("zju-community/efficient_loftr")

        >>> with torch.no_grad():
        ...     inputs = processor(images, return_tensors="pt")
        ...     outputs = model(**inputs)
        ```"""
        if labels is not None:
            raise ValueError("EfficientLoFTR is not trainable, no labels should be provided.")
        if pixel_values.ndim != 5 or pixel_values.size(1) != 2:
            raise ValueError("Input must be a 5D tensor of shape (batch_size, 2, num_channels, height, width)")
        # Fold the image-pair dimension into the batch and keep a single channel
        batch_size, _, channels, height, width = pixel_values.shape
        pixel_values = pixel_values.reshape(batch_size * 2, channels, height, width)
        pixel_values = self.extract_one_channel_pixel_values(pixel_values)
        # 1. Local Feature CNN
        features = self.backbone(pixel_values)
        # Last stage outputs are coarse outputs
        coarse_features = features[-1]
        # Rest is residual features used in EfficientLoFTRFineFusionLayer
        residual_features = features[:-1]
        coarse_embed_dim, coarse_height, coarse_width = coarse_features.shape[-3:]
        # 2. Coarse-level LoFTR module
        # Rotary embeddings are broadcast over the image pair and flattened over spatial positions
        cos, sin = self.rotary_emb(coarse_features)
        cos = cos.expand(batch_size * 2, -1, -1, -1).reshape(batch_size * 2, -1, coarse_embed_dim)
        sin = sin.expand(batch_size * 2, -1, -1, -1).reshape(batch_size * 2, -1, coarse_embed_dim)
        position_embeddings = (cos, sin)
        coarse_features = coarse_features.reshape(batch_size, 2, coarse_embed_dim, coarse_height, coarse_width)
        coarse_features = self.local_feature_transformer(
            coarse_features, position_embeddings=position_embeddings, **kwargs
        )
        features = (coarse_features,) + tuple(residual_features)
        return BackboneOutput(feature_maps=features)
def mask_border(tensor: torch.Tensor, border_margin: int, value: bool | float | int) -> torch.Tensor:
"""
Mask a tensor border with a given value
Args:
tensor (`torch.Tensor` of shape `(batch_size, height_0, width_0, height_1, width_1)`):
The tensor to mask
border_margin (`int`) :
The size of the border
value (`Union[bool, int, float]`):
The value to place in the tensor's borders
Returns:
tensor (`torch.Tensor` of shape `(batch_size, height_0, width_0, height_1, width_1)`):
The masked tensor
"""
if border_margin <= 0:
return tensor
tensor[:, :border_margin] = value
tensor[:, :, :border_margin] = value
tensor[:, :, :, :border_margin] = value
tensor[:, :, :, :, :border_margin] = value
tensor[:, -border_margin:] = value
tensor[:, :, -border_margin:] = value
tensor[:, :, :, -border_margin:] = value
tensor[:, :, :, :, -border_margin:] = value
return tensor
def create_meshgrid(
height: int | torch.Tensor,
width: int | torch.Tensor,
normalized_coordinates: bool = False,
device: torch.device | None = None,
dtype: torch.dtype | None = None,
) -> torch.Tensor:
"""
Copied from kornia library : kornia/kornia/utils/grid.py:26
Generate a coordinate grid for an image.
When the flag ``normalized_coordinates`` is set to True, the grid is
normalized to be in the range :math:`[-1,1]` to be consistent with the pytorch
function :py:func:`torch.nn.functional.grid_sample`.
Args:
height (`int`):
The image height (rows).
width (`int`):
The image width (cols).
normalized_coordinates (`bool`):
Whether to normalize coordinates in the range :math:`[-1,1]` in order to be consistent with the
PyTorch function :py:func:`torch.nn.functional.grid_sample`.
device (`torch.device`):
The device on which the grid will be generated.
dtype (`torch.dtype`):
The data type of the generated grid.
Return:
grid (`torch.Tensor` of shape `(1, height, width, 2)`):
The grid tensor.
Example:
>>> create_meshgrid(2, 2)
tensor([[[[-1., -1.],
[ 1., -1.]],
<BLANKLINE>
[[-1., 1.],
[ 1., 1.]]]])
>>> create_meshgrid(2, 2, normalized_coordinates=False)
tensor([[[[0., 0.],
[1., 0.]],
<BLANKLINE>
[[0., 1.],
[1., 1.]]]])
"""
xs = torch.linspace(0, width - 1, width, device=device, dtype=dtype)
ys = torch.linspace(0, height - 1, height, device=device, dtype=dtype)
if normalized_coordinates:
xs = (xs / (width - 1) - 0.5) * 2
ys = (ys / (height - 1) - 0.5) * 2
grid = torch.stack(torch.meshgrid(ys, xs, indexing="ij"), dim=-1)
grid = grid.permute(1, 0, 2).unsqueeze(0)
return grid
def spatial_expectation2d(input: torch.Tensor, normalized_coordinates: bool = True) -> torch.Tensor:
    r"""
    Copied from kornia library : kornia/geometry/subpix/dsnt.py:76
    Compute the expectation of coordinate values using spatial probabilities.

    The input heatmap is assumed to represent a valid spatial probability distribution,
    which can be achieved using :func:`~kornia.geometry.subpixel.spatial_softmax2d`.

    Args:
        input (`torch.Tensor` of shape `(batch_size, embed_dim, height, width)`):
            The input tensor representing dense spatial probabilities.
        normalized_coordinates (`bool`):
            Whether to return the coordinates normalized in the range of :math:`[-1, 1]`. Otherwise, it will return
            the coordinates in the range of the input shape.

    Returns:
        output (`torch.Tensor` of shape `(batch_size, embed_dim, 2)`)
            Expected value of the 2D coordinates. Output order of the coordinates is (x, y).

    Examples:
        >>> heatmaps = torch.tensor([[[
        ... [0., 0., 0.],
        ... [0., 0., 0.],
        ... [0., 1., 0.]]]])
        >>> spatial_expectation2d(heatmaps, False)
        tensor([[[1., 2.]]])
    """
    batch_size, embed_dim, height, width = input.shape
    # Coordinate grid matching the heatmap resolution
    grid = create_meshgrid(height, width, normalized_coordinates, input.device)
    grid = grid.to(input.dtype)
    flat_x = grid[..., 0].reshape(-1)
    flat_y = grid[..., 1].reshape(-1)
    probabilities = input.view(batch_size, embed_dim, -1)
    # Expected coordinates are probability-weighted sums over all spatial positions
    expected_x = torch.sum(flat_x * probabilities, -1, keepdim=True)
    expected_y = torch.sum(flat_y * probabilities, -1, keepdim=True)
    expectation = torch.cat([expected_x, expected_y], -1)
    return expectation.view(batch_size, embed_dim, 2)
@auto_docstring(
custom_intro="""
EfficientLoFTR model taking images as inputs and outputting the matching of them.
"""
)
class EfficientLoFTRForKeypointMatching(EfficientLoFTRPreTrainedModel):
"""EfficientLoFTR dense image matcher
Given two images, we determine the correspondences by:
1. Extracting coarse and fine features through a backbone
2. Transforming coarse features through self and cross attention
3. Matching coarse features to obtain coarse coordinates of matches
4. Obtaining full resolution fine features by fusing transformed and backbone coarse features
5. Refining the coarse matches using fine feature patches centered at each coarse match in a two-stage refinement
Yifan Wang, Xingyi He, Sida Peng, Dongli Tan and Xiaowei Zhou.
Efficient LoFTR: Semi-Dense Local Feature Matching with Sparse-Like Speed
In CVPR, 2024. https://huggingface.co/papers/2403.04765
"""
    def __init__(self, config: EfficientLoFTRConfig):
        super().__init__(config)
        self.config = config
        # Backbone + coarse-level transformer producing coarse and residual features
        self.efficientloftr = EfficientLoFTRModel(config)
        # Fuses coarse and residual features into full-resolution fine feature patches
        self.refinement_layer = EfficientLoFTRFineFusionLayer(config)
        self.post_init()
def _get_matches_from_scores(self, scores: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
"""
Based on a keypoint score matrix, compute the best keypoint matches between the first and second image.
Since each image pair can have different number of matches, the matches are concatenated together for all pair
in the batch and a batch_indices tensor is returned to specify which match belong to which element in the batch.
Note:
This step can be done as a postprocessing step, because does not involve any model weights/params.
However, we keep it in the modeling code for consistency with other keypoint matching models AND for
easier torch.compile/torch.export (all ops are in torch).
Args:
scores (`torch.Tensor` of shape `(batch_size, height_0, width_0, height_1, width_1)`):
Scores of keypoints
Returns:
matched_indices (`torch.Tensor` of shape `(2, num_matches)`):
Indices representing which pixel in the first image matches which pixel in the second image
matching_scores (`torch.Tensor` of shape `(num_matches,)`):
Scores of each match
"""
batch_size, height0, width0, height1, width1 = scores.shape
scores = scores.view(batch_size, height0 * width0, height1 * width1)
# For each keypoint, get the best match
max_0 = scores.max(2, keepdim=True).values
max_1 = scores.max(1, keepdim=True).values
# 1. Thresholding
mask = scores > self.config.coarse_matching_threshold
# 2. Border removal
mask = mask.reshape(batch_size, height0, width0, height1, width1)
mask = mask_border(mask, self.config.coarse_matching_border_removal, False)
mask = mask.reshape(batch_size, height0 * width0, height1 * width1)
# 3. Mutual nearest neighbors
mask = mask * (scores == max_0) * (scores == max_1)
# 4. Fine coarse matches
masked_scores = scores * mask
matching_scores_0, max_indices_0 = masked_scores.max(1)
matching_scores_1, max_indices_1 = masked_scores.max(2)
matching_indices = torch.cat([max_indices_0, max_indices_1]).reshape(batch_size, 2, -1)
matching_scores = torch.stack([matching_scores_0, matching_scores_1], dim=1)
# For the keypoints not meeting the threshold score, set the indices to -1 which corresponds to no matches found
matching_indices = torch.where(matching_scores > 0, matching_indices, -1)
return matching_indices, matching_scores
    def _coarse_matching(
        self, coarse_features: torch.Tensor, coarse_scale: float
    ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """
        For each image pair, compute the matching confidence between each coarse element (by default (image_height / 8)
        * (image_width / 8 elements)) from the first image to the second image.

        Note:
            This step can be done as a postprocessing step, because does not involve any model weights/params.
            However, we keep it in the modeling code for consistency with other keypoint matching models AND for
            easier torch.compile/torch.export (all ops are in torch).

        Args:
            coarse_features (`torch.Tensor` of shape `(batch_size, 2, hidden_size, coarse_height, coarse_width)`):
                Coarse features
            coarse_scale (`float`): Scale between the image size and the coarse size

        Returns:
            keypoints (`torch.Tensor` of shape `(batch_size, 2, num_matches, 2)`):
                Keypoints coordinates.
            matching_scores (`torch.Tensor` of shape `(batch_size, 2, num_matches)`):
                The confidence matching score of each keypoint.
            matched_indices (`torch.Tensor` of shape `(batch_size, 2, num_matches)`):
                Indices which indicates which keypoint in an image matched with which keypoint in the other image. For
                both image in the pair.
        """
        batch_size, _, embed_dim, height, width = coarse_features.shape
        # (batch_size, 2, embed_dim, height, width) -> (batch_size, 2, height * width, embed_dim)
        coarse_features = coarse_features.permute(0, 1, 3, 4, 2)
        coarse_features = coarse_features.reshape(batch_size, 2, -1, embed_dim)
        # Scale features by 1/sqrt(embed_dim), as in scaled dot-product attention
        coarse_features = coarse_features / coarse_features.shape[-1] ** 0.5
        coarse_features_0 = coarse_features[:, 0]
        coarse_features_1 = coarse_features[:, 1]
        # Pairwise similarity between every coarse position of image 0 and image 1
        similarity = coarse_features_0 @ coarse_features_1.transpose(-1, -2)
        similarity = similarity / self.config.coarse_matching_temperature
        if self.config.coarse_matching_skip_softmax:
            confidence = similarity
        else:
            # Dual-softmax: product of the softmaxes over each image's positions
            confidence = nn.functional.softmax(similarity, 1) * nn.functional.softmax(similarity, 2)
        confidence = confidence.view(batch_size, height, width, height, width)
        matched_indices, matching_scores = self._get_matches_from_scores(confidence)
        # Convert flat indices to (x, y) coordinates, scaled back to image resolution
        keypoints = torch.stack([matched_indices % width, matched_indices // width], dim=-1) * coarse_scale
        return keypoints, matching_scores, matched_indices
def _get_first_stage_fine_matching(
    self,
    fine_confidence: torch.Tensor,
    coarse_matched_keypoints: torch.Tensor,
    fine_window_size: int,
    fine_scale: float,
) -> tuple[torch.Tensor, torch.Tensor]:
    """
    For each coarse match, select the most confident pair of fine positions inside the two fine windows.

    The flattened argmax index encodes both window positions. For example, for a fine_window_size of 64 (8 * 8),
    the index 2474 represents the matching between the index 38 (2474 // 64) in the fine window of the first
    image, and the index 42 (2474 % 64) in the second image. Index 38 corresponds to the position (4, 6)
    (38 // 8 and 38 % 8). Each coarse keypoint is then shifted to the matched fine coordinates in the first and
    second image.

    Note:
        This step can be done as a postprocessing step, because it does not involve any model weights/params.
        However, we keep it in the modeling code for consistency with other keypoint matching models AND for
        easier torch.compile/torch.export (all ops are in torch).

    Args:
        fine_confidence (`torch.Tensor` of shape `(batch_size, num_matches, fine_window_size, fine_window_size)`):
            First stage confidence of matching fine features between the first and the second image
        coarse_matched_keypoints (`torch.Tensor` of shape `(batch_size, 2, num_matches, 2)`):
            Coarse matched keypoints between the first and the second image.
        fine_window_size (`int`):
            Number of cells in the window used to refine matches
        fine_scale (`float`):
            Scale between the size of fine features and coarse features

    Returns:
        indices (`torch.Tensor` of shape `(batch_size, 2, num_matches, 1)`):
            Flattened position of the selected fine cell in each image's window
        fine_matches (`torch.Tensor` of shape `(batch_size, 2, num_matches, 2)`):
            Coordinates of matched keypoints after the first fine stage
    """
    batch_size, num_keypoints, _, _ = fine_confidence.shape
    fine_kernel_size = torch_int(fine_window_size**0.5)
    fine_confidence = fine_confidence.reshape(batch_size, num_keypoints, -1)
    # Flattened argmax over all (first-window cell, second-window cell) pairs.
    _, indices = torch.max(fine_confidence, dim=-1)
    indices = indices[..., None]
    indices_0 = indices // fine_window_size  # cell index in the first image's window
    indices_1 = indices % fine_window_size  # cell index in the second image's window
    # (x, y) offset of each window cell, centered on the middle of the window.
    grid = create_meshgrid(
        fine_kernel_size,
        fine_kernel_size,
        normalized_coordinates=False,
        device=fine_confidence.device,
        dtype=fine_confidence.dtype,
    )
    grid = grid - (fine_kernel_size // 2) + 0.5
    grid = grid.reshape(1, 1, -1, 2).expand(batch_size, num_keypoints, -1, -1)
    # Gather along the window-cell dimension (dim 2). Gathering along dim 1 would index the keypoint
    # dimension with window positions and, since `grid` is expanded (identical) along dim 1, yield a
    # constant offset for every match.
    delta_0 = torch.gather(grid, 2, indices_0.unsqueeze(-1).expand(-1, -1, -1, 2)).squeeze(2)
    delta_1 = torch.gather(grid, 2, indices_1.unsqueeze(-1).expand(-1, -1, -1, 2)).squeeze(2)
    # Shift the coarse keypoints to the selected fine cell in each image.
    fine_matches_0 = coarse_matched_keypoints[:, 0] + delta_0 * fine_scale
    fine_matches_1 = coarse_matched_keypoints[:, 1] + delta_1 * fine_scale
    indices = torch.stack([indices_0, indices_1], dim=1)
    fine_matches = torch.stack([fine_matches_0, fine_matches_1], dim=1)
    return indices, fine_matches
def _get_second_stage_fine_matching(
    self,
    indices: torch.Tensor,
    fine_matches: torch.Tensor,
    fine_confidence: torch.Tensor,
    fine_window_size: int,
    fine_scale: float,
) -> torch.Tensor:
    """
    For the given position in their respective fine windows, retrieve the 3x3 fine confidences around this position.
    After applying softmax to these confidences, compute the 2D spatial expected coordinates.
    Shift the first stage fine matching with these expected coordinates (only the second image's coordinates are
    refined; the first image's coordinates are returned unchanged).

    Note:
        This step can be done as a postprocessing step, because it does not involve any model weights/params.
        However, we keep it in the modeling code for consistency with other keypoint matching models AND for
        easier torch.compile/torch.export (all ops are in torch).

    Args:
        indices (`torch.Tensor` of shape `(batch_size, 2, num_matches, 1)`):
            Flattened positions of each keypoint in the fine windows of the first and second image
            (as produced by `_get_first_stage_fine_matching` — shapes hedged, confirm against caller).
        fine_matches (`torch.Tensor` of shape `(batch_size, 2, num_matches, 2)`):
            Coordinates of matched keypoints after the first fine stage
        fine_confidence (`torch.Tensor` of shape `(batch_size, num_matches, fine_window_size, (fine_kernel_size + 2) ** 2)`):
            Second stage confidence of matching fine features between the first and the second (padded) image
        fine_window_size (`int`):
            Number of cells in the window used to refine matches
        fine_scale (`float`):
            Scale between the size of fine features and coarse features

    Returns:
        fine_matches (`torch.Tensor` of shape `(batch_size, 2, num_matches, 2)`):
            Coordinates of matched keypoints after the second fine stage
    """
    batch_size, num_keypoints, _, _ = fine_confidence.shape
    fine_kernel_size = torch_int(fine_window_size**0.5)
    # Position in the first image's window (flattened) and in the second image's window (split into row/col).
    indices_0 = indices[:, 0]
    indices_1 = indices[:, 1]
    indices_1_i = indices_1 // fine_kernel_size
    indices_1_j = indices_1 % fine_kernel_size
    # Index helpers broadcast against the 3x3 neighborhood below:
    # batch_indices (batch_size, 1, 1, 1); matches_indices (1, num_keypoints, 1, 1);
    # indices_0/indices_1_i/indices_1_j broadcast to (batch_size, num_keypoints, 3, 3) after adding delta.
    batch_indices = torch.arange(batch_size, device=indices_0.device).reshape(batch_size, 1, 1, 1)
    matches_indices = torch.arange(num_keypoints, device=indices_0.device).reshape(1, num_keypoints, 1, 1)
    indices_0 = indices_0[..., None]
    indices_1_i = indices_1_i[..., None]
    indices_1_j = indices_1_j[..., None]
    # Normalized 3x3 meshgrid cast to long yields integer offsets in {-1, 0, 1} around the selected cell.
    delta = create_meshgrid(3, 3, normalized_coordinates=True, device=indices_0.device).to(torch.long)
    delta = delta[None, ...]
    indices_1_i = indices_1_i + delta[..., 1]
    indices_1_j = indices_1_j + delta[..., 0]
    # The second image's window is padded by 1 on each side, hence fine_kernel_size + 2 per spatial dim,
    # so the 3x3 neighborhood around any interior cell is always in range.
    fine_confidence = fine_confidence.reshape(
        batch_size, num_keypoints, fine_window_size, fine_kernel_size + 2, fine_kernel_size + 2
    )
    # (batch_size, seq_len, fine_window_size, fine_kernel_size + 2, fine_kernel_size + 2) -> (batch_size, seq_len, 3, 3)
    fine_confidence = fine_confidence[batch_indices, matches_indices, indices_0, indices_1_i, indices_1_j]
    fine_confidence = fine_confidence.reshape(batch_size, num_keypoints, 9)
    # Temperature-scaled softmax over the 9 neighbors, then 2D soft-argmax to get a sub-cell offset in [-1, 1].
    fine_confidence = nn.functional.softmax(
        fine_confidence / self.config.fine_matching_regress_temperature, dim=-1
    )
    heatmap = fine_confidence.reshape(batch_size, num_keypoints, 3, 3)
    fine_coordinates_normalized = spatial_expectation2d(heatmap, True)[0]
    # Only the second image's coordinates are refined; (3 // 2) == 1 converts the normalized offset to cells.
    fine_matches_0 = fine_matches[:, 0]
    fine_matches_1 = fine_matches[:, 1] + (fine_coordinates_normalized * (3 // 2) * fine_scale)
    fine_matches = torch.stack([fine_matches_0, fine_matches_1], dim=1)
    return fine_matches
def _fine_matching(
    self,
    fine_features_0: torch.Tensor,
    fine_features_1: torch.Tensor,
    coarse_matched_keypoints: torch.Tensor,
    fine_scale: float,
) -> torch.Tensor:
    """
    For each coarse pixel with a corresponding window of fine features, compute the matching confidence between fine
    features in the first image and the second image.

    Fine features are sliced in two parts:
    - The first part used for the first stage are the first fine_embed_dim - config.fine_matching_slice_dim
      (64 - 8 = 56 by default) channels.
    - The second part used for the second stage are the last config.fine_matching_slice_dim (8 by default) channels.

    Args:
        fine_features_0 (`torch.Tensor` of shape `(batch_size, num_matches, fine_window_size, fine_embed_dim)`):
            Fine feature windows from the first image
        fine_features_1 (`torch.Tensor` of shape `(batch_size, num_matches, (fine_kernel_size + 2) ** 2, fine_embed_dim)`):
            Fine feature windows (padded by 1 cell on each side) from the second image
        coarse_matched_keypoints (`torch.Tensor` of shape `(batch_size, 2, num_matches, 2)`):
            Keypoint coordinates found in coarse matching for the first and second image
        fine_scale (`float`):
            Scale between the size of fine features and coarse features

    Returns:
        fine_coordinates (`torch.Tensor` of shape `(batch_size, 2, num_matches, 2)`):
            Matched keypoints between the first and the second image after both fine stages.
    """
    batch_size, num_keypoints, fine_window_size, fine_embed_dim = fine_features_0.shape
    fine_matching_slice_dim = self.config.fine_matching_slice_dim
    fine_kernel_size = torch_int(fine_window_size**0.5)
    # Split fine features into first stage (fine_embed_dim - slice_dim channels) and second stage (slice_dim channels)
    split_fine_features_0 = torch.split(fine_features_0, fine_embed_dim - fine_matching_slice_dim, -1)
    split_fine_features_1 = torch.split(fine_features_1, fine_embed_dim - fine_matching_slice_dim, -1)
    # Retrieve and normalize first stage fine features
    fine_features_0 = split_fine_features_0[0]
    fine_features_1 = split_fine_features_1[0]
    fine_features_0 = fine_features_0 / fine_features_0.shape[-1] ** 0.5
    fine_features_1 = fine_features_1 / fine_features_1.shape[-1] ** 0.5
    # Compute first stage confidence: (batch_size, num_matches, fine_window_size, (fine_kernel_size + 2) ** 2)
    fine_confidence = fine_features_0 @ fine_features_1.transpose(-1, -2)
    # Dual-softmax over the two window-position dimensions (-2: first image window, -1: second image window).
    # The tensor is 4D here (batch dim kept), so softmax over dims 1 and 2 would normalize across unrelated
    # keypoints instead of window positions (contrast with the 3D coarse similarity, where dims 1/2 are correct).
    fine_confidence = nn.functional.softmax(fine_confidence, -2) * nn.functional.softmax(fine_confidence, -1)
    fine_confidence = fine_confidence.reshape(
        batch_size, num_keypoints, fine_window_size, fine_kernel_size + 2, fine_kernel_size + 2
    )
    # Crop the 1-cell padding border of the second image's window for the first stage.
    fine_confidence = fine_confidence[..., 1:-1, 1:-1]
    first_stage_fine_confidence = fine_confidence.reshape(
        batch_size, num_keypoints, fine_window_size, fine_window_size
    )
    fine_indices, fine_matches = self._get_first_stage_fine_matching(
        first_stage_fine_confidence,
        coarse_matched_keypoints,
        fine_window_size,
        fine_scale,
    )
    # Retrieve second stage fine features and normalize (only the second image's features, as in the original code)
    fine_features_0 = split_fine_features_0[1]
    fine_features_1 = split_fine_features_1[1]
    fine_features_1 = fine_features_1 / fine_matching_slice_dim**0.5
    # Compute second stage fine confidence
    second_stage_fine_confidence = fine_features_0 @ fine_features_1.transpose(-1, -2)
    fine_coordinates = self._get_second_stage_fine_matching(
        fine_indices,
        fine_matches,
        second_stage_fine_confidence,
        fine_window_size,
        fine_scale,
    )
    return fine_coordinates
@auto_docstring
@can_return_tuple
def forward(
    self,
    pixel_values: torch.FloatTensor,
    labels: torch.LongTensor | None = None,
    **kwargs: Unpack[TransformersKwargs],
) -> EfficientLoFTRKeypointMatchingOutput:
    r"""
    Examples:

    ```python
    >>> from transformers import AutoImageProcessor, AutoModel
    >>> import torch
    >>> from PIL import Image
    >>> import httpx
    >>> from io import BytesIO

    >>> url = "https://github.com/magicleap/SuperGluePretrainedNetwork/blob/master/assets/phototourism_sample_images/london_bridge_78916675_4568141288.jpg?raw=true"
    >>> with httpx.stream("GET", url) as response:
    ...     image1 = Image.open(BytesIO(response.read()))
    >>> url = "https://github.com/magicleap/SuperGluePretrainedNetwork/blob/master/assets/phototourism_sample_images/london_bridge_19481797_2295892421.jpg?raw=true"
    >>> with httpx.stream("GET", url) as response:
    ...     image2 = Image.open(BytesIO(response.read()))
    >>> images = [image1, image2]

    >>> processor = AutoImageProcessor.from_pretrained("zju-community/efficient_loftr")
    >>> model = AutoModel.from_pretrained("zju-community/efficient_loftr")

    >>> with torch.no_grad():
    ...     inputs = processor(images, return_tensors="pt")
    ...     outputs = model(**inputs)
    ```"""
    if labels is not None:
        raise ValueError("EfficientLoFTR is not trainable, no labels should be provided.")

    # 1. Extract coarse and residual features from the backbone
    model_outputs: BackboneOutput = self.efficientloftr(pixel_values, **kwargs)
    features = model_outputs.feature_maps

    # 2. Compute coarse-level matching on the lowest-resolution feature map
    coarse_features = features[0]
    _, coarse_height, coarse_width = coarse_features.shape[-3:]
    batch_size, _, _, height, width = pixel_values.shape
    coarse_scale = height / coarse_height
    coarse_keypoints, coarse_matching_scores, coarse_matched_indices = self._coarse_matching(
        coarse_features, coarse_scale
    )

    # 3. Fine-level refinement of the coarse features with the higher-resolution residual features
    residual_features = features[1:]
    coarse_features = coarse_features / self.config.hidden_size**0.5
    fine_features_0, fine_features_1 = self.refinement_layer(coarse_features, residual_features)

    # Keep only the fine feature windows of the coarse-matched positions
    batch_indices = torch.arange(batch_size)[..., None]
    fine_features_0 = fine_features_0[batch_indices, coarse_matched_indices[:, 0]]
    fine_features_1 = fine_features_1[batch_indices, coarse_matched_indices[:, 1]]

    # 4. Compute fine-level matching
    fine_height = torch_int(coarse_height * coarse_scale)
    fine_scale = height / fine_height
    matching_keypoints = self._fine_matching(fine_features_0, fine_features_1, coarse_keypoints, fine_scale)

    # Normalize keypoint coordinates to [0, 1] relative to the input image size (in place)
    matching_keypoints[:, :, :, 0] = matching_keypoints[:, :, :, 0] / width
    matching_keypoints[:, :, :, 1] = matching_keypoints[:, :, :, 1] / height

    # The model is not trainable, so no loss is ever computed
    loss = None
    return EfficientLoFTRKeypointMatchingOutput(
        loss=loss,
        matches=coarse_matched_indices,
        matching_scores=coarse_matching_scores,
        keypoints=matching_keypoints,
        hidden_states=model_outputs.hidden_states,
        attentions=model_outputs.attentions,
    )
# Public symbols re-exported by the package `__init__` for this model.
__all__ = ["EfficientLoFTRPreTrainedModel", "EfficientLoFTRModel", "EfficientLoFTRForKeypointMatching"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/efficientloftr/modeling_efficientloftr.py",
"license": "Apache License 2.0",
"lines": 1144,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:tests/models/efficientloftr/test_image_processing_efficientloftr.py | # Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import unittest
import numpy as np
import pytest
from parameterized import parameterized
from transformers.testing_utils import (
require_torch,
require_torch_accelerator,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_torch_available, is_torchvision_available, is_vision_available
from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.models.efficientloftr.modeling_efficientloftr import EfficientLoFTRKeypointMatchingOutput
if is_vision_available():
from transformers import EfficientLoFTRImageProcessor
if is_torchvision_available():
from transformers import EfficientLoFTRImageProcessorFast
def random_array(size):
    """Return a random integer array of the given shape, values drawn uniformly from [0, 255)."""
    return np.random.randint(0, 255, size=size)
def random_tensor(size):
    """Return a random float tensor of the given shape, values drawn uniformly from [0, 1)."""
    sample = torch.rand(size)
    return sample
class EfficientLoFTRImageProcessingTester:
    """Tester for EfficientLoFTRImageProcessor"""

    def __init__(
        self,
        parent,
        batch_size=6,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_grayscale=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        # Default to the checkpoint resolution when no explicit size is given.
        self.size = {"height": 480, "width": 640} if size is None else size
        self.do_grayscale = do_grayscale

    def prepare_image_processor_dict(self):
        """Return the kwargs used to instantiate the image processor under test."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_grayscale": self.do_grayscale,
        }

    def expected_output_image_shape(self, images):
        """Shape of one processed pair: (2, channels, target height, target width)."""
        return (2, self.num_channels, self.size["height"], self.size["width"])

    def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False, pairs=True, batch_size=None):
        """Create random test images, optionally grouped into consecutive pairs of two."""
        if batch_size is None:
            batch_size = self.batch_size
        images = prepare_image_inputs(
            batch_size=batch_size,
            num_channels=self.num_channels,
            min_resolution=self.min_resolution,
            max_resolution=self.max_resolution,
            equal_resolution=equal_resolution,
            numpify=numpify,
            torchify=torchify,
        )
        if pairs:
            images = [images[pair_start : pair_start + 2] for pair_start in range(0, len(images), 2)]
        return images

    def prepare_keypoint_matching_output(self, pixel_values):
        """Prepare a fake output for the keypoint matching model with random matches between 50 keypoints per image."""
        max_number_keypoints = 50
        num_pairs = len(pixel_values)
        keypoints = torch.zeros((num_pairs, 2, max_number_keypoints, 2))
        matches = torch.full((num_pairs, 2, max_number_keypoints), -1, dtype=torch.int)
        scores = torch.zeros((num_pairs, 2, max_number_keypoints))
        for pair_idx in range(num_pairs):
            # The random draws below are kept in this exact order so seeded runs stay reproducible.
            num_keypoints0 = np.random.randint(10, max_number_keypoints)
            num_keypoints1 = np.random.randint(10, max_number_keypoints)
            num_matches = np.random.randint(5, min(num_keypoints0, num_keypoints1))
            keypoints[pair_idx, 0, :num_keypoints0] = torch.rand((num_keypoints0, 2))
            keypoints[pair_idx, 1, :num_keypoints1] = torch.rand((num_keypoints1, 2))
            match_indices0 = torch.randperm(num_keypoints1, dtype=torch.int)[:num_matches]
            match_indices1 = torch.randperm(num_keypoints0, dtype=torch.int)[:num_matches]
            # Cross-reference the matched indices between the two images of the pair.
            matches[pair_idx, 0, match_indices1] = match_indices0
            matches[pair_idx, 1, match_indices0] = match_indices1
            scores[pair_idx, 0, match_indices1] = torch.rand((num_matches,))
            scores[pair_idx, 1, match_indices0] = torch.rand((num_matches,))
        return EfficientLoFTRKeypointMatchingOutput(keypoints=keypoints, matches=matches, matching_scores=scores)
@require_torch
@require_vision
class EfficientLoFTRImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase):
image_processing_class = EfficientLoFTRImageProcessor if is_vision_available() else None
fast_image_processing_class = EfficientLoFTRImageProcessorFast if is_torchvision_available() else None
def setUp(self) -> None:
super().setUp()
self.image_processor_tester = EfficientLoFTRImageProcessingTester(self)
@property
def image_processor_dict(self):
return self.image_processor_tester.prepare_image_processor_dict()
def test_image_processing(self):
for image_processing_class in self.image_processor_list:
image_processing = image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(image_processing, "do_resize"))
self.assertTrue(hasattr(image_processing, "size"))
self.assertTrue(hasattr(image_processing, "do_rescale"))
self.assertTrue(hasattr(image_processing, "rescale_factor"))
self.assertTrue(hasattr(image_processing, "do_grayscale"))
def test_image_processor_from_dict_with_kwargs(self):
for image_processing_class in self.image_processor_list:
image_processor = image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size, {"height": 480, "width": 640})
image_processor = image_processing_class.from_dict(
self.image_processor_dict, size={"height": 42, "width": 42}
)
self.assertEqual(image_processor.size, {"height": 42, "width": 42})
@unittest.skip(reason="SuperPointImageProcessor is always supposed to return a grayscaled image")
def test_call_numpy_4_channels(self):
pass
def test_number_and_format_of_images_in_input(self):
for image_processing_class in self.image_processor_list:
image_processor = image_processing_class.from_dict(self.image_processor_dict)
# Cases where the number of images and the format of lists in the input is correct
image_input = self.image_processor_tester.prepare_image_inputs(pairs=False, batch_size=2)
image_processed = image_processor.preprocess(image_input, return_tensors="pt")
self.assertEqual((1, 2, 3, 480, 640), tuple(image_processed["pixel_values"].shape))
image_input = self.image_processor_tester.prepare_image_inputs(pairs=True, batch_size=2)
image_processed = image_processor.preprocess(image_input, return_tensors="pt")
self.assertEqual((1, 2, 3, 480, 640), tuple(image_processed["pixel_values"].shape))
image_input = self.image_processor_tester.prepare_image_inputs(pairs=True, batch_size=4)
image_processed = image_processor.preprocess(image_input, return_tensors="pt")
self.assertEqual((2, 2, 3, 480, 640), tuple(image_processed["pixel_values"].shape))
image_input = self.image_processor_tester.prepare_image_inputs(pairs=True, batch_size=6)
image_processed = image_processor.preprocess(image_input, return_tensors="pt")
self.assertEqual((3, 2, 3, 480, 640), tuple(image_processed["pixel_values"].shape))
# Cases where the number of images or the format of lists in the input is incorrect
## List of 4 images
image_input = self.image_processor_tester.prepare_image_inputs(pairs=False, batch_size=4)
with self.assertRaises(ValueError) as cm:
image_processor.preprocess(image_input, return_tensors="pt")
self.assertEqual(ValueError, cm.exception.__class__)
## List of 3 images
image_input = self.image_processor_tester.prepare_image_inputs(pairs=False, batch_size=3)
with self.assertRaises(ValueError) as cm:
image_processor.preprocess(image_input, return_tensors="pt")
self.assertEqual(ValueError, cm.exception.__class__)
## List of 2 pairs and 1 image
image_input = self.image_processor_tester.prepare_image_inputs(pairs=True, batch_size=3)
with self.assertRaises(ValueError) as cm:
image_processor.preprocess(image_input, return_tensors="pt")
self.assertEqual(ValueError, cm.exception.__class__)
@parameterized.expand(
[
([random_array((3, 100, 200)), random_array((3, 100, 200))], (1, 2, 3, 480, 640)),
([[random_array((3, 100, 200)), random_array((3, 100, 200))]], (1, 2, 3, 480, 640)),
([random_tensor((3, 100, 200)), random_tensor((3, 100, 200))], (1, 2, 3, 480, 640)),
([random_tensor((3, 100, 200)), random_tensor((3, 100, 200))], (1, 2, 3, 480, 640)),
],
)
def test_valid_image_shape_in_input(self, image_input, output):
for image_processing_class in self.image_processor_list:
image_processor = image_processing_class.from_dict(self.image_processor_dict)
image_processed = image_processor.preprocess(image_input, return_tensors="pt")
self.assertEqual(output, tuple(image_processed["pixel_values"].shape))
@parameterized.expand(
[
(random_array((3, 100, 200)),),
([random_array((3, 100, 200))],),
(random_array((1, 3, 100, 200)),),
([[random_array((3, 100, 200))]],),
([[random_array((3, 100, 200))], [random_array((3, 100, 200))]],),
([random_array((1, 3, 100, 200)), random_array((1, 3, 100, 200))],),
(random_array((1, 1, 3, 100, 200)),),
],
)
def test_invalid_image_shape_in_input(self, image_input):
for image_processing_class in self.image_processor_list:
image_processor = image_processing_class.from_dict(self.image_processor_dict)
with self.assertRaises(ValueError) as cm:
image_processor(image_input, return_tensors="pt")
self.assertEqual(ValueError, cm.exception.__class__)
def test_input_images_properly_paired(self):
for image_processing_class in self.image_processor_list:
image_processor = image_processing_class.from_dict(self.image_processor_dict)
image_inputs = self.image_processor_tester.prepare_image_inputs()
pre_processed_images = image_processor(image_inputs, return_tensors="pt")
self.assertEqual(len(pre_processed_images["pixel_values"].shape), 5)
self.assertEqual(pre_processed_images["pixel_values"].shape[1], 2)
def test_input_not_paired_images_raises_error(self):
for image_processing_class in self.image_processor_list:
image_processor = image_processing_class.from_dict(self.image_processor_dict)
image_inputs = self.image_processor_tester.prepare_image_inputs(pairs=False)
with self.assertRaises(ValueError):
image_processor(image_inputs[0])
def test_input_image_properly_converted_to_grayscale(self):
for image_processing_class in self.image_processor_list:
image_processor = image_processing_class.from_dict(self.image_processor_dict)
image_inputs = self.image_processor_tester.prepare_image_inputs()
pre_processed_images = image_processor(image_inputs, return_tensors="pt")
for image_pair in pre_processed_images["pixel_values"]:
for image in image_pair:
self.assertTrue(
torch.all(image[0, ...] == image[1, ...]) and torch.all(image[1, ...] == image[2, ...])
)
def test_call_numpy(self):
# Test overwritten because SuperGlueImageProcessor combines images by pair to feed it into SuperGlue
# Initialize image_processing
for image_processing_class in self.image_processor_list:
image_processing = image_processing_class(**self.image_processor_dict)
# create random numpy tensors
image_pairs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, numpify=True)
for image_pair in image_pairs:
self.assertEqual(len(image_pair), 2)
expected_batch_size = int(self.image_processor_tester.batch_size / 2)
# Test with 2 images
encoded_images = image_processing(image_pairs[0], return_tensors="pt").pixel_values
expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_pairs[0])
self.assertEqual(tuple(encoded_images.shape), (1, *expected_output_image_shape))
# Test with list of pairs
encoded_images = image_processing(image_pairs, return_tensors="pt").pixel_values
expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_pairs)
self.assertEqual(tuple(encoded_images.shape), (expected_batch_size, *expected_output_image_shape))
# Test without paired images
image_pairs = self.image_processor_tester.prepare_image_inputs(
equal_resolution=False, numpify=True, pairs=False
)
with self.assertRaises(ValueError):
image_processing(image_pairs, return_tensors="pt").pixel_values
def test_call_pil(self):
# Test overwritten because SuperGlueImageProcessor combines images by pair to feed it into SuperGlue
# Initialize image_processing
for image_processing_class in self.image_processor_list:
image_processing = image_processing_class(**self.image_processor_dict)
# create random PIL images
image_pairs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False)
for image_pair in image_pairs:
self.assertEqual(len(image_pair), 2)
expected_batch_size = int(self.image_processor_tester.batch_size / 2)
# Test with 2 images
encoded_images = image_processing(image_pairs[0], return_tensors="pt").pixel_values
expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_pairs[0])
self.assertEqual(tuple(encoded_images.shape), (1, *expected_output_image_shape))
# Test with list of pairs
encoded_images = image_processing(image_pairs, return_tensors="pt").pixel_values
expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_pairs)
self.assertEqual(tuple(encoded_images.shape), (expected_batch_size, *expected_output_image_shape))
# Test without paired images
image_pairs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, pairs=False)
with self.assertRaises(ValueError):
image_processing(image_pairs, return_tensors="pt").pixel_values
def test_call_pytorch(self):
# Test overwritten because SuperGlueImageProcessor combines images by pair to feed it into SuperGlue
# Initialize image_processing
for image_processing_class in self.image_processor_list:
image_processing = image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
image_pairs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, torchify=True)
for image_pair in image_pairs:
self.assertEqual(len(image_pair), 2)
expected_batch_size = int(self.image_processor_tester.batch_size / 2)
# Test with 2 images
encoded_images = image_processing(image_pairs[0], return_tensors="pt").pixel_values
expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_pairs[0])
self.assertEqual(tuple(encoded_images.shape), (1, *expected_output_image_shape))
# Test with list of pairs
encoded_images = image_processing(image_pairs, return_tensors="pt").pixel_values
expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_pairs)
self.assertEqual(tuple(encoded_images.shape), (expected_batch_size, *expected_output_image_shape))
# Test without paired images
image_pairs = self.image_processor_tester.prepare_image_inputs(
equal_resolution=False, torchify=True, pairs=False
)
with self.assertRaises(ValueError):
image_processing(image_pairs, return_tensors="pt").pixel_values
def test_image_processor_with_list_of_two_images(self):
for image_processing_class in self.image_processor_list:
image_processing = image_processing_class(**self.image_processor_dict)
image_pairs = self.image_processor_tester.prepare_image_inputs(
equal_resolution=False, numpify=True, batch_size=2, pairs=False
)
self.assertEqual(len(image_pairs), 2)
self.assertTrue(isinstance(image_pairs[0], np.ndarray))
self.assertTrue(isinstance(image_pairs[1], np.ndarray))
expected_batch_size = 1
encoded_images = image_processing(image_pairs, return_tensors="pt").pixel_values
expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_pairs[0])
self.assertEqual(tuple(encoded_images.shape), (expected_batch_size, *expected_output_image_shape))
@require_torch
def test_post_processing_keypoint_matching(self):
def check_post_processed_output(post_processed_output, image_pair_size):
for post_processed_output, (image_size0, image_size1) in zip(post_processed_output, image_pair_size):
self.assertTrue("keypoints0" in post_processed_output)
self.assertTrue("keypoints1" in post_processed_output)
self.assertTrue("matching_scores" in post_processed_output)
keypoints0 = post_processed_output["keypoints0"]
keypoints1 = post_processed_output["keypoints1"]
all_below_image_size0 = torch.all(keypoints0[:, 0] <= image_size0[1]) and torch.all(
keypoints0[:, 1] <= image_size0[0]
)
all_below_image_size1 = torch.all(keypoints1[:, 0] <= image_size1[1]) and torch.all(
keypoints1[:, 1] <= image_size1[0]
)
all_above_zero0 = torch.all(keypoints0[:, 0] >= 0) and torch.all(keypoints0[:, 1] >= 0)
all_above_zero1 = torch.all(keypoints0[:, 0] >= 0) and torch.all(keypoints0[:, 1] >= 0)
self.assertTrue(all_below_image_size0)
self.assertTrue(all_below_image_size1)
self.assertTrue(all_above_zero0)
self.assertTrue(all_above_zero1)
all_scores_different_from_minus_one = torch.all(post_processed_output["matching_scores"] != -1)
self.assertTrue(all_scores_different_from_minus_one)
for image_processing_class in self.image_processor_list:
image_processor = image_processing_class.from_dict(self.image_processor_dict)
image_inputs = self.image_processor_tester.prepare_image_inputs()
pre_processed_images = image_processor.preprocess(image_inputs, return_tensors="pt")
outputs = self.image_processor_tester.prepare_keypoint_matching_output(**pre_processed_images)
tuple_image_sizes = [
((image_pair[0].size[0], image_pair[0].size[1]), (image_pair[1].size[0], image_pair[1].size[1]))
for image_pair in image_inputs
]
tuple_post_processed_outputs = image_processor.post_process_keypoint_matching(outputs, tuple_image_sizes)
check_post_processed_output(tuple_post_processed_outputs, tuple_image_sizes)
tensor_image_sizes = torch.tensor(
[(image_pair[0].size, image_pair[1].size) for image_pair in image_inputs]
).flip(2)
tensor_post_processed_outputs = image_processor.post_process_keypoint_matching(outputs, tensor_image_sizes)
check_post_processed_output(tensor_post_processed_outputs, tensor_image_sizes)
@unittest.skip(reason="Many failing cases. This test needs a more deep investigation.")
def test_fast_is_faster_than_slow(self):
"""Override the generic test since EfficientLoFTR requires image pairs."""
if not self.test_slow_image_processor or not self.test_fast_image_processor:
self.skipTest(reason="Skipping slow/fast speed test")
if self.image_processing_class is None or self.fast_image_processing_class is None:
self.skipTest(reason="Skipping slow/fast speed test as one of the image processors is not defined")
# Create image pairs for speed test
dummy_images = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, torchify=False)
image_processor_slow = self.image_processing_class(**self.image_processor_dict)
image_processor_fast = self.fast_image_processing_class(**self.image_processor_dict)
# Time slow processor
start_time = time.time()
for _ in range(10):
_ = image_processor_slow(dummy_images, return_tensors="pt")
slow_time = time.time() - start_time
# Time fast processor
start_time = time.time()
for _ in range(10):
_ = image_processor_fast(dummy_images, return_tensors="pt")
fast_time = time.time() - start_time
# Fast should be faster (or at least not significantly slower)
self.assertLessEqual(
fast_time, slow_time * 1.2, "Fast processor should not be significantly slower than slow processor"
)
    @require_vision
    @require_torch
    def test_slow_fast_equivalence(self):
        """Check the slow and fast processors produce numerically equivalent pixel values on the same inputs."""
        if not self.test_slow_image_processor or not self.test_fast_image_processor:
            self.skipTest(reason="Skipping slow/fast equivalence test")
        if self.image_processing_class is None or self.fast_image_processing_class is None:
            self.skipTest(reason="Skipping slow/fast equivalence test as one of the image processors is not defined")
        # Unpaired numpy images (pairs=False); NOTE(review): presumably the processor
        # pairs consecutive images internally — confirm against the processor docs.
        dummy_image = self.image_processor_tester.prepare_image_inputs(
            equal_resolution=False, numpify=True, batch_size=2, pairs=False
        )
        image_processor_slow = self.image_processing_class(**self.image_processor_dict)
        image_processor_fast = self.fast_image_processing_class(**self.image_processor_dict)
        encoding_slow = image_processor_slow(dummy_image, return_tensors="pt")
        encoding_fast = image_processor_fast(dummy_image, return_tensors="pt")
        self._assert_slow_fast_tensors_equivalence(encoding_slow.pixel_values, encoding_fast.pixel_values)
    @slow
    @require_torch_accelerator
    @require_vision
    @pytest.mark.torch_compile_test
    def test_can_compile_fast_image_processor(self):
        """Override the generic test since EfficientLoFTR requires image pairs.

        Runs the fast processor eagerly, then under torch.compile, and checks
        both outputs agree within the given tolerances.
        """
        if self.fast_image_processing_class is None:
            self.skipTest("Skipping compilation test as fast image processor is not defined")
        torch.compiler.reset()
        input_image = self.image_processor_tester.prepare_image_inputs(equal_resolution=True, torchify=False)
        image_processor = self.fast_image_processing_class(**self.image_processor_dict)
        output_eager = image_processor(input_image, device=torch_device, return_tensors="pt")
        # Compile the processor and compare against the eager baseline.
        image_processor = torch.compile(image_processor, mode="reduce-overhead")
        output_compiled = image_processor(input_image, device=torch_device, return_tensors="pt")
        self._assert_slow_fast_tensors_equivalence(
            output_eager.pixel_values, output_compiled.pixel_values, atol=1e-4, rtol=1e-4, mean_atol=1e-5
        )
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/efficientloftr/test_image_processing_efficientloftr.py",
"license": "Apache License 2.0",
"lines": 398,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:tests/models/efficientloftr/test_modeling_efficientloftr.py | # Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import unittest
from functools import cached_property, reduce
from datasets import load_dataset
from transformers.models.efficientloftr import EfficientLoFTRConfig, EfficientLoFTRModel
from transformers.testing_utils import (
require_torch,
require_vision,
set_config_for_less_flaky_test,
set_model_for_less_flaky_test,
slow,
torch_device,
)
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import EfficientLoFTRForKeypointMatching
if is_vision_available():
from transformers import AutoImageProcessor
class EfficientLoFTRModelTester:
    """Builds tiny EfficientLoFTR configs and dummy inputs for the unit tests below."""

    def __init__(
        self,
        parent,
        batch_size=2,
        image_width=6,  # need to be a multiple of `stage_stride[0] * stage_stride[1]`
        image_height=4,  # need to be a multiple of `stage_stride[0] * stage_stride[1]`
        # NOTE(review): mutable list defaults are shared across instances; harmless
        # here as long as they are only read — confirm the config does not mutate them.
        stage_num_blocks: list[int] = [1, 1],
        out_features: list[int] = [16, 16],  # need to be >= 2 to make `config.fine_fusion_dims > 0`
        stage_stride: list[int] = [2, 1],
        q_aggregation_kernel_size: int = 1,
        kv_aggregation_kernel_size: int = 1,
        q_aggregation_stride: int = 1,
        kv_aggregation_stride: int = 1,
        num_attention_layers: int = 2,
        num_attention_heads: int = 8,
        hidden_size: int = 16,
        coarse_matching_threshold: float = 0.0,
        fine_kernel_size: int = 2,
        coarse_matching_border_removal: int = 0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_width = image_width
        self.image_height = image_height
        self.stage_num_blocks = stage_num_blocks
        self.out_features = out_features
        self.stage_stride = stage_stride
        self.q_aggregation_kernel_size = q_aggregation_kernel_size
        self.kv_aggregation_kernel_size = kv_aggregation_kernel_size
        self.q_aggregation_stride = q_aggregation_stride
        self.kv_aggregation_stride = kv_aggregation_stride
        self.num_attention_layers = num_attention_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_size = hidden_size
        self.coarse_matching_threshold = coarse_matching_threshold
        self.coarse_matching_border_removal = coarse_matching_border_removal
        self.fine_kernel_size = fine_kernel_size

    def prepare_config_and_inputs(self):
        """Return a small config plus random pixel values of shape (batch, 2, 3, H, W)."""
        # EfficientLoFTR expects a grayscale image as input
        # NOTE(review): the dummy tensor still has 3 channels — presumably converted
        # to grayscale downstream; confirm against the model's forward.
        pixel_values = floats_tensor([self.batch_size, 2, 3, self.image_height, self.image_width])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        """Build an EfficientLoFTRConfig from the tester's hyperparameters."""
        return EfficientLoFTRConfig(
            stage_num_blocks=self.stage_num_blocks,
            out_features=self.out_features,
            stage_stride=self.stage_stride,
            q_aggregation_kernel_size=self.q_aggregation_kernel_size,
            kv_aggregation_kernel_size=self.kv_aggregation_kernel_size,
            q_aggregation_stride=self.q_aggregation_stride,
            kv_aggregation_stride=self.kv_aggregation_stride,
            num_attention_layers=self.num_attention_layers,
            num_attention_heads=self.num_attention_heads,
            hidden_size=self.hidden_size,
            coarse_matching_threshold=self.coarse_matching_threshold,
            coarse_matching_border_removal=self.coarse_matching_border_removal,
            fine_kernel_size=self.fine_kernel_size,
        )

    def create_and_check_model(self, config, pixel_values):
        """Run a forward pass and check keypoints/matches/matching_scores shapes."""
        model = EfficientLoFTRForKeypointMatching(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # All three outputs share the last matches dimension.
        maximum_num_matches = result.matches.shape[-1]
        self.parent.assertEqual(
            result.keypoints.shape,
            (self.batch_size, 2, maximum_num_matches, 2),
        )
        self.parent.assertEqual(
            result.matches.shape,
            (self.batch_size, 2, maximum_num_matches),
        )
        self.parent.assertEqual(
            result.matching_scores.shape,
            (self.batch_size, 2, maximum_num_matches),
        )

    def prepare_config_and_inputs_for_common(self):
        """Adapt prepare_config_and_inputs to the (config, inputs_dict) shape the common tests expect."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class EfficientLoFTRModelTest(ModelTesterMixin, unittest.TestCase):
    """Common-suite tests for EfficientLoFTR, with overrides for its pair-of-images input format."""

    all_model_classes = (EfficientLoFTRForKeypointMatching, EfficientLoFTRModel) if is_torch_available() else ()
    test_resize_embeddings = False
    has_attentions = True

    def setUp(self):
        self.model_tester = EfficientLoFTRModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EfficientLoFTRConfig, has_text_modality=False)

    def test_config(self):
        """Exercise the standard config serialization/initialization checks."""
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    @unittest.skip(reason="EfficientLoFTRForKeypointMatching does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="EfficientLoFTRForKeypointMatching does not support input and output embeddings")
    def test_model_get_set_embeddings(self):
        pass

    @unittest.skip(reason="EfficientLoFTRForKeypointMatching does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    @unittest.skip(reason="This module does not support standalone training")
    def test_training(self):
        pass

    @unittest.skip(reason="This module does not support standalone training")
    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(reason="This module does not support standalone training")
    def test_training_gradient_checkpointing_use_reentrant_false(self):
        pass

    @unittest.skip(reason="This module does not support standalone training")
    def test_training_gradient_checkpointing_use_reentrant_true(self):
        pass

    @unittest.skip(reason="EfficientLoFTR does not output any loss term in the forward pass")
    def test_retain_grad_hidden_states_attentions(self):
        pass

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_forward_signature(self):
        """forward() must take `pixel_values` as its first parameter for every model class."""
        config, _ = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_hidden_states_output(self):
        """Hidden states: one per backbone stage plus the input, first at full image resolution."""

        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_hidden_states = len(self.model_tester.stage_num_blocks) + 1
            self.assertEqual(len(hidden_states), expected_num_hidden_states)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_height, self.model_tester.image_width],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_attention_outputs(self):
        """Attention maps must be square over the coarse grid (image size divided by total stride)."""

        def check_attention_output(inputs_dict, config, model_class):
            config._attn_implementation = "eager"
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            # Total downsampling factor is the product of the per-stage strides.
            total_stride = reduce(lambda a, b: a * b, config.stage_stride)
            hidden_size = (
                self.model_tester.image_height // total_stride * self.model_tester.image_width // total_stride
            )
            expected_attention_shape = [
                self.model_tester.num_attention_heads,
                hidden_size,
                hidden_size,
            ]
            for i, attention in enumerate(attentions):
                self.assertListEqual(
                    list(attention.shape[-3:]),
                    expected_attention_shape,
                )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            check_attention_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            check_attention_output(inputs_dict, config, model_class)

    @slow
    def test_model_from_pretrained(self):
        from_pretrained_ids = ["zju-community/efficientloftr"]
        for model_name in from_pretrained_ids:
            model = EfficientLoFTRForKeypointMatching.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_forward_labels_should_be_none(self):
        """Passing any `labels` tensor must raise ValueError (the model computes no loss)."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                model_inputs = self._prepare_for_class(inputs_dict, model_class)
                # Provide an arbitrary sized Tensor as labels to model inputs
                model_inputs["labels"] = torch.rand((128, 128))
                with self.assertRaises(ValueError) as cm:
                    model(**model_inputs)
                self.assertEqual(ValueError, cm.exception.__class__)

    def test_batching_equivalence(self, atol=1e-5, rtol=1e-5):
        """
        This test is overwritten because the model outputs do not contain only regressive values but also keypoint
        locations.
        Similarly to the problem discussed about SuperGlue implementation
        [here](https://github.com/huggingface/transformers/pull/29886#issuecomment-2482752787), the consequence of
        having different scores for matching, makes the maximum indices differ. These indices are being used to compute
        the keypoint coordinates. The keypoint coordinates, in the model outputs, are floating point tensors, so the
        original implementation of this test cover this case. But the resulting tensors may have differences exceeding
        the relative and absolute tolerance.
        Therefore, similarly to the SuperGlue integration test, for the key "keypoints" in the model outputs, we only
        require that the number of coordinates violating the tolerance stays below 2% of the coordinate count.
        """

        def recursive_check(batched_object, single_row_object, model_name, key):
            # Recurse into containers; compare only floating-point tensors.
            if isinstance(batched_object, (list, tuple)):
                for batched_object_value, single_row_object_value in zip(batched_object, single_row_object):
                    recursive_check(batched_object_value, single_row_object_value, model_name, key)
            elif isinstance(batched_object, dict):
                for batched_object_value, single_row_object_value in zip(
                    batched_object.values(), single_row_object.values()
                ):
                    recursive_check(batched_object_value, single_row_object_value, model_name, key)
            # do not compare returned loss (0-dim tensor) / codebook ids (int) / caching objects
            elif batched_object is None or not isinstance(batched_object, torch.Tensor):
                return
            elif batched_object.dim() == 0:
                return
            # do not compare int or bool outputs as they are mostly computed with max/argmax/topk methods which are
            # very sensitive to the inputs (e.g. tiny differences may give totally different results)
            elif not torch.is_floating_point(batched_object):
                return
            else:
                # indexing the first element does not always work
                # e.g. models that output similarity scores of size (N, M) would need to index [0, 0]
                slice_ids = tuple(slice(0, index) for index in single_row_object.shape)
                batched_row = batched_object[slice_ids]
                if key == "keypoints":
                    # Keypoint coordinates depend on argmax indices, so allow up to 2% mismatches.
                    batched_row = torch.sum(batched_row, dim=-1)
                    single_row_object = torch.sum(single_row_object, dim=-1)
                    tolerance = 0.02 * single_row_object.shape[-1]
                    self.assertTrue(
                        torch.sum(~torch.isclose(batched_row, single_row_object, rtol=rtol, atol=atol)) < tolerance
                    )
                else:
                    self.assertFalse(
                        torch.isnan(batched_row).any(), f"Batched output has `nan` in {model_name} for key={key}"
                    )
                    self.assertFalse(
                        torch.isinf(batched_row).any(), f"Batched output has `inf` in {model_name} for key={key}"
                    )
                    self.assertFalse(
                        torch.isnan(single_row_object).any(),
                        f"Single row output has `nan` in {model_name} for key={key}",
                    )
                    self.assertFalse(
                        torch.isinf(single_row_object).any(),
                        f"Single row output has `inf` in {model_name} for key={key}",
                    )
                    try:
                        torch.testing.assert_close(batched_row, single_row_object, atol=atol, rtol=rtol)
                    except AssertionError as e:
                        msg = f"Batched and Single row outputs are not equal in {model_name} for key={key}.\n\n"
                        msg += str(e)
                        raise AssertionError(msg)

        config, batched_input = self.model_tester.prepare_config_and_inputs_for_common()
        set_config_for_less_flaky_test(config)
        for model_class in self.all_model_classes:
            config.output_hidden_states = True
            model_name = model_class.__name__
            if hasattr(self.model_tester, "prepare_config_and_inputs_for_model_class"):
                config, batched_input = self.model_tester.prepare_config_and_inputs_for_model_class(model_class)
            batched_input_prepared = self._prepare_for_class(batched_input, model_class)
            model = model_class(config).to(torch_device).eval()
            set_model_for_less_flaky_test(model)
            batch_size = self.model_tester.batch_size
            single_row_input = {}
            for key, value in batched_input_prepared.items():
                if isinstance(value, torch.Tensor) and value.shape[0] % batch_size == 0:
                    # e.g. musicgen has inputs of size (bs*codebooks). in most cases value.shape[0] == batch_size
                    single_batch_shape = value.shape[0] // batch_size
                    single_row_input[key] = value[:single_batch_shape]
                else:
                    single_row_input[key] = value
            with torch.no_grad():
                model_batched_output = model(**batched_input_prepared)
                model_row_output = model(**single_row_input)
            if isinstance(model_batched_output, torch.Tensor):
                model_batched_output = {"model_output": model_batched_output}
                model_row_output = {"model_output": model_row_output}
            for key in model_batched_output:
                # DETR starts from zero-init queries to decoder, leading to cos_similarity = `nan`
                if hasattr(self, "zero_init_hidden_state") and "decoder_hidden_states" in key:
                    model_batched_output[key] = model_batched_output[key][1:]
                    model_row_output[key] = model_row_output[key][1:]
                recursive_check(model_batched_output[key], model_row_output[key], model_name, key)
def prepare_imgs():
    """Load three test images and arrange them as two (left, right) pairs that share the second image."""
    dataset = load_dataset("hf-internal-testing/image-matching-test-dataset", split="train")
    first, second, third = (dataset[idx]["image"] for idx in range(3))
    return [[first, second], [third, second]]
@require_torch
@require_vision
class EfficientLoFTRModelIntegrationTest(unittest.TestCase):
    """Slow end-to-end checks against the pretrained zju-community/efficientloftr checkpoint."""

    @cached_property
    def default_image_processor(self):
        # Cached lazily; None when vision dependencies are unavailable.
        return AutoImageProcessor.from_pretrained("zju-community/efficientloftr") if is_vision_available() else None

    @slow
    def test_inference(self):
        """Run the pretrained model on two image pairs and pin output shapes and top-10 matches."""
        model = EfficientLoFTRForKeypointMatching.from_pretrained(
            "zju-community/efficientloftr", attn_implementation="eager"
        ).to(torch_device)
        preprocessor = self.default_image_processor
        images = prepare_imgs()
        inputs = preprocessor(images=images, return_tensors="pt").to(torch_device)
        with torch.no_grad():
            outputs = model(**inputs, output_hidden_states=True, output_attentions=True)
        # Top-10 scores of the first image of the first pair, compared to golden values.
        predicted_top10 = torch.topk(outputs.matching_scores[0, 0], k=10)
        predicted_top10_matches_indices = predicted_top10.indices
        predicted_top10_matching_scores = predicted_top10.values
        expected_number_of_matches = 4800
        expected_matches_shape = torch.Size((len(images), 2, expected_number_of_matches))
        expected_matching_scores_shape = torch.Size((len(images), 2, expected_number_of_matches))
        expected_top10_matches_indices = torch.tensor(
            [3145, 3065, 3143, 3144, 1397, 1705, 3151, 2422, 3066, 2342], dtype=torch.int64, device=torch_device
        )
        expected_top10_matching_scores = torch.tensor(
            [0.9998, 0.9997, 0.9997, 0.9996, 0.9996, 0.9996, 0.9996, 0.9995, 0.9995, 0.9995], device=torch_device
        )
        self.assertEqual(outputs.matches.shape, expected_matches_shape)
        self.assertEqual(outputs.matching_scores.shape, expected_matching_scores_shape)
        torch.testing.assert_close(
            predicted_top10_matches_indices, expected_top10_matches_indices, rtol=5e-3, atol=5e-3
        )
        torch.testing.assert_close(
            predicted_top10_matching_scores, expected_top10_matching_scores, rtol=5e-3, atol=5e-3
        )
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/efficientloftr/test_modeling_efficientloftr.py",
"license": "Apache License 2.0",
"lines": 376,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:src/transformers/models/ernie4_5/configuration_ernie4_5.py | # Copyright (c) 2025 Baidu, Inc. and HuggingFace Inc. team. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Ernie 4.5 model configuration"""
from ...configuration_utils import PreTrainedConfig
from ...modeling_rope_utils import RopeParameters
class Ernie4_5Config(PreTrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`Ernie4_5Model`]. It is used to instantiate an Ernie 4.5
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
    defaults will yield a similar configuration to that of the Ernie 4.5 0.3B.
    e.g. [baidu/ERNIE-4.5-0.3B-PT](https://huggingface.co/baidu/ERNIE-4.5-0.3B-PT)

    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 103424):
            Vocabulary size of the Ernie 4.5 model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`Ernie4_5Model`]
        hidden_size (`int`, *optional*, defaults to 1024):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 3072):
            Dimension of the MLP representations.
        num_hidden_layers (`int`, *optional*, defaults to 18):
            Number of hidden layers in the Transformer decoder.
        num_attention_heads (`int`, *optional*, defaults to 16):
            Number of attention heads for each attention layer in the Transformer decoder.
        num_key_value_heads (`int`, *optional*, defaults to 2):
            This is the number of key_value heads that should be used to implement Grouped Query Attention. If
            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
            `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
            by meanpooling all the original heads within that group. For more details, check out [this
            paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to
            `num_attention_heads`.
        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the decoder.
        max_position_embeddings (`int`, *optional*, defaults to 131072):
            The maximum sequence length that this model might ever be used with.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        rms_norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon used by the rms normalization layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions.
        pad_token_id (`int`, *optional*, defaults to 0):
            Padding token id.
        bos_token_id (`int`, *optional*, defaults to 1):
            Beginning of stream token id.
        eos_token_id (`int`, *optional*, defaults to 2):
            End of stream token id.
        tie_word_embeddings (`bool`, *optional*, defaults to `True`):
            Whether to tie weight embeddings
        rope_parameters (`RopeParameters`, *optional*):
            Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain
            a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE
            with longer `max_position_embeddings`.
        use_bias (`bool`, *optional*, defaults to `False`):
            Whether to use a bias in any of the projections including mlp and attention for example.
        head_dim (`int`, *optional*, defaults to 128):
            The attention head dimension. If None, it will default to hidden_size // num_attention_heads

    ```python
    >>> from transformers import Ernie4_5Model, Ernie4_5Config

    >>> # Initializing a Ernie4_5 0.3B style configuration
    >>> configuration = Ernie4_5Config()

    >>> # Initializing a model from the 0.3B style configuration
    >>> model = Ernie4_5Model(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "ernie4_5"
    keys_to_ignore_at_inference = ["past_key_values"]
    # Default RoPE base frequency (theta) when rope_parameters does not provide one.
    default_theta = 500000.0
    # Default tensor parallel plan for base model `Ernie4_5Model`
    base_model_tp_plan = {
        "layers.*.self_attn.q_proj": "colwise",
        "layers.*.self_attn.k_proj": "colwise",
        "layers.*.self_attn.v_proj": "colwise",
        "layers.*.self_attn.o_proj": "rowwise",
        "layers.*.mlp.gate_proj": "colwise",
        "layers.*.mlp.up_proj": "colwise",
        "layers.*.mlp.down_proj": "rowwise",
    }
    base_model_pp_plan = {
        "embed_tokens": (["input_ids"], ["inputs_embeds"]),
        "layers": (["hidden_states", "attention_mask"], ["hidden_states"]),
        "norm": (["hidden_states"], ["hidden_states"]),
    }

    def __init__(
        self,
        vocab_size: int | None = 103424,
        hidden_size: int | None = 1024,
        intermediate_size: int | None = 3072,
        num_hidden_layers: int | None = 18,
        num_attention_heads: int | None = 16,
        num_key_value_heads: int | None = 2,
        hidden_act: str | None = "silu",
        max_position_embeddings: int | None = 131072,
        initializer_range: float | None = 0.02,
        rms_norm_eps: float | None = 1e-05,  # annotation fixed: eps is a float, not int
        use_cache: bool | None = True,  # annotation fixed: flag is a bool, not int
        pad_token_id: int | None = 0,
        bos_token_id: int | None = 1,
        eos_token_id: int | None = 2,
        tie_word_embeddings: bool | None = True,
        rope_parameters: RopeParameters | dict[str, RopeParameters] | None = None,
        use_bias: bool | None = False,
        head_dim: int | None = 128,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.use_bias = use_bias
        # Derive head_dim from hidden size when not given explicitly.
        self.head_dim = head_dim if head_dim is not None else self.hidden_size // self.num_attention_heads
        self.rope_parameters = rope_parameters
        self.tie_word_embeddings = tie_word_embeddings
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(**kwargs)
__all__ = ["Ernie4_5Config"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/ernie4_5/configuration_ernie4_5.py",
"license": "Apache License 2.0",
"lines": 142,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/ernie4_5/convert_ernie4_5_tokenizer.py | # Copyright (c) 2025 HuggingFace Inc. team. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from transformers import LlamaTokenizer, LlamaTokenizerFast
DEFAULT_CHAT_TEMPLATE = '{%- if not add_generation_prompt is defined -%}\n {%- set add_generation_prompt = true -%}\n{%- endif -%}\n{%- if not cls_token is defined -%}\n {%- set cls_token = "<|begin_of_sentence|>" -%}\n{%- endif -%}\n{%- if not sep_token is defined -%}\n {%- set sep_token = "<|end_of_sentence|>" -%}\n{%- endif -%}\n{{- cls_token -}}\n{%- for message in messages -%}\n {%- if message["role"] == "user" -%}\n {{- "User: " + message["content"] + "\n" -}}\n {%- elif message["role"] == "assistant" -%}\n {{- "Assistant: " + message["content"] + sep_token -}}\n {%- elif message["role"] == "system" -%}\n {{- message["content"] + "\n" -}}\n {%- endif -%}\n{%- endfor -%}\n{%- if add_generation_prompt -%}\n {{- "Assistant: " -}}\n{%- endif -%}'
DEFAULT_TEXT_ADD_TOKENS = [
"<mask:4>",
"<mask:5>",
"<mask:6>",
"<mask:7>",
]
if __name__ == "__main__":
    # Conversion script: load Baidu's slow ERNIE 4.5 tokenizer, normalize its
    # special tokens / init kwargs, then round-trip through a slow save so a
    # fast tokenizer can be built from it and written to --output_dir.
    import tempfile  # local import: only needed when run as a script

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--repo_name",
        help="Name of the repo where the tokenizer is located at.",
        default="baidu/ERNIE-4.5-0.3B-Base-PT",
    )
    parser.add_argument(
        "--push_to_hub",
        help="Whether or not to push the model to the hub at `output_dir` instead of saving it locally.",
        action="store_true",
        default=False,
    )
    parser.add_argument(
        "--output_dir",
        help="Location to write the tokenizer",
    )
    args = parser.parse_args()

    hf_tok = LlamaTokenizer.from_pretrained(
        args.repo_name,
        pad_token="<unk>",
        cls_token="<|begin_of_sentence|>",
        sep_token="<|end_of_sentence|>",
        mask_token="<mask:1>",
        add_bos_token=False,
        add_prefix_space=False,
        chat_template=DEFAULT_CHAT_TEMPLATE,
        legacy=True,
    )
    hf_tok.model_max_length = 131072
    hf_tok.init_kwargs.pop("auto_map", None)
    # special tokens which we need to map as additional special tokens instead
    hf_tok.init_kwargs.pop("header_start_token", None)
    hf_tok.init_kwargs.pop("header_end_token", None)
    hf_tok.init_kwargs.pop("sys_start_token", None)
    hf_tok.init_kwargs.pop("sys_end_token", None)
    for token in DEFAULT_TEXT_ADD_TOKENS:
        hf_tok.add_tokens([token], special_tokens=True)

    # Save the slow tokenizer and convert on load. Use a fresh temporary
    # directory instead of the previous hard-coded "/tmp/ernie4_5_tokenizer":
    # that fixed path clashes between concurrent runs, can pick up stale files
    # from earlier runs, and does not exist on non-POSIX systems.
    with tempfile.TemporaryDirectory() as tmp_dir:
        hf_tok.save_pretrained(tmp_dir)
        hf_tok_fast = LlamaTokenizerFast.from_pretrained(tmp_dir, from_slow=True)
    hf_tok_fast.save_pretrained(args.output_dir, push_to_hub=args.push_to_hub)
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/ernie4_5/convert_ernie4_5_tokenizer.py",
"license": "Apache License 2.0",
"lines": 64,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/ernie4_5/modular_ernie4_5.py | # Copyright (c) 2025 Baidu, Inc. and HuggingFace Inc. team. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch Ernie 4.5 model"""
import torch
from torch import nn
from ...modeling_rope_utils import dynamic_rope_update
from ...utils import auto_docstring, can_return_tuple
from ...utils.generic import maybe_autocast
from ..glm.modeling_glm import rotate_half
from ..llama.modeling_llama import (
LlamaAttention,
LlamaForCausalLM,
LlamaMLP,
)
from ..olmo.modeling_olmo import OlmoRotaryEmbedding
from .configuration_ernie4_5 import Ernie4_5Config
class Ernie4_5RotaryEmbedding(OlmoRotaryEmbedding):
    """Rotary embedding that returns cos/sin tables kept in float32 (no downcast to x.dtype)."""

    @torch.no_grad()
    @dynamic_rope_update  # power user: used with advanced RoPE types (e.g. dynamic rope)
    def forward(self, x, position_ids):
        # Expand inv_freq to (batch, dim/2, 1) and position ids to (batch, 1, seq)
        # so their matmul yields per-(batch, position) frequencies.
        inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
        position_ids_expanded = position_ids[:, None, :].float()
        # mps has no float32 autocast context; fall back to "cpu" for the guard.
        device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
        with maybe_autocast(device_type=device_type, enabled=False):  # Force float32
            freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
            emb = torch.cat((freqs, freqs), dim=-1)
            cos = emb.cos() * self.attention_scaling
            sin = emb.sin() * self.attention_scaling
        # keeping it in full precision
        return cos, sin
def apply_rotary_pos_emb(q, k, cos, sin, unsqueeze_dim=1):
    """Applies Rotary Position Embedding to the query and key tensors.

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            Dimension along which `cos` and `sin` are unsqueezed so they broadcast
            against `q`/`k`. With `[batch, heads, seq, head_dim]` queries use 1;
            with `[batch, seq, heads, head_dim]` queries use 2.

    Returns:
        `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
    """
    # GLM rope style (full dim), computed in full precision and cast back at the end.
    input_dtype = q.dtype
    cos = cos.unsqueeze(unsqueeze_dim)
    sin = sin.unsqueeze(unsqueeze_dim)
    # Interleaved layout: each entry of the first half is repeated twice in place
    # instead of the usual [first_half, second_half] concatenation.
    half = cos.shape[-1] // 2
    cos = cos[..., :half].repeat_interleave(2, dim=-1)
    sin = sin[..., : sin.shape[-1] // 2].repeat_interleave(2, dim=-1)
    rotated_q = q.float() * cos + rotate_half(q).float() * sin
    rotated_k = k.float() * cos + rotate_half(k).float() * sin
    return rotated_q.to(input_dtype), rotated_k.to(input_dtype)
class Ernie4_5MLP(LlamaMLP):
    """Llama-style gated MLP whose linear projections honor `config.use_bias`."""

    def __init__(self, config: Ernie4_5Config):
        super().__init__(config)
        # Re-create the projections so the bias toggle from the config is applied.
        use_bias = config.use_bias
        self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=use_bias)
        self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=use_bias)
        self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=use_bias)
class Ernie4_5Attention(LlamaAttention):
    """Llama attention with configurable projection bias and no attention dropout."""

    def __init__(self, config: Ernie4_5Config, layer_idx: int):
        super().__init__(config, layer_idx)
        self.attention_dropout = 0.0
        # Rebuild the projections so `config.use_bias` takes effect on all four.
        use_bias = config.use_bias
        q_out = config.num_attention_heads * self.head_dim
        kv_out = config.num_key_value_heads * self.head_dim
        self.q_proj = nn.Linear(config.hidden_size, q_out, bias=use_bias)
        self.k_proj = nn.Linear(config.hidden_size, kv_out, bias=use_bias)
        self.v_proj = nn.Linear(config.hidden_size, kv_out, bias=use_bias)
        self.o_proj = nn.Linear(q_out, config.hidden_size, bias=use_bias)
class Ernie4_5ForCausalLM(LlamaForCausalLM):
    @can_return_tuple
    @auto_docstring
    def forward(self, **super_kwargs):
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
        """
        # Fix: return the parent's output instead of silently discarding it —
        # without the `return`, a direct call to this forward yields None.
        return super().forward(**super_kwargs)
# Public API of this modular file. The Model/PreTrainedModel names are emitted
# by the modular converter rather than defined here, hence the noqa markers.
__all__ = [
    "Ernie4_5ForCausalLM",
    "Ernie4_5Model",  # noqa: F822
    "Ernie4_5PreTrainedModel",  # noqa: F822
]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/ernie4_5/modular_ernie4_5.py",
"license": "Apache License 2.0",
"lines": 98,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/ernie4_5_moe/configuration_ernie4_5_moe.py | # Copyright (c) 2025 Baidu, Inc. and HuggingFace Inc. team. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Ernie 4.5 MoE model configuration"""
from ...configuration_utils import PreTrainedConfig
from ...modeling_rope_utils import RopeParameters
from ...utils import logging
logger = logging.get_logger(__name__)
class Ernie4_5_MoeConfig(PreTrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`Ernie4_5_MoeModel`]. It is used to instantiate a
    Ernie 4.5 MoE model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of [baidu/ERNIE-4.5-21B-A3B-PT](https://huggingface.co/baidu/ERNIE-4.5-21B-A3B-PT).

    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 103424):
            Vocabulary size of the Ernie 4.5 MoE model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`Ernie4_5_MoeModel`]
        pad_token_id (`int`, *optional*, defaults to 0):
            Padding token id.
        bos_token_id (`int`, *optional*, defaults to 1):
            Beginning of stream token id.
        eos_token_id (`int`, *optional*, defaults to 2):
            End of stream token id.
        hidden_size (`int`, *optional*, defaults to 2560):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 12288):
            Dimension of the MLP representations.
        num_hidden_layers (`int`, *optional*, defaults to 28):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 20):
            Number of attention heads for each attention layer in the Transformer encoder.
        num_key_value_heads (`int`, *optional*, defaults to 4):
            This is the number of key_value heads that should be used to implement Grouped Query Attention. If
            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
            `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
            by meanpooling all the original heads within that group. For more details, check out [this
            paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to `32`.
        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the decoder.
        max_position_embeddings (`int`, *optional*, defaults to 131072):
            The maximum sequence length that this model might ever be used with.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        rms_norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon used by the rms normalization layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        tie_word_embeddings (`bool`, *optional*, defaults to `True`):
            Whether the model's input and output word embeddings should be tied.
        rope_parameters (`RopeParameters`, *optional*):
            Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain
            a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE
            with longer `max_position_embeddings`.
        use_bias (`bool`, *optional*, defaults to `False`):
            Whether to use a bias in any of the projections including mlp and attention for example.
        moe_intermediate_size (`int`, *optional*, defaults to 1536):
            Intermediate size of the routed expert.
        moe_k (`int`, *optional*, defaults to 6):
            Number of selected experts.
        moe_num_experts (`int`, *optional*, defaults to 64):
            Number of routed experts.
        moe_num_shared_experts (`int`, *optional*, defaults to 2):
            The number of experts that are shared for all MoE forwards.
        moe_layer_start_index (`int`, *optional*, defaults to 1):
            The first index at which MoE layers start to appear.
        moe_layer_end_index (`int`, *optional*, defaults to -1):
            The last possible index for a MoE layer.
        moe_layer_interval (`int`, *optional*, defaults to 1):
            The intervals between MoE layers to appear.
        moe_norm_min (`float`, *optional*, defaults to 1e-12):
            Minimum division value during routing normalization.
        output_router_logits (`bool`, *optional*, defaults to `False`):
            Whether or not the router logits should be returned by the model. Enabling this will also
            allow the model to output the auxiliary loss, including load balancing loss and router z-loss.
        router_aux_loss_coef (`float`, *optional*, defaults to 0.001):
            The aux loss factor for the total loss.

    ```python
    >>> from transformers import Ernie4_5_MoeModel, Ernie4_5_MoeConfig

    >>> # Initializing a Ernie4_5_MoE style configuration
    >>> configuration = Ernie4_5_MoeConfig()

    >>> # Initializing a model from the ERNIE-4.5-21B-A3B style configuration
    >>> model = Ernie4_5_MoeModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "ernie4_5_moe"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_experts": "moe_num_experts", "num_experts_per_tok": "moe_k"}
    default_theta = 500000.0

    # Default tensor parallel plan for base model `Ernie4_5_MoE`
    base_model_tp_plan = {
        "layers.*.self_attn.q_proj": "colwise",
        "layers.*.self_attn.k_proj": "colwise",
        "layers.*.self_attn.v_proj": "colwise",
        "layers.*.self_attn.o_proj": "rowwise",
        "layers.*.mlp.experts.gate_up_proj": "rowwise",
        "layers.*.mlp.experts.down_proj": "rowwise",
        "layers.*.mlp.shared_experts.gate_proj": "colwise",
        "layers.*.mlp.shared_experts.up_proj": "colwise",
        "layers.*.mlp.shared_experts.down_proj": "rowwise",
        "layers.*.mlp.gate_proj": "colwise",
        "layers.*.mlp.up_proj": "colwise",
        "layers.*.mlp.down_proj": "rowwise",
    }
    base_model_pp_plan = {
        "embed_tokens": (["input_ids"], ["inputs_embeds"]),
        "layers": (["hidden_states", "attention_mask"], ["hidden_states"]),
        "norm": (["hidden_states"], ["hidden_states"]),
    }

    def __init__(
        self,
        vocab_size: int | None = 103424,
        pad_token_id: int | None = 0,
        bos_token_id: int | None = 1,
        eos_token_id: int | None = 2,
        hidden_size: int | None = 2560,
        intermediate_size: int | None = 12288,
        num_hidden_layers: int | None = 28,
        num_attention_heads: int | None = 20,
        num_key_value_heads: int | None = 4,
        hidden_act: str | None = "silu",
        max_position_embeddings: int | None = 131072,
        initializer_range: float | None = 0.02,
        # Fix: these three were annotated `int` although their defaults (and
        # documented types) are float / bool.
        rms_norm_eps: float | None = 1e-5,
        use_cache: bool | None = True,
        tie_word_embeddings: bool | None = True,
        rope_parameters: RopeParameters | dict[str, RopeParameters] | None = None,
        use_bias: bool | None = False,
        moe_intermediate_size: int | None = 1536,
        moe_k: int | None = 6,
        moe_num_experts: int | None = 64,
        moe_num_shared_experts: int | None = 2,
        moe_layer_start_index: int | None = 1,
        moe_layer_end_index: int | None = -1,
        moe_layer_interval: int | None = 1,
        moe_norm_min: float | None = 1e-12,
        output_router_logits: bool | None = False,
        router_aux_loss_coef: float | None = 0.001,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.use_bias = use_bias
        # MoE arguments
        self.moe_intermediate_size = moe_intermediate_size
        self.moe_k = moe_k
        self.moe_num_experts = moe_num_experts
        self.moe_num_shared_experts = moe_num_shared_experts
        self.moe_layer_start_index = moe_layer_start_index
        # -1 is a sentinel for "up to and including the last layer".
        self.moe_layer_end_index = self.num_hidden_layers - 1 if moe_layer_end_index == -1 else moe_layer_end_index
        self.moe_layer_interval = moe_layer_interval
        self.moe_norm_min = moe_norm_min
        self.output_router_logits = output_router_logits
        self.router_aux_loss_coef = router_aux_loss_coef
        self.rope_parameters = rope_parameters
        self.tie_word_embeddings = tie_word_embeddings
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(**kwargs)
__all__ = ["Ernie4_5_MoeConfig"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/ernie4_5_moe/configuration_ernie4_5_moe.py",
"license": "Apache License 2.0",
"lines": 184,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/ernie4_5_moe/modular_ernie4_5_moe.py | # Copyright (c) 2025 Baidu, Inc. and HuggingFace Inc. team. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch Ernie 4.5 MoE model."""
import torch
import torch.nn.functional as F
from torch import nn
from ... import initialization as init
from ...cache_utils import Cache, DynamicCache
from ...masking_utils import create_causal_mask
from ...modeling_outputs import MoeModelOutputWithPast
from ...modeling_utils import PreTrainedModel
from ...processing_utils import Unpack
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, logging
from ...utils.generic import maybe_autocast, merge_with_config_defaults
from ...utils.output_capturing import OutputRecorder, capture_outputs
from ..ernie4_5.modeling_ernie4_5 import Ernie4_5RotaryEmbedding, apply_rotary_pos_emb, rotate_half # noqa: F401
from ..llama.modeling_llama import LlamaAttention, LlamaRMSNorm
from ..mixtral.modeling_mixtral import (
MixtralExperts,
MixtralForCausalLM,
MixtralPreTrainedModel,
)
from ..qwen3_moe.modeling_qwen3_moe import Qwen3MoeDecoderLayer, Qwen3MoeMLP
from .configuration_ernie4_5_moe import Ernie4_5_MoeConfig
logger = logging.get_logger(__name__)
class Ernie4_5_MoeRMSNorm(LlamaRMSNorm):
    # Identical to Llama's RMSNorm; subclassed only so the MoE model owns its own symbol.
    pass
class Ernie4_5_MoeMLP(Qwen3MoeMLP):
    """Qwen3-MoE style MLP whose projections honor `config.use_bias`.

    `intermediate_size` may override the config value (used for the shared experts).
    """

    def __init__(self, config, intermediate_size=None):
        super().__init__(config, intermediate_size)
        # Re-create the projections so the bias toggle from the config is applied.
        use_bias = config.use_bias
        self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=use_bias)
        self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=use_bias)
        self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=use_bias)
class Ernie4_5_MoeRotaryEmbedding(Ernie4_5RotaryEmbedding):
    # Same rotary implementation as the dense Ernie 4.5 model; redefined here so
    # the MoE variant gets its own class (and config type in the signature).
    def __init__(self, config: Ernie4_5_MoeConfig, device=None):
        super().__init__(config, device)
class Ernie4_5_MoeAttention(LlamaAttention):
    """Llama attention with configurable projection bias and no attention dropout."""

    def __init__(self, config: Ernie4_5_MoeConfig, layer_idx: int):
        super().__init__(config, layer_idx)
        self.attention_dropout = 0.0
        # Rebuild the projections so `config.use_bias` takes effect on all four.
        use_bias = config.use_bias
        q_out = config.num_attention_heads * self.head_dim
        kv_out = config.num_key_value_heads * self.head_dim
        self.q_proj = nn.Linear(config.hidden_size, q_out, bias=use_bias)
        self.k_proj = nn.Linear(config.hidden_size, kv_out, bias=use_bias)
        self.v_proj = nn.Linear(config.hidden_size, kv_out, bias=use_bias)
        self.o_proj = nn.Linear(q_out, config.hidden_size, bias=use_bias)
class Ernie4_5_MoeStatics(nn.Module):
    """Holds MoE (Mixture of Experts) gating statistics.

    Carries a frozen per-expert bias used to correct the gating scores. The
    original codebase additionally tracked per-expert usage; only the bias is
    kept here.
    """

    def __init__(self, config):
        super().__init__()
        # Shape: (expert_groups, experts) — a single expert group is used here.
        self.e_score_correction_bias = nn.Parameter(
            torch.zeros(1, config.moe_num_experts, dtype=torch.float32),
            requires_grad=False,
        )

    def forward(self, hidden_states):
        # NOTE: implemented as a forward (rather than a plain attribute read) as a
        # workaround to enable tensor parallelism for a parameters-only module:
        # otherwise the bias stays a `DTensor` when used inside the caller's forward,
        # while all other tensors there are local `torch.Tensor`s.
        return hidden_states + self.e_score_correction_bias.squeeze()
class Ernie4_5_MoeExperts(MixtralExperts):
    # Mixtral's fused expert container, re-dimensioned from the Ernie MoE config
    # names (routed expert count / routed intermediate width).
    def __init__(self, config):
        super().__init__()
        self.num_experts = config.moe_num_experts
        self.intermediate_dim = config.moe_intermediate_size
class Ernie4_5_MoeTopKRouter(nn.Module):
    """Top-k softmax router with a frozen bias correction (`moe_statics`).

    Experts are *selected* on the bias-corrected probabilities, while the routing
    *weights* come from the raw probabilities, renormalized over the selected
    top-k (the denominator is clamped by `norm_min` to avoid division by zero).
    """

    def __init__(self, config):
        super().__init__()
        # Kept in float32 for numerically stable gating (see _keep_in_fp32_modules_strict).
        self.weight = nn.Parameter(torch.zeros(config.moe_num_experts, config.hidden_size, dtype=torch.float32))
        self.moe_statics = Ernie4_5_MoeStatics(config)
        self.top_k = config.moe_k
        self.norm_min = config.moe_norm_min

    def forward(self, hidden_states: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """Route flattened hidden states.

        Returns:
            `(router_logits, selected_experts, routing_weights)` — fix: the
            annotation previously advertised a 2-tuple although three tensors
            have always been returned.
        """
        # NOTE(review): "mps" is remapped to "cpu" for the autocast scope,
        # presumably due to mps autocast limitations — confirm upstream.
        device_type = (
            hidden_states.device.type
            if isinstance(hidden_states.device.type, str) and hidden_states.device.type != "mps"
            else "cpu"
        )
        with maybe_autocast(device_type=device_type, enabled=False):  # Force float32
            router_logits = F.linear(hidden_states.float(), self.weight.float())
            routing_weights = F.softmax(router_logits, dim=1, dtype=torch.float)
            # Select on bias-corrected scores, weight with the raw probabilities.
            _, selected_experts = torch.topk(self.moe_statics(routing_weights), self.top_k, dim=-1)
            routing_weights = torch.gather(routing_weights, dim=-1, index=selected_experts)
            routing_weights = routing_weights / torch.clamp(
                routing_weights.sum(dim=-1, keepdim=True), min=self.norm_min
            )
        routing_weights = routing_weights.to(hidden_states.dtype)
        return router_logits, selected_experts, routing_weights
class Ernie4_5_MoeSparseMoeBlock(nn.Module):
    """Sparse MoE block: optional always-on shared experts plus top-k routed experts."""

    def __init__(self, config):
        super().__init__()
        self.hidden_dim = config.hidden_size
        self.num_experts = config.moe_num_experts
        self.top_k = config.moe_k
        self.gate = Ernie4_5_MoeTopKRouter(config)
        self.experts = Ernie4_5_MoeExperts(config)
        if config.moe_num_shared_experts > 0:
            # Shared experts are fused into a single wider MLP.
            shared_width = config.moe_intermediate_size * config.moe_num_shared_experts
            self.shared_experts = Ernie4_5_MoeMLP(config, shared_width)
        else:
            self.shared_experts = None

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        batch_size, seq_len, _ = hidden_states.shape
        # Experts operate on a flat (tokens, hidden) view.
        flat_states = hidden_states.view(-1, self.hidden_dim)
        shared_output = self.shared_experts(flat_states) if self.shared_experts is not None else None
        _, top_k_index, top_k_weights = self.gate(flat_states)
        routed_output = self.experts(flat_states, top_k_index, top_k_weights)
        if shared_output is not None:
            routed_output = routed_output + shared_output
        return routed_output.reshape(batch_size, seq_len, self.hidden_dim).to(flat_states.dtype)
class Ernie4_5_MoeDecoderLayer(Qwen3MoeDecoderLayer):
    def __init__(self, config, layer_idx):
        # Deliberately skip Qwen3MoeDecoderLayer.__init__: attention, MLP and
        # norms are all replaced with the Ernie variants below.
        nn.Module.__init__(self)
        self.hidden_size = config.hidden_size
        self.self_attn = Ernie4_5_MoeAttention(config, layer_idx)
        # A layer is sparse (MoE) when its 1-based index is a multiple of
        # `moe_layer_interval` and `layer_idx` lies within
        # [moe_layer_start_index, moe_layer_end_index]; otherwise it is a dense MLP.
        if (
            ((layer_idx + 1) % config.moe_layer_interval == 0)
            and layer_idx >= config.moe_layer_start_index
            and layer_idx <= config.moe_layer_end_index
        ):
            self.mlp = Ernie4_5_MoeSparseMoeBlock(config)
        else:
            self.mlp = Ernie4_5_MoeMLP(config)
        self.input_layernorm = Ernie4_5_MoeRMSNorm(config.hidden_size, config.rms_norm_eps)
        self.post_attention_layernorm = Ernie4_5_MoeRMSNorm(config.hidden_size, config.rms_norm_eps)
@auto_docstring
class Ernie4_5_MoePreTrainedModel(MixtralPreTrainedModel):
    config: Ernie4_5_MoeConfig
    _no_split_modules = ["Ernie4_5_MoeDecoderLayer"]
    # Not supporting multi-token prediction (MTP) atm
    _keys_to_ignore_on_load_unexpected = ["mtp"]
    # Maps output names to the submodules whose outputs are recorded for them
    # (router logits come from element 0 of the router's return tuple).
    _can_record_outputs = {
        "router_logits": OutputRecorder(Ernie4_5_MoeTopKRouter, index=0),
        "hidden_states": Ernie4_5_MoeDecoderLayer,
        "attentions": Ernie4_5_MoeAttention,
    }
    # Router parameters stay in float32 even when loading in reduced precision.
    _keep_in_fp32_modules_strict = ["gate.weight", "moe_statics"]

    @torch.no_grad()
    def _init_weights(self, module):
        """Default initialization, plus explicit init for the MoE-specific parameters."""
        PreTrainedModel._init_weights(self, module)
        if isinstance(module, Ernie4_5_MoeStatics):
            # The gating bias correction starts neutral (all zeros).
            init.zeros_(module.e_score_correction_bias)
        elif isinstance(module, Ernie4_5_MoeExperts):
            init.normal_(module.gate_up_proj, mean=0.0, std=self.config.initializer_range)
            init.normal_(module.down_proj, mean=0.0, std=self.config.initializer_range)
@auto_docstring
class Ernie4_5_MoeModel(Ernie4_5_MoePreTrainedModel):
    def __init__(self, config: Ernie4_5_MoeConfig):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size
        self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        self.layers = nn.ModuleList(
            [Ernie4_5_MoeDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        self.norm = Ernie4_5_MoeRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.rotary_emb = Ernie4_5_MoeRotaryEmbedding(config=config)
        self.gradient_checkpointing = False
        # Initialize weights and apply final processing
        self.post_init()

    @merge_with_config_defaults
    @capture_outputs
    @auto_docstring
    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: Cache | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        use_cache: bool | None = None,
        cache_position: torch.LongTensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> MoeModelOutputWithPast:
        # Exactly one of input_ids / inputs_embeds must be provided.
        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
        if use_cache and past_key_values is None:
            past_key_values = DynamicCache(config=self.config)
        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)
        if cache_position is None:
            # Positions of the new tokens, offset past whatever is already cached.
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position = torch.arange(
                past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
            )
        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)
        causal_mask = create_causal_mask(
            config=self.config,
            inputs_embeds=inputs_embeds,
            attention_mask=attention_mask,
            cache_position=cache_position,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )
        hidden_states = inputs_embeds
        # create position embeddings to be shared across the decoder layers
        position_embeddings = self.rotary_emb(hidden_states, position_ids)
        for decoder_layer in self.layers[: self.config.num_hidden_layers]:
            hidden_states = decoder_layer(
                hidden_states,
                position_embeddings=position_embeddings,
                attention_mask=causal_mask,
                position_ids=position_ids,
                past_key_values=past_key_values,
                use_cache=use_cache,
                cache_position=cache_position,
                **kwargs,
            )
        hidden_states = self.norm(hidden_states)
        return MoeModelOutputWithPast(  # only diff with Mistral is the output type, we need MoE
            last_hidden_state=hidden_states,
            past_key_values=past_key_values,
        )
@auto_docstring
class Ernie4_5_MoeForCausalLM(MixtralForCausalLM):
    def __init__(self, config):
        # Deliberately bypass MixtralForCausalLM.__init__ so the Ernie backbone
        # and a bias-configurable lm_head are used instead.
        PreTrainedModel.__init__(self, config)
        self.model = Ernie4_5_MoeModel(config)
        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=config.use_bias)
        self.router_aux_loss_coef = config.router_aux_loss_coef
        self.num_experts = config.moe_num_experts
        self.num_experts_per_tok = config.moe_k
        # Initialize weights and apply final processing
        self.post_init()

    @can_return_tuple
    @auto_docstring
    def forward(self, **super_kwargs):
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
        """
        # Fix: return the parent's output instead of silently discarding it —
        # without the `return`, a direct call to this forward yields None.
        return super().forward(**super_kwargs)
# Public API of this modular file.
__all__ = [
    "Ernie4_5_MoeForCausalLM",
    "Ernie4_5_MoeModel",
    "Ernie4_5_MoePreTrainedModel",
]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/ernie4_5_moe/modular_ernie4_5_moe.py",
"license": "Apache License 2.0",
"lines": 257,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:tests/models/ernie4_5/test_modeling_ernie4_5.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch Ernie4.5 model."""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import (
Expectations,
cleanup,
require_torch,
require_torch_accelerator,
slow,
torch_device,
)
from ...causal_lm_tester import CausalLMModelTest, CausalLMModelTester
if is_torch_available():
import torch
from transformers import (
AutoTokenizer,
Ernie4_5ForCausalLM,
Ernie4_5Model,
)
class Ernie4_5ModelTester(CausalLMModelTester):
    # Reuses the generic causal-LM test harness; only the base model class differs.
    if is_torch_available():
        base_model_class = Ernie4_5Model
@require_torch
class Ernie4_5ModelTest(CausalLMModelTest, unittest.TestCase):
    """Common model test-suite wiring for the dense Ernie 4.5 model."""

    model_tester_class = Ernie4_5ModelTester
    # Need to use `0.8` instead of `0.9` for `test_cpu_offload`
    # This is because we are hitting edge cases with the causal_mask buffer
    model_split_percents = [0.5, 0.7, 0.8]
    # used in `test_torch_compile_for_training`
    _torch_compile_train_cls = Ernie4_5ForCausalLM if is_torch_available() else None
@require_torch_accelerator
class Ernie4_5IntegrationTest(unittest.TestCase):
    """Slow, accelerator-only generation tests against the released Ernie 4.5 checkpoints."""

    def setUp(self):
        # Fix: this hook was named `setup`, which unittest never invokes, so the
        # pre-test cleanup silently never ran; `setUp` is the correct hook name.
        cleanup(torch_device, gc_collect=True)

    def tearDown(self):
        cleanup(torch_device, gc_collect=True)

    @slow
    def test_ernie4_5_0p3B(self):
        """
        An integration test for Ernie 4.5 0.3B.
        """
        expected_texts = Expectations(
            {
                ("xpu", 3): "User: Hey, are you conscious? Can you talk to me?\nAssistant: Hey! I'm here to help you with whatever you need. Are you feeling a bit overwhelmed or stressed? I'm here to listen and provide support.",
                ("cuda", None): "User: Hey, are you conscious? Can you talk to me?\nAssistant: Hey! I'm here to help you with whatever you need. Are you feeling a bit overwhelmed or stressed? I'm here to listen and provide support.",
            }
        )  # fmt: skip
        EXPECTED_TEXT = expected_texts.get_expectation()

        tokenizer = AutoTokenizer.from_pretrained("baidu/ERNIE-4.5-0.3B-PT", revision="refs/pr/3")
        model = Ernie4_5ForCausalLM.from_pretrained(
            "baidu/ERNIE-4.5-0.3B-PT",
            device_map="auto",
            dtype=torch.bfloat16,
        )

        prompt = "Hey, are you conscious? Can you talk to me?"
        messages = [{"role": "user", "content": prompt}]
        text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
        model_inputs = tokenizer([text], add_special_tokens=False, return_tensors="pt").to(model.device)

        generated_ids = model.generate(
            model_inputs.input_ids,
            max_new_tokens=128,
            do_sample=False,
        )
        generated_text = tokenizer.decode(generated_ids[0], skip_special_tokens=True).strip("\n")
        self.assertEqual(generated_text, EXPECTED_TEXT)
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/ernie4_5/test_modeling_ernie4_5.py",
"license": "Apache License 2.0",
"lines": 78,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:tests/models/ernie4_5_moe/test_modeling_ernie4_5_moe.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch Ernie4.5 MoE model."""
import tempfile
import unittest
import pytest
from transformers import BitsAndBytesConfig, is_torch_available
from transformers.models.ernie4_5_moe.modeling_ernie4_5_moe import load_balancing_loss_func
from transformers.testing_utils import (
cleanup,
is_flaky,
require_bitsandbytes,
require_flash_attn,
require_torch,
require_torch_accelerator,
require_torch_large_accelerator,
slow,
torch_device,
)
from transformers.trainer_utils import set_seed
if is_torch_available():
import torch
from transformers import (
AutoTokenizer,
Ernie4_5_MoeForCausalLM,
Ernie4_5_MoeModel,
)
from ...causal_lm_tester import CausalLMModelTest, CausalLMModelTester
class Ernie4_5_MoeModelTester(CausalLMModelTester):
    # Reuses the generic causal-LM test harness; only the base model class differs.
    if is_torch_available():
        base_model_class = Ernie4_5_MoeModel
@require_torch
class Ernie4_5_MoeModelTest(CausalLMModelTest, unittest.TestCase):
    """Common model test-suite wiring plus MoE-specific checks (FA2 equivalence, aux loss)."""

    # MoE routing means not every expert receives gradient on a tiny batch.
    test_all_params_have_gradient = False
    model_tester_class = Ernie4_5_MoeModelTester

    @require_flash_attn
    @require_torch_accelerator
    @pytest.mark.flash_attn_test
    @is_flaky()
    @slow
    def test_flash_attn_2_equivalence(self):
        """Check that flash-attention-2 and eager attention produce matching hidden states."""
        for model_class in self.all_model_classes:
            if not model_class._supports_flash_attn:
                self.skipTest(reason="Model does not support Flash Attention 2")
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            model = model_class(config)
            with tempfile.TemporaryDirectory() as tmpdirname:
                # Round-trip through disk so both variants load identical weights.
                model.save_pretrained(tmpdirname)
                model_fa = model_class.from_pretrained(
                    tmpdirname, dtype=torch.bfloat16, attn_implementation="flash_attention_2"
                )
                model_fa.to(torch_device)
                model = model_class.from_pretrained(tmpdirname, dtype=torch.bfloat16, attn_implementation="eager")
                model.to(torch_device)
                dummy_input = inputs_dict[model_class.main_input_name]
                dummy_input = dummy_input.to(torch_device)
                outputs = model(dummy_input, output_hidden_states=True)
                outputs_fa = model_fa(dummy_input, output_hidden_states=True)
                logits = outputs.hidden_states[-1]
                logits_fa = outputs_fa.hidden_states[-1]
                # higher tolerance, not sure where it stems from
                assert torch.allclose(logits_fa, logits, atol=1e-2, rtol=1e-2)

    @is_flaky(max_attempts=2)
    def test_load_balancing_loss(self):
        r"""
        Let's make sure we can actually compute the loss and do a backward on it.
        """
        set_seed(42)
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.num_experts = 3
        config.output_router_logits = True
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(config.pad_token_id).to(torch_device)
        model = Ernie4_5_MoeForCausalLM(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask)
        bs, seqlen = input_ids.shape
        # Router logits are flattened over (batch * seq_len) tokens.
        self.assertEqual(result.router_logits[0].shape, (bs * seqlen, config.num_experts))
        torch.testing.assert_close(result.aux_loss.cpu(), torch.tensor(2, dtype=torch.float32), rtol=1e-2, atol=1e-2)

        # First, we make sure that adding padding tokens doesn't change the loss
        # loss(input_ids, attention_mask=None) == loss(input_ids + padding, attention_mask=attention_mask_with_padding)
        # (This length is selected from experiments)
        pad_length = input_ids.shape[1] * 4
        # Add extra tokens to input_ids and mask them out to simulate left padding
        padding_block = torch.randint(
            low=0,
            high=config.vocab_size,
            size=(input_ids.shape[0], pad_length),
            dtype=torch.int32,
            device=torch_device,
        )
        # Make sure none of the synthetic padding tokens collide with the real pad id.
        padding_block[padding_block == config.pad_token_id] = (config.pad_token_id + 1) % config.vocab_size
        padded_input_ids = torch.cat((padding_block, input_ids), dim=1)
        padded_attention_mask = torch.zeros_like(padded_input_ids, dtype=torch.long)
        padded_attention_mask[:, pad_length:] = 1
        padded_result = model(padded_input_ids, attention_mask=padded_attention_mask)
        torch.testing.assert_close(result.aux_loss.cpu(), padded_result.aux_loss.cpu(), rtol=1e-4, atol=1e-4)

        # We make sure that masking can change the loss using a deterministic synthetic example.
        # This avoids flakiness when the model routes tokens uniformly.
        num_experts = 3
        top_k = 1
        synthetic_logits = torch.tensor(
            [
                [10.0, 0.0, 0.0],  # unmasked token -> expert 0
                [10.0, 0.0, 0.0],  # unmasked token -> expert 0
                [0.0, 10.0, 0.0],  # masked token -> expert 1
                [0.0, 10.0, 0.0],  # masked token -> expert 1
            ],
            device=torch_device,
        )
        synthetic_mask = torch.tensor([[1, 1, 0, 0]], device=torch_device)
        masked_loss = load_balancing_loss_func((synthetic_logits,), num_experts, top_k, synthetic_mask)
        unmasked_loss = load_balancing_loss_func((synthetic_logits,), num_experts, top_k, attention_mask=None)
        self.assertNotAlmostEqual(masked_loss.item(), unmasked_loss.item(), places=6)
@slow
@require_torch
class Ernie4_5_MoeIntegrationTest(unittest.TestCase):
    """Slow integration tests running real ERNIE-4.5-MoE checkpoints end to end."""

    @classmethod
    def setUpClass(cls):
        # Lazily populated by `get_large_model` / `get_small_model`.
        cls.model = None

    @classmethod
    def tearDownClass(cls):
        del cls.model
        cleanup(torch_device, gc_collect=True)

    def setUp(self):
        # Fixed: this was `setup` (lowercase), which `unittest.TestCase` never
        # invokes, so the intended pre-test cleanup silently did not run.
        cleanup(torch_device, gc_collect=True)

    def tearDown(self):
        cleanup(torch_device, gc_collect=True)

    @classmethod
    def get_large_model(cls):
        """Load the 21B-A3B checkpoint (4-bit quantized) and cache it on the class."""
        cls.model = Ernie4_5_MoeForCausalLM.from_pretrained(
            "baidu/ERNIE-4.5-21B-A3B-PT",
            device_map="auto",
            experts_implementation="eager",
            quantization_config=BitsAndBytesConfig(load_in_4bit=True),
        )
        return cls.model

    @classmethod
    def get_small_model(cls):
        """Load the small internal-testing MoE checkpoint and cache it on the class."""
        cls.model = Ernie4_5_MoeForCausalLM.from_pretrained(
            "hf-internal-testing/ERNIE-4.5-Small-Moe", device_map="auto", dtype="auto", experts_implementation="eager"
        )
        return cls.model

    @require_torch_large_accelerator(memory=48)  # Tested on A100 but requires around 48GiB
    @require_bitsandbytes
    def test_model_21b_a3b_generation(self):
        EXPECTED_TEXT_COMPLETION = "User: Hey, are you conscious? Can you talk to me?\nAssistant: \nI don't have consciousness in the way humans do. I don't feel emotions, have thoughts, or experience awareness. However, I'm"  # fmt: skip
        model = self.get_large_model()
        tokenizer = AutoTokenizer.from_pretrained("baidu/ERNIE-4.5-21B-A3B-PT")

        prompt = "Hey, are you conscious? Can you talk to me?"
        messages = [{"role": "user", "content": prompt}]
        text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
        model_inputs = tokenizer([text], add_special_tokens=False, return_tensors="pt").to(model.device)

        generated_ids = model.generate(
            model_inputs.input_ids,
            max_new_tokens=32,
            do_sample=False,
        )
        text = tokenizer.decode(generated_ids[0], skip_special_tokens=True).strip("\n")
        self.assertEqual(EXPECTED_TEXT_COMPLETION, text)

    def test_shortened_model_generation(self):
        # This is gibberish which is expected as the model are the first x layers of the original 28B model
        EXPECTED_TEXT_COMPLETION = 'User: Hey, are you conscious? Can you talk to me?\nAssistant: 不了的 tongues说话 dagat绵席裹着头phones<mask:11>odikèkèk<mask:11><mask:11>bun褶席席地说起来这么说的话的话retti upside upsideolate疡疡疡'  # fmt: skip
        model = self.get_small_model()
        tokenizer = AutoTokenizer.from_pretrained("baidu/ERNIE-4.5-21B-A3B-PT")

        prompt = "Hey, are you conscious? Can you talk to me?"
        messages = [{"role": "user", "content": prompt}]
        text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
        model_inputs = tokenizer([text], add_special_tokens=False, return_tensors="pt").to(model.device)

        generated_ids = model.generate(
            model_inputs.input_ids,
            max_new_tokens=32,
            do_sample=False,
        )
        text = tokenizer.decode(generated_ids[0], skip_special_tokens=True).strip("\n")
        self.assertEqual(EXPECTED_TEXT_COMPLETION, text)
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/ernie4_5_moe/test_modeling_ernie4_5_moe.py",
"license": "Apache License 2.0",
"lines": 189,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:src/transformers/models/falcon_mamba/modular_falcon_mamba.py | # Copyright 2024 Tri Dao, Albert Gu, Technological Innovation Institute and HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch FALCONMAMBA model."""
import torch
from torch import nn
from ... import initialization as init
from ...utils import auto_docstring, logging
from ...utils.import_utils import is_mambapy_available, is_torch_greater_or_equal, is_torchdynamo_compiling, is_tracing
from ..mamba.configuration_mamba import MambaConfig
from ..mamba.modeling_mamba import (
MambaBlock,
MambaCache,
MambaCausalLMOutput,
MambaForCausalLM,
MambaMixer,
MambaModel,
MambaOutput,
MambaPreTrainedModel,
MambaRMSNorm,
)
logger = logging.get_logger(__name__)

# `associative_scan` (used for the parallel scan under torch.compile) only exists
# in torch >= 2.9; keep a `None` sentinel otherwise so call sites can feature-test it.
if is_torch_greater_or_equal("2.9.0"):
    from torch._higher_order_ops.associative_scan import associative_scan
else:
    associative_scan = None

# Optional mamba.py backend used as a training-time fallback when the fused CUDA
# kernels are unavailable (see `FalconMambaMixer.slow_forward`).
if is_mambapy_available():
    from mambapy.pscan import pscan
else:
    pscan = None

# Placeholders for the fused CUDA kernels. They are all `None` in this module,
# which disables the fast path in `FalconMambaMixer.forward`.
selective_state_update, selective_scan_fn, causal_conv1d_fn, causal_conv1d_update, falcon_mamba_inner_fn = (
    None,
    None,
    None,
    None,
    None,
)
class FalconMambaConfig(MambaConfig):
    """
    This is the configuration class to store the configuration of a [`FalconMambaModel`]. It is used to instantiate a FALCON_MAMBA
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
    defaults will yield a similar configuration to that of the FALCON_MAMBA
    [tiiuae/falcon-mamba-7b](https://huggingface.co/tiiuae/falcon-mamba-7b) architecture.

    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 50280):
            Vocabulary size of the FALCON_MAMBA model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`FalconMambaModel`].
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the embeddings and hidden states.
        state_size (`int`, *optional*, defaults to 16): shape of the state space latents.
        num_hidden_layers (`int`, *optional*, defaults to 32):
            Number of hidden layers in the model.
        layer_norm_epsilon (`float`, *optional*, defaults to 1e-05):
            The epsilon to use in the layer normalization layers.
        pad_token_id (`int`, *optional*, defaults to 0):
            Padding token id.
        bos_token_id (`int`, *optional*, defaults to 0):
            The id of the beginning of sentence token in the vocabulary.
        eos_token_id (`int`, *optional*, defaults to 0):
            The id of the end of sentence token in the vocabulary.
        expand (`int`, *optional*, defaults to 2): Expanding factor used to determine the intermediate size.
        conv_kernel (`int`, *optional*, defaults to 4): Size of the convolution kernel.
        use_bias (`bool`, *optional*, defaults to `False`):
            Whether or not to use bias in ["in_proj", "out_proj"] of the mixer block
        use_conv_bias (`bool`, *optional*, defaults to `True`):
            Whether or not to use bias in the convolution layer of the mixer block.
        hidden_act (`str`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the decoder.
        initializer_range (`float`, *optional*, defaults to 0.1):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        residual_in_fp32 (`bool`, *optional*, defaults to `True`):
            Whether or not residuals should be in `float32`. If set to `False` residuals will keep the same `dtype` as the rest of the model
        time_step_rank (`Union[int,str]`, *optional*, defaults to `"auto"`):
            Rank of the discretization projection matrix. `"auto"` means that it will default to `math.ceil(self.hidden_size / 16)`
        time_step_scale (`float`, *optional*, defaults to 1.0):
            Scale used to scale `dt_proj.bias`.
        time_step_min (`float`, *optional*, defaults to 0.001):
            Minimum `time_step` used to bound `dt_proj.bias`.
        time_step_max (`float`, *optional*, defaults to 0.1):
            Maximum `time_step` used to bound `dt_proj.bias`.
        time_step_init_scheme (`str`, *optional*, defaults to `"random"`):
            Init scheme used for `dt_proj.weight`. Should be one of `["random","uniform"]`
        time_step_floor (`float`, *optional*, defaults to 0.0001):
            Minimum clamping value of the `dt_proj.bias` layer initialization.
        rescale_prenorm_residual (`bool`, *optional*, defaults to `False`):
            Whether or not to rescale `out_proj` weights when initializing.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the cache should be used.
        use_falcon_mambapy (`bool`, *optional*, defaults to `False`):
            This argument corresponds to `use_mambapy` in MambaConfig.
            Determines the fallback strategy during training if the CUDA-based official implementation of Mamba is not available. If `True`, the mamba.py implementation is used. If `False`, the naive and slower implementation is used. Consider switching to the naive version if memory is limited.
        use_associative_scan (`bool`, *optional*, defaults to `True`):
            Whether to use PyTorch's `torch._higher_order_ops.associative_scan` for the parallel scan instead of the naive
            sequential implementation. The associative scan is only active during `torch.compile` tracing and
            requires torch >= 2.9.0. Both paths are tested to produce numerically identical results (see
            `test_associative_scan_matches_sequential`). Set to `False` to fall back to the sequential loop.
        mixer_rms_eps (`float`, *optional*, defaults to 1e-06):
            The RMS norm epsilon value that is used in the Mixer RMS norm for B, C and dt states.
        tie_word_embeddings (`bool`, *optional*, defaults to `True`):
            Whether to tie weight embeddings

    Example:

    ```python
    >>> from transformers import FalconMambaConfig, FalconMambaModel

    >>> # Initializing a FalconMamba configuration
    >>> configuration = FalconMambaConfig()

    >>> # Initializing a model (with random weights) from the configuration
    >>> model = FalconMambaModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    def __init__(
        self,
        vocab_size=50280,
        hidden_size=768,
        state_size=16,
        num_hidden_layers=32,
        layer_norm_epsilon=1e-5,
        pad_token_id=0,
        bos_token_id=0,
        eos_token_id=0,
        expand=2,
        conv_kernel=4,
        use_bias=False,
        use_conv_bias=True,
        hidden_act="silu",
        initializer_range=0.1,
        residual_in_fp32=True,
        time_step_rank="auto",
        time_step_scale=1.0,
        time_step_min=0.001,
        time_step_max=0.1,
        time_step_init_scheme="random",
        time_step_floor=1e-4,
        rescale_prenorm_residual=False,
        use_cache=True,
        use_falcon_mambapy=False,
        use_associative_scan=True,
        mixer_rms_eps=1e-6,
        tie_word_embeddings=True,
        **kwargs,
    ):
        # Everything except `mixer_rms_eps` is forwarded verbatim to `MambaConfig.__init__`.
        super().__init__(
            vocab_size=vocab_size,
            hidden_size=hidden_size,
            state_size=state_size,
            num_hidden_layers=num_hidden_layers,
            layer_norm_epsilon=layer_norm_epsilon,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            expand=expand,
            conv_kernel=conv_kernel,
            use_bias=use_bias,
            use_conv_bias=use_conv_bias,
            hidden_act=hidden_act,
            initializer_range=initializer_range,
            residual_in_fp32=residual_in_fp32,
            time_step_rank=time_step_rank,
            time_step_scale=time_step_scale,
            time_step_min=time_step_min,
            time_step_max=time_step_max,
            time_step_init_scheme=time_step_init_scheme,
            time_step_floor=time_step_floor,
            rescale_prenorm_residual=rescale_prenorm_residual,
            use_cache=use_cache,
            use_falcon_mambapy=use_falcon_mambapy,
            use_associative_scan=use_associative_scan,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )
        # Epsilon for the weight-free RMS norm applied to B, C and dt inside the mixer.
        self.mixer_rms_eps = mixer_rms_eps

        # This is needed since mamba overrides the intermediate_size attribute
        self.intermediate_size = (
            int(expand * self.hidden_size)
            if kwargs.get("intermediate_size") is None
            else kwargs.get("intermediate_size")
        )
class FalconMambaCache(MambaCache):
    """
    Cache for falcon_mamba model which does not have attention mechanism and key value states.

    Arguments:
        config (`PreTrainedConfig`):
            The configuration file defining the shape-related attributes required to initialize the static cache.
        max_batch_size (`int`):
            The maximum batch size with which the model will be used. Note that a new instance must be instantiated if
            a smaller batch size is used.
        dtype (`torch.dtype`, *optional*, defaults to `torch.float16`):
            The default `dtype` to use when initializing the layer.
        device (`torch.device` or `str`, *optional*):
            The device on which the cache should be initialized. Should be the same as the layer.

    Example:

    ```python
    >>> import torch
    >>> from transformers import AutoTokenizer, FalconMambaForCausalLM, FalconMambaCache

    >>> model = FalconMambaForCausalLM.from_pretrained("tiiuae/falcon-mamba-7b")
    >>> tokenizer = AutoTokenizer.from_pretrained("tiiuae/falcon-mamba-7b")

    >>> inputs = tokenizer(text="My name is FalconMamba", return_tensors="pt")

    >>> # Prepare a cache class and pass it to model's forward
    >>> cache_params = FalconMambaCache(config=model.config, max_batch_size=1, device=model.device, dtype=model.dtype)
    >>> cache_position = torch.arange(len(inputs["input_ids"][0]), device=model.device)  # sequence length
    >>> outputs = model(**inputs, cache_params=cache_params, cache_position=cache_position, use_cache=True)
    >>> outputs.cache_params
    ```
    """
def rms_forward(hidden_states, variance_epsilon=1e-6):
    """
    Weight-free RMS normalization over the last dimension.

    The computation is performed in float32 for numerical stability and the
    result is cast back to the input dtype. `FalconMambaRMSNorm` multiplies the
    output of this function by its learnable weight.

    Args:
        hidden_states (`torch.Tensor`):
            Hidden states to normalize
        variance_epsilon (`float`):
            The eps value to add in the square root scaling factor
    """
    original_dtype = hidden_states.dtype
    states_fp32 = hidden_states.to(torch.float32)
    mean_square = states_fp32.pow(2).mean(dim=-1, keepdim=True)
    normalized = states_fp32 * torch.rsqrt(mean_square + variance_epsilon)
    return normalized.to(original_dtype)
class FalconMambaMixer(MambaMixer):
    """
    FalconMamba selective state-space mixer. Same structure as `MambaMixer`,
    except that the B, C and dt projections are passed through a weight-free RMS
    norm (`rms_forward`) before discretization.
    """

    def warn_slow_implementation(self):
        # The fused kernels are all `None` in this module (module-level placeholders),
        # so this warning path is taken unless kernels are provided externally.
        is_fast_path_available = all(
            (selective_state_update, selective_scan_fn, causal_conv1d_fn, causal_conv1d_update, falcon_mamba_inner_fn)
        )
        if not is_fast_path_available:
            if self.use_falcon_mambapy:
                if is_mambapy_available():
                    logger.warning_once(
                        "The fast path is not available because one of `(selective_state_update, selective_scan_fn, causal_conv1d_fn, causal_conv1d_update, mamba_inner_fn)`"
                        " is None. Falling back to the mamba.py backend. To install follow https://github.com/state-spaces/mamba/#installation for mamba-ssm and"
                        " https://github.com/Dao-AILab/causal-conv1d or `pip install kernels` for causal-conv1d"
                    )
                else:
                    raise ImportError(
                        "use_mambapy is set to True but the mambapy package is not installed. To install it follow https://github.com/alxndrTL/mamba.py."
                    )
            else:
                logger.warning_once(
                    "The fast path is not available because one of `(selective_state_update, selective_scan_fn, causal_conv1d_fn, causal_conv1d_update, mamba_inner_fn)`"
                    " is None. Falling back to the sequential implementation of Mamba, as use_mambapy is set to False. To install follow https://github.com/state-spaces/mamba/#installation for mamba-ssm and"
                    " https://github.com/Dao-AILab/causal-conv1d or `pip install kernels` for causal-conv1d. For the mamba.py backend, follow https://github.com/alxndrTL/mamba.py."
                )

    def __init__(self, config: FalconMambaConfig, layer_idx: int):
        super().__init__(config, layer_idx)
        # Triton expects to pass RMS weights even if they are non learnable, thus we need to create these weights here
        self.register_buffer(
            "b_c_rms", torch.nn.Parameter(torch.ones(self.ssm_state_size), requires_grad=False), persistent=False
        )
        self.register_buffer(
            "dt_rms", torch.nn.Parameter(torch.ones(self.intermediate_size), requires_grad=False), persistent=False
        )
        # Epsilon for the weight-free RMS norm applied to B, C and dt.
        self.rms_eps = config.mixer_rms_eps

    def cuda_kernels_forward(
        self,
        hidden_states: torch.Tensor,
        cache_params: FalconMambaCache | None = None,
        cache_position: torch.LongTensor | None = None,
        attention_mask: torch.LongTensor | None = None,
    ):
        """Forward pass using the fused CUDA kernels (only reachable when they are installed)."""
        # 1. Gated MLP's linear projection
        projected_states = self.in_proj(hidden_states).transpose(1, 2)

        if self.training and cache_params is None:  # Doesn't support outputting the states -> used for training
            contextualized_states = falcon_mamba_inner_fn(
                projected_states,
                self.conv1d.weight,
                self.conv1d.bias if self.use_conv_bias else None,
                self.x_proj.weight,
                self.dt_proj.weight,
                self.out_proj.weight,
                self.out_proj.bias.float() if self.use_bias else None,
                -torch.exp(self.A_log.float()),
                None,  # input-dependent B
                None,  # input-dependent C
                self.D.float(),
                delta_bias=self.dt_proj.bias.float(),
                delta_softplus=True,
                b_rms_weight=self.b_c_rms,
                c_rms_weight=self.b_c_rms,
                dt_rms_weight=self.dt_rms,
                b_c_dt_rms_eps=self.rms_eps,
            )
        else:
            hidden_states, gate = projected_states.chunk(2, dim=1)

            if attention_mask is not None:
                hidden_states = hidden_states * attention_mask.unsqueeze(1)

            # 2. Convolution sequence transformation
            conv_weights = self.conv1d.weight.view(self.conv1d.weight.size(0), self.conv1d.weight.size(2))
            if cache_params is not None and cache_position[0] > 0:
                # Single-token decoding step: update the rolling conv state in place.
                hidden_states = causal_conv1d_update(
                    hidden_states.squeeze(-1),
                    cache_params.conv_states[self.layer_idx],
                    conv_weights,
                    self.conv1d.bias,
                    self.activation,
                )
                hidden_states = hidden_states.unsqueeze(-1)
            else:
                # Prefill: left-pad the sequence to the conv kernel size before caching.
                if cache_params is not None:
                    conv_states = nn.functional.pad(
                        hidden_states, (self.conv_kernel_size - hidden_states.shape[-1], 0)
                    )
                    cache_params.update_conv_state(self.layer_idx, conv_states, cache_position)
                hidden_states = causal_conv1d_fn(
                    hidden_states, conv_weights, self.conv1d.bias, activation=self.activation
                )

            if attention_mask is not None:
                hidden_states = hidden_states * attention_mask.unsqueeze(1)

            # 3. State Space Model sequence transformation
            # 3.a. input varying initialization of time_step, B and C
            ssm_parameters = self.x_proj(hidden_states.transpose(1, 2))
            time_step, B, C = torch.split(
                ssm_parameters, [self.time_step_rank, self.ssm_state_size, self.ssm_state_size], dim=-1
            )

            # FalconMamba-specific: weight-free RMS norm of B, C and dt.
            B = rms_forward(B, variance_epsilon=self.rms_eps)
            C = rms_forward(C, variance_epsilon=self.rms_eps)
            time_step = rms_forward(time_step, variance_epsilon=self.rms_eps)

            # In case the model has been quantized, we need a hack to properly call the `nn.Linear` module
            # at the price of a small overhead.
            if hasattr(self.config, "_is_quantized"):
                discrete_time_step = (self.dt_proj(time_step) - self.dt_proj.bias).transpose(1, 2)
            else:
                discrete_time_step = self.dt_proj.weight @ time_step.transpose(1, 2)

            A = -torch.exp(self.A_log.float())

            # 3.c perform the recurrence y ← SSM(A, B, C)(x)
            time_proj_bias = self.dt_proj.bias.float() if hasattr(self.dt_proj, "bias") else None
            if cache_params is not None and cache_position[0] > 0:
                # Decoding: advance the SSM state by one step.
                scan_outputs = selective_state_update(
                    cache_params.ssm_states[self.layer_idx],
                    hidden_states[..., 0],
                    discrete_time_step[..., 0],
                    A,
                    B[:, 0],
                    C[:, 0],
                    self.D,
                    gate[..., 0],
                    time_proj_bias,
                    dt_softplus=True,
                ).unsqueeze(-1)
            else:
                # Prefill: full selective scan over the sequence.
                scan_outputs, ssm_state = selective_scan_fn(
                    hidden_states,
                    discrete_time_step,
                    A,
                    B.transpose(1, 2),
                    C.transpose(1, 2),
                    self.D.float(),
                    gate,
                    time_proj_bias,
                    delta_softplus=True,
                    return_last_state=True,
                )
                if ssm_state is not None and cache_params is not None:
                    cache_params.update_ssm_state(self.layer_idx, ssm_state)

            # 4. Final linear projection
            contextualized_states = self.out_proj(scan_outputs.transpose(1, 2))
        return contextualized_states

    def slow_forward(
        self,
        input_states,
        cache_params: FalconMambaCache | None = None,
        cache_position: torch.LongTensor | None = None,
        attention_mask: torch.LongTensor | None = None,
    ):
        """Pure-PyTorch forward pass used when the fused kernels are unavailable."""
        batch_size, seq_len, _ = input_states.shape
        dtype = input_states.dtype

        # 1. Gated MLP's linear projection
        projected_states = self.in_proj(input_states).transpose(1, 2)  # [batch, 2 * intermediate_size, seq_len]
        hidden_states, gate = projected_states.chunk(2, dim=1)

        if attention_mask is not None:
            hidden_states = hidden_states * attention_mask.unsqueeze(1)

        # 2. Convolution sequence transformation
        if cache_params is not None:
            ssm_state = cache_params.ssm_states[self.layer_idx].clone()
            ssm_state = ssm_state.to(hidden_states.device)
            # use `cache_position.shape[0]` to check whether we are in prefill
            # stage, it's equivalent to check `cache_position[0] == 0`, which
            # breaks dynamo fullgraph constraints
            if cache_position is not None and cache_position.shape[0] == self.conv_kernel_size:
                conv_state = nn.functional.pad(hidden_states, (self.conv_kernel_size - hidden_states.shape[-1], 0))
                cache_params.update_conv_state(self.layer_idx, conv_state, cache_position)
                hidden_states = self.act(
                    self.conv1d(hidden_states)[..., :seq_len]
                )  # [batch, intermediate_size, seq_len]
            else:
                conv_state = cache_params.update_conv_state(self.layer_idx, hidden_states, cache_position)
                conv_state = conv_state.to(self.conv1d.weight.device)
                # Manual depthwise convolution over the cached window (single decode step).
                hidden_states = torch.sum(conv_state * self.conv1d.weight[:, 0, :], dim=-1)
                if self.use_conv_bias:
                    hidden_states += self.conv1d.bias
                hidden_states = (
                    self.act(hidden_states).to(dtype).unsqueeze(-1)
                )  # [batch, intermediate_size, 1] : decoding
        else:
            ssm_state = torch.zeros(
                (batch_size, self.intermediate_size, self.ssm_state_size), device=hidden_states.device, dtype=dtype
            )
            hidden_states = self.act(self.conv1d(hidden_states)[..., :seq_len])  # [batch, intermediate_size, seq_len]

        if attention_mask is not None:
            hidden_states = hidden_states * attention_mask.unsqueeze(1)

        # 3. State Space Model sequence transformation
        # 3.a. Selection: [batch, seq_len, self.time_step_rank + self.ssm_state_size * 2]
        ssm_parameters = self.x_proj(hidden_states.transpose(1, 2))
        time_step, B, C = torch.split(
            ssm_parameters, [self.time_step_rank, self.ssm_state_size, self.ssm_state_size], dim=-1
        )

        # FalconMamba-specific: weight-free RMS norm of B, C and dt.
        B = rms_forward(B, variance_epsilon=self.rms_eps)
        C = rms_forward(C, variance_epsilon=self.rms_eps)
        time_step = rms_forward(time_step, variance_epsilon=self.rms_eps)

        discrete_time_step = self.dt_proj(time_step)  # [batch, seq_len, intermediate_size]
        discrete_time_step = nn.functional.softplus(discrete_time_step).transpose(
            1, 2
        )  # [batch, intermediate_size, seq_len]

        # 3.b. Discretization: B and C to [batch, seq_len, intermediate_size, ssm_state_size] (SRAM)
        A = -torch.exp(self.A_log.float())  # [intermediate_size, ssm_state_size]
        discrete_A = torch.exp(
            A[None, :, None, :] * discrete_time_step[:, :, :, None]
        )  # [batch, intermediate_size, seq_len, ssm_state_size]
        discrete_B = (
            discrete_time_step[:, :, :, None] * B[:, None, :, :].float()
        )  # [batch, intermediate_size, seq_len, ssm_state_size]
        deltaB_u = discrete_B * hidden_states[:, :, :, None].float()

        # 3.c perform the recurrence y ← SSM(A, B, C)(x)
        if self.use_falcon_mambapy and self.training and cache_params is None:
            # mamba.py parallel scan backend (training only, no cache).
            hs = pscan(
                discrete_A.transpose(1, 2), deltaB_u.transpose(1, 2)
            )  # [batch, seq_len, intermediate_size, ssm_state_size]
            scan_output = (hs @ C.unsqueeze(-1)).squeeze(3).transpose(1, 2)  # [batch, intermediate_size, seq_len]
            scan_output = scan_output + hidden_states * self.D[None, :, None]
            scan_output = scan_output * self.act(gate)
        else:
            # Use associative_scan for parallel computation when available
            if (
                self.use_associative_scan
                and associative_scan is not None
                and is_tracing(hidden_states)
                and cache_params is None
            ):

                def combine_fn(left, right):
                    # First-order linear recurrence combinator: h' = a*h + b.
                    a_left, b_left = left
                    a_right, b_right = right
                    return (a_left * a_right, a_right * b_left + b_right)

                combine_mode = "pointwise" if discrete_A.device.type in ("cuda", "xpu") else "generic"
                _, all_h = associative_scan(combine_fn, (discrete_A, deltaB_u), dim=2, combine_mode=combine_mode)
                # all_h: [B, D, S, N] -> output: [B, D, S]
                scan_output = (
                    torch.matmul(all_h.permute(0, 2, 1, 3).to(dtype), C.unsqueeze(-1)).squeeze(-1).permute(0, 2, 1)
                )
                ssm_state = all_h[:, :, -1, :]
            else:
                # Sequential loop for decoding or when associative_scan unavailable
                scan_outputs = []
                for i in range(seq_len):
                    ssm_state = (
                        discrete_A[:, :, i, :] * ssm_state + deltaB_u[:, :, i, :]
                    )  # [batch, intermediate_size, ssm_state]
                    scan_output = torch.matmul(
                        ssm_state.to(dtype), C[:, i, :].unsqueeze(-1)
                    )  # [batch, intermediate_size, 1]
                    scan_outputs.append(scan_output[:, :, 0])
                scan_output = torch.stack(scan_outputs, dim=-1)  # [batch, intermediate_size, seq_len]
            # Skip connection through D, then gating.
            scan_output = scan_output + (hidden_states * self.D[None, :, None])
            scan_output = scan_output * self.act(gate)

            if cache_params is not None:
                cache_params.update_ssm_state(self.layer_idx, ssm_state)

        # 4. Final linear projection
        contextualized_states = self.out_proj(scan_output.transpose(1, 2))  # [batch, seq_len, hidden_size]
        return contextualized_states

    def forward(
        self,
        hidden_states,
        cache_params: FalconMambaCache | None = None,
        cache_position: torch.LongTensor | None = None,
        attention_mask: torch.LongTensor | None = None,
    ):
        """Dispatch to the fused-kernel path when possible, otherwise the pure-PyTorch path."""
        is_fast_path_available = all(
            (selective_state_update, selective_scan_fn, causal_conv1d_fn, causal_conv1d_update, falcon_mamba_inner_fn)
        )
        if is_fast_path_available and "cuda" in self.x_proj.weight.device.type and not is_torchdynamo_compiling():
            return self.cuda_kernels_forward(hidden_states, cache_params, cache_position, attention_mask)
        return self.slow_forward(hidden_states, cache_params, cache_position, attention_mask)
class FalconMambaRMSNorm(MambaRMSNorm):
    def forward(self, hidden_states):
        """Apply the weight-free RMS norm, then scale by the learnable weight."""
        normalized = rms_forward(hidden_states, variance_epsilon=self.variance_epsilon)
        weight = self.weight.to(hidden_states.device)
        return weight * normalized
class FalconMambaBlock(MambaBlock):
    # Behaviorally identical to `MambaBlock`; subclassed so the modular converter
    # generates FalconMamba-named layers (which then pick up `FalconMambaMixer`).
    pass
@auto_docstring
class FalconMambaPreTrainedModel(MambaPreTrainedModel):
    def _init_weights(self, module):
        """Initialize module weights, additionally restoring the mixer's fixed RMS buffers."""
        super()._init_weights(module)
        if isinstance(module, FalconMambaMixer):
            # b_c_rms / dt_rms are non-learnable all-ones buffers (see
            # `FalconMambaMixer.__init__`); reset them here so weight
            # initialization leaves them in their expected state.
            init.ones_(module.b_c_rms)
            init.ones_(module.dt_rms)
class FalconMambaOutput(MambaOutput):
    # Identical to `MambaOutput`; subclassed only to expose a FalconMamba-named output type.
    pass
class FalconMambaCausalLMOutput(MambaCausalLMOutput):
    # Identical to `MambaCausalLMOutput`; subclassed only to expose a FalconMamba-named output type.
    pass
class FalconMambaModel(MambaModel, FalconMambaPreTrainedModel):
    def __init__(self, config):
        """Build the backbone: token embeddings, stacked FalconMamba blocks, final RMS norm."""
        FalconMambaPreTrainedModel.__init__(self, config)

        self.embeddings = nn.Embedding(config.vocab_size, config.hidden_size)
        block_stack = [
            FalconMambaBlock(config, layer_idx=layer_idx) for layer_idx in range(config.num_hidden_layers)
        ]
        self.layers = nn.ModuleList(block_stack)
        self.gradient_checkpointing = False
        self.norm_f = FalconMambaRMSNorm(config.hidden_size, eps=config.layer_norm_epsilon)

        # Initialize weights and apply final processing
        self.post_init()

    def load_hook(self, state_dict, prefix, *args):
        # NOTE(review): raising AttributeError is the modular-converter idiom for
        # dropping an inherited method — presumably FalconMamba checkpoints never
        # needed Mamba's legacy key remapping; confirm against the converter docs.
        raise AttributeError("Not needed for FalconMamba")
class FalconMambaForCausalLM(MambaForCausalLM):
    # Identical to `MambaForCausalLM`; subclassed so the generated modeling file
    # wires in the FalconMamba backbone and output types.
    pass
# Public symbols re-exported from this modular definition file.
__all__ = [
    "FalconMambaForCausalLM",
    "FalconMambaModel",
    "FalconMambaPreTrainedModel",
    "FalconMambaCache",
    "FalconMambaConfig",
]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/falcon_mamba/modular_falcon_mamba.py",
"license": "Apache License 2.0",
"lines": 532,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/glm4_moe/modular_glm4_moe.py | # Copyright 2025 The ZhipuAI Inc. team and HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch GLM-4-MOE model."""
import torch
from torch import nn
from ...configuration_utils import PreTrainedConfig
from ...modeling_rope_utils import RopeParameters
from ...utils import logging
from ..cohere.modeling_cohere import CohereAttention
from ..deepseek_v3.modeling_deepseek_v3 import (
DeepseekV3DecoderLayer,
DeepseekV3ForCausalLM,
DeepseekV3MLP,
DeepseekV3Model,
DeepseekV3PreTrainedModel,
DeepseekV3RMSNorm,
DeepseekV3TopkRouter,
)
from ..glm.modeling_glm import GlmRotaryEmbedding
from ..gpt_neox.modeling_gpt_neox import apply_rotary_pos_emb # noqa
logger = logging.get_logger(__name__)
class Glm4MoeConfig(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`Glm4MoeModel`]. It is used to instantiate a
Glm4Moe model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of [THUDM/GLM-4-100B-A10B](https://huggingface.co/THUDM/GLM-4-100B-A10B).
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 151552):
Vocabulary size of the Glm4Moe model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`Glm4MoeModel`]
hidden_size (`int`, *optional*, defaults to 4096):
Dimension of the hidden representations.
intermediate_size (`int`, *optional*, defaults to 10944):
Dimension of the MLP representations.
num_hidden_layers (`int`, *optional*, defaults to 46):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 96):
Number of attention heads for each attention layer in the Transformer encoder.
num_key_value_heads (`int`, *optional*, defaults to 8):
This is the number of key_value heads that should be used to implement Grouped Query Attention. If
`num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
`num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
by meanpooling all the original heads within that group. For more details, check out [this
paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to `32`.
hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
The non-linear activation function (function or string) in the decoder.
max_position_embeddings (`int`, *optional*, defaults to 131072):
The maximum sequence length that this model might ever be used with.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
rms_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the rms normalization layers.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
tie_word_embeddings (`bool`, *optional*, defaults to `False`):
Whether the model's input and output word embeddings should be tied.
rope_parameters (`RopeParameters`, *optional*):
Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain
a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE
with longer `max_position_embeddings`.
attention_bias (`bool`, defaults to `False`, *optional*, defaults to `False`):
Whether to use a bias in the query, key, value and output projection layers during self-attention.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
moe_intermediate_size (`int`, *optional*, defaults to 1408):
Intermediate size of the routed expert.
num_experts_per_tok (`int`, *optional*, defaults to 8):
number of experts per token.
n_shared_experts (`int`, *optional*, defaults to 1):
Number of shared experts.
n_routed_experts (`int`, *optional*, defaults to 128):
Number of routed experts.
routed_scaling_factor (`float`, *optional*, defaults to 1.0):
Scaling factor or routed experts.
n_group (`int`, *optional*, defaults to 1):
Number of groups for routed experts.
topk_group (`int`, *optional*, defaults to 1):
Number of selected groups for each token(for each token, ensuring the selected experts is only within `topk_group` groups).
first_k_dense_replace (`int`, *optional*, defaults to 1):
Number of dense layers in shallow layers(embed->dense->dense->...->dense->moe->moe...->lm_head).
\--k dense layers--/
norm_topk_prob (`bool`, *optional*, defaults to `True`):
Whether to normalize the topk probabilities.
use_qk_norm (`bool`, *optional*, defaults to `False`):
Whether to use query-key normalization in the attention
bos_token_id (`int`, *optional*):
Beginning of stream token id.
eos_token_id (`int`, *optional*):
End of stream token id.
pad_token_id (`int`, *optional*):
Padding token id.
```python
>>> from transformers import Glm4MoeModel, Glm4MoeConfig
>>> # Initializing a Glm4Moe style configuration
>>> configuration = Glm4MoeConfig()
>>> # Initializing a model from the GLM-4-MOE-100B-A10B style configuration
>>> model = Glm4MoeModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "glm4_moe"
keys_to_ignore_at_inference = ["past_key_values"]
# Default tensor parallel plan for base model `Glm4Moe`
base_model_tp_plan = {
"layers.*.self_attn.q_proj": "colwise",
"layers.*.self_attn.k_proj": "colwise",
"layers.*.self_attn.v_proj": "colwise",
"layers.*.self_attn.o_proj": "rowwise",
"layers.*.mlp.experts.gate_up_proj": "rowwise",
"layers.*.mlp.experts.down_proj": "rowwise",
"layers.*.mlp.gate_proj": "colwise",
"layers.*.mlp.up_proj": "colwise",
"layers.*.mlp.down_proj": "rowwise",
}
base_model_pp_plan = {
"embed_tokens": (["input_ids"], ["inputs_embeds"]),
"layers": (["hidden_states", "attention_mask"], ["hidden_states"]),
"norm": (["hidden_states"], ["hidden_states"]),
}
attribute_map = {
"num_local_experts": "n_routed_experts",
}
def __init__(
self,
vocab_size: int | None = 151552,
hidden_size: int | None = 4096,
intermediate_size: int | None = 10944,
num_hidden_layers: int | None = 46,
num_attention_heads: int | None = 96,
num_key_value_heads: int | None = 8,
hidden_act: str | None = "silu",
max_position_embeddings: int | None = 131072,
initializer_range: float | None = 0.02,
rms_norm_eps: int | None = 1e-5,
use_cache: bool | None = True,
tie_word_embeddings: bool | None = False,
rope_parameters: RopeParameters | dict[str, RopeParameters] | None = None,
attention_bias: bool | None = False,
attention_dropout: float | None = 0.0,
moe_intermediate_size: int | None = 1408,
num_experts_per_tok: int | None = 8,
n_shared_experts: int | None = 1,
n_routed_experts: int | None = 128,
routed_scaling_factor: float | None = 1.0,
n_group: int | None = 1,
topk_group: int | None = 1,
first_k_dense_replace: int | None = 1,
norm_topk_prob: bool | None = True,
use_qk_norm: bool | None = False,
bos_token_id: int | None = None,
eos_token_id: int | None = None,
pad_token_id: int | None = None,
**kwargs,
):
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.num_key_value_heads = num_key_value_heads
self.hidden_act = hidden_act
self.initializer_range = initializer_range
self.rms_norm_eps = rms_norm_eps
self.use_cache = use_cache
self.attention_bias = attention_bias
self.attention_dropout = attention_dropout
self.rope_parameters = rope_parameters
kwargs.setdefault("partial_rotary_factor", 0.5) # assign default for BC
# MoE arguments
self.moe_intermediate_size = moe_intermediate_size
self.num_experts_per_tok = num_experts_per_tok
self.n_group = n_group
self.topk_group = topk_group
self.n_shared_experts = n_shared_experts
self.n_routed_experts = n_routed_experts
self.routed_scaling_factor = routed_scaling_factor
self.first_k_dense_replace = first_k_dense_replace
self.norm_topk_prob = norm_topk_prob
self.use_qk_norm = use_qk_norm
self.tie_word_embeddings = tie_word_embeddings
self.bos_token_id = bos_token_id
self.eos_token_id = eos_token_id
self.pad_token_id = pad_token_id
super().__init__(**kwargs)
class Glm4MoeRotaryEmbedding(GlmRotaryEmbedding):
pass
class Glm4MoeAttention(CohereAttention):
def __init__(self, config: Glm4MoeConfig, layer_idx: int | None = None):
nn.Module.__init__(self)
self.config = config
self.layer_idx = layer_idx
self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
self.scaling = self.head_dim**-0.5
self.rope_parameters = config.rope_parameters
self.attention_dropout = config.attention_dropout
self.is_causal = True
self.q_proj = nn.Linear(
config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias
)
self.k_proj = nn.Linear(
config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
)
self.v_proj = nn.Linear(
config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
)
self.o_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=False)
self.use_qk_norm = config.use_qk_norm
if self.use_qk_norm:
self.q_norm = Glm4MoeRMSNorm(self.head_dim, eps=config.rms_norm_eps)
self.k_norm = Glm4MoeRMSNorm(self.head_dim, eps=config.rms_norm_eps)
class Glm4MoeMLP(DeepseekV3MLP):
pass
class Glm4MoeTopkRouter(DeepseekV3TopkRouter):
def __init__(self, config: Glm4MoeConfig):
nn.Module.__init__(self)
self.config = config
self.top_k = config.num_experts_per_tok
self.n_routed_experts = config.n_routed_experts
self.routed_scaling_factor = config.routed_scaling_factor
self.n_group = config.n_group
self.topk_group = config.topk_group
self.norm_topk_prob = config.norm_topk_prob
self.weight = nn.Parameter(torch.empty((self.n_routed_experts, config.hidden_size)))
self.register_buffer("e_score_correction_bias", torch.zeros((self.n_routed_experts), dtype=torch.float32))
class Glm4MoeRMSNorm(DeepseekV3RMSNorm):
pass
class Glm4MoeDecoderLayer(DeepseekV3DecoderLayer):
pass
class Glm4MoePreTrainedModel(DeepseekV3PreTrainedModel):
_keys_to_ignore_on_load_unexpected = [r"model\.layers\.92.*", r"model\.layers\.46.*"]
class Glm4MoeModel(DeepseekV3Model):
pass
class Glm4MoeForCausalLM(DeepseekV3ForCausalLM):
pass
__all__ = [
"Glm4MoeConfig",
"Glm4MoePreTrainedModel",
"Glm4MoeModel",
"Glm4MoeForCausalLM",
]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/glm4_moe/modular_glm4_moe.py",
"license": "Apache License 2.0",
"lines": 257,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:tests/models/glm4_moe/test_modeling_glm4_moe.py | # Copyright 2025 The ZhipuAI Inc. team and HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch GLM-4.5, GLM-4.6, GLM-4.7 model."""
import unittest
import pytest
import torch
from transformers import is_torch_available
from transformers.testing_utils import (
cleanup,
require_torch,
require_torch_accelerator,
slow,
torch_device,
)
from ...causal_lm_tester import CausalLMModelTest, CausalLMModelTester
if is_torch_available():
from transformers import AutoTokenizer, Glm4MoeForCausalLM, Glm4MoeModel
class Glm4MoeModelTester(CausalLMModelTester):
if is_torch_available():
base_model_class = Glm4MoeModel
def __init__(
self,
parent,
n_routed_experts=8,
n_shared_experts=1,
n_group=1,
topk_group=1,
num_experts_per_tok=8,
):
super().__init__(parent=parent, num_experts_per_tok=num_experts_per_tok)
self.n_routed_experts = n_routed_experts
self.n_shared_experts = n_shared_experts
self.n_group = n_group
self.topk_group = topk_group
@require_torch
class Glm4MoeModelTest(CausalLMModelTest, unittest.TestCase):
model_tester_class = Glm4MoeModelTester
# used in `test_torch_compile_for_training`. Skip as "Dynamic control flow in MoE"
_torch_compile_train_cls = None
model_split_percents = [0.5, 0.85, 0.9] # it tries to offload everything with the default value
@require_torch_accelerator
@slow
class Glm4MoeIntegrationTest(unittest.TestCase):
def tearDown(self):
# See LlamaIntegrationTest.tearDown(). Can be removed once LlamaIntegrationTest.tearDown() is removed.
cleanup(torch_device, gc_collect=False)
@slow
@require_torch_accelerator
@pytest.mark.torch_compile_test
def test_compile_static_cache(self):
NUM_TOKENS_TO_GENERATE = 40
EXPECTED_TEXT_COMPLETION = [
'hello, world!\'\'\')\nprint(\'hello, world!\')\nprint("hello, world!")\nprint("hello, world!")\nprint("hello, world!")\nprint("hello, world!")\nprint("hello, world!")\n',
"tell me the story of the first Thanksgiving. commonly known as the Pilgrims, arrived in the autumn of 1620. They were seeking religious freedom and a new life in the Plymouth Colony. Their first",
]
prompts = ["[gMASK]<sop>hello", "[gMASK]<sop>tell me"]
tokenizer = AutoTokenizer.from_pretrained("zai-org/GLM-4.5")
model = Glm4MoeForCausalLM.from_pretrained("zai-org/GLM-4.5", device_map=torch_device, dtype=torch.bfloat16)
inputs = tokenizer(prompts, return_tensors="pt", padding=True).to(model.device)
# Dynamic Cache
generated_ids = model.generate(**inputs, max_new_tokens=NUM_TOKENS_TO_GENERATE, do_sample=False)
dynamic_text = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
self.assertEqual(EXPECTED_TEXT_COMPLETION, dynamic_text)
# Static Cache
generated_ids = model.generate(
**inputs, max_new_tokens=NUM_TOKENS_TO_GENERATE, do_sample=False, cache_implementation="static"
)
static_text = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
self.assertEqual(EXPECTED_TEXT_COMPLETION, static_text)
# Static Cache + compile
model._cache = None # clear cache object, initialized when we pass `cache_implementation="static"`
model.forward = torch.compile(model.forward, mode="reduce-overhead", fullgraph=True)
generated_ids = model.generate(
**inputs, max_new_tokens=NUM_TOKENS_TO_GENERATE, do_sample=False, cache_implementation="static"
)
static_compiled_text = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
self.assertEqual(EXPECTED_TEXT_COMPLETION, static_compiled_text)
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/glm4_moe/test_modeling_glm4_moe.py",
"license": "Apache License 2.0",
"lines": 88,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:src/transformers/models/sam/image_processing_sam_fast.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fast Image processor class for SAM."""
import math
from copy import deepcopy
from itertools import product
from typing import Any, Optional, Union
import numpy as np
import torch
import torchvision.transforms.v2.functional as tvF
from torch.nn import functional as F
from torchvision.ops.boxes import batched_nms
from ...image_processing_utils import BatchFeature, get_size_dict
from ...image_processing_utils_fast import BaseImageProcessorFast
from ...image_transforms import (
group_images_by_shape,
reorder_images,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
SizeDict,
pil_torch_interpolation_mapping,
)
from ...processing_utils import Unpack
from ...utils import (
TensorType,
auto_docstring,
)
from .image_processing_sam import SamImageProcessorKwargs
@auto_docstring
class SamImageProcessorFast(BaseImageProcessorFast):
resample = PILImageResampling.BILINEAR
image_mean = IMAGENET_DEFAULT_MEAN
image_std = IMAGENET_DEFAULT_STD
size = {"longest_edge": 1024}
mask_size = {"longest_edge": 256}
do_resize = True
do_rescale = True
do_normalize = True
do_convert_rgb = True
valid_kwargs = SamImageProcessorKwargs
do_pad = True
pad_size = {"height": 1024, "width": 1024}
mask_pad_size = {"height": 256, "width": 256}
def __init__(self, **kwargs: Unpack[SamImageProcessorKwargs]):
super().__init__(**kwargs)
def _get_preprocess_shape(self, old_shape: tuple[int, int], longest_edge: int):
"""
Compute the output size given input size and target long side length.
"""
oldh, oldw = old_shape
scale = longest_edge * 1.0 / max(oldh, oldw)
newh, neww = oldh * scale, oldw * scale
newh = int(newh + 0.5)
neww = int(neww + 0.5)
return (newh, neww)
def resize(
self, image: "torch.Tensor", size: SizeDict, interpolation: Optional["tvF.InterpolationMode"], **kwargs
) -> "torch.Tensor":
"""
Resize an image to `(size["height"], size["width"])`.
Args:
image (`np.ndarray`):
Image to resize.
size (`dict[str, int]`):
Dictionary in the format `{"longest_edge": int}` specifying the size of the output image. The longest
edge of the image will be resized to the specified size, while the other edge will be resized to
maintain the aspect ratio.
interpolation:
`tvF.InterpolationMode` filter to use when resizing the image e.g. `tvF.InterpolationMode.BICUBIC`.
Returns:
`torch.Tensor`: The resized image.
"""
if not size.longest_edge:
raise ValueError(f"The `size` dictionary must contain the key `longest_edge`. Got {size.keys()}")
input_size = image.shape[-2:]
output_height, output_width = self._get_preprocess_shape(input_size, size.longest_edge)
return super().resize(
image, size=SizeDict(height=output_height, width=output_width), interpolation=interpolation, **kwargs
)
def _further_process_kwargs(
self,
size: SizeDict | None = None,
pad_size: SizeDict | None = None,
mask_size: SizeDict | None = None,
mask_pad_size: SizeDict | None = None,
default_to_square: bool | None = None,
image_mean: float | list[float] | None = None,
image_std: float | list[float] | None = None,
data_format: ChannelDimension | None = None,
**kwargs,
) -> dict:
"""
Update kwargs that need further processing before being validated
Can be overridden by subclasses to customize the processing of kwargs.
"""
if kwargs is None:
kwargs = {}
if size is not None:
size = SizeDict(**get_size_dict(size=size, default_to_square=default_to_square))
if pad_size is not None:
pad_size = SizeDict(**get_size_dict(pad_size, param_name="pad_size"))
if mask_size is not None:
mask_size = SizeDict(**get_size_dict(mask_size, param_name="mask_size"))
if mask_pad_size is not None:
mask_pad_size = SizeDict(**get_size_dict(mask_pad_size, param_name="mask_pad_size"))
if isinstance(image_mean, list):
image_mean = tuple(image_mean)
if isinstance(image_std, list):
image_std = tuple(image_std)
if data_format is None:
data_format = ChannelDimension.FIRST
kwargs["size"] = size
kwargs["pad_size"] = pad_size
kwargs["mask_size"] = mask_size
kwargs["mask_pad_size"] = mask_pad_size
kwargs["image_mean"] = image_mean
kwargs["image_std"] = image_std
kwargs["data_format"] = data_format
# torch resize uses interpolation instead of resample
# Check if resample is an int before checking if it's an instance of PILImageResampling
# because if pillow < 9.1.0, resample is an int and PILImageResampling is a module.
# Checking PILImageResampling will fail with error `TypeError: isinstance() arg 2 must be a type or tuple of types`.
resample = kwargs.pop("resample")
kwargs["interpolation"] = (
pil_torch_interpolation_mapping[resample] if isinstance(resample, (PILImageResampling, int)) else resample
)
return kwargs
@auto_docstring
def preprocess(
self,
images: ImageInput,
segmentation_maps: ImageInput | None = None,
**kwargs: Unpack[SamImageProcessorKwargs],
) -> BatchFeature:
r"""
segmentation_maps (`ImageInput`, *optional*):
The segmentation maps to preprocess.
"""
return super().preprocess(images, segmentation_maps, **kwargs)
def _preprocess_image_like_inputs(
self,
images: ImageInput,
segmentation_maps: ImageInput | None,
do_convert_rgb: bool,
input_data_format: ChannelDimension,
device: Union[str, "torch.device"] | None = None,
**kwargs: Unpack[SamImageProcessorKwargs],
) -> BatchFeature:
"""
Preprocess image-like inputs.
"""
images = self._prepare_image_like_inputs(
images=images, do_convert_rgb=do_convert_rgb, input_data_format=input_data_format, device=device
)
original_sizes = [image.shape[-2:] for image in images]
images_kwargs = kwargs.copy()
image_outputs = self._preprocess(images, **images_kwargs)
data = {
"pixel_values": image_outputs.pixel_values,
"original_sizes": original_sizes,
"reshaped_input_sizes": image_outputs.reshaped_input_sizes,
}
if segmentation_maps is not None:
processed_segmentation_maps = self._prepare_image_like_inputs(
images=segmentation_maps,
expected_ndims=2,
do_convert_rgb=False,
input_data_format=ChannelDimension.FIRST,
)
segmentation_maps_kwargs = kwargs.copy()
segmentation_maps_kwargs.update(
{
"do_normalize": False,
"do_rescale": False,
"interpolation": tvF.InterpolationMode.NEAREST_EXACT,
"size": segmentation_maps_kwargs.pop("mask_size"),
"pad_size": segmentation_maps_kwargs.pop("mask_pad_size"),
}
)
processed_segmentation_maps = self._preprocess(
images=processed_segmentation_maps, **segmentation_maps_kwargs
)
data["labels"] = processed_segmentation_maps["pixel_values"].squeeze(1).to(torch.int64)
return BatchFeature(data=data, tensor_type=kwargs["return_tensors"])
def _preprocess(
self,
images: list["torch.Tensor"],
do_resize: bool,
size: SizeDict,
interpolation: Optional["F.InterpolationMode"],
do_center_crop: bool,
crop_size: SizeDict,
do_rescale: bool,
rescale_factor: float,
do_normalize: bool,
image_mean: float | list[float] | None,
image_std: float | list[float] | None,
do_pad: bool | None,
pad_size: SizeDict | None,
disable_grouping: bool | None,
return_tensors: str | TensorType | None,
**kwargs,
) -> BatchFeature:
# Group images by size for batched resizing
grouped_images, grouped_images_index = group_images_by_shape(images, disable_grouping=disable_grouping)
resized_images_grouped = {}
for shape, stacked_images in grouped_images.items():
if do_resize:
stacked_images = self.resize(image=stacked_images, size=size, interpolation=interpolation)
resized_images_grouped[shape] = stacked_images
resized_images = reorder_images(resized_images_grouped, grouped_images_index)
reshaped_input_sizes = [image.shape[-2:] for image in resized_images]
# Group images by size for further processing
# Needed in case do_resize is False, or resize returns images with different sizes
grouped_images, grouped_images_index = group_images_by_shape(resized_images, disable_grouping=disable_grouping)
processed_images_grouped = {}
for shape, stacked_images in grouped_images.items():
if do_center_crop:
stacked_images = self.center_crop(stacked_images, crop_size)
# Fused rescale and normalize
stacked_images = self.rescale_and_normalize(
stacked_images, do_rescale, rescale_factor, do_normalize, image_mean, image_std
)
processed_images_grouped[shape] = stacked_images
processed_images = reorder_images(processed_images_grouped, grouped_images_index)
if do_pad:
processed_images = self.pad(processed_images, pad_size=pad_size, disable_grouping=disable_grouping)
return BatchFeature(
data={"pixel_values": processed_images, "reshaped_input_sizes": reshaped_input_sizes},
tensor_type=return_tensors,
)
def generate_crop_boxes(
self,
image: "torch.Tensor",
target_size,
crop_n_layers: int = 0,
overlap_ratio: float = 512 / 1500,
points_per_crop: int | None = 32,
crop_n_points_downscale_factor: list[int] | None = 1,
device: Optional["torch.device"] = None,
):
"""
Generates a list of crop boxes of different sizes. Each layer has (2**i)**2 boxes for the ith layer.
Args:
image (`torch.Tensor`):
Input original image
target_size (`int`):
Target size of the resized image
crop_n_layers (`int`, *optional*, defaults to 0):
If >0, mask prediction will be run again on crops of the image. Sets the number of layers to run, where
each layer has 2**i_layer number of image crops.
overlap_ratio (`float`, *optional*, defaults to 512/1500):
Sets the degree to which crops overlap. In the first crop layer, crops will overlap by this fraction of
the image length. Later layers with more crops scale down this overlap.
points_per_crop (`int`, *optional*, defaults to 32):
Number of points to sample from each crop.
crop_n_points_downscale_factor (`list[int]`, *optional*, defaults to 1):
The number of points-per-side sampled in layer n is scaled down by crop_n_points_downscale_factor**n.
device (`torch.device`, *optional*, defaults to None):
Device to use for the computation. If None, cpu will be used.
input_data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
return_tensors (`str`, *optional*, defaults to `pt`):
If `pt`, returns `torch.Tensor`.
"""
image = self._process_image(image)
crop_boxes, points_per_crop, cropped_images, input_labels = _generate_crop_boxes(
image,
target_size,
crop_n_layers,
overlap_ratio,
points_per_crop,
crop_n_points_downscale_factor,
)
if device is None:
device = torch.device("cpu")
crop_boxes = crop_boxes.to(device)
points_per_crop = points_per_crop.to(device)
# cropped_images stays as torch.Tensor
input_labels = input_labels.to(device)
return crop_boxes, points_per_crop, cropped_images, input_labels
def filter_masks(
self,
masks,
iou_scores,
original_size,
cropped_box_image,
pred_iou_thresh=0.88,
stability_score_thresh=0.95,
mask_threshold=0,
stability_score_offset=1,
):
"""
Filters the predicted masks by selecting only the ones that meets several criteria. The first criterion being
that the iou scores needs to be greater than `pred_iou_thresh`. The second criterion is that the stability
score needs to be greater than `stability_score_thresh`. The method also converts the predicted masks to
bounding boxes and pad the predicted masks if necessary.
Args:
masks (`torch.Tensor`):
Input masks.
iou_scores (`torch.Tensor`):
List of IoU scores.
original_size (`tuple[int,int]`):
Size of the original image.
cropped_box_image (`torch.Tensor`):
The cropped image.
pred_iou_thresh (`float`, *optional*, defaults to 0.88):
The threshold for the iou scores.
stability_score_thresh (`float`, *optional*, defaults to 0.95):
The threshold for the stability score.
mask_threshold (`float`, *optional*, defaults to 0):
The threshold for the predicted masks.
stability_score_offset (`float`, *optional*, defaults to 1):
The offset for the stability score used in the `_compute_stability_score` method.
"""
original_height, original_width = original_size
iou_scores = iou_scores.flatten(0, 1)
masks = masks.flatten(0, 1)
if masks.shape[0] != iou_scores.shape[0]:
raise ValueError("masks and iou_scores must have the same batch size.")
if masks.device != iou_scores.device:
iou_scores = iou_scores.to(masks.device)
batch_size = masks.shape[0]
keep_mask = torch.ones(batch_size, dtype=torch.bool, device=masks.device)
if pred_iou_thresh > 0.0:
keep_mask = keep_mask & (iou_scores > pred_iou_thresh)
# compute stability score
if stability_score_thresh > 0.0:
stability_scores = _compute_stability_score(masks, mask_threshold, stability_score_offset)
keep_mask = keep_mask & (stability_scores > stability_score_thresh)
scores = iou_scores[keep_mask]
masks = masks[keep_mask]
# binarize masks
masks = masks > mask_threshold
converted_boxes = _batched_mask_to_box(masks)
keep_mask = ~_is_box_near_crop_edge(
converted_boxes, cropped_box_image, [0, 0, original_width, original_height]
)
scores = scores[keep_mask]
masks = masks[keep_mask]
converted_boxes = converted_boxes[keep_mask]
masks = _pad_masks(masks, cropped_box_image, original_height, original_width)
# conversion to rle is necessary to run non-maximum suppression
masks = _mask_to_rle(masks)
return masks, scores, converted_boxes
def post_process_masks(
self,
masks,
original_sizes,
reshaped_input_sizes,
mask_threshold=0.0,
binarize=True,
pad_size=None,
):
"""
Remove padding and upscale masks to the original image size.
Args:
masks (`Union[List[torch.Tensor], List[np.ndarray]]`):
Batched masks from the mask_decoder in (batch_size, num_channels, height, width) format.
original_sizes (`Union[torch.Tensor, List[Tuple[int,int]]]`):
The original sizes of each image before it was resized to the model's expected input shape, in (height,
width) format.
reshaped_input_sizes (`Union[torch.Tensor, List[Tuple[int,int]]]`):
The size of each image as it is fed to the model, in (height, width) format. Used to remove padding.
mask_threshold (`float`, *optional*, defaults to 0.0):
The threshold to use for binarizing the masks.
binarize (`bool`, *optional*, defaults to `True`):
Whether to binarize the masks.
pad_size (`int`, *optional*, defaults to `self.pad_size`):
The target size the images were padded to before being passed to the model. If None, the target size is
assumed to be the processor's `pad_size`.
Returns:
(`torch.Tensor`): Batched masks in batch_size, num_channels, height, width) format, where (height, width)
is given by original_size.
"""
pad_size = self.pad_size if pad_size is None else pad_size
target_image_size = (pad_size["height"], pad_size["width"])
if isinstance(original_sizes, (torch.Tensor, np.ndarray)):
original_sizes = original_sizes.tolist()
if isinstance(reshaped_input_sizes, (torch.Tensor, np.ndarray)):
reshaped_input_sizes = reshaped_input_sizes.tolist()
output_masks = []
for i, original_size in enumerate(original_sizes):
if isinstance(masks[i], np.ndarray):
masks[i] = torch.from_numpy(masks[i])
elif not isinstance(masks[i], torch.Tensor):
raise TypeError("Input masks should be a list of `torch.tensors` or a list of `np.ndarray`")
interpolated_mask = F.interpolate(masks[i], target_image_size, mode="bilinear", align_corners=False)
interpolated_mask = interpolated_mask[..., : reshaped_input_sizes[i][0], : reshaped_input_sizes[i][1]]
interpolated_mask = F.interpolate(interpolated_mask, original_size, mode="bilinear", align_corners=False)
if binarize:
interpolated_mask = interpolated_mask > mask_threshold
output_masks.append(interpolated_mask)
return output_masks
def post_process_for_mask_generation(self, all_masks, all_scores, all_boxes, crops_nms_thresh):
"""
Post processes mask that are generated by calling the Non Maximum Suppression algorithm on the predicted masks.
Args:
all_masks (`torch.Tensor`):
List of all predicted segmentation masks
all_scores (`torch.Tensor`):
List of all predicted iou scores
all_boxes (`torch.Tensor`):
List of all bounding boxes of the predicted masks
crops_nms_thresh (`float`):
Threshold for NMS (Non Maximum Suppression) algorithm.
"""
return _post_process_for_mask_generation(all_masks, all_scores, all_boxes, crops_nms_thresh)
def _compute_stability_score(masks: "torch.Tensor", mask_threshold: float, stability_score_offset: int):
# One mask is always contained inside the other.
# Save memory by preventing unnecessary cast to torch.int64
intersections = (
(masks > (mask_threshold + stability_score_offset)).sum(-1, dtype=torch.int16).sum(-1, dtype=torch.int32)
)
unions = (masks > (mask_threshold - stability_score_offset)).sum(-1, dtype=torch.int16).sum(-1, dtype=torch.int32)
stability_scores = intersections / unions
return stability_scores
def _mask_to_rle(input_mask: "torch.Tensor"):
"""
Encodes masks the run-length encoding (RLE), in the format expected by pycoco tools.
"""
# Put in fortran order and flatten height and width
batch_size, height, width = input_mask.shape
input_mask = input_mask.permute(0, 2, 1).flatten(1)
# Compute change indices
diff = input_mask[:, 1:] ^ input_mask[:, :-1]
change_indices = diff.nonzero()
# Encode run length
out = []
for i in range(batch_size):
cur_idxs = change_indices[change_indices[:, 0] == i, 1] + 1
if len(cur_idxs) == 0:
# No changes => either all 0 or all 1
# If the entire mask is 0, RLE is [height*width] or if the entire mask is 1, RLE is [0, height*width].
if input_mask[i, 0] == 0:
out.append({"size": [height, width], "counts": [height * width]})
else:
out.append({"size": [height, width], "counts": [0, height * width]})
continue
btw_idxs = cur_idxs[1:] - cur_idxs[:-1]
counts = [] if input_mask[i, 0] == 0 else [0]
counts += [cur_idxs[0].item()] + btw_idxs.tolist() + [height * width - cur_idxs[-1].item()]
out.append({"size": [height, width], "counts": counts})
return out
def _batched_mask_to_box(masks: "torch.Tensor"):
"""
Computes the bounding boxes around the given input masks. The bounding boxes are in the XYXY format which
corresponds the following required indices:
- LEFT: left hand side of the bounding box
- TOP: top of the bounding box
- RIGHT: right of the bounding box
- BOTTOM: bottom of the bounding box
Return [0,0,0,0] for an empty mask. For input shape channel_1 x channel_2 x ... x height x width, the output shape
is channel_1 x channel_2 x ... x 4.
Args:
- masks (`torch.Tensor` of shape `(batch, nb_mask, height, width)`)
"""
# torch.max below raises an error on empty inputs, just skip in this case
if torch.numel(masks) == 0:
return torch.zeros(*masks.shape[:-2], 4, device=masks.device)
# Normalize shape to Cxheightxwidth
shape = masks.shape
height, width = shape[-2:]
# Get top and bottom edges
in_height, _ = torch.max(masks, dim=-1)
in_height_coords = in_height * torch.arange(height, device=in_height.device)[None, :]
bottom_edges, _ = torch.max(in_height_coords, dim=-1)
in_height_coords = in_height_coords + height * (~in_height)
top_edges, _ = torch.min(in_height_coords, dim=-1)
# Get left and right edges
in_width, _ = torch.max(masks, dim=-2)
in_width_coords = in_width * torch.arange(width, device=in_width.device)[None, :]
right_edges, _ = torch.max(in_width_coords, dim=-1)
in_width_coords = in_width_coords + width * (~in_width)
left_edges, _ = torch.min(in_width_coords, dim=-1)
# If the mask is empty the right edge will be to the left of the left edge.
# Replace these boxes with [0, 0, 0, 0]
empty_filter = (right_edges < left_edges) | (bottom_edges < top_edges)
out = torch.stack([left_edges, top_edges, right_edges, bottom_edges], dim=-1)
out = out * (~empty_filter).unsqueeze(-1)
# Return to original shape
out = out.reshape(*shape[:-2], 4)
return out
def _is_box_near_crop_edge(boxes, crop_box, orig_box, atol=20.0):
"""Filter masks at the edge of a crop, but not at the edge of the original image."""
crop_box_torch = torch.as_tensor(crop_box, dtype=torch.float, device=boxes.device)
orig_box_torch = torch.as_tensor(orig_box, dtype=torch.float, device=boxes.device)
left, top, _, _ = crop_box
offset = torch.tensor([[left, top, left, top]], device=boxes.device)
# Check if boxes has a channel dimension
if len(boxes.shape) == 3:
offset = offset.unsqueeze(1)
boxes = (boxes + offset).float()
near_crop_edge = torch.isclose(boxes, crop_box_torch[None, :], atol=atol, rtol=0)
near_image_edge = torch.isclose(boxes, orig_box_torch[None, :], atol=atol, rtol=0)
near_crop_edge = torch.logical_and(near_crop_edge, ~near_image_edge)
return torch.any(near_crop_edge, dim=1)
def _pad_masks(masks, crop_box: list[int], orig_height: int, orig_width: int):
left, top, right, bottom = crop_box
if left == 0 and top == 0 and right == orig_width and bottom == orig_height:
return masks
# Coordinate transform masks
pad_x, pad_y = orig_width - (right - left), orig_height - (bottom - top)
pad = (left, pad_x - left, top, pad_y - top)
return torch.nn.functional.pad(masks, pad, value=0)
def _generate_crop_boxes(
image,
target_size: int, # Is it tuple here?
crop_n_layers: int = 0,
overlap_ratio: float = 512 / 1500,
points_per_crop: int | None = 32,
crop_n_points_downscale_factor: list[int] | None = 1,
) -> tuple[list[list[int]], list[int]]:
"""
Generates a list of crop boxes of different sizes. Each layer has (2**i)**2 boxes for the ith layer.
Args:
image (Union[`numpy.ndarray`, `PIL.Image`, `torch.Tensor`]):
Image to generate crops for.
target_size (`int`):
Size of the smallest crop.
crop_n_layers (`int`, *optional*):
If `crops_n_layers>0`, mask prediction will be run again on crops of the image. Sets the number of layers
to run, where each layer has 2**i_layer number of image crops.
overlap_ratio (`int`, *optional*):
Sets the degree to which crops overlap. In the first crop layer, crops will overlap by this fraction of the
image length. Later layers with more crops scale down this overlap.
points_per_crop (`int`, *optional*):
Number of points to sample per crop.
crop_n_points_downscale_factor (`int`, *optional*):
The number of points-per-side sampled in layer n is scaled down by crop_n_points_downscale_factor**n.
input_data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
"""
if isinstance(image, list):
raise ValueError("Only one image is allowed for crop generation.")
original_size = image.shape[-2:]
points_grid = []
for i in range(crop_n_layers + 1):
n_points = int(points_per_crop / (crop_n_points_downscale_factor**i))
points_grid.append(_build_point_grid(n_points))
crop_boxes, layer_idxs = _generate_per_layer_crops(crop_n_layers, overlap_ratio, original_size)
cropped_images, point_grid_per_crop = _generate_crop_images(
crop_boxes, image, points_grid, layer_idxs, target_size, original_size
)
crop_boxes = torch.tensor(crop_boxes)
crop_boxes = crop_boxes.float()
points_per_crop = torch.stack(point_grid_per_crop)
points_per_crop = points_per_crop.unsqueeze(0).permute(0, 2, 1, 3)
cropped_images = torch.stack(cropped_images)
input_labels = torch.ones_like(points_per_crop[:, :, :, 0], dtype=torch.int64)
return crop_boxes, points_per_crop, cropped_images, input_labels
def _generate_per_layer_crops(crop_n_layers, overlap_ratio, original_size):
"""
Generates 2 ** (layers idx + 1) crops for each crop_n_layers. Crops are in the XYWH format : The XYWH format
consists of the following required indices:
- X: X coordinate of the top left of the bounding box
- Y: Y coordinate of the top left of the bounding box
- W: width of the bounding box
- H: height of the bounding box
"""
crop_boxes, layer_idxs = [], []
im_height, im_width = original_size
short_side = min(im_height, im_width)
# Original image
crop_boxes.append([0, 0, im_width, im_height])
layer_idxs.append(0)
for i_layer in range(crop_n_layers):
n_crops_per_side = 2 ** (i_layer + 1)
overlap = int(overlap_ratio * short_side * (2 / n_crops_per_side))
crop_width = int(math.ceil((overlap * (n_crops_per_side - 1) + im_width) / n_crops_per_side))
crop_height = int(math.ceil((overlap * (n_crops_per_side - 1) + im_height) / n_crops_per_side))
crop_box_x0 = [int((crop_width - overlap) * i) for i in range(n_crops_per_side)]
crop_box_y0 = [int((crop_height - overlap) * i) for i in range(n_crops_per_side)]
for left, top in product(crop_box_x0, crop_box_y0):
box = [left, top, min(left + crop_width, im_width), min(top + crop_height, im_height)]
crop_boxes.append(box)
layer_idxs.append(i_layer + 1)
return crop_boxes, layer_idxs
def _build_point_grid(n_per_side: int) -> torch.Tensor:
"""Generates a 2D grid of points evenly spaced in [0,1]x[0,1]."""
offset = 1 / (2 * n_per_side)
points_one_side = torch.linspace(offset, 1 - offset, n_per_side)
points_x = torch.tile(points_one_side[None, :], (n_per_side, 1))
points_y = torch.tile(points_one_side[:, None], (1, n_per_side))
points = torch.stack([points_x, points_y], dim=-1).reshape(-1, 2)
return points
def _generate_crop_images(
    crop_boxes, image, points_grid, layer_idxs, target_size, original_size, input_data_format=None
):
    """
    Crops `image` (channels-first tensor) according to each XYXY bounding box, and for every crop
    collects its layer's unit-square point grid scaled to the crop's pixel size and normalized to
    the `target_size`-resized frame.

    `input_data_format` is accepted for interface compatibility but unused here.
    """
    cropped_images = []
    total_points_per_crop = []
    for box, layer_idx in zip(crop_boxes, layer_idxs):
        left, top, right, bottom = box
        crop = image[:, top:bottom, left:right]
        cropped_images.append(crop)
        # (height, width) flipped to (width, height) so it lines up with (x, y) point columns.
        scale_wh = torch.tensor(crop.shape[-2:]).flip(dims=(0,)).unsqueeze(0)
        scaled_points = points_grid[layer_idx] * scale_wh
        total_points_per_crop.append(_normalize_coordinates(target_size, scaled_points, original_size))
    return cropped_images, total_points_per_crop
def _normalize_coordinates(
target_size: int, coords: torch.Tensor, original_size: tuple[int, int], is_bounding_box=False
) -> torch.Tensor:
"""
Expects a numpy array of length 2 in the final dimension. Requires the original image size in (height, width)
format.
"""
old_height, old_width = original_size
scale = target_size * 1.0 / max(old_height, old_width)
new_height, new_width = old_height * scale, old_width * scale
new_width = int(new_width + 0.5)
new_height = int(new_height + 0.5)
coords = deepcopy(coords).float()
if is_bounding_box:
coords = coords.reshape(-1, 2, 2)
coords[..., 0] = coords[..., 0] * (new_width / old_width)
coords[..., 1] = coords[..., 1] * (new_height / old_height)
if is_bounding_box:
coords = coords.reshape(-1, 4)
return coords
def _rle_to_mask(rle: dict[str, Any]) -> torch.Tensor:
"""Compute a binary mask from an uncompressed RLE."""
height, width = rle["size"]
mask = torch.empty(height * width, dtype=bool)
idx = 0
parity = False
for count in rle["counts"]:
mask[idx : idx + count] = parity
idx += count
parity = not parity
mask = mask.reshape(width, height)
return mask.transpose(0, 1) # Reshape to original shape
def _post_process_for_mask_generation(rle_masks, iou_scores, mask_boxes, amg_crops_nms_thresh=0.7):
    """
    Apply Non Maximum Suppression to the generated masks and decode the survivors.

    Args:
        rle_masks (`torch.Tensor`):
            binary masks in the RLE format
        iou_scores (`torch.Tensor` of shape (nb_masks, 1)):
            iou_scores predicted by the model
        mask_boxes (`torch.Tensor`):
            The bounding boxes corresponding to segmentation masks
        amg_crops_nms_thresh (`float`, *optional*, defaults to 0.7):
            NMS threshold.

    Returns:
        Tuple of (decoded binary masks, kept IoU scores, kept RLE masks, kept boxes).
    """
    # All boxes share dummy category 0 so NMS runs across the entire set at once.
    kept_indices = batched_nms(
        boxes=mask_boxes.float(),
        scores=iou_scores,
        idxs=torch.zeros(mask_boxes.shape[0]),
        iou_threshold=amg_crops_nms_thresh,
    )

    filtered_scores = iou_scores[kept_indices]
    filtered_rles = [rle_masks[idx] for idx in kept_indices]
    filtered_boxes = mask_boxes[kept_indices]
    decoded_masks = [_rle_to_mask(rle) for rle in filtered_rles]
    return decoded_masks, filtered_scores, filtered_rles, filtered_boxes
__all__ = ["SamImageProcessorFast"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/sam/image_processing_sam_fast.py",
"license": "Apache License 2.0",
"lines": 678,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:tests/models/sam/test_image_processing_sam.py | # Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from datasets import load_dataset
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torchvision_available
from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import SamImageProcessor
if is_torchvision_available():
from transformers import SamImageProcessorFast
class SamImageProcessingTester:
    """Holds the shared configuration used to instantiate SAM image processors in tests.

    Fix: `image_mean` and `image_std` previously used mutable list defaults, which are created
    once at function definition time and shared across every instance; they now default to
    `None` and are materialized per instance, preserving the same default values.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_pad=True,
        pad_size=None,
        mask_size=None,
        mask_pad_size=None,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=None,
        image_std=None,
    ):
        # Fill in per-instance defaults (avoids shared mutable default arguments).
        size = size if size is not None else {"longest_edge": 20}
        pad_size = pad_size if pad_size is not None else {"height": 20, "width": 20}
        mask_size = mask_size if mask_size is not None else {"longest_edge": 12}
        mask_pad_size = mask_pad_size if mask_pad_size is not None else {"height": 12, "width": 12}
        image_mean = image_mean if image_mean is not None else [0.5, 0.5, 0.5]
        image_std = image_std if image_std is not None else [0.5, 0.5, 0.5]
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_pad = do_pad
        self.pad_size = pad_size
        self.mask_size = mask_size
        self.mask_pad_size = mask_pad_size
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        """Return the kwargs dict used to instantiate the image processor under test."""
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "do_pad": self.do_pad,
            "pad_size": self.pad_size,
            "mask_size": self.mask_size,
            "mask_pad_size": self.mask_pad_size,
        }

    def expected_output_image_shape(self, images):
        """Expected (channels, height, width) of processed outputs; padding fixes the spatial size."""
        return self.num_channels, self.pad_size["height"], self.pad_size["width"]

    def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        """Build a batch of random test images via the shared test helper."""
        return prepare_image_inputs(
            batch_size=self.batch_size,
            num_channels=self.num_channels,
            min_resolution=self.min_resolution,
            max_resolution=self.max_resolution,
            equal_resolution=equal_resolution,
            numpify=numpify,
            torchify=torchify,
        )
# Copied from transformers.tests.models.beit.test_image_processing_beit.prepare_semantic_single_inputs
def prepare_semantic_single_inputs():
    """Return one (image, segmentation map) pair from the ADE20k hub test fixtures."""
    ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
    example = ds[0]
    return example["image"], example["map"]
# Copied from transformers.tests.models.beit.test_image_processing_beit.prepare_semantic_batch_inputs
def prepare_semantic_batch_inputs():
    """Return two images and their segmentation maps from the ADE20k hub test fixtures."""
    ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
    return list(ds["image"][:2]), list(ds["map"][:2])
@require_torch
@require_vision
class SamImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase):
    """Test suite for `SamImageProcessor` (slow) and `SamImageProcessorFast`."""

    image_processing_class = SamImageProcessor if is_vision_available() else None
    fast_image_processing_class = SamImageProcessorFast if is_torchvision_available() else None

    def setUp(self):
        super().setUp()
        self.image_processor_tester = SamImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        """Kwargs shared by both the slow and the fast processor instantiation."""
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        """Each processor class exposes the full set of configuration attributes."""
        for image_processing_class in self.image_processor_list:
            image_processing = image_processing_class(**self.image_processor_dict)
            self.assertTrue(hasattr(image_processing, "image_mean"))
            self.assertTrue(hasattr(image_processing, "image_std"))
            self.assertTrue(hasattr(image_processing, "do_normalize"))
            self.assertTrue(hasattr(image_processing, "do_resize"))
            self.assertTrue(hasattr(image_processing, "size"))
            self.assertTrue(hasattr(image_processing, "do_rescale"))
            self.assertTrue(hasattr(image_processing, "rescale_factor"))
            self.assertTrue(hasattr(image_processing, "do_pad"))
            self.assertTrue(hasattr(image_processing, "pad_size"))
            self.assertTrue(hasattr(image_processing, "mask_size"))
            self.assertTrue(hasattr(image_processing, "mask_pad_size"))

    def test_image_processor_from_dict_with_kwargs(self):
        """`from_dict` honors both the stored config and keyword overrides."""
        for image_processing_class in self.image_processor_list:
            image_processing_class = image_processing_class(**self.image_processor_dict)
            image_processor = image_processing_class.from_dict(self.image_processor_dict)
            self.assertEqual(image_processor.size, {"longest_edge": 20})

            image_processor = image_processing_class.from_dict(self.image_processor_dict, size={"longest_edge": 42})
            self.assertEqual(image_processor.size, {"longest_edge": 42})

    def test_call_segmentation_maps(self):
        """Processing with segmentation maps pads pixel values and labels to the configured sizes,
        for both tensor and PIL inputs, batched and unbatched."""
        for image_processing_class in self.image_processor_list:
            # Initialize image_processor
            image_processor = image_processing_class(**self.image_processor_dict)
            # create random PyTorch tensors
            image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, torchify=True)
            maps = []
            for image in image_inputs:
                self.assertIsInstance(image, torch.Tensor)
                maps.append(torch.zeros(image.shape[-2:]).long())

            # Test not batched input
            encoding = image_processor(image_inputs[0], maps[0], return_tensors="pt")
            self.assertEqual(
                encoding["pixel_values"].shape,
                (
                    1,
                    self.image_processor_tester.num_channels,
                    self.image_processor_tester.pad_size["height"],
                    self.image_processor_tester.pad_size["width"],
                ),
            )
            self.assertEqual(
                encoding["labels"].shape,
                (
                    1,
                    self.image_processor_tester.mask_pad_size["height"],
                    self.image_processor_tester.mask_pad_size["width"],
                ),
            )
            self.assertEqual(encoding["labels"].dtype, torch.long)
            self.assertTrue(encoding["labels"].min().item() >= 0)
            self.assertTrue(encoding["labels"].max().item() <= 255)

            # Test batched
            encoding = image_processor(image_inputs, maps, return_tensors="pt")
            self.assertEqual(
                encoding["pixel_values"].shape,
                (
                    self.image_processor_tester.batch_size,
                    self.image_processor_tester.num_channels,
                    self.image_processor_tester.pad_size["height"],
                    self.image_processor_tester.pad_size["width"],
                ),
            )
            self.assertEqual(
                encoding["labels"].shape,
                (
                    self.image_processor_tester.batch_size,
                    self.image_processor_tester.mask_pad_size["height"],
                    self.image_processor_tester.mask_pad_size["width"],
                ),
            )
            self.assertEqual(encoding["labels"].dtype, torch.long)
            self.assertTrue(encoding["labels"].min().item() >= 0)
            self.assertTrue(encoding["labels"].max().item() <= 255)

            # Test not batched input (PIL images)
            image, segmentation_map = prepare_semantic_single_inputs()

            encoding = image_processor(image, segmentation_map, return_tensors="pt")
            self.assertEqual(
                encoding["pixel_values"].shape,
                (
                    1,
                    self.image_processor_tester.num_channels,
                    self.image_processor_tester.pad_size["height"],
                    self.image_processor_tester.pad_size["width"],
                ),
            )
            self.assertEqual(
                encoding["labels"].shape,
                (
                    1,
                    self.image_processor_tester.mask_pad_size["height"],
                    self.image_processor_tester.mask_pad_size["width"],
                ),
            )
            self.assertEqual(encoding["labels"].dtype, torch.long)
            self.assertTrue(encoding["labels"].min().item() >= 0)
            self.assertTrue(encoding["labels"].max().item() <= 255)

            # Test batched input (PIL images)
            images, segmentation_maps = prepare_semantic_batch_inputs()

            encoding = image_processor(images, segmentation_maps, return_tensors="pt")
            self.assertEqual(
                encoding["pixel_values"].shape,
                (
                    2,
                    self.image_processor_tester.num_channels,
                    self.image_processor_tester.pad_size["height"],
                    self.image_processor_tester.pad_size["width"],
                ),
            )
            self.assertEqual(
                encoding["labels"].shape,
                (
                    2,
                    self.image_processor_tester.mask_pad_size["height"],
                    self.image_processor_tester.mask_pad_size["width"],
                ),
            )
            self.assertEqual(encoding["labels"].dtype, torch.long)
            self.assertTrue(encoding["labels"].min().item() >= 0)
            self.assertTrue(encoding["labels"].max().item() <= 255)

    def test_slow_fast_equivalence(self):
        """Slow and fast processors produce near-identical pixel values and labels on one sample."""
        if not self.test_slow_image_processor or not self.test_fast_image_processor:
            self.skipTest(reason="Skipping slow/fast equivalence test")

        if self.image_processing_class is None or self.fast_image_processing_class is None:
            self.skipTest(reason="Skipping slow/fast equivalence test as one of the image processors is not defined")

        dummy_image, dummy_map = prepare_semantic_single_inputs()

        image_processor_slow = self.image_processing_class(**self.image_processor_dict)
        image_processor_fast = self.fast_image_processing_class(**self.image_processor_dict)

        image_encoding_slow = image_processor_slow(dummy_image, segmentation_maps=dummy_map, return_tensors="pt")
        image_encoding_fast = image_processor_fast(dummy_image, segmentation_maps=dummy_map, return_tensors="pt")

        # Loose per-element tolerance plus a tight mean-absolute-difference bound.
        self.assertTrue(torch.allclose(image_encoding_slow.pixel_values, image_encoding_fast.pixel_values, atol=1e-1))
        self.assertLessEqual(
            torch.mean(torch.abs(image_encoding_slow.pixel_values - image_encoding_fast.pixel_values)).item(), 1e-3
        )
        self.assertTrue(torch.allclose(image_encoding_slow.labels, image_encoding_fast.labels, atol=1e-1))

    def test_slow_fast_equivalence_batched(self):
        """Batched variant of the slow/fast equivalence check."""
        if not self.test_slow_image_processor or not self.test_fast_image_processor:
            self.skipTest(reason="Skipping slow/fast equivalence test")

        if self.image_processing_class is None or self.fast_image_processing_class is None:
            self.skipTest(reason="Skipping slow/fast equivalence test as one of the image processors is not defined")

        dummy_images, dummy_maps = prepare_semantic_batch_inputs()

        image_processor_slow = self.image_processing_class(**self.image_processor_dict)
        image_processor_fast = self.fast_image_processing_class(**self.image_processor_dict)

        encoding_slow = image_processor_slow(dummy_images, segmentation_maps=dummy_maps, return_tensors="pt")
        encoding_fast = image_processor_fast(dummy_images, segmentation_maps=dummy_maps, return_tensors="pt")

        # NOTE(review): unlike the unbatched variant above, labels are not compared here —
        # confirm whether that omission is intentional.
        self.assertTrue(torch.allclose(encoding_slow.pixel_values, encoding_fast.pixel_values, atol=1e-1))
        self.assertLessEqual(
            torch.mean(torch.abs(encoding_slow.pixel_values - encoding_fast.pixel_values)).item(), 1e-3
        )
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/sam/test_image_processing_sam.py",
"license": "Apache License 2.0",
"lines": 257,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:src/transformers/models/voxtral/configuration_voxtral.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ...configuration_utils import PreTrainedConfig
from ..auto import CONFIG_MAPPING, AutoConfig
class VoxtralEncoderConfig(PreTrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`VoxtralEncoder`]. It is used to instantiate a
    Voxtral audio encoder according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the audio encoder of the Voxtral
    architecture.

    e.g. [mistralai/Voxtral-Mini-3B-2507](https://huggingface.co/mistralai/Voxtral-Mini-3B-2507)

    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 51866):
            Vocabulary size of the model.
        hidden_size (`int`, *optional*, defaults to 1280):
            Dimensionality of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 5120):
            Dimension of the MLP representations.
        num_hidden_layers (`int`, *optional*, defaults to 32):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 20):
            Number of attention heads for each attention layer in the Transformer encoder.
        scale_embedding (`bool`, *optional*, defaults to `False`):
            Scale embeddings by dividing by sqrt(hidden_size) if True.
        activation_function (`str`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`
            or another activation name resolvable by the library's activation registry.
        num_mel_bins (`int`, *optional*, defaults to 128):
            Number of mel features used per input features. Should correspond to the value used in the
            `VoxtralProcessor` class.
        max_source_positions (`int`, *optional*, defaults to 1500):
            The maximum sequence length of log-mel filter-bank features that this model might ever be used with.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.

    ```python
    >>> from transformers import VoxtralEncoderConfig, VoxtralEncoder

    >>> # Initializing a VoxtralEncoderConfig
    >>> configuration = VoxtralEncoderConfig()

    >>> # Initializing a VoxtralEncoder (with random weights)
    >>> model = VoxtralEncoder(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "voxtral_encoder"

    # Maps the Whisper-style attribute names used by shared encoder code onto this
    # config's canonical attribute names.
    attribute_map = {
        "d_model": "hidden_size",
        "encoder_layers": "num_hidden_layers",
        "encoder_attention_heads": "num_attention_heads",
        "encoder_ffn_dim": "intermediate_size",
        "encoder_layerdrop": "layerdrop",
    }

    def __init__(
        self,
        vocab_size=51866,
        hidden_size=1280,
        intermediate_size=5120,
        num_hidden_layers=32,
        num_attention_heads=20,
        scale_embedding=False,
        activation_function="gelu",
        num_mel_bins=128,
        max_source_positions=1500,
        initializer_range=0.02,
        attention_dropout=0.0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(hidden_size) if True
        self.activation_function = activation_function
        self.num_mel_bins = num_mel_bins
        self.max_source_positions = max_source_positions
        self.initializer_range = initializer_range
        # TODO: @eustlb, we do not use dropout and layerdrop, yet we need to hardcode them
        # to be able to use Whisper with modular (here actually from Qwen2-Audio and copied from).
        # After a future Whisper refactor, we should remove this.
        self.dropout = 0.0
        self.layerdrop = 0.0
        self.activation_dropout = 0.0
        self.attention_dropout = attention_dropout
class VoxtralConfig(PreTrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`VoxtralForConditionalGeneration`]. It is used to instantiate an
    Voxtral model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of the Voxtral-Mini-3B.

    e.g. [mistralai/Voxtral-Mini-3B-2507](https://huggingface.co/mistralai/Voxtral-Mini-3B-2507)

    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.

    Args:
        audio_config (`Union[AutoConfig, dict]`, *optional*):
            The config object or dictionary of the audio encoder.
        text_config (`Union[AutoConfig, dict]`, *optional*):
            The config object or dictionary of the text model.
        audio_token_id (`int`, *optional*):
            The audio token index to encode the audio prompt.
        projector_hidden_act (`str`, *optional*, defaults to `"gelu"`):
            The activation function (function or string) in the multi-modal projector.

    ```python
    >>> from transformers import VoxtralForConditionalGeneration, VoxtralConfig

    >>> # Initializing a Voxtral configuration
    >>> configuration = VoxtralConfig(audio_token_id=24, projector_hidden_act="gelu")

    >>> # Initializing a 3B model with random weights
    >>> model = VoxtralForConditionalGeneration(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "voxtral"
    sub_configs = {"text_config": AutoConfig, "audio_config": AutoConfig}

    # Defaults applied to the text sub-config when none (or a partial dict) is provided.
    _default_text_config_kwargs = {
        "vocab_size": 131072,
        "hidden_size": 3072,
        "intermediate_size": 8192,
        "num_hidden_layers": 30,
        "num_key_value_heads": 8,
        "max_position_embeddings": 131072,
        "rms_norm_eps": 1e-05,
        "use_cache": True,
        "rope_theta": 100000000.0,
        "head_dim": 128,
    }

    def __init__(
        self,
        audio_config=None,
        text_config=None,
        audio_token_id=None,
        projector_hidden_act="gelu",
        **kwargs,
    ):
        # Accept a plain dict, a config object, or None (falls back to the default encoder).
        if isinstance(audio_config, dict):
            audio_config["model_type"] = audio_config.get("model_type", "voxtral_encoder")
            audio_config = CONFIG_MAPPING[audio_config["model_type"]](**audio_config)
        elif audio_config is None:
            audio_config = CONFIG_MAPPING["voxtral_encoder"]()
        self.audio_config = audio_config

        # Same for the text model; user-provided values override the class-level defaults.
        if isinstance(text_config, dict):
            text_config["model_type"] = text_config.get("model_type", "llama")
            text_config = CONFIG_MAPPING[text_config["model_type"]](
                **{**self._default_text_config_kwargs, **text_config}
            )
        elif text_config is None:
            text_config = CONFIG_MAPPING["llama"](**self._default_text_config_kwargs)
        self.text_config = text_config

        # Mirror the text model's hidden size at the top level for convenience.
        self.hidden_size = text_config.hidden_size
        self.audio_token_id = audio_token_id
        self.projector_hidden_act = projector_hidden_act

        super().__init__(**kwargs)
__all__ = ["VoxtralEncoderConfig", "VoxtralConfig"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/voxtral/configuration_voxtral.py",
"license": "Apache License 2.0",
"lines": 165,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/voxtral/convert_voxtral_weights_to_hf.py | # Copyright 2025 HuggingFace Inc. team. All rights reserved.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import gc
import json
import os
import re
import torch
from safetensors.torch import load_file
from transformers import (
MistralCommonBackend,
VoxtralConfig,
VoxtralForConditionalGeneration,
VoxtralProcessor,
WhisperFeatureExtractor,
)
from transformers.models.whisper.modeling_whisper import sinusoids
from transformers.utils.hub import cached_file
# fmt: off
# Maps regex patterns over the original (Mistral-format) checkpoint keys to their
# Hugging Face equivalents; the first matching pattern wins (see map_old_key_to_new below).
STATE_DICT_MAPPING = {
    # Text model keys
    r"^output.weight":                            r"language_model.lm_head.weight",
    r"^norm.weight":                              r"language_model.model.norm.weight",
    r"^tok_embeddings.weight":                    r"language_model.model.embed_tokens.weight",
    r"^layers.(\d+).attention_norm.weight":       r"language_model.model.layers.\1.input_layernorm.weight",
    r"^layers.(\d+).ffn_norm.weight":             r"language_model.model.layers.\1.post_attention_layernorm.weight",
    r"^layers.(\d+).attention.w(q|k|v|o).weight": r"language_model.model.layers.\1.self_attn.\2_proj.weight",
    r"^layers.(\d+).feed_forward.w1.weight":      r"language_model.model.layers.\1.mlp.gate_proj.weight",
    r"^layers.(\d+).feed_forward.w2.weight":      r"language_model.model.layers.\1.mlp.down_proj.weight",
    r"^layers.(\d+).feed_forward.w3.weight":      r"language_model.model.layers.\1.mlp.up_proj.weight",
    r"mm_whisper_embeddings.tok_embeddings.weight": r"language_model.model.embed_tokens.weight",

    # audio model keys
    r"mm_whisper_embeddings.whisper_encoder\.conv_layers\.0\.(weight|bias)": r"audio_tower.conv1.\1",
    r"mm_whisper_embeddings.whisper_encoder\.conv_layers\.1\.(weight|bias)": r"audio_tower.conv2.\1",
    r"mm_whisper_embeddings.whisper_encoder\.transformer\.norm\.(weight|bias)": r"audio_tower.layer_norm.\1",
    r"mm_whisper_embeddings.whisper_encoder\.transformer\.layers\.(\d+)\.attention\.w([qkv])\.(weight|bias)": r"audio_tower.layers.\1.self_attn.\2_proj.\3",
    r"mm_whisper_embeddings.whisper_encoder\.transformer\.layers\.(\d+)\.attention\.wo\.(weight|bias)": r"audio_tower.layers.\1.self_attn.out_proj.\2",
    r"mm_whisper_embeddings.whisper_encoder\.transformer\.layers\.(\d+)\.attention_norm\.(weight|bias)": r"audio_tower.layers.\1.self_attn_layer_norm.\2",
    r"mm_whisper_embeddings.whisper_encoder\.transformer\.layers\.(\d+)\.feed_forward\.w1\.(weight|bias)": r"audio_tower.layers.\1.fc1.\2",
    r"mm_whisper_embeddings.whisper_encoder\.transformer\.layers\.(\d+)\.feed_forward\.w2\.(weight|bias)": r"audio_tower.layers.\1.fc2.\2",
    r"mm_whisper_embeddings.whisper_encoder\.transformer\.layers\.(\d+)\.ffn_norm\.(weight|bias)": r"audio_tower.layers.\1.final_layer_norm.\2",
    r"mm_whisper_embeddings.audio_language_projection\.0\.weight": r"multi_modal_projector.linear_1.weight",
    r"mm_whisper_embeddings.audio_language_projection\.2\.weight": r"multi_modal_projector.linear_2.weight",
}
# fmt: on
def convert_config(original_config: dict, max_position_embeddings: int = 131072):
    """
    Build a `VoxtralConfig` from the original checkpoint's config dict.

    Note: pops the "multimodal" entry from `original_config` (mutates the argument),
    matching the original implementation.
    """
    audio_args = original_config.pop("multimodal")["whisper_model_args"]["encoder_args"]
    text_args = original_config

    # Text config: keys that need renaming vs. keys carried over verbatim.
    text_renames = {
        "hidden_size": "dim",
        "num_hidden_layers": "n_layers",
        "intermediate_size": "hidden_dim",
        "num_attention_heads": "n_heads",
        "num_key_value_heads": "n_kv_heads",
        "rms_norm_eps": "norm_eps",
    }
    text_passthrough = [
        "head_dim",
        "vocab_size",
        "rope_theta",
    ]
    text_kwargs = {hf_name: text_args[orig_name] for hf_name, orig_name in text_renames.items()}
    text_kwargs.update({key: value for key, value in text_args.items() if key in text_passthrough})

    # These are not always defined depending on `params.json`
    text_kwargs["sliding_window"] = text_args.get("sliding_window", None)
    text_kwargs["max_position_embeddings"] = text_args.get("max_seq_len", max_position_embeddings)

    # This may sometimes be a string in `params.json`
    if text_kwargs["sliding_window"] is not None:
        text_kwargs["sliding_window"] = int(text_kwargs["sliding_window"])

    # Audio config: same scheme as above.
    audio_renames = {
        "hidden_size": "dim",
        "num_hidden_layers": "n_layers",
        "intermediate_size": "hidden_dim",
        "num_attention_heads": "n_heads",
        "num_key_value_heads": "n_heads",
    }
    audio_passthrough = [
        "head_dim",
        "vocab_size",
    ]
    audio_kwargs = {hf_name: audio_args[orig_name] for hf_name, orig_name in audio_renames.items()}
    audio_kwargs.update({key: value for key, value in audio_args.items() if key in audio_passthrough})

    return VoxtralConfig(
        audio_config=audio_kwargs,
        text_config=text_kwargs,
        audio_token_id=24,
        projector_hidden_act="gelu",
    )
def map_old_key_to_new(old_key):
    """Translate one original state-dict key to its HF-format equivalent via STATE_DICT_MAPPING."""
    for pattern, replacement in STATE_DICT_MAPPING.items():
        substituted, hits = re.subn(pattern, replacement, old_key)
        # First matching pattern wins.
        if hits:
            return substituted
    raise ValueError(f"Key: {old_key} could not be mapped (check the mapping).")
def permute_for_rope(tensor, n_heads, dim1, dim2):
    """Permute the weights for the ROPE formulation (interleaved -> half-split layout)."""
    head_half = dim1 // n_heads // 2
    # Split each head's rows into (half, 2) pairs, then swap the pair axis outward.
    interleaved = tensor.reshape(n_heads, head_half, 2, dim2)
    swapped = interleaved.permute(0, 2, 1, 3)
    return swapped.reshape(dim1, dim2)
def convert_state_dict(original_state_dict, config):
    """Convert a state dict file, when a single `nn.Module` is never sharded in different files (usual case)."""
    converted = {}
    n_heads = config.num_attention_heads
    n_kv_heads = config.num_key_value_heads
    hidden_size = config.hidden_size
    head_dim = config.head_dim
    q_dim = head_dim * n_heads
    kv_dim = head_dim * n_kv_heads

    for original_key, weight in original_state_dict.items():
        hf_key = map_old_key_to_new(original_key)
        # Only the text model's attention projections need the ROPE-specific reshuffling;
        # audio-tower weights are copied through untouched.
        if "audio_tower" not in hf_key:
            if "q_proj" in hf_key:
                weight = weight.view(n_heads, head_dim, hidden_size).reshape(q_dim, hidden_size)
                weight = permute_for_rope(weight, n_heads, q_dim, hidden_size)
            elif "k_proj" in hf_key:
                weight = weight.view(n_kv_heads, head_dim, hidden_size).reshape(kv_dim, hidden_size)
                weight = permute_for_rope(weight, n_kv_heads, kv_dim, hidden_size)
            elif "v_proj" in hf_key:
                weight = weight.view(n_kv_heads, head_dim, hidden_size).reshape(kv_dim, hidden_size)
        converted[hf_key] = weight
    return converted
def write_model(
    input_path_or_repo,
    model_name,
    config_name,
    output_dir,
):
    """Convert an original Voxtral checkpoint to HF format and save it to `output_dir`.

    Args:
        input_path_or_repo: Local path or hub repo id holding the original checkpoint.
        model_name: Filename of the safetensors checkpoint inside `input_path_or_repo`.
        config_name: Filename of the original config inside `input_path_or_repo`.
        output_dir: Directory the converted model is written to.
    """
    print("Converting the model.")
    os.makedirs(output_dir, exist_ok=True)

    # --------------
    # convert config
    # --------------
    config_path = cached_file(input_path_or_repo, config_name)
    with open(config_path, "r") as f:
        original_config = json.load(f)
    config = convert_config(original_config)

    # ---------------
    # convert weights
    # ---------------
    model_path = cached_file(input_path_or_repo, model_name)
    print(f"Fetching all parameters from the checkpoint at {model_path}...")
    state_dict = load_file(model_path)

    print("Converting model...")
    converted_state_dict = convert_state_dict(state_dict, config.text_config)

    # we need to add embed positions as they are not in the state dict
    # Fix: fall back to CPU so conversion also runs on machines without a GPU; `sinusoids`
    # is deterministic, so the resulting weights are the same either way.
    sinusoid_device = "cuda" if torch.cuda.is_available() else "cpu"
    with torch.no_grad(), torch.device(sinusoid_device):
        # TODO: @eustlb, we are here creating on GPU
        # vllm initializes on device, while we save in state dict
        embed_positions_weight = sinusoids(config.audio_config.max_source_positions, config.audio_config.hidden_size)
    converted_state_dict["audio_tower.embed_positions.weight"] = embed_positions_weight.cpu()

    # -------------------------
    # load the weights and save
    # -------------------------
    print("Loading the checkpoint in a Voxtral model.")
    # Fix: instantiate the model only once, on the meta device (no memory allocated), and
    # materialize weights via `assign=True`. The previous code first built a fully-initialized
    # model on CPU and immediately discarded it, wasting time and memory.
    with torch.device("meta"):
        model = VoxtralForConditionalGeneration(config)
    model.load_state_dict(converted_state_dict, strict=True, assign=True)
    print("Checkpoint loaded successfully.")
    del model.config._name_or_path
    del model.generation_config._from_model_config

    model.generation_config.pad_token_id = 11

    print("Saving the model.")
    model.save_pretrained(output_dir)
    del state_dict, model

    # Safety check: reload the converted model
    gc.collect()
    print("Reloading the model to check if it's saved correctly.")
    VoxtralForConditionalGeneration.from_pretrained(output_dir, dtype=torch.bfloat16, device_map="auto")
    print("Model reloaded successfully.")
def write_processor(input_path_or_repo: str, feature_extractor_path_or_repo: str, output_dir: str):
    """Assemble a VoxtralProcessor from its tokenizer and feature-extractor repos and save it."""
    backend_tokenizer = MistralCommonBackend.from_pretrained(input_path_or_repo)
    whisper_feature_extractor = WhisperFeatureExtractor.from_pretrained(feature_extractor_path_or_repo)

    print("Creating the processor...")
    # Combine the two components into a single processor and persist it.
    processor = VoxtralProcessor(
        feature_extractor=whisper_feature_extractor,
        tokenizer=backend_tokenizer,
    )
    processor.save_pretrained(output_dir)
    print("Processor saved successfully.")
def main():
    """CLI entry point: parse arguments, then convert and save the model and processor."""
    parser = argparse.ArgumentParser(description="Convert Voxtral weights to Hugging Face format")
    parser.add_argument(
        "--input_path_or_repo",
        type=str,
        required=True,
        # fixed copy-paste leftover from the Csm conversion script: this one converts Voxtral
        help="Path or repo containing Voxtral weights",
    )
    parser.add_argument(
        "--model_name",
        type=str,
        required=True,
        help="Name of the model in input_path_or_repo",
    )
    parser.add_argument(
        "--config_name",
        type=str,
        required=True,
        help="Name of the config in input_path_or_repo",
    )
    parser.add_argument(
        "--feature_extractor_path_or_repo",
        type=str,
        required=True,
        help="Path or repo containing the feature extractor",
    )
    parser.add_argument(
        "--output_dir",
        # required: previously this had neither `required=True` nor a default, so
        # args.output_dir was None and write_model crashed in os.makedirs(None)
        required=True,
        help="Location to write HF model and tokenizer",
    )
    args = parser.parse_args()

    write_model(
        args.input_path_or_repo,
        args.model_name,
        args.config_name,
        args.output_dir,
    )
    write_processor(
        args.input_path_or_repo,
        args.feature_extractor_path_or_repo,
        args.output_dir,
    )


if __name__ == "__main__":
    main()
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/voxtral/convert_voxtral_weights_to_hf.py",
"license": "Apache License 2.0",
"lines": 246,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/voxtral/modular_voxtral.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from torch import nn
from ...activations import ACT2FN
from ...cache_utils import Cache
from ...generation import GenerationMixin
from ...modeling_outputs import (
BaseModelOutputWithPast,
BaseModelOutputWithPooling,
CausalLMOutputWithPast,
)
from ...processing_utils import Unpack
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple
from ...utils.generic import merge_with_config_defaults
from ...utils.output_capturing import capture_outputs
from ..auto import AutoModel, AutoModelForCausalLM
from ..qwen2_audio.modeling_qwen2_audio import (
Qwen2AudioAttention,
Qwen2AudioEncoder,
Qwen2AudioEncoderLayer,
Qwen2AudioPreTrainedModel,
)
from .configuration_voxtral import VoxtralConfig
class VoxtralAttention(Qwen2AudioAttention):
    """Audio attention, inherited unchanged from Qwen2Audio; subclassed only so the
    modular converter emits a Voxtral-named class."""

    pass
class VoxtralEncoderLayer(Qwen2AudioEncoderLayer):
    """Encoder layer, inherited unchanged from Qwen2Audio; subclassed only so the
    modular converter emits a Voxtral-named class."""

    pass
class VoxtralPreTrainedModel(Qwen2AudioPreTrainedModel):
    """Base class for Voxtral models: extends the Qwen2Audio pretrained base with
    Voxtral's feature-support flags."""

    # Capability flags read by the PreTrainedModel machinery.
    _supports_flex_attn = True
    _supports_cache_class = True
    _supports_attention_backend = True
    _can_compile_fullgraph = True
    # No modules need to be kept intact when sharding with a device map.
    _no_split_modules = None
# TODO: @eustlb, I would really prefer to use WhisperEncoder but it's messing with modular
@auto_docstring(
    custom_intro="""
    The Voxtral encoder, which is a Whisper encoder.
    """
)
class VoxtralEncoder(Qwen2AudioEncoder):
    # Maps output names to the sub-module types whose outputs the capture machinery records.
    _can_record_outputs = {
        "attentions": VoxtralAttention,
        "hidden_states": VoxtralEncoderLayer,
    }

    @merge_with_config_defaults
    @capture_outputs
    def forward(
        self,
        input_features,
        attention_mask=None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple | BaseModelOutputWithPooling:
        r"""
        Args:
            input_features (`torch.LongTensor` of shape `(batch_size, feature_size, sequence_length)`):
                Float values of mel features extracted from the raw speech waveform. Raw speech waveform can be
                obtained by loading a `.flac` or `.wav` audio file into an array of type `list[float]` or a
                `numpy.ndarray`, *e.g.* via the soundfile library (`pip install soundfile`). To prepare the array into
                `input_features`, the [`AutoFeatureExtractor`] should be used for extracting the mel features, padding
                and conversion into a tensor of type `torch.FloatTensor`. See [`~WhisperFeatureExtractor.__call__`]
            attention_mask (`torch.Tensor`, *optional*):
                Voxtral does not support masking of the `input_features`, this argument is preserved for compatibility,
                but it is not used. By default the silence in the input log mel spectrogram are ignored.
        """
        # The two strided convolutions downsample the time axis, so the mel input must
        # have exactly max_source_positions * stride1 * stride2 frames.
        expected_seq_length = self.config.max_source_positions * self.conv1.stride[0] * self.conv2.stride[0]
        if input_features.shape[-1] != expected_seq_length:
            raise ValueError(
                f"Voxtral expects the mel input features to be of length {expected_seq_length}, but found {input_features.shape[-1]}. Make sure to pad the input mel features to {expected_seq_length}."
            )

        input_features = input_features.to(dtype=self.conv1.weight.dtype, device=self.conv1.weight.device)
        inputs_embeds = nn.functional.gelu(self.conv1(input_features))
        inputs_embeds = nn.functional.gelu(self.conv2(inputs_embeds))
        # (batch, channels, time) -> (batch, time, channels) for the transformer layers.
        inputs_embeds = inputs_embeds.permute(0, 2, 1)
        embed_pos = self.embed_positions.weight

        hidden_states = (inputs_embeds + embed_pos).to(inputs_embeds.dtype)
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)

        # The loop index was unused; iterate the layers directly.
        for encoder_layer in self.layers:
            layer_outputs = encoder_layer(
                hidden_states,
                attention_mask=attention_mask,
            )
            hidden_states = layer_outputs[0]

        hidden_states = self.layer_norm(hidden_states)
        return BaseModelOutputWithPooling(
            last_hidden_state=hidden_states,
        )
class VoxtralMultiModalProjector(nn.Module):
    """Two-layer MLP projecting audio-encoder features into the language model's hidden size."""

    def __init__(self, config: VoxtralConfig):
        super().__init__()
        # Attribute names are part of the checkpoint layout — do not rename.
        self.linear_1 = nn.Linear(config.audio_config.intermediate_size, config.text_config.hidden_size, bias=False)
        self.act = ACT2FN[config.projector_hidden_act]
        self.linear_2 = nn.Linear(config.text_config.hidden_size, config.text_config.hidden_size, bias=False)

    def forward(self, audio_features):
        # linear -> activation -> linear
        projected = self.linear_1(audio_features)
        return self.linear_2(self.act(projected))
@auto_docstring(
    custom_intro="""
    The Voxtral model, which consists of Whisper encoder, a multi-modal projector and a LLama language model.
    """
)
class VoxtralForConditionalGeneration(VoxtralPreTrainedModel, GenerationMixin):
    # Sinusoidal position embeddings stay in fp32 even under reduced-precision dtypes.
    _keep_in_fp32_modules_strict = ["embed_positions"]

    def __init__(self, config):
        super().__init__(config)
        self.vocab_size = config.text_config.vocab_size
        # Each sub-module is instantiated from its own sub-config.
        self.audio_tower = AutoModel.from_config(config.audio_config)
        self.language_model = AutoModelForCausalLM.from_config(config.text_config)
        self.multi_modal_projector = VoxtralMultiModalProjector(config)

        # Initialize weights and apply final processing
        self.post_init()

    # Embedding and decoder accessors all delegate to the wrapped language model.
    def get_input_embeddings(self):
        return self.language_model.get_input_embeddings()

    def set_input_embeddings(self, value):
        self.language_model.set_input_embeddings(value)

    def get_output_embeddings(self):
        return self.language_model.get_output_embeddings()

    def set_output_embeddings(self, new_embeddings):
        self.language_model.set_output_embeddings(new_embeddings)

    def set_decoder(self, decoder):
        self.language_model.set_decoder(decoder)

    def get_decoder(self):
        return self.language_model.get_decoder()

    @can_return_tuple
    @auto_docstring(
        custom_intro="This method is used to get the audio embeddings from input features (a log mel spectrogram), meaning inferring the audio encoder and the multi-modal projector."
    )
    def get_audio_features(
        self, input_features: torch.FloatTensor, **kwargs: Unpack[TransformersKwargs]
    ) -> tuple | BaseModelOutputWithPooling:
        r"""
        input_features (`torch.FloatTensor`):
            Float values of mel features extracted from the raw speech waveform. Raw speech waveform can be
            obtained by loading a `.flac` or `.wav` audio file into an array of type `list[float]` or a
            `numpy.ndarray`, *e.g.* via the soundfile library (`pip install soundfile`). To prepare the array into
            `input_features`, the [`AutoFeatureExtractor`] should be used for extracting the mel features, padding
            and conversion into a tensor of type `torch.FloatTensor`. See [`~WhisperFeatureExtractor.__call__`]
        """
        audio_outputs = self.audio_tower(input_features, return_dict=True, **kwargs)
        audio_hidden_states = audio_outputs.last_hidden_state
        # Flatten (batch, time, hidden) into rows of `intermediate_size` before projecting.
        audio_hidden_states = audio_hidden_states.reshape(-1, self.config.audio_config.intermediate_size)
        audio_embeds = self.multi_modal_projector(audio_hidden_states)
        # Projected embeddings are returned via the pooler_output slot of the encoder output.
        audio_outputs.pooler_output = audio_embeds
        return audio_outputs

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        input_features: torch.FloatTensor | None = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: Cache | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        labels: torch.LongTensor | None = None,
        use_cache: bool | None = None,
        cache_position: torch.LongTensor | None = None,
        logits_to_keep: int | torch.Tensor = 0,
        **kwargs: Unpack[TransformersKwargs],
    ) -> CausalLMOutputWithPast:
        r"""
        Example:
        ```python
        >>> from transformers import VoxtralForConditionalGeneration, AutoProcessor
        >>> import torch
        >>> device = "cuda" if torch.cuda.is_available() else "cpu"
        >>> repo_id = "mistralai/Voxtral-Mini-3B-2507"
        >>> processor = AutoProcessor.from_pretrained(repo_id)
        >>> model = VoxtralForConditionalGeneration.from_pretrained(repo_id, dtype=torch.bfloat16, device_map=device)
        >>> conversation = [
            {
                "role": "user",
                "content": [
                    {
                        "type": "audio",
                        "url": "https://huggingface.co/datasets/hf-internal-testing/dummy-audio-samples/resolve/main/dude_where_is_my_car.wav",
                    },
                    {"type": "text", "text": "What can you tell me about this audio?"},
                ],
            }
        ]
        >>> inputs = processor.apply_chat_template(conversation)
        >>> inputs = inputs.to(device, dtype=torch.bfloat16)
        >>> outputs = model.generate(**inputs, max_new_tokens=30)
        >>> processor.batch_decode(outputs[:, inputs.input_ids.shape[1]:], skip_special_tokens=True)
        ["This audio is a humorous conversation between two friends, likely in English, where one of them is trying to figure out what the other's tattoo says."]
        ```"""
        if inputs_embeds is None:
            inputs_embeds = self.get_input_embeddings()(input_ids)

        # Audio is only merged when both features and ids are present (i.e. the prefill step).
        if input_features is not None and input_ids is not None:
            audio_embeds = self.get_audio_features(input_features, return_dict=True).pooler_output

            # replace text-audio token placeholders with audio embeddings
            audio_token_mask = (input_ids == self.config.audio_token_id).unsqueeze(-1)
            # NOTE(review): masked_scatter assumes the number of placeholder positions matches
            # the number of audio embedding rows — guaranteed by the processor's templating.
            inputs_embeds = inputs_embeds.masked_scatter(
                audio_token_mask.to(inputs_embeds.device), audio_embeds.to(inputs_embeds.device)
            )

        outputs: BaseModelOutputWithPast = self.language_model(
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            labels=labels,
            use_cache=use_cache,
            cache_position=cache_position,
            logits_to_keep=logits_to_keep,
            **kwargs,
        )
        return outputs

    def prepare_inputs_for_generation(self, *args, **kwargs):
        # Overwritten -- we should not pass input_features when we are in cached decoding stage
        input_features = kwargs.pop("input_features", None)
        is_first_iteration = kwargs.get("is_first_iteration", False)
        model_inputs = super().prepare_inputs_for_generation(*args, **kwargs)

        if is_first_iteration or not kwargs.get("use_cache", True):
            # input_features should only be passed when we are not in cached decoding stage
            model_inputs["input_features"] = input_features

        return model_inputs
# Public symbols exported from the generated modeling file.
__all__ = ["VoxtralPreTrainedModel", "VoxtralEncoder", "VoxtralForConditionalGeneration"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/voxtral/modular_voxtral.py",
"license": "Apache License 2.0",
"lines": 231,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/voxtral/processing_voxtral.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
from ...utils import auto_docstring, is_mistral_common_available, is_soundfile_available, is_torch_available, logging
if is_torch_available():
import torch
if is_soundfile_available():
import soundfile as sf
if is_mistral_common_available():
from mistral_common.protocol.transcription.request import TranscriptionRequest
from ...audio_utils import AudioInput, load_audio_as, make_list_of_audio
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import AllKwargsForChatTemplate, AudioKwargs, ProcessingKwargs, ProcessorMixin, Unpack
from ...tokenization_utils_base import PreTokenizedInput, TextInput
logger = logging.get_logger(__name__)
class VoxtralAudioKwargs(AudioKwargs, total=False):
    """
    max_source_positions (`int`, *optional*, defaults to `3000`):
        Maximum number of positions per chunk when splitting mel spectrogram features along the time dimension.
    """

    # Voxtral-specific addition on top of the shared AudioKwargs TypedDict.
    max_source_positions: int | None
class VoxtralProcessorKwargs(ProcessingKwargs, total=False):
    """Processor kwargs for Voxtral, with the audio slot widened to `VoxtralAudioKwargs`."""

    audio_kwargs: VoxtralAudioKwargs
    # Defaults merged into user kwargs by ProcessorMixin._merge_kwargs.
    _defaults = {
        "text_kwargs": {
            "padding": True,
        },
        "audio_kwargs": {
            "sampling_rate": 16000,
            "padding": True,
            "truncation": False,
            # pad audio to a multiple of 480000 samples (30 s at 16 kHz)
            "pad_to_multiple_of": 480000,
            "max_source_positions": 3000,
        },
        "common_kwargs": {
            "return_tensors": "pt",
            "return_dict": True,
            "tokenize": True,
        },
    }
@auto_docstring
class VoxtralProcessor(ProcessorMixin):
    def __init__(
        self,
        feature_extractor,
        tokenizer,
    ):
        # Audio placeholder token id is fixed to 24 here; the matching token string is
        # resolved from the tokenizer's vocabulary.
        self.audio_token_id = 24
        self.audio_token = tokenizer.convert_ids_to_tokens(self.audio_token_id)
        super().__init__(feature_extractor, tokenizer)

    def _retrieve_input_features(self, audio, max_source_positions, **kwargs):
        """
        Handles specific logic of Voxtral expected input features: audio arrays should be padded to next multiple of 480000 (duration is a multiple of 30s), see VoxtralProcessorKwargs' default audio_kwargs.
        Then mel input features are extracted and stacked along batch dimension, splitting into chunks of max_source_positions.
        """
        input_features_list = []
        for audio_array in audio:
            audio_inputs = self.feature_extractor(audio_array, **kwargs)

            # let's split into chunks of max_source_positions, and then stack them along batch dimension
            input_features = audio_inputs["input_features"].reshape(
                self.feature_extractor.feature_size, -1, max_source_positions
            )
            input_features_list.append(input_features.transpose(0, 1))

        return torch.cat(input_features_list)

    def apply_chat_template(
        self,
        conversation: list[dict[str, str]] | list[list[dict[str, str]]],
        **kwargs: Unpack[AllKwargsForChatTemplate],
    ) -> str:
        """
        This method applies the model's chat completion template given a conversation. It relies on MistralCommonBackend's
        [`~MistralCommonBackend.apply_chat_template`] to prepare input ids to the model and on WhisperFeatureExtractor's
        [`~WhisperFeatureExtractor.__call__`] to prepare input features to the model.
        Note that audio is padded to the nearest 30-second multiple prior to mel feature extraction.
        A `conversation` is a list of messages, where each message is a dictionary with a `role` and a `content` field.
        For Voxtral, `role` can be `"user"` or `"assistant"`.
        The `content` field can be a string or a list of dictionaries with a `type` field. See example below.
        ```python
        from huggingface_hub import hf_hub_download
        from transformers.audio_utils import load_audio_as
        audio_url = "https://huggingface.co/datasets/hf-internal-testing/dummy-audio-samples/resolve/main/bcn_weather.mp3"
        audio_path = hf_hub_download(repo_id="hf-internal-testing/dummy-audio-samples", filename="bcn_weather.mp3", repo_type="dataset")
        audio_base64 = load_audio_as(audio_path, return_format="base64", force_mono=True)
        # audio + text
        conversation = [
            {
                "role": "user",
                "content": [
                    {"type": "audio", "url": audio_url},
                    {"type": "audio", "path": audio_path},
                    {"type": "audio", "base64": audio_base64},
                    {"type": "text", "text": "How many audio do you hear?"},
                ],
            },
        ]
        processor = VoxtralProcessor.from_pretrained("mistralai/Voxtral-Mini-3B-2507")
        inputs = processor.apply_chat_template(conversation)
        ```
        Args:
            conversation (`Union[list[Dict, [str, str]], list[list[dict[str, str]]]]`):
                The conversation to format.
        """
        # Validate mutually exclusive template options up front.
        if kwargs.get("continue_final_message", False):
            if kwargs.get("add_generation_prompt", False):
                raise ValueError(
                    "continue_final_message and add_generation_prompt are not compatible. Use continue_final_message when you want the model to continue the final message, and add_generation_prompt when you want to add a header that will prompt it to start a new assistant message instead."
                )
            if kwargs.get("return_assistant_tokens_mask", False):
                raise ValueError("continue_final_message is not compatible with return_assistant_tokens_mask.")

        # A batch is a list of conversations (lists, or message objects with `.content`).
        if isinstance(conversation, (list, tuple)) and (
            isinstance(conversation[0], (list, tuple)) or hasattr(conversation[0], "content")
        ):
            is_batched = True
            conversations = conversation
        else:
            is_batched = False
            conversations = [conversation]

        # - `sampling_rate` is already fixed in `VoxtralProcessorKwargs._defaults` and audio loading is
        #   delegated to `mistral_common`'s tokenizer which handles it internally.
        # - `load_audio_from_video` is irrelevant as Voxtral is a speech-only model with no video support.
        # We strip them here to avoid passing unrecognized kwargs to `_merge_kwargs`.
        unsupported_keys = {"sampling_rate", "load_audio_from_video"} & kwargs.keys()
        if unsupported_keys:
            for key in unsupported_keys:
                kwargs.pop(key)
            logger.warning(
                f"{', '.join(sorted(unsupported_keys))} {'is' if len(unsupported_keys) == 1 else 'are'} not supported for VoxtralProcessor's apply_chat_template and will be ignored."
            )

        output_kwargs = self._merge_kwargs(
            VoxtralProcessorKwargs,
            **kwargs,
        )
        text_kwargs = output_kwargs["text_kwargs"]
        audio_kwargs = output_kwargs["audio_kwargs"]

        return_tensors = text_kwargs.get("return_tensors", None)
        if return_tensors != "pt":
            raise ValueError(f"{self.__class__.__name__} only supports `return_tensors='pt'`.")

        # Tokenize without tensor conversion: tensors are built by BatchFeature below.
        tokenizer_kwargs = output_kwargs["text_kwargs"]
        tokenizer_kwargs["return_tensors"] = None  # let's not return tensors here
        encoded_instruct_inputs = self.tokenizer.apply_chat_template(conversations, **tokenizer_kwargs)

        if text_kwargs.get("tokenize", False):
            if text_kwargs.get("return_dict", False):
                # The backend returns decoded audio arrays alongside the token ids.
                audio = encoded_instruct_inputs.pop("audio", None)
                data = dict(encoded_instruct_inputs)
                if audio is not None:
                    max_source_positions = audio_kwargs.pop("max_source_positions")
                    data["input_features"] = self._retrieve_input_features(audio, max_source_positions, **audio_kwargs)
                return BatchFeature(data=data, tensor_type=return_tensors)

        if not is_batched:
            return encoded_instruct_inputs[0]
        return encoded_instruct_inputs

    @auto_docstring(
        custom_intro=r"""
        Method to prepare text to be fed as input to the model. This method forwards the `text`
        arguments to MistralCommonBackend's [`~MistralCommonBackend.__call__`] to encode
        the text. Please refer to the docstring of the above methods for more information.
        This method does not support audio. To prepare the audio, please use:
        1. `apply_chat_template` [`~VoxtralProcessor.apply_chat_template`] method.
        2. `apply_transcription_request` [`~VoxtralProcessor.apply_transcription_request`] method.
        """
    )
    def __call__(
        self,
        text: TextInput | PreTokenizedInput | list[TextInput] | list[PreTokenizedInput] | None,
        **kwargs: Unpack[VoxtralProcessorKwargs],
    ):
        if isinstance(text, str):
            text = [text]

        # The raw audio placeholder token must not appear in plain-text input.
        if any(self.audio_token in t for t in text):
            raise ValueError(
                f"{self.audio_token} is present in the provided text which is not supported by VoxtralProcessor. Please use the `apply_chat_template` method instead."
            )

        output_kwargs = self._merge_kwargs(VoxtralProcessorKwargs, **kwargs)
        out = self.tokenizer(text, **output_kwargs["text_kwargs"])
        return BatchFeature(data=out, tensor_type=output_kwargs["text_kwargs"].get("return_tensors", None))

    # TODO: @eustlb, this should be moved to mistral_common + testing
    def apply_transcription_request(
        self,
        audio: str | list[str] | AudioInput,
        model_id: str,
        language: str | list[str | None] | None = None,
        sampling_rate: int | None = None,
        format: str | list[str] | None = None,
        **kwargs: Unpack[VoxtralProcessorKwargs],
    ):
        """
        This method applies the model's transcription request template given a language and audio.
        It relies on MistralCommonBackend and WhisperFeatureExtractor to prepare input ids and input features to the model.
        ```python
        from transformers import VoxtralProcessor
        model_id = "mistralai/Voxtral-Mini-3B-2507"
        processor = VoxtralProcessor.from_pretrained(model_id)
        language = "en"
        audio = "https://huggingface.co/datasets/hf-internal-testing/dummy-audio-samples/resolve/main/obama.mp3"
        # set the language is already know for better accuracy
        inputs = processor.apply_transcription_request(language=language, audio=audio, model_id=model_id)
        # but you can also let the model detect the language automatically
        inputs = processor.apply_transcription_request(audio=audio, model_id=model_id)
        ```
        Args:
            audio (`str`, `list[str]`, `np.ndarray`, `torch.Tensor`, `list[np.ndarray]`, `list[torch.Tensor]`):
                The audio or batch of audio to be prepared. If provided as a string, it should correspond to the path or url of the audio file.
            model_id (`str`:
                The hub model id of the model to use for transcription.
            language (`str`, `list[Union[str, None]]`, *optional*):
                The language or languages of the audio.
                If not provided or None, automatic language detection will be used for all audio.
                If provided as a string (a language code in the [ISO 639-1 alpha-2 format](https://en.wikipedia.org/wiki/ISO_639-1) e.g. `"en"`), it will be applied uniformly to all audio.
                If provided as a list of strings/ None values, e.g. `["en", None, "fr"]`, will be applied to each audio individually with a one-to-one mapping,
                with a None value indicating automatic language detection for that audio.
            sampling_rate (`int`, *optional*):
                The sampling rate of the audio. Necessary if it is provided as `np.ndarray`, `torch.Tensor`, `list[np.ndarray]`, `list[torch.Tensor]`.
                Used to avoid silent errors when passing audio that is not in the expected sampling rate.
            format (`str`, `list[str]`, *optional*):
                The format of the audio, necessary if is provided as `np.ndarray`, `torch.Tensor`, `list[np.ndarray]`, `list[torch.Tensor]`.
        """
        output_kwargs = self._merge_kwargs(
            VoxtralProcessorKwargs,
            **kwargs,
        )
        text_kwargs = output_kwargs["text_kwargs"]
        audio_kwargs = output_kwargs["audio_kwargs"]

        # Determine how the audio was provided: path/url string(s) vs raw arrays.
        is_str = isinstance(audio, str)
        is_list_of_str = all(isinstance(el, str) for el in audio)
        is_list_of_audio = not (is_str or is_list_of_str)

        if is_list_of_audio:
            # Raw arrays carry no sampling-rate information, so warn or validate explicitly.
            if sampling_rate is None:
                logger.warning_once(
                    f"You've provided audio without specifying the sampling rate. It will be assumed to be {audio_kwargs['sampling_rate']}, which can result in silent errors."
                )
            elif sampling_rate != audio_kwargs["sampling_rate"]:
                raise ValueError(
                    f"The sampling rate of the audio ({sampling_rate}) does not match the sampling rate of the processor ({audio_kwargs['sampling_rate']}). Please provide resampled the audio to the expected sampling rate."
                )

        sampling_rate = audio_kwargs["sampling_rate"]

        # make sure to remove from text_kwargs and audio_kwargs
        return_dict = text_kwargs.pop("return_dict", False)
        tokenize = text_kwargs.pop("tokenize", False)
        _ = audio_kwargs.pop("return_dict", False)
        _ = audio_kwargs.pop("tokenize", False)

        return_tensors = text_kwargs.pop("return_tensors", None)
        if return_tensors != "pt":
            raise ValueError(f"{self.__class__.__name__} only supports `return_tensors='pt'`.")

        # validate audio input
        if is_str:
            audio = [load_audio_as(audio, return_format="buffer", force_mono=True, sampling_rate=sampling_rate)]
        elif is_list_of_str:
            audio = [
                load_audio_as(el, return_format="buffer", force_mono=True, sampling_rate=sampling_rate) for el in audio
            ]
        else:
            # Raw arrays must be re-encoded into in-memory audio files for mistral_common.
            audio = make_list_of_audio(audio)
            if len(audio) != len(format):
                raise ValueError(
                    f"When passed as a list of audio, the length ({len(audio)}) must match the number of format ({len(format)})"
                )
            audio_buffers = []
            for array, f in zip(audio, format):
                # Create new BytesIO object and write audio data to it
                buffer = io.BytesIO()

                # Convert to mono if needed
                if array.ndim == 2:
                    array = array.mean(axis=1)

                # Write to buffer with default format and sampling rate
                sf.write(buffer, array, samplerate=audio_kwargs["sampling_rate"], format=f)
                buffer.seek(0)
                audio_buffers.append(buffer)
            audio = audio_buffers

        # validate language input
        n_audio = len(audio)
        if isinstance(language, str):
            language = [language] * n_audio
        elif language is None:
            language = [None] * n_audio
        if len(language) != n_audio:
            raise ValueError(
                f"When passed as a list of languages, the length ({len(language)}) must match the number of audio ({n_audio})"
            )

        # Build one transcription request per (audio, language) pair via mistral_common.
        input_ids = []
        texts = []
        audio_arrays = []
        for audio_el, language_el in zip(audio, language):
            openai_transcription_request = {
                "model": model_id,
                "file": audio_el,
                "language": language_el,
            }
            transcription_request = TranscriptionRequest.from_openai(openai_transcription_request)
            tokenized_transcription_request = self.tokenizer.tokenizer.encode_transcription(transcription_request)
            input_ids.append(tokenized_transcription_request.tokens)
            texts.append(tokenized_transcription_request.text)
            audio_arrays.extend([el.audio_array for el in tokenized_transcription_request.audios])

        if tokenize:
            if return_dict:
                # text are already tokenized but we need to pad etc
                encoding = self.tokenizer(
                    input_ids,
                    add_special_tokens=False,
                    **text_kwargs,
                )
                data = dict(encoding)

                # extract the input features
                max_source_positions = audio_kwargs.pop("max_source_positions")
                data["input_features"] = self._retrieve_input_features(
                    audio_arrays, max_source_positions, **audio_kwargs
                )
                return BatchFeature(data=data, tensor_type=return_tensors)

        return texts
# Public symbols exported by this module.
__all__ = ["VoxtralProcessor"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/voxtral/processing_voxtral.py",
"license": "Apache License 2.0",
"lines": 320,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:utils/scan_skipped_tests.py | # Copyright 2025 the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import re
from pathlib import Path
REPO_ROOT = Path().cwd()
COMMON_TEST_FILES: list[tuple[Path, str]] = [
(REPO_ROOT / "tests/test_modeling_common.py", "common"),
(REPO_ROOT / "tests/generation/test_utils.py", "GenerationMixin"),
]
MODELS_DIR = REPO_ROOT / "tests/models"
def get_common_tests(file_paths_with_origin: list[tuple[Path, str]]) -> dict[str, str]:
"""Extract all common test function names (e.g., 'test_forward')."""
tests_with_origin: dict[str, str] = {}
for file_path, origin_tag in file_paths_with_origin:
if not file_path.is_file():
continue
content = file_path.read_text(encoding="utf-8")
for test_name in re.findall(r"^\s*def\s+(test_[A-Za-z0-9_]+)", content, re.MULTILINE):
tests_with_origin[test_name] = origin_tag
return tests_with_origin
def get_models_and_test_files(models_dir: Path) -> tuple[list[str], list[Path]]:
    """Collect all model test files under *models_dir* plus the sorted model (directory) names."""
    if not models_dir.is_dir():
        raise FileNotFoundError(f"Models directory not found at {models_dir}")
    # Each model lives in its own sub-directory; the parent dir name is the model name.
    files: list[Path] = sorted(models_dir.rglob("test_modeling_*.py"))
    names: list[str] = sorted({f.parent.name for f in files})
    return names, files
def _extract_reason_from_decorators(decorators_block: str) -> str:
"""Extracts the reason string from a decorator block, if any."""
reason_match = re.search(r'reason\s*=\s*["\'](.*?)["\']', decorators_block)
if reason_match:
return reason_match.group(1)
reason_match = re.search(r'\((?:.*?,\s*)?["\'](.*?)["\']\)', decorators_block)
if reason_match:
return reason_match.group(1)
return decorators_block.strip().split("\n")[-1].strip()
def extract_test_info(file_content: str) -> dict[str, tuple[str, str]]:
"""
Parse a test file once and return a mapping of test functions to their
status and skip reason, e.g. {'test_forward': ('SKIPPED', 'too slow')}.
"""
result: dict[str, tuple[str, str]] = {}
pattern = re.compile(r"((?:^\s*@.*?\n)*?)^\s*def\s+(test_[A-Za-z0-9_]+)\b", re.MULTILINE)
for decorators_block, test_name in pattern.findall(file_content):
if "skip" in decorators_block:
result[test_name] = ("SKIPPED", _extract_reason_from_decorators(decorators_block))
else:
result[test_name] = ("RAN", "")
return result
def build_model_overrides(model_test_files: list[Path]) -> dict[str, dict[str, tuple[str, str]]]:
    """Return *model_name → {test_name → (status, reason)}* mapping."""
    overrides: dict[str, dict[str, tuple[str, str]]] = {}
    for test_file in model_test_files:
        # Several test files can share a parent dir; merge their test info.
        per_model = overrides.setdefault(test_file.parent.name, {})
        per_model.update(extract_test_info(test_file.read_text(encoding="utf-8")))
    return overrides
def save_json(obj: dict, output_path: Path) -> None:
    """Serialize *obj* as pretty-printed JSON at *output_path*, creating parent dirs as needed."""
    output_path.parent.mkdir(parents=True, exist_ok=True)
    serialized = json.dumps(obj, indent=2)
    output_path.write_text(serialized, encoding="utf-8")
def summarize_single_test(
    test_name: str,
    model_names: list[str],
    model_overrides: dict[str, dict[str, tuple[str, str]]],
) -> dict[str, object]:
    """Print a concise terminal summary for *test_name* and return the raw data."""
    models_ran: list[str] = []
    models_skipped: list[str] = []
    reasons_for_skipping: list[str] = []

    # Unknown models / tests default to a plain "RAN" with no reason.
    for name in model_names:
        status, reason = model_overrides.get(name, {}).get(test_name, ("RAN", ""))
        if status != "SKIPPED":
            models_ran.append(name)
        else:
            models_skipped.append(name)
            reasons_for_skipping.append(f"{name}: {reason}")

    total_models = len(model_names)
    skipped_ratio = (len(models_skipped) / total_models) if total_models else 0.0

    print(f"\n== {test_name} ==")
    print(f"Ran : {len(models_ran)}/{total_models}")
    print(f"Skipped : {len(models_skipped)}/{total_models} ({skipped_ratio:.1%})")
    # Show at most ten reasons, then an ellipsis.
    for entry in reasons_for_skipping[:10]:
        print(f" - {entry}")
    if len(reasons_for_skipping) > 10:
        print(" - ...")

    return {
        "models_ran": sorted(models_ran),
        "models_skipped": sorted(models_skipped),
        "skipped_proportion": round(skipped_ratio, 4),
        "reasons_skipped": sorted(reasons_for_skipping),
    }
def summarize_all_tests(
    tests_with_origin: dict[str, str],
    model_names: list[str],
    model_overrides: dict[str, dict[str, tuple[str, str]]],
) -> dict[str, object]:
    """Return aggregated data for every discovered common test.

    For each test, models with no override entry are counted as having run it;
    progress is echoed to the terminal on a single rewritten line.
    """
    aggregated: dict[str, object] = {}
    total = len(model_names)
    all_tests = list(tests_with_origin)
    print(f"[INFO] Aggregating {len(all_tests)} tests...")
    for position, name in enumerate(all_tests, start=1):
        print(f" ({position}/{len(all_tests)}) {name}", end="\r")
        ran, skipped, reasons = [], [], []
        for model in model_names:
            status, reason = model_overrides.get(model, {}).get(name, ("RAN", ""))
            if status == "SKIPPED":
                skipped.append(model)
                reasons.append(f"{model}: {reason}")
            else:
                ran.append(model)
        ratio = len(skipped) / total if total else 0.0
        aggregated[name] = {
            "origin": tests_with_origin[name],
            "models_ran": sorted(ran),
            "models_skipped": sorted(skipped),
            "skipped_proportion": round(ratio, 4),
            "reasons_skipped": sorted(reasons),
        }
    print("\n[INFO] Scan complete.")
    return aggregated
def main() -> None:
    """CLI entry point: scan model tests and write the JSON report.

    With ``--test_method_name`` only that test is summarized; otherwise every
    discovered common test is aggregated.
    """
    parser = argparse.ArgumentParser(
        description="Scan model tests for overridden or skipped common or generate tests.",
    )
    parser.add_argument(
        "--output_dir",
        default=".",
        help="Directory for JSON output (default: %(default)s)",
    )
    parser.add_argument(
        "--test_method_name",
        help="Scan only this test method (single‑test mode)",
    )
    args = parser.parse_args()

    out_dir = Path(args.output_dir).expanduser()
    target_test = args.test_method_name

    tests_with_origin = get_common_tests(COMMON_TEST_FILES)
    if target_test:
        # Restrict the scan to the requested test, keeping its origin when known.
        tests_with_origin = {target_test: tests_with_origin.get(target_test, "unknown")}

    model_names, model_test_files = get_models_and_test_files(MODELS_DIR)
    print(f"[INFO] Parsing {len(model_test_files)} model test files once each...")
    model_overrides = build_model_overrides(model_test_files)

    if target_test:
        data = summarize_single_test(target_test, model_names, model_overrides)
        json_path = out_dir / f"scan_{target_test}.json"
    else:
        data = summarize_all_tests(tests_with_origin, model_names, model_overrides)
        json_path = out_dir / "all_tests_scan_result.json"

    save_json(data, json_path)
    print(f"\n[INFO] JSON saved to {json_path.resolve()}")
# Allow importing this module without side effects; run the scan only as a script.
if __name__ == "__main__":
    main()
| {
"repo_id": "huggingface/transformers",
"file_path": "utils/scan_skipped_tests.py",
"license": "Apache License 2.0",
"lines": 163,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:utils/compare_test_runs.py | # Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
def normalize_test_line(line):
    """Reduce a pytest summary line to a stable identity for set comparison.

    SKIPPED/XFAIL/XPASS/EXPECTEDFAIL lines collapse to ``STATUS path:line``
    (dropping the bracketed count and reason); ERROR/FAILED lines drop the
    trailing `` - message`` part; anything else is returned stripped.
    """
    stripped = line.strip()
    skip_match = re.match(r"^(SKIPPED|XFAIL|XPASS|EXPECTEDFAIL)\s+\[?\d*\]?\s*(\S+:\d+)", stripped)
    if skip_match is not None:
        return " ".join(skip_match.groups())
    if stripped.startswith(("ERROR", "FAILED")):
        return re.split(r"\s+-\s+", stripped)[0].strip()
    return stripped
def parse_summary_file(file_path):
    """Collect the normalized test lines from the summary section(s) of *file_path*.

    Every line starting with ``===`` flips whether we are inside a summary
    section; only non-blank lines inside a section are kept, after
    normalization via ``normalize_test_line``.
    """
    collected = set()
    inside = False
    with open(file_path, "r", encoding="utf-8") as handle:
        for raw_line in handle:
            text = raw_line.strip()
            if text.startswith("==="):
                inside = not inside
            elif inside and text:
                collected.add(normalize_test_line(text))
    return collected
def compare_job_sets(job_set1, job_set2):
    """Diff the summary files of two job runs, keyed by job name.

    Each argument maps *job name → summary file path*. Jobs present in only
    one mapping are compared against an empty test set. Returns the diff
    report as a string, or ``"No differences found."`` when all jobs match.
    """
    report_lines = []
    for job_name in sorted(set(job_set1) | set(job_set2)):
        path_prev = job_set1.get(job_name)
        path_curr = job_set2.get(job_name)
        prev_tests = parse_summary_file(path_prev) if path_prev else set()
        curr_tests = parse_summary_file(path_curr) if path_curr else set()
        appeared = curr_tests - prev_tests
        absent = prev_tests - curr_tests
        if not appeared and not absent:
            continue
        report_lines.append(f"=== Diff for job: {job_name} ===")
        if absent:
            report_lines.append("--- Absent in current run:")
            report_lines.extend(f" - {test}" for test in sorted(absent))
        if appeared:
            report_lines.append("+++ Appeared in current run:")
            report_lines.extend(f" + {test}" for test in sorted(appeared))
        report_lines.append("")  # blank separator between job sections
    return "\n".join(report_lines) if report_lines else "No differences found."
# Example usage:
# job_set_1 = {
# "albert": "prev/multi-gpu_run_models_gpu_models/albert_test_reports/summary_short.txt",
# "bloom": "prev/multi-gpu_run_models_gpu_models/bloom_test_reports/summary_short.txt",
# }
# job_set_2 = {
# "albert": "curr/multi-gpu_run_models_gpu_models/albert_test_reports/summary_short.txt",
# "bloom": "curr/multi-gpu_run_models_gpu_models/bloom_test_reports/summary_short.txt",
# }
# report = compare_job_sets(job_set_1, job_set_2)
# print(report)
| {
"repo_id": "huggingface/transformers",
"file_path": "utils/compare_test_runs.py",
"license": "Apache License 2.0",
"lines": 72,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/modernbert_decoder/modular_modernbert_decoder.py | # Copyright 2025 Johns Hopkins University, LightOn, and the HuggingFace Inc. team. All rights reserved.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from collections.abc import Callable
from typing import Literal
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ... import initialization as init
from ...cache_utils import Cache, DynamicCache
from ...configuration_utils import PreTrainedConfig
from ...generation import GenerationMixin
from ...masking_utils import create_causal_mask, create_sliding_window_causal_mask
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, SequenceClassifierOutputWithPast
from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS, RopeParameters
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS
from ...processing_utils import Unpack
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, logging
from ...utils.generic import merge_with_config_defaults
from ...utils.output_capturing import capture_outputs
from ..modernbert.modeling_modernbert import (
ModernBertEmbeddings,
ModernBertMLP,
ModernBertPredictionHead,
ModernBertPreTrainedModel,
ModernBertRotaryEmbedding,
apply_rotary_pos_emb,
)
logger = logging.get_logger(__name__)
class ModernBertDecoderConfig(PreTrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`ModernBertDecoderModel`]. It is used to instantiate a ModernBert
    decoder model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
    defaults will yield a similar configuration to that of the ModernBERT-base decoder.

    e.g. [blab-jhu/test-32m-dec](https://huggingface.co/blab-jhu/test-32m-dec)

    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 50368):
            Vocabulary size of the ModernBert decoder model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`ModernBertDecoderModel`]
        hidden_size (`int`, *optional*, defaults to 768):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 1152):
            Dimension of the MLP representations.
        num_hidden_layers (`int`, *optional*, defaults to 22):
            Number of hidden layers in the Transformer decoder.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer decoder.
        hidden_activation (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the decoder. Will default to `"gelu"`
            if not specified.
        max_position_embeddings (`int`, *optional*, defaults to 8192):
            The maximum sequence length that this model might ever be used with.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        initializer_cutoff_factor (`float`, *optional*, defaults to 2.0):
            The cutoff factor for the truncated_normal_initializer for initializing all weight matrices.
        norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon used by the rms normalization layers.
        norm_bias (`bool`, *optional*, defaults to `False`):
            Whether to use bias in the normalization layers.
        pad_token_id (`int`, *optional*, defaults to 50283):
            Padding token id.
        eos_token_id (`int`, *optional*, defaults to 50282):
            End of stream token id.
        bos_token_id (`int`, *optional*, defaults to 50281):
            Beginning of stream token id.
        cls_token_id (`int`, *optional*, defaults to 50281):
            Classification token id.
        sep_token_id (`int`, *optional*, defaults to 50282):
            Separation token id.
        attention_bias (`bool`, *optional*, defaults to `False`):
            Whether to use a bias in the query, key, value and output projection layers during self-attention.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        embedding_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the embeddings.
        mlp_bias (`bool`, *optional*, defaults to `False`):
            Whether to use bias in the MLP layers.
        mlp_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the MLP layers.
        decoder_bias (`bool`, *optional*, defaults to `True`):
            Whether to use bias in the decoder layers.
        classifier_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the classifier.
        classifier_bias (`bool`, *optional*, defaults to `False`):
            Whether to use bias in the classifier.
        classifier_activation (`str`, *optional*, defaults to `"gelu"`):
            The activation function for the classifier.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        local_attention (`int`, *optional*, defaults to 128):
            The sliding window size for local attention. Only used for layers that use local attention. Note that for
            the decoder to match ModernBERT this is actually half of the sliding window size, so 128 => 64.
        global_attn_every_n_layers (`int`, *optional*, defaults to 3):
            Every `global_attn_every_n_layers` layers will use global attention instead of local attention.
        layer_types (`list[str]`, *optional*):
            List of layer types, one for each layer. If not specified, will be automatically generated based on
            `global_attn_every_n_layers`. Should contain "full_attention" or "sliding_attention".
        tie_word_embeddings (`bool`, *optional*, defaults to `True`):
            Whether to tie weight embeddings
        rope_parameters (`dict`, *optional*):
            Dictionary mapping attention patterns (`"full_attention"`, `"sliding_attention"`) to `RopeParameters`.
            Each value should be a dictionary containing `rope_type` and optional scaling parameters.

    Examples:

    ```python
    >>> from transformers import ModernBertDecoderModel, ModernBertDecoderConfig

    >>> # Initializing a ModernBert decoder style configuration
    >>> configuration = ModernBertDecoderConfig()

    >>> # Initializing a model from the modernbert-base decoder style configuration
    >>> model = ModernBertDecoderModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "modernbert-decoder"
    keys_to_ignore_at_inference = ["past_key_values"]
    # Default RoPE base frequencies; full ("global") attention layers use a much
    # larger theta than sliding-window ("local") layers.
    default_theta = {"global": 160_000.0, "local": 10_000.0}

    def __init__(
        self,
        vocab_size: int | None = 50368,
        hidden_size: int | None = 768,
        intermediate_size: int | None = 1152,
        num_hidden_layers: int | None = 22,
        num_attention_heads: int | None = 12,
        hidden_activation: str | None = "gelu",
        max_position_embeddings: int | None = 8192,
        initializer_range: float | None = 0.02,
        initializer_cutoff_factor: float | None = 2.0,
        # annotation corrected from `int`: the default (and valid values) are floats
        norm_eps: float | None = 1e-5,
        norm_bias: bool | None = False,
        pad_token_id: int | None = 50283,
        eos_token_id: int | None = 50282,
        bos_token_id: int | None = 50281,
        cls_token_id: int | None = 50281,
        sep_token_id: int | None = 50282,
        attention_bias: bool | None = False,
        attention_dropout: float | None = 0.0,
        embedding_dropout: float | None = 0.0,
        mlp_bias: bool | None = False,
        mlp_dropout: float | None = 0.0,
        decoder_bias: bool | None = True,
        classifier_dropout: float | None = 0.0,
        classifier_bias: bool | None = False,
        classifier_activation: str | None = "gelu",
        use_cache: bool | None = True,
        local_attention: int | None = 128,
        global_attn_every_n_layers: int | None = 3,
        layer_types: list[str] | None = None,
        tie_word_embeddings: bool | None = True,
        rope_parameters: dict[Literal["full_attention", "sliding_attention"], RopeParameters] | None = None,
        **kwargs,
    ):
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.cls_token_id = cls_token_id
        self.sep_token_id = sep_token_id
        self.tie_word_embeddings = tie_word_embeddings
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_cutoff_factor = initializer_cutoff_factor
        self.norm_eps = norm_eps
        self.norm_bias = norm_bias
        self.attention_bias = attention_bias
        self.attention_dropout = attention_dropout
        self.hidden_activation = hidden_activation
        self.embedding_dropout = embedding_dropout
        self.mlp_bias = mlp_bias
        self.mlp_dropout = mlp_dropout
        self.decoder_bias = decoder_bias
        self.classifier_dropout = classifier_dropout
        self.classifier_bias = classifier_bias
        self.classifier_activation = classifier_activation
        self.use_cache = use_cache
        self.global_attn_every_n_layers = global_attn_every_n_layers

        # for consistency with ModernBert
        self.reference_compile = False

        # Set up layer_types for standardized layer type detection
        self.layer_types = layer_types
        if self.layer_types is None:
            # Create layer_types based on the alternating pattern: every
            # `global_attn_every_n_layers`-th layer (starting at layer 0) is full attention.
            self.layer_types = []
            for layer_id in range(num_hidden_layers):
                if layer_id % global_attn_every_n_layers != 0:
                    self.layer_types.append("sliding_attention")
                else:
                    self.layer_types.append("full_attention")

        # NOTE: sliding window numbers matches ModernBERT but is only half of it
        self.sliding_window = local_attention // 2 if local_attention else -1
        self.rope_parameters = rope_parameters

        super().__init__(**kwargs)

    def convert_rope_params_to_dict(self, ignore_keys_at_rope_validation=None, **kwargs):
        """Normalize RoPE settings into per-attention-pattern dicts.

        Handles the legacy `rope_scaling` kwarg as well as the legacy
        `global_rope_theta` / `local_rope_theta` kwargs (consumed from
        `kwargs`), then standardizes and validates the result. Returns the
        remaining `kwargs`.
        """
        rope_scaling = kwargs.pop("rope_scaling", None)
        # Try to set `rope_scaling` if available, otherwise use `rope_parameters`. If we find `rope_parameters`
        # as arg in the inputs, we can safely assume that it is in the new format. New naming used -> new format
        default_rope_params = {
            "sliding_attention": {"rope_type": "default"},
            "full_attention": {"rope_type": "default"},
        }
        self.rope_parameters = self.rope_parameters if self.rope_parameters is not None else default_rope_params
        if rope_scaling is not None:
            # Legacy scaling applies to both attention patterns.
            self.rope_parameters["full_attention"].update(rope_scaling)
            self.rope_parameters["sliding_attention"].update(rope_scaling)

        # Set default values if not present
        if self.rope_parameters.get("full_attention") is None:
            self.rope_parameters["full_attention"] = {"rope_type": "default"}
        self.rope_parameters["full_attention"].setdefault(
            "rope_theta", kwargs.pop("global_rope_theta", self.default_theta["global"])
        )
        if self.rope_parameters.get("sliding_attention") is None:
            self.rope_parameters["sliding_attention"] = {"rope_type": "default"}
        self.rope_parameters["sliding_attention"].setdefault(
            "rope_theta", kwargs.pop("local_rope_theta", self.default_theta["local"])
        )

        # Standardize and validate the correctness of rotary position embeddings parameters
        self.standardize_rope_params()
        self.validate_rope(ignore_keys=ignore_keys_at_rope_validation)
        return kwargs
# Behaviorally identical to ModernBertEmbeddings; re-declared under the decoder
# name so the decoder model has its own class (exposes `tok_embeddings`, used by
# get/set_input_embeddings below).
class ModernBertDecoderEmbeddings(ModernBertEmbeddings):
    pass
# Behaviorally identical to ModernBertMLP (exposes `Wi`/`Wo`, initialized in
# ModernBertDecoderPreTrainedModel._init_weights).
class ModernBertDecoderMLP(ModernBertMLP):
    pass
# Behaviorally identical to ModernBertRotaryEmbedding; called once per layer
# type in ModernBertDecoderModel.forward.
class ModernBertDecoderRotaryEmbedding(ModernBertRotaryEmbedding):
    pass
def eager_attention_forward(
module: "ModernBertDecoderAttention",
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: torch.Tensor | None,
dropout: float = 0.0,
scaling: float | None = None,
sliding_window: int | None = None,
**kwargs,
) -> tuple[torch.Tensor, torch.Tensor | None]:
"""A simple eager attention implementation for ModernBERT decoder."""
if scaling is None:
scaling = module.head_dim**-0.5
# Compute attention scores
attn_weights = torch.matmul(query, key.transpose(2, 3)) * scaling
# Use the pre-computed attention mask
attn_weights = attn_weights + attention_mask
# upcast attention to fp32
attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
attn_output = torch.matmul(attn_weights, value)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, attn_weights
class ModernBertDecoderAttention(nn.Module):
    """Performs causal multi-headed self attention for ModernBERT decoder.

    It supports both local attention (sliding window) and global attention patterns.
    """

    def __init__(self, config: ModernBertDecoderConfig, layer_idx: int | None = None):
        super().__init__()
        # Whether this layer uses sliding-window (local) attention, per config.layer_types.
        self.is_sliding = config.layer_types[layer_idx] == "sliding_attention"
        self.config = config
        self.layer_idx = layer_idx
        self.head_dim = config.hidden_size // config.num_attention_heads
        self.num_heads = config.num_attention_heads
        self.all_head_size = self.head_dim * self.num_heads
        self.scaling = self.head_dim**-0.5
        self.attention_dropout = self.config.attention_dropout
        self.is_causal = True

        if config.hidden_size % config.num_attention_heads != 0:
            raise ValueError(
                f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention heads ({config.num_attention_heads})"
            )

        # NOTE: this is different than ModernBERT (separated QKV) so be sure to adapt to this
        self.q_proj = nn.Linear(self.config.hidden_size, self.all_head_size, bias=self.config.attention_bias)
        self.k_proj = nn.Linear(self.config.hidden_size, self.all_head_size, bias=self.config.attention_bias)
        self.v_proj = nn.Linear(self.config.hidden_size, self.all_head_size, bias=self.config.attention_bias)
        self.Wo = nn.Linear(config.hidden_size, config.hidden_size, bias=config.attention_bias)
        self.out_drop = nn.Dropout(config.attention_dropout)
        # Only sliding-attention layers carry a window size; None means full attention.
        self.sliding_window = config.sliding_window if config.layer_types[layer_idx] == "sliding_attention" else None

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: torch.Tensor,
        attention_mask: torch.Tensor | None,
        past_key_values: Cache | None = None,
        cache_position: torch.LongTensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple[torch.Tensor, torch.Tensor | None]:
        """Project to Q/K/V, apply RoPE, update the KV cache, and run attention.

        Returns the projected attention output (after `Wo` and dropout) and the
        attention weights as produced by the selected attention backend.
        """
        input_shape = hidden_states.shape[:-1]
        # Split the hidden dim into (num_heads, head_dim) and move heads before sequence.
        hidden_shape = (*input_shape, -1, self.head_dim)

        query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)

        cos, sin = position_embeddings
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)

        if past_key_values is not None:
            # sin and cos are specific to RoPE models; cache_position needed for the static cache
            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
            key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)

        # Dispatch to the configured attention backend, falling back to the eager path.
        attention_interface: Callable = ALL_ATTENTION_FUNCTIONS.get_interface(
            self.config._attn_implementation, eager_attention_forward
        )

        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=self.attention_dropout if self.training else 0.0,
            scaling=self.scaling,
            sliding_window=self.sliding_window,
            **kwargs,
        )

        # Merge heads back into a single hidden dimension before the output projection.
        attn_output = attn_output.reshape(*input_shape, -1).contiguous()
        attn_output = self.out_drop(self.Wo(attn_output))

        return attn_output, attn_weights
class ModernBertDecoderLayer(GradientCheckpointingLayer):
    """One pre-norm decoder block: self-attention then MLP, each wrapped in a residual."""

    def __init__(self, config: ModernBertDecoderConfig, layer_idx: int | None = None):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        self.attention_type = config.layer_types[layer_idx]
        # Layer 0 skips the pre-attention norm (Identity); all others use LayerNorm.
        if layer_idx != 0:
            self.attn_norm = nn.LayerNorm(config.hidden_size, eps=config.norm_eps, bias=config.norm_bias)
        else:
            self.attn_norm = nn.Identity()
        self.attn = ModernBertDecoderAttention(config=config, layer_idx=layer_idx)
        self.mlp_norm = nn.LayerNorm(config.hidden_size, eps=config.norm_eps, bias=config.norm_bias)
        self.mlp = ModernBertDecoderMLP(config)

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: torch.Tensor = None,
        attention_mask: torch.Tensor | None = None,
        past_key_values: Cache | None = None,
        cache_position: torch.LongTensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple[torch.FloatTensor, tuple[torch.FloatTensor, torch.FloatTensor] | None]:
        # Attention sub-block with residual connection (pre-norm).
        normed = self.attn_norm(hidden_states)
        attn_out = self.attn(
            hidden_states=normed,
            position_embeddings=position_embeddings,
            attention_mask=attention_mask,
            past_key_values=past_key_values,
            cache_position=cache_position,
            **kwargs,
        )[0]
        hidden_states = hidden_states + attn_out

        # MLP sub-block with residual connection (pre-norm).
        hidden_states = hidden_states + self.mlp(self.mlp_norm(hidden_states))
        return hidden_states
# Behaviorally identical to ModernBertPredictionHead (exposes `dense`, initialized
# in ModernBertDecoderPreTrainedModel._init_weights); used as the LM head projection
# and the sequence-classification head.
class ModernBertDecoderPredictionHead(ModernBertPredictionHead):
    pass
@auto_docstring
class ModernBertDecoderPreTrainedModel(ModernBertPreTrainedModel):
    # Keys excluded from automatic device placement and module splitting.
    _skip_keys_device_placement = ["past_key_values"]
    _no_split_modules = ["ModernBertDecoderLayer"]
    _supports_flex_attn = True
    _supports_attention_backend = True
    # Maps output names to the submodules whose outputs are captured for them.
    _can_record_outputs = {
        "hidden_states": ModernBertDecoderLayer,
        "attentions": ModernBertDecoderAttention,
    }

    @torch.no_grad()
    def _init_weights(self, module: nn.Module):
        """Initialize weights with truncated-normal draws, clipped at
        `initializer_cutoff_factor` standard deviations (3 if unset)."""
        cutoff_factor = self.config.initializer_cutoff_factor
        if cutoff_factor is None:
            cutoff_factor = 3

        def init_weight(module: nn.Module, std: float):
            # Truncated normal clipped to +/- cutoff_factor * std.
            init.trunc_normal_(
                module.weight,
                mean=0.0,
                std=std,
                a=-cutoff_factor * std,
                b=cutoff_factor * std,
            )

        # Zero every Linear bias regardless of which branch below handles the weights.
        if isinstance(module, nn.Linear):
            if module.bias is not None:
                init.zeros_(module.bias)

        # Per-role standard deviations; "out" is scaled down with depth.
        stds = {
            "in": self.config.initializer_range,
            "out": self.config.initializer_range / math.sqrt(2.0 * self.config.num_hidden_layers),
            "embedding": self.config.initializer_range,
            "final_out": self.config.hidden_size**-0.5,
        }

        if isinstance(module, ModernBertDecoderEmbeddings):
            init_weight(module.tok_embeddings, stds["embedding"])
        elif isinstance(module, ModernBertDecoderMLP):
            init_weight(module.Wi, stds["in"])
            init_weight(module.Wo, stds["out"])
        elif isinstance(module, ModernBertDecoderAttention):
            init_weight(module.q_proj, stds["in"])
            init_weight(module.k_proj, stds["in"])
            init_weight(module.v_proj, stds["in"])
            init_weight(module.Wo, stds["out"])
        elif isinstance(module, ModernBertDecoderPredictionHead):
            init_weight(module.dense, stds["out"])
        elif isinstance(module, ModernBertDecoderForSequenceClassification):
            init_weight(module.classifier, stds["final_out"])
        elif isinstance(module, ModernBertDecoderForCausalLM):
            init_weight(module.decoder, stds["out"])
        elif isinstance(module, nn.LayerNorm):
            init.ones_(module.weight)
            if module.bias is not None:
                init.zeros_(module.bias)
        elif isinstance(module, ModernBertDecoderRotaryEmbedding):
            # Recompute the inverse frequencies per layer type and copy them into
            # the module's buffers (both current and "original" copies).
            for layer_type in module.layer_types:
                rope_init_fn = module.compute_default_rope_parameters
                if module.rope_type[layer_type] != "default":
                    rope_init_fn = ROPE_INIT_FUNCTIONS[module.rope_type[layer_type]]
                curr_inv_freq, _ = rope_init_fn(module.config, layer_type=layer_type)
                init.copy_(getattr(module, f"{layer_type}_inv_freq"), curr_inv_freq)
                init.copy_(getattr(module, f"{layer_type}_original_inv_freq"), curr_inv_freq)
@auto_docstring
class ModernBertDecoderModel(ModernBertDecoderPreTrainedModel):
    # Bare decoder stack: embeddings -> decoder layers -> final LayerNorm.
    def __init__(self, config: ModernBertDecoderConfig):
        super().__init__(config)
        self.config = config
        self.embeddings = ModernBertDecoderEmbeddings(config)
        self.layers = nn.ModuleList(
            [ModernBertDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        self.final_norm = nn.LayerNorm(config.hidden_size, eps=config.norm_eps, bias=config.norm_bias)
        self.rotary_emb = ModernBertDecoderRotaryEmbedding(config=config)
        self.gradient_checkpointing = False
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.tok_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.tok_embeddings = value

    @merge_with_config_defaults
    @capture_outputs
    @auto_docstring
    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: Cache | None = None,
        inputs_embeds: torch.Tensor | None = None,
        use_cache: bool | None = None,
        cache_position: torch.LongTensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple[torch.Tensor, ...] | BaseModelOutputWithPast:
        # Exactly one of input_ids / inputs_embeds must be provided.
        if (input_ids is None) == (inputs_embeds is None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

        if input_ids is not None:
            self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
            batch_size, seq_length = input_ids.shape[:2]
        else:
            batch_size, seq_length = inputs_embeds.shape[:2]

        # Handle past_key_values and cache setup
        if use_cache and past_key_values is None:
            past_key_values = DynamicCache(config=self.config)

        if cache_position is None:
            # Positions of the new tokens, offset by whatever is already cached.
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position = torch.arange(
                past_seen_tokens,
                past_seen_tokens + seq_length,
                device=input_ids.device if input_ids is not None else inputs_embeds.device,
            )

        if position_ids is None:
            position_ids = cache_position.unsqueeze(0).expand(batch_size, -1)

        # Calculate embeddings
        hidden_states = self.embeddings(input_ids=input_ids, inputs_embeds=inputs_embeds)

        # It may already have been prepared by e.g. `generate`
        if not isinstance(causal_mask_mapping := attention_mask, dict):
            # Prepare mask arguments
            mask_kwargs = {
                "config": self.config,
                "inputs_embeds": hidden_states,
                "attention_mask": attention_mask,
                "cache_position": cache_position,
                "past_key_values": past_key_values,
                "position_ids": position_ids,
            }
            # One mask per attention pattern; each layer picks its own below.
            causal_mask_mapping = {
                "full_attention": create_causal_mask(**mask_kwargs),
                "sliding_attention": create_sliding_window_causal_mask(**mask_kwargs),
            }

        # Precompute RoPE cos/sin once per layer type (full vs sliding).
        position_embeddings = {}
        for layer_type in self.config.layer_types:
            position_embeddings[layer_type] = self.rotary_emb(hidden_states, position_ids, layer_type)

        for decoder_layer in self.layers:
            hidden_states = decoder_layer(
                hidden_states,
                attention_mask=causal_mask_mapping[decoder_layer.attention_type],
                position_embeddings=position_embeddings[decoder_layer.attention_type],
                past_key_values=past_key_values,
                cache_position=cache_position,
                position_ids=position_ids,
                **kwargs,
            )

        hidden_states = self.final_norm(hidden_states)

        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values,
        )
@auto_docstring(
    custom_intro="""
    The ModernBert Decoder Model with a language modeling head on top for causal language modeling (CLM).
    """
)
class ModernBertDecoderForCausalLM(ModernBertDecoderPreTrainedModel, GenerationMixin):
    # The output projection is tied to the input token embeddings.
    _tied_weights_keys = {"decoder.weight": "model.embeddings.tok_embeddings.weight"}

    def __init__(self, config: ModernBertDecoderConfig):
        super().__init__(config)
        self.config = config
        self.model = ModernBertDecoderModel(config)
        # `lm_head` transforms hidden states; `decoder` projects to vocab logits.
        self.lm_head = ModernBertDecoderPredictionHead(config)
        self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=config.decoder_bias)

        # Initialize weights and apply final processing
        self.post_init()

    def get_output_embeddings(self):
        return self.decoder

    def set_output_embeddings(self, new_embeddings):
        self.decoder = new_embeddings

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: Cache | None = None,
        inputs_embeds: torch.Tensor | None = None,
        labels: torch.LongTensor | None = None,
        use_cache: bool | None = None,
        logits_to_keep: int | torch.Tensor = 0,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple | CausalLMOutputWithPast:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Returns:
            [`~modeling_outputs.CausalLMOutputWithPast`]
            comprising various elements depending on the configuration and inputs.

        Example:

        ```python
        >>> from transformers import AutoTokenizer, ModernBertDecoderForCausalLM

        >>> model = ModernBertDecoderForCausalLM.from_pretrained("blab-jhu/test-32m-dec")
        >>> tokenizer = AutoTokenizer.from_pretrained("blab-jhu/test-32m-dec")

        >>> prompt = "The capital of France is"
        >>> inputs = tokenizer(prompt, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "The capital of France is Paris"
        ```
        """
        outputs: BaseModelOutputWithPast = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            **kwargs,
        )
        hidden_states = outputs.last_hidden_state

        # Only compute necessary logits; logits_to_keep == 0 keeps the whole sequence
        # (slice(-0, None) == slice(0, None)).
        slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
        logits = self.decoder(self.lm_head(hidden_states[:, slice_indices, :]))

        loss = None
        if labels is not None:
            # Shift so that tokens < n predict n
            shift_logits = logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            # Flatten the tokens
            loss_fct = CrossEntropyLoss()
            shift_logits = shift_logits.view(-1, self.config.vocab_size)
            shift_labels = shift_labels.view(-1)
            shift_labels = shift_labels.to(shift_logits.device)
            loss = loss_fct(shift_logits, shift_labels)

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
@auto_docstring(
    custom_intro="""
    The ModernBert Decoder Model with a sequence classification head on top (linear layer).
    [`ModernBertDecoderForSequenceClassification`] uses the last token in order to do the classification, as other causal models
    (e.g. GPT-1, GPT-2) do.
    Since it does classification on the last token, it requires to know the position of the last token. If a
    `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
    no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
    padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
    each row of the batch).
    """
)
class ModernBertDecoderForSequenceClassification(ModernBertDecoderPreTrainedModel):
    def __init__(self, config: ModernBertDecoderConfig):
        super().__init__(config)
        self.num_labels = config.num_labels
        # Decoder backbone, then prediction head + dropout + linear classifier on top.
        self.model = ModernBertDecoderModel(config)
        self.head = ModernBertDecoderPredictionHead(config)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels, bias=config.classifier_bias)
        self.drop = torch.nn.Dropout(config.classifier_dropout)
        # Initialize weights and apply final processing
        self.post_init()

    @can_return_tuple
    @auto_docstring(checkpoint="blab-jhu/test-32m-dec")
    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: Cache | None = None,
        inputs_embeds: torch.Tensor | None = None,
        labels: torch.LongTensor | None = None,
        use_cache: bool | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple | SequenceClassifierOutputWithPast:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        # Run the decoder backbone; per-token hidden states are at index 0 of the output.
        transformer_outputs = self.model(
            input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            **kwargs,
        )
        hidden_states = transformer_outputs[0]
        # Head + dropout + classifier run on every position; last-token pooling happens below.
        hidden_states = self.drop(self.head(hidden_states))
        logits = self.classifier(hidden_states)
        if input_ids is not None:
            batch_size, sequence_length = input_ids.shape[:2]
        else:
            batch_size, sequence_length = inputs_embeds.shape[:2]
        if self.config.pad_token_id is None and batch_size != 1:
            raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
        if self.config.pad_token_id is None:
            # No pad token: fall back to the literal last position of each row.
            last_non_pad_token = -1
        elif input_ids is not None:
            # To handle both left- and right- padding, we take the rightmost token that is not equal to pad_token_id
            non_pad_mask = (input_ids != self.config.pad_token_id).to(logits.device, torch.int32)
            token_indices = torch.arange(input_ids.shape[-1], device=logits.device, dtype=torch.int32)
            last_non_pad_token = (token_indices * non_pad_mask).argmax(-1)
        else:
            # Padding cannot be detected in `inputs_embeds`; warn once and use the last position.
            last_non_pad_token = -1
            logger.warning_once(
                f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
                "unexpected if using padding tokens in conjunction with `inputs_embeds.`"
            )
        # Pool: keep only the logits at the last non-padding token of each sequence.
        pooled_logits = logits[torch.arange(batch_size, device=logits.device), last_non_pad_token]
        loss = None
        if labels is not None:
            # Infer the problem type once from `num_labels` and the labels' dtype, and cache it on the config.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(pooled_logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(pooled_logits, labels)
        return SequenceClassifierOutputWithPast(
            loss=loss,
            logits=pooled_logits,
            past_key_values=transformer_outputs.past_key_values,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )
# Public symbols exported by this modular file (picked up by the modular conversion tooling).
__all__ = [
    "ModernBertDecoderConfig",
    "ModernBertDecoderModel",
    "ModernBertDecoderPreTrainedModel",
    "ModernBertDecoderForCausalLM",
    "ModernBertDecoderForSequenceClassification",
]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/modernbert_decoder/modular_modernbert_decoder.py",
"license": "Apache License 2.0",
"lines": 714,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:tests/models/modernbert_decoder/test_modeling_modernbert_decoder.py | # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import AutoTokenizer, is_torch_available
from transformers.testing_utils import (
require_torch,
slow,
torch_device,
)
from ...causal_lm_tester import CausalLMModelTest, CausalLMModelTester
if is_torch_available():
import torch
from transformers import (
ModernBertDecoderForCausalLM,
ModernBertDecoderForSequenceClassification,
ModernBertDecoderModel,
)
class ModernBertDecoderModelTester(CausalLMModelTester):
    """Test helper wiring the shared causal-LM tester machinery to the ModernBertDecoder base model."""

    if is_torch_available():
        base_model_class = ModernBertDecoderModel
@require_torch
class ModernBertDecoderModelTest(CausalLMModelTest, unittest.TestCase):
    """Common-suite tests for ModernBertDecoder, plus a model-specific RoPE-scaling check."""

    model_tester_class = ModernBertDecoderModelTester

    def test_model_rope_scaling_frequencies(self):
        """Tests the frequency properties of the different RoPE scaling types on the model RoPE layer."""
        # ModernBertDecoder has different RoPE configs per layer type
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        # Retrieves the RoPE layer class from the base model class. Uses `.named_modules()` to avoid hardcoding the
        # named location of the RoPE layer class.
        base_model = self.model_tester.base_model_class(config)
        possible_rope_attributes = [
            "pos_emb",
            "rotary_emb",  # most common case
            "global_rotary_emb",
            "local_rotary_emb",
        ]
        for name, module in base_model.named_modules():
            if any(potential_name in name for potential_name in possible_rope_attributes):
                rope_class = type(module)
                break
        else:
            # Fail with an actionable message instead of a confusing `NameError` on `rope_class` below.
            self.fail(f"Could not find a RoPE layer matching any of {possible_rope_attributes} in the base model.")
        scaling_factor = 10
        short_input_length = 10
        long_input_length = int(config.max_position_embeddings * 1.5)
        # Inputs
        x = torch.randn(1, dtype=torch.float32, device=torch_device)  # used exclusively to get the dtype and the device
        position_ids_short = torch.arange(short_input_length, dtype=torch.long, device=torch_device)
        position_ids_short = position_ids_short.unsqueeze(0)
        position_ids_long = torch.arange(long_input_length, dtype=torch.long, device=torch_device)
        position_ids_long = position_ids_long.unsqueeze(0)

        # Sanity check original RoPE: the short sequence must be a prefix of the long one.
        rope_params = {"rope_type": "default", "rope_theta": 10_000.0}
        config.rope_parameters = {"sliding_attention": rope_params, "full_attention": rope_params}
        original_rope = rope_class(config=config).to(torch_device)
        original_cos_short, original_sin_short = original_rope(x, position_ids_short, layer_type="sliding_attention")
        original_cos_long, original_sin_long = original_rope(x, position_ids_long, layer_type="sliding_attention")
        torch.testing.assert_close(original_cos_short, original_cos_long[:, :short_input_length, :])
        torch.testing.assert_close(original_sin_short, original_sin_long[:, :short_input_length, :])

        # Sanity check linear RoPE scaling
        # New position "x" should match original position with index "x/scaling_factor"
        rope_params = {"rope_type": "linear", "factor": scaling_factor, "rope_theta": 10_000.0}
        config.rope_parameters = {"sliding_attention": rope_params, "full_attention": rope_params}
        linear_scaling_rope = rope_class(config=config).to(torch_device)
        linear_cos_short, linear_sin_short = linear_scaling_rope(x, position_ids_short, layer_type="sliding_attention")
        linear_cos_long, linear_sin_long = linear_scaling_rope(x, position_ids_long, layer_type="sliding_attention")
        torch.testing.assert_close(linear_cos_short, linear_cos_long[:, :short_input_length, :])
        torch.testing.assert_close(linear_sin_short, linear_sin_long[:, :short_input_length, :])
        for new_position in range(0, long_input_length, scaling_factor):
            original_position = int(new_position // scaling_factor)
            torch.testing.assert_close(linear_cos_long[:, new_position, :], original_cos_long[:, original_position, :])
            torch.testing.assert_close(linear_sin_long[:, new_position, :], original_sin_long[:, original_position, :])

        # Sanity check Dynamic NTK RoPE scaling
        # Scaling should only be observed after a long input is fed. We can observe that the frequencies increase
        # with scaling_factor (or that `inv_freq` decreases)
        rope_params = {"rope_type": "dynamic", "factor": scaling_factor, "rope_theta": 10_000.0}
        config.rope_parameters = {"sliding_attention": rope_params, "full_attention": rope_params}
        ntk_scaling_rope = rope_class(config=config).to(torch_device)
        ntk_cos_short, ntk_sin_short = ntk_scaling_rope(x, position_ids_short, layer_type="sliding_attention")
        ntk_cos_long, ntk_sin_long = ntk_scaling_rope(x, position_ids_long, layer_type="sliding_attention")
        torch.testing.assert_close(ntk_cos_short, original_cos_short)
        torch.testing.assert_close(ntk_sin_short, original_sin_short)
        with self.assertRaises(AssertionError):
            torch.testing.assert_close(ntk_cos_long, original_cos_long)
        with self.assertRaises(AssertionError):
            torch.testing.assert_close(ntk_sin_long, original_sin_long)
        self.assertTrue(
            (ntk_scaling_rope.sliding_attention_inv_freq <= original_rope.sliding_attention_inv_freq).all()
        )

        # Sanity check Yarn RoPE scaling
        # Scaling should be over the entire input
        rope_params = {"rope_type": "yarn", "factor": scaling_factor, "rope_theta": 10_000.0}
        config.rope_parameters = {"sliding_attention": rope_params, "full_attention": rope_params}
        yarn_scaling_rope = rope_class(config=config).to(torch_device)
        yarn_cos_short, yarn_sin_short = yarn_scaling_rope(x, position_ids_short, layer_type="sliding_attention")
        yarn_cos_long, yarn_sin_long = yarn_scaling_rope(x, position_ids_long, layer_type="sliding_attention")
        torch.testing.assert_close(yarn_cos_short, yarn_cos_long[:, :short_input_length, :])
        torch.testing.assert_close(yarn_sin_short, yarn_sin_long[:, :short_input_length, :])
        with self.assertRaises(AssertionError):
            torch.testing.assert_close(yarn_cos_short, original_cos_short)
        with self.assertRaises(AssertionError):
            torch.testing.assert_close(yarn_sin_short, original_sin_short)
        with self.assertRaises(AssertionError):
            torch.testing.assert_close(yarn_cos_long, original_cos_long)
        with self.assertRaises(AssertionError):
            torch.testing.assert_close(yarn_sin_long, original_sin_long)
@slow
@require_torch
class ModernBertDecoderIntegrationTest(unittest.TestCase):
    """Slow end-to-end tests running the tiny `blab-jhu/test-32m-dec` checkpoint from the Hub."""

    def test_inference_causal_lm(self):
        """Checks the causal-LM logits shape and pins a 3x3 slice of the output values."""
        model = ModernBertDecoderForCausalLM.from_pretrained("blab-jhu/test-32m-dec", attn_implementation="eager")
        tokenizer = AutoTokenizer.from_pretrained("blab-jhu/test-32m-dec")
        inputs = tokenizer("Paris is the capital of", return_tensors="pt")
        with torch.no_grad():
            output = model(**inputs)[0]
        expected_shape = torch.Size((1, 7, model.config.vocab_size))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[-8.0183, -7.1578, -0.4453], [-6.2909, -6.1557, 4.9063], [-6.7689, -5.8068, 6.1078]]]
        )
        torch.testing.assert_close(output[:, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)

    def test_inference_no_head(self):
        """Checks the bare model's hidden-state shape and pins a 3x3 slice of the output values."""
        model = ModernBertDecoderModel.from_pretrained("blab-jhu/test-32m-dec", attn_implementation="eager")
        tokenizer = AutoTokenizer.from_pretrained("blab-jhu/test-32m-dec")
        inputs = tokenizer("Paris is the capital of", return_tensors="pt")
        with torch.no_grad():
            output = model(**inputs)[0]
        expected_shape = torch.Size((1, 7, model.config.hidden_size))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[-0.0306, -0.0115, 0.0007], [-0.2485, -0.1381, 0.0872], [0.3133, -0.1777, 0.1667]]]
        )
        torch.testing.assert_close(output[:, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)

    def test_generation(self):
        """Greedy generation smoke test: one output that extends the prompt."""
        model = ModernBertDecoderForCausalLM.from_pretrained("blab-jhu/test-32m-dec", attn_implementation="eager")
        tokenizer = AutoTokenizer.from_pretrained("blab-jhu/test-32m-dec")
        inputs = tokenizer("The weather today is", return_tensors="pt")
        outputs = model.generate(**inputs, max_new_tokens=10, do_sample=False)
        output_text = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        # Check that we got some reasonable output
        self.assertEqual(len(output_text), 1)
        self.assertTrue(len(output_text[0]) > len("The weather today is"))

    def test_sliding_window_long_context(self):
        """
        Test that ModernBertDecoder works with sliding window attention for longer sequences.
        """
        model = ModernBertDecoderForCausalLM.from_pretrained("blab-jhu/test-32m-dec", attn_implementation="eager")
        tokenizer = AutoTokenizer.from_pretrained("blab-jhu/test-32m-dec")
        # Create a longer input to test sliding window attention
        long_input = "This is a test. " * 50  # Repeat to make it longer
        inputs = tokenizer(long_input, return_tensors="pt", truncation=True, max_length=512)
        outputs = model.generate(**inputs, max_new_tokens=20, do_sample=False)
        # Check that generation worked with longer context
        self.assertEqual(outputs.shape[0], 1)
        self.assertGreater(outputs.shape[1], inputs["input_ids"].shape[1])

    def test_sequence_classification(self):
        """
        Test that ModernBertDecoderForSequenceClassification works correctly.
        """
        model = ModernBertDecoderForSequenceClassification.from_pretrained(
            "blab-jhu/test-32m-dec", num_labels=2, attn_implementation="eager"
        )
        tokenizer = AutoTokenizer.from_pretrained("blab-jhu/test-32m-dec")
        # Test with sample input
        inputs = tokenizer("This is a positive example.", return_tensors="pt")
        with torch.no_grad():
            outputs = model(**inputs)
        # Check output shape
        expected_shape = (1, 2)  # batch_size=1, num_labels=2
        self.assertEqual(outputs.logits.shape, expected_shape)
        # Test with labels
        labels = torch.tensor([1])
        outputs_with_loss = model(**inputs, labels=labels)
        # Check that loss is computed
        self.assertIsNotNone(outputs_with_loss.loss)
        self.assertTrue(isinstance(outputs_with_loss.loss.item(), float))
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/modernbert_decoder/test_modeling_modernbert_decoder.py",
"license": "Apache License 2.0",
"lines": 189,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:src/transformers/tokenization_mistral_common.py | # Copyright 2025 Mistral AI and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import shutil
from collections.abc import Callable, Sequence
from enum import Enum
from pathlib import Path
from typing import Any, Literal, Union, overload
import numpy as np
from huggingface_hub import create_repo
from transformers.audio_utils import load_audio_as
from transformers.tokenization_utils_base import (
VERY_LARGE_INTEGER,
AddedToken,
BatchEncoding,
EncodedInput,
PreTokenizedInput,
PreTrainedTokenizerBase,
TextInput,
TruncationStrategy,
)
from transformers.utils import PaddingStrategy, TensorType, add_end_docstrings, logging, to_py_obj
from transformers.utils.import_utils import is_mistral_common_available, is_torch_available, requires
if is_mistral_common_available():
from mistral_common.protocol.instruct.request import ChatCompletionRequest
from mistral_common.protocol.instruct.validator import ValidationMode
from mistral_common.tokens.tokenizers.base import SpecialTokenPolicy, SpecialTokens
from mistral_common.tokens.tokenizers.mistral import MistralTokenizer
from mistral_common.tokens.tokenizers.tekken import Tekkenizer
from mistral_common.tokens.tokenizers.utils import (
download_tokenizer_from_hf_hub,
get_one_valid_tokenizer_file,
)
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
ENCODE_KWARGS_DOCSTRING = r"""
add_special_tokens (`bool`, *optional*, defaults to `True`):
Whether or not to add special tokens when encoding the sequences. This will use the underlying
`PretrainedTokenizerBase.build_inputs_with_special_tokens` function, which defines which tokens are
automatically added to the input ids. This is useful if you want to add `bos` or `eos` tokens
automatically. When Tokenizer is loading with `finetuning` mode it adds both `bos` and `eos`. Else, for "test" mode it only adds `bos`.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
sequence is provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or
to the maximum acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
stride (`int`, *optional*, defaults to 0):
If set to a number along with `max_length`, the overflowing tokens returned when
`return_overflowing_tokens=True` will contain some tokens from the end of the truncated sequence
returned to provide some overlap between truncated and overflowing sequences. The value of this
argument defines the number of overlapping tokens.
pad_to_multiple_of (`int`, *optional*):
If set will pad the sequence to a multiple of the provided value. Requires `padding` to be activated.
This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability
`>= 7.5` (Volta).
padding_side (`str`, *optional*):
The side on which the model should have padding applied. Should be selected between ['right', 'left'].
Default value is picked from the class attribute of the same name.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'pt'`: Return PyTorch `torch.Tensor` objects.
"""
ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING = r"""
return_token_type_ids (`bool`, *optional*):
Whether to return token type IDs. For `MistralCommonBackend` it returns a list of zeros of the sequence length as only one sequence is supported.
[What are token type IDs?](../glossary#token-type-ids)
return_attention_mask (`bool`, *optional*):
Whether to return the attention mask. If left to the default, will return the attention mask according
to the specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
return_overflowing_tokens (`bool`, *optional*, defaults to `False`):
Whether or not to return overflowing token sequences. If a pair of sequences of input ids (or a batch
of pairs) is provided with `truncation_strategy = longest_first` or `True`, an error is raised instead
of returning overflowing tokens.
return_special_tokens_mask (`bool`, *optional*, defaults to `False`):
Whether or not to return special tokens mask information.
return_length (`bool`, *optional*, defaults to `False`):
Whether or not to return the lengths of the encoded inputs.
verbose (`bool`, *optional*, defaults to `True`):
Whether or not to print more information and warnings.
return_offsets_mapping (`Literal[False]`, *optional*): False, kept to match Transformers' signature.
split_special_tokens (`Literal[False]`, *optional*): False, kept to match Transformers' signature.
**kwargs: passed to the `self.tokenize()` method
Return:
[`BatchEncoding`]: A [`BatchEncoding`] with the following fields:
- **input_ids** -- List of token ids to be fed to a model.
[What are input IDs?](../glossary#input-ids)
- **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
`return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names`).
[What are attention masks?](../glossary#attention-mask)
- **overflowing_tokens** -- List of overflowing tokens sequences (when a `max_length` is specified and
`return_overflowing_tokens=True`).
- **num_truncated_tokens** -- Number of tokens truncated (when a `max_length` is specified and
`return_overflowing_tokens=True`).
- **special_tokens_mask** -- List of 0s and 1s, with 1 specifying added special tokens and 0 specifying
regular sequence tokens (when `add_special_tokens=True` and `return_special_tokens_mask=True`).
- **length** -- The length of the inputs (when `return_length=True`)
"""
class MistralTokenizerType(str, Enum):
    """The two tokenizer flavors `mistral-common` can load: SentencePiece ("spm") or Tekken ("tekken")."""

    spm = "spm"
    tekken = "tekken"
@overload
def _maybe_remove_lang(text: str, skip_special_tokens: bool) -> str: ...
@overload
def _maybe_remove_lang(text: list[str], skip_special_tokens: bool) -> list[str]: ...
def _maybe_remove_lang(text: str | list[str], skip_special_tokens: bool) -> str | list[str]:
# in the specific case of Voxtral, the added f"lang:xx" (always a two char language code since it follows ISO 639-1 alpha-2 format)
# is not considered as a special token by mistral-common and is encoded/ decoded as normal text.
# Nevertheless we should remove it to ease users life.
if not skip_special_tokens:
return text
if isinstance(text, str):
return re.sub(r"^lang:[a-z]{2}", "", text)
return [re.sub(r"^lang:[a-z]{2}", "", string) for string in text]
# Maps Transformers special-token attribute names to the corresponding `mistral-common`
# special-token strings; splatted into `PreTrainedTokenizerBase.__init__` by the backend.
_MAP_SPECIAL_TOKENS = {
    "bos_token": SpecialTokens.bos.value,
    "eos_token": SpecialTokens.eos.value,
    "pad_token": SpecialTokens.pad.value,
    "unk_token": SpecialTokens.unk.value,
}
# The only extra kwargs `MistralCommonBackend.__init__` tolerates; anything else raises.
_VALID_INIT_KWARGS = {"_from_auto", "backend", "files_loaded"}
@requires(backends=("mistral-common",))
class MistralCommonBackend(PreTrainedTokenizerBase):
"""
Class to wrap `mistral-common` tokenizers.
`mistral-common` is the official tokenizer library for Mistral AI models. To use it, you need to install it with:
```bash
pip install transformers[mistral-common]
```
Otherwise the tokenizer falls back to the Transformers implementation of the tokenizer.
For more info on `mistral-common`, see [mistral-common](https://github.com/mistralai/mistral-common).
This class is a wrapper around a `mistral_common.tokens.tokenizers.mistral.MistralTokenizer`.
It provides a Hugging Face compatible interface to tokenize using the official mistral-common tokenizer and inherits from the `PreTrainedTokenizerBase` class.
Here are the key behavior differences with the `PythonBackend` class:
- Pair of sequences are not supported. The signature has been kept for compatibility but all arguments related to pair of sequences are ignored. The return values for pairs are returned as `None`.
- The `is_split_into_words` argument is not supported.
- It is not possible to add new tokens to the tokenizer. Special tokens are handled differently from Transformers. In `mistral-common`, special tokens are never encoded directly. This means that: `tokenizer.encode("<s>")` will not return the ID of the `<s>` token. Instead, it will return a list of IDs corresponding to the tokenization of the string `"<s>"`. For more information, see the [mistral-common documentation](https://mistralai.github.io/mistral-common/usage/tokenizers/#special-tokens).
If you have suggestions to improve this class, please open an issue on the [mistral-common GitHub repository](https://github.com/mistralai/mistral-common/issues) if it is related to the tokenizer or on the [Transformers GitHub repository](https://github.com/huggingface/transformers/issues) if it is related to the Hugging Face interface.
"""
model_input_names: list[str] = ["input_ids", "attention_mask"]
padding_side: str = "left"
truncation_side: str = "right"
SPECIAL_TOKENS_ATTRIBUTES = [
"bos_token",
"eos_token",
"unk_token",
"pad_token",
]
def __init__(
    self,
    tokenizer_path: str | os.PathLike | Path,
    mode: ValidationMode = ValidationMode.test,
    model_max_length: int = VERY_LARGE_INTEGER,
    padding_side: str = "left",
    truncation_side: str = "right",
    model_input_names: list[str] | None = None,
    clean_up_tokenization_spaces: bool = False,
    **kwargs,
):
    """
    Constructs a `MistralCommonBackend`.
    - **model_input_names** (`list[str]`) -- A list of inputs expected in the forward pass of the model.
    - **padding_side** (`str`) -- The default value for the side on which the model should have padding applied.
      Should be `'right'` or `'left'`.
    - **truncation_side** (`str`) -- The default value for the side on which the model should have truncation
      applied. Should be `'right'` or `'left'`.

    Args:
        tokenizer_path (`str` or `os.PathLike` or `Path`):
            Path to the tokenizer file to load the `MistralTokenizer`.
        mode (`Union[str, ValidationMode]`, *optional*, defaults to `ValidationMode.test`):
            The mode to use for the tokenizer. This will be passed to the `MistralTokenizer` constructor. Possible values are:
            - `"finetuning"` or `ValidationMode.finetuning`: The fine-tuning mode.
            - `"test"` or `ValidationMode.test`: The test mode.
            It changes how the tokenizer validates the input and prepares the request to the model.
        model_max_length (`int`, *optional*):
            The maximum length (in number of tokens) for the inputs to the transformer model. When the tokenizer is
            loaded with [`~tokenization_utils_base.PreTrainedTokenizerBase.from_pretrained`], this will be set to the
            value stored for the associated model in `max_model_input_sizes` (see above). If no value is provided, will
            default to VERY_LARGE_INTEGER (`int(1e30)`).
        padding_side (`str`, *optional*):
            The side on which the model should have padding applied. Should be selected between ['right', 'left'].
            Default value is picked from the class attribute of the same name.
        truncation_side (`str`, *optional*):
            The side on which the model should have truncation applied. Should be selected between ['right', 'left'].
            Default value is picked from the class attribute of the same name.
        model_input_names (`List[str]`, *optional*):
            The list of inputs accepted by the forward pass of the model (like `"token_type_ids"` or
            `"attention_mask"`). Default value is picked from the class attribute of the same name.
        clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):
            Whether or not the model should clean up the spaces that were added when splitting the input text during the
            tokenization process.
    """
    # Only a small allow-list of extra kwargs is tolerated (see `_VALID_INIT_KWARGS`).
    if kwargs and not set(kwargs.keys()).issubset(_VALID_INIT_KWARGS):
        raise ValueError(f"Kwargs {list(kwargs.keys())} are not supported to init `MistralCommonBackend`.")
    # Recorded so the constructor arguments can be round-tripped (e.g. when saving/reloading).
    self.init_kwargs = {
        "tokenizer_path": tokenizer_path,
        "mode": mode,
        "model_max_length": model_max_length,
        "padding_side": padding_side,
        "truncation_side": truncation_side,
        "model_input_names": model_input_names,
        "clean_up_tokenization_spaces": clean_up_tokenization_spaces,
    }
    self._tokenizer_path = Path(tokenizer_path)
    # Normalizes `mode` (string or `ValidationMode`) to a `ValidationMode` member.
    self._mode = self._get_validation_mode(mode)
    self.tokenizer: MistralTokenizer = MistralTokenizer.from_file(str(self._tokenizer_path), mode=self._mode)
    # Tekkenizer-backed tokenizers are "tekken"; every other backend is SentencePiece ("spm").
    self._tokenizer_type = (
        MistralTokenizerType.tekken
        if isinstance(self.tokenizer.instruct_tokenizer.tokenizer, Tekkenizer)
        else MistralTokenizerType.spm
    )
    # Vocab mapping is expensive to build, so it is cached lazily by `get_vocab`.
    self._cache_get_vocab: dict[str, int] | None = None
    # Special ids/tokens are fixed for the tokenizer's lifetime; resolve them once here.
    self._all_special_ids = self._get_all_special_ids()
    self._all_special_tokens = self.convert_ids_to_tokens(self.all_special_ids)
    super().__init__(
        truncation_side=truncation_side,
        padding_side=padding_side,
        model_max_length=model_max_length,
        clean_up_tokenization_spaces=clean_up_tokenization_spaces,
        extra_special_tokens=None,  # Not used by this backend.
        model_specific_special_tokens=None,  # Not used by this backend.
        model_input_names=model_input_names or self.model_input_names,
        **_MAP_SPECIAL_TOKENS,
        **kwargs,
    )
@property
def mode(self) -> ValidationMode:
    """
    `ValidationMode`: Validation mode the underlying `MistralTokenizer` was loaded with.

    Either `ValidationMode.finetuning` (`"finetuning"`) or `ValidationMode.test` (`"test"`);
    it changes how the tokenizer validates the input and prepares the request to the model.
    """
    return self._mode
@property
def all_special_ids(self) -> list[int]:
    """`list[int]`: Ids of the special tokens (`'<unk>'`, `'<cls>'`, etc.), in ascending order."""
    special_ids = self._all_special_ids
    return sorted(special_ids)
@property
def all_special_tokens(self) -> list[str]:
    """`list[str]`: String forms of the unique special tokens, resolved once at construction time."""
    return self._all_special_tokens
@property
def vocab_size(self) -> int:
    """`int`: Size of the base vocabulary, as reported by the wrapped mistral-common tokenizer."""
    inner_tokenizer = self.tokenizer.instruct_tokenizer.tokenizer
    return inner_tokenizer.n_words
def get_vocab(self) -> dict[str, int]:
    """
    Returns the vocabulary as a dictionary of token to index.

    This is a lossy conversion. There may be multiple token ids that decode to the same
    string due to partial UTF-8 byte sequences being converted to �.

    Returns:
        `Dict[str, int]`: The vocabulary.
    """
    if self._cache_get_vocab is None:
        # NOTE(review): a previous comment claimed the iteration order was reversed so that the
        # *first* of several identically-rendered tokens wins, but no reversal happens here: for
        # duplicate token strings, the last `_piece_to_id` result is kept. Confirm intended winner.
        vocab = self.tokenizer.instruct_tokenizer.tokenizer.vocab()
        self._cache_get_vocab = {token: self._piece_to_id(token, False) for token in vocab}
        # Order the dict by token id so the mapping is stable and id-sorted.
        self._cache_get_vocab = dict(
            sorted(((k, v) for k, v in self._cache_get_vocab.items()), key=lambda x: x[1])
        )
    return self._cache_get_vocab
def __len__(self):
    """Size of the full vocabulary (this backend does not support added tokens, so it equals `vocab_size`)."""
    return self.vocab_size
@add_end_docstrings(
    ENCODE_KWARGS_DOCSTRING,
    """
    **kwargs: Not supported by `MistralCommonBackend.encode`.
        Will raise an error if used.
    """,
    """
    Returns:
        `list[int]`, `torch.Tensor`: The tokenized ids of the text.
    """,
)
def encode(
    self,
    text: TextInput | EncodedInput,
    text_pair: None = None,
    add_special_tokens: bool = True,
    padding: bool | str | PaddingStrategy = False,
    truncation: bool | str | TruncationStrategy | None = None,
    max_length: int | None = None,
    stride: int = 0,
    pad_to_multiple_of: int | None = None,
    padding_side: str | None = None,
    return_tensors: str | TensorType | None = None,
    verbose: bool = True,
    return_offsets_mapping: Literal[False] = False,
    split_special_tokens: Literal[False] = False,
    **kwargs,
) -> list[int]:
    """
    Converts a string to a sequence of ids (integer), using the tokenizer and vocabulary.

    Args:
        text (`str` or `list[int]`):
            The first sequence to be encoded. This can be a string or a list of integers (tokenized string ids).
        text_pair (`None`, *optional*):
            Not supported by `MistralCommonBackend.encode`. Kept to match `PreTrainedTokenizerBase.encode` signature.
    """
    # Reject the Transformers features this backend does not implement. The check order is kept
    # stable so the same error is raised first when several unsupported options are combined.
    if return_offsets_mapping or split_special_tokens:
        raise ValueError(
            "`MistralCommonBackend` does not support `return_offsets_mapping` and `split_special_tokens`."
        )
    pair_truncation_strategies = (
        TruncationStrategy.ONLY_FIRST,
        TruncationStrategy.ONLY_SECOND,
        "only_first",
        "only_second",
    )
    if truncation in pair_truncation_strategies:
        raise ValueError(
            "Truncation strategy `only_first` and `only_second` are not supported by `MistralCommonBackend`."
        )
    if kwargs:
        raise ValueError(f"Kwargs {list(kwargs.keys())} are not supported by `MistralCommonBackend.encode`.")
    if text_pair:
        raise ValueError("`MistralCommonBackend.encode` does not support `text_pair`.")
    # Everything validated; delegate the actual encoding to the base implementation.
    return super().encode(
        text=text,
        text_pair=text_pair,
        add_special_tokens=add_special_tokens,
        padding=padding,
        truncation=truncation,
        max_length=max_length,
        stride=stride,
        return_tensors=return_tensors,
        pad_to_multiple_of=pad_to_multiple_of,
        padding_side=padding_side,
        verbose=verbose,
    )
def _decode(
    self,
    token_ids: int | list[int],
    skip_special_tokens: bool = False,
    clean_up_tokenization_spaces: bool | None = None,
    **kwargs,
) -> str:
    """Decode token ids with the mistral tokenizer, with optional cleanup of spaces."""
    # Reject unexpected kwargs explicitly rather than silently ignoring them.
    if kwargs:
        raise ValueError(f"Kwargs {list(kwargs.keys())} are not supported by `MistralCommonBackend.decode`.")
    ids = to_py_obj(token_ids)
    if isinstance(ids, int):
        ids = [ids]
    # Special tokens are either kept or dropped wholesale via mistral-common's policy enum.
    policy = SpecialTokenPolicy.IGNORE if skip_special_tokens else SpecialTokenPolicy.KEEP
    decoded = self.tokenizer.decode(ids, special_token_policy=policy)
    # Fall back to the instance-level default when the caller did not decide.
    if clean_up_tokenization_spaces is None:
        clean_up_tokenization_spaces = self.clean_up_tokenization_spaces
    if clean_up_tokenization_spaces:
        decoded = self.clean_up_tokenization(decoded)
    return _maybe_remove_lang(text=decoded, skip_special_tokens=skip_special_tokens)
def decode(
    self,
    token_ids: Union[int, list[int], list[list[int]], np.ndarray, "torch.Tensor"],
    skip_special_tokens: bool = False,
    clean_up_tokenization_spaces: bool | None = None,
    **kwargs,
) -> str | list[str]:
    """
    Convert a sequence of ids into a string, using the tokenizer and vocabulary, optionally
    removing special tokens and cleaning up tokenization spaces.

    Args:
        token_ids (`Union[int, list[int], list[list[int]], np.ndarray, torch.Tensor]`):
            A single sequence or a batch (list of sequences) of tokenized input ids. Can be obtained using the
            `__call__` method.
        skip_special_tokens (`bool`, *optional*, defaults to `False`):
            Whether or not to remove special tokens in the decoding.
        clean_up_tokenization_spaces (`bool`, *optional*):
            Whether or not to clean up the tokenization spaces. If `None`, will default to
            `self.clean_up_tokenization_spaces`.
        kwargs (additional keyword arguments, *optional*):
            Not supported by `MistralCommonBackend.decode`. Will raise an error if used.

    Returns:
        `Union[str, list[str]]`: The decoded string for a single sequence, or a list of decoded strings for a
        batch of sequences.
    """
    # This backend accepts no extra kwargs; fail loudly instead of dropping them.
    if kwargs:
        raise ValueError(f"Kwargs {list(kwargs.keys())} are not supported by `MistralCommonBackend.decode`.")
    return super().decode(
        token_ids=token_ids,
        skip_special_tokens=skip_special_tokens,
        clean_up_tokenization_spaces=clean_up_tokenization_spaces,
    )
def batch_decode(
    self,
    sequences: Union[list[int], list[list[int]], np.ndarray, "torch.Tensor"],
    skip_special_tokens: bool = False,
    clean_up_tokenization_spaces: bool | None = None,
    **kwargs,
) -> list[str]:
    """
    Convert a list of lists of token ids into a list of strings by calling decode.

    Provided for backwards compatibility: `decode` now handles batched input natively, so
    it can be used directly instead of `batch_decode`.

    Args:
        sequences (`Union[list[int], list[list[int]], np.ndarray, torch.Tensor]`):
            List of tokenized input ids. Can be obtained using the `__call__` method.
        skip_special_tokens (`bool`, *optional*, defaults to `False`):
            Whether or not to remove special tokens in the decoding.
        clean_up_tokenization_spaces (`bool`, *optional*):
            Whether or not to clean up the tokenization spaces. If `None`, will default to
            `self.clean_up_tokenization_spaces`.
        kwargs (additional keyword arguments, *optional*):
            Not supported by `MistralCommonBackend.batch_decode`. Will raise an error if used.

    Returns:
        `list[str]`: The list of decoded sentences.
    """
    # This backend accepts no extra kwargs; fail loudly instead of dropping them.
    if kwargs:
        raise ValueError(f"Kwargs {list(kwargs.keys())} are not supported by `MistralCommonBackend.batch_decode`.")
    return super().batch_decode(
        sequences=sequences,
        skip_special_tokens=skip_special_tokens,
        clean_up_tokenization_spaces=clean_up_tokenization_spaces,
    )
@overload
def convert_ids_to_tokens(self, ids: int, skip_special_tokens: bool = False) -> str: ...
@overload
def convert_ids_to_tokens(self, ids: list[int], skip_special_tokens: bool = False) -> list[str]: ...
def convert_ids_to_tokens(self, ids: int | list[int], skip_special_tokens: bool = False) -> str | list[str]:
"""
Converts a single index or a sequence of indices in a token or a sequence of tokens, using the vocabulary and
added tokens.
Args:
ids (`int` or `list[int]`):
The token id (or token ids) to convert to tokens.
skip_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not to remove special tokens in the decoding.
Returns:
`str` or `list[str]`: The decoded token(s).
"""
if isinstance(ids, int):
return_int = True
ids = [ids]
else:
return_int = False
tokens: list[str] = []
for token_id in ids:
if self.tokenizer.instruct_tokenizer.tokenizer.is_special(token_id) and skip_special_tokens:
continue
tokens.append(self.tokenizer.instruct_tokenizer.tokenizer.id_to_piece(token_id))
if return_int and tokens == []:
raise ValueError(f"Invalid token id {ids[0]}.")
elif return_int:
return tokens[0]
return tokens
def _tekken_piece_to_id(self, piece: str, warn: bool) -> int:
    """Resolve a piece to its tekken id: regular vocab first, then special tokens, else <unk>."""
    tekken = self.tokenizer.instruct_tokenizer.tokenizer
    assert isinstance(tekken, Tekkenizer), type(tekken)
    piece_bytes = piece.encode("utf-8")
    offset = tekken.num_special_tokens
    regular_vocab = tekken._tekken_token2id_nospecial
    if piece_bytes in regular_vocab:
        # Regular-token ids are stored without the special-token offset.
        return offset + regular_vocab[piece_bytes]
    piece_str = piece_bytes.decode("utf-8")
    if piece_str in tekken._special_tokens_reverse_vocab:
        return tekken._special_tokens_reverse_vocab[piece_str]
    if warn:
        logger.warning("Failed to convert token %s to id, replacing with <unk>", piece_bytes)
    return tekken.unk_id
def _piece_to_id(self, piece: str, warn: bool) -> int:
    """Map a token string to its id, dispatching on the tokenizer flavor (spm vs tekken)."""
    if self._tokenizer_type == MistralTokenizerType.spm:
        return self.tokenizer.instruct_tokenizer.tokenizer._model.piece_to_id(piece)
    if self._tokenizer_type == MistralTokenizerType.tekken:
        return self._tekken_piece_to_id(piece, warn)
    raise ValueError(f"Unknown tokenizer type: {self._tokenizer_type}")
def convert_tokens_to_ids(self, tokens: str | list[str]) -> int | list[int]:
"""
Converts a token string (or a sequence of tokens) in a single integer id (or a sequence of ids), using the
vocabulary.
Args:
tokens (`str` or `list[str]`): One or several token(s) to convert to token id(s).
Returns:
`int` or `list[int]`: The token id or list of token ids.
"""
if isinstance(tokens, str):
one_token = True
tokens = [tokens]
else:
one_token = False
ids: list[int] = []
for token in tokens:
ids.append(self._piece_to_id(token, True))
if one_token:
return ids[0]
return ids
def _text_to_ids(self, text: TextInput, add_special_tokens: bool) -> list[int]:
    """Encode *text* into token ids with the underlying mistral tokenizer."""
    # EOS is only appended in finetuning mode; BOS follows `add_special_tokens` directly.
    with_eos = add_special_tokens and self._mode == ValidationMode.finetuning
    return self.tokenizer.instruct_tokenizer.tokenizer.encode(text, bos=add_special_tokens, eos=with_eos)
def tokenize(
    self,
    text: TextInput,
    return_offsets_mapping: Literal[False] = False,
    split_special_tokens: Literal[False] = False,
    **kwargs,
) -> list[str]:
    """
    Convert a string into a sequence of tokens, using the tokenizer.

    Splits into words for word-based vocabularies or sub-words for sub-word-based ones.

    Args:
        text (`str`):
            The sequence to be encoded.
        return_offsets_mapping (`Literal[False]`, *optional*): False, kept to match Transformers' signature.
        split_special_tokens (`Literal[False]`, *optional*): False, kept to match Transformers' signature.
        **kwargs (additional keyword arguments):
            Not supported by `MistralCommonBackend.tokenize`. Will raise an error if used.

    Returns:
        `list[str]`: The list of tokens.
    """
    if return_offsets_mapping or split_special_tokens:
        raise ValueError(
            "`MistralCommonBackend` does not support `return_offsets_mapping` and `split_special_tokens`."
        )
    if kwargs:
        raise ValueError(f"Kwargs {list(kwargs.keys())} are not supported by `MistralCommonBackend.tokenize`.")
    # Encode without special tokens, then map ids back to their string pieces.
    token_ids = self._text_to_ids(text, add_special_tokens=False)
    return self.convert_ids_to_tokens(token_ids, skip_special_tokens=False)
def _get_all_special_ids(self) -> set[int]:
    """Collect the ids of all special tokens from the underlying tokenizer."""
    backend = self.tokenizer.instruct_tokenizer.tokenizer
    if self._tokenizer_type == MistralTokenizerType.tekken:
        # Tekken exposes the set directly.
        return backend._special_token_ids
    if self._tokenizer_type == MistralTokenizerType.spm:
        # SentencePiece requires probing every id.
        return {token_id for token_id in range(backend.n_words) if backend.is_special(token_id)}
    raise ValueError(f"Unknown tokenizer type: {self._tokenizer_type}")
def get_special_tokens_mask(
    self, token_ids_0: list[int], token_ids_1: None = None, already_has_special_tokens: bool = False
) -> list[int]:
    """
    Retrieve a special-tokens mask for a token list. Called when adding special tokens via
    the tokenizer's `prepare_for_model` or `encode_plus` methods.

    Args:
        token_ids_0 (`list[int]`): List of ids of the sequence.
        token_ids_1 (`None`, *optional*): None, kept to match Transformers' implementation.
        already_has_special_tokens (`bool`, *optional*, defaults to `False`):
            Whether or not the token list is already formatted with special tokens for the model.

    Returns:
        A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
    """
    if token_ids_1 is not None:
        raise ValueError(
            "`token_ids_1` is not supported by `MistralCommonBackend` and should be `None`, kept for compatibility."
        )
    if already_has_special_tokens:
        special = self._all_special_ids
        return [int(int(token_id) in special) for token_id in token_ids_0]
    body = [0] * len(token_ids_0)
    if self.mode == ValidationMode.test:
        # [BOS] seq0
        return [1] + body
    # [BOS] seq0 [EOS]
    return [1] + body + [1]
def _encode_plus(  # type: ignore[override]
    self,
    text: TextInput | PreTokenizedInput | EncodedInput,
    text_pair: None = None,
    add_special_tokens: bool = True,
    padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
    truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
    max_length: int | None = None,
    stride: int = 0,
    is_split_into_words: bool = False,
    pad_to_multiple_of: int | None = None,
    padding_side: str | None = None,
    return_tensors: str | TensorType | None = None,
    return_token_type_ids: bool | None = None,
    return_attention_mask: bool | None = None,
    return_overflowing_tokens: bool = False,
    return_special_tokens_mask: bool = False,
    return_length: bool = False,
    verbose: bool = True,
    return_offsets_mapping: Literal[False] = False,
    split_special_tokens: Literal[False] = False,
    **kwargs,
) -> BatchEncoding:
    """
    Tokenize and prepare one sequence, or a batch of sequences, for the model.

    Single inputs are delegated to `prepare_for_model`; batched inputs are encoded
    sample by sample *without* padding, then padded together in one `pad` call so
    that `longest` padding can see the whole batch.

    Raises:
        ValueError: If `text_pair`, `return_offsets_mapping`, `split_special_tokens`
            or any extra kwargs are used — none are supported by this backend.
    """
    if text_pair is not None:
        raise ValueError("`MistralCommonBackend` does not support `text_pair != None` for `_encode_plus`.")
    if return_offsets_mapping or split_special_tokens:
        raise ValueError(
            "`MistralCommonBackend` does not support `return_offsets_mapping` and `split_special_tokens`."
        )
    if kwargs:
        raise ValueError(f"Kwargs {list(kwargs.keys())} are not supported by `MistralCommonBackend._encode_plus`.")
    # Detect batched inputs (list of sequences). NOTE(review): an empty list with
    # `is_split_into_words=False` is treated as an empty batch — confirm intended.
    is_batched = isinstance(text, (list, tuple)) and (
        (not text and not is_split_into_words)
        or (text and is_split_into_words and isinstance(text[0], (list, tuple)))
        or (text and not is_split_into_words and isinstance(text[0], (str, list, tuple)))
    )
    if is_batched:
        batch_outputs = {}
        one_overflowed = False
        for current_text in text:
            # Recurse per sample; all padding-related options are deferred to the
            # batch-level `pad` call below.
            current_output = self._encode_plus(
                text=current_text,
                text_pair=None,
                add_special_tokens=add_special_tokens,
                padding_strategy=PaddingStrategy.DO_NOT_PAD,  # we pad in batch afterward
                truncation_strategy=truncation_strategy,
                max_length=max_length,
                stride=stride,
                is_split_into_words=is_split_into_words,
                pad_to_multiple_of=None,  # we pad in batch afterward
                padding_side=None,  # we pad in batch afterward
                return_tensors=None,  # We convert the whole batch to tensors at the end
                return_token_type_ids=return_token_type_ids,
                return_attention_mask=False,  # we pad in batch afterward
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_length=return_length,
                verbose=verbose,
            )
            for key, value in current_output.items():
                batch_outputs.setdefault(key, []).append(value)
            # To ensure the list is built for each sample, we need to add this.
            # Samples that did not overflow get placeholder `[0]` entries so every
            # key stays aligned with the batch.
            if return_overflowing_tokens and not return_tensors:
                if "overflowing_tokens" not in current_output:
                    batch_outputs.setdefault("overflowing_tokens", []).append([0])
                    batch_outputs.setdefault("num_truncated_tokens", []).append([0])
                else:
                    one_overflowed = True
        # Remove overflow-related keys before tensor conversion if return_tensors is set
        # Slow tokenizers don't support returning these as tensors
        # (also dropped when no sample actually overflowed — only placeholders exist).
        if return_overflowing_tokens and (return_tensors or not one_overflowed):
            batch_outputs.pop("overflowing_tokens", None)
            batch_outputs.pop("num_truncated_tokens", None)
        batch_outputs = self.pad(
            batch_outputs,
            padding=padding_strategy.value,
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            padding_side=padding_side,
            return_attention_mask=return_attention_mask,
        )
        return BatchEncoding(batch_outputs, tensor_type=return_tensors)

    def get_input_ids(text):
        # Accept either a raw string (tokenized here, without special tokens) or an
        # already-tokenized list of ids, which is passed through untouched.
        if isinstance(text, str):
            return self._text_to_ids(text, False)
        elif isinstance(text, (list, tuple)) and len(text) > 0 and isinstance(text[0], int):
            return text
        else:
            raise ValueError(f"Input {text} is not valid. Should be a string, or a list/tuple of integers.")

    first_ids = get_input_ids(text)
    # Single-sample path: special tokens, truncation and padding all happen here.
    return self.prepare_for_model(
        first_ids,
        pair_ids=None,
        add_special_tokens=add_special_tokens,
        padding=padding_strategy.value,
        truncation=truncation_strategy.value,
        max_length=max_length,
        stride=stride,
        pad_to_multiple_of=pad_to_multiple_of,
        padding_side=padding_side,
        return_tensors=return_tensors,
        prepend_batch_axis=True,
        return_attention_mask=return_attention_mask,
        return_token_type_ids=return_token_type_ids,
        return_overflowing_tokens=return_overflowing_tokens,
        return_special_tokens_mask=return_special_tokens_mask,
        return_length=return_length,
        verbose=verbose,
    )
@add_end_docstrings(ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def prepare_for_model(
    self,
    ids: list[int],
    pair_ids: None = None,
    add_special_tokens: bool = True,
    padding: bool | str | PaddingStrategy = False,
    truncation: bool | str | TruncationStrategy | None = None,
    max_length: int | None = None,
    stride: int = 0,
    pad_to_multiple_of: int | None = None,
    padding_side: str | None = None,
    return_tensors: str | TensorType | None = None,
    return_token_type_ids: bool | None = None,
    return_attention_mask: bool | None = None,
    return_overflowing_tokens: bool = False,
    return_special_tokens_mask: bool = False,
    return_length: bool = False,
    verbose: bool = True,
    prepend_batch_axis: bool = False,
    return_offsets_mapping: Literal[False] = False,
    split_special_tokens: Literal[False] = False,
    **kwargs,
) -> BatchEncoding:
    """
    Prepares a sequence of input id so that it can be used by the model. It
    adds special tokens, truncates sequences if overflowing while taking into account the special tokens and
    manages a moving window (with user defined stride) for overflowing tokens.

    The pipeline is order-sensitive: truncate first (budgeting for special tokens),
    then add special tokens, then build the optional masks, then pad.

    Args:
        ids (`list[int]`):
            Tokenized input ids of the first sequence.
        pair_ids (`None`, *optional*):
            Not supported by `MistralCommonBackend`. Kept to match the interface of `PreTrainedTokenizerBase`.
    """
    if return_offsets_mapping or split_special_tokens:
        raise ValueError(
            "`MistralCommonBackend` does not support `return_offsets_mapping` and `split_special_tokens`."
        )
    if pair_ids is not None:
        raise ValueError(
            "`pair_ids` is not supported by `MistralCommonBackend` and should be `None`, kept for compatibility."
        )
    if kwargs:
        raise ValueError(
            f"Kwargs {list(kwargs.keys())} are not supported by `MistralCommonBackend.prepare_for_model`."
        )
    # Normalize the user-facing padding/truncation arguments into strategy enums.
    # `kwargs` is necessarily empty here (checked above), so forwarding it is a no-op.
    padding_strategy, truncation_strategy, max_length, _ = self._get_padding_truncation_strategies(
        padding=padding,
        truncation=truncation,
        max_length=max_length,
        pad_to_multiple_of=pad_to_multiple_of,
        verbose=verbose,
        **kwargs,
    )
    # Validation
    # NOTE(review): `pair_ids` is always None at this point, so this branch is
    # unreachable; kept to mirror the base-class implementation.
    if (
        return_overflowing_tokens
        and truncation_strategy == TruncationStrategy.LONGEST_FIRST
        and pair_ids is not None
    ):
        raise ValueError(
            "Not possible to return overflowing tokens for pair of sequences with the "
            "`longest_first`. Please select another truncation strategy than `longest_first`, "
            "for instance `only_second` or `only_first`."
        )
    # Defaults: mirror the model's declared input names.
    if return_token_type_ids is None:
        return_token_type_ids = "token_type_ids" in self.model_input_names
    if return_attention_mask is None:
        return_attention_mask = "attention_mask" in self.model_input_names
    # Truncation: budget `max_length` for the special tokens that will be added later.
    num_special = self.num_special_tokens_to_add(pair=False) if add_special_tokens else 0
    total_len = len(ids) + len(pair_ids or []) + num_special
    overflowing_tokens = []
    if truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE and max_length and total_len > max_length:
        ids, _, overflowing_tokens = self.truncate_sequences(
            ids,
            pair_ids=None,
            num_tokens_to_remove=total_len - max_length,
            truncation_strategy=truncation_strategy,
            stride=stride,
        )
    # Add special tokens (mode-dependent BOS/EOS placement).
    if add_special_tokens:
        sequence = self.build_inputs_with_special_tokens(ids, None)
        token_type_ids = self.create_token_type_ids_from_sequences(ids, None)
    else:
        sequence = ids
        token_type_ids = [0] * len(sequence)
    # Build output
    encoded_inputs = {"input_ids": sequence}
    if return_token_type_ids:
        encoded_inputs["token_type_ids"] = token_type_ids
    if return_special_tokens_mask:
        # The mask is derived from the *truncated* ids so it matches `sequence`.
        encoded_inputs["special_tokens_mask"] = (
            self.get_special_tokens_mask(ids, None) if add_special_tokens else [0] * len(sequence)
        )
    # Overflow info is only representable without tensor conversion.
    if return_overflowing_tokens and not return_tensors and overflowing_tokens:
        encoded_inputs["overflowing_tokens"] = overflowing_tokens
        encoded_inputs["num_truncated_tokens"] = total_len - max_length if max_length else 0
    # Check sequence length and warn if needed
    self._eventual_warn_about_too_long_sequence(encoded_inputs["input_ids"], max_length, verbose)
    # Pad (also the path that materializes the attention mask when requested).
    if padding_strategy != PaddingStrategy.DO_NOT_PAD or return_attention_mask:
        encoded_inputs = self.pad(
            encoded_inputs,
            max_length=max_length,
            padding=padding_strategy.value,
            pad_to_multiple_of=pad_to_multiple_of,
            padding_side=padding_side,
            return_attention_mask=return_attention_mask,
        )
    if return_length:
        encoded_inputs["length"] = len(encoded_inputs["input_ids"])
    return BatchEncoding(encoded_inputs, tensor_type=return_tensors, prepend_batch_axis=prepend_batch_axis)
def truncate_sequences(  # type: ignore[override]
    self,
    ids: list[int],
    pair_ids: None = None,
    num_tokens_to_remove: int = 0,
    truncation_strategy: str | TruncationStrategy = "longest_first",
    stride: int = 0,
    **kwargs,
) -> tuple[list[int], None, list[int]]:
    """
    Truncate a sequence following the requested strategy.

    Args:
        ids (`list[int]`):
            Tokenized input ids. Can be obtained from a string by chaining the `tokenize` and
            `convert_tokens_to_ids` methods.
        pair_ids (`None`, *optional*):
            Not supported by `MistralCommonBackend`. Kept to match the signature of
            `PreTrainedTokenizerBase.truncate_sequences`.
        num_tokens_to_remove (`int`, *optional*, defaults to 0):
            Number of tokens to remove using the truncation strategy.
        truncation_strategy (`str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `'longest_first'`):
            The strategy to follow for truncation. Can be:

            - `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to the
              maximum acceptable input length for the model if that argument is not provided.
            - `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths greater
              than the model maximum admissible input size).
        stride (`int`, *optional*, defaults to 0):
            If set to a positive number, the overflowing tokens returned will contain some tokens from the main
            sequence returned. The value of this argument defines the number of additional tokens.

    Returns:
        `Tuple[list[int], None, list[int]]`: The truncated `ids` and the list of
        overflowing tokens. `None` is returned to match Transformers signature.
    """
    if pair_ids:
        raise ValueError("`pair_ids` is not supported by `MistralCommonBackend.truncate_sequences`.")
    if not isinstance(truncation_strategy, TruncationStrategy):
        truncation_strategy = TruncationStrategy(truncation_strategy)
    if truncation_strategy in (TruncationStrategy.ONLY_FIRST, TruncationStrategy.ONLY_SECOND):
        raise ValueError(f"{truncation_strategy=} is not supported by `MistralCommonBackend`.")
    if num_tokens_to_remove <= 0:
        return ids, None, []
    overflowing: list[int] = []
    if truncation_strategy == TruncationStrategy.LONGEST_FIRST:
        # The overflow window includes `stride` extra tokens from the kept side.
        window = min(len(ids), stride + num_tokens_to_remove)
        if self.truncation_side == "left":
            overflowing = ids[:window]
            ids = ids[num_tokens_to_remove:]
        else:
            overflowing = ids[-window:]
            ids = ids[:-num_tokens_to_remove]
    return ids, None, overflowing
def apply_chat_template(  # type: ignore[override]
    self,
    conversation: list[dict[str, str]] | list[list[dict[str, str]]],
    tools: list[dict | Callable] | None = None,
    add_generation_prompt: bool = False,
    continue_final_message: bool = False,
    tokenize: bool = True,
    padding: bool | str | PaddingStrategy = False,
    truncation: bool = False,
    max_length: int | None = None,
    return_tensors: str | TensorType | None = None,
    return_dict: bool = True,
    **kwargs,
) -> str | list[int] | list[str] | list[list[int]] | BatchEncoding:
    """
    Converts a list of dictionaries with `"role"` and `"content"` keys to a list of token
    ids.

    Args:
        conversation (Union[List[Dict[str, str]], List[List[Dict[str, str]]]]): A list of dicts
            with "role" and "content" keys, representing the chat history so far.
        tools (`List[Union[Dict, Callable]]`, *optional*):
            A list of tools (callable functions) that will be accessible to the model. If the template does not
            support function calling, this argument will have no effect. Each tool should be passed as a JSON Schema,
            giving the name, description and argument types for the tool. See our
            [chat templating guide](https://huggingface.co/docs/transformers/main/en/chat_templating#automated-function-conversion-for-tool-use)
            for more information.
        add_generation_prompt (`bool`, *optional*):
            This argument is a no-op for `MistralCommonBackend`. However, it cannot be used at the same time as `continue_final_message` to keep the API consistent.
            If any conversation ends with an assistant message, it will raise an error. In such cases, use `continue_final_message` instead.
        continue_final_message (bool, *optional*):
            If this is set, the chat will be formatted so that the final
            message in the chat is open-ended, without any EOS tokens. The model will continue this message
            rather than starting a new one. This allows you to "prefill" part of
            the model's response for it. Cannot be used at the same time as `add_generation_prompt`.
        tokenize (`bool`, defaults to `True`):
            Whether to tokenize the output. If `False`, the output will be a string.
        padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
            Select a strategy to pad the returned sequences (according to the model's padding side and padding
            index) among:

            - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
              sequence if provided).
            - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided.
            - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
              lengths).
        truncation (`bool`, defaults to `False`):
            Whether to truncate sequences at the maximum length. Has no effect if tokenize is `False`.
        max_length (`int`, *optional*):
            Maximum length (in tokens) to use for padding or truncation. Has no effect if tokenize is `False`. If
            not specified, the tokenizer's `max_length` attribute will be used as a default.
        return_tensors (`str` or [`~utils.TensorType`], *optional*):
            If set, will return tensors of a particular framework. Has no effect if tokenize is `False`. Acceptable
            values are:

            - `'pt'`: Return PyTorch `torch.Tensor` objects.
        return_dict (`bool`, defaults to `True`):
            Whether to return a dictionary with named outputs. Has no effect if tokenize is `False`.
            If at least one conversation contains an image, its pixel values will be returned in the `pixel_values` key.
        kwargs (additional keyword arguments, *optional*):
            Not supported by `MistralCommonBackend.apply_chat_template`.
            Will raise an error if used.

    Returns:
        `Union[str, list[int], list[str], list[list[int]], BatchEncoding]`: The tokenized chat so far, including control tokens. This output is ready to pass to the model, either directly or via methods like `generate()`.
    """
    if kwargs:
        raise ValueError(
            f"Kwargs {list(kwargs.keys())} are not supported by `MistralCommonBackend.apply_chat_template`."
        )
    if not isinstance(truncation, bool):
        raise TypeError("`truncation` must be a boolean for `apply_chat_template` method.")
    if add_generation_prompt and continue_final_message:
        raise ValueError("Cannot use both `add_generation_prompt` and `continue_final_message`.")
    # A batch is a list of conversations: either nested lists of messages, or objects
    # exposing a `.messages` attribute.
    if isinstance(conversation, (list, tuple)) and (
        isinstance(conversation[0], (list, tuple)) or hasattr(conversation[0], "messages")
    ):
        conversations = conversation
        is_batched = True
    else:
        conversations = [conversation]
        is_batched = False
    # `add_generation_prompt` is otherwise a no-op here; only guard against a trailing
    # assistant message (the caller should use `continue_final_message` for that).
    if add_generation_prompt:
        for conversation in conversations:
            last_message = conversation[-1]
            if last_message.get("role") == "assistant":
                raise ValueError(
                    "The last message in the conversation is already an assistant message. Consider using `continue_final_message` instead."
                )

    def _maybe_adapt_message(message: dict[str, Any]) -> dict[str, Any]:
        """Adapt message to `mistral-common` format and leave validation to `mistral-common`."""
        # Non-dict messages and plain-string contents are passed through untouched.
        if not isinstance(message, dict):
            return message
        maybe_list_content: str | list[dict[str, str | dict[str, Any]]] | None = message.get("content")
        if not maybe_list_content or isinstance(maybe_list_content, str):
            return message
        normalized_content: list[dict[str, str | dict[str, Any]]] = []
        message = message.copy()
        for content in maybe_list_content:
            content_type = content.get("type", None)
            if not content_type:
                # Entries without a type are silently dropped.
                continue
            elif content_type == "image":
                # Images become `image_url` entries: URL as-is, path as file:// URI,
                # bare base64 prefixed with a data-URI header.
                maybe_url: str | None = content.get("url")
                maybe_path: str | None = content.get("path")
                maybe_base64: str | None = content.get("base64")
                if maybe_url:
                    image_content = maybe_url
                elif maybe_path:
                    if not maybe_path.startswith("file://"):
                        maybe_path = Path(maybe_path).resolve().as_uri()
                    image_content = maybe_path
                elif maybe_base64:
                    if not maybe_base64.startswith("data:image"):
                        maybe_base64 = "data:image/unk;base64," + maybe_base64
                    image_content = maybe_base64
                else:
                    raise ValueError("Image content must be specified.")
                normalized_content.append({"type": "image_url", "image_url": {"url": image_content}})
            elif content_type == "audio":
                # URL/path audio is loaded into an `input_audio` dict; base64 audio is
                # forwarded as an `audio_url` entry.
                # NOTE(review): unlike images, the base64 string is NOT given a data-URI
                # prefix here — confirm mistral-common accepts it as an `audio_url`.
                maybe_url: str | None = content.get("url")
                maybe_path: str | None = content.get("path")
                maybe_base64: str | None = content.get("base64")
                if maybe_url or maybe_path:
                    audio_data = load_audio_as(maybe_url or maybe_path, return_format="dict", force_mono=True)
                    normalized_content.append({"type": "input_audio", "input_audio": audio_data})
                    continue
                if not maybe_base64:
                    raise ValueError("Audio content must be specified.")
                normalized_content.append({"type": "audio_url", "audio_url": {"url": maybe_base64}})
            else:
                normalized_content.append(content)
        message["content"] = normalized_content
        return message

    outputs = []
    images: list[np.ndarray] = []
    audios: list[np.ndarray] = []
    for conversation in conversations:
        messages: list[dict[str, str | list[dict[str, str | dict[str, Any]]]]] = []
        for message in conversation:
            message = _maybe_adapt_message(message)
            messages.append(message)
        # All structural validation is delegated to mistral-common.
        chat_request = ChatCompletionRequest.from_openai(
            messages=messages,
            tools=tools,
            continue_final_message=continue_final_message,
        )
        tokenized_request = self.tokenizer.encode_chat_completion(chat_request)
        if tokenize:
            outputs.append(tokenized_request.tokens)
        else:
            outputs.append(tokenized_request.text)
        # Multimodal payloads are accumulated across the whole batch.
        images.extend(tokenized_request.images)
        audios.extend([el.audio_array for el in tokenized_request.audios])
    if not is_batched:
        outputs = outputs[0]
    if tokenize:
        # Re-enter __call__ on the already-tokenized ids to apply padding/truncation
        # and tensor conversion; special tokens were already added by mistral-common.
        out = self(
            outputs,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            add_special_tokens=False,
            return_tensors=return_tensors,
        )
        if return_dict:
            if images:
                pixel_values: list[np.ndarray] | np.ndarray | torch.Tensor
                if return_tensors == "pt":
                    if not is_torch_available():
                        raise ImportError(
                            "Unable to convert output to PyTorch tensors format, PyTorch is not installed."
                        )
                    pixel_values = torch.from_numpy(np.stack(images))
                elif return_tensors == "np":
                    pixel_values = np.array(images)
                elif return_tensors is None:
                    pixel_values = images
                else:
                    raise ValueError(f"Unsupported return_tensors type: {return_tensors}")
                out.data["pixel_values"] = pixel_values
            if audios:
                if return_tensors is not None:
                    raise NotImplementedError(
                        "When passing audio content in apply_chat_template, `return_tensors` must be None since we cannot batch the audio inputs. The returned audio will be a list of numpy arrays."
                    )
                # Transformers convention is audio for plural audio (audio does not take a "s")
                out.data["audio"] = audios
            return out
        else:
            return out["input_ids"]
    else:
        logger.warning(
            "`MistralCommonBackend.apply_chat_template(..., tokenize=False)` is unsafe and may lead to unexpected behavior."
            " Please consider using `tokenize=True` instead and don't encode the output manually."
        )
        return outputs
def build_inputs_with_special_tokens(self, token_ids_0: list[int], token_ids_1: None = None) -> list[int]:
    """
    Build model inputs from a sequence by adding special tokens.

    This method dynamically builds inputs based on the tokenizer's `mode`:
    - `"test"`: [BOS] seq0
    - `"finetuning"`: [BOS] seq0 [EOS]

    (The previous docstring described the modes incorrectly; the mapping above matches
    the implementation and `get_special_tokens_mask`.)

    Args:
        token_ids_0 (`list[int]`):
            List of IDs to which the special tokens will be added.
        token_ids_1 (`None`, *optional*): None, kept to match Transformers' signature.

    Returns:
        `list[int]`: List of input IDs with the appropriate special tokens.
    """
    if token_ids_1 is not None:
        raise ValueError(
            "`MistralCommonBackend` does not implement `token_ids_1 != None` for `build_inputs_with_special_tokens`."
        )
    if self.mode == ValidationMode.test:
        # [BOS] seq0
        return [self.bos_token_id] + token_ids_0
    # [BOS] seq0 [EOS]
    return [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
def create_token_type_ids_from_sequences(self, token_ids_0: list[int], token_ids_1: None = None) -> list[int]:
"""
Create a mask of zeroes from the token ids with special tokens added.
Kept to match Transformers' implementation.
Args:
token_ids_0 (`list[int]`):
List of IDs.
token_ids_1 (`None`, *optional*): None, kept to match Transformers' signature.
Returns:
`list[int]`: Token type IDs according to the configured pattern.
"""
if token_ids_1 is not None:
raise ValueError(
"`MistralCommonBackend` does not implement `token_ids_1 != None` for `create_token_type_ids_from_sequences`."
)
sequence = self.build_inputs_with_special_tokens(token_ids_0)
return [0] * len(sequence)
def num_special_tokens_to_add(self, pair: Literal[False] = False) -> int:
"""
Returns the number of added tokens when encoding a sequence with special tokens.
<Tip>
This encodes a dummy input and checks the number of added tokens, and is therefore not efficient. Do not put
this inside your training loop.
</Tip>
Args:
pair (`Literal[False]`, *optional*): False, kept to match Transformer's signature.
Returns:
`int`: Number of special tokens added to sequences.
"""
if pair:
raise ValueError(
"`MistralCommonBackend` does not implement `pair = True` for `num_special_tokens_to_add`."
)
return len(self.build_inputs_with_special_tokens([], None))
@add_end_docstrings(ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def __call__(
self,
text: TextInput | EncodedInput | list[TextInput] | list[EncodedInput] | None = None,
text_pair: None = None,
text_target: None = None,
text_pair_target: None = None,
add_special_tokens: bool = True,
padding: bool | str | PaddingStrategy = False,
truncation: bool | str | TruncationStrategy | None = None,
max_length: int | None = None,
stride: int = 0,
pad_to_multiple_of: int | None = None,
padding_side: str | None = None,
return_tensors: str | TensorType | None = None,
return_attention_mask: bool | None = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_length: bool = False,
verbose: bool = True,
return_offsets_mapping: Literal[False] = False,
split_special_tokens: Literal[False] = False,
**kwargs,
) -> BatchEncoding:
"""
Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of
sequences.
Args:
text (`str`, `list[str]`, `list[list[str]]`, *optional*):
The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of int
(encoded strings).
text_pair (`None`, *optional*):
Not supported by `MistralCommonBackend`. Kept to match the signature of `PreTrainedTokenizerBase.__call__`.
text_target (`None`, *optional*):
Not supported by `MistralCommonBackend`. Kept to match the signature of `PreTrainedTokenizerBase.__call__`.
text_pair_target (`None`, *optional*):
Not supported by `MistralCommonBackend`. Kept to match the signature of `PreTrainedTokenizerBase.__call__`.
"""
if return_offsets_mapping or split_special_tokens:
raise ValueError(
"`MistralCommonBackend` does not support `return_offsets_mapping` and `split_special_tokens`."
)
if truncation in [TruncationStrategy.ONLY_FIRST, TruncationStrategy.ONLY_SECOND, "only_first", "only_second"]:
raise ValueError(
"Truncation strategy `only_first` and `only_second` are not supported by `MistralCommonBackend`."
)
if kwargs:
raise ValueError(f"Kwargs {list(kwargs.keys())} are not supported by `MistralCommonBackend.__call__`.")
if text_pair or text_target or text_pair_target:
raise ValueError(
"`text_pair`, `text_target` and `text_pair_target` are not supported by `MistralCommonBackend`."
)
return super().__call__(
text=text,
text_pair=text_pair,
text_target=text_target,
add_special_tokens=add_special_tokens,
padding=padding,
truncation=truncation,
max_length=max_length,
stride=stride,
pad_to_multiple_of=pad_to_multiple_of,
padding_side=padding_side,
return_tensors=return_tensors,
return_attention_mask=return_attention_mask,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_length=return_length,
verbose=verbose,
)
@classmethod
def from_pretrained(
cls,
pretrained_model_name_or_path: str | os.PathLike,
*init_inputs,
mode: str | ValidationMode = ValidationMode.test,
cache_dir: str | os.PathLike | None = None,
force_download: bool = False,
local_files_only: bool = False,
token: str | bool | None = None,
revision: str = "main",
model_max_length: int = VERY_LARGE_INTEGER,
padding_side: str = "left",
truncation_side: str = "right",
model_input_names: list[str] | None = None,
clean_up_tokenization_spaces: bool = False,
**kwargs,
):
r"""
Instantiate a `MistralCommonBackend` from a predefined
tokenizer.
Args:
pretrained_model_name_or_path (`str` or `os.PathLike`):
Can be either:
- A string, the *model id* of a predefined tokenizer hosted inside a model repo on huggingface.co.
- A path to a *directory* containing the tokenizer config, for instance saved
using the [`MistralCommonBackend.tokenization_mistral_common.save_pretrained`] method, e.g.,
`./my_model_directory/`.
mode (`Union[str, ValidationMode]`, *optional*, defaults to `ValidationMode.test`):
Validation mode for the `MistralTokenizer` tokenizer. Possible values are:
- `"finetuning"` or `ValidationMode.finetuning`: The fine-tuning mode.
- `"test"` or `ValidationMode.test`: The test mode.
It changes how the tokenizer validates the input and prepares the request to the model.
cache_dir (`str` or `os.PathLike`, *optional*):
Path to a directory in which a downloaded predefined tokenizer vocabulary files should be cached if the
standard cache should not be used.
force_download (`bool`, *optional*, defaults to `False`):
Whether or not to force the (re-)download the vocabulary files and override the cached versions if they
exist.
token (`str` or *bool*, *optional*):
The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated
when running `hf auth login` (stored in `~/.huggingface`).
local_files_only (`bool`, *optional*, defaults to `False`):
Whether or not to only rely on local files and not to attempt to download any files.
revision (`str`, *optional*, defaults to `"main"`):
The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
identifier allowed by git.
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
padding_side (`str`, *optional*, defaults to `"left"`):
The side on which the model should have padding applied. Should be selected between ['right', 'left'].
Default value is picked from the class attribute of the same name.
truncation_side (`str`, *optional*, defaults to `"right"`):
The side on which the model should have truncation applied. Should be selected between ['right', 'left'].
model_input_names (`List[str]`, *optional*):
The list of inputs accepted by the forward pass of the model (like `"token_type_ids"` or
`"attention_mask"`). Default value is picked from the class attribute of the same name.
clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):
Whether or not the model should clean up the spaces that were added when splitting the input text during the
tokenization process.
kwargs (additional keyword arguments, *optional*):
Not supported by `MistralCommonBackend.from_pretrained`.
Will raise an error if used.
"""
if init_inputs:
raise ValueError("`init_inputs` are not supported by `MistralCommonBackend.from_pretrained`.")
# Handle kwargs and AutoTokenizer/AutoProcessor case
valid_kwargs = _VALID_INIT_KWARGS.union(
{"trust_remote_code", "_from_pipeline", "_commit_hash", "dtype", "subfolder"}
)
if kwargs and not set(kwargs.keys()).issubset(valid_kwargs):
raise ValueError(
f"Some kwargs in {list(kwargs.keys())} are not supported by `MistralCommonBackend.from_pretrained`."
)
mode = cls._get_validation_mode(mode)
if not os.path.isdir(pretrained_model_name_or_path):
tokenizer_path = download_tokenizer_from_hf_hub(
repo_id=pretrained_model_name_or_path,
cache_dir=cache_dir,
token=token,
revision=revision,
force_download=force_download,
local_files_only=local_files_only,
)
else:
candidate_files = os.listdir(pretrained_model_name_or_path)
tokenizer_path = os.path.join(pretrained_model_name_or_path, get_one_valid_tokenizer_file(candidate_files))
return cls(
tokenizer_path=tokenizer_path,
mode=mode,
model_max_length=model_max_length,
padding_side=padding_side,
truncation_side=truncation_side,
model_input_names=model_input_names,
clean_up_tokenization_spaces=clean_up_tokenization_spaces,
)
def save_pretrained( # type: ignore[override]
self,
save_directory: str | os.PathLike | Path,
push_to_hub: bool = False,
token: str | bool | None = None,
commit_message: str | None = None,
repo_id: str | None = None,
private: bool | None = None,
**kwargs,
) -> tuple[str, ...]:
"""
Save the full tokenizer state.
This method make sure the full tokenizer can then be re-loaded using the
[`~MistralCommonBackend.tokenization_mistral_common.from_pretrained`] class method.
Args:
save_directory (`str` or `os.PathLike`): The path to a directory where the tokenizer will be saved.
push_to_hub (`bool`, *optional*, defaults to `False`):
Whether or not to push your model to the Hugging Face model hub after saving it. You can specify the
repository you want to push to with `repo_id` (will default to the name of `save_directory` in your
namespace).
token (`str` or *bool*, *optional*, defaults to `None`):
The token to use to push to the model hub. If `True`, will use the token in the `HF_TOKEN` environment
variable.
commit_message (`str`, *optional*): The commit message to use when pushing to the hub.
repo_id (`str`, *optional*): The name of the repository to which push to the Hub.
private (`bool`, *optional*): Whether the model repository is private or not.
kwargs (`Dict[str, Any]`, *optional*):
Not supported by `MistralCommonBackend.save_pretrained`.
Will raise an error if used.
Returns:
A tuple of `str`: The files saved.
"""
if kwargs:
raise ValueError(
f"Kwargs {list(kwargs.keys())} are not supported by `MistralCommonBackend.save_pretrained`."
)
save_directory = Path(save_directory)
save_directory.mkdir(parents=True, exist_ok=True)
shutil.copy(self._tokenizer_path, save_directory)
if push_to_hub:
repo_id = repo_id or str(save_directory).split(os.path.sep)[-1]
repo_id = create_repo(repo_id, token=token, private=private, exist_ok=True).repo_id
files_timestamps = self._get_files_timestamps(save_directory)
self._upload_modified_files(
save_directory,
repo_id,
files_timestamps,
commit_message=commit_message,
token=token,
)
return (str(save_directory / self._tokenizer_path.name),)
@staticmethod
def _get_validation_mode(mode: str | ValidationMode) -> ValidationMode:
"""Get the validation mode from a string or a ValidationMode."""
_invalid_mode_msg = (
f"Invalid `mistral-common` tokenizer mode: {mode}. Possible values are 'finetuning' or 'test'."
)
if isinstance(mode, str):
try:
mode = ValidationMode[mode]
except KeyError:
raise ValueError(_invalid_mode_msg)
elif not isinstance(mode, (str, ValidationMode)):
raise ValueError(_invalid_mode_msg)
if mode not in [ValidationMode.finetuning, ValidationMode.test]:
raise ValueError(_invalid_mode_msg)
return mode
def __repr__(self) -> str:
# MistralCommonBackend does not implement added_tokens_decoder, so we need a custom repr
return (
f"{self.__class__.__name__}(name_or_path='{self.name_or_path}',"
f" vocab_size={self.vocab_size}, model_max_length={self.model_max_length},"
f" padding_side='{self.padding_side}', truncation_side='{self.truncation_side}',"
f" special_tokens={self.special_tokens_map})"
)
def added_tokens_decoder(self):
raise NotImplementedError("`MistralCommonBackend` does not implement `added_tokens_decoder`.")
def add_special_tokens(
self,
special_tokens_dict: dict[str, str | AddedToken | Sequence[str | AddedToken]],
replace_extra_special_tokens: bool = True,
):
r"""`MistralCommonBackend` does not implement `add_special_tokens` by design.
If you would like this behaviour to be implemented, please open an issue in the `Transformers` or `mistral-common` repositories to request it.
"""
raise NotImplementedError("`MistralCommonBackend` does not implement `add_special_tokens`.")
def add_tokens( # type: ignore[override]
self,
special_tokens_dict: dict[str, str | AddedToken | Sequence[str | AddedToken]],
replace_extra_special_tokens: bool = True,
):
"""
`MistralCommonBackend` does not implement `add_special_tokens` by design.
If you would like this behaviour to be implemented, please open an issue in the `Transformers` or `mistral-common` repositories to request it.
"""
raise NotImplementedError("`MistralCommonBackend` does not implement `add_tokens`.")
def convert_added_tokens(cls, obj: AddedToken | Any, save: bool = False, add_type_field: bool = True): # type: ignore[override]
"""
`MistralCommonBackend` does not implement `convert_added_tokens` by design.
If you would like this behaviour to be implemented, please open an issue in the `Transformers` or `mistral-common` repositories to request it.
"""
raise NotImplementedError("`MistralCommonBackend` does not implement `convert_added_tokens`.")
def get_chat_template(self, chat_template: str | None = None, tools: list[dict] | None = None) -> str:
"""`MistralCommonBackend` does not implement `get_chat_template` by design as `mistral-common` does not use chat templates."""
raise NotImplementedError("`MistralCommonBackend` does not implement `get_chat_template`.")
def save_chat_templates(
self,
save_directory: str | os.PathLike,
tokenizer_config: dict,
filename_prefix: str | None,
save_jinja_files: bool,
):
"""`MistralCommonBackend` does not implement `save_chat_templates` by design as `mistral-common` does not use chat templates."""
raise NotImplementedError("`MistralCommonBackend` does not implement `save_chat_templates`.")
def save_vocabulary(self, save_directory: str, filename_prefix: str | None = None) -> tuple[str, ...]:
"""
`MistralCommonBackend` does not implement `save_vocabulary` by design.
This is because `mistral-common` is configured by one tokenizer file. If you'd like to save the vocabulary, please consider using the `save_pretrained` method instead.
"""
raise NotImplementedError("`MistralCommonBackend` does not implement `save_vocabulary`.")
# Backward compatibility alias for codebases still importing the legacy name
# `MistralCommonTokenizer`; new code should import `MistralCommonBackend` directly.
MistralCommonTokenizer = MistralCommonBackend
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/tokenization_mistral_common.py",
"license": "Apache License 2.0",
"lines": 1417,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/perception_lm/configuration_perception_lm.py | # Copyright 2025 Meta Platforms, Inc. and the HuggingFace Inc. team. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PerceptionLM model configuration"""
from ...configuration_utils import PreTrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING, AutoConfig
from ..timm_wrapper.configuration_timm_wrapper import TimmWrapperConfig
logger = logging.get_logger(__name__)
class PerceptionLMConfig(PreTrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`PerceptionLMForConditionalGeneration`]. It is used to instantiate an
    PerceptionLM model according to the specified arguments, defining the model architecture.

    Example models:

    - [facebook/Perception-LM-1B](https://huggingface.co/facebook/Perception-LM-1B).
    - [facebook/Perception-LM-3B](https://huggingface.co/facebook/Perception-LM-3B).
    - [facebook/Perception-LM-8B](https://huggingface.co/facebook/Perception-LM-8B).

    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.

    Args:
        vision_config (`Union[TimmWrapperConfig, dict]`, *optional*, defaults to `TimmWrapperConfig()`):
            The config object or dictionary of the vision backbone.
        text_config (`Union[PreTrainedConfig, dict]`, *optional*, defaults to `LlamaConfig()`):
            The config object or dictionary of the text backbone.
        vision_use_cls_token (`bool`, *optional*, defaults to `True`):
            Whether CLS token is used in the vision backbone. If used, we remove CLS token embedding from vision output.
        projector_pooling_ratio (`int`, *optional*, defaults to 1):
            The pooling ratio used in the multimodal projector.
        image_token_id (`int`, *optional*, defaults to 128002):
            The image token index to encode the image prompt.
        video_token_id (`int`, *optional*, defaults to 128003):
            The video token index to encode the video prompt.
    """

    model_type = "perception_lm"
    sub_configs = {"text_config": AutoConfig, "vision_config": TimmWrapperConfig}

    def __init__(
        self,
        vision_config=None,
        text_config=None,
        vision_use_cls_token=True,
        projector_pooling_ratio=1,
        image_token_id=128002,
        video_token_id=128003,
        **kwargs,
    ):
        self.image_token_id = image_token_id
        self.video_token_id = video_token_id

        # Normalize the vision config: a dict becomes a TimmWrapperConfig, None gets the default;
        # an already-built config object is kept as-is.
        if vision_config is None:
            vision_config = TimmWrapperConfig()
        elif isinstance(vision_config, dict):
            vision_config = TimmWrapperConfig(**vision_config)
        self.vision_config = vision_config
        self.vision_use_cls_token = vision_use_cls_token

        # Normalize the text config likewise, defaulting the backbone type to llama.
        if text_config is None:
            text_config = CONFIG_MAPPING["llama"]()
        elif isinstance(text_config, dict):
            text_config["model_type"] = text_config.get("model_type", "llama")
            text_config = CONFIG_MAPPING[text_config["model_type"]](**text_config)
        self.text_config = text_config

        self.projector_pooling_ratio = projector_pooling_ratio
        super().__init__(**kwargs)


__all__ = ["PerceptionLMConfig"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/perception_lm/configuration_perception_lm.py",
"license": "Apache License 2.0",
"lines": 73,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/perception_lm/convert_perception_lm_weights_to_hf.py | # Copyright 2025 Meta Platforms, Inc. and the HuggingFace Inc. team. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import gc
import json
import os
import tempfile
import warnings
import torch
from timm.models.eva import checkpoint_filter_fn
from tokenizers import AddedToken, processors
from transformers import (
GenerationConfig,
LlamaConfig,
LlamaTokenizer,
PreTrainedTokenizerFast,
)
from transformers.convert_slow_tokenizer import TikTokenConverter
from transformers.models.auto.modeling_auto import AutoModel
from transformers.models.perception_lm.configuration_perception_lm import (
PerceptionLMConfig,
)
from transformers.models.perception_lm.image_processing_perception_lm_fast import (
PerceptionLMImageProcessorFast,
)
from transformers.models.perception_lm.modeling_perception_lm import (
PerceptionLMForConditionalGeneration,
)
from transformers.models.perception_lm.processing_perception_lm import (
PerceptionLMProcessor,
)
from transformers.models.perception_lm.video_processing_perception_lm import (
PerceptionLMVideoProcessor,
)
from transformers.models.timm_wrapper.configuration_timm_wrapper import TimmWrapperConfig
try:
from transformers import LlamaTokenizerFast
except ImportError as e:
warnings.warn(e)
warnings.warn(
"The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion"
)
LlamaTokenizerFast = None
"""
Sample usage:
```
python src/transformers/models/perception_lm/convert_perception_lm_weights_to_hf.py \
--input_dir /path/to/downloaded/perception_lm/model_path --output_dir /output/path
```
Thereafter, models can be loaded via:
```py
from transformers import LlamaForCausalLM, LlamaTokenizer
model = LlamaForCausalLM.from_pretrained("/output/path")
tokenizer = LlamaTokenizer.from_pretrained("/output/path")
```
Important note: you need to be able to host the whole model in RAM to execute this script (even if the biggest versions
come in several checkpoints they each contain a part of each weight of the model, so we need to load them all in RAM).
If you want your tokenizer to add a bos automatically you should update the tokenizer._tokenizers.post_processor:
```py
from tokenizers import processors
bos = "<|begin_of_text|>"
tokenizer._tokenizers.post_processor = processors.Sequence(
[
processors.ByteLevel(trim_offsets=False),
processors.TemplateProcessing(
single=f"{bos}:0 $A:0",
pair=f"{bos}:0 $A:0 {bos}:1 $B:1",
special_tokens=[
(bos, tokenizer.encode(bos)),
],
),
]
)
```
"""
# Special tokens used by PerceptionLM checkpoints. Each is declared with
# `normalized=False` and `special=True` so the tokenizer treats the literal
# `<|...|>` string as a single token.
BOS_ADDED_TOKEN = AddedToken(
    "<|begin_of_text|>",
    single_word=False,
    lstrip=False,
    rstrip=False,
    normalized=False,
    special=True,
)
EOS_ADDED_TOKEN = AddedToken(
    "<|end_of_text|>",
    single_word=False,
    lstrip=False,
    rstrip=False,
    normalized=False,
    special=True,
)
EOT_ADDED_TOKEN = AddedToken(
    "<|eot_id|>",
    single_word=False,
    lstrip=False,
    rstrip=False,
    normalized=False,
    special=True,
)

# Full special-token vocabulary: the named tokens (BOS/EOS, image/video
# placeholders, header markers, end-of-turn) followed by reserved placeholder
# slots `<|reserved_special_token_5|>` .. `<|reserved_special_token_250|>`.
DEFAULT_SPECIAL_TOKENS = {
    "perception_lm": [
        "<|begin_of_text|>",
        "<|end_of_text|>",
        "<|image|>",
        "<|video|>",
        "<|reserved_special_token_2|>",
        "<|reserved_special_token_3|>",
        "<|start_header_id|>",
        "<|end_header_id|>",
        "<|reserved_special_token_4|>",
        "<|eot_id|>",  # End of turn
    ]
    + [f"<|reserved_special_token_{i}|>" for i in range(5, 256 - 5)]
}
CHAT_TEMPLATE = (
"{{- bos_token }}"
"{%- if messages[0]['role'] == 'system' -%}"
" {%- set system_message = messages[0]['content']|trim %}\n"
" {%- set messages = messages[1:] %}\n"
"{%- else %}"
" {%- set system_message = 'You are a helpful language and vision assistant. You are able to understand the visual content that the user provides, and assist the user with a variety of tasks using natural language.' %}"
"{%- endif %}"
"{{- '<|start_header_id|>system<|end_header_id|>\\n\\n' }}"
"{{- system_message }}"
"{{- '<|eot_id|>' }}"
"{%- for message in messages %}"
"{{- '<|start_header_id|>' + message['role'] + '<|end_header_id|>\\n\\n' }}"
"{%- for content in message['content'] | selectattr('type', 'equalto', 'image') %}"
"{{ '<|image|>' }}"
"{%- endfor %}"
"{%- for content in message['content'] | selectattr('type', 'equalto', 'video') %}"
"{{ '<|video|>' }}"
"{%- endfor %}"
"{%- for content in message['content'] | selectattr('type', 'equalto', 'text') %}"
"{{- content['text'] | trim }}"
"{%- endfor %}"
"{{'<|eot_id|>' }}"
"{%- endfor %}"
"{%- if add_generation_prompt %}"
"{{- '<|start_header_id|>assistant<|end_header_id|>\\n\\n' }}"
"{%- endif %}"
)
def compute_intermediate_size(n, ffn_dim_multiplier=1, multiple_of=256):
    """Compute the Llama-style feed-forward hidden size for model width `n`.

    Starts from `int(8 * n / 3)`, scales it by `ffn_dim_multiplier`, and rounds the
    result up to the nearest multiple of `multiple_of`.
    """
    base = int(8 * n / 3)
    scaled = int(ffn_dim_multiplier * base)
    # Ceiling division then re-multiplication rounds up to a multiple of `multiple_of`.
    return ((scaled + multiple_of - 1) // multiple_of) * multiple_of
def read_json(path):
    """Load and return the JSON document stored at `path`."""
    with open(path, "r") as handle:
        return json.loads(handle.read())
def write_json(text, path):
    """Serialize `text` as JSON into the file at `path`."""
    with open(path, "w") as handle:
        handle.write(json.dumps(text))
def write_weights(state_dict, index_dict, param_count, filename):
    """Save `state_dict` to `filename` and record every tensor in the shard index.

    Args:
        state_dict: Mapping of parameter name -> tensor to serialize.
        index_dict: Shard index; its `"weight_map"` is updated in place so each
            saved parameter points at `filename`.
        param_count: Running total of saved parameter elements.
        filename: Target path passed to `torch.save`.

    Returns:
        The updated running parameter count.
    """
    for name, tensor in state_dict.items():
        index_dict["weight_map"][name] = filename
        param_count += tensor.numel()
    torch.save(state_dict, filename)
    # Fix: the message was an f-string with no placeholder; report the actual shard path.
    print(f"Saved {filename}")
    return param_count
def write_model(
    model_path,
    input_base_path,
    params,
    image_token_id,
    tokenizer=None,
    num_shards=None,
    push_to_hub=False,
):
    """Convert an original PerceptionLM checkpoint into Transformers format.

    Reads the `consolidated.pth` checkpoint under `input_base_path`, rewrites the
    language-model, projector, and vision-tower weights into per-layer
    `pytorch_model-*.bin` shards inside a temporary directory, builds the matching
    `PerceptionLMConfig`, reloads everything as a
    `PerceptionLMForConditionalGeneration`, and finally saves it to disk (or pushes
    it to the Hub when `push_to_hub` is set).

    NOTE(review): `image_token_id` is not read in this function (the ids come from
    `tokenizer` instead), and `num_shards` is unconditionally overridden to 1 below.
    """
    print("Converting the model.")
    # Sharded source checkpoints are not supported (see the assert in the layer loop).
    num_shards = 1
    model_params = params.get("model", params)
    n_layers = model_params["n_layers"]
    n_heads = model_params["n_heads"]
    dim = model_params["dim"]
    dims_per_head = dim // n_heads
    base = model_params.get("rope_theta", 10000.0)
    # Rotary-embedding inverse frequencies, one per even head dimension.
    inv_freq = 1.0 / (base ** (torch.arange(0, dims_per_head, 2).float() / dims_per_head))
    context_length = model_params["max_seqlen"]
    max_position_embeddings = context_length
    tie_word_embeddings = model_params.get("weight_tying", False)
    projector_pooling_ratio = model_params.get("pooling_ratio", 1)
    if model_params.get("n_kv_heads", None) is not None:
        num_key_value_heads = model_params["n_kv_heads"]  # for GQA / MQA
        key_value_dim = dims_per_head * num_key_value_heads
    else:  # compatibility with other checkpoints
        num_key_value_heads = n_heads
        key_value_dim = dim

    # permute for sliced rotary: reorders q/k projection rows so HF's rotary
    # implementation (half-split) matches the original interleaved layout.
    def permute(w, n_heads, dim1=dim, dim2=dim):
        return w.view(n_heads, dim1 // n_heads // 2, 2, dim2).transpose(1, 2).reshape(dim1, dim2)

    with tempfile.TemporaryDirectory() as tmp_model_path:
        print(f"Fetching all parameters from the checkpoint at {input_base_path}.")
        # Load weights
        if num_shards == 1:
            # Not sharded
            # (The sharded implementation would also work, but this is simpler.)
            loaded = torch.load(
                os.path.join(input_base_path, "consolidated.pth"),
                map_location="cpu",
                weights_only=True,
            )
        else:
            # Sharded
            checkpoint_list = sorted([file for file in os.listdir(input_base_path) if file.endswith(".pth")])
            print("Loading in order:", checkpoint_list)
            loaded = [
                torch.load(
                    os.path.join(input_base_path, file),
                    map_location="cpu",
                    weights_only=True,
                )
                for file in checkpoint_list
            ]
        param_count = 0
        index_dict = {"weight_map": {}}
        # One shard per transformer layer: remap attention/MLP/norm weights to HF names.
        for layer_i in range(n_layers):
            filename = f"pytorch_model-{layer_i + 1}-of-{n_layers + 2}.bin"
            assert num_shards == 1, "PerceptionLM does not support sharded weights"
            state_dict = {
                f"model.language_model.layers.{layer_i}.self_attn.q_proj.weight": permute(
                    loaded[f"layers.{layer_i}.attention.wq.weight"], n_heads=n_heads
                ),
                f"model.language_model.layers.{layer_i}.self_attn.k_proj.weight": permute(
                    loaded[f"layers.{layer_i}.attention.wk.weight"],
                    n_heads=num_key_value_heads,
                    dim1=key_value_dim,
                ),
                f"model.language_model.layers.{layer_i}.self_attn.v_proj.weight": loaded[
                    f"layers.{layer_i}.attention.wv.weight"
                ],
                f"model.language_model.layers.{layer_i}.self_attn.o_proj.weight": loaded[
                    f"layers.{layer_i}.attention.wo.weight"
                ],
                f"model.language_model.layers.{layer_i}.mlp.gate_proj.weight": loaded[
                    f"layers.{layer_i}.feed_forward.w1.weight"
                ],
                f"model.language_model.layers.{layer_i}.mlp.down_proj.weight": loaded[
                    f"layers.{layer_i}.feed_forward.w2.weight"
                ],
                f"model.language_model.layers.{layer_i}.mlp.up_proj.weight": loaded[
                    f"layers.{layer_i}.feed_forward.w3.weight"
                ],
                f"model.language_model.layers.{layer_i}.input_layernorm.weight": loaded[
                    f"layers.{layer_i}.attention_norm.weight"
                ],
                f"model.language_model.layers.{layer_i}.post_attention_layernorm.weight": loaded[
                    f"layers.{layer_i}.ffn_norm.weight"
                ],
            }
            state_dict[f"model.language_model.layers.{layer_i}.self_attn.rotary_emb.inv_freq"] = inv_freq
            for k, v in state_dict.items():
                index_dict["weight_map"][k] = filename
                param_count += v.numel()
            torch.save(state_dict, os.path.join(tmp_model_path, filename))
            # NOTE(review): this f-string has no placeholder — the shard filename
            # appears to have been dropped from the message; confirm intended.
            print(f"Saved (unknown)")
        # Shard n_layers+1: token embeddings, final norm, multimodal projector
        # (and the lm_head when embeddings are not tied).
        filename = f"pytorch_model-{n_layers + 1}-of-{n_layers + 2}.bin"
        state_dict = {
            "model.language_model.embed_tokens.weight": loaded["tok_embeddings.weight"],
            "model.language_model.norm.weight": loaded["norm.weight"],
            "model.multi_modal_projector.linear_1.weight": loaded["vision_projector.projector.0.weight"],
            "model.multi_modal_projector.linear_2.weight": loaded["vision_projector.projector.2.weight"],
            "model.multi_modal_projector.linear_1.bias": loaded["vision_projector.projector.0.bias"],
            "model.multi_modal_projector.linear_2.bias": loaded["vision_projector.projector.2.bias"],
        }
        if not tie_word_embeddings:
            state_dict["lm_head.weight"] = loaded["output.weight"]
        for k, v in state_dict.items():
            index_dict["weight_map"][k] = filename
            param_count += v.numel()
        torch.save(state_dict, os.path.join(tmp_model_path, filename))
        print(f"Saved (unknown)")
        # Last shard: the vision tower, remapped through timm's checkpoint filter.
        filename = f"pytorch_model-{n_layers + 2}-of-{n_layers + 2}.bin"
        state_dict = {k.replace("vision_model.", ""): v for k, v in loaded.items() if "vision_model" in k}
        vision_params = model_params["vision_model"]
        # Only the two known Perception Encoder sizes are supported.
        if vision_params["layers"] == 23 and vision_params["width"] == 1024:
            architecture = "vit_pe_core_large_patch14_336"
        elif vision_params["layers"] == 47 and vision_params["width"] == 1536:
            architecture = "vit_pe_core_gigantic_patch14_448"
        else:
            raise ValueError(
                f"Unsupported PE config: {vision_params['layers']} layers and {vision_params['width']} width"
            )
        vision_config = TimmWrapperConfig.from_pretrained(
            f"timm/{architecture}.fb",
            model_args={
                "embed_dim": vision_params["width"],
                "depth": vision_params["layers"],
                "img_size": (vision_params["image_size"], vision_params["image_size"]),
                "global_pool": "",
                "use_post_transformer_norm": vision_params["use_ln_post"],
                "init_values": vision_params["ls_init_value"],
                "ref_feat_shape": (
                    vision_params["image_size"] // vision_params["patch_size"],
                    vision_params["image_size"] // vision_params["patch_size"],
                ),
            },
        )
        perception_encoder = AutoModel.from_config(vision_config)
        state_dict = checkpoint_filter_fn(state_dict, perception_encoder)
        state_dict = {"model.vision_tower.timm_model." + k: v for k, v in state_dict.items()}
        for k, v in state_dict.items():
            index_dict["weight_map"][k] = filename
            param_count += v.numel()
        torch.save(state_dict, os.path.join(tmp_model_path, filename))
        print(f"Saved (unknown)")
        # Write configs
        index_dict["metadata"] = {"total_size": param_count * 2}
        write_json(index_dict, os.path.join(tmp_model_path, "pytorch_model.bin.index.json"))
        ffn_dim_multiplier = model_params.get("ffn_dim_multiplier", 1)
        multiple_of = model_params.get("multiple_of", 256)
        bos_token_id = tokenizer.convert_tokens_to_ids("<|begin_of_text|>")
        eos_token_id = [tokenizer.convert_tokens_to_ids(t) for t in ["<|end_of_text|>", "<|eot_id|>"]]
        use_scaled_rope = model_params["use_scaled_rope"]
        if use_scaled_rope:
            # Llama-3 style RoPE frequency scaling parameters.
            rope_parameters = {
                "factor": model_params["rope_scale_factor"] * 1.0,
                "low_freq_factor": model_params.get("low_freq_factor", 1.0) * 1.0,
                "high_freq_factor": model_params.get("high_freq_factor", 4.0) * 1.0,
                "original_max_position_embeddings": 8192,
                "rope_type": "llama3",
            }
        else:
            rope_parameters = None
        text_config = LlamaConfig(
            hidden_size=dim,
            intermediate_size=compute_intermediate_size(dim, ffn_dim_multiplier, multiple_of),
            num_attention_heads=model_params["n_heads"],
            num_hidden_layers=model_params["n_layers"],
            rms_norm_eps=model_params["norm_eps"],
            num_key_value_heads=num_key_value_heads,
            vocab_size=len(tokenizer),
            rope_theta=base,
            rope_parameters=rope_parameters,
            max_position_embeddings=max_position_embeddings,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
        )
        config = PerceptionLMConfig(
            text_config=text_config.to_dict(),
            vision_config=vision_config.to_dict(),
            projector_pooling_ratio=projector_pooling_ratio,
            vision_use_cls_token=vision_params["use_cls_token"],
            image_token_id=tokenizer.image_token_id,
            video_token_id=tokenizer.video_token_id,
        )
        config.save_pretrained(tmp_model_path)
        generation_config = GenerationConfig(
            do_sample=False,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
        )
        generation_config.save_pretrained(tmp_model_path)
        # Make space so we can load the model properly now.
        del state_dict
        # output_weight = loaded.get("output.weight", None)
        del loaded
        gc.collect()
        print("Loading the checkpoint in a PerceptionLM model.")
        model = PerceptionLMForConditionalGeneration.from_pretrained(
            tmp_model_path, dtype=torch.bfloat16, low_cpu_mem_usage=True
        )
        # if not tie_word_embeddings:
        #     if output_weight is None:
        #         raise ValueError("Output weight/lm_head is not found in the checkpoint.")
        #     model.lm_head.load_state_dict({"weight": output_weight})
        # Avoid saving this as part of the config.
        del model.config._name_or_path
        model.config.dtype = torch.bfloat16
        print("Saving in the Transformers format.")
        model_name = model_path.split(os.path.sep)[-1]
        if push_to_hub:
            print("Pushing to the hub.")
            model.push_to_hub(model_name, private=True)
        else:
            print("Saving to disk.")
            model.save_pretrained(model_name)
class Llama3Converter(TikTokenConverter):
    """Convert a Llama-3 TikToken vocab into a `PreTrainedTokenizerFast` for PerceptionLM.

    The converted tokenizer is exposed as `self.converted_tokenizer` with
    `<|image|>` / `<|video|>` placeholder tokens registered as extra special
    tokens and their resolved ids attached as `image_token_id` / `video_token_id`.
    """

    def __init__(
        self,
        vocab_file,
        special_tokens=None,
        context_length=11520,
        **kwargs,
    ):
        super().__init__(vocab_file, additional_special_tokens=special_tokens, **kwargs)
        tokenizer = self.converted()
        self.converted_tokenizer = PreTrainedTokenizerFast(
            tokenizer_object=tokenizer,
            bos_token="<|begin_of_text|>",
            eos_token="<|eot_id|>",
            model_input_names=["input_ids", "attention_mask"],
            model_max_length=context_length,
            clean_up_tokenization_spaces=True,
            extra_special_tokens={
                "image_token": "<|image|>",
                "video_token": "<|video|>",
                "pad_token": "<|end_of_text|>",
            },
        )
        # Resolve the multimodal placeholder tokens to their ids once, so downstream
        # code (processor/config) can reference them without re-encoding.
        self.converted_tokenizer.image_token_id = self.converted_tokenizer.encode(
            self.converted_tokenizer.image_token, add_special_tokens=False
        )[0]
        self.converted_tokenizer.video_token_id = self.converted_tokenizer.encode(
            self.converted_tokenizer.video_token, add_special_tokens=False
        )[0]
        self.update_post_processor(self.converted_tokenizer)
        # finer special_tokens_map.json
        self.converted_tokenizer._bos_token = BOS_ADDED_TOKEN
        self.converted_tokenizer._eos_token = EOT_ADDED_TOKEN

    # We can't do this while building the tokenizer because we have no easy access to the bos token id
    def update_post_processor(self, tokenizer):
        """Install a post-processor that prepends `<|begin_of_text|>` to every encoded sequence."""
        tokenizer._tokenizer.post_processor = processors.Sequence(
            [
                processors.ByteLevel(trim_offsets=False),
                processors.TemplateProcessing(
                    single="<|begin_of_text|> $A",
                    pair="<|begin_of_text|>:0 $A:0 <|begin_of_text|>:1 $B:1",
                    special_tokens=[
                        (
                            "<|begin_of_text|>",
                            tokenizer.convert_tokens_to_ids("<|begin_of_text|>"),
                        ),
                    ],
                ),
            ]
        )
def write_tokenizer(
    tokenizer_path,
    input_tokenizer_path,
    special_tokens=None,
    params=None,
    push_to_hub=False,
):
    """Convert the original tokenizer and bundle it into a `PerceptionLMProcessor`.

    Builds the fast tokenizer, an image preprocessor and a video preprocessor from
    `params`, wraps them in a processor, then saves it to `tokenizer_path` (or
    pushes it to the Hub). Returns the converted tokenizer.
    """
    print("Converting the tokenizer.")
    tokenizer_class = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
    model_cfg = params["model"]
    tokenizer = Llama3Converter(
        input_tokenizer_path,
        special_tokens,
        model_cfg["max_seqlen"],
    ).converted_tokenizer
    # Resolve the image placeholder token to its id for downstream use.
    tokenizer.image_token_id = tokenizer.encode(tokenizer.image_token, add_special_tokens=False)[0]
    edge = model_cfg["vision_model"]["image_size"]
    image_preprocessor = PerceptionLMImageProcessorFast(
        image_processor_type="PerceptionLMImageProcessorFast",
        vision_input_type=params["data"]["vision_input_type"],
        tile_size=edge,
        max_num_tiles=params["data"]["max_num_tiles"],
        max_frame_tiles=1,
        size={"height": edge, "width": edge},
        do_resize=True,
        do_rescale=True,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    )
    video_preprocessor = PerceptionLMVideoProcessor(
        video_processor_type="PerceptionLMVideoProcessor",
        size={"height": edge, "width": edge},
    )
    processor = PerceptionLMProcessor(
        image_processor=image_preprocessor,
        video_processor=video_preprocessor,
        tokenizer=tokenizer,
        chat_template=CHAT_TEMPLATE,
        pooling_ratio=model_cfg["pooling_ratio"],
        patch_size=model_cfg["vision_model"]["patch_size"],
        processor_class="PerceptionLMProcessor",
    )
    if push_to_hub:
        print(f"Pushing a {tokenizer_class.__name__} to the Hub repo - {tokenizer_path}.")
        processor.push_to_hub(tokenizer_path.split(os.path.sep)[-1], private=True)
    else:
        print(f"Saving a {tokenizer_class.__name__} to {tokenizer_path}.")
        processor.save_pretrained(tokenizer_path)
    return tokenizer
def main():
    """CLI entry point: convert PerceptionLM weights and tokenizer to the HF format.

    Reads `params.json` and `tokenizer.model` from `--input_dir`, converts the
    tokenizer/processor first, then the model weights, writing both to
    `--output_dir` (or pushing to the Hub when `--push_to_hub` is set).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--input_dir",
        help="Location of Llama weights, which contains tokenizer.model and model folders",
    )
    parser.add_argument(
        "--output_dir",
        help="Location to write HF model and tokenizer",
    )
    parser.add_argument(
        "--push_to_hub",
        help="Whether or not to push the model to the hub at `output_dir` instead of saving it locally.",
        action="store_true",
        default=False,
    )
    parser.add_argument(
        "--num_shards",
        default=None,
        type=int,
        help="The number of individual shards used for the model. Does not have to be the same as the number of consolidated_xx.pth",
    )
    # BUGFIX: the previous `type=list[str]` made argparse call `list()` on the raw
    # string, splitting a single token like "<|foo|>" into individual characters.
    # Accept one-or-more space-separated strings instead.
    parser.add_argument(
        "--special_tokens",
        default=None,
        nargs="+",
        type=str,
        help="The list of special tokens that should be added to the model.",
    )
    args = parser.parse_args()
    if args.special_tokens is None:
        # no special tokens by default
        args.special_tokens = DEFAULT_SPECIAL_TOKENS.get("perception_lm", [])
    params = read_json(os.path.join(args.input_dir, "params.json"))
    spm_path = os.path.join(args.input_dir, "tokenizer.model")
    # The tokenizer must exist before the model conversion, which needs its
    # image_token_id to size/route the multimodal embeddings.
    tokenizer = write_tokenizer(
        args.output_dir,
        spm_path,
        special_tokens=args.special_tokens,
        params=params,
        push_to_hub=args.push_to_hub,
    )
    write_model(
        model_path=args.output_dir,
        input_base_path=args.input_dir,
        params=params,
        image_token_id=tokenizer.image_token_id,
        tokenizer=tokenizer,
        num_shards=args.num_shards,
        push_to_hub=args.push_to_hub,
    )
# Script entry point: run the full conversion when executed directly.
if __name__ == "__main__":
    main()
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/perception_lm/convert_perception_lm_weights_to_hf.py",
"license": "Apache License 2.0",
"lines": 543,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/perception_lm/image_processing_perception_lm_fast.py | # Copyright 2025 Meta Platforms, Inc. and the HuggingFace Inc. team. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fast Image processor class for PerceptionLM."""
import math
from functools import reduce
import numpy as np
import torch
from torchvision.transforms import functional as F
from ...image_processing_utils import (
BatchFeature,
)
from ...image_processing_utils_fast import (
BaseImageProcessorFast,
get_image_size,
group_images_by_shape,
reorder_images,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
PILImageResampling,
)
from ...processing_utils import ImagesKwargs, Unpack
from ...utils import (
TensorType,
auto_docstring,
)
class PerceptionLMImageProcessorKwargs(ImagesKwargs, total=False):
    r"""
    vision_input_type (`str`, *optional*, defaults to `"thumb+tile"`):
        Vision processing strategy. `"thumb+tile"` uses both thumbnails and multiple tiles for
        multi-scale processing, otherwise uses single tile for lower memory usage.
    tile_size (`int`, *optional*, defaults to `448`):
        Height and width dimension (in pixels) of each tile used for image processing.
    max_num_tiles (`int`, *optional*, defaults to `36`):
        Maximum number of tiles an image can be split into based on its aspect ratio.
    """

    # All keys are optional (total=False); defaults are the class attributes of
    # PerceptionLMImageProcessorFast.
    vision_input_type: str | None
    tile_size: int
    max_num_tiles: int
@auto_docstring
class PerceptionLMImageProcessorFast(BaseImageProcessorFast):
    """Fast image processor for PerceptionLM: dynamic tiling plus optional thumbnail.

    Images are resized onto the best-fitting "canvas" built from a grid of
    `tile_size` x `tile_size` tiles (at most `max_num_tiles`), then split into tiles.
    In `"thumb+tile"` mode a single-tile thumbnail is prepended to the tile stack.
    """

    # Defaults for the PerceptionLM preprocessing pipeline.
    resample = PILImageResampling.BICUBIC
    image_mean = IMAGENET_STANDARD_MEAN
    image_std = IMAGENET_STANDARD_STD
    do_resize = True
    do_center_crop = False
    do_rescale = True
    do_normalize = True
    do_convert_rgb = True
    vision_input_type = "thumb+tile"
    tile_size = 448
    max_num_tiles = 36
    size = {"width": 448, "height": 448}  # for backward compatibility in tests
    valid_kwargs = PerceptionLMImageProcessorKwargs

    def __init__(self, **kwargs: Unpack[PerceptionLMImageProcessorKwargs]) -> None:
        super().__init__(**kwargs)

    @auto_docstring
    def preprocess(self, images, **kwargs: Unpack[PerceptionLMImageProcessorKwargs]) -> BatchFeature:
        return super().preprocess(images, **kwargs)

    @staticmethod
    def _factors(n: int):
        """Return all factors of a number."""
        # Pair each divisor i <= sqrt(n) with its cofactor n // i.
        return set(
            reduce(
                list.__add__,
                ([i, n // i] for i in range(1, int(n**0.5) + 1) if n % i == 0),
            )
        )

    def _find_supported_aspect_ratios(self):
        """
        This function computes all the allowed aspect ratios for a fixed
        number of input chunks. The order of returned items matters for the result of `_fit_image_to_canvas` function.
        If tie exists in `_fit_image_to_canvas`, the latter in `_find_supported_aspect_ratios` wins.

        For example, with `num_tiles=5`, it will return:
        {
            0.2: [(1, 5)],
            5.0: [(5, 1)],
            0.25: [(1, 4)],
            1.0: [(2, 2), (1, 1)],
            4.0: [(4, 1)],
            0.3333333333333333: [(1, 3)],
            3.0: [(3, 1)],
            0.5: [(1, 2)],
            2.0: [(2, 1)]
        }
        """
        asp_dict = {}
        # Iterate tile counts from largest to smallest so bigger grids are
        # registered first for each aspect-ratio key.
        for chunk_size in range(self.max_num_tiles, 0, -1):
            _factors = sorted(self._factors(chunk_size))
            _asp_ratios = [(x, chunk_size // x) for x in _factors]
            for ratio in _asp_ratios:
                k = ratio[0] / ratio[1]
                if k not in asp_dict:
                    asp_dict[k] = [ratio]
                else:
                    asp_dict[k].append(ratio)
        return asp_dict

    def _get_image_height_width(
        self, image_width: int, image_height: int, target_width: int, target_height: int
    ) -> tuple[int, int]:
        """
        Given image width, height and target width, height for the canvas, return the dimensions of how the image would be resized
        with aspect ratio preservation.
        """
        # NOTE(review): the non-floored dimension may be returned as a float;
        # callers only use these values for comparisons, not for actual resizing.
        scale = image_width / image_height

        if scale > 1.0:
            # Width is larger than height

            # Rescaling factor is the minimum of the two scaling factors. Else one side would be outside of the canvas.
            rescaling_factor = min(target_width / image_width, target_height / image_height)

            # Set new width to target width and height to the rescaled height.
            new_w = rescaling_factor * image_width
            new_h = math.floor(new_w / scale)

        else:
            # Height is larger than width

            # Rescaling factor is the minimum of the two scaling factors. Else one side would be outside of the canvas.
            rescaling_factor = min(target_width / image_width, target_height / image_height)

            # Set new height to target height and width to the rescaled width.
            new_h = rescaling_factor * image_height
            new_w = math.floor(new_h * scale)

        return new_w, new_h

    def _fit_image_to_canvas(self, img_width: int, img_height: int, tile_size: int):
        """
        Given an image width, height and target number of chunks this function will see if the image
        can be fit into any of the canvases that can be build from arranging the tiles in a grid.
        If the image can be fit onto several canvases, it will return the canvas where the shorter edge
        of the image will be largest.
        """
        # Initialize the optimal canvas to None. If no canvas is found where image fits, function returns None.
        optimal_canvas = None
        optimal_image_width_height = None

        scale = img_width / img_height

        # Gather all potential supported image resolutions and iterate through them to find best match
        potential_arrangements = [
            item for sublist in self._find_supported_aspect_ratios().values() for item in sublist
        ]
        for n_w, n_h in potential_arrangements:
            # Compute the canvas size
            canvas_width, canvas_height = n_w * tile_size, n_h * tile_size

            # Check if image can fit into the canvas without downsampling
            if canvas_width >= img_width and canvas_height >= img_height:
                # If we did not find a good canvas yet, we will use the current one
                if optimal_canvas is None:
                    # Set optimal canvas and determine the actual image height and width in the canvas with aspect ratio preserving resampling
                    optimal_canvas = (n_w, n_h)
                    optimal_image_width_height = self._get_image_height_width(
                        image_width=img_width,
                        image_height=img_height,
                        target_width=n_w * tile_size,
                        target_height=n_h * tile_size,
                    )
                else:
                    # If we already found an optimal canvas before, we will check if the shorter edge of the image will be larger than the current optimal canvas.
                    # This means we can potentially upsample the image resolution which is beneficial to performance.
                    image_width_height = self._get_image_height_width(
                        image_width=img_width,
                        image_height=img_height,
                        target_width=n_w * tile_size,
                        target_height=n_h * tile_size,
                    )
                    # Llama3V dynamic tiling. Prioritize biggest canvas.
                    if (scale < 1.0 and (image_width_height[0] >= optimal_image_width_height[0])) or (
                        scale >= 1.0 and (image_width_height[1] >= optimal_image_width_height[1])
                    ):
                        optimal_canvas = (n_w, n_h)
                        optimal_image_width_height = image_width_height
        return optimal_canvas

    def _find_closest_aspect_ratio(self, img_width: int, img_height: int, tile_size: int) -> tuple:
        """
        Given an image width, height and target number of chunks
        this function will find the closest supported aspect ratio.
        """
        target_aspect_ratio = img_width / img_height
        asp_dict = self._find_supported_aspect_ratios()
        closest_aspect_ratio = None
        if target_aspect_ratio >= 1:
            # Landscape (or square): only consider ratios not wider than the image.
            closest_aspect_ratio = min(
                [k for k in asp_dict if k <= target_aspect_ratio],
                key=lambda x: abs(x - target_aspect_ratio),
            )
            tiles_given_aspect_ratio = asp_dict[closest_aspect_ratio]
            # select largest width
            return max(tiles_given_aspect_ratio, key=lambda x: x[0])
        else:
            # Portrait: compare inverted ratios so distance is measured in height terms.
            closest_aspect_ratio = min(
                [k for k in asp_dict if k > target_aspect_ratio],
                key=lambda x: abs(1 / x - 1 / target_aspect_ratio),
            )
            tiles_given_aspect_ratio = asp_dict[closest_aspect_ratio]
            # select largest height
            return max(tiles_given_aspect_ratio, key=lambda x: x[1])

    def _split(self, image: torch.Tensor, ncw: int, nch: int) -> torch.Tensor:
        """Split a batch of canvas images into an (ncw * nch) grid of tiles."""
        # Split image into number of required tiles (width x height)
        batch_size, num_channels, height, width = image.size()
        image = image.view(batch_size, num_channels, nch, height // nch, ncw, width // ncw)
        # Permute dimensions to reorder the axes
        image = image.permute(0, 2, 4, 1, 3, 5).contiguous()
        # Reshape into the desired output shape (batch_size * 4, num_channels, width/2, height/2)
        image = image.view(batch_size, ncw * nch, num_channels, height // nch, width // ncw)
        return image

    def resize(
        self,
        image: np.ndarray,
        tile_size: int,
        max_num_tiles: int,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        input_data_format: str | ChannelDimension | None = None,
    ):
        """Resize `image` onto the best tiling canvas; returns (resized_image, (ncw, nch))."""
        height, width = get_image_size(image, channel_dim=input_data_format)
        if max_num_tiles > 1:
            aspect_ratio = self._fit_image_to_canvas(img_width=width, img_height=height, tile_size=tile_size)
            if aspect_ratio is None:
                # If we did not find a canvas, we have to find the closest aspect ratio and downsample the image
                aspect_ratio = self._find_closest_aspect_ratio(img_width=width, img_height=height, tile_size=tile_size)
        else:
            aspect_ratio = (1, 1)
        new_width, new_height = aspect_ratio[0] * tile_size, aspect_ratio[1] * tile_size
        image = F.resize(image, (new_height, new_width), interpolation=resample)
        return image, aspect_ratio

    def _preprocess(
        self,
        images: list["torch.Tensor"],
        do_resize: bool,
        do_rescale: bool | None,
        rescale_factor: int | float | None,
        do_normalize: bool | None,
        image_mean: float | list[float] | None,
        image_std: float | list[float] | None,
        vision_input_type: str,
        tile_size: int,
        max_num_tiles: int,
        return_tensors: str | TensorType | None,
        disable_grouping: bool,
        **kwargs: Unpack[PerceptionLMImageProcessorKwargs],
    ) -> BatchFeature:
        """Resize/tile, then rescale and normalize all images; returns `pixel_values`."""
        # Group images by size for batched transformation
        grouped_images, grouped_images_index = group_images_by_shape(images, disable_grouping=disable_grouping)
        resized_images_grouped = {}
        for shape, stacked_images in grouped_images.items():
            if do_resize:
                if vision_input_type == "thumb+tile":
                    # Single-tile global view plus a grid of high-resolution tiles.
                    thumbnails, _ = self.resize(stacked_images, tile_size, max_num_tiles=1)
                    images_for_tiling, (tiles_w, tiles_h) = self.resize(
                        stacked_images, tile_size, max_num_tiles=max_num_tiles
                    )
                    image_tiles = self._split(images_for_tiling, tiles_w, tiles_h)
                    # Prepend the thumbnail along the tiles dimension.
                    stacked_images = torch.cat([thumbnails.unsqueeze(1), image_tiles], dim=1)
                else:  # vanilla single tile for low memory devices
                    stacked_images, _ = self.resize(stacked_images, tile_size, max_num_tiles=1)
            resized_images_grouped[shape] = stacked_images
        resized_images = reorder_images(resized_images_grouped, grouped_images_index)

        # Re-group (shapes may have changed after resizing) before pixel-value ops.
        grouped_images, grouped_images_index = group_images_by_shape(resized_images, disable_grouping=disable_grouping)
        processed_images_grouped = {}
        for shape, stacked_images in grouped_images.items():
            # Fused rescale and normalize
            stacked_images = self.rescale_and_normalize(
                stacked_images,
                do_rescale,
                rescale_factor,
                do_normalize,
                image_mean,
                image_std,
            )
            processed_images_grouped[shape] = stacked_images

        processed_images = reorder_images(processed_images_grouped, grouped_images_index)
        processed_images = [p[None] if p.ndim == 3 else p for p in processed_images]  # add tiles dimension if needed
        return BatchFeature(data={"pixel_values": processed_images}, tensor_type=return_tensors)
# Public API of this module.
__all__ = ["PerceptionLMImageProcessorFast"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/perception_lm/image_processing_perception_lm_fast.py",
"license": "Apache License 2.0",
"lines": 279,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/perception_lm/modular_perception_lm.py | # Copyright 2025 Meta Platforms, Inc. and the HuggingFace Inc. team. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch PerceptionLM model."""
import math
import torch
import torch.nn.functional as F
from torch import nn
from ...cache_utils import Cache
from ...modeling_outputs import BaseModelOutputWithPooling
from ...processing_utils import Unpack
from ...utils import (
TransformersKwargs,
auto_docstring,
can_return_tuple,
logging,
torch_compilable_check,
)
from ..auto import AutoModel
from ..llava.modeling_llava import (
LlavaCausalLMOutputWithPast,
LlavaForConditionalGeneration,
LlavaModel,
LlavaModelOutputWithPast,
LlavaPreTrainedModel,
)
from .configuration_perception_lm import PerceptionLMConfig
logger = logging.get_logger(__name__)
class PerceptionLMAdaptiveAvgPooling(nn.Module):
    """Downsample a square grid of tokens by `pooling_ratio` using 2D adaptive average pooling.

    Input and output are token sequences of shape (batch, num_tokens, channels);
    `num_tokens` must be a perfect square.
    """

    def __init__(self, pooling_ratio=2):
        super().__init__()
        self.pooling_ratio = pooling_ratio

    def forward(self, hidden_states):
        batch_size, num_tokens, channels = hidden_states.shape
        grid_side = int(math.sqrt(num_tokens))
        if grid_side * grid_side != num_tokens:
            raise ValueError(f"num_tokens {num_tokens} is expected to be a square number")

        pooled_side = grid_side // self.pooling_ratio
        # (B, N, C) -> (B, C, H, H): channels-first layout for the 2D pooling op.
        grid = hidden_states.transpose(1, 2).reshape(batch_size, channels, grid_side, grid_side)
        pooled = F.adaptive_avg_pool2d(grid, (pooled_side, pooled_side))
        # (B, C, h, w) -> (B, h*w, C): back to a token sequence.
        return pooled.flatten(2).transpose(1, 2)
class PerceptionLMMultiModalProjector(nn.Module):
    """Project vision-tower embeddings into the language model's hidden space.

    Two linear layers with a GELU in between, followed by optional adaptive
    average pooling that shrinks the visual token count when
    `config.projector_pooling_ratio > 1`.
    """

    def __init__(self, config: PerceptionLMConfig):
        super().__init__()
        vision_dim = config.vision_config.model_args["embed_dim"]
        text_dim = config.text_config.hidden_size
        self.linear_1 = nn.Linear(
            in_features=vision_dim,
            out_features=text_dim,
            bias=True,
        )
        self.gelu = nn.GELU()
        self.linear_2 = nn.Linear(
            in_features=text_dim,
            out_features=text_dim,
            bias=True,
        )
        if config.projector_pooling_ratio > 1:
            self.pooling = PerceptionLMAdaptiveAvgPooling(config.projector_pooling_ratio)
        else:
            self.pooling = nn.Identity()

    def forward(self, features):
        hidden = features.permute(1, 0, 2)  # NLD -> LND
        hidden = self.linear_2(self.gelu(self.linear_1(hidden)))
        hidden = hidden.permute(1, 0, 2)  # LND -> NLD
        return self.pooling(hidden)
class PerceptionLMPreTrainedModel(LlavaPreTrainedModel):
    # Checkpoint weights are rooted under the `model` attribute of the wrapper classes.
    base_model_prefix = "model"
class PerceptionLMModelOutputWithPast(LlavaModelOutputWithPast):
    r"""
    past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
        It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).

        Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
        `past_key_values` input) to speed up sequential decoding.
    image_hidden_states (`torch.FloatTensor`, *optional*):
        A `torch.FloatTensor` of size `(batch_size, num_images, sequence_length, hidden_size)`.
        Image hidden_states of the model produced by the vision encoder and after projecting the last hidden state.
    video_hidden_states (`torch.FloatTensor`, *optional*):
        A `torch.FloatTensor` of size `(batch_size, num_videos, sequence_length, hidden_size)`.
        Video hidden_states of the model produced by the vision encoder and after projecting the last hidden state.
    """

    # Extends the Llava output with projected video features; stays None when no videos are passed.
    video_hidden_states: torch.FloatTensor | None = None
class PerceptionLMCausalLMOutputWithPast(LlavaCausalLMOutputWithPast):
    r"""
    loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
        Language modeling loss (for next-token prediction).
    logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
        Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
    past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
        It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).

        Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
        `past_key_values` input) to speed up sequential decoding.
    image_hidden_states (`torch.FloatTensor`, *optional*):
        A `torch.FloatTensor` of size `(batch_size, num_images, sequence_length, hidden_size)`.
        Image hidden_states of the model produced by the vision encoder and after projecting the last hidden state.
    video_hidden_states (`torch.FloatTensor`, *optional*):
        A `torch.FloatTensor` of size `(batch_size, num_videos, sequence_length, hidden_size)`.
        Video hidden_states of the model produced by the vision encoder and after projecting the last hidden state.
    """

    # Extends the Llava causal-LM output with projected video features; stays None when no videos are passed.
    video_hidden_states: torch.FloatTensor | None = None
@auto_docstring
class PerceptionLMModel(LlavaModel):
    """PerceptionLM base model: vision tower + multimodal projector + language model.

    Image/video features are projected and scattered into the text embedding
    sequence at the positions of the `<|image|>` / `<|video|>` placeholder tokens
    before running the language model.
    """

    _checkpoint_conversion_mapping = {}

    def __init__(self, config: PerceptionLMConfig):
        super().__init__(config)
        self.vision_tower = AutoModel.from_config(config.vision_config)
        self.multi_modal_projector = PerceptionLMMultiModalProjector(config)
        self.language_model = AutoModel.from_config(config.text_config)

    @can_return_tuple
    @auto_docstring(
        custom_intro="Obtains image last hidden states from the vision tower and apply multimodal projection."
    )
    def get_image_features(
        self,
        pixel_values: torch.FloatTensor,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple | BaseModelOutputWithPooling:
        # Merge the leading per-image tiles dimension into the batch before encoding.
        image_outputs = self.vision_tower(pixel_values.flatten(0, 1), return_dict=True, **kwargs)
        last_hidden_state = image_outputs.last_hidden_state
        if self.config.vision_use_cls_token:
            # Drop the CLS token; only patch tokens are projected.
            last_hidden_state = last_hidden_state[:, 1:, :]
        image_features = self.multi_modal_projector(last_hidden_state)
        # Expose the projected features through the standard pooler_output slot.
        image_outputs.pooler_output = image_features
        return image_outputs

    def get_placeholder_mask(
        self,
        input_ids: torch.LongTensor,
        inputs_embeds: torch.FloatTensor,
        image_features: torch.FloatTensor | None = None,
        video_features: torch.FloatTensor | None = None,
    ):
        """
        Obtains multimodal placeholder mask from `input_ids` or `inputs_embeds`, and checks that the placeholder token count is
        equal to the length of multimodal features. If the lengths are different, an error is raised.
        """
        if input_ids is None:
            # No token ids available: identify placeholders by comparing each embedding
            # against the embedding of the special token id.
            special_image_mask = inputs_embeds == self.get_input_embeddings()(
                torch.tensor(self.config.image_token_id, dtype=torch.long, device=inputs_embeds.device)
            )
            special_image_mask = special_image_mask.all(-1)
            special_video_mask = inputs_embeds == self.get_input_embeddings()(
                torch.tensor(self.config.video_token_id, dtype=torch.long, device=inputs_embeds.device)
            )
            special_video_mask = special_video_mask.all(-1)
        else:
            special_image_mask = input_ids == self.config.image_token_id
            special_video_mask = input_ids == self.config.video_token_id

        n_image_tokens = special_image_mask.sum()
        # Broadcast the per-token mask over the hidden dimension for masked_scatter.
        special_image_mask = special_image_mask.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device)
        if image_features is not None:
            torch_compilable_check(
                inputs_embeds[special_image_mask].numel() == image_features.numel(),
                f"Image features and image tokens do not match, tokens: {n_image_tokens}, features: {image_features.size()[:-1].numel()}",
            )

        n_video_tokens = special_video_mask.sum()
        special_video_mask = special_video_mask.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device)
        if video_features is not None:
            torch_compilable_check(
                inputs_embeds[special_video_mask].numel() == video_features.numel(),
                f"Video features and video tokens do not match, tokens: {n_video_tokens}, features: {video_features.size()[:-1].numel()}",
            )
        return special_image_mask, special_video_mask

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        pixel_values: torch.FloatTensor | None = None,
        pixel_values_videos: torch.FloatTensor | None = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: Cache | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        use_cache: bool | None = None,
        output_attentions: bool | None = None,
        output_hidden_states: bool | None = None,
        cache_position: torch.LongTensor | None = None,
        logits_to_keep: int | torch.Tensor = 0,
        **lm_kwargs,
    ) -> tuple | PerceptionLMModelOutputWithPast:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

        if (pixel_values is not None or pixel_values_videos is not None) and inputs_embeds is not None:
            raise ValueError(
                "You cannot specify both (pixel_values or pixel_values_videos) and inputs_embeds at the same time, and must specify either one"
            )

        if inputs_embeds is None:
            inputs_embeds = self.get_input_embeddings()(input_ids)

        # Replace image placeholder embeddings with the projected image features.
        image_features = None
        if pixel_values is not None:
            image_features = self.get_image_features(pixel_values=pixel_values, return_dict=True).pooler_output
            image_features = image_features.to(inputs_embeds.device, dtype=inputs_embeds.dtype)
            special_image_mask, _ = self.get_placeholder_mask(
                input_ids, inputs_embeds=inputs_embeds, image_features=image_features
            )
            inputs_embeds = inputs_embeds.masked_scatter(special_image_mask, image_features)

        # Videos reuse the same vision tower/projector path as images.
        video_features = None
        if pixel_values_videos is not None:
            video_features = self.get_image_features(pixel_values=pixel_values_videos, return_dict=True).pooler_output
            video_features = video_features.to(inputs_embeds.device, dtype=inputs_embeds.dtype)
            _, special_video_mask = self.get_placeholder_mask(
                input_ids, inputs_embeds=inputs_embeds, video_features=video_features
            )
            inputs_embeds = inputs_embeds.masked_scatter(special_video_mask, video_features)

        outputs = self.language_model(
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=True,
            cache_position=cache_position,
            logits_to_keep=logits_to_keep,
            **lm_kwargs,
        )
        return PerceptionLMModelOutputWithPast(
            last_hidden_state=outputs.last_hidden_state,
            hidden_states=outputs.hidden_states,
            past_key_values=outputs.past_key_values,
            attentions=outputs.attentions,
            image_hidden_states=image_features if pixel_values is not None else None,
            video_hidden_states=(video_features if pixel_values_videos is not None else None),
        )
@auto_docstring
class PerceptionLMForConditionalGeneration(LlavaForConditionalGeneration):
    """PerceptionLM model with a language-modeling head for conditional generation."""

    _checkpoint_conversion_mapping = {}

    def prepare_inputs_for_generation(
        self,
        input_ids,
        past_key_values=None,
        inputs_embeds=None,
        pixel_values=None,
        pixel_values_videos=None,
        attention_mask=None,
        cache_position=None,
        logits_to_keep=None,
        is_first_iteration=False,
        **kwargs,
    ):
        # Overwritten -- in specific circumstances we don't want to forward image inputs to the model
        model_inputs = super().prepare_inputs_for_generation(
            input_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            attention_mask=attention_mask,
            cache_position=cache_position,
            logits_to_keep=logits_to_keep,
            is_first_iteration=is_first_iteration,
            **kwargs,
        )

        if is_first_iteration or not kwargs.get("use_cache", True):
            # Pixel values are used only in the first iteration if available
            # In subsequent iterations, they are already merged with text and cached
            # NOTE: first iteration doesn't have to be prefill, it can be the first
            # iteration with a question and cached system prompt (continue generate from cache)
            model_inputs["pixel_values"] = pixel_values
            model_inputs["pixel_values_videos"] = pixel_values_videos

        return model_inputs

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        pixel_values: torch.FloatTensor | None = None,
        pixel_values_videos: torch.FloatTensor | None = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: Cache | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        labels: torch.LongTensor | None = None,
        use_cache: bool | None = None,
        output_attentions: bool | None = None,
        output_hidden_states: bool | None = None,
        cache_position: torch.LongTensor | None = None,
        logits_to_keep: int | torch.Tensor = 0,
        **lm_kwargs,
    ) -> tuple | PerceptionLMCausalLMOutputWithPast:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Example:

        ```python
        from transformers import AutoProcessor, AutoModelForImageTextToText
        from huggingface_hub import hf_hub_download

        MODEL_PATH = "facebook/Perception-LM-1B"
        processor = AutoProcessor.from_pretrained(MODEL_PATH, use_fast=True)
        model = AutoModelForImageTextToText.from_pretrained(MODEL_PATH).to("cuda")
        test_image_file = hf_hub_download(
            repo_id="shumingh/perception_lm_test_images",
            filename="14496_0.PNG",
            repo_type="dataset",
        )
        conversation = [
            {
                "role": "user",
                "content": [
                    {
                        "type": "image",
                        "url": test_image_file,
                    },
                    {"type": "text", "text": "Describe the bar plot in the image."},
                ],
            }
        ]
        inputs = processor.apply_chat_template(
            [conversation],
            add_generation_prompt=True,
            tokenize=True,
            return_dict=True,
            return_tensors="pt",
        )
        inputs = inputs.to(model.device)
        generate_ids = model.generate(**inputs, max_new_tokens=256)
        input_length = inputs["input_ids"].shape[1]
        generate_ids_without_inputs = generate_ids[:, input_length:]

        for output in processor.batch_decode(generate_ids_without_inputs, skip_special_tokens=True):
            print(output)
        ```"""
        outputs = self.model(
            input_ids=input_ids,
            pixel_values=pixel_values,
            pixel_values_videos=pixel_values_videos,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            cache_position=cache_position,
            logits_to_keep=logits_to_keep,
            **lm_kwargs,
        )
        hidden_states = outputs[0]
        # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
        slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
        logits = self.lm_head(hidden_states[:, slice_indices, :])

        loss = None
        if labels is not None:
            loss = self.loss_function(
                logits=logits,
                labels=labels,
                vocab_size=self.config.text_config.vocab_size,
                **lm_kwargs,
            )
        return PerceptionLMCausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            image_hidden_states=outputs.image_hidden_states,
            video_hidden_states=outputs.video_hidden_states,
        )

    # Image features are computed inside PerceptionLMModel; the Llava wrapper-level
    # helper is intentionally disabled here.
    def get_image_features(self, **kwargs):
        raise AttributeError("Not needed for PerceptionLM")
# Public symbols exported by this module.
__all__ = [
    "PerceptionLMForConditionalGeneration",
    "PerceptionLMPreTrainedModel",
    "PerceptionLMModel",
]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/perception_lm/modular_perception_lm.py",
"license": "Apache License 2.0",
"lines": 383,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/perception_lm/processing_perception_lm.py | # Copyright 2025 Meta Platforms, Inc. and the HuggingFace Inc. team. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Processor class for PerceptionLM.
"""
from collections.abc import Iterable
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...image_utils import ImageInput, get_image_size, to_numpy_array
from ...processing_utils import MultiModalData, ProcessingKwargs, ProcessorMixin, Unpack
from ...tokenization_utils_base import PreTokenizedInput, TextInput
from ...utils import auto_docstring, logging
from ...video_utils import VideoInput
logger = logging.get_logger(__name__)
class PerceptionLMProcessorKwargs(ProcessingKwargs, total=False):
    """Keyword-argument schema for `PerceptionLMProcessor` with per-modality defaults."""

    # Defaults applied when the caller does not override these keys.
    _defaults = {
        "text_kwargs": dict(
            padding=False,
            return_mm_token_type_ids=False,
        ),
    }
@auto_docstring
class PerceptionLMProcessor(ProcessorMixin):
    def __init__(
        self,
        video_processor=None,
        image_processor=None,
        tokenizer=None,
        patch_size=None,
        chat_template=None,
        pooling_ratio=2,
        **kwargs,
    ):
        r"""
        patch_size (`int`, *optional*):
            Patch size from the vision tower.
        pooling_ratio (`int`, *optional*, defaults to 2):
            Pooling ratio for vision tokens. If not 1, 2D adaptive pooling is applied over projected vision tokens.
        """
        self.patch_size = patch_size
        self.pooling_ratio = pooling_ratio
        # Cache the special media tokens/ids locally so they are available as
        # attributes without reaching into the tokenizer on every use.
        self.image_token = tokenizer.image_token
        self.video_token = tokenizer.video_token
        self.image_token_id = tokenizer.image_token_id
        self.video_token_id = tokenizer.video_token_id
        super().__init__(video_processor, image_processor, tokenizer, chat_template=chat_template)

    @auto_docstring
    def __call__(
        self,
        images: ImageInput | None = None,
        text: TextInput | PreTokenizedInput | list[TextInput] | list[PreTokenizedInput] = None,
        videos: VideoInput | None = None,
        **kwargs: Unpack[PerceptionLMProcessorKwargs],
    ) -> BatchFeature:
        r"""
        Returns:
            [`BatchFeature`]: A [`BatchFeature`] with the following fields:
            - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is provided.
            - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
              `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is provided).
            - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is provided.
            - **pixel_values_videos** -- Video pixel values to be fed to a model. Returned when `videos` is provided.
        """
        if text is None:
            raise ValueError(
                "You have to specify at least `text` input. Optionally, you can also specify `images` or `videos`."
            )
        output_kwargs = self._merge_kwargs(
            PerceptionLMProcessorKwargs,
            tokenizer_init_kwargs=self.tokenizer.init_kwargs,
            **kwargs,
        )
        if images is not None:
            image_inputs = self.image_processor(images=images, **output_kwargs["images_kwargs"])
        else:
            image_inputs = {}
        if videos is not None:
            videos_inputs = self.video_processor(videos, **output_kwargs["videos_kwargs"])
        else:
            videos_inputs = {}
        if isinstance(text, str):
            text = [text]
        elif not isinstance(text, list) and not isinstance(text[0], str):
            raise TypeError("Invalid input text. Please provide a string, or a list of strings")
        # try to expand inputs in processing if we have the necessary parts
        prompt_strings = []
        # Shared iterators let each placeholder token consume the next processed
        # media item in order, across all samples in the batch.
        pixel_values = iter(image_inputs.get("pixel_values", []))
        pixel_values_videos = iter(videos_inputs.get("pixel_values_videos", []))
        for sample in text:
            # Replace the media token with the expanded media token sequence
            sample = self._expand_media_tokens(sample, self.tokenizer.image_token, pixel_values)
            sample = self._expand_media_tokens(sample, self.tokenizer.video_token, pixel_values_videos)
            prompt_strings.append(sample)
        return_tensors = output_kwargs["text_kwargs"].pop("return_tensors", None)
        return_mm_token_type_ids = output_kwargs["text_kwargs"].pop("return_mm_token_type_ids", False)
        # Tokenize into python lists first; tensor conversion happens once in BatchFeature below.
        text_inputs = self.tokenizer(prompt_strings, **output_kwargs["text_kwargs"], return_tensors=None)
        self._check_special_mm_tokens(prompt_strings, text_inputs, modalities=["image", "video"])
        if return_mm_token_type_ids:
            array_ids = np.array(text_inputs["input_ids"])
            mm_token_type_ids = np.zeros_like(text_inputs["input_ids"])
            # NOTE(review): only image tokens are marked with type 1 here; video tokens
            # keep type 0 — confirm this is intentional.
            mm_token_type_ids[array_ids == self.image_token_id] = 1
            text_inputs["mm_token_type_ids"] = mm_token_type_ids.tolist()
        return BatchFeature(data={**text_inputs, **image_inputs, **videos_inputs}, tensor_type=return_tensors)

    def _expand_media_tokens(self, sample, media_token: str, media_iter: Iterable):
        """Replace each occurrence of `media_token` in `sample` with one copy of the token
        per vision token the corresponding processed media item will produce."""
        media_count = sample.count(media_token)
        if media_count > 0:
            media_list = [next(media_iter) for _ in range(media_count)]
            sample_splits = sample.split(media_token)
            media_token_list = []
            for media in media_list:
                height, width = get_image_size(to_numpy_array(media))
                # Leading dim of the processed media is the tile/frame count.
                num_tiles = media.shape[0]
                num_media_tokens = (
                    (height // self.patch_size // self.pooling_ratio)
                    * (width // self.patch_size // self.pooling_ratio)
                    * num_tiles
                )
                media_token_list.append(num_media_tokens)
            sample = ""
            # Re-interleave the text chunks with the expanded token runs.
            for i, num_media_tokens in enumerate(media_token_list):
                sample += sample_splits[i]
                sample += media_token * num_media_tokens
            sample += sample_splits[-1]
        return sample

    def _get_num_multimodal_tokens(self, image_sizes=None, **kwargs):
        """
        Computes the number of placeholder tokens needed for multimodal inputs with the given sizes.
        Args:
            image_sizes (`list[list[int]]`, *optional*):
                The input sizes formatted as (height, width) per each image.
        Returns:
            `MultiModalData`: A `MultiModalData` object holding number of tokens per each of the provided
            input modalities, along with other useful data.
        """
        vision_data = {}
        if image_sizes is not None:
            # Copy before merging caller kwargs: `.get` returns the shared class-level
            # dict when the key exists, and `update` would then mutate the defaults
            # for every subsequent call.
            images_kwargs = dict(PerceptionLMProcessorKwargs._defaults.get("images_kwargs", {}))
            images_kwargs.update(kwargs)
            tile_size = images_kwargs.get("tile_size", None) or self.image_processor.tile_size
            vision_input_type = images_kwargs.get("vision_input_type", None) or self.image_processor.vision_input_type
            num_image_tokens = []
            num_image_patches = []
            for height, width in image_sizes:
                if vision_input_type == "thumb+tile":
                    aspect_ratio = self.image_processor._fit_image_to_canvas(
                        img_width=width, img_height=height, tile_size=tile_size
                    )
                    if aspect_ratio is None:
                        aspect_ratio = self.image_processor._find_closest_aspect_ratio(
                            img_width=width, img_height=height, tile_size=tile_size
                        )
                    num_tiles = aspect_ratio[0] * aspect_ratio[1] + 1  # base image and tiles
                else:
                    num_tiles = 1
                num_image_tokens.append(
                    (tile_size // self.patch_size // self.pooling_ratio)
                    * (tile_size // self.patch_size // self.pooling_ratio)
                    * num_tiles
                )
                num_image_patches.append(num_tiles)
            vision_data.update({"num_image_tokens": num_image_tokens, "num_image_patches": num_image_patches})
        return MultiModalData(**vision_data)
__all__ = ["PerceptionLMProcessor"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/perception_lm/processing_perception_lm.py",
"license": "Apache License 2.0",
"lines": 173,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/perception_lm/video_processing_perception_lm.py | # Copyright 2025 Meta Platforms, Inc. and the HuggingFace Inc. team. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Video processor class for PerceptionLM."""
from ...image_utils import IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, PILImageResampling
from ...video_processing_utils import BaseVideoProcessor
class PerceptionLMVideoProcessor(BaseVideoProcessor):
    """Video processor for PerceptionLM.

    The class body only declares configuration defaults; the processing logic
    itself lives in `BaseVideoProcessor`.
    """

    resample = PILImageResampling.BICUBIC  # interpolation used when resizing
    image_mean = IMAGENET_STANDARD_MEAN  # per-channel normalization mean
    image_std = IMAGENET_STANDARD_STD  # per-channel normalization std
    size = {"height": 448, "width": 448}  # target frame size (consumed by the base class resize step)
    do_resize = True
    do_center_crop = False  # no cropping: frames are resized directly to `size`
    do_rescale = True
    do_normalize = True
    do_convert_rgb = True
__all__ = ["PerceptionLMVideoProcessor"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/perception_lm/video_processing_perception_lm.py",
"license": "Apache License 2.0",
"lines": 26,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:tests/models/perception_lm/test_image_processing_perception_lm.py | # Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from transformers.image_utils import IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_torchvision_available, is_vision_available
from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
if is_torchvision_available():
from transformers import PerceptionLMImageProcessorFast
class PerceptionLMImageProcessingTester:
    """Builds image-processor kwargs and synthetic image inputs for the
    PerceptionLM image-processing tests."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        tile_size=16,
        do_normalize=True,
        image_mean=IMAGENET_STANDARD_MEAN,
        image_std=IMAGENET_STANDARD_STD,
        do_convert_rgb=True,
        max_num_tiles=4,
        vision_input_type="thumb+tile",
        resample=Image.Resampling.BICUBIC,  # dummy value
        size=None,  # dummy value; defaults to {"shortest_edge": 20}
    ):
        super().__init__()
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.tile_size = tile_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb
        self.max_num_tiles = max_num_tiles
        self.vision_input_type = vision_input_type
        self.resample = resample
        # A fresh dict per instance avoids the shared-mutable-default-argument
        # pitfall (the previous `size={"shortest_edge": 20}` default was shared
        # across all instances).
        self.size = {"shortest_edge": 20} if size is None else size

    def prepare_image_processor_dict(self):
        # Kwargs used to instantiate the image processor under test.
        return {
            "do_resize": self.do_resize,
            "tile_size": self.tile_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_convert_rgb": self.do_convert_rgb,
            "max_num_tiles": self.max_num_tiles,
            "vision_input_type": self.vision_input_type,
            "resample": self.resample,
            "size": self.size,
        }

    def expected_output_image_shape(self, images):
        # NOTE(review): `self.crop_size` is never assigned anywhere in this tester, so
        # calling this raises AttributeError. Looks like a leftover from the CLIP tester
        # this was adapted from — confirm whether `tile_size` was intended here.
        return self.num_channels, self.crop_size["height"], self.crop_size["width"]

    # Copied from tests.models.clip.test_image_processing_clip.CLIPImageProcessingTester.prepare_image_inputs
    def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        return prepare_image_inputs(
            batch_size=self.batch_size,
            num_channels=self.num_channels,
            min_resolution=self.min_resolution,
            max_resolution=self.max_resolution,
            equal_resolution=equal_resolution,
            numpify=numpify,
            torchify=torchify,
        )
@require_torch
@require_vision
class PerceptionLMImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase):
    """Runs the common image-processing test suite plus PerceptionLM-specific
    shape checks against the fast image processor."""

    fast_image_processing_class = PerceptionLMImageProcessorFast if is_torchvision_available() else None
    test_slow_image_processor = False  # no slow (PIL-based) processor exists for this model

    def setUp(self):
        super().setUp()
        self.image_processor_tester = PerceptionLMImageProcessingTester(self)

    @property
    # Copied from tests.models.clip.test_image_processing_clip.CLIPImageProcessingTest.image_processor_dict
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        for image_processing_class in self.image_processor_list:
            image_processing = image_processing_class(**self.image_processor_dict)
            self.assertTrue(hasattr(image_processing, "do_resize"))
            self.assertTrue(hasattr(image_processing, "tile_size"))
            self.assertTrue(hasattr(image_processing, "do_normalize"))
            self.assertTrue(hasattr(image_processing, "image_mean"))
            self.assertTrue(hasattr(image_processing, "image_std"))
            self.assertTrue(hasattr(image_processing, "do_convert_rgb"))
            self.assertTrue(hasattr(image_processing, "max_num_tiles"))
            self.assertTrue(hasattr(image_processing, "vision_input_type"))

    def test_image_processor_from_dict_with_kwargs(self):
        for image_processing_class in self.image_processor_list:
            image_processor = image_processing_class.from_dict(self.image_processor_dict)
            self.assertEqual(image_processor.tile_size, 16)
            self.assertEqual(image_processor.max_num_tiles, 4)
            self.assertEqual(image_processor.vision_input_type, "thumb+tile")
            # Explicit kwargs must override the values coming from the dict.
            image_processor = image_processing_class.from_dict(
                self.image_processor_dict, tile_size=42, max_num_tiles=9
            )
            self.assertEqual(image_processor.tile_size, 42)
            self.assertEqual(image_processor.max_num_tiles, 9)
            self.assertEqual(image_processor.vision_input_type, "thumb+tile")

    # Expected shapes below are (batch, num_tiles, channels, tile, tile); with
    # max_num_tiles=4 and "thumb+tile" input, 5 = 4 tiles + 1 thumbnail.
    def test_call_pil(self):
        for image_processing_class in self.image_processor_list:
            # Initialize image_processing
            image_processing = image_processing_class(**self.image_processor_dict)
            # create random PIL images
            image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=True)
            for image in image_inputs:
                self.assertIsInstance(image, Image.Image)
            # Test not batched input
            encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
            expected_output_image_shape = (1, 5, 3, 16, 16)
            self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape)
            # Test batched
            encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
            expected_output_image_shape = (7, 5, 3, 16, 16)
            self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape)

    def test_call_numpy(self):
        for image_processing_class in self.image_processor_list:
            # Initialize image_processing
            image_processing = image_processing_class(**self.image_processor_dict)
            # create random numpy tensors
            image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=True, numpify=True)
            for image in image_inputs:
                self.assertIsInstance(image, np.ndarray)
            # Test not batched input
            encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
            expected_output_image_shape = (1, 5, 3, 16, 16)
            self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape)
            # Test batched
            encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
            expected_output_image_shape = (7, 5, 3, 16, 16)
            self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape)

    def test_call_pytorch(self):
        for image_processing_class in self.image_processor_list:
            # Initialize image_processing
            image_processing = image_processing_class(**self.image_processor_dict)
            # create random PyTorch tensors
            image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=True, torchify=True)
            for image in image_inputs:
                self.assertIsInstance(image, torch.Tensor)
            # Test not batched input
            encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
            expected_output_image_shape = (1, 5, 3, 16, 16)
            self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape)
            # Test batched
            encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
            expected_output_image_shape = (7, 5, 3, 16, 16)
            self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape)

    @unittest.skip(reason="PerceptionLMImageProcessor doesn't treat 4 channel PIL and numpy consistently yet")
    def test_call_numpy_4_channels(self):
        pass

    def test_nested_input(self):
        for image_processing_class in self.image_processor_list:
            image_processing = image_processing_class(**self.image_processor_dict)
            image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=True)
            # Test batched as a list of images
            encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
            expected_output_image_shape = (7, 5, 3, 16, 16)
            self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape)
            # Test batched as a nested list of images, where each sublist is one batch
            image_inputs_nested = [image_inputs[:3], image_inputs[3:]]
            encoded_images_nested = image_processing(image_inputs_nested, return_tensors="pt").pixel_values
            expected_output_image_shape = (7, 5, 3, 16, 16)
            self.assertEqual(tuple(encoded_images_nested.shape), expected_output_image_shape)
            # Image processor should return same pixel values, independently of input format
            self.assertTrue((encoded_images_nested == encoded_images).all())
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/perception_lm/test_image_processing_perception_lm.py",
"license": "Apache License 2.0",
"lines": 189,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:tests/models/perception_lm/test_modeling_perception_lm.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch PerceptionLM model."""
import copy
import unittest

from huggingface_hub import hf_hub_download

from transformers import (
    AutoProcessor,
    BitsAndBytesConfig,
    PerceptionLMConfig,
    PerceptionLMForConditionalGeneration,
    PerceptionLMModel,
    is_torch_available,
)
from transformers.testing_utils import (
    cleanup,
    require_bitsandbytes,
    require_torch,
    slow,
    torch_device,
)

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
if is_torch_available():
import torch
class PerceptionLMVisionText2TextModelTester:
    """Builds tiny PerceptionLM configs and synthetic multimodal inputs for the
    model tests below."""

    def __init__(
        self,
        parent,
        image_token_id=0,
        video_token_id=2,
        seq_length=7,
        tie_word_embeddings=True,
        projector_pooling_ratio=1,  # currently unused; kept for interface compatibility
        text_config={
            "model_type": "llama",
            "seq_length": 7,
            "is_training": True,
            "use_input_mask": True,
            "use_token_type_ids": False,
            "use_labels": True,
            "vocab_size": 99,
            "hidden_size": 32,
            "num_hidden_layers": 2,
            "num_attention_heads": 4,
            "intermediate_size": 37,
            "hidden_act": "gelu",
            "hidden_dropout_prob": 0.1,
            "attention_probs_dropout_prob": 0.1,
            "max_position_embeddings": 512,
            "type_vocab_size": 16,
            "type_sequence_label_size": 2,
            "initializer_range": 0.02,
            "num_labels": 3,
            "num_choices": 4,
            "pad_token_id": 1,
        },
        is_training=True,
        vision_config={
            "architecture": "vit_pe_core_large_patch14_336",
            "model_args": {
                "embed_dim": 64,
                "img_size": (14, 14),
                "depth": 2,
                "global_pool": "",
                "use_post_transformer_norm": False,
                "init_values": 0.1,
                "ref_feat_shape": (1, 1),
            },
        },
    ):
        self.parent = parent
        self.image_token_id = image_token_id
        self.video_token_id = video_token_id
        # Deep-copy the config dicts: they are mutable default arguments shared
        # across every instance, so storing them directly would let one test's
        # mutation leak into all later instantiations.
        self.text_config = copy.deepcopy(text_config)
        self.vision_config = copy.deepcopy(vision_config)
        self.pad_token_id = text_config["pad_token_id"]
        self.num_hidden_layers = text_config["num_hidden_layers"]
        self.vocab_size = text_config["vocab_size"]
        self.hidden_size = text_config["hidden_size"]
        self.num_attention_heads = text_config["num_attention_heads"]
        self.is_training = is_training
        self.tie_word_embeddings = tie_word_embeddings
        self.batch_size = 3
        self.num_tiles = 1
        self.num_frames = 1
        self.num_channels = 3
        self.image_size = self.vision_config["model_args"]["img_size"][0]
        # One vision token per 14x14 patch of the (square) input image.
        self.num_image_tokens = (self.vision_config["model_args"]["img_size"][0] // 14) ** 2
        self.num_video_tokens = (self.vision_config["model_args"]["img_size"][0] // 14) ** 2
        self.seq_length = seq_length + self.num_image_tokens
        self.encoder_seq_length = self.seq_length

    def get_config(self):
        """Return a tiny `PerceptionLMConfig` assembled from the stored sub-configs."""
        return PerceptionLMConfig(
            text_config=self.text_config,
            vision_config=self.vision_config,
            vision_use_cls_token=True,
            image_token_id=self.image_token_id,
            video_token_id=self.video_token_id,
            tie_word_embeddings=self.tie_word_embeddings,
        )

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, pixel_values_videos) with random pixel data."""
        pixel_values = floats_tensor(
            [
                self.batch_size,
                self.num_tiles,
                self.num_channels,
                self.vision_config["model_args"]["img_size"][0],
                self.vision_config["model_args"]["img_size"][1],
            ]
        )
        pixel_values_videos = floats_tensor(
            [
                self.batch_size,
                self.num_frames,
                self.num_channels,
                self.vision_config["model_args"]["img_size"][0],
                self.vision_config["model_args"]["img_size"][1],
            ]
        )
        config = self.get_config()
        return config, pixel_values, pixel_values_videos

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) with media placeholder tokens planted in `input_ids`."""
        config, pixel_values, pixel_values_videos = self.prepare_config_and_inputs()
        input_ids = ids_tensor([self.batch_size, self.seq_length], config.text_config.vocab_size - 2) + 2
        attention_mask = torch.ones(input_ids.shape, dtype=torch.long).to(torch_device)
        # Scrub accidental media tokens from the random ids, then plant exactly the
        # expected number of image/video tokens at the start of each sequence.
        input_ids[input_ids == config.image_token_id] = self.pad_token_id
        input_ids[input_ids == config.video_token_id] = self.pad_token_id
        input_ids[:, : self.num_image_tokens] = config.image_token_id
        input_ids[:, self.num_image_tokens : self.num_video_tokens + self.num_image_tokens] = config.video_token_id
        inputs_dict = {
            "pixel_values": pixel_values,
            "pixel_values_videos": pixel_values_videos,
            "input_ids": input_ids,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict
@require_torch
class PerceptionLMForConditionalGenerationModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
    """
    Model tester for `PerceptionLMForConditionalGeneration`.
    """

    all_model_classes = (
        (
            PerceptionLMModel,
            PerceptionLMForConditionalGeneration,
        )
        if is_torch_available()
        else ()
    )
    _is_composite = True  # model combines a timm vision tower with a text decoder

    def setUp(self):
        self.model_tester = PerceptionLMVisionText2TextModelTester(self)
        common_properties = [
            "image_token_id",
            "video_token_id",
        ]
        self.config_tester = ConfigTester(
            self,
            config_class=PerceptionLMConfig,
            has_text_modality=False,
            common_properties=common_properties,
        )

    def test_config(self):
        self.config_tester.run_common_tests()

    # overwrite inputs_embeds tests because we need to delete "pixel values" for LVLMs
    def test_inputs_embeds(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            inputs = self._prepare_for_class(inputs_dict, model_class)
            input_ids = inputs["input_ids"]
            del inputs["input_ids"]
            del inputs["pixel_values"]
            del inputs["pixel_values_videos"]
            wte = model.get_input_embeddings()
            inputs["inputs_embeds"] = wte(input_ids)
            with torch.no_grad():
                model(**inputs)

    # overwrite inputs_embeds tests because we need to delete "pixel values" for LVLMs
    # while some other models require pixel_values to be present
    def test_inputs_embeds_matches_input_ids(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            inputs = self._prepare_for_class(inputs_dict, model_class)
            input_ids = inputs["input_ids"]
            del inputs["input_ids"]
            del inputs["pixel_values"]
            del inputs["pixel_values_videos"]
            inputs_embeds = model.get_input_embeddings()(input_ids)
            with torch.no_grad():
                out_ids = model(input_ids=input_ids, **inputs)[0]
                out_embeds = model(inputs_embeds=inputs_embeds, **inputs)[0]
            torch.testing.assert_close(out_embeds, out_ids)

    def test_mismatching_num_image_tokens(self):
        """
        Tests that VLMs throw an error with explicit message saying what is wrong
        when number of images doesn't match number of image tokens in the text.
        Also we need to test multi-image cases when one prompt has multiple image tokens.
        """
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            if model_class == PerceptionLMModel:
                continue
            model = model_class(config).to(torch_device)
            model.eval()
            _ = model(**input_dict)  # successful forward with no modifications
            # remove one image but leave the image token in text
            input_dict["pixel_values"] = input_dict["pixel_values"][-1:, ...]
            with self.assertRaisesRegex(ValueError, "Image features and image tokens do not match"):
                _ = model(**input_dict)
            # simulate multi-image case by concatenating inputs where each has exactly one image/image-token
            input_ids = input_dict["input_ids"][:1]
            pixel_values = input_dict["pixel_values"][:1]
            input_ids = torch.cat([input_ids, input_ids], dim=0)
            # one image and two image tokens raise an error
            with self.assertRaisesRegex(ValueError, "Image features and image tokens do not match"):
                _ = model(input_ids=input_ids, pixel_values=pixel_values)
            # two images and two image tokens don't raise an error
            pixel_values = torch.cat([pixel_values, pixel_values], dim=0)
            _ = model(input_ids=input_ids, pixel_values=pixel_values)

    # The training tests below restrict `all_model_classes` to the generation head,
    # since the bare `PerceptionLMModel` has no loss to train against.
    def test_training(self):
        self.all_model_classes = (PerceptionLMForConditionalGeneration,) if is_torch_available() else ()
        super().test_training()

    def test_training_gradient_checkpointing(self):
        self.all_model_classes = (PerceptionLMForConditionalGeneration,) if is_torch_available() else ()
        super().test_training_gradient_checkpointing()

    def test_training_gradient_checkpointing_use_reentrant_false(self):
        self.all_model_classes = (PerceptionLMForConditionalGeneration,) if is_torch_available() else ()
        super().test_training_gradient_checkpointing_use_reentrant_false()

    def test_training_gradient_checkpointing_use_reentrant_true(self):
        self.all_model_classes = (PerceptionLMForConditionalGeneration,) if is_torch_available() else ()
        super().test_training_gradient_checkpointing_use_reentrant_true()

    @unittest.skip(
        reason="PE/TIMM's attention implementation is self configured and won't raise ValueError on global attention implementation."
    )
    def test_flash_attn_2_can_dispatch_composite_models(self):
        pass

    @unittest.skip(
        "VLMs need lots of steps to prepare images/mask correctly to get pad-free inputs. Can be tested as part of LLM test"
    )
    def test_flash_attention_2_padding_matches_padding_free_with_position_ids(self):
        pass

    @unittest.skip("ViT PE / TimmWrapperModel cannot be tested with meta device")
    def test_can_be_initialized_on_meta(self):
        pass

    @unittest.skip("Specifying both inputs_embeds and pixel_values are not supported for PerceptionLM")
    def test_generate_from_inputs_embeds_0_greedy(self):
        pass

    @unittest.skip("Specifying both inputs_embeds and pixel_values are not supported for PerceptionLM")
    def test_generate_from_inputs_embeds_1_beam_search(self):
        pass

    @unittest.skip("Specifying both inputs_embeds and pixel_values are not supported for PerceptionLM")
    def test_generate_from_inputs_embeds_with_static_cache(self):
        pass

    ## Skip flash attention related tests below
    ## correct configuration:
    ## from_pretrained(model_id, attn_implementation={"text_config": "flash_attention_2", "vision_config": "eager"}
    @unittest.skip("Flash attn test is not configured correctly as we need to configure vision/timm model to 'eager'.")
    def test_eager_matches_fa2_generate(self):
        pass

    @unittest.skip("Flash attn test is not configured correctly as we need to configure vision/timm model to 'eager'.")
    def test_flash_attn_2_fp32_ln(self):
        pass

    @unittest.skip("Flash attn test is not configured correctly as we need to configure vision/timm model to 'eager'.")
    def test_flash_attn_2_from_config(self):
        pass

    @unittest.skip("SDPA test is not configured correctly as we need to configure vision/timm model to 'eager'.")
    def test_eager_matches_sdpa_generate_with_dynamic_cache(self):
        pass

    @unittest.skip("Flash attn test is not configured correctly as we need to configure vision/timm model to 'eager'.")
    def test_flash_attn_2_inference_equivalence_right_padding(self):
        pass

    @unittest.skip("SDPA test is not configured correctly as we need to configure vision/timm model to 'eager'.")
    def test_eager_matches_sdpa_generate(self):
        pass

    @unittest.skip("Flash attn test is not configured correctly as we need to configure vision/timm model to 'eager'.")
    def test_flash_attn_2_inference_equivalence(self):
        pass

    @unittest.skip(
        "PerceptionLMForConditionalGeneration does not have language_model, vision_tower, multi_modal_projector."
    )
    def test_sdpa_can_dispatch_composite_models(self):
        pass

    @unittest.skip("Cannot set `output_attentions` for timm models.")
    def test_attention_outputs(self):
        pass

    @unittest.skip("Cannot set `output_attentions` for timm models.")
    def test_retain_grad_hidden_states_attentions(self):
        pass

    @unittest.skip("Cannot set `output_attentions` for timm models.")
    def test_generate_compilation_all_outputs(self):
        pass

    @unittest.skip("Cannot set output_attentions on timm models.")
    def test_get_image_features_attentions(self):
        pass

    def _image_features_get_expected_num_hidden_states(self, model_tester=None):
        # For models that rely on timm for their vision backend, it's hard to infer how many layers the model has
        # from the timm config alone. So, we're just hardcoding the expected number of hidden states here.
        return 2
TEST_MODEL_PATH = "facebook/Perception-LM-1B"
@require_torch
@require_bitsandbytes
@slow
class PerceptionLMForConditionalGenerationIntegrationTest(unittest.TestCase):
    """Slow end-to-end tests that run the real 1B checkpoint, 4-bit quantized, on an accelerator."""

    def setUp(self):
        """Load the shared processor and fetch one test image and one test video from the Hub."""
        self.processor = AutoProcessor.from_pretrained(TEST_MODEL_PATH)
        self.image_file = hf_hub_download(
            repo_id="shumingh/perception_lm_test_images",
            filename="14496_0.PNG",
            repo_type="dataset",
        )
        self.video_file = hf_hub_download(
            repo_id="shumingh/perception_lm_test_videos",
            filename="GUWR5TyiY-M_000012_000022.mp4",
            repo_type="dataset",
        )
        # Single-turn image conversation in chat-template format.
        self.conversation1 = [
            {
                "role": "user",
                "content": [
                    {"type": "image", "url": self.image_file},
                    {"type": "text", "text": "Describe the bar plot in the image."},
                ],
            }
        ]
        # Single-turn video conversation in chat-template format.
        self.conversation2 = [
            {
                "role": "user",
                "content": [
                    {
                        "type": "video",
                        "url": self.video_file,
                    },
                    {"type": "text", "text": "Can you describe the video in detail?"},
                ],
            }
        ]

    def tearDown(self):
        # Free accelerator memory between tests so the quantized model is reclaimed.
        cleanup(torch_device, gc_collect=True)

    def test_small_model_integration_test(self):
        """Single image prompt: the generated continuation must match the pinned text."""
        model = PerceptionLMForConditionalGeneration.from_pretrained(
            TEST_MODEL_PATH, quantization_config=BitsAndBytesConfig(load_in_4bit=True), cache_dir="./"
        )
        inputs = self.processor.apply_chat_template(
            [self.conversation1],
            num_frames=32,
            add_generation_prompt=True,
            tokenize=True,
            return_dict=True,
            return_tensors="pt",
            padding=True,
            padding_side="left",
        ).to(torch_device)
        generate_ids = model.generate(**inputs, max_new_tokens=18)
        # Drop the prompt tokens so only the newly generated text is compared.
        input_length = inputs["input_ids"].shape[1]
        generate_ids_without_inputs = generate_ids[:, input_length:]
        EXPECTED_DECODED_TEXT = "The bar plot displays the values of four categories: step, horror, mood, and lumber" # fmt: skip
        self.assertEqual(
            self.processor.decode(generate_ids_without_inputs[0], skip_special_tokens=True),
            EXPECTED_DECODED_TEXT,
        )

    def test_small_model_integration_test_batched(self):
        """Batched image + video prompts: both continuations must match the pinned texts."""
        model = PerceptionLMForConditionalGeneration.from_pretrained(
            TEST_MODEL_PATH, quantization_config=BitsAndBytesConfig(load_in_4bit=True)
        )
        processor = AutoProcessor.from_pretrained(TEST_MODEL_PATH)
        inputs = processor.apply_chat_template(
            [self.conversation1, self.conversation2],
            num_frames=32,
            add_generation_prompt=True,
            tokenize=True,
            return_dict=True,
            return_tensors="pt",
            padding=True,
            padding_side="left",
        ).to(torch_device)
        generate_ids = model.generate(**inputs, max_new_tokens=18)
        # Drop the (left-padded) prompt tokens before decoding.
        input_length = inputs["input_ids"].shape[1]
        generate_ids_without_inputs = generate_ids[:, input_length:]
        EXPECTED_DECODED_TEXT = ['The bar plot displays the values of four categories: step, horror, mood, and lumber', 'The video shows a group of people in green shirts and white shorts performing a jump rope routine'] # fmt: skip
        self.assertEqual(
            processor.batch_decode(generate_ids_without_inputs, skip_special_tokens=True),
            EXPECTED_DECODED_TEXT,
        )

    def test_generation_no_images(self):
        """Smoke test: `generate` must work on a text-only prompt with no image/video inputs."""
        model = PerceptionLMForConditionalGeneration.from_pretrained(
            TEST_MODEL_PATH, quantization_config=BitsAndBytesConfig(load_in_4bit=True)
        )
        processor = AutoProcessor.from_pretrained(TEST_MODEL_PATH)
        # Prepare inputs with no images
        inputs = processor(text="Hello, I am", return_tensors="pt").to(torch_device)
        # Make sure that `generate` works
        _ = model.generate(**inputs, max_new_tokens=20)
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/perception_lm/test_modeling_perception_lm.py",
"license": "Apache License 2.0",
"lines": 411,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:tests/models/perception_lm/test_video_processing_perception_lm.py | # Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers.image_utils import IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torchvision_available, is_vision_available
from ...test_video_processing_common import VideoProcessingTestMixin, prepare_video_inputs
if is_vision_available():
if is_torchvision_available():
from transformers import PerceptionLMVideoProcessor
class PerceptionLMVideoProcessingTester:
    """Configuration holder for the PerceptionLM video-processing tests.

    Supplies the processor kwargs dict, the expected output shape, and synthetic
    video inputs consumed by the common test mixin.
    """

    def __init__(
        self,
        parent,
        batch_size=5,
        num_frames=8,
        num_channels=3,
        min_resolution=30,
        max_resolution=80,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=IMAGENET_STANDARD_MEAN,
        image_std=IMAGENET_STANDARD_STD,
        do_convert_rgb=True,
    ):
        # Fall back to the default resize/crop geometry when none is supplied.
        if size is None:
            size = {"height": 20, "width": 20}
        if crop_size is None:
            crop_size = {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_frames = num_frames
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb

    def prepare_video_processor_dict(self):
        """Return the processor kwargs as a dict mirroring the attributes set in `__init__`."""
        keys = (
            "do_resize",
            "size",
            "do_center_crop",
            "crop_size",
            "do_normalize",
            "image_mean",
            "image_std",
            "do_convert_rgb",
        )
        return {key: getattr(self, key) for key in keys}

    def expected_output_video_shape(self, images):
        """Expected (frames, channels, height, width) of a processed video."""
        return self.num_frames, self.num_channels, self.crop_size["height"], self.crop_size["width"]

    def prepare_video_inputs(self, equal_resolution=False, return_tensors="pil"):
        """Build synthetic test videos via the shared `prepare_video_inputs` helper."""
        return prepare_video_inputs(
            batch_size=self.batch_size,
            num_frames=self.num_frames,
            num_channels=self.num_channels,
            min_resolution=self.min_resolution,
            max_resolution=self.max_resolution,
            equal_resolution=equal_resolution,
            return_tensors=return_tensors,
        )
@require_torch
@require_vision
class PerceptionLMVideoProcessingTest(VideoProcessingTestMixin, unittest.TestCase):
    """Video-processing tests for PerceptionLM, driven through the common mixin."""

    # Only the fast (torchvision-backed) processor exists for this model.
    fast_video_processing_class = PerceptionLMVideoProcessor if is_torchvision_available() else None

    def setUp(self):
        super().setUp()
        self.video_processor_tester = PerceptionLMVideoProcessingTester(self)

    @property
    def video_processor_dict(self):
        # Kwargs the mixin uses to instantiate the processor under test.
        return self.video_processor_tester.prepare_video_processor_dict()

    def test_video_processor_properties(self):
        """The processor must expose every configuration attribute it was built with."""
        video_processing = self.fast_video_processing_class(**self.video_processor_dict)
        self.assertTrue(hasattr(video_processing, "do_resize"))
        self.assertTrue(hasattr(video_processing, "size"))
        self.assertTrue(hasattr(video_processing, "do_center_crop"))
        self.assertTrue(hasattr(video_processing, "center_crop"))
        self.assertTrue(hasattr(video_processing, "do_normalize"))
        self.assertTrue(hasattr(video_processing, "image_mean"))
        self.assertTrue(hasattr(video_processing, "image_std"))
        self.assertTrue(hasattr(video_processing, "do_convert_rgb"))

    def test_video_processor_from_dict_with_kwargs(self):
        """`from_dict` must honor the dict values and allow kwarg overrides (int → square size)."""
        video_processor = self.fast_video_processing_class.from_dict(self.video_processor_dict)
        self.assertEqual(video_processor.size, {"height": 20, "width": 20})
        self.assertEqual(video_processor.crop_size, {"height": 18, "width": 18})
        # Integer overrides are expanded to square height/width dicts.
        video_processor = self.fast_video_processing_class.from_dict(self.video_processor_dict, size=42, crop_size=84)
        self.assertEqual(video_processor.size, {"height": 42, "width": 42})
        self.assertEqual(video_processor.crop_size, {"height": 84, "width": 84})
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/perception_lm/test_video_processing_perception_lm.py",
"license": "Apache License 2.0",
"lines": 106,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:src/transformers/models/lfm2/configuration_lfm2.py | # Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ...configuration_utils import PreTrainedConfig
from ...modeling_rope_utils import RopeParameters
class Lfm2Config(PreTrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`Lfm2Model`]. It is used to instantiate a LFM2
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
    defaults will yield a similar configuration to that of the LFM2-1.2B model.
    e.g. [LiquidAI/LFM2-1.2B](https://huggingface.co/LiquidAI/LFM2-1.2B)
    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.
    Args:
        vocab_size (`int`, *optional*, defaults to 65536):
            Vocabulary size of the LFM2 model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`Lfm2Model`]
        hidden_size (`int`, *optional*, defaults to 2560):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 12288):
            Dimension of the MLP representations.
        num_hidden_layers (`int`, *optional*, defaults to 32):
            Number of hidden layers in the Transformer decoder.
        num_attention_heads (`int`, *optional*, defaults to 32):
            Number of attention heads for each attention layer in the Transformer decoder.
        num_key_value_heads (`int`, *optional*, defaults to 8):
            This is the number of key_value heads that should be used to implement Grouped Query Attention. If
            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
            `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
            by meanpooling all the original heads within that group. For more details, check out [this
            paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to
            `num_attention_heads`.
        max_position_embeddings (`int`, *optional*, defaults to 128000):
            The maximum sequence length that this model might ever be used with.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon used by the rms normalization layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        pad_token_id (`int`, *optional*, defaults to 0):
            Padding token id.
        bos_token_id (`int`, *optional*, defaults to 1):
            Beginning of stream token id.
        eos_token_id (`int`, *optional*, defaults to 2):
            End of stream token id.
        tie_word_embeddings (`bool`, *optional*, defaults to `True`):
            Whether to tie weight embeddings
        rope_parameters (`RopeParameters`, *optional*):
            Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain
            a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE
            with longer `max_position_embeddings`.
        conv_bias (`bool`, *optional*, defaults to `False`):
            Whether to use bias in the conv layers.
        conv_L_cache (`int`, *optional*, defaults to 3):
            L_cache dim in the conv layers.
        block_multiple_of (`int`, *optional*, defaults to 256):
            Multiple for the `intermediate_size`.
        block_ffn_dim_multiplier (`float`, *optional*, defaults to 1.0):
            Multiplier for the `intermediate_size`.
        block_auto_adjust_ff_dim (`bool`, *optional*, defaults to `True`):
            Whether to adjust the dim of the `intermediate_size`.
        full_attn_idxs (`Optional`, *optional*):
            Index of the layers which use attention. Only used when `layer_types` is not given; layers not listed
            here become conv layers.
        layer_types (`Optional`, *optional*):
            Type of each layers.
    ```python
    >>> from transformers import Lfm2Model, Lfm2Config
    >>> # Initializing a LFM2 model
    >>> configuration = Lfm2Config()
    >>> # Initializing a model from the LFM2-1.2B style configuration
    >>> model = Lfm2Model(configuration)
    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "lfm2"
    keys_to_ignore_at_inference = ["past_key_values"]
    # Default RoPE theta used when `rope_parameters` does not provide one.
    default_theta = 1000000.0

    def __init__(
        self,
        vocab_size: int | None = 65536,
        hidden_size: int | None = 2560,
        intermediate_size: int | None = 12288,
        num_hidden_layers: int | None = 32,
        num_attention_heads: int | None = 32,
        num_key_value_heads: int | None = 8,
        max_position_embeddings: int | None = 128_000,
        initializer_range: float | None = 0.02,
        norm_eps: float | None = 0.00001,
        use_cache: bool | None = True,
        pad_token_id: int | None = 0,
        bos_token_id: int | None = 1,
        eos_token_id: int | None = 2,
        tie_word_embeddings: bool | None = True,
        rope_parameters: RopeParameters | dict[str, RopeParameters] | None = None,
        conv_bias: bool | None = False,
        conv_L_cache: int | None = 3,
        block_multiple_of: int | None = 256,
        block_ffn_dim_multiplier: float | None = 1.0,
        block_auto_adjust_ff_dim: bool | None = True,
        full_attn_idxs: list[int] | None = None,
        layer_types: list[str] | None = None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.max_position_embeddings = max_position_embeddings
        self.use_cache = use_cache
        self.norm_eps = norm_eps
        self.initializer_range = initializer_range
        # attn operator config
        self.num_attention_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        # custom operator config
        self.conv_bias = conv_bias
        self.conv_L_cache = conv_L_cache
        # MLP config
        self.intermediate_size = kwargs.get("block_ff_dim", intermediate_size) # to fit original config keys
        self.block_multiple_of = block_multiple_of
        self.block_ffn_dim_multiplier = block_ffn_dim_multiplier
        self.block_auto_adjust_ff_dim = block_auto_adjust_ff_dim
        self.layer_types = layer_types
        if self.layer_types is None:
            # Derive per-layer types from attention indices; default is all-attention.
            full_attn_idxs = full_attn_idxs if full_attn_idxs is not None else list(range(num_hidden_layers))
            self.layer_types = ["full_attention" if i in full_attn_idxs else "conv" for i in range(num_hidden_layers)]
        self.rope_parameters = rope_parameters
        tie_word_embeddings = kwargs.get("tie_embedding", tie_word_embeddings) # to fit original config keys
        self.tie_word_embeddings = tie_word_embeddings
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(**kwargs)
__all__ = ["Lfm2Config"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/lfm2/configuration_lfm2.py",
"license": "Apache License 2.0",
"lines": 147,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/lfm2/modular_lfm2.py | # Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections.abc import Callable
from typing import Any
import torch
import torch.nn.functional as F
from torch import nn
from ...masking_utils import create_causal_mask
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import BaseModelOutputWithPast
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS
from ...processing_utils import Unpack
from ...utils import TransformersKwargs, logging
from ...utils.import_utils import is_causal_conv1d_available, is_torchdynamo_compiling
from ..bamba.modeling_bamba import apply_mask_to_padding_states
from ..gemma2.modeling_gemma2 import Gemma2RotaryEmbedding
from ..llama.modeling_llama import (
LlamaAttention,
LlamaForCausalLM,
LlamaModel,
LlamaPreTrainedModel,
LlamaRMSNorm,
apply_rotary_pos_emb,
eager_attention_forward,
)
from .configuration_lfm2 import Lfm2Config
if is_causal_conv1d_available():
from causal_conv1d import causal_conv1d_fn, causal_conv1d_update
else:
causal_conv1d_fn, causal_conv1d_update = None, None
kernel_modules = (causal_conv1d_fn, causal_conv1d_update)
is_fast_path_available = all(kernel_modules)
logger = logging.get_logger(__name__)
class Lfm2RMSNorm(LlamaRMSNorm):
    """RMS normalization for Lfm2; identical to the Llama implementation."""

    pass
class Lfm2RotaryEmbedding(Gemma2RotaryEmbedding):
    """Rotary position embedding for Lfm2; identical to the Gemma2 implementation."""

    pass
class Lfm2MLP(nn.Module):
    """SwiGLU feed-forward block: w2(silu(w1(x)) * w3(x)).

    The hidden width starts from `config.intermediate_size` and, when
    `block_auto_adjust_ff_dim` is set, is shrunk to 2/3, optionally scaled by
    `block_ffn_dim_multiplier`, then rounded up to a multiple of
    `block_multiple_of`.
    """

    def __init__(self, config: Lfm2Config):
        super().__init__()
        ff_dim = config.intermediate_size
        if config.block_auto_adjust_ff_dim:
            ff_dim = int(2 * ff_dim / 3)
            # custom dim factor multiplier
            if config.block_ffn_dim_multiplier is not None:
                ff_dim = int(config.block_ffn_dim_multiplier * ff_dim)
            # Round up to the nearest multiple of block_multiple_of.
            multiple = config.block_multiple_of
            ff_dim = multiple * ((ff_dim + multiple - 1) // multiple)
        # Creation order (w1, w3, w2) is kept so parameter initialization draws match.
        self.w1 = nn.Linear(config.hidden_size, ff_dim, bias=False)
        self.w3 = nn.Linear(config.hidden_size, ff_dim, bias=False)
        self.w2 = nn.Linear(ff_dim, config.hidden_size, bias=False)

    def forward(self, x):
        gate = F.silu(self.w1(x))
        return self.w2(gate * self.w3(x))
class Lfm2HybridConvCache:
    """
    Attention and conv cache for Lfm2.
    It stores the Key and Value states as a list of tensors, one for each layer.
    Attention layer cache shape: `[batch_size, num_heads, seq_len, head_dim]`.
    Conv layer cache shape: `[batch_size, hidden_size, L_cache]`.
    """

    # Override @property existing in Cache
    max_batch_size = None
    is_compileable = False
    key_cache = None
    value_cache = None

    def __init__(
        self,
        config: Lfm2Config,
        max_batch_size: int,
        dtype: torch.dtype = torch.float32,
        device: torch.device | str | None = None,
    ):
        self.key_cache = []
        self.value_cache = []
        self.max_batch_size = max_batch_size
        self.layer_types = config.layer_types
        # Canonical layer used by get_seq_length() when asked about a non-attention layer.
        self.first_attention_layer = self.layer_types.index("full_attention")
        self.conv_L_cache = config.conv_L_cache
        self._dtype = dtype
        self.conv_cache: list[torch.Tensor] = []
        device = torch.device(device) if device is not None else None
        # Every layer gets a pre-allocated conv state and (initially empty) K/V tensors;
        # each layer only ever uses one of the two, depending on its type.
        for _ in range(config.num_hidden_layers):
            conv_state = torch.zeros(
                self.max_batch_size,
                config.hidden_size,
                self.conv_L_cache,
                dtype=self._dtype,
                device=device,
            )
            self.conv_cache.append(conv_state)
            self.key_cache.append(torch.tensor([]))
            self.value_cache.append(torch.tensor([]))

    def update(
        self,
        key_states: torch.Tensor,
        value_states: torch.Tensor,
        layer_idx: int,
        cache_kwargs: dict[str, Any] | None = None,
    ) -> tuple[torch.Tensor, torch.Tensor]:
        """
        Updates the cache with the new `key_states` and `value_states` for the layer `layer_idx`.
        Parameters:
            key_states (`torch.Tensor`):
                The new key states to cache.
            value_states (`torch.Tensor`):
                The new value states to cache.
            layer_idx (`int`):
                The index of the layer to cache the states for.
            cache_kwargs (`Dict[str, Any]`, `optional`):
                Additional arguments for the cache subclass. No additional arguments are used in `DynamicCache`.
        Return:
            A tuple containing the updated key and value states.
        """
        # Update the cache: first write replaces the empty placeholder, later writes concatenate
        # along the sequence dimension.
        if self.key_cache[layer_idx].numel() == 0:
            self.key_cache[layer_idx] = key_states
            self.value_cache[layer_idx] = value_states
        else:
            self.key_cache[layer_idx] = torch.cat([self.key_cache[layer_idx], key_states], dim=-2)
            self.value_cache[layer_idx] = torch.cat([self.value_cache[layer_idx], value_states], dim=-2)
        return self.key_cache[layer_idx], self.value_cache[layer_idx]

    def reorder_cache(self, beam_idx: torch.LongTensor):
        """Reorders the cache for beam search, given the selected beam indices."""
        # Both the attention K/V caches and the conv states are batch-indexed, so all are reordered.
        for layer_idx in range(len(self.key_cache)):
            if self.key_cache[layer_idx].numel():
                device = self.key_cache[layer_idx].device
                self.key_cache[layer_idx] = self.key_cache[layer_idx].index_select(0, beam_idx.to(device))
                device = self.value_cache[layer_idx].device
                self.value_cache[layer_idx] = self.value_cache[layer_idx].index_select(0, beam_idx.to(device))
            if self.conv_cache[layer_idx].numel():
                device = self.conv_cache[layer_idx].device
                self.conv_cache[layer_idx] = self.conv_cache[layer_idx].index_select(0, beam_idx.to(device))

    def get_seq_length(self, layer_idx: int | None = 0) -> int:
        """Returns the sequence length of the cached states. A layer index can be optionally passed."""
        # take any layer that contains cache and not empty tensor
        layer_idx = self.first_attention_layer if self.layer_types[layer_idx] != "full_attention" else layer_idx
        if len(self.key_cache) <= layer_idx or self.key_cache[layer_idx].numel() == 0:
            return 0
        return self.key_cache[layer_idx].shape[-2]

    def get_mask_sizes(self, cache_position: torch.Tensor, layer_idx: int) -> tuple[int, int]:
        """
        Return a tuple (kv_length, kv_offset) corresponding to the length and offset that will be returned for
        the given layer at `layer_idx`.
        The masks are then prepared according to the given lengths (kv_length, kv_offset) and patterns (i.e. sliding_window, chunk_size),
        for each layer.
        """
        full_mask_kv_offset = 0
        query_length = cache_position.shape[0]
        past_seen_tokens = self.get_seq_length()
        kv_length = query_length + past_seen_tokens
        return kv_length, full_mask_kv_offset

    def crop(self, max_length: int):
        """Crop the cache to the given length"""
        # Negative values crop that many tokens off the end.
        if max_length < 0:
            max_length = self.get_seq_length() - abs(max_length)
        if self.get_seq_length() <= max_length:
            return
        # Only the attention K/V caches are cropped; conv states are left untouched here.
        for idx in range(len(self.key_cache)):
            if self.key_cache[idx].numel():
                self.key_cache[idx] = self.key_cache[idx][..., :max_length, :]
                self.value_cache[idx] = self.value_cache[idx][..., :max_length, :]

    def __len__(self) -> int:
        # Number of layers tracked by the cache.
        return len(self.key_cache)

    def reset(self):
        # Zeroes the conv states only; K/V caches are not cleared by this method.
        for layer_idx in range(len(self.conv_cache)):
            # In-place ops prevent breaking the static address
            self.conv_cache[layer_idx].zero_()
class Lfm2Attention(LlamaAttention):
    """Llama-style attention with per-head RMS norm on queries/keys and no dropout.

    Differences from the parent: projections are re-created without bias, the output
    projection is named `out_proj` (replacing `o_proj`), and `q_layernorm`/`k_layernorm`
    normalize each head before RoPE is applied.
    """

    def __init__(self, config: Lfm2Config, layer_idx: int):
        super().__init__(config, layer_idx)
        self.q_proj = nn.Linear(config.hidden_size, config.num_attention_heads * self.head_dim, bias=False)
        self.k_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=False)
        self.v_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=False)
        self.out_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=False)
        self.q_layernorm = Lfm2RMSNorm(self.head_dim, eps=config.norm_eps)
        self.k_layernorm = Lfm2RMSNorm(self.head_dim, eps=config.norm_eps)
        # Remove parent attributes superseded by out_proj / the hard-coded dropout=0.0 below.
        del self.o_proj
        del self.attention_dropout

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: tuple[torch.Tensor, torch.Tensor],
        attention_mask: torch.Tensor | None,
        past_key_values: Lfm2HybridConvCache | None = None,
        cache_position: torch.LongTensor | None = None,
        **kwargs,
    ) -> tuple[torch.Tensor, torch.Tensor | None]:
        input_shape = hidden_states.shape[:-1]
        hidden_shape = (*input_shape, -1, self.head_dim)
        # Per-head RMS norm is applied to Q/K before the transpose into (batch, heads, seq, head_dim).
        query_states = self.q_layernorm(self.q_proj(hidden_states).view(*hidden_shape)).transpose(1, 2)
        key_states = self.k_layernorm(self.k_proj(hidden_states).view(*hidden_shape)).transpose(1, 2)
        value_states = self.v_proj(hidden_states).view(*hidden_shape).transpose(1, 2)
        cos, sin = position_embeddings
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
        if past_key_values is not None:
            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
            key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
        attention_interface: Callable = ALL_ATTENTION_FUNCTIONS.get_interface(
            self.config._attn_implementation, eager_attention_forward
        )
        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=0.0,
            scaling=self.scaling,
            **kwargs,
        )
        attn_output = attn_output.reshape(*input_shape, -1).contiguous()
        output = self.out_proj(attn_output)
        return output, attn_weights
class Lfm2ShortConv(nn.Module):
    """Gated short depthwise convolution operator used in place of attention on conv layers.

    `in_proj` produces three gates/streams (B, C, x); the conv runs on B*x and the output
    is gated by C before `out_proj`. A CUDA fast path uses the `causal_conv1d` kernels when
    available; otherwise a pure-PyTorch path is used.
    """

    def __init__(
        self,
        config: Lfm2Config,
        layer_idx: int,
    ):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        self.L_cache = config.conv_L_cache
        self.bias = config.conv_bias
        # Depthwise causal conv: groups == channels, left-padded by L_cache-1.
        self.conv = nn.Conv1d(
            in_channels=config.hidden_size,
            out_channels=config.hidden_size,
            kernel_size=self.L_cache,
            groups=config.hidden_size,
            bias=self.bias,
            padding=self.L_cache - 1,
        )
        self.in_proj = nn.Linear(config.hidden_size, 3 * config.hidden_size, bias=self.bias)
        self.out_proj = nn.Linear(config.hidden_size, config.hidden_size, bias=self.bias)

    def cuda_kernels_forward(
        self,
        x: torch.Tensor,
        past_key_values: Lfm2HybridConvCache | None = None,
        cache_position: torch.LongTensor | None = None,
        attention_mask: torch.Tensor | None = None,
    ):
        """Fast path using the fused causal_conv1d CUDA kernels."""
        x = apply_mask_to_padding_states(x, attention_mask)
        # Project once and split into the three streams, channels-first for the conv kernels.
        BCx = self.in_proj(x).transpose(-1, -2)
        B, C, x = BCx.chunk(3, dim=-2)
        Bx = B * x
        conv_weights = self.conv.weight.view(self.conv.weight.size(0), self.conv.weight.size(2))
        if past_key_values is not None and cache_position[0] > 0:
            # Decoding: single-token update against the cached conv state (updated in place).
            conv_out = causal_conv1d_update(
                Bx.squeeze(-1),
                past_key_values.conv_cache[self.layer_idx],
                conv_weights,
                self.conv.bias,
                None,
            )
            conv_out = conv_out.unsqueeze(-1)
        else:
            if past_key_values is not None:
                # Prefill: store the last L_cache columns (left-padded) as the conv state.
                conv_state = nn.functional.pad(Bx, (self.L_cache - Bx.shape[-1], 0))
                past_key_values.conv_cache[self.layer_idx].copy_(conv_state)
            conv_out = causal_conv1d_fn(Bx, conv_weights, self.conv.bias, activation=None)
        y = C * conv_out
        y = self.out_proj(y.transpose(-1, -2).contiguous())
        return y

    def slow_forward(
        self,
        x: torch.Tensor,
        past_key_values: Lfm2HybridConvCache | None = None,
        cache_position: torch.LongTensor | None = None,
        attention_mask: torch.Tensor | None = None,
    ):
        """Pure-PyTorch path mirroring `cuda_kernels_forward`."""
        seqlen = x.shape[1]
        x = apply_mask_to_padding_states(x, attention_mask)
        BCx = self.in_proj(x).transpose(-1, -2)
        B, C, x = BCx.chunk(3, dim=-2)
        Bx = B * x
        if past_key_values is not None and cache_position[0] > 0:
            # Decoding: shift the rolling conv state left and write the newest column,
            # then compute the depthwise conv as a dot product with the kernel.
            conv_state = past_key_values.conv_cache[self.layer_idx]
            cache_position = cache_position.clamp(0, self.L_cache - 1)
            conv_state = conv_state.roll(shifts=-1, dims=-1)
            conv_state[:, :, cache_position] = Bx.to(device=conv_state.device, dtype=conv_state.dtype)
            past_key_values.conv_cache[self.layer_idx].copy_(conv_state)
            conv_out = torch.sum(conv_state.to(Bx.device) * self.conv.weight[:, 0, :], dim=-1)
            if self.bias:
                conv_out += self.conv.bias
            conv_out = conv_out.unsqueeze(-1)
        else:
            if past_key_values is not None:
                # Prefill: store the last L_cache columns (left-padded) as the conv state.
                conv_state = nn.functional.pad(Bx, (self.L_cache - Bx.shape[-1], 0))
                past_key_values.conv_cache[self.layer_idx].copy_(conv_state)
            # Trim the causal left-padding back to the input length.
            conv_out = self.conv(Bx)[..., :seqlen]
        y = C * conv_out
        y = y.transpose(-1, -2).contiguous()
        y = self.out_proj(y)
        return y

    def forward(
        self,
        hidden_states: torch.Tensor,
        past_key_values: Lfm2HybridConvCache | None = None,
        cache_position: torch.LongTensor | None = None,
        attention_mask: torch.Tensor | None = None,
    ):
        # Use the fused kernels only on CUDA and outside torch.compile tracing.
        if is_fast_path_available and "cuda" in hidden_states.device.type and not is_torchdynamo_compiling():
            return self.cuda_kernels_forward(hidden_states, past_key_values, cache_position, attention_mask)
        return self.slow_forward(hidden_states, past_key_values, cache_position, attention_mask)
class Lfm2DecoderLayer(GradientCheckpointingLayer):
    """One Lfm2 block: a pre-norm operator (attention or short conv, chosen per layer
    from `config.layer_types`) followed by a pre-norm MLP, each with a residual add."""

    def __init__(self, config: Lfm2Config, layer_idx: int):
        super().__init__()
        self.is_attention_layer = config.layer_types[layer_idx] == "full_attention"
        if self.is_attention_layer:
            self.self_attn = Lfm2Attention(config, layer_idx)
        else:
            self.conv = Lfm2ShortConv(config, layer_idx)
        self.feed_forward = Lfm2MLP(config)
        self.operator_norm = Lfm2RMSNorm(config.hidden_size, eps=config.norm_eps)
        self.ffn_norm = Lfm2RMSNorm(config.hidden_size, eps=config.norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: tuple[torch.Tensor, torch.Tensor] | None = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: Lfm2HybridConvCache | None = None,
        cache_position: torch.LongTensor | None = None,
        **kwargs,
    ) -> torch.Tensor:
        residual = hidden_states
        if self.is_attention_layer:
            # Attention path; attention weights are discarded here.
            hidden_states, _ = self.self_attn(
                hidden_states=self.operator_norm(hidden_states),
                position_embeddings=position_embeddings,
                attention_mask=attention_mask,
                position_ids=position_ids,
                past_key_values=past_key_values,
                cache_position=cache_position,
                **kwargs,
            )
        else:
            hidden_states = self.conv(
                hidden_states=self.operator_norm(hidden_states),
                past_key_values=past_key_values,
                cache_position=cache_position,
                attention_mask=attention_mask,
            )
        hidden_states = hidden_states + residual
        hidden_states = hidden_states + self.feed_forward(self.ffn_norm(hidden_states))
        return hidden_states
class Lfm2PreTrainedModel(LlamaPreTrainedModel):
    # Full-graph compilation is disabled; the hybrid conv cache declares itself
    # non-compileable (see Lfm2HybridConvCache.is_compileable).
    _can_compile_fullgraph = False
class Lfm2Model(LlamaModel):
    """Llama-derived backbone using the hybrid conv/attention cache and a final
    `embedding_norm` in place of the parent's `norm`."""

    def __init__(self, config: Lfm2Config):
        super().__init__(config)
        self.embedding_norm = Lfm2RMSNorm(config.hidden_size, eps=config.norm_eps)
        # The parent's final norm is replaced by embedding_norm above.
        del self.norm

    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: Lfm2HybridConvCache | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        use_cache: bool | None = None,
        cache_position: torch.LongTensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> BaseModelOutputWithPast:
        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)
        if use_cache and past_key_values is None:
            # Lazily build the hybrid cache sized to the current batch.
            batch_size = inputs_embeds.shape[0]
            past_key_values = Lfm2HybridConvCache(
                config=self.config, max_batch_size=batch_size, dtype=self.dtype, device=self.device
            )
        if cache_position is None:
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position = torch.arange(
                past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
            )
        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)
        causal_mask = create_causal_mask(
            config=self.config,
            inputs_embeds=inputs_embeds,
            attention_mask=attention_mask,
            cache_position=cache_position,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )
        # Skip masking for decoding stage. We check shape here to be compile-friendly
        linear_attention = attention_mask if inputs_embeds.shape[1] != 1 else None
        hidden_states = inputs_embeds
        position_embeddings = self.rotary_emb(hidden_states, position_ids=position_ids)
        # decoder layers
        for decoder_layer in self.layers[: self.config.num_hidden_layers]:
            # Attention layers get the causal mask; conv layers get the raw padding mask.
            layer_mask = causal_mask if decoder_layer.is_attention_layer else linear_attention
            hidden_states = decoder_layer(
                hidden_states,
                attention_mask=layer_mask,
                position_embeddings=position_embeddings,
                position_ids=position_ids,
                past_key_values=past_key_values,
                cache_position=cache_position,
                **kwargs,
            )
        hidden_states = self.embedding_norm(hidden_states)
        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values,
        )
class Lfm2ForCausalLM(LlamaForCausalLM):
    """Causal-LM head on top of Lfm2Model; behavior inherited unchanged from Llama."""

    pass
__all__ = ["Lfm2ForCausalLM", "Lfm2Model", "Lfm2PreTrainedModel"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/lfm2/modular_lfm2.py",
"license": "Apache License 2.0",
"lines": 424,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:tests/models/lfm2/test_modeling_lfm2.py | # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch Lfm2 model."""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import (
require_torch,
require_torch_accelerator,
slow,
torch_device,
)
from ...causal_lm_tester import CausalLMModelTest, CausalLMModelTester
if is_torch_available():
import torch
from transformers import Lfm2ForCausalLM, Lfm2Model
from transformers.models.lfm2.modeling_lfm2 import Lfm2HybridConvCache
class Lfm2ModelTester(CausalLMModelTester):
    """Tester that builds tiny LFM2 configs/models for the shared causal-LM test suite."""

    if is_torch_available():
        base_model_class = Lfm2Model

    def __init__(
        self,
        parent,
        layer_types=None,
    ):
        """
        Args:
            parent: The unittest case driving this tester.
            layer_types: Per-layer block types; defaults to alternating
                ``["full_attention", "conv"]``.
        """
        super().__init__(parent)
        # Fix: use a None sentinel instead of a mutable default argument — a
        # default list would be shared (aliased) across all tester instances.
        self.layer_types = ["full_attention", "conv"] if layer_types is None else layer_types
@require_torch
class Lfm2ModelTest(CausalLMModelTest, unittest.TestCase):
    """Common-suite tests for LFM2, adapted for its hybrid attention/conv cache."""
    model_tester_class = Lfm2ModelTester
    # used in `test_torch_compile_for_training`
    _torch_compile_train_cls = Lfm2ForCausalLM if is_torch_available() else None
    def _check_past_key_values_for_generate(self, batch_size, past_key_values, seq_length, config):
        """Validate the hybrid cache: KV tensors for attention layers, conv states otherwise."""
        self.assertIsInstance(past_key_values, Lfm2HybridConvCache)
        # (batch, kv heads, seq_length, head_dim)
        num_heads = getattr(config, "num_key_value_heads", config.num_attention_heads)
        head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
        attention_shape = (batch_size, num_heads, seq_length, head_dim)
        conv_shape = (batch_size, config.hidden_size, config.conv_L_cache)
        for i in range(config.num_hidden_layers):
            if config.layer_types[i] == "full_attention":
                self.assertEqual(past_key_values.key_cache[i].shape, attention_shape)
                self.assertEqual(past_key_values.value_cache[i].shape, attention_shape)
            else:
                self.assertEqual(past_key_values.conv_cache[i].shape, conv_shape)
    def _check_caches_are_equal(self, cache1: Lfm2HybridConvCache, cache2: Lfm2HybridConvCache):
        """Assert two hybrid caches hold numerically identical key/value/conv states."""
        if not isinstance(cache1, Lfm2HybridConvCache) or not isinstance(cache2, Lfm2HybridConvCache):
            raise ValueError("The wrong cache is being used!")
        if not len(cache1) == len(cache2):
            raise ValueError("Both caches do not have the same number of layers.")
        num_layers = len(cache1)
        for idx in range(num_layers):
            torch.testing.assert_close(cache1.key_cache[idx], cache2.key_cache[idx])
            torch.testing.assert_close(cache1.value_cache[idx], cache2.value_cache[idx])
            torch.testing.assert_close(cache1.conv_cache[idx], cache2.conv_cache[idx])
    def test_attention_outputs(self):
        """Lfm2 alternates between attention and short-conv layers, so only the
        attention layers contribute entries to `outputs.attentions`."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        # force eager attention to support output attentions
        config._attn_implementation = "eager"
        seq_len = getattr(self.model_tester, "seq_length", None)
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class._from_config(config, attn_implementation="eager").to(torch_device).eval()
            config = model.config
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            # one attention tensor per "full_attention" layer only
            self.assertEqual(len(attentions), sum(layer == "full_attention" for layer in config.layer_types))
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config).to(torch_device).eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), sum(layer == "full_attention" for layer in config.layer_types))
            self.assertListEqual(list(attentions[0].shape[-3:]), [config.num_attention_heads, seq_len, seq_len])
            out_len = len(outputs)
            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config).to(torch_device).eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            self_attentions = outputs.attentions
            self.assertEqual(out_len + 1, len(outputs))
            self.assertEqual(len(self_attentions), sum(layer == "full_attention" for layer in config.layer_types))
            self.assertListEqual(list(self_attentions[0].shape[-3:]), [config.num_attention_heads, seq_len, seq_len])
@require_torch_accelerator
@slow
class Lfm2IntegrationTest(unittest.TestCase):
    # Placeholder for slow, accelerator-only integration tests; none defined yet.
    pass
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/lfm2/test_modeling_lfm2.py",
"license": "Apache License 2.0",
"lines": 106,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:src/transformers/models/deepseek_v2/modular_deepseek_v2.py | # Copyright 2025 HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections.abc import Callable
import torch
import torch.nn.functional as F
from torch import nn
from ... import initialization as init
from ...cache_utils import Cache
from ...modeling_rope_utils import RopeParameters, dynamic_rope_update
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...utils import logging
from ...utils.generic import is_flash_attention_requested, maybe_autocast
from ..llama.configuration_llama import LlamaConfig
from ..llama.modeling_llama import (
LlamaDecoderLayer,
LlamaForCausalLM,
LlamaForSequenceClassification,
LlamaMLP,
LlamaModel,
LlamaPreTrainedModel,
LlamaRMSNorm,
LlamaRotaryEmbedding,
eager_attention_forward,
)
from ..qwen2_moe.modeling_qwen2_moe import Qwen2MoeExperts
logger = logging.get_logger(__name__)
class DeepseekV2Config(LlamaConfig):
    r"""
    This is the configuration class to store the configuration of a [`DeepseekV2Model`]. It is used to instantiate a DeepSeek
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
    defaults will yield a similar configuration to that of DeepSeek-V2-Lite [deepseek-ai/DeepSeek-V2-Lite](https://huggingface.co/deepseek-ai/DeepSeek-V2-Lite).
    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.
    Args:
        vocab_size (`int`, *optional*, defaults to 32000):
            Vocabulary size of the DeepSeek model. Defines the number of different tokens that can be represented by the
            `input_ids` passed when calling [`DeepseekV2Model`].
        hidden_size (`int`, *optional*, defaults to 4096):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 11008):
            Dimension of the MLP representations.
        num_hidden_layers (`int`, *optional*, defaults to 32):
            Number of hidden layers in the Transformer decoder.
        num_attention_heads (`int`, *optional*, defaults to 32):
            Number of attention heads for each attention layer in the Transformer decoder.
        num_key_value_heads (`int`, *optional*):
            The number of key-value heads used to implement Grouped Query Attention (GQA). If
            `num_key_value_heads=num_attention_heads`, the model will use Multi-Head Attention (MHA). If
            `num_key_value_heads=1`, the model will use Multi-Query Attention (MQA). Otherwise, GQA is used.
        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the decoder.
        max_position_embeddings (`int`, *optional*, defaults to 2048):
            The maximum sequence length that this model might ever be used with.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated normal initializer for initializing all weight matrices.
        rms_norm_eps (`float`, *optional*, defaults to 1e-06):
            The epsilon value used by the RMS normalization layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/value attentions (useful for inference optimization).
        pad_token_id (`int`, *optional*):
            Padding token ID.
        bos_token_id (`int`, *optional*, defaults to 1):
            Beginning-of-sequence token ID.
        eos_token_id (`int`, *optional*, defaults to 2):
            End-of-sequence token ID.
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether to tie input and output embeddings.
        rope_parameters (`RopeParameters`, *optional*):
            Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain
            a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE
            with longer `max_position_embeddings`.
        attention_bias (`bool`, *optional*, defaults to `False`):
            Whether to use a bias in the query, key, value, and output projection layers during self-attention.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout probability applied to attention weights.
        mlp_bias (`bool`, *optional*, defaults to `False`):
            Whether to use a bias term in the MLP layers.
        first_k_dense_replace (`int`, *optional*, defaults to 0):
            Number of dense layers in the shallow layers before switching to MoE layers.
        kv_lora_rank (`int`, *optional*, defaults to 512):
            Rank of the LoRA decomposition for key-value projections.
        q_lora_rank (`int`, *optional*, defaults to 1536):
            Rank of the LoRA decomposition for query projections.
            Specifically, it determines the dimensionality to which the query (q) vectors are compressed before being expanded back to their original size.
            It reduces computational overhead while maintaining model performance.
        n_group (`int`, *optional*):
            Number of groups for routed experts.
        n_routed_experts (`int`, *optional*, defaults to 64):
            Number of routed experts (None indicates a dense model).
        n_shared_experts (`int`, *optional*, defaults to 2):
            Number of shared experts (None indicates a dense model).
        qk_nope_head_dim (`int`, *optional*, defaults to 128):
            The head dimension for the QK (query-key) projections when using NOPE (Neural Operator Position Encoding).
        qk_rope_head_dim (`int`, *optional*, defaults to 64):
            The head dimension for QK projections when using RoPE.
        routed_scaling_factor (`float`, *optional*, defaults to 1.0):
            Scaling factor for routed experts in MoE models.
        topk_group (`int`, *optional*):
            Number of selected groups per token for expert selection.
        topk_method (`str`, *optional*, defaults to `"greedy"`):
            The method used for selecting top-k experts in the routed gate mechanism.
        norm_topk_prob (`bool`, *optional*, defaults to `False`):
            Whether to renormalize the router probabilities when `top_k > 1`. This flag is kept for backward
            compatibility with previously released checkpoints and runtimes relying on the legacy DeepSeek config.
        v_head_dim (`int`, *optional*, defaults to 128):
            The dimension of value projections in the attention layers.
        num_experts_per_tok (`int`, *optional*):
            The number of experts selected per token. If `None`, the model behaves as a dense Transformer.
        moe_intermediate_size (`int`, *optional*, defaults to 1407):
            Dimension of the MoE (Mixture of Experts) representations.
    ```python
    >>> from transformers import DeepseekV2Model, DeepseekV2Config
    >>> # Initializing a DeepSeek-V2 style configuration
    >>> configuration = DeepseekV2Config()
    >>> # Accessing the model configuration
    >>> model = DeepseekV2Model(configuration)
    >>> print(model.config)
    ```
    """
    base_model_tp_plan = {
        "layers.*.self_attn.q_proj": "colwise",
        "layers.*.self_attn.q_a_proj": "colwise",
        "layers.*.self_attn.q_b_proj": "colwise",
        "layers.*.self_attn.kv_b_proj": "colwise",
        "layers.*.self_attn.o_proj": "rowwise",
        "layers.*.mlp.experts.gate_up_proj": "packed_colwise",
        "layers.*.mlp.experts.down_proj": "rowwise",
    }
    model_type = "deepseek_v2"
    keys_to_ignore_at_inference = ["past_key_values"]
    def __init__(
        self,
        vocab_size: int | None = 32000,
        hidden_size: int | None = 4096,
        intermediate_size: int | None = 11008,
        num_hidden_layers: int | None = 32,
        num_attention_heads: int | None = 32,
        num_key_value_heads: int | None = None,
        hidden_act: str | None = "silu",
        max_position_embeddings: int | None = 2048,
        initializer_range: float | None = 0.02,
        rms_norm_eps: float | None = 1e-6,
        use_cache: bool | None = True,
        pad_token_id: int | None = None,
        bos_token_id: int | None = 1,
        eos_token_id: int | None = 2,
        tie_word_embeddings: bool | None = False,
        rope_parameters: RopeParameters | dict[str, RopeParameters] | None = None,
        attention_bias: bool | None = False,
        attention_dropout: float | None = 0.0,
        mlp_bias: bool | None = False,
        first_k_dense_replace: int | None = 0,
        kv_lora_rank: int | None = 512,
        q_lora_rank: int | None = 1536,
        n_group: int | None = None,
        n_routed_experts: int | None = 64,
        n_shared_experts: int | None = 2,
        qk_nope_head_dim: int | None = 128,
        qk_rope_head_dim: int | None = 64,
        routed_scaling_factor: float | None = 1.0,
        topk_group: int | None = None,
        topk_method: str | None = "greedy",
        norm_topk_prob: bool | None = False,
        v_head_dim: int | None = 128,
        num_experts_per_tok: int | None = None,
        moe_intermediate_size: int | None = 1407,
        **kwargs,
    ):
        # DeepSeek-V2-specific (MLA + MoE) fields are recorded first; the
        # common transformer fields are handled by the Llama config below.
        self.first_k_dense_replace = first_k_dense_replace
        self.kv_lora_rank = kv_lora_rank
        self.q_lora_rank = q_lora_rank
        self.n_group = n_group
        self.n_routed_experts = n_routed_experts
        self.n_shared_experts = n_shared_experts
        self.qk_nope_head_dim = qk_nope_head_dim
        self.qk_rope_head_dim = qk_rope_head_dim
        self.routed_scaling_factor = routed_scaling_factor
        self.topk_group = topk_group
        self.topk_method = topk_method
        self.norm_topk_prob = norm_topk_prob
        self.v_head_dim = v_head_dim
        self.num_experts_per_tok = num_experts_per_tok
        self.moe_intermediate_size = moe_intermediate_size
        super().__init__(**kwargs)
        # The config-level head_dim follows the rotary sub-head dimension.
        self.head_dim = qk_rope_head_dim
        # Llama-only attribute with no meaning for DeepSeek-V2.
        del self.pretraining_tp
def apply_rotary_emb(
    xq: torch.Tensor,
    xk: torch.Tensor,
    freqs_cis: torch.Tensor,
) -> tuple[torch.Tensor, torch.Tensor]:
    """Rotate query and key tensors by complex rotary position frequencies.

    Consecutive feature pairs are reinterpreted as complex numbers, multiplied
    by the unit-magnitude entries of `freqs_cis`, and unpacked back to real
    values. Each output keeps the dtype of its corresponding input.
    """
    queries_complex = torch.view_as_complex(xq.float().reshape(*xq.shape[:-1], -1, 2))
    keys_complex = torch.view_as_complex(xk.float().reshape(*xk.shape[:-1], -1, 2))
    # Insert a broadcast head axis: [batch, 1, seq_len, dim // 2].
    rotations = freqs_cis.unsqueeze(1).to(queries_complex.device)
    rotated_queries = torch.view_as_real(queries_complex * rotations).flatten(3).type_as(xq)
    rotated_keys = torch.view_as_real(keys_complex * rotations).flatten(3).type_as(xk)
    return rotated_queries, rotated_keys
class DeepseekV2Experts(Qwen2MoeExperts):
    # Reuses the fused Qwen2-MoE expert implementation, overriding only the
    # expert count to DeepSeek's `n_routed_experts`.
    def __init__(self, config):
        super().__init__(config)
        # NOTE(review): `num_experts` is overwritten after the parent
        # constructor ran — presumably the parent sized its weights from an
        # equivalent config field; verify against Qwen2MoeExperts.__init__.
        self.num_experts = config.n_routed_experts
class DeepseekV2Moe(nn.Module):
def __init__(self, config: DeepseekV2Config):
super().__init__()
self.config = config
self.experts = DeepseekV2Experts(config)
self.gate = nn.Linear(config.hidden_size, config.n_routed_experts, bias=False)
if config.n_shared_experts is not None:
intermediate_size = config.moe_intermediate_size * config.n_shared_experts
self.shared_experts = DeepseekV2MLP(config=config, intermediate_size=intermediate_size)
self.routed_scaling_factor = config.routed_scaling_factor
self.topk_method = config.topk_method
self.num_group = config.n_group
self.top_k = config.num_experts_per_tok
self.topk_group = config.topk_group
def route_tokens_to_experts(self, router_logits):
batch_size, seq_len, hidden_dim = router_logits.shape
router_logits = router_logits.view(-1, hidden_dim)
router_logits = router_logits.softmax(dim=-1, dtype=torch.float32)
if self.topk_method == "greedy":
topk_weight, topk_idx = torch.topk(router_logits, k=self.top_k, dim=-1, sorted=False)
elif self.topk_method == "group_limited_greedy":
group_scores = router_logits.view(batch_size * seq_len, self.num_group, -1).max(dim=-1).values
group_idx = torch.topk(group_scores, k=self.topk_group, dim=-1, sorted=False)[1]
group_mask = torch.zeros_like(group_scores)
group_mask.scatter_(1, group_idx, 1)
score_mask = (
group_mask.unsqueeze(-1)
.expand(batch_size * seq_len, self.num_group, self.num_experts // self.num_group)
.reshape(batch_size * seq_len, -1)
)
tmp_scores = router_logits.masked_fill(~score_mask.bool(), 0.0)
topk_weight, topk_idx = torch.topk(tmp_scores, k=self.top_k, dim=-1, sorted=False)
topk_weight = topk_weight * self.routed_scaling_factor
return topk_idx, topk_weight
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
residuals = hidden_states
orig_shape = hidden_states.shape
router_logits = nn.functional.linear(hidden_states.type(torch.float32), self.gate.weight.type(torch.float32))
topk_indices, topk_weights = self.route_tokens_to_experts(router_logits)
hidden_states = hidden_states.view(-1, hidden_states.shape[-1])
hidden_states = self.experts(hidden_states, topk_indices, topk_weights).view(*orig_shape)
hidden_states = hidden_states + self.shared_experts(residuals)
return hidden_states
class DeepseekV2MLP(LlamaMLP):
    """Llama-style gated MLP whose hidden/intermediate sizes can be overridden
    per instance (e.g. the shared-expert MLP uses `moe_intermediate_size`)."""

    def __init__(self, config: DeepseekV2Config, hidden_size=None, intermediate_size=None):
        super().__init__(config)
        self.hidden_size = config.hidden_size if hidden_size is None else hidden_size
        self.intermediate_size = config.intermediate_size if intermediate_size is None else intermediate_size
        # Fix: the parent constructor already built the projections from the
        # config sizes, so the overrides above were silently ignored — a
        # custom `intermediate_size` never reached the actual layers. Rebuild
        # the projections whenever a custom size was requested.
        if hidden_size is not None or intermediate_size is not None:
            self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias)
            self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias)
            self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=config.mlp_bias)
class DeepseekV2RMSNorm(LlamaRMSNorm):
    # Identical to Llama's RMSNorm; aliased so the generated modeling file
    # carries DeepseekV2-prefixed class names.
    pass
class DeepseekV2RotaryEmbedding(LlamaRotaryEmbedding):
    """Rotary embedding that returns a single complex-valued `freqs_cis`
    tensor (consumed by `apply_rotary_emb`) instead of separate cos/sin."""
    @torch.no_grad()
    @dynamic_rope_update # power user: used with advanced RoPE types (e.g. dynamic rope)
    def forward(self, x, position_ids):
        # `x` is only used for its device; `position_ids` drive the angles.
        inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1)
        position_ids_expanded = position_ids[:, None, :].float()
        # Autocast is disabled so the angles stay in full float32 precision
        # ("mps" has no autocast support, hence the cpu fallback).
        device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
        with maybe_autocast(device_type=device_type, enabled=False): # Force float32
            freqs = (inv_freq_expanded.to(x.device) @ position_ids_expanded).transpose(1, 2)
            freqs_cis = torch.polar(torch.ones_like(freqs), freqs) # Convert to complex representation
            freqs_cis = freqs_cis * self.attention_scaling
        return freqs_cis
class DeepseekV2Attention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""
    def __init__(self, config: DeepseekV2Config, layer_idx: int | None = None):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        self.attention_dropout = config.attention_dropout
        self.hidden_size = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.head_dim = config.head_dim
        self.max_position_embeddings = config.max_position_embeddings
        # Low-rank (latent attention) compression ranks and sub-dimensions.
        self.q_lora_rank = config.q_lora_rank
        self.qk_rope_head_dim = config.qk_rope_head_dim
        self.kv_lora_rank = config.kv_lora_rank
        self.v_head_dim = config.v_head_dim
        self.qk_nope_head_dim = config.qk_nope_head_dim
        # Full per-head query/key dim = content ("nope") part + rotary part.
        self.qk_head_dim = config.qk_nope_head_dim + config.qk_rope_head_dim
        self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
        self.is_causal = True
        # Queries: either one dense projection or a low-rank
        # (down-project -> layernorm -> up-project) factorization.
        if self.q_lora_rank is None:
            self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.qk_head_dim, bias=False)
        else:
            self.q_a_proj = nn.Linear(self.hidden_size, config.q_lora_rank, bias=config.attention_bias)
            self.q_a_layernorm = DeepseekV2RMSNorm(config.q_lora_rank)
            self.q_b_proj = nn.Linear(config.q_lora_rank, self.num_heads * self.qk_head_dim, bias=False)
        # Keys/values share one compressed projection; the rotary key part is
        # produced alongside it, uncompressed.
        self.kv_a_proj_with_mqa = nn.Linear(
            self.hidden_size,
            config.kv_lora_rank + config.qk_rope_head_dim,
            bias=config.attention_bias,
        )
        self.kv_a_layernorm = DeepseekV2RMSNorm(config.kv_lora_rank)
        self.kv_b_proj = nn.Linear(
            config.kv_lora_rank,
            self.num_heads * (self.qk_head_dim - self.qk_rope_head_dim + self.v_head_dim),
            bias=False,
        )
        self.o_proj = nn.Linear(
            self.num_heads * self.v_head_dim,
            self.hidden_size,
            bias=config.attention_bias,
        )
        self.scaling = self.qk_head_dim ** (-0.5)
    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: torch.Tensor | None = None,
        past_key_values: Cache | None = None,
        cache_position: torch.LongTensor | None = None,
        # `position_embeddings` carries the complex `freqs_cis` tensor from
        # DeepseekV2RotaryEmbedding; the previous tuple annotation did not
        # match how the value is used below.
        position_embeddings: torch.Tensor | None = None,
        **kwargs,
    ) -> tuple[torch.Tensor, torch.Tensor | None]:
        """Run latent attention; returns `(attn_output, attn_weights)`."""
        batch_size, seq_length = hidden_states.shape[:-1]
        query_shape = (batch_size, seq_length, -1, self.qk_head_dim)
        key_shape = (batch_size, seq_length, -1, self.qk_nope_head_dim + self.v_head_dim)
        if self.q_lora_rank is None:
            q = self.q_proj(hidden_states)
        else:
            q = self.q_b_proj(self.q_a_layernorm(self.q_a_proj(hidden_states)))
        q = q.view(query_shape).transpose(1, 2)
        q_nope, q_pe = torch.split(q, [self.qk_nope_head_dim, self.qk_rope_head_dim], dim=-1)
        compressed_kv = self.kv_a_proj_with_mqa(hidden_states)
        k_nope, k_pe = torch.split(compressed_kv, [self.kv_lora_rank, self.qk_rope_head_dim], dim=-1)
        k_nope = self.kv_b_proj(self.kv_a_layernorm(k_nope)).view(key_shape).transpose(1, 2)
        # The decompressed projection packs the key content part and the
        # value states together; split them apart.
        k_nope, value_states = torch.split(k_nope, [self.qk_nope_head_dim, self.v_head_dim], dim=-1)
        k_pe = k_pe.view(batch_size, 1, seq_length, self.qk_rope_head_dim)
        q_pe, k_pe = apply_rotary_emb(q_pe, k_pe, position_embeddings.to(q_pe.device))
        # The rotary key part is shared across heads; broadcast it out.
        k_pe = k_pe.expand(*k_nope.shape[:-1], -1)
        query_states = torch.cat((q_nope, q_pe), dim=-1)
        key_states = torch.cat((k_nope, k_pe), dim=-1)
        if past_key_values is not None:
            # sin and cos are specific to RoPE models; cache_position needed for the static cache
            cache_kwargs = {"cache_position": cache_position}
            key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
        # Flash-attention kernels expect matching q/k/v head dims: pad the
        # values up to qk_head_dim here and slice the padding off again below.
        if is_flash_attention_requested(self.config) and self.qk_head_dim != self.v_head_dim:
            value_states = F.pad(value_states, [0, self.qk_head_dim - self.v_head_dim])
        attention_interface: Callable = ALL_ATTENTION_FUNCTIONS.get_interface(
            self.config._attn_implementation, eager_attention_forward
        )
        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=0.0 if not self.training else self.attention_dropout,
            scaling=self.scaling,
            **kwargs,
        )
        if is_flash_attention_requested(self.config) and self.qk_head_dim != self.v_head_dim:
            attn_output = attn_output[:, :, :, : self.v_head_dim]
        attn_output = attn_output.reshape(batch_size, seq_length, -1).contiguous()
        attn_output = self.o_proj(attn_output)
        return attn_output, attn_weights
class DeepseekV2DecoderLayer(LlamaDecoderLayer):
    """Decoder layer pairing latent attention with either a dense MLP or an MoE block."""

    def __init__(self, config: DeepseekV2Config, layer_idx: int):
        super().__init__(config, layer_idx)
        self.self_attn = DeepseekV2Attention(config=config, layer_idx=layer_idx)
        # The first `first_k_dense_replace` layers stay dense; later layers use MoE.
        if layer_idx >= config.first_k_dense_replace:
            self.mlp = DeepseekV2Moe(config)
        else:
            self.mlp = DeepseekV2MLP(config)
        self.input_layernorm = DeepseekV2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.post_attention_layernorm = DeepseekV2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
class DeepseekV2PreTrainedModel(LlamaPreTrainedModel):
    @torch.no_grad()
    def _init_weights(self, module):
        """Default initialization, plus normal init for the fused expert weights."""
        PreTrainedModel._init_weights(self, module)
        if not isinstance(module, DeepseekV2Experts):
            return
        std = self.config.initializer_range
        init.normal_(module.gate_up_proj, mean=0.0, std=std)
        init.normal_(module.down_proj, mean=0.0, std=std)
class DeepseekV2Model(LlamaModel):
    # Backbone identical to Llama's; the DeepSeek-specific attention/MoE
    # modules are swapped in via DeepseekV2DecoderLayer.
    pass
class DeepseekV2ForCausalLM(LlamaForCausalLM):
    # Causal-LM head reused from Llama without modification.
    pass
class DeepseekV2ForSequenceClassification(LlamaForSequenceClassification):
    # Sequence-classification head reused from Llama without modification.
    pass
__all__ = [
"DeepseekV2PreTrainedModel",
"DeepseekV2Model",
"DeepseekV2ForCausalLM",
"DeepseekV2ForSequenceClassification",
"DeepseekV2Config",
]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/deepseek_v2/modular_deepseek_v2.py",
"license": "Apache License 2.0",
"lines": 395,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:tests/models/deepseek_v2/test_modeling_deepseek_v2.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch DeepSeekV2 model."""
import unittest
from transformers import BitsAndBytesConfig, Cache, is_torch_available
from transformers.testing_utils import require_torch, require_torch_accelerator, slow, torch_device
from ...causal_lm_tester import CausalLMModelTest, CausalLMModelTester
if is_torch_available():
import torch
from transformers import AutoTokenizer, DeepseekV2ForCausalLM, DeepseekV2Model
from transformers.models.deepseek_v2.modeling_deepseek_v2 import DeepseekV2RotaryEmbedding
class DeepseekV2ModelTester(CausalLMModelTester):
    """Builds tiny DeepSeek-V2 configs/models for the shared causal-LM tests."""
    if is_torch_available():
        base_model_class = DeepseekV2Model
    def __init__(
        self,
        parent,
        n_routed_experts=8,
        kv_lora_rank=32,
        q_lora_rank=16,
        qk_nope_head_dim=64,
        qk_rope_head_dim=64,
    ):
        # Small latent-attention/MoE dimensions keep the test models fast.
        super().__init__(parent=parent)
        self.n_routed_experts = n_routed_experts
        self.kv_lora_rank = kv_lora_rank
        self.q_lora_rank = q_lora_rank
        self.qk_nope_head_dim = qk_nope_head_dim
        self.qk_rope_head_dim = qk_rope_head_dim
@require_torch
class DeepseekV2ModelTest(CausalLMModelTest, unittest.TestCase):
    """Common-suite tests for DeepSeek-V2, with overrides for its MLA cache
    shapes, complex-domain RoPE, and config-dependent TP plan."""
    test_all_params_have_gradient = False
    model_tester_class = DeepseekV2ModelTester
    model_split_percents = [0.5, 0.7, 0.8]
    # used in `test_torch_compile_for_training`
    _torch_compile_train_cls = DeepseekV2ForCausalLM if is_torch_available() else None
    def _check_past_key_values_for_generate(self, batch_size, past_key_values, seq_length, config):
        """Needs to be overridden as deepseek has special MLA cache format (though we don't really use the MLA)"""
        self.assertIsInstance(past_key_values, Cache)
        # (batch, head, seq_length, head_features)
        expected_common_shape = (
            batch_size,
            getattr(config, "num_key_value_heads", config.num_attention_heads),
            seq_length,
        )
        # Keys carry the nope + rope sub-dimensions; values use v_head_dim.
        expected_key_shape = expected_common_shape + (config.qk_nope_head_dim + config.qk_rope_head_dim,)
        expected_value_shape = expected_common_shape + (config.v_head_dim,)
        for layer in past_key_values.layers:
            self.assertEqual(layer.keys.shape, expected_key_shape)
            self.assertEqual(layer.values.shape, expected_value_shape)
    def test_model_rope_scaling_frequencies(self):
        """
        Overwritten: DeepseekV2 implements RoPE in the complex domain, as opposed to in the real domain with
        `sin` and `cos`. Nevertheless, the checks are the same as in the original test.
        """
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        scaling_factor = 10
        short_input_length = 10
        long_input_length = int(config.max_position_embeddings * 1.5)
        # Inputs
        x = torch.randn(
            1, dtype=torch.float32, device=torch_device
        ) # used exclusively to get the dtype and the device
        position_ids_short = torch.arange(short_input_length, dtype=torch.long, device=torch_device)
        position_ids_short = position_ids_short.unsqueeze(0)
        position_ids_long = torch.arange(long_input_length, dtype=torch.long, device=torch_device)
        position_ids_long = position_ids_long.unsqueeze(0)
        # Sanity check original RoPE
        original_rope = DeepseekV2RotaryEmbedding(config=config).to(torch_device)
        original_freqs_cis_short = original_rope(x, position_ids_short)
        original_freqs_cis_long = original_rope(x, position_ids_long)
        torch.testing.assert_close(original_freqs_cis_short, original_freqs_cis_long[:, :short_input_length, :])
        # Sanity check linear RoPE scaling
        # New position "x" should match original position with index "x/scaling_factor"
        config.rope_parameters = {"rope_type": "linear", "rope_theta": 10000.0, "factor": scaling_factor}
        linear_scaling_rope = DeepseekV2RotaryEmbedding(config=config).to(torch_device)
        linear_freqs_cis_short = linear_scaling_rope(x, position_ids_short)
        linear_freqs_cis_long = linear_scaling_rope(x, position_ids_long)
        torch.testing.assert_close(linear_freqs_cis_short, linear_freqs_cis_long[:, :short_input_length, :])
        # Sanity check Dynamic NTK RoPE scaling
        # Scaling should only be observed after a long input is fed. We can observe that the frequencies increase
        # with scaling_factor (or that `inv_freq` decreases)
        config.rope_parameters = {"rope_type": "dynamic", "rope_theta": 10000.0, "factor": scaling_factor}
        ntk_scaling_rope = DeepseekV2RotaryEmbedding(config=config).to(torch_device)
        ntk_freqs_cis_short = ntk_scaling_rope(x, position_ids_short)
        ntk_freqs_cis_long = ntk_scaling_rope(x, position_ids_long)
        torch.testing.assert_close(ntk_freqs_cis_short, original_freqs_cis_short)
        with self.assertRaises(AssertionError):
            torch.testing.assert_close(ntk_freqs_cis_long, original_freqs_cis_long)
        self.assertTrue((ntk_scaling_rope.inv_freq <= original_rope.inv_freq).all())
        # Sanity check Yarn RoPE scaling
        # Scaling should be over the entire input
        config.rope_parameters = {"rope_type": "yarn", "rope_theta": 10000.0, "factor": scaling_factor}
        yarn_scaling_rope = DeepseekV2RotaryEmbedding(config=config).to(torch_device)
        yarn_freqs_cis_short = yarn_scaling_rope(x, position_ids_short)
        yarn_freqs_cis_long = yarn_scaling_rope(x, position_ids_long)
        torch.testing.assert_close(yarn_freqs_cis_short, yarn_freqs_cis_long[:, :short_input_length, :])
        with self.assertRaises(AssertionError):
            torch.testing.assert_close(yarn_freqs_cis_short, original_freqs_cis_short)
        with self.assertRaises(AssertionError):
            torch.testing.assert_close(yarn_freqs_cis_long, original_freqs_cis_long)
    def test_tp_plan_matches_params(self):
        """Need to overwrite as the plan contains keys that are valid but depend on some configs flags and cannot
        be valid all at the same time"""
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        # The key is valid but not always used based on the flag
        if config.q_lora_rank is not None:
            config.base_model_tp_plan.pop("layers.*.self_attn.q_proj")
        super().test_tp_plan_matches_params()
        # Put them back in class attribute
        config.base_model_tp_plan.update({"layers.*.self_attn.q_proj": "colwise"})
@slow
@require_torch_accelerator
class DeepseekV2IntegrationTest(unittest.TestCase):
def test_deepseek_v2_lite(self):
EXPECTED_TEXT = ['An attention function can be described as mapping a query and a set of key-value pairs to an output, where the query, keys, values, and output are all vectors.\n\nAttention functions are used in a variety of applications, including natural language processing, computer vision, and reinforcement learning.\n\nThe attention function is a function that takes a query and a set of key-value pairs as input and outputs a vector'] # fmt: skip
tokenizer = AutoTokenizer.from_pretrained("deepseek-ai/DeepSeek-V2-Lite")
model = DeepseekV2ForCausalLM.from_pretrained(
"deepseek-ai/DeepSeek-V2-Lite",
device_map=torch_device,
dtype=torch.bfloat16,
quantization_config=BitsAndBytesConfig(load_in_8bit=True),
)
input_text = [
"An attention function can be described as mapping a query and a set of key-value pairs to an output, where the query, keys, values, and output are all vectors." # fmt: skip
]
model_inputs = tokenizer(input_text, return_tensors="pt").to(model.device)
generated_ids = model.generate(**model_inputs, max_new_tokens=50, do_sample=False)
generated_text = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
self.assertEqual(generated_text, EXPECTED_TEXT)
def test_logits_eager(self):
input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
model = DeepseekV2ForCausalLM.from_pretrained(
"deepseek-ai/DeepSeek-V2-Lite",
device_map=torch_device,
dtype=torch.bfloat16,
quantization_config=BitsAndBytesConfig(load_in_8bit=True),
attn_implementation="eager",
)
with torch.no_grad():
out = model(torch.tensor([input_ids]).to(torch_device))
EXPECTED_MEAN = torch.tensor([[-6.1232, -5.0952, -4.4493, -2.6536, -2.0608, -2.3991, -3.8013, -2.8681]], device=torch_device) # fmt: skip
torch.testing.assert_close(out.logits.float().mean(-1), EXPECTED_MEAN, atol=1e-3, rtol=1e-3)
EXPECTED_SLICE = torch.tensor([-1.2500, -0.9961, -0.0194, -3.1562, 1.2812, -2.7656, -0.8438, -3.0469, -2.7812, -0.6328, -0.4160, -1.9688, -2.4219, -1.0391, -3.8906], device=torch_device) # fmt: skip
torch.testing.assert_close(out.logits[0, 0, :15].float(), EXPECTED_SLICE, atol=1e-3, rtol=1e-3)
def test_batch_fa2(self):
    """Batched greedy generation with right padding matches the golden continuations."""
    # NOTE(review): despite the name, no flash-attention implementation is requested
    # explicitly here — confirm the default attn implementation is the intended one.
    EXPECTED_TEXT = [
        "Simply put, the theory of relativity states that \nthe laws of physics are the same for all observers, regardless of their \nrelative motion.\nThe theory of relativity is a theory of space, time, and gravity.\nThe theory of",  # fmt: skip
        "My favorite all time favorite condiment is ketchup. I love ketchup. I love ketchup on my hot dogs, hamburgers, french fries, and even on my eggs. I love ketchup. I love ketchup so much that I",  # fmt: skip
    ]
    prompts = [
        "Simply put, the theory of relativity states that ",
        "My favorite all time favorite condiment is ketchup.",
    ]

    tokenizer = AutoTokenizer.from_pretrained(
        "deepseek-ai/DeepSeek-V2-Lite", pad_token="</s>", padding_side="right"
    )
    model = DeepseekV2ForCausalLM.from_pretrained(
        "deepseek-ai/DeepSeek-V2-Lite",
        device_map=torch_device,
        dtype=torch.bfloat16,
        quantization_config=BitsAndBytesConfig(load_in_8bit=True),
    )

    batch = tokenizer(prompts, return_tensors="pt", padding=True).to(model.device)
    generated_ids = model.generate(**batch, max_new_tokens=40, do_sample=False)
    decoded = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
    self.assertEqual(EXPECTED_TEXT, decoded)
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/deepseek_v2/test_modeling_deepseek_v2.py",
"license": "Apache License 2.0",
"lines": 178,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:examples/modular-transformers/modular_duplicated_method.py | from transformers.models.llama.configuration_llama import LlamaConfig
class DuplicatedMethodConfig(LlamaConfig):
    """Config exposing ``vocab_size`` as a property/setter pair (overriding the plain attribute)."""

    @property
    def vocab_size(self):
        # Fixed value exposed through the property getter.
        return 45

    @vocab_size.setter
    def vocab_size(self, value):
        # Bug fix: the original assigned to ``self.vocab_size``, which re-enters this
        # setter and recurses until RecursionError. Store under a private name instead.
        self._vocab_size = value
| {
"repo_id": "huggingface/transformers",
"file_path": "examples/modular-transformers/modular_duplicated_method.py",
"license": "Apache License 2.0",
"lines": 8,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
huggingface/transformers:src/transformers/models/aimv2/convert_aimv2_original_pytorch_to_hf.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import gc
import os
import re
import torch
from huggingface_hub import snapshot_download
from safetensors import safe_open
from transformers import (
Aimv2Config,
Aimv2Model,
Aimv2VisionConfig,
Aimv2VisionModel,
AutoImageProcessor,
AutoProcessor,
)
# Regex rename rules (original checkpoint key -> HF key) for vision-only checkpoints.
# `(\d+)` captures the transformer block index; `\1` re-inserts it on the HF side.
ORIGINAL_TO_CONVERTED_KEY_MAPPING_VISION_MODEL = {
    # Embeddings
    r"preprocessor.patchifier.proj": r"embeddings.patch_embed",
    r"preprocessor.pos_embed": r"embeddings.position_embedding.weight",
    r"preprocessor.patchifier.norm.weight": r"embeddings.rms_norm.weight",
    # Encoder Layers
    r"trunk.blocks.(\d+).attn.qkv": r"encoder.layers.\1.attention.qkv",
    r"trunk.blocks.(\d+).attn.proj": r"encoder.layers.\1.attention.out_proj",
    r"trunk.blocks.(\d+).mlp.fc1": r"encoder.layers.\1.ffn.gate_proj",
    r"trunk.blocks.(\d+).mlp.fc2": r"encoder.layers.\1.ffn.down_proj",
    r"trunk.blocks.(\d+).mlp.fc3": r"encoder.layers.\1.ffn.up_proj",
    # Normalization Layers
    r"trunk.blocks.(\d+).norm_1": r"encoder.layers.\1.rms_norm1",
    r"trunk.blocks.(\d+).norm_2": r"encoder.layers.\1.rms_norm2",
    # Final Norm
    r"trunk.post_trunk_norm": r"rms_norm",
}
# Regex rename rules for the full (vision + text, "-lit") checkpoint; same capture/backref
# scheme as the vision-only table above, with tower prefixes added.
ORIGINAL_TO_CONVERTED_KEY_MAPPING = {
    # Vision Embeddings
    r"image_encoder.preprocessor.patchifier.proj": r"vision_model.embeddings.patch_embed",
    r"image_encoder.preprocessor.pos_embed": r"vision_model.embeddings.position_embedding.weight",
    r"image_encoder.preprocessor.patchifier.norm.weight": r"vision_model.embeddings.rms_norm.weight",
    # Vision Encoder Layers
    r"image_encoder.trunk.blocks.(\d+).attn.qkv": r"vision_model.encoder.layers.\1.attention.qkv",
    r"image_encoder.trunk.blocks.(\d+).attn.proj": r"vision_model.encoder.layers.\1.attention.out_proj",
    r"image_encoder.trunk.blocks.(\d+).mlp.fc1": r"vision_model.encoder.layers.\1.ffn.gate_proj",
    r"image_encoder.trunk.blocks.(\d+).mlp.fc2": r"vision_model.encoder.layers.\1.ffn.down_proj",
    r"image_encoder.trunk.blocks.(\d+).mlp.fc3": r"vision_model.encoder.layers.\1.ffn.up_proj",
    # Normalization Layers
    r"image_encoder.trunk.blocks.(\d+).norm_1": r"vision_model.encoder.layers.\1.rms_norm1",
    r"image_encoder.trunk.blocks.(\d+).norm_2": r"vision_model.encoder.layers.\1.rms_norm2",
    r"image_encoder.trunk.post_trunk_norm": r"vision_model.rms_norm",
    r"image_projector": r"visual_projection",
    # Vision Head
    r"image_encoder.head.cls_token": r"vision_model.head.cls_token",
    r"image_encoder.head.k": r"vision_model.head.k_proj",
    r"image_encoder.head.v": r"vision_model.head.v_proj",
    r"image_encoder.head.linear": r"vision_model.head.output_proj",
    # Text Embeddings
    r"text_encoder.preprocessor.text_embedding.weight": r"text_model.embeddings.token_embedding.weight",
    r"text_encoder.preprocessor.positional_embedding": r"text_model.embeddings.position_embedding.weight",
    # Text Encoder Layers
    r"text_encoder.trunk.blocks.(\d+).attn.qkv": r"text_model.encoder.layers.\1.attention.qkv",
    r"text_encoder.trunk.blocks.(\d+).attn.proj": r"text_model.encoder.layers.\1.attention.out_proj",
    r"text_encoder.trunk.blocks.(\d+).mlp.fc1": r"text_model.encoder.layers.\1.ffn.gate_proj",
    r"text_encoder.trunk.blocks.(\d+).mlp.fc2": r"text_model.encoder.layers.\1.ffn.down_proj",
    r"text_encoder.trunk.blocks.(\d+).mlp.fc3": r"text_model.encoder.layers.\1.ffn.up_proj",
    # Text Normalization Layers
    r"text_encoder.trunk.blocks.(\d+).norm_1": r"text_model.encoder.layers.\1.rms_norm1",
    r"text_encoder.trunk.blocks.(\d+).norm_2": r"text_model.encoder.layers.\1.rms_norm2",
    r"text_encoder.trunk.post_trunk_norm": r"text_model.rms_norm",
    r"text_projector": r"text_projection",
    r"log_logit_scale": r"logit_scale",
}
def load_original_state_dict(model_id: str, revision: str | None = None) -> dict[str, torch.Tensor]:
    """Fetch ``model.safetensors`` from the Hub and return its tensors keyed by name (on CPU)."""
    # Download only the model.safetensors file
    directory_path = snapshot_download(
        repo_id=model_id,
        revision=revision,
        allow_patterns=["model.safetensors"],
    )
    safetensor_path = f"{directory_path}/model.safetensors"
    with safe_open(safetensor_path, framework="pt", device="cpu") as f:
        return {key: f.get_tensor(key) for key in f.keys()}
def convert_old_keys_to_new_keys(state_dict_keys: dict, ORIGINAL_TO_CONVERTED_KEY_MAPPING: dict):
    """Converts state dict keys from the old format to the new format.

    Returns a dict mapping each original key to its converted name ({} if input is None).
    """
    if state_dict_keys is None:
        return {}
    # Join all keys into one newline-separated blob so each regex is applied once globally.
    old_text = "\n".join(state_dict_keys)
    new_text = old_text
    for pattern, replacement in ORIGINAL_TO_CONVERTED_KEY_MAPPING.items():
        # A ``None`` replacement marks keys to drop: they map to an empty string.
        new_text = re.sub(pattern, "" if replacement is None else replacement, new_text)
    return dict(zip(old_text.split("\n"), new_text.split("\n")))
def split_qkv_tensor(key, tensor):
    """Splits a fused qkv tensor into separate q, k, v tensors and renames the key accordingly."""
    # The fused projection stacks q, k, v along dim 0 in that order.
    chunk = tensor.shape[0] // 3
    q, k, v = torch.split(tensor, chunk, dim=0)
    return {
        key.replace("qkv", "q_proj"): q,
        key.replace("qkv", "k_proj"): k,
        key.replace("qkv", "v_proj"): v,
    }
def get_model_config_mapping(model_id: str):
    """Determines the correct model, config, and key mappings based on the checkpoint name."""
    # Only the "-lit" checkpoint ships both towers (text + vision); everything else is vision-only.
    if model_id != "apple/aimv2-large-patch14-224-lit":
        return Aimv2VisionModel, Aimv2VisionConfig, ORIGINAL_TO_CONVERTED_KEY_MAPPING_VISION_MODEL
    return Aimv2Model, Aimv2Config, ORIGINAL_TO_CONVERTED_KEY_MAPPING
def write_model(
    hf_repo_id: str,
    output_dir: str,
):
    """
    Converts a model checkpoint to Hugging Face format and saves it.

    Args:
        hf_repo_id (str): The Hugging Face repo ID to load from.
        output_dir (str): The directory to save the converted model.

    Returns:
        model: The reloaded Hugging Face model.
    """
    os.makedirs(output_dir, exist_ok=True)

    # Get the appropriate model, config, and key mapping
    model_class, config_class, key_mapping = get_model_config_mapping(hf_repo_id)

    # Load config and original state dict
    config = config_class.from_pretrained(hf_repo_id)

    # Checkpoint `apple/aimv2-large-patch14-224-lit` uses AttentionPoolingHead hence set the required attr in config.
    if hf_repo_id != "apple/aimv2-large-patch14-224-lit":
        config.use_head = False
    if hf_repo_id == "apple/aimv2-large-patch14-native":
        config.is_native = True

    original_state_dict = load_original_state_dict(hf_repo_id)

    print("Converting model...")
    state_dict = {}
    # Precompute the old-name -> new-name mapping for every key in the checkpoint;
    # keys are popped as they are consumed below.
    result = convert_old_keys_to_new_keys(original_state_dict, key_mapping)
    all_keys = list(original_state_dict.keys())

    for key in all_keys:
        value = original_state_dict[key]
        new_key = result.pop(key)
        if "qkv" in new_key:
            # Fused qkv weights are split into separate q/k/v projection tensors.
            qkv_state_dict = split_qkv_tensor(new_key, value)
            state_dict.update(qkv_state_dict)
        else:
            state_dict[new_key] = value

        # Check if position embeddings exist before squeezing
        # (original checkpoints store them with a leading batch dim of 1).
        if new_key.endswith("position_embedding.weight"):
            state_dict[new_key] = value.squeeze(0)

    print(f"Loading the checkpoint in a {model_class.__name__}.")
    model = model_class(config)
    # strict=True ensures the conversion covered every parameter; assign=True avoids a copy.
    model.load_state_dict(state_dict, strict=True, assign=True)
    print("Checkpoint loaded successfully.")

    print("Saving the model.")
    model.save_pretrained(output_dir)
    del state_dict, model

    # Free memory before reloading the saved model as a round-trip sanity check.
    gc.collect()
    print("Reloading the model to check if it's saved correctly.")
    model = model_class.from_pretrained(output_dir, device_map="auto")
    print("Model reloaded successfully.")
    return model
def write_image_processor(hf_repo_id: str, output_dir: str):
    """Save the processor matching `hf_repo_id` into `output_dir` and return it."""
    # The "-lit" checkpoint ships a full processor (tokenizer + image processor);
    # vision-only checkpoints ship just an image processor.
    if hf_repo_id == "apple/aimv2-large-patch14-224-lit":
        processor = AutoProcessor.from_pretrained(hf_repo_id, use_fast=True)
    else:
        processor = AutoImageProcessor.from_pretrained(hf_repo_id, use_fast=True)
    processor.save_pretrained(output_dir)
    return processor
def main():
    """CLI entry point: convert a checkpoint, save its processor, optionally push to the Hub."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        "--hf_repo_id",
        default="apple/aimv2-large-patch14-224",
        help="Location of official weights from apple on HF",
    )
    arg_parser.add_argument(
        "--output_dir",
        default="aimv2_model",
        help="Location to write the converted model and processor",
    )
    arg_parser.add_argument(
        "--push_to_hub",
        action=argparse.BooleanOptionalAction,
        help="Whether or not to push the converted model to the huggingface hub.",
    )
    arg_parser.add_argument(
        "--hub_repo_id",
        default=None,
        help="Huggingface hub repo to write the converted model and processor",
    )
    args = arg_parser.parse_args()

    converted_model = write_model(hf_repo_id=args.hf_repo_id, output_dir=args.output_dir)
    processor = write_image_processor(hf_repo_id=args.hf_repo_id, output_dir=args.output_dir)

    if args.push_to_hub:
        print("Pushing to hub...")
        converted_model.push_to_hub(args.hub_repo_id)
        processor.push_to_hub(args.hub_repo_id)


if __name__ == "__main__":
    main()
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/aimv2/convert_aimv2_original_pytorch_to_hf.py",
"license": "Apache License 2.0",
"lines": 214,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/aimv2/modular_aimv2.py | # Copyright 2025 Apple Inc. and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pytorch implementation of AIMv2 Model"""
import math
import torch
import torch.nn.functional as F
from torch import nn
from ... import initialization as init
from ...masking_utils import create_causal_mask
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling
from ...modeling_utils import PreTrainedModel
from ...processing_utils import Unpack
from ...utils import (
TransformersKwargs,
auto_docstring,
can_return_tuple,
)
from ...utils.generic import merge_with_config_defaults
from ...utils.output_capturing import capture_outputs
from ..clip.modeling_clip import CLIPModel, CLIPTextEmbeddings, _get_vector_norm
from ..llama.modeling_llama import LlamaMLP, LlamaRMSNorm
from ..siglip.configuration_siglip import SiglipConfig, SiglipTextConfig, SiglipVisionConfig
from ..siglip.modeling_siglip import SiglipAttention, SiglipEncoder, SiglipOutput
class Aimv2VisionConfig(SiglipVisionConfig):
    r"""
    This is the configuration class to store the configuration of a [`Aimv2VisionModel`]. It is used to instantiate a
    AIMv2 vision encoder according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the vision encoder of the AIMv2
    [apple/aimv2-large-patch14-224](https://huggingface.co/apple/aimv2-large-patch14-224) architecture.

    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.

    Args:
        hidden_size (`int`, *optional*, defaults to 1024):
            Dimensionality of the encoder layers and the pooler layer.
        intermediate_size (`int`, *optional*, defaults to 2816):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        num_hidden_layers (`int`, *optional*, defaults to 24):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 8):
            Number of attention heads for each attention layer in the Transformer encoder.
        num_channels (`int`, *optional*, defaults to 3):
            Number of channels in the input images.
        image_size (`int`, *optional*, defaults to 224):
            The size (resolution) of each image.
        patch_size (`int`, *optional*, defaults to 14):
            The size (resolution) of each patch.
        rms_norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon used by the rms normalization layers.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        qkv_bias (`bool`, *optional*, defaults to `False`):
            Whether to add a bias to the queries, keys and values.
        mlp_bias (`bool`, *optional*, defaults to `False`):
            Whether to add a bias to the Linear layers or Not.
        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"` and `"gelu_new"` `"quick_gelu"` are supported.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation used for initializing all weight matrices.
        use_head (`bool`, *optional*, defaults to `True`):
            Whether to use Attention Pooling Head or Not.
        is_native (`bool`, *optional*, defaults to `False`):
            Whether to use ckpt trained for image native resolution or not.

    Example:

    ```python
    >>> from transformers import Aimv2VisionConfig, Aimv2VisionModel

    >>> # Initializing a Aimv2VisionConfig with apple/aimv2-large-patch14-224 style configuration
    >>> configuration = Aimv2VisionConfig()

    >>> # Initializing a Aimv2VisionModel (with random weights) from the apple/aimv2-large-patch14-224 style configuration
    >>> model = Aimv2VisionModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    def __init__(
        self,
        hidden_size: int = 1024,
        intermediate_size: int = 2816,
        num_hidden_layers: int = 24,
        num_attention_heads: int = 8,
        num_channels: int = 3,
        image_size: int = 224,
        patch_size: int = 14,
        rms_norm_eps: float = 1e-5,
        attention_dropout: float = 0.0,
        qkv_bias: bool = False,
        mlp_bias: bool = False,
        hidden_act: str = "silu",
        initializer_range: float = 0.02,
        use_head: bool = True,
        is_native: bool = False,
        **kwargs,
    ):
        super().__init__(
            hidden_size=hidden_size,
            intermediate_size=intermediate_size,
            num_hidden_layers=num_hidden_layers,
            num_attention_heads=num_attention_heads,
            hidden_act=hidden_act,
            num_channels=num_channels,
            image_size=image_size,
            patch_size=patch_size,
            qkv_bias=qkv_bias,
            **kwargs,
        )
        self.use_head = use_head
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.mlp_bias = mlp_bias
        self.qkv_bias = qkv_bias
        self.rms_norm_eps = rms_norm_eps
        self.is_native = is_native

        # AIMv2 uses RMSNorm, so the LayerNorm epsilon inherited from Siglip is dropped.
        del self.layer_norm_eps
class Aimv2TextConfig(SiglipTextConfig):
    r"""
    This is the configuration class to store the configuration of a [`Aimv2TextModel`]. It is used to instantiate a
    AIMv2 text encoder according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the text encoder of the AIMv2
    [apple/aimv2-large-patch14-224-lit](https://huggingface.co/apple/aimv2-large-patch14-224-lit) architecture.

    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 49408):
            Vocabulary size of the AIMv2 text model. Defines the number of different tokens that can be represented by
            the `inputs_ids` passed when calling [`Aimv2Model`].
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        intermediate_size (`int`, *optional*, defaults to 2048):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 6):
            Number of attention heads for each attention layer in the Transformer encoder.
        rms_norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon used by the rms normalization layers.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        qkv_bias (`bool`, *optional*, defaults to `False`):
            Whether to add a bias to the queries, keys and values.
        mlp_bias (`bool`, *optional*, defaults to `False`):
            Whether to add a bias to the Linear layers or Not.
        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"` and `"gelu_new"` `"quick_gelu"` are supported.
        eos_token_id (`int`, *optional*, defaults to 49407):
            The id of the end-of-sequence token in the vocabulary.
        max_position_embeddings (`int`, *optional*, defaults to 77):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation used for initializing all weight matrices.
    """

    def __init__(
        self,
        vocab_size: int = 49408,
        hidden_size: int = 768,
        intermediate_size: int = 2048,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 6,
        rms_norm_eps: float = 1e-5,
        attention_dropout: float = 0.0,
        qkv_bias: bool = False,
        mlp_bias: bool = False,
        hidden_act: str = "silu",
        eos_token_id: int = 49407,
        max_position_embeddings: int = 77,
        initializer_range: float = 0.02,
        **kwargs,
    ):
        super().__init__(
            vocab_size=vocab_size,
            hidden_size=hidden_size,
            intermediate_size=intermediate_size,
            num_hidden_layers=num_hidden_layers,
            num_attention_heads=num_attention_heads,
            hidden_act=hidden_act,
            max_position_embeddings=max_position_embeddings,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.mlp_bias = mlp_bias
        self.qkv_bias = qkv_bias
        self.rms_norm_eps = rms_norm_eps

        # Attributes inherited from SiglipTextConfig that AIMv2 does not use.
        del self.bos_token_id
        del self.pad_token_id
        del self.projection_size
        del self.layer_norm_eps
class Aimv2Config(SiglipConfig):
    r"""
    [`Aimv2Config`] is the configuration class to store the configuration of a [`Aimv2Model`]. It is used to
    instantiate a AIMv2 model according to the specified arguments, defining the text model and vision model configs.
    Instantiating a configuration with the defaults will yield a similar configuration to that of the AIMv2
    [apple/aimv2-large-patch14-224-lit](https://huggingface.co/apple/aimv2-large-patch14-224-lit) architecture.

    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.

    Args:
        text_config (`dict`, *optional*):
            Dictionary of configuration options used to initialize [`Aimv2TextConfig`].
        vision_config (`dict`, *optional*):
            Dictionary of configuration options used to initialize [`Aimv2VisionConfig`].
        projection_dim (`int`, *optional*, defaults to 512):
            Dimensionality of text and vision projection layers.
        logit_scale_init_value (`float`, *optional*, defaults to 2.6592):
            The initial value of the *logit_scale* parameter.
        kwargs (*optional*):
            Dictionary of keyword arguments.

    Example:

    ```python
    >>> from transformers import Aimv2Config, Aimv2Model

    >>> # Initializing a Aimv2Config with apple/aimv2-large-patch14-224-lit style configuration
    >>> configuration = Aimv2Config()

    >>> # Initializing a Aimv2Model (with random weights) from the apple/aimv2-large-patch14-224-lit style configuration
    >>> model = Aimv2Model(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config

    >>> # We can also initialize a Aimv2Config from a Aimv2TextConfig and a Aimv2VisionConfig
    >>> from transformers import Aimv2TextConfig, Aimv2VisionConfig

    >>> # Initializing a AIMv2Text and AIMv2Vision configuration
    >>> config_text = Aimv2TextConfig()
    >>> config_vision = Aimv2VisionConfig()

    >>> config = Aimv2Config(text_config=config_text, vision_config=config_vision)
    ```"""

    def __init__(
        self, text_config=None, vision_config=None, projection_dim=512, logit_scale_init_value=2.6592, **kwargs
    ):
        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        # Upper bound for the learned logit scale; its log is precomputed in `Aimv2Model.__init__`.
        self.max_logit_scale = 100.0
        super().__init__(text_config, vision_config, **kwargs)

        # AIMv2 does not use CLIP/Siglip's `initializer_factor` scaling.
        del self.initializer_factor
class Aimv2Output(SiglipOutput):
    # Re-exported under the Aimv2 name, identical to SiglipOutput (modular-transformers pattern).
    pass
class Aimv2RMSNorm(LlamaRMSNorm):
    # Re-exported under the Aimv2 name, identical to LlamaRMSNorm (modular-transformers pattern).
    pass
class Aimv2MLP(LlamaMLP):
    # Re-exported under the Aimv2 name, identical to LlamaMLP (gated SiLU FFN).
    pass
class Aimv2VisionEmbeddings(nn.Module):
    """Patch + position embeddings for the AIMv2 vision tower."""

    def __init__(self, config: Aimv2VisionConfig):
        super().__init__()
        self.config = config
        self.patch_size = config.patch_size
        # Non-overlapping conv patchifier: (B, C, H, W) -> (B, hidden, H/p, W/p).
        self.patch_embed = nn.Conv2d(
            config.num_channels, config.hidden_size, kernel_size=config.patch_size, stride=config.patch_size
        )
        self.rms_norm = Aimv2RMSNorm(config.hidden_size, config.rms_norm_eps)

        num_patches = (config.image_size // config.patch_size) ** 2
        # Native-resolution checkpoints build sincos embeddings on the fly in `forward`
        # and therefore register neither the learned table nor the `position_ids` buffer.
        if not self.config.is_native:
            self.position_embedding = nn.Embedding(num_patches, config.hidden_size)
            self.register_buffer("position_ids", torch.arange(num_patches).expand((1, -1)), persistent=False)

    @staticmethod
    def build_2d_sincos_position_embedding(
        height, width, embed_dim=256, temperature=10000.0, device="cpu", dtype=torch.float32
    ) -> torch.Tensor:
        """Build a (1, height*width, embed_dim) 2D sin/cos position embedding."""
        grid_w = torch.arange(int(width), dtype=dtype, device=device)
        grid_h = torch.arange(int(height), dtype=dtype, device=device)
        # NOTE(review): arguments are passed as (w, h) but bound to names (h, w) with
        # indexing="xy" — this mirrors the original conversion; confirm before "fixing".
        grid_h, grid_w = torch.meshgrid(grid_w, grid_h, indexing="xy")
        # A quarter of the channels each for sin/cos of the h and w coordinates.
        pos_dim = embed_dim // 4
        omega = torch.arange(pos_dim, dtype=dtype, device=device) / pos_dim
        omega = 1.0 / (temperature**omega)
        out_h = grid_h.flatten()[..., None] @ omega[None, :]
        out_w = grid_w.flatten()[..., None] @ omega[None, :]
        return torch.concat([out_h.sin(), out_h.cos(), out_w.sin(), out_w.cos()], dim=1)[None, :, :]

    def forward(self, pixel_values: torch.Tensor) -> torch.Tensor:
        _, _, height, width = pixel_values.size()
        # (B, C, H, W) -> (B, num_patches, hidden).
        hidden_states = self.patch_embed(pixel_values).flatten(2).transpose(1, 2)
        hidden_states = self.rms_norm(hidden_states)

        if self.config.is_native:
            pos_embed = self.build_2d_sincos_position_embedding(
                height // self.patch_size,
                width // self.patch_size,
                embed_dim=self.config.hidden_size,
                device=hidden_states.device,
                dtype=hidden_states.dtype,
            )
        else:
            pos_embed = self.position_embedding(self.position_ids)

        hidden_states = hidden_states + pos_embed
        return hidden_states
class Aimv2TextEmbeddings(CLIPTextEmbeddings):
    # Re-exported under the Aimv2 name, identical to CLIPTextEmbeddings (token + position tables).
    pass
class Aimv2Attention(SiglipAttention):
    """Siglip attention with every projection's bias controlled by `config.qkv_bias`."""

    def __init__(self, config):
        super().__init__(config)
        # Recreate the projections so the bias flag follows `qkv_bias` (presumably the
        # parent creates them with a different bias setting — verify against SiglipAttention).
        # Note: `out_proj` also follows `qkv_bias` here, matching the original checkpoints.
        self.k_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=config.qkv_bias)
        self.v_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=config.qkv_bias)
        self.q_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=config.qkv_bias)
        self.out_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=config.qkv_bias)
class Aimv2EncoderLayer(GradientCheckpointingLayer):
    """Pre-norm transformer block: RMSNorm -> attention -> residual, RMSNorm -> FFN -> residual."""

    def __init__(self, config: Aimv2VisionConfig):
        super().__init__()
        self.attention = Aimv2Attention(config)
        self.ffn = Aimv2MLP(config)
        self.rms_norm1 = Aimv2RMSNorm(config.hidden_size, config.rms_norm_eps)
        self.rms_norm2 = Aimv2RMSNorm(config.hidden_size, config.rms_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: torch.Tensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> torch.Tensor:
        # Attention sub-block (pre-norm residual).
        attn_output, _ = self.attention(
            hidden_states=self.rms_norm1(hidden_states), attention_mask=attention_mask, **kwargs
        )
        hidden_states = hidden_states + attn_output
        # Feed-forward sub-block (pre-norm residual).
        hidden_states = hidden_states + self.ffn(self.rms_norm2(hidden_states))
        return hidden_states
class Aimv2Encoder(SiglipEncoder):
    # Re-exported under the Aimv2 name; the layer class is resolved to Aimv2EncoderLayer
    # by the modular converter.
    pass
class Aimv2AttentionPoolingHead(nn.Module):
    """Pools the patch sequence into one vector via multi-head attention with a learned CLS query."""

    def __init__(self, config: Aimv2VisionConfig):
        super().__init__()
        self.hidden_size = config.hidden_size
        self.num_heads = config.num_attention_heads

        self.k_proj = nn.Linear(self.hidden_size, self.hidden_size, bias=config.qkv_bias)
        self.v_proj = nn.Linear(self.hidden_size, self.hidden_size, bias=config.qkv_bias)

        # Learned query token shared across the batch.
        self.cls_token = nn.Parameter(torch.zeros(1, 1, self.hidden_size))
        self.output_proj = nn.Linear(self.hidden_size, self.hidden_size, bias=True)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        batch_size, seq_len, hidden_dim = hidden_states.shape
        head_dim = hidden_dim // self.num_heads

        # (B, 1, heads, head_dim) -> (B, heads, 1, head_dim): one query per head.
        query = self.cls_token.expand(batch_size, -1, -1)
        query = query.reshape(batch_size, 1, self.num_heads, head_dim).transpose(1, 2)
        key = self.k_proj(hidden_states).reshape(batch_size, seq_len, self.num_heads, head_dim).transpose(1, 2)
        value = self.v_proj(hidden_states).reshape(batch_size, seq_len, self.num_heads, head_dim).transpose(1, 2)

        attn_output = F.scaled_dot_product_attention(query, key, value)
        # Merge heads back; the mean over the length-1 query axis just drops that axis.
        attn_output = attn_output.transpose(1, 2).reshape(batch_size, 1, hidden_dim).mean(dim=1)

        return self.output_proj(attn_output)
@auto_docstring
class Aimv2PreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models. The model is only intended for inference and doesn't support finetuning.
    """

    config: Aimv2Config
    base_model_prefix = "aimv2"
    input_modalities = ("image",)
    supports_gradient_checkpointing = True
    _no_split_modules = [
        "Aimv2EncoderLayer",
        "Aimv2AttentionPoolingHead",
        "Aimv2VisionEmbeddings",
        "Aimv2TextEmbeddings",
    ]
    _supports_sdpa = True
    _supports_flash_attn = True
    _supports_flex_attn = True

    @torch.no_grad()
    def _init_weights(self, module):
        """Model-specific initialization on top of the generic scheme from ``PreTrainedModel``."""
        super()._init_weights(module)
        if hasattr(module, "logit_scale"):
            if isinstance(module.logit_scale, nn.Parameter):
                # CLIP-style temperature initialization: log(1 / 0.07).
                init.constant_(module.logit_scale, math.log(1 / 0.07))
        elif isinstance(module, Aimv2AttentionPoolingHead):
            init.normal_(module.cls_token, mean=0.0, std=self.config.initializer_range)
        elif isinstance(module, (Aimv2VisionEmbeddings, Aimv2TextEmbeddings)):
            # Bug fix: native-resolution vision embeddings (`is_native=True`) register no
            # `position_ids` buffer, so the unconditional access used to raise AttributeError.
            # The two previously duplicated branches are merged with a guard.
            position_ids = getattr(module, "position_ids", None)
            if position_ids is not None:
                init.copy_(position_ids, torch.arange(position_ids.shape[-1]).expand((1, -1)))
@auto_docstring(
    custom_intro="""
    The Vision model from AIMv2 without any head or projection on top.
    """
)
class Aimv2VisionModel(Aimv2PreTrainedModel):
    config: Aimv2VisionConfig
    main_input_name = "pixel_values"
    # Layers whose outputs the capture_outputs machinery records.
    _can_record_outputs = {
        "hidden_states": Aimv2EncoderLayer,
        "attentions": Aimv2Attention,
    }

    def __init__(self, config: Aimv2VisionConfig):
        super().__init__(config)
        self.config = config
        self.embeddings = Aimv2VisionEmbeddings(config)
        self.encoder = Aimv2Encoder(config)
        # The only change from SiglipVisionTransformer is, layernorm -> rms_norm.
        self.rms_norm = Aimv2RMSNorm(config.hidden_size, config.rms_norm_eps)

        # Attention-pooling head is optional (disabled for vision-only checkpoints).
        self.use_head = config.use_head
        if self.use_head:
            self.head = Aimv2AttentionPoolingHead(config)

        self.post_init()

    def get_input_embeddings(self) -> nn.Module:
        return self.embeddings.patch_embed

    @merge_with_config_defaults
    @capture_outputs(tie_last_hidden_states=False)
    @auto_docstring
    def forward(
        self,
        pixel_values,
        **kwargs: Unpack[TransformersKwargs],
    ) -> BaseModelOutputWithPooling:
        r"""
        Examples:

        ```python
        >>> from PIL import Image
        >>> import httpx
        >>> from io import BytesIO

        >>> from transformers import AutoProcessor, Aimv2VisionModel

        >>> model = Aimv2VisionModel.from_pretrained("apple/aimv2-large-patch14-native")
        >>> processor = AutoProcessor.from_pretrained("apple/aimv2-large-patch14-native")

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> with httpx.stream("GET", url) as response:
        ...     image = Image.open(BytesIO(response.read()))

        >>> inputs = processor(images=image, return_tensors="pt")
        >>> outputs = model(**inputs)
        >>> last_hidden_state = outputs.last_hidden_state
        >>> pooled_output = outputs.pooler_output  # pooled features
        ```"""
        hidden_states = self.embeddings(pixel_values)

        encoder_outputs: BaseModelOutput = self.encoder(
            inputs_embeds=hidden_states,
            **kwargs,
        )

        last_hidden_state = encoder_outputs.last_hidden_state
        last_hidden_state = self.rms_norm(last_hidden_state)

        # pooler_output is None when the checkpoint has no attention-pooling head.
        pooler_output = self.head(last_hidden_state) if self.use_head else None

        return BaseModelOutputWithPooling(
            last_hidden_state=last_hidden_state,
            pooler_output=pooler_output,
        )
@auto_docstring(
    custom_intro="""
    The text model from AIMv2 without any head or projection on top.
    """
)
class Aimv2TextModel(Aimv2PreTrainedModel):
    main_input_name = "input_ids"
    # Layers whose outputs the capture_outputs machinery records.
    _can_record_outputs = {
        "hidden_states": Aimv2EncoderLayer,
        "attentions": Aimv2Attention,
    }

    def __init__(self, config: Aimv2TextConfig):
        super().__init__(config)
        self.config = config
        self.embeddings = Aimv2TextEmbeddings(config)
        self.encoder = Aimv2Encoder(config)
        self.rms_norm = Aimv2RMSNorm(config.hidden_size, config.rms_norm_eps)

        # Used to locate the EOS position for pooling in `forward`.
        self.eos_token_id = config.eos_token_id

        self.post_init()

    def get_input_embeddings(self) -> nn.Module:
        return self.embeddings.token_embedding

    def set_input_embeddings(self, value):
        self.embeddings.token_embedding = value

    @merge_with_config_defaults
    @capture_outputs(tie_last_hidden_states=False)
    @auto_docstring
    def forward(
        self,
        input_ids,
        attention_mask: torch.Tensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> BaseModelOutputWithPooling:
        hidden_states = self.embeddings(input_ids)
        batch_size, seq_len, _ = hidden_states.shape

        cache_position = torch.arange(seq_len, dtype=torch.long, device=hidden_states.device)
        position_ids = cache_position.unsqueeze(0).expand(batch_size, -1)

        # The 2D padding mask is expanded into a 4D causal mask only when provided;
        # otherwise the encoder runs without an explicit mask.
        if attention_mask is not None:
            attention_mask = create_causal_mask(
                config=self.config,
                inputs_embeds=hidden_states,
                position_ids=position_ids,
                attention_mask=attention_mask,
                cache_position=cache_position,
                past_key_values=None,
            )

        encoder_outputs = self.encoder(
            inputs_embeds=hidden_states,
            attention_mask=attention_mask,
            **kwargs,
        )

        last_hidden_state = encoder_outputs.last_hidden_state
        last_hidden_state = self.rms_norm(last_hidden_state)

        # Get pooled output: for each sequence, take the hidden state at the FIRST
        # occurrence of the EOS token (argmax returns the first max of the 0/1 mask;
        # note it falls back to position 0 if no EOS is present).
        pooled_output = last_hidden_state[
            torch.arange(last_hidden_state.shape[0], device=last_hidden_state.device),
            (input_ids.to(dtype=torch.int, device=last_hidden_state.device) == self.eos_token_id).int().argmax(dim=-1),
        ]

        return BaseModelOutputWithPooling(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
        )
@auto_docstring
class Aimv2Model(CLIPModel):
    _supports_flash_attn = True
    def __init__(self, config: Aimv2Config):
        # Deliberately calls PreTrainedModel.__init__ (not super().__init__) so the
        # CLIPModel constructor is bypassed and the AIMv2-specific towers are built here.
        PreTrainedModel.__init__(self, config)
        self.projection_dim = config.projection_dim
        self.vision_embed_dim = config.vision_config.hidden_size
        self.text_embed_dim = config.text_config.hidden_size
        self.vision_model = Aimv2VisionModel._from_config(config.vision_config)
        self.text_model = Aimv2TextModel._from_config(config.text_config)
        # Project each tower's pooled output into the shared contrastive space.
        self.visual_projection = nn.Linear(self.vision_embed_dim, self.projection_dim, bias=False)
        self.text_projection = nn.Linear(self.text_embed_dim, self.projection_dim, bias=False)
        # Learnable temperature, stored in log space and clamped in forward().
        self.logit_scale = nn.Parameter(torch.tensor(self.config.logit_scale_init_value))
        self.max_log_logit_scale = math.log(config.max_logit_scale)
        self.post_init()
    @auto_docstring
    @can_return_tuple
    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        pixel_values: torch.FloatTensor | None = None,
        attention_mask: torch.Tensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> Aimv2Output:
        r"""
        Examples:
        ```python
        >>> from PIL import Image
        >>> import httpx
        >>> from io import BytesIO
        >>> from transformers import AutoProcessor, Aimv2Model
        >>> model = Aimv2Model.from_pretrained("apple/aimv2-large-patch14-224-lit")
        >>> processor = AutoProcessor.from_pretrained("apple/aimv2-large-patch14-224-lit")
        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> with httpx.stream("GET", url) as response:
        ...     image = Image.open(BytesIO(response.read()))
        >>> inputs = processor(
        ...     text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt", padding=True
        ... )
        >>> outputs = model(**inputs)
        >>> logits_per_image = outputs.logits_per_image  # this is the image-text similarity score
        >>> probs = logits_per_image.softmax(dim=1)  # we can take the softmax to get the label probabilities
        ```"""
        vision_outputs: BaseModelOutputWithPooling = self.vision_model(
            pixel_values=pixel_values,
            **kwargs,
        )
        text_outputs: BaseModelOutputWithPooling = self.text_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            **kwargs,
        )
        image_embeds = vision_outputs.pooler_output
        image_embeds = self.visual_projection(image_embeds)
        text_embeds = text_outputs.pooler_output
        text_embeds = self.text_projection(text_embeds)
        # normalized features (unit L2 norm before the similarity matmul)
        image_embeds = image_embeds / _get_vector_norm(image_embeds)
        text_embeds = text_embeds / _get_vector_norm(text_embeds)
        # Clamp the log-space temperature to [0, max_log_logit_scale] before exponentiating.
        logit_scale = self.logit_scale.clamp(0.0, self.max_log_logit_scale).exp().to(text_embeds.device)
        logits_per_text = (logit_scale * text_embeds) @ image_embeds.t()
        logits_per_image = logits_per_text.t()
        return Aimv2Output(
            logits_per_image=logits_per_image,
            logits_per_text=logits_per_text,
            text_embeds=text_embeds,
            image_embeds=image_embeds,
            text_model_output=text_outputs,
            vision_model_output=vision_outputs,
        )
# Public symbols exported by this modular definition file.
__all__ = [
    "Aimv2Config",
    "Aimv2VisionConfig",
    "Aimv2TextConfig",
    "Aimv2VisionModel",
    "Aimv2Model",
    "Aimv2PreTrainedModel",
    "Aimv2TextModel",
]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/aimv2/modular_aimv2.py",
"license": "Apache License 2.0",
"lines": 578,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:tests/models/aimv2/test_modeling_aimv2.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch AIMv2 model."""
import inspect
import tempfile
import unittest
import numpy as np
import requests
from parameterized import parameterized
from transformers import Aimv2Config, Aimv2TextConfig, Aimv2VisionConfig
from transformers.testing_utils import (
is_flaky,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import (
is_torch_available,
is_vision_available,
)
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import (
TEST_EAGER_MATCHES_SDPA_INFERENCE_PARAMETERIZATION,
ModelTesterMixin,
_test_eager_matches_sdpa_inference,
floats_tensor,
ids_tensor,
random_attention_mask,
)
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
Aimv2Model,
Aimv2TextModel,
Aimv2VisionModel,
)
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor, AutoProcessor
class Aimv2VisionModelTester:
    """Builds tiny `Aimv2VisionConfig`s and random image batches for unit tests."""
    def __init__(
        self,
        parent,
        batch_size=12,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=False,
        hidden_size=32,
        projection_dim=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        dropout=0.1,
        attention_dropout=0.1,
    ):
        # Keep every knob on the instance so the helper methods below can read it.
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        # One sequence position per non-overlapping patch.
        patches_per_side = image_size // patch_size
        self.seq_length = patches_per_side**2
    def get_config(self):
        """Return a small vision config mirroring this tester's hyper-parameters."""
        return Aimv2VisionConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            projection_dim=self.projection_dim,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            dropout=self.dropout,
            attention_dropout=self.attention_dropout,
        )
    def prepare_config_and_inputs(self):
        """Create a random pixel batch together with a matching config."""
        images = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        return self.get_config(), images
    def create_and_check_model(self, config, pixel_values):
        """Forward one batch through the vision tower and verify the output shape."""
        model = Aimv2VisionModel(config=config).to(torch_device).eval()
        with torch.no_grad():
            result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def prepare_config_and_inputs_for_common(self):
        """Same as `prepare_config_and_inputs`, but inputs packed into a kwargs dict."""
        config, images = self.prepare_config_and_inputs()
        return config, {"pixel_values": images}
class Aimv2ModelTesterMixin(ModelTesterMixin):
    """
    Subclass of ModelTesterMixin with methods specific to testing Aimv2 models.
    The SDPA equivalence test is overridden here because Aimv2 models may have test/vision/text+vision inputs,
    different output logits, and are not supposed to be used or tested with padding_side="left".
    """
    def test_sdpa_can_dispatch_composite_models(self):
        # Round-trip each model through save_pretrained/from_pretrained so the
        # attention implementation is resolved at load time, then verify the
        # choice (sdpa vs eager) propagates to every sub-model of the composite.
        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            model = model_class(config)
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                # Load the model with SDPA
                model_sdpa = model_class.from_pretrained(tmpdirname)
                # Load model with eager attention
                model_eager = model_class.from_pretrained(
                    tmpdirname,
                    attn_implementation="eager",
                )
                model_eager = model_eager.eval().to(torch_device)
                if hasattr(model_sdpa, "vision_model"):
                    self.assertTrue(model_sdpa.vision_model.config._attn_implementation == "sdpa")
                    self.assertTrue(model_eager.vision_model.config._attn_implementation == "eager")
                if hasattr(model_sdpa, "text_model"):
                    self.assertTrue(model_sdpa.text_model.config._attn_implementation == "sdpa")
                    self.assertTrue(model_eager.text_model.config._attn_implementation == "eager")
                self.assertTrue(model_sdpa.config._attn_implementation == "sdpa")
                self.assertTrue(model_eager.config._attn_implementation == "eager")
@require_torch
class Aimv2VisionModelTest(Aimv2ModelTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as Aimv2 does not use input_ids, inputs_embeds,
    attention_mask and seq_length.
    """
    all_model_classes = (Aimv2VisionModel,) if is_torch_available() else ()
    test_resize_embeddings = False
    def setUp(self):
        self.model_tester = Aimv2VisionModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=Aimv2VisionConfig, has_text_modality=False, hidden_size=37
        )
    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="Aimv2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    def test_model_get_set_embeddings(self):
        # The vision tower's "input embeddings" are its patch-embedding module;
        # it has no output embeddings (get_output_embeddings may return None).
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        # The first positional argument of forward must be `pixel_values`.
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
class Aimv2TextModelTester:
    """Builds tiny `Aimv2TextConfig`s and random token batches for unit tests."""
    def __init__(
        self,
        parent,
        batch_size=12,
        seq_length=7,
        is_training=False,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        projection_dim=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=25,
    ):
        # Store every knob on the instance for the helper methods below.
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
    def get_config(self):
        """Return a small text config mirroring this tester's hyper-parameters."""
        return Aimv2TextConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            projection_dim=self.projection_dim,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            dropout=self.dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
        )
    def prepare_config_and_inputs(self):
        """Create random token ids plus (optionally) a right-padded attention mask."""
        token_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        padding_mask = None
        if self.use_input_mask:
            padding_mask = random_attention_mask([self.batch_size, self.seq_length])
            num_rows, num_cols = padding_mask.shape
            # Force every row into "real tokens first, padding after" form.
            cutoffs = np.random.randint(1, num_cols - 1, size=(num_rows,))
            for row, cutoff in enumerate(cutoffs):
                padding_mask[row, :cutoff] = 1
                padding_mask[row, cutoff:] = 0
        return self.get_config(), token_ids, padding_mask
    def create_and_check_model(self, config, input_ids, input_mask):
        """Run the text tower with and without a mask and verify output shapes."""
        model = Aimv2TextModel(config=config).to(torch_device).eval()
        with torch.no_grad():
            result = model(input_ids, attention_mask=input_mask)
            result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def prepare_config_and_inputs_for_common(self):
        """Same as `prepare_config_and_inputs`, but inputs packed into a kwargs dict."""
        config, token_ids, padding_mask = self.prepare_config_and_inputs()
        return config, {"input_ids": token_ids, "attention_mask": padding_mask}
@require_torch
class Aimv2TextModelTest(Aimv2ModelTesterMixin, unittest.TestCase):
    """Common-test harness for the standalone AIMv2 text tower."""
    all_model_classes = (Aimv2TextModel,) if is_torch_available() else ()
    test_resize_embeddings = False
    def setUp(self):
        self.model_tester = Aimv2TextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=Aimv2TextConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        self.model_tester.create_and_check_model(*self.model_tester.prepare_config_and_inputs())
    @unittest.skip(reason="Aimv2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
class Aimv2ModelTester:
    """Combines the text and vision testers to exercise the composite `Aimv2Model`."""
    def __init__(self, parent, text_kwargs=None, vision_kwargs=None, is_training=False):
        self.parent = parent
        # `or {}` keeps the call sites free of mutable-default pitfalls.
        self.text_model_tester = Aimv2TextModelTester(parent, **(text_kwargs or {}))
        self.vision_model_tester = Aimv2VisionModelTester(parent, **(vision_kwargs or {}))
        self.batch_size = self.text_model_tester.batch_size  # need bs for batching_equivalence test
        self.is_training = is_training
    def get_config(self):
        """Build a composite config from the two sub-testers' configs."""
        return Aimv2Config(
            text_config=self.text_model_tester.get_config(),
            vision_config=self.vision_model_tester.get_config(),
            projection_dim=64,
        )
    def prepare_config_and_inputs(self):
        """Produce a composite config plus text and image inputs."""
        _, input_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs()
        _, pixel_values = self.vision_model_tester.prepare_config_and_inputs()
        return self.get_config(), input_ids, attention_mask, pixel_values
    def create_and_check_model(self, config, input_ids, attention_mask, pixel_values):
        """Forward once and verify both similarity-logit matrices have the right shape."""
        model = Aimv2Model(config).to(torch_device).eval()
        with torch.no_grad():
            result = model(input_ids, pixel_values, attention_mask)
        self.parent.assertEqual(
            result.logits_per_image.shape, (self.vision_model_tester.batch_size, self.text_model_tester.batch_size)
        )
        self.parent.assertEqual(
            result.logits_per_text.shape, (self.text_model_tester.batch_size, self.vision_model_tester.batch_size)
        )
    def prepare_config_and_inputs_for_common(self):
        """Same as `prepare_config_and_inputs`, but inputs packed into a kwargs dict."""
        config, input_ids, attention_mask, pixel_values = self.prepare_config_and_inputs()
        return config, {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "pixel_values": pixel_values,
        }
@require_torch
class Aimv2ModelTest(Aimv2ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-test + pipeline harness for the composite `Aimv2Model` (text + vision)."""
    additional_model_inputs = ["pixel_values"]
    all_model_classes = (Aimv2Model,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": Aimv2Model, "image-feature-extraction": Aimv2VisionModel}
        if is_torch_available()
        else {}
    )
    test_resize_embeddings = False
    test_attention_outputs = False
    _is_composite = True
    def setUp(self):
        self.model_tester = Aimv2ModelTester(self)
        common_properties = ["projection_dim", "logit_scale_init_value"]
        self.config_tester = ConfigTester(
            self, config_class=Aimv2Config, has_text_modality=False, common_properties=common_properties
        )
    def test_model(self):
        # Fix: removed a stray debug `print(config_and_inputs)` left in the test body.
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="Hidden_states is tested in individual model tests")
    def test_hidden_states_output(self):
        pass
    @unittest.skip(reason="Retain_grad is tested in individual model tests")
    def test_retain_grad_hidden_states_attentions(self):
        pass
    @unittest.skip(reason="Aimv2Model does not have input/output embeddings")
    def test_model_get_set_embeddings(self):
        pass
    @unittest.skip("Size mismatch on CUDA")
    def test_multi_gpu_data_parallel_forward(self):
        pass
    def test_load_vision_text_config(self):
        # Sub-configs saved as part of the composite config must round-trip
        # through the single-modality config classes.
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        # Save Aimv2Config and check if we can load Aimv2VisionConfig from it
        with tempfile.TemporaryDirectory() as tmp_dir_name:
            config.save_pretrained(tmp_dir_name)
            vision_config = Aimv2VisionConfig.from_pretrained(tmp_dir_name)
            self.assertDictEqual(config.vision_config.to_dict(), vision_config.to_dict())
        # Save Aimv2Config and check if we can load Aimv2TextConfig from it
        with tempfile.TemporaryDirectory() as tmp_dir_name:
            config.save_pretrained(tmp_dir_name)
            text_config = Aimv2TextConfig.from_pretrained(tmp_dir_name)
            self.assertDictEqual(config.text_config.to_dict(), text_config.to_dict())
    @parameterized.expand(TEST_EAGER_MATCHES_SDPA_INFERENCE_PARAMETERIZATION)
    @is_flaky(
        max_attempts=2,
        description="sdpa gets nan values in some places while eager is fine. Except those places, the values are close",
    )
    def test_eager_matches_sdpa_inference(
        self,
        name,
        dtype,
        padding_side,
        use_attention_mask,
        output_attentions,
        enable_kernels,
    ):
        """Run the common eager-vs-SDPA check with relaxed bfloat16 tolerances for this model."""
        atols = {
            ("cpu", False, torch.float32): 1e-6,
            ("cpu", False, torch.float16): 5e-3,
            ("cpu", False, torch.bfloat16): 3e-2,  # this was relaxed
            ("cpu", True, torch.float32): 1e-6,
            ("cpu", True, torch.float16): 5e-3,
            ("cpu", True, torch.bfloat16): 3e-2,  # this was relaxed
            ("cuda", False, torch.float32): 1e-6,
            ("cuda", False, torch.bfloat16): 3e-2,  # this was relaxed
            ("cuda", False, torch.float16): 5e-3,
            ("cuda", True, torch.float32): 1e-6,
            ("cuda", True, torch.bfloat16): 3e-2,  # this was relaxed
            ("cuda", True, torch.float16): 5e-3,
        }
        _test_eager_matches_sdpa_inference(
            self, name, dtype, padding_side, use_attention_mask, output_attentions, enable_kernels, atols=atols
        )
@require_vision
@require_torch
class Aimv2ModelIntegrationTest(unittest.TestCase):
    """Slow end-to-end check against the published LiT checkpoint (needs network)."""
    @slow
    def test_inference(self):
        model_name = "apple/aimv2-large-patch14-224-lit"
        model = Aimv2Model.from_pretrained(model_name, device_map=torch_device)
        processor = AutoProcessor.from_pretrained(model_name)
        image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
        inputs = processor(
            text=["a photo of a cat", "a photo of a dog"], images=image, padding=True, return_tensors="pt"
        ).to(model.device)
        # Forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # Verify the logits: (num_images, num_texts) and its transpose
        self.assertEqual(
            outputs.logits_per_image.shape,
            torch.Size((inputs.pixel_values.shape[0], inputs.input_ids.shape[0])),
        )
        self.assertEqual(
            outputs.logits_per_text.shape,
            torch.Size((inputs.input_ids.shape[0], inputs.pixel_values.shape[0])),
        )
        # handle device
        expected_logits = torch.tensor([[33.3550, 26.4255]]).to(model.device)
        torch.testing.assert_close(outputs.logits_per_image, expected_logits, atol=1e-3, rtol=1e-3)
@require_vision
@require_torch
class Aimv2VisionModelIntegrationTests(unittest.TestCase):
    """Slow checks of the standalone vision tower against published checkpoints."""
    @slow
    def test_inference(self):
        model_name = "apple/aimv2-large-patch14-224"
        model = Aimv2VisionModel.from_pretrained(model_name, device_map=torch_device)
        processor = AutoImageProcessor.from_pretrained(model_name)
        image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
        inputs = processor(image, return_tensors="pt").to(model.device)
        with torch.no_grad():
            output = model(**inputs)
        # Verify logits shape
        self.assertEqual(output.last_hidden_state.shape, torch.Size([1, 256, 1024]))
        # Verify logits slice
        # fmt: off
        expected_logits = torch.tensor(
            [[ 0.0510,  0.0806, -0.0990, -0.0154],
            [ 2.7850, -2.5143, -0.3320,  2.4196],
            [ 2.8179, -2.4089, -0.2770,  2.3218],
            [ 2.7641, -2.4114, -0.3684,  2.2998],
            [ 2.7972, -2.3180, -0.4490,  2.2302],
            [ 2.8584, -2.5322, -0.2302,  2.4936],
            [-2.7849,  2.4121,  1.3670, -1.5514]]).to(model.device)
        # fmt: on
        output_slice = output.last_hidden_state.squeeze(0)[0:7, 0:4]
        self.assertTrue(torch.allclose(output_slice, expected_logits, atol=1e-3))
    @slow
    def test_inference_for_native_resolution(self):
        # The "native" checkpoint keeps the input's aspect ratio, hence 1530 patches here.
        model_name = "apple/aimv2-large-patch14-native"
        model = Aimv2VisionModel.from_pretrained(model_name, device_map="auto")
        processor = AutoImageProcessor.from_pretrained(model_name)
        image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
        inputs = processor(image, return_tensors="pt").to(model.device)
        with torch.no_grad():
            output = model(**inputs)
        # Verify logits shape
        self.assertEqual(output.last_hidden_state.shape, torch.Size([1, 1530, 1024]))
        # Verify logits slice
        # fmt: off
        expected_logits = torch.tensor(
            [[-1.3342,  0.3720,  0.0963,  0.4159],
            [-1.5328,  0.4677,  0.0936,  0.4321],
            [-0.3775, -0.2758, -0.0803, -0.5367],
            [-1.3877,  0.5561, -1.9064, -1.1766],
            [-0.5148,  0.0108, -0.4515, -0.6402],
            [-0.3400, -0.1711, -0.1855, -0.4219],
            [-1.2877, -0.0585, -0.1646,  0.7420]]).to(model.device)
        # fmt: on
        output_slice = output.last_hidden_state.squeeze(0)[0:7, 0:4]
        self.assertTrue(torch.allclose(output_slice, expected_logits, atol=1e-3))
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/aimv2/test_modeling_aimv2.py",
"license": "Apache License 2.0",
"lines": 466,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:src/transformers/models/doge/convert_doge_weights_to_hf.py | import argparse
import json
import os
import re
import torch
from safetensors.torch import load_file
from transformers import DogeConfig, DogeForCausalLM
# fmt: off
# Regex -> replacement table used by `map_old_key_to_new` (applied with re.subn);
# `\1` carries the captured layer index through to the new key.
# `None` means we drop the key
STATE_DICT_MAPPING = {
    # CausalLM keys
    r"^lm_head.weight": r"lm_head.weight",
    # Model keys
    r"^model.word_embed.weight": r"model.embed_tokens.weight",
    r"^model.rotary_emb.rotary_emb": r"model.rotary_emb.rotary_emb",
    r"^model.final_layernorm.weight": r"model.norm.weight",
    # Layers keys
    r"^model.layers.(\d+).pre_layernorm.weight": r"model.layers.\1.input_layernorm.weight",
    r"^model.layers.(\d+).pre_residual.weight": r"model.layers.\1.input_residual",
    r"^model.layers.(\d+).post_layernorm.weight": r"model.layers.\1.post_attention_layernorm.weight",
    r"^model.layers.(\d+).post_residual.weight": r"model.layers.\1.post_attention_residual",
    # Attention keys
    r"^model.layers.(\d+).self_attn.q_proj.weight": r"model.layers.\1.self_attn.q_proj.weight",
    r"^model.layers.(\d+).self_attn.k_proj.weight": r"model.layers.\1.self_attn.k_proj.weight",
    r"^model.layers.(\d+).self_attn.v_proj.weight": r"model.layers.\1.self_attn.v_proj.weight",
    r"^model.layers.(\d+).self_attn.A": r"model.layers.\1.self_attn.A",
    r"^model.layers.(\d+).self_attn.dt_proj.weight": r"model.layers.\1.self_attn.dt_proj.weight",
    r"^model.layers.(\d+).self_attn.o_proj.weight": r"model.layers.\1.self_attn.o_proj.weight",
    # Feedforward keys
    r"^model.layers.(\d+).feed_forward.gate_proj.weight": r"model.layers.\1.mlp.gate_proj.weight",
    r"^model.layers.(\d+).feed_forward.up_proj.weight": r"model.layers.\1.mlp.up_proj.weight",
    r"^model.layers.(\d+).feed_forward.down_proj.weight": r"model.layers.\1.mlp.down_proj.weight",
    r"^model.layers.(\d+).feed_forward.router_gate.weight": r"model.layers.\1.mlp.router_gate.weight",
    r"^model.layers.(\d+).feed_forward.router_gate.bias": None,
    r"^model.layers.(\d+).feed_forward.down_embed.weight": r"model.layers.\1.mlp.down_embed.weight",
    r"^model.layers.(\d+).feed_forward.up_embed.weight": r"model.layers.\1.mlp.up_embed.weight",
}
# fmt: on
def load_weights(input_dir: str) -> dict:
    """Load all `.safetensors` shards found in `input_dir` into one state dict.

    Args:
        input_dir: Directory containing one or more `.safetensors` files.

    Returns:
        A dict mapping tensor names to tensors, merged across all shards.

    Raises:
        ValueError: If the directory contains no `.safetensors` files.
    """
    safetensor_files = [os.path.join(input_dir, x) for x in os.listdir(input_dir) if x.endswith(".safetensors")]
    if not safetensor_files:
        # Fix: the old message claimed ".bin" support, but only safetensors are loaded here.
        raise ValueError("No .safetensors files found in the specified directory.")
    if len(safetensor_files) > 1:
        # Shard names look like "model-00001-of-00005.safetensors"; sort by shard index
        # so later shards can overwrite earlier ones deterministically.
        safetensor_files = sorted(safetensor_files, key=lambda x: int(x.rsplit("-", 3)[1]))
    all_weights = {}
    for file in safetensor_files:
        all_weights.update(load_file(file))
    return all_weights
def map_old_key_to_new(old_key):
    """Translate one original checkpoint key into its HF name via STATE_DICT_MAPPING.

    Returns the renamed key, or `None` when the key is intentionally dropped.
    Raises `ValueError` if no pattern in the mapping matches.
    """
    for pattern, replacement in STATE_DICT_MAPPING.items():
        if replacement is None:
            # A None replacement marks keys to discard entirely.
            if re.fullmatch(pattern, old_key):
                return None
            continue
        renamed, hits = re.subn(pattern, replacement, old_key)
        if hits:
            # First matching pattern wins.
            return renamed
    raise ValueError(f"Key: {old_key} could not be mapped (check the mapping).")
def convert_state_dict(original_state_dict: dict, config: DogeConfig):
    """Rename every key of the original state dict, silently dropping `None` mappings."""
    # `config` is unused today but kept for signature compatibility with callers.
    return {
        new_key: tensor
        for old_key, tensor in original_state_dict.items()
        if (new_key := map_old_key_to_new(old_key)) is not None
    }
def convert_doge_model(input_dir, output_dir):
    """Convert an original Doge checkpoint directory into Hugging Face format.

    Reads `config.json` and the safetensors weights from `input_dir`, remaps the
    state-dict keys, and writes the converted config + model to `output_dir`.
    """
    # Load and convert config
    with open(os.path.join(input_dir, "config.json")) as f:
        config = json.load(f)
    config = DogeConfig(**config)
    config.save_pretrained(output_dir)
    # Load and convert weights
    original_state_dict = load_weights(input_dir)
    new_dict = convert_state_dict(original_state_dict, config)
    # Build the model on the meta device so no real memory is allocated before
    # the converted tensors are assigned below.
    with torch.device("meta"):
        model = DogeForCausalLM(config)
    if config.tie_word_embeddings:
        # Tied embeddings: the LM head shares the input embedding matrix.
        new_dict["lm_head.weight"] = new_dict["model.embed_tokens.weight"]
    model.load_state_dict(new_dict, strict=True, assign=True)
    model.save_pretrained(output_dir)
if __name__ == "__main__":
    # CLI entry point: convert an original Doge checkpoint folder into HF format.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "input_dir",
        type=str,
        help="Location of the local folder copied from the Hub.",
    )
    parser.add_argument(
        "output_dir",
        type=str,
        help="Location to write HF model.",
    )
    args = parser.parse_args()
    convert_doge_model(args.input_dir, args.output_dir)
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/doge/convert_doge_weights_to_hf.py",
"license": "Apache License 2.0",
"lines": 101,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
huggingface/transformers:src/transformers/models/doge/modular_doge.py | # Copyright 2025 Jingze Shi and the HuggingFace Inc. team. All rights reserved.
#
# The Doge family of small language models is trained by SmallDoge Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch Doge model."""
import math
from collections.abc import Callable
from typing import Union
import torch
import torch.nn.functional as F
from torch import nn
from ... import initialization as init
from ...activations import ACT2FN
from ...cache_utils import Cache
from ...configuration_utils import PreTrainedConfig
from ...integrations.flex_attention import compile_friendly_flex_attention
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import MoeCausalLMOutputWithPast, MoeModelOutputWithPast
from ...modeling_rope_utils import RopeParameters
from ...modeling_utils import AttentionInterface, PreTrainedModel
from ...processing_utils import Unpack
from ...utils import TransformersKwargs, is_torch_flex_attn_available, logging
from ...utils.output_capturing import OutputRecorder
from ..llama.modeling_llama import (
LlamaForSequenceClassification,
LlamaMLP,
LlamaPreTrainedModel,
LlamaRMSNorm,
LlamaRotaryEmbedding,
apply_rotary_pos_emb,
eager_attention_forward,
repeat_kv,
)
from ..mixtral.modeling_mixtral import MixtralForCausalLM, MixtralModel
logger = logging.get_logger(__name__)
if is_torch_flex_attn_available():
from torch.nn.attention.flex_attention import BlockMask
class DogeConfig(PreTrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`DogeModel`]. It is used to instantiate an Doge
    model according to the specified arguments, defining the model architecture like [SmallDoge/Doge-320M](https://huggingface.co/SmallDoge/Doge-320M).
    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.
    Args:
        vocab_size (`int`, *optional*, defaults to 32768):
            Vocabulary size of the Doge2 model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`DogeModel`]
        hidden_size (`int`, *optional*, defaults to 1024):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 2048):
            Dimension of the MLP representations.
        num_hidden_layers (`int`, *optional*, defaults to 32):
            Number of hidden layers in the Transformer decoder.
        hidden_dropout (`float`, *optional*, defaults to 0.0):
            Dropout probability for each sequence transformation and state transformation module.
        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the decoder.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        rms_norm_eps (`float`, *optional*, defaults to 1e-06):
            The epsilon used by the rms normalization layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether the model's input and output word embeddings should be tied.
        max_position_embeddings (`int`, *optional*, defaults to 2048):
            The maximum sequence length that this model might ever be used with.
        rope_parameters (`RopeParameters`, *optional*):
            Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain
            a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE
            with longer `max_position_embeddings`.
        num_attention_heads (`int`, *optional*, defaults to 8):
            Number of attention heads for each attention layer in the Transformer decoder.
        num_key_value_heads (`int`, *optional*):
            This is the number of key_value heads that should be used to implement Grouped Query Attention.
            If `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
            `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used.
            When converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed by meanpooling all the original heads within that group.
            For more details checkout [this paper](https://huggingface.co/papers/2305.13245).
            If it is not specified, will default to `num_attention_heads`.
        attention_bias (`bool`, *optional*, defaults to `False`):
            Whether to use a bias in the query, key, value and output projection layers during self-attention.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        mlp_bias (`bool`, *optional*, defaults to `False`):
            Whether to use a bias in up_proj, down_proj and gate_proj layers in the MLP layers.
        sliding_window (`int`, *optional*):
            Sliding window attention window size. If not specified, will default to `None`.
        keep_window_size (`int`, *optional*, defaults to 2048):
            The window size of tokens that are not dynamically masked, and dynamic masking is only performed when the sequence length exceeds this value.
        is_moe (`bool`, *optional*, defaults to `False`):
            Whether to use the Cross Domain Mixture of Experts, if `True`, the MoE will inherit the MLP to initialize.
        num_experts (`int`, *optional*, defaults to 16384):
            Number of routed experts in the model. This is only used when `is_moe=True`.
        num_experts_per_tok (`int`, *optional*, defaults to 64):
            Number of selected experts to route per-token.
        norm_topk_prob (`bool`, *optional*, defaults to `False`):
            Whether to normalize the topk probabilities.
        output_router_logits (`bool`, *optional*, defaults to `False`):
            Whether or not the router logits should be returned by the model. Enabling this will also
            allow the model to output the auxiliary loss, including load balancing loss and router z-loss.
        router_aux_loss_coef (`float`, *optional*, defaults to 0.001):
            The aux loss factor for the total loss.
        pad_token_id (`int`, *optional*):
            Padding token id.
        bos_token_id (`int`, *optional*):
            Beginning of stream token id.
        eos_token_id (`int`, *optional*):
            End of stream token id.
    ```python
    >>> from transformers import DogeConfig, DogeModel
    >>> # Initializing a Doge-320M style configuration
    >>> configuration = DogeConfig()
    >>> # Initializing a model from the Doge-320M style configuration
    >>> model = DogeModel(configuration)
    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "doge"
    keys_to_ignore_at_inference = ["past_key_values"]
    # Default tensor parallel plan for base model `DogeModel`
    base_model_tp_plan = {
        "layers.*.self_attn.q_proj": "colwise",
        "layers.*.self_attn.k_proj": "colwise",
        "layers.*.self_attn.v_proj": "colwise",
        "layers.*.self_attn.dt_proj": "rowwise",
        "layers.*.self_attn.o_proj": "rowwise",
        "layers.*.mlp.gate_proj": "colwise",
        "layers.*.mlp.up_proj": "colwise",
        "layers.*.mlp.down_proj": "rowwise",
        "layers.*.mlp.router_gate": "colwise_gather_output",
        "layers.*.mlp.down_embed": "rowwise_split_input",
        "layers.*.mlp.up_embed": "rowwise_split_input",
    }
    base_model_pp_plan = {
        "embed_tokens": (["input_ids"], ["inputs_embeds"]),
        "layers": (["hidden_states", "attention_mask"], ["hidden_states"]),
        "norm": (["hidden_states"], ["hidden_states"]),
    }

    def __init__(
        self,
        vocab_size: int | None = 32768,
        hidden_size: int | None = 1024,
        intermediate_size: int | None = 2048,
        num_hidden_layers: int | None = 32,
        hidden_dropout: float | None = 0.0,
        hidden_act: str | None = "silu",
        initializer_range: float | None = 0.02,
        # Fixed annotation: the epsilon is a float (see docstring), not an int.
        rms_norm_eps: float | None = 1e-06,
        use_cache: bool | None = True,
        tie_word_embeddings: bool | None = False,
        max_position_embeddings: int | None = 2048,
        rope_parameters: RopeParameters | dict[str, RopeParameters] | None = None,
        num_attention_heads: int | None = 8,
        num_key_value_heads: int | None = None,
        attention_bias: bool | None = False,
        attention_dropout: float | None = 0.0,
        mlp_bias: bool | None = False,
        sliding_window: int | None = None,
        keep_window_size: int | None = 2048,
        is_moe: bool | None = False,
        num_experts: int | None = 16384,
        num_experts_per_tok: int | None = 64,
        norm_topk_prob: bool | None = False,
        output_router_logits: bool | None = False,
        router_aux_loss_coef: float | None = 0.001,
        pad_token_id: int | None = None,
        bos_token_id: int | None = None,
        eos_token_id: int | None = None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.hidden_dropout = hidden_dropout
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.max_position_embeddings = max_position_embeddings
        self.num_attention_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.attention_bias = attention_bias
        self.attention_dropout = attention_dropout
        self.mlp_bias = mlp_bias
        self.sliding_window = sliding_window
        self.keep_window_size = keep_window_size
        self.is_moe = is_moe
        self.num_experts = num_experts
        self.num_experts_per_tok = num_experts_per_tok
        self.norm_topk_prob = norm_topk_prob
        self.output_router_logits = output_router_logits
        self.router_aux_loss_coef = router_aux_loss_coef
        self.tie_word_embeddings = tie_word_embeddings
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.rope_parameters = rope_parameters

        # for backward compatibility
        if num_key_value_heads is None:
            self.num_key_value_heads = num_attention_heads

        super().__init__(**kwargs)
class DogeRMSNorm(LlamaRMSNorm):
    # Identical to Llama's RMSNorm; re-declared so the modular converter emits a Doge-named class.
    pass
class DogeRotaryEmbedding(LlamaRotaryEmbedding):
    # Identical to Llama's rotary embedding; re-declared only for the Doge namespace.
    pass
def flex_attention_forward(
    module: nn.Module,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attention_mask: Union[torch.Tensor, "BlockMask"],
    scaling: float | None = None,
    softcap: float | None = None,
    **kwargs,
) -> tuple[torch.Tensor, torch.Tensor]:
    """Flex-attention kernel used by Doge's dynamic-mask attention.

    Doge's dense float mask (built by `DogeAttention.prepare_dynamic_mask`) is folded
    into the attention scores through `score_mod` instead of a sparse `BlockMask`.

    Args:
        module: The calling attention module (unused; kept for attention-interface parity).
        query/key/value: Projected states, laid out as `(batch, num_heads, seq_len, head_dim)`
            (the output is `transpose(1, 2)`-ed back before returning).
        attention_mask: Either a flex-attention `BlockMask` or an additive float mask of
            shape `(batch, num_heads, q_len, kv_len)`.
        scaling: Optional softmax scale forwarded to flex attention.
        softcap: Optional tanh soft-capping applied to the raw scores.

    Returns:
        `(attn_output, attention_weights)` where the "weights" are the log-sum-exp tensor
        from flex attention, cast back to `value.dtype`.
    """
    # A BlockMask goes straight through to the kernel; a dense tensor mask is instead
    # added per-element inside `score_mod`.
    block_mask = None
    causal_mask = None
    if isinstance(attention_mask, BlockMask):
        block_mask = attention_mask
    else:
        causal_mask = attention_mask
    if causal_mask is not None:
        # Slice the mask down to the actual key length (it may be wider, e.g. with caches).
        causal_mask = causal_mask[:, :, :, : key.shape[-2]]

    def score_mod(score, batch_idx, head_idx, q_idx, kv_idx):
        # Soft-capping first, then the additive mask — order matters: the mask's
        # large-negative entries must not be squashed by tanh.
        if softcap is not None:
            score = softcap * torch.tanh(score / softcap)
        if causal_mask is not None:
            score = score + causal_mask[batch_idx][head_idx][q_idx][kv_idx]
        return score

    attn_output, attention_weights = compile_friendly_flex_attention(
        query,
        key,
        value,
        score_mod=score_mod,
        block_mask=block_mask,
        enable_gqa=True,
        scale=scaling,
        # Last time checked on PyTorch == 2.5.1: Flex Attention always computes the lse regardless.
        # For simplification, we thus always return it as no additional computations are introduced.
        return_lse=True,
    )
    # lse is returned in float32
    attention_weights = attention_weights.to(value.dtype)
    attn_output = attn_output.transpose(1, 2).contiguous()
    return attn_output, attention_weights
# Register the Doge-specific flex-attention kernel so it can be selected through
# `config._attn_implementation = "doge_flex_attention"`.
ALL_ATTENTION_FUNCTIONS = AttentionInterface()
ALL_ATTENTION_FUNCTIONS["doge_flex_attention"] = flex_attention_forward
class DogeAttention(nn.Module):
    """Grouped-query attention with Doge's Dynamic Mask Attention (DMA).

    On top of standard GQA (RoPE, per-head Q/K RMS-norm), a learned, value-dependent
    mask is computed each forward pass and added to the attention scores, keeping at
    most `keep_window_size` keys active per query row.
    """

    def __init__(self, config: DogeConfig, layer_idx: int | None = None):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
        self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
        self.scaling = self.head_dim**-0.5
        self.attention_dropout = config.attention_dropout
        self.keep_window_size = config.keep_window_size
        self.q_proj = nn.Linear(
            config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias
        )
        self.k_proj = nn.Linear(
            config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
        )
        self.v_proj = nn.Linear(
            config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
        )
        # dynamic mask for the QK^T attention weights matrix
        # `A` is a per-KV-head gate (zero-initialized, see `DogePreTrainedModel._init_weights`);
        # `dt_proj` maps the concatenated per-head value states to one logit per KV head.
        self.A = nn.Parameter(torch.zeros(config.num_key_value_heads))
        self.dt_proj = nn.Linear(
            config.num_key_value_heads * self.head_dim, config.num_key_value_heads, bias=config.attention_bias
        )
        self.o_proj = nn.Linear(
            config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.attention_bias
        )
        self.q_norm = DogeRMSNorm(self.head_dim, eps=config.rms_norm_eps)
        self.k_norm = DogeRMSNorm(self.head_dim, eps=config.rms_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: tuple[torch.Tensor, torch.Tensor],
        attention_mask: torch.Tensor | None = None,
        past_key_values: Cache | None = None,
        cache_position: torch.LongTensor | None = None,
        **kwargs,
    ) -> tuple[torch.Tensor, torch.Tensor | None, tuple[torch.Tensor] | None]:
        """Run DMA attention over `hidden_states` of shape `(batch, seq_len, hidden_size)`.

        Returns `(attn_output, attn_weights)`; weight availability depends on the
        selected attention backend.
        """
        input_shape = hidden_states.shape[:-1]
        hidden_shape = (*input_shape, -1, self.head_dim)
        # Project, split heads, and RMS-normalize Q/K per head before RoPE.
        query_states = self.q_norm(self.q_proj(hidden_states).view(hidden_shape)).transpose(1, 2)
        key_states = self.k_norm(self.k_proj(hidden_states).view(hidden_shape)).transpose(1, 2)
        value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        cos, sin = position_embeddings
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
        if past_key_values is not None:
            # sin and cos are specific to RoPE models; cache_position needed for the static cache
            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
            key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
        # calculate dynamic mask from value_states
        # Note: value_states here includes cached values, so the mask covers the full key length.
        dt_states = self.dt_proj(
            value_states.transpose(1, 2).reshape(value_states.shape[0], value_states.shape[-2], -1)
        )
        # exp(A * softplus(dt)) is strictly positive; larger values mean "keep this key".
        # Result shape after transpose: (batch, num_kv_heads, key_len).
        dt_states = torch.exp(self.A * F.softplus(dt_states)).transpose(-1, -2)
        attn_mask = self.prepare_dynamic_mask(
            hidden_states=hidden_states,
            dt_states=dt_states,
            keep_window_size=self.keep_window_size,
            attention_mask=attention_mask,
        )
        # Broadcast the per-KV-head mask to all query heads (GQA).
        attn_mask = repeat_kv(attn_mask, self.num_key_value_groups)
        attention_interface: Callable = ALL_ATTENTION_FUNCTIONS.get_interface(
            self.config._attn_implementation, eager_attention_forward
        )
        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask=attn_mask,
            dropout=0.0 if not self.training else self.attention_dropout,
            scaling=self.scaling,
            **kwargs,
        )
        attn_output = attn_output.reshape(*input_shape, -1).contiguous()
        attn_output = self.o_proj(attn_output)
        return attn_output, attn_weights

    def prepare_dynamic_mask(
        self,
        hidden_states: torch.Tensor,
        dt_states: torch.Tensor,
        keep_window_size: int = 2048,
        attention_mask: torch.Tensor | None = None,
    ):
        """
        The core idea of DMA is to calculate the dynamic attention mask to mask the tokens that should be masked, so as to form sparse attention.
        Combine `dt_states` with `attention_mask` to generate the final `attn_mask`.
        Args:
            hidden_states (`torch.Tensor`): The input hidden_states, used to determine the minimum value of the current input precision.
            dt_states (`torch.Tensor`): dt_states of shape `(batch_size, num_heads, key_sequence_length)`.
            keep_window_size (`int`): The window size of tokens that are not dynamically masked, and dynamic masking is only performed when the sequence length exceeds this value.
            attention_mask (`torch.Tensor`, *optional*): attention mask of shape `(batch_size, 1, query_sequence_length, key_sequence_length)`.

        Returns:
            `torch.Tensor` of shape `(batch_size, num_heads, query_len, key_len)`: additive
            mask whose dropped entries are the dtype's minimum value.
        """
        min_dtype = torch.finfo(hidden_states.dtype).min
        dtype = hidden_states.dtype
        # Replicate the per-key scores across the query dimension (query length is taken
        # from hidden_states.shape[1], i.e. assumes (batch, seq, hidden) layout).
        attn_mask = dt_states[:, :, None, :].expand(
            -1, -1, hidden_states.shape[1], -1
        )  # [batch_size, num_heads, query_len, key_len]
        if attention_mask is not None and not isinstance(attention_mask, BlockMask):
            if attention_mask.dtype == torch.bool:
                # Convert a boolean mask (True = attend) to an additive float mask.
                dtype = hidden_states.dtype  # NOTE: redundant re-assignment, kept as-is
                attention_mask = torch.where(
                    attention_mask, torch.tensor(0.0, device=attention_mask.device, dtype=dtype), min_dtype
                )
            # Any position already masked out (non-zero additive entry) is forced to -inf.
            attn_mask = attn_mask.masked_fill(attention_mask[:, :, :, : attn_mask.shape[-1]] != 0, min_dtype)
        if attn_mask.shape[-1] > keep_window_size:
            # Keep only the top `keep_window_size` keys per (head, query) row; mask the rest.
            active_mask = torch.zeros_like(attn_mask, dtype=dtype, device=attn_mask.device)
            topk_indices = torch.topk(attn_mask, keep_window_size, dim=-1, largest=True, sorted=False).indices
            active_mask = active_mask.scatter(-1, topk_indices, 1.0)
            attn_mask = attn_mask.masked_fill(active_mask == 0.0, min_dtype)
        return attn_mask
class DogeMLP(LlamaMLP):
    # Plain gated MLP, identical to Llama's; used when `config.is_moe` is False.
    pass
class DogeCDMoE(nn.Module):
    """Cross-Domain Mixture of Experts (CDMoE) with product-key expert retrieval.

    A shared gated MLP runs on every token; additionally, `top_k` of `num_experts`
    single-vector experts (rows of `down_embed`/`up_embed`) are retrieved per token via
    a product-key scheme: two independent top-`num_keys` searches whose Cartesian
    combination indexes an implicit `num_keys * num_keys` expert grid.
    """

    def __init__(self, config: DogeConfig):
        super().__init__()
        self.hidden_size = config.hidden_size
        self.intermediate_size = config.intermediate_size
        self.act_fn = ACT2FN[config.hidden_act]
        # num_keys is floor(sqrt(num_experts)); the routable grid covers num_keys**2 experts.
        self.num_experts = config.num_experts
        self.num_keys = math.floor(math.sqrt(self.num_experts))
        self.top_k = config.num_experts_per_tok
        self.norm_topk_prob = config.norm_topk_prob
        # shared expert
        self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias)
        self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias)
        self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=config.mlp_bias)
        # router gate for retrieval experts
        self.router_gate = nn.Linear(self.hidden_size, self.num_keys * 2, bias=False)
        # routed experts
        self.down_embed = nn.Embedding(self.num_experts, self.hidden_size)
        self.up_embed = nn.Embedding(self.num_experts, self.hidden_size)

    def forward(
        self,
        hidden_states: torch.Tensor,
        **kwargs,
    ) -> torch.Tensor:
        """Returns `(hidden_states, router_logits)`; the second element is captured by
        `DogePreTrainedModel._can_record_outputs` (OutputRecorder index=1)."""
        bsz, seq_len, _ = hidden_states.shape
        # get routing logits with router gate
        # Shape: (2, bsz*seq_len, num_keys) — one half per product-key dimension.
        router_logits = self.router_gate(hidden_states).view(2, bsz * seq_len, -1)
        # get experts with the highest routing logits
        (scores_x, scores_y), (indices_x, indices_y) = router_logits.topk(self.num_keys, dim=-1)
        # Combine the two key dimensions: score is additive, index is row-major into the grid.
        all_scores = scores_x.unsqueeze(-1) + scores_y.unsqueeze(-2)
        all_indices = indices_x.unsqueeze(-1) * self.num_keys + indices_y.unsqueeze(-2)
        all_scores = all_scores.view(*all_scores.shape[:-2], -1)
        all_indices = all_indices.view(*all_indices.shape[:-2], -1)
        scores, position_indices = all_scores.topk(self.top_k, dim=-1)
        indices = all_indices.gather(-1, position_indices)
        # Softmax over the selected top_k scores only.
        routing_weights = F.softmax(scores, dim=-1)
        if self.norm_topk_prob:
            routing_weights /= routing_weights.sum(dim=-1, keepdim=True)
        # mix routed experts states with shared expert states
        # Each expert is a rank-1 map: weight = act(<down_e, x>) * routing_weight; out = weight * up_e.
        down_embed = self.down_embed(indices)
        up_embed = self.up_embed(indices)
        experts_weights = torch.matmul(down_embed, hidden_states.view(bsz * seq_len, -1, 1)).view(bsz * seq_len, -1)
        experts_weights = self.act_fn(experts_weights) * routing_weights
        experts_states = torch.matmul(experts_weights.view(bsz * seq_len, 1, -1), up_embed).view(bsz, seq_len, -1)
        hidden_states = self.down_proj(self.act_fn(self.gate_proj(hidden_states)) * self.up_proj(hidden_states))
        hidden_states = hidden_states + experts_states
        return hidden_states, router_logits
class DogeDecoderLayer(GradientCheckpointingLayer):
    """Single Doge decoder layer: DMA self-attention followed by an MLP (or CDMoE),
    each with pre-RMSNorm, dropout, and a learned per-channel residual gate."""

    def __init__(self, config: DogeConfig, layer_idx: int | None = None):
        super().__init__()
        self.hidden_dropout = config.hidden_dropout
        self.input_layernorm = DogeRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.self_attn = DogeAttention(config=config, layer_idx=layer_idx)
        # Learned elementwise scale applied to the residual branch (ones-initialized).
        self.input_residual = nn.Parameter(torch.ones(config.hidden_size))
        self.post_attention_layernorm = DogeRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.mlp = DogeMLP(config) if not config.is_moe else DogeCDMoE(config)
        self.post_attention_residual = nn.Parameter(torch.ones(config.hidden_size))

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: tuple[torch.Tensor, torch.Tensor] | None = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: Cache | None = None,
        use_cache: bool | None = False,
        cache_position: torch.LongTensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple[torch.FloatTensor, tuple[torch.FloatTensor, torch.FloatTensor] | None]:
        # sequence transformation
        residual = hidden_states
        hidden_states = self.input_layernorm(hidden_states)
        hidden_states, self_attn_weights = self.self_attn(
            hidden_states=hidden_states,
            position_embeddings=position_embeddings,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            use_cache=use_cache,
            cache_position=cache_position,
            **kwargs,
        )
        hidden_states = F.dropout(hidden_states, p=self.hidden_dropout, training=self.training)
        hidden_states = self.input_residual * residual + hidden_states
        # state transformation
        residual = hidden_states
        hidden_states = self.post_attention_layernorm(hidden_states)
        # NOTE(review): when `config.is_moe` is True, `DogeCDMoE.forward` returns a
        # `(hidden_states, router_logits)` tuple, which would be fed to `F.dropout`
        # below as-is — verify how the MoE path unpacks this (possibly handled by the
        # modular converter / output-capturing machinery).
        hidden_states = self.mlp(hidden_states)
        hidden_states = F.dropout(hidden_states, p=self.hidden_dropout, training=self.training)
        hidden_states = self.post_attention_residual * residual + hidden_states
        return hidden_states
class DogePreTrainedModel(LlamaPreTrainedModel):
    # NOTE(review): flash-attn and fullgraph compile are disabled here — presumably
    # incompatible with the dense dynamic mask / top-k key selection; confirm.
    _supports_flash_attn = False
    _can_compile_fullgraph = False
    # router_logits is the second element of DogeCDMoE's output tuple (index=1).
    _can_record_outputs = {
        "router_logits": OutputRecorder(DogeCDMoE, index=1),
        "hidden_states": DogeDecoderLayer,
        "attentions": DogeAttention,
    }

    @torch.no_grad()
    def _init_weights(self, module):
        """Initialize the weights"""
        PreTrainedModel._init_weights(self, module)
        # Doge-specific parameters: the DMA gate starts at zero (mask initially inert)
        # and the residual gates start at one (plain residual connection).
        if isinstance(module, DogeAttention):
            if hasattr(module, "A"):
                init.zeros_(module.A)
        elif isinstance(module, DogeDecoderLayer):
            if hasattr(module, "input_residual"):
                init.ones_(module.input_residual)
            if hasattr(module, "post_attention_residual"):
                init.ones_(module.post_attention_residual)
class DogeModel(MixtralModel):
    # Inherits the Mixtral backbone unchanged (MoE-aware model outputs).
    pass
def load_balancing_loss_func(
gate_logits: torch.Tensor | tuple[torch.Tensor] | None,
num_experts: int | None = None,
num_keys: int | None = None,
top_k: int = 2,
attention_mask: torch.Tensor | None = None,
) -> torch.Tensor | int:
r"""
Computes auxiliary load balancing loss as in Switch Transformer - implemented in Pytorch.
See Switch Transformer (https://huggingface.co/papers/2101.03961) for more details. This function implements the loss
function presented in equations (4) - (6) of the paper. It aims at penalizing cases where the routing between
experts is too unbalanced.
Args:
gate_logits:
Logits from the `router_gate`, should be a tuple of model.config.num_hidden_layers tensors of
shape [2, batch_size * sequence_length, num_keys].
num_experts:
Number of experts
num_keys:
Number of keys
top_k:
The number of experts to route per-token, can be also interpreted as the `top-k` routing
parameter.
attention_mask (`torch.Tensor`, *optional*):
The attention_mask used in forward function
shape [batch_size X sequence_length] if not None.
Returns:
The auxiliary loss.
"""
if gate_logits is None or not isinstance(gate_logits, tuple):
return 0
compute_dtype = gate_logits[0].dtype
compute_device = gate_logits[0].device
all_expert_indices = []
all_routing_weights = []
for layer_gate_logits in gate_logits:
layer_gate_logits = layer_gate_logits.to(compute_device)
(scores_x, scores_y), (indices_x, indices_y) = layer_gate_logits.topk(num_keys, dim=-1)
all_scores = scores_x.unsqueeze(-1) + scores_y.unsqueeze(-2)
all_indices = indices_x.unsqueeze(-1) * num_keys + indices_y.unsqueeze(-2)
all_scores = all_scores.view(*all_scores.shape[:-2], -1)
all_indices = all_indices.view(*all_indices.shape[:-2], -1)
_, position_indices = all_scores.topk(top_k, dim=-1)
expert_indices = all_indices.gather(-1, position_indices)
routing_weights = F.softmax(all_scores, dim=-1)
all_expert_indices.append(expert_indices)
all_routing_weights.append(routing_weights)
all_expert_indices = torch.cat(all_expert_indices, dim=0)
all_routing_weights = torch.cat(all_routing_weights, dim=0)
if attention_mask is None:
# Compute the percentage of tokens routed to each experts
all_expert_indices = all_expert_indices.view(-1)
tokens_per_expert = torch.zeros(num_experts, dtype=compute_dtype, device=compute_device)
pad = torch.ones_like(all_expert_indices, dtype=compute_dtype, device=compute_device)
tokens_per_expert = tokens_per_expert.scatter_add_(0, all_expert_indices, pad) / all_expert_indices.shape[0]
# Compute the average probability of routing to these experts
router_prob_per_expert = torch.mean(all_routing_weights, dim=0)
else:
batch_size, sequence_length = attention_mask.shape
num_hidden_layers = len(gate_logits)
# Compute the mask that masks all padding tokens as 0 with the same shape of expert_mask
expert_attention_mask = (
attention_mask[None, :, :, None]
.expand((num_hidden_layers, batch_size, sequence_length, top_k))
.reshape(-1)
.to(compute_device)
)
all_expert_indices = all_expert_indices.view(-1)[expert_attention_mask.bool()]
# Compute the percentage of tokens routed to each experts
tokens_per_expert = torch.zeros(num_experts, dtype=compute_dtype, device=compute_device)
pad = torch.ones_like(all_expert_indices, dtype=compute_dtype, device=compute_device)
tokens_per_expert = tokens_per_expert.scatter_add_(0, all_expert_indices, pad) / torch.sum(
expert_attention_mask
)
# Compute the mask that masks all padding tokens as 0 with the same shape of tokens_per_expert
router_per_expert_attention_mask = (
attention_mask[None, :, :, None]
.expand((num_hidden_layers, batch_size, sequence_length, num_experts))
.reshape(-1, num_experts)
.to(compute_device)
)
# Compute the average probability of routing to these experts
router_prob_per_expert = torch.sum(all_routing_weights * router_per_expert_attention_mask, dim=0) / torch.sum(
router_per_expert_attention_mask, dim=0
)
overall_loss = torch.sum(tokens_per_expert * router_prob_per_expert)
return overall_loss * num_experts
class DogeForCausalLM(MixtralForCausalLM):
    """Doge decoder with a causal language-modeling head (Mixtral-style MoE-aware LM)."""

    def __init__(self, config):
        super().__init__(config)
        self.model = DogeModel(config)
        # NOTE(review): `self.num_experts_per_tok`, `self.router_aux_loss_coef`,
        # `self.vocab_size`, and `self.lm_head` used in `forward` are presumably set by
        # the Mixtral parent `__init__` — confirm.
        self.num_experts = config.num_experts

    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: Cache | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        labels: torch.LongTensor | None = None,
        use_cache: bool | None = None,
        cache_position: torch.LongTensor | None = None,
        logits_to_keep: int | torch.Tensor = 0,
        output_router_logits: bool | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> MoeCausalLMOutputWithPast:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
        Example:
        ```python
        >>> from transformers import AutoTokenizer, DogeForCausalLM
        >>> model = DogeForCausalLM.from_pretrained("SmallDoge/Doge-320M")
        >>> tokenizer = AutoTokenizer.from_pretrained("SmallDoge/Doge-320M")
        >>> prompt = "Hey, are you conscious? Can you talk to me?"
        >>> inputs = tokenizer(prompt, return_tensors="pt")
        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
        ```"""
        # Per-call argument overrides the config default.
        output_router_logits = (
            output_router_logits if output_router_logits is not None else self.config.output_router_logits
        )
        # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
        outputs: MoeModelOutputWithPast = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            cache_position=cache_position,
            **kwargs,
        )
        hidden_states = outputs.last_hidden_state
        # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
        slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
        logits = self.lm_head(hidden_states[:, slice_indices, :])
        loss = None
        if labels is not None:
            loss = self.loss_function(logits, labels, self.vocab_size, **kwargs)
        aux_loss = None
        if output_router_logits:
            # num_keys is reconstructed as floor(sqrt(num_experts)), matching DogeCDMoE.
            aux_loss = load_balancing_loss_func(
                outputs.router_logits,
                self.num_experts,
                math.floor(math.sqrt(self.num_experts)),
                self.num_experts_per_tok,
                attention_mask,
            )
            if labels is not None:
                loss += self.router_aux_loss_coef * aux_loss.to(loss.device)  # make sure to reside in the same device
        return MoeCausalLMOutputWithPast(
            loss=loss,
            aux_loss=aux_loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            router_logits=outputs.router_logits,
        )
class DogeForSequenceClassification(LlamaForSequenceClassification):
    # Inherits Llama's sequence-classification head unchanged.
    pass
# Public symbols re-exported from this modular definition file.
__all__ = [
    "DogeConfig",
    "DogeForCausalLM",
    "DogeModel",
    "DogePreTrainedModel",
    "DogeForSequenceClassification",
]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/doge/modular_doge.py",
"license": "Apache License 2.0",
"lines": 658,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:tests/models/doge/test_modeling_doge.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch Doge model."""
import unittest
from transformers import AutoTokenizer, DogeConfig, is_torch_available, set_seed
from transformers.testing_utils import (
require_torch,
require_torch_accelerator,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DogeForCausalLM,
DogeForSequenceClassification,
DogeModel,
)
class DogeModelTester:
def __init__(
self,
parent,
batch_size=8,
seq_length=16,
is_training=True,
use_input_mask=True,
use_token_type_ids=False,
use_labels=True,
vocab_size=128,
hidden_size=32,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=64,
hidden_act="silu",
max_position_embeddings=512,
type_vocab_size=16,
type_sequence_label_size=2,
initializer_range=0.02,
num_labels=3,
pad_token_id=0,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.pad_token_id = pad_token_id
self.scope = scope
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = torch.tril(torch.ones_like(input_ids).to(torch_device))
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
sequence_labels = None
token_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels
def get_config(self):
return DogeConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
hidden_act=self.hidden_act,
max_position_embeddings=self.max_position_embeddings,
type_vocab_size=self.type_vocab_size,
is_decoder=False,
initializer_range=self.initializer_range,
pad_token_id=self.pad_token_id,
)
def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels):
model = DogeModel(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask)
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def create_and_check_model_as_decoder(
self,
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
encoder_hidden_states,
encoder_attention_mask,
):
config.add_cross_attention = True
model = DogeModel(config)
model.to(torch_device)
model.eval()
result = model(
input_ids,
attention_mask=input_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
)
result = model(
input_ids,
attention_mask=input_mask,
encoder_hidden_states=encoder_hidden_states,
)
result = model(input_ids, attention_mask=input_mask)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def create_and_check_for_causal_lm(
    self,
    config,
    input_ids,
    token_type_ids,
    input_mask,
    sequence_labels,
    token_labels,
    encoder_hidden_states,
    encoder_attention_mask,
):
    """Check that DogeForCausalLM produces logits of shape (batch, seq_len, vocab_size) when given labels."""
    model = DogeForCausalLM(config=config)
    model.to(torch_device)
    model.eval()
    result = model(input_ids, attention_mask=input_mask, labels=token_labels)
    self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_decoder_model_past_large_inputs(
    self,
    config,
    input_ids,
    token_type_ids,
    input_mask,
    sequence_labels,
    token_labels,
    encoder_hidden_states,
    encoder_attention_mask,
):
    """Verify that decoding 3 extra tokens with a KV cache matches a full forward pass on the extended sequence."""
    config.is_decoder = True
    config.add_cross_attention = True
    model = DogeForCausalLM(config=config)
    model.to(torch_device)
    model.eval()
    # first forward pass, requesting the cache so it can be reused below
    outputs = model(
        input_ids,
        attention_mask=input_mask,
        encoder_hidden_states=encoder_hidden_states,
        encoder_attention_mask=encoder_attention_mask,
        use_cache=True,
    )
    past_key_values = outputs.past_key_values
    # create hypothetical multiple next tokens and extend to next_input_ids
    next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
    next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)
    # append to next input_ids and attention_mask
    next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
    next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)
    # Reference: full pass over the concatenated sequence, no cache.
    output_from_no_past = model(
        next_input_ids,
        attention_mask=next_attention_mask,
        encoder_hidden_states=encoder_hidden_states,
        encoder_attention_mask=encoder_attention_mask,
        output_hidden_states=True,
    )["hidden_states"][0]
    # Cached: only the 3 new tokens go through the model.
    output_from_past = model(
        next_tokens,
        attention_mask=next_attention_mask,
        encoder_hidden_states=encoder_hidden_states,
        encoder_attention_mask=encoder_attention_mask,
        past_key_values=past_key_values,
        output_hidden_states=True,
    )["hidden_states"][0]
    # select random slice of the hidden dimension to compare
    random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
    output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
    output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
    self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
    # test that outputs are equal for slice
    self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
def prepare_config_and_inputs_for_common(self):
    """Adapter for the common test mixins: returns (config, inputs_dict)."""
    config, input_ids, _token_type_ids, input_mask, _sequence_labels, _token_labels = (
        self.prepare_config_and_inputs()
    )
    return config, {"input_ids": input_ids, "attention_mask": input_mask}
@require_torch
class DogeModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common test-suite wiring for the Doge model family."""

    all_model_classes = (
        (
            DogeModel,
            DogeForCausalLM,
            DogeForSequenceClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (DogeForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": DogeModel,
            "text-classification": DogeForSequenceClassification,
            "text-generation": DogeForCausalLM,
            "zero-shot": DogeForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    has_attentions = False
    # Need to use `0.8` instead of `0.9` for `test_cpu_offload`
    # This is because we are hitting edge cases with the causal_mask buffer
    model_split_percents = [0.5, 0.7, 0.8]
    # used in `test_torch_compile_for_training`
    _torch_compile_train_cls = DogeForCausalLM if is_torch_available() else None

    def setUp(self):
        self.model_tester = DogeModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DogeConfig, hidden_size=32)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def _check_sequence_classification(self, problem_type=None):
        """Shared body for the three sequence-classification tests below.

        `problem_type=None` keeps the config default; multi-label classification needs
        float labels of shape (batch, num_labels), the other cases use 1D int labels.
        """
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        if problem_type is not None:
            config.problem_type = problem_type
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        if problem_type == "multi_label_classification":
            sequence_labels = ids_tensor(
                [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
            ).to(torch.float)
        else:
            sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = DogeForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_doge_sequence_classification_model(self):
        self._check_sequence_classification()

    def test_doge_sequence_classification_model_for_single_label(self):
        self._check_sequence_classification("single_label_classification")

    def test_doge_sequence_classification_model_for_multi_label(self):
        self._check_sequence_classification("multi_label_classification")

    @unittest.skip(reason="Doge buffers include complex numbers, which breaks this test")
    def test_save_load_fast_init_from_base(self):
        pass

    def test_tp_plan_matches_params(self):
        """Need to overwrite as the plan contains keys that are valid but depend on some configs flags and cannot
        be valid all at the same time"""
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        # They are valid but not always used, depending on config.is_moe flag (the modules are not the same in both cases)
        problematic_keys = {
            "layers.*.mlp.router_gate": "colwise_rep",
            "layers.*.mlp.down_embed": "rowwise_rep",
            "layers.*.mlp.up_embed": "rowwise_rep",
        }
        if not config.is_moe:
            for key in problematic_keys:
                config.base_model_tp_plan.pop(key)
        super().test_tp_plan_matches_params()
        # Put them back in class attribute
        config.base_model_tp_plan.update(problematic_keys)
@require_torch_accelerator
class DogeIntegrationTest(unittest.TestCase):
    # This variable is used to determine which CUDA device are we using for our runners (A10 or T4)
    # Depending on the hardware we get different logits / generations
    cuda_compute_capability_major_version = None

    @classmethod
    def setUpClass(cls):
        if is_torch_available() and torch.cuda.is_available():
            # 8 is for A100 / A10 and 7 for T4
            cls.cuda_compute_capability_major_version = torch.cuda.get_device_capability()[0]

    @slow
    def test_Doge_20M_hard(self):
        """
        An integration test for Doge-20M. It tests against a long output to ensure the subtle numerical differences
        """
        EXPECTED_TEXT = "Here's everything I know about dogs. Dogs is the best animal in the world. It is a very popular and popular dog in the United States. It is a very popular"
        tokenizer = AutoTokenizer.from_pretrained("SmallDoge/Doge-20M")
        model = DogeForCausalLM.from_pretrained("SmallDoge/Doge-20M", device_map="auto", dtype=torch.bfloat16)
        input_text = ["Here's everything I know about dogs. Dogs is the best animal in the"]
        set_seed(42)
        model_inputs = tokenizer(input_text, return_tensors="pt").to(model.device)
        # Greedy decoding (do_sample=False) so the output is deterministic for the exact-match check below.
        generated_ids = model.generate(**model_inputs, max_new_tokens=20, do_sample=False)
        generated_text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
        self.assertEqual(generated_text, EXPECTED_TEXT)
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/doge/test_modeling_doge.py",
"license": "Apache License 2.0",
"lines": 338,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:examples/modular-transformers/modular_global_indexing.py | from transformers.modeling_utils import AttentionInterface
from transformers.models.llama.modeling_llama import LlamaAttention
def custom_flex(x, **kwargs):
    """Identity placeholder for a flex-attention implementation; extra kwargs are ignored."""
    out = x
    return out
# Module-level attention registry used by this modular-conversion example.
ALL_ATTENTION_FUNCTIONS = AttentionInterface()
# This indexing statement and associated function should be exported correctly!
ALL_ATTENTION_FUNCTIONS["flex_attention"] = custom_flex
class GlobalIndexingAttention(LlamaAttention):
    """Intentionally empty subclass — presumably just an anchor so the modular converter
    processes this file and exports the module-level indexing statement above; confirm
    against the modular-conversion tests."""

    pass
| {
"repo_id": "huggingface/transformers",
"file_path": "examples/modular-transformers/modular_global_indexing.py",
"license": "Apache License 2.0",
"lines": 10,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
huggingface/transformers:tests/utils/test_masking_utils.py | # Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers.testing_utils import (
cleanup,
is_torch_available,
require_torch,
torch_device,
)
if is_torch_available():
import torch
from torch.nn.attention.flex_attention import create_block_mask
from transformers import DynamicCache, LlamaConfig
from transformers.cache_utils import DynamicSlidingWindowLayer
from transformers.masking_utils import (
create_bidirectional_mask,
create_causal_mask,
create_chunked_causal_mask,
find_packed_sequence_indices,
)
# fmt: off
# Reference mask for the packed-sequence tests below. Batch 0 packs sequences of
# lengths 4, 2, 4; batch 1 packs lengths 6, 4. Shape: (batch=2, heads=1, q=10, kv=10).
# NOTE: the second batch element previously opened with a spurious extra "[" which made
# the literal unbalanced; fixed so the tensor parses with the intended shape.
EXPECTED_PACKED_MASK = torch.tensor([[[
    [ True, False, False, False, False, False, False, False, False, False],
    [ True, True, False, False, False, False, False, False, False, False],
    [ True, True, True, False, False, False, False, False, False, False],
    [ True, True, True, True, False, False, False, False, False, False],
    [False, False, False, False, True, False, False, False, False, False],
    [False, False, False, False, True, True, False, False, False, False],
    [False, False, False, False, False, False, True, False, False, False],
    [False, False, False, False, False, False, True, True, False, False],
    [False, False, False, False, False, False, True, True, True, False],
    [False, False, False, False, False, False, True, True, True, True]]],
    [[[ True, False, False, False, False, False, False, False, False, False],
    [ True, True, False, False, False, False, False, False, False, False],
    [ True, True, True, False, False, False, False, False, False, False],
    [ True, True, True, True, False, False, False, False, False, False],
    [ True, True, True, True, True, False, False, False, False, False],
    [ True, True, True, True, True, True, False, False, False, False],
    [False, False, False, False, False, False, True, False, False, False],
    [False, False, False, False, False, False, True, True, False, False],
    [False, False, False, False, False, False, True, True, True, False],
    [False, False, False, False, False, False, True, True, True, True]
]]], dtype=torch.bool)
# fmt: on
@require_torch
class MaskTest(unittest.TestCase):
    """Tests for the attention-mask creation utilities in `masking_utils`."""

    def setUp(self):
        # NOTE: this was previously named `setup`, which unittest never calls
        # (the lifecycle hook is `setUp`), so the pre-test cleanup silently never ran.
        cleanup(torch_device, gc_collect=True)

    def tearDown(self):
        cleanup(torch_device, gc_collect=True)

    def test_packed_sequence_mask_sdpa(self):
        """Packed sequences produce a block-diagonal causal bool mask under sdpa."""
        config = LlamaConfig()
        config._attn_implementation = "sdpa"

        batch_size = 2
        sequence_length = 10
        cache_position = torch.arange(sequence_length)
        # First batch has 3 packed sequences of 4, 2 and 4 tokens respectively, second has 2 of 6 and 4 tokens
        position_ids = torch.tensor([[0, 1, 2, 3, 0, 1, 0, 1, 2, 3], [0, 1, 2, 3, 4, 5, 0, 1, 2, 3]])
        causal_mask = create_causal_mask(
            config=config,
            # we only need batch size, seq_length and dtype here - we don't care about the values of the embeddings
            inputs_embeds=torch.empty((batch_size, sequence_length), dtype=torch.float16),
            attention_mask=None,
            cache_position=cache_position,
            past_key_values=None,
            position_ids=position_ids,
        )
        self.assertTrue((causal_mask == EXPECTED_PACKED_MASK).all())

    def test_packed_sequence_mask_eager(self):
        """Under eager, the same mask is additive: 0 where allowed, dtype-min where masked."""
        config = LlamaConfig()
        config._attn_implementation = "eager"

        batch_size = 2
        sequence_length = 10
        cache_position = torch.arange(sequence_length)
        # First batch has 3 packed sequences of 4, 2 and 4 tokens respectively, second has 2 of 6 and 4 tokens
        position_ids = torch.tensor([[0, 1, 2, 3, 0, 1, 0, 1, 2, 3], [0, 1, 2, 3, 4, 5, 0, 1, 2, 3]])
        causal_mask = create_causal_mask(
            config=config,
            # we only need batch size, seq_length and dtype here - we don't care about the values of the embeddings
            inputs_embeds=torch.empty((batch_size, sequence_length), dtype=torch.float16),
            attention_mask=None,
            cache_position=cache_position,
            past_key_values=None,
            position_ids=position_ids,
        )
        min_dtype = torch.finfo(torch.float16).min
        self.assertTrue((causal_mask == torch.where(EXPECTED_PACKED_MASK, 0.0, min_dtype)).all())

    def test_packed_sequence_mask_flex_attention(self):
        """Under flex_attention, the packed mask becomes an equivalent BlockMask."""
        config = LlamaConfig()
        config._attn_implementation = "flex_attention"

        batch_size = 2
        sequence_length = 10
        cache_position = torch.arange(sequence_length)
        # First batch has 3 packed sequences of 4, 2 and 4 tokens respectively, second has 2 of 6 and 4 tokens
        position_ids = torch.tensor([[0, 1, 2, 3, 0, 1, 0, 1, 2, 3], [0, 1, 2, 3, 4, 5, 0, 1, 2, 3]])
        causal_mask = create_causal_mask(
            config=config,
            # we only need batch size, seq_length and dtype here - we don't care about the values of the embeddings
            inputs_embeds=torch.empty((batch_size, sequence_length), dtype=torch.float16),
            attention_mask=None,
            cache_position=cache_position,
            past_key_values=None,
            position_ids=position_ids,
        )

        def dummy_mask_mod(b, h, q, kv):
            return EXPECTED_PACKED_MASK[b, h, q, kv]

        EXPECTED_BLOCK_MASK = create_block_mask(dummy_mask_mod, 2, None, 10, 10, device="cpu")
        # We compare the str representations, as the BlockMask objects themselves cannot easily be compared
        self.assertEqual(causal_mask.to_string(), EXPECTED_BLOCK_MASK.to_string())

    def test_find_packed_sequence_indices(self):
        """Each token is assigned the index of the packed sub-sequence it belongs to."""
        position_ids = torch.tensor([[0, 1, 2, 3, 0, 1, 0, 1, 2, 3], [0, 1, 2, 3, 4, 5, 0, 1, 2, 3]])
        EXPECTED_SEQUENCE_INDICES = torch.tensor([[0, 0, 0, 0, 1, 1, 2, 2, 2, 2], [0, 0, 0, 0, 0, 0, 1, 1, 1, 1]])
        self.assertTrue((find_packed_sequence_indices(position_ids) == EXPECTED_SEQUENCE_INDICES).all())

    def test_nonpacked_sequence_mask_skip(self):
        """For plain (non-packed) sequences, sdpa mask creation may return None — except under compile."""
        config = LlamaConfig()
        config._attn_implementation = "sdpa"

        batch_size = 2
        sequence_length = 10
        cache_position = torch.arange(sequence_length)
        # Non-packed sequences
        position_ids = torch.arange(sequence_length)[None, :]
        causal_mask = create_causal_mask(
            config=config,
            # we only need batch size, seq_length and dtype here - we don't care about the values of the embeddings
            inputs_embeds=torch.empty((batch_size, sequence_length), dtype=torch.float16),
            attention_mask=None,
            cache_position=cache_position,
            past_key_values=None,
            position_ids=position_ids,
        )
        # packed sequence should be skipped
        self.assertTrue(causal_mask is None)

        create_causal_mask_compiled = torch.compile(create_causal_mask, mode="reduce-overhead")
        causal_mask = create_causal_mask_compiled(
            config=config,
            # we only need batch size, seq_length and dtype here - we don't care about the values of the embeddings
            inputs_embeds=torch.empty((batch_size, sequence_length), dtype=torch.float16),
            attention_mask=None,
            cache_position=cache_position,
            past_key_values=None,
            position_ids=position_ids,
        )
        # cannot be skipped under compile, should result into a triu mask
        self.assertTrue(torch.equal(~torch.ones(*causal_mask.shape).triu(diagonal=1).bool(), causal_mask))

    def test_chunked_mask_with_left_padding_and_large_prefill(self):
        """Chunk boundaries must be counted from the first non-padding token, not from index 0."""
        # Make sure we have an attention_chunk_size in the config
        config = LlamaConfig(attention_chunk_size=3, attn_implementation="sdpa")
        batch_size = 2
        sequence_length = 8
        pad_tokens = 4
        input_ids = torch.randint(100, 200, (batch_size, sequence_length))
        attention_mask = torch.tensor(
            [[0 if i < pad_tokens else 1 for i in range(sequence_length)], [1] * sequence_length]
        )
        inputs_embeds = torch.empty_like(input_ids, dtype=torch.float16)
        cache_position = torch.arange(sequence_length)
        position_ids = torch.empty(batch_size, sequence_length, dtype=cache_position.dtype)
        position_ids[0, :pad_tokens] = 1
        position_ids[0, pad_tokens:] = torch.arange(sequence_length - pad_tokens)
        position_ids[1, :] = cache_position

        chunked_attention_mask = create_chunked_causal_mask(
            config=config,
            inputs_embeds=inputs_embeds,
            attention_mask=attention_mask,
            cache_position=cache_position,
            past_key_values=None,
            position_ids=position_ids,
        )
        # fmt: off
        EXPECTED_CHUNKED_MASK = torch.tensor(
            # Here, for the padded sequence, the chunk size should start correctly at index 4 (otherwise, with 4 padding
            # tokens and chunk_size=3, the first chunk is from indices 0-2, then 3-6 if we don't account for the padding correctly)
            [[[[False, False, False, False, False, False, False, False],
            [False, False, False, False, False, False, False, False],
            [False, False, False, False, False, False, False, False],
            [False, False, False, False, False, False, False, False],
            [False, False, False, False, True, False, False, False],
            [False, False, False, False, True, True, False, False],
            [False, False, False, False, True, True, True, False],
            [False, False, False, False, False, False, False, True]]],
            [[[ True, False, False, False, False, False, False, False],
            [ True, True, False, False, False, False, False, False],
            [ True, True, True, False, False, False, False, False],
            [False, False, False, True, False, False, False, False],
            [False, False, False, True, True, False, False, False],
            [False, False, False, True, True, True, False, False],
            [False, False, False, False, False, False, True, False],
            [False, False, False, False, False, False, True, True]]]],
            dtype=torch.bool)
        # fmt: on
        self.assertTrue((chunked_attention_mask == EXPECTED_CHUNKED_MASK).all())

    def test_chunked_mask_with_left_padding_decoding(self):
        """Decoding one token after a prefill: chunk boundaries still respect left padding."""
        # Make sure we have an attention_chunk_size in the config
        config = LlamaConfig(attention_chunk_size=4, attn_implementation="sdpa", num_hidden_layers=1)
        cache = DynamicCache(config=config)
        # Sanity check
        self.assertEqual(len(cache), 1)
        self.assertTrue(isinstance(cache.layers[0], DynamicSlidingWindowLayer))

        # Fill-in the Cache (sequence length is bigger than chunk size here)
        batch_size = 2
        prefill_size = 8
        pad_tokens = 7
        fake_kv = torch.rand(batch_size, 32, prefill_size, 32)
        cache.update(fake_kv, fake_kv, 0, torch.arange(prefill_size))

        # Create a new input after the prefill
        input_ids = torch.randint(100, 200, (batch_size, 1))
        attention_mask = torch.tensor(
            [[0 if i < pad_tokens else 1 for i in range(prefill_size + 1)], [1] * (prefill_size + 1)]
        )
        inputs_embeds = torch.empty_like(input_ids, dtype=torch.float16)
        cache_position = torch.tensor([prefill_size], dtype=int)
        position_ids = torch.tensor([[prefill_size - pad_tokens], [prefill_size]])

        chunked_attention_mask = create_chunked_causal_mask(
            config=config,
            inputs_embeds=inputs_embeds,
            attention_mask=attention_mask,
            cache_position=cache_position,
            past_key_values=cache,
            position_ids=position_ids,
        )
        # To understand a bit more the following expected mask, here is the full 2d mask, where the "|" characters are the chunk
        # separators (where the tokens should stop seeing each other)
        # [0, 0, 0, 0, 0, 0, 0, | 1, 1], -> due to left padding, the first chunk only starts after the padding tokens
        # [| 1, 1, 1, 1, | 1, 1, 1, 1, | 1]]) -> easy case, each 4 tokens is a new chunk
        # fmt: off
        EXPECTED_CHUNKED_MASK = torch.tensor(
            # Here, for the padded sequence, the chunk size should start correctly at index 7 (the first unpadded
            # index), and so only indices 7 and 8 should be True
            [[[[False, False, True, True]]],
            # Here, for the unpadded sequence, the chunks start at index 0. Since we have 9 tokens in total, the last
            # token (index 8) will only see itself (we have 2 full chunks before)
            [[[False, False, False, True]]]],
            dtype=torch.bool)
        # fmt: on
        self.assertTrue((chunked_attention_mask == EXPECTED_CHUNKED_MASK).all())

    @staticmethod
    def _run_bidirectional_mask(mask_fn, attn_implementation):
        """Drive `mask_fn` over self-attention and cross-attention shapes, with and without padding.

        Returns ((full_mask_encoder_pair, full_mask_cross_pair), (padded_encoder, padded_cross)).
        """

        def run_mask_creation(mask_fn, config, inputs_embeds, encoder_mask, cross_mask, encoder_hidden_states):
            encoder_attn_mask = mask_fn(
                config=config,
                inputs_embeds=inputs_embeds,
                attention_mask=encoder_mask,
            )
            cross_attn_mask = mask_fn(
                config=config,
                inputs_embeds=inputs_embeds,
                attention_mask=cross_mask,
                encoder_hidden_states=encoder_hidden_states,
            )
            return encoder_attn_mask, cross_attn_mask

        # We use llama but could be also bert/bart --> we only need the `_attn_implementation` here
        config = LlamaConfig()
        config._attn_implementation = attn_implementation

        # Meta data
        batch_size = 2
        q_length = 10
        kv_length = 5
        inputs_embeds = torch.ones((batch_size, q_length, 1), device=torch_device, dtype=torch.float16)
        encoder_hidden_states = torch.ones((batch_size, kv_length, 1), device=torch_device, dtype=torch.float16)
        encoder_mask = torch.ones_like(inputs_embeds)[..., 0]
        cross_mask = torch.ones_like(encoder_hidden_states)[..., 0]

        # Case 1: Full mask
        full_mask_encoder_1, full_mask_cross_1 = run_mask_creation(
            mask_fn=mask_fn,
            config=config,
            inputs_embeds=inputs_embeds,
            encoder_mask=encoder_mask,
            cross_mask=cross_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        full_mask_encoder_2, full_mask_cross_2 = run_mask_creation(
            mask_fn=mask_fn,
            config=config,
            inputs_embeds=inputs_embeds,
            encoder_mask=None,
            cross_mask=None,
            encoder_hidden_states=encoder_hidden_states,
        )

        # Case 2: Padding involved
        cross_mask[:, -1] = 0
        encoder_mask[:, -1] = 0
        padded_mask_encoder, padded_mask_cross = run_mask_creation(
            mask_fn=mask_fn,
            config=config,
            inputs_embeds=inputs_embeds,
            encoder_mask=encoder_mask,
            cross_mask=cross_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        full_masks = (full_mask_encoder_1, full_mask_encoder_2), (full_mask_cross_1, full_mask_cross_2)
        padded_masks = (padded_mask_encoder, padded_mask_cross)
        return full_masks, padded_masks

    def test_bidirectional_mask_cudagraphs(self):
        """
        Checks whether the bidirectional mask creation is compatible with cuda graphs, i.e. we do not into any error
        during this test.
        """
        mask_creation_function = torch.compile(create_bidirectional_mask, mode="reduce-overhead")
        self._run_bidirectional_mask(mask_fn=mask_creation_function, attn_implementation="sdpa")

    def test_bidirectional_mask_skip_eager(self):
        """
        Checks whether the bidirectional mask creation can skip the mask creation if we have a full mask.
        """
        full_masks, padded_mask = self._run_bidirectional_mask(
            mask_fn=create_bidirectional_mask, attn_implementation="eager"
        )
        for alternative_masks in full_masks:
            self.assertTrue(alternative_masks[0] is None)
            self.assertTrue(alternative_masks[1] is None)
        self.assertTrue(padded_mask[0] is not None)
        self.assertTrue(padded_mask[1] is not None)
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/utils/test_masking_utils.py",
"license": "Apache License 2.0",
"lines": 322,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:src/transformers/models/chameleon/image_processing_chameleon_fast.py | # Copyright 2025 Meta Inc. and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fast Image processor class for Chameleon."""
from typing import Optional
import numpy as np
import PIL
import torch
import torchvision.transforms.v2.functional as tvF
from ...image_processing_utils_fast import BaseImageProcessorFast
from ...image_utils import ImageInput, PILImageResampling, SizeDict
from ...utils import auto_docstring, logging
logger = logging.get_logger(__name__)
@auto_docstring
class ChameleonImageProcessorFast(BaseImageProcessorFast):
    # Defaults mirror the slow Chameleon processor; note that LANCZOS is downgraded
    # to BICUBIC at resize time for tensors (see `resize` below).
    resample = PILImageResampling.LANCZOS
    image_mean = [1.0, 1.0, 1.0]
    image_std = [1.0, 1.0, 1.0]
    size = {"shortest_edge": 512}
    default_to_square = False
    crop_size = {"height": 512, "width": 512}
    do_resize = True
    do_center_crop = True
    do_rescale = True
    # NOTE(review): presumably ~1/128 to match the slow processor's scaling — confirm upstream.
    rescale_factor = 0.0078
    do_normalize = True
    do_convert_rgb = True

    def convert_to_rgb(self, image: ImageInput) -> ImageInput:
        """
        Convert image to RGB by blending the transparency layer if it's in RGBA format.
        If image is not `PIL.Image`, it is simply returned without modifications.

        Args:
            image (`ImageInput`):
                Image to convert.
        """
        if not isinstance(image, PIL.Image.Image):
            return image
        elif image.mode == "RGB":
            return image

        img_rgba = np.array(image.convert("RGBA"))

        # If there is no transparency layer, simply convert and return.
        if not (img_rgba[:, :, 3] < 255).any():
            return image.convert("RGB")

        # There is a transparency layer, blend it with a white background.
        # Calculate the alpha proportion for blending.
        alpha = img_rgba[:, :, 3] / 255.0
        img_rgb = (1 - alpha[:, :, np.newaxis]) * 255 + alpha[:, :, np.newaxis] * img_rgba[:, :, :3]
        return PIL.Image.fromarray(img_rgb.astype("uint8"), "RGB")

    def resize(
        self,
        image: "torch.Tensor",
        size: SizeDict,
        interpolation: Optional["tvF.InterpolationMode"] = None,
        **kwargs,
    ) -> "torch.Tensor":
        """
        Resize an image to `(size["height"], size["width"])`.

        Args:
            image (`torch.Tensor`):
                Image to resize.
            size (`SizeDict`):
                Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image.
            resample (`InterpolationMode`, *optional*, defaults to `InterpolationMode.BILINEAR`):
                `InterpolationMode` filter to use when resizing the image e.g. `InterpolationMode.BICUBIC`.

        Returns:
            `torch.Tensor`: The resized image.
        """
        interpolation = interpolation if interpolation is not None else tvF.InterpolationMode.BILINEAR
        # LANCZOS is not implemented for torch tensors: fall back to BICUBIC with a one-time warning.
        if interpolation == tvF.InterpolationMode.LANCZOS:
            logger.warning_once(
                "You have used fast image processor with LANCZOS resample which not yet supported for torch.Tensor. "
                "BICUBIC resample will be used as an alternative. Please fall back to slow image processor if you "
                "want full consistency with the original model."
            )
            interpolation = tvF.InterpolationMode.BICUBIC
        return super().resize(
            image=image,
            size=size,
            interpolation=interpolation,
            **kwargs,
        )
# Public API of this module.
__all__ = ["ChameleonImageProcessorFast"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/chameleon/image_processing_chameleon_fast.py",
"license": "Apache License 2.0",
"lines": 92,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/mobilevit/image_processing_mobilevit_fast.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fast Image processor class for MobileViT."""
from typing import Optional, Union
import torch
import torchvision.transforms.v2.functional as tvF
from ...image_processing_utils import BatchFeature
from ...image_processing_utils_fast import (
BaseImageProcessorFast,
group_images_by_shape,
reorder_images,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
SizeDict,
is_torch_tensor,
)
from ...processing_utils import Unpack
from ...utils import (
TensorType,
auto_docstring,
)
from .image_processing_mobilevit import MobileVitImageProcessorKwargs
@auto_docstring
class MobileViTImageProcessorFast(BaseImageProcessorFast):
resample = PILImageResampling.BICUBIC
size = {"shortest_edge": 224}
default_to_square = False
crop_size = {"height": 256, "width": 256}
do_resize = True
do_center_crop = True
do_rescale = True
do_normalize = None
do_convert_rgb = None
do_flip_channel_order = True
do_reduce_labels = False
valid_kwargs = MobileVitImageProcessorKwargs
def __init__(self, **kwargs: Unpack[MobileVitImageProcessorKwargs]):
    # No extra state here: all defaults come from the class attributes above,
    # and kwargs may override any of them.
    super().__init__(**kwargs)
# Behaviorally identical to transformers.models.beit.image_processing_beit_fast.BeitImageProcessorFast.reduce_label
def reduce_label(self, labels: list["torch.Tensor"]):
    """Shift class ids down by one in place, mapping background (0) to the ignore index 255."""
    for idx, label in enumerate(labels):
        ignore_index = torch.tensor(255, dtype=label.dtype)
        # 0 (background) -> 255, then shift everything down by one; the shifted
        # background (254) is remapped onto the ignore index.
        shifted = torch.where(label == 0, ignore_index, label) - 1
        labels[idx] = torch.where(shifted == 254, ignore_index, shifted)
    return labels
@auto_docstring
def preprocess(
    self,
    images: ImageInput,
    segmentation_maps: ImageInput | None = None,
    **kwargs: Unpack[MobileVitImageProcessorKwargs],
) -> BatchFeature:
    r"""
    segmentation_maps (`ImageInput`, *optional*):
        The segmentation maps to preprocess.
    """
    # Thin wrapper over the base class; presumably the base routes image-like inputs
    # through `_preprocess_image_like_inputs` below — confirm in BaseImageProcessorFast.
    return super().preprocess(images, segmentation_maps, **kwargs)
def _preprocess_image_like_inputs(
    self,
    images: ImageInput,
    segmentation_maps: ImageInput | None,
    do_convert_rgb: bool,
    input_data_format: ChannelDimension,
    device: Union[str, "torch.device"] | None = None,
    **kwargs: Unpack[MobileVitImageProcessorKwargs],
) -> BatchFeature:
    """
    Preprocess image-like inputs.
    """
    images = self._prepare_image_like_inputs(
        images=images, do_convert_rgb=do_convert_rgb, input_data_format=input_data_format, device=device
    )
    images_kwargs = kwargs.copy()
    # Label reduction only ever applies to segmentation maps, never to pixel values.
    images_kwargs["do_reduce_labels"] = False
    batch_feature = self._preprocess(images, **images_kwargs)
    if segmentation_maps is not None:
        # Segmentation maps are single-channel (expected_ndims=2) and never RGB-converted.
        processed_segmentation_maps = self._prepare_image_like_inputs(
            images=segmentation_maps,
            expected_ndims=2,
            do_convert_rgb=False,
            input_data_format=ChannelDimension.FIRST,
        )
        segmentation_maps_kwargs = kwargs.copy()
        segmentation_maps_kwargs.update(
            {
                # Class-id maps must not be rescaled or channel-flipped.
                "do_rescale": False,
                "do_flip_channel_order": False,
                # Nearest interpolation is used for segmentation maps instead of BILINEAR.
                "interpolation": tvF.InterpolationMode.NEAREST_EXACT,
            }
        )
        processed_segmentation_maps = self._preprocess(
            images=processed_segmentation_maps, **segmentation_maps_kwargs
        ).pixel_values
        # Drop the channel dim and cast to int64 class ids.
        batch_feature["labels"] = processed_segmentation_maps.squeeze(1).to(torch.int64)
    return batch_feature
    def _preprocess(
        self,
        images: list["torch.Tensor"],
        do_reduce_labels: bool,
        do_resize: bool,
        size: SizeDict | None,
        interpolation: Optional["tvF.InterpolationMode"],
        do_rescale: bool,
        rescale_factor: float | None,
        do_center_crop: bool,
        crop_size: SizeDict | None,
        do_flip_channel_order: bool,
        disable_grouping: bool,
        return_tensors: str | TensorType | None,
        **kwargs,
    ) -> BatchFeature:
        """
        Shared pipeline for images and segmentation maps: optional label reduction,
        then per-shape-group resize, center-crop, rescale and RGB->BGR channel flip.
        """
        processed_images = []
        if do_reduce_labels:
            # Only set for segmentation maps: remaps background (0) to 255 and shifts ids.
            images = self.reduce_label(images)
        # Group images by shape for more efficient batch processing
        grouped_images, grouped_images_index = group_images_by_shape(images, disable_grouping=disable_grouping)
        resized_images_grouped = {}
        # Process each group of images with the same shape
        for shape, stacked_images in grouped_images.items():
            if do_resize:
                stacked_images = self.resize(image=stacked_images, size=size, interpolation=interpolation)
            resized_images_grouped[shape] = stacked_images
        # Reorder images to original sequence
        resized_images = reorder_images(resized_images_grouped, grouped_images_index)
        # Group again after resizing (in case resize produced different sizes)
        grouped_images, grouped_images_index = group_images_by_shape(resized_images, disable_grouping=disable_grouping)
        processed_images_grouped = {}
        for shape, stacked_images in grouped_images.items():
            if do_center_crop:
                stacked_images = self.center_crop(image=stacked_images, size=crop_size)
            if do_rescale:
                stacked_images = self.rescale(image=stacked_images, scale=rescale_factor)
            if do_flip_channel_order:
                # For batched images, we need to handle them all at once
                if stacked_images.ndim > 3 and stacked_images.shape[1] >= 3:
                    # Flip RGB → BGR for batched images; any channels beyond the
                    # first three are left untouched.
                    flipped = stacked_images.clone()
                    flipped[:, 0:3] = stacked_images[:, [2, 1, 0], ...]
                    stacked_images = flipped
            processed_images_grouped[shape] = stacked_images
        processed_images = reorder_images(processed_images_grouped, grouped_images_index)
        # Stack all processed images if return_tensors is specified
        return BatchFeature(data={"pixel_values": processed_images}, tensor_type=return_tensors)
def post_process_semantic_segmentation(self, outputs, target_sizes: list[tuple] | None = None):
"""
Converts the output of [`MobileNetV2ForSemanticSegmentation`] into semantic segmentation maps. Only supports PyTorch.
Args:
outputs ([`MobileNetV2ForSemanticSegmentation`]):
Raw outputs of the model.
target_sizes (`list[Tuple]` of length `batch_size`, *optional*):
List of tuples corresponding to the requested final size (height, width) of each prediction. If unset,
predictions will not be resized.
Returns:
semantic_segmentation: `list[torch.Tensor]` of length `batch_size`, where each item is a semantic
segmentation map of shape (height, width) corresponding to the target_sizes entry (if `target_sizes` is
specified). Each entry of each `torch.Tensor` correspond to a semantic class id.
"""
logits = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(logits) != len(target_sizes):
raise ValueError(
"Make sure that you pass in as many target sizes as the batch dimension of the logits"
)
if is_torch_tensor(target_sizes):
target_sizes = target_sizes.numpy()
semantic_segmentation = []
for idx in range(len(logits)):
resized_logits = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
)
semantic_map = resized_logits[0].argmax(dim=0)
semantic_segmentation.append(semantic_map)
else:
semantic_segmentation = logits.argmax(dim=1)
semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
return semantic_segmentation
# Explicit public API of this module.
__all__ = ["MobileViTImageProcessorFast"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/mobilevit/image_processing_mobilevit_fast.py",
"license": "Apache License 2.0",
"lines": 195,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/nougat/image_processing_nougat_fast.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fast Image processor class for Nougat."""
from typing import Optional
import torch
import torchvision.transforms.v2.functional as tvF
from ...image_processing_utils import BatchFeature
from ...image_processing_utils_fast import (
BaseImageProcessorFast,
group_images_by_shape,
reorder_images,
)
from ...image_transforms import (
get_resize_output_image_size,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
SizeDict,
)
from ...processing_utils import Unpack
from ...utils import (
TensorType,
auto_docstring,
)
from .image_processing_nougat import NougatImageProcessorKwargs
@auto_docstring
class NougatImageProcessorFast(BaseImageProcessorFast):
    resample = PILImageResampling.BILINEAR
    image_mean = IMAGENET_DEFAULT_MEAN
    image_std = IMAGENET_DEFAULT_STD
    size = {"height": 896, "width": 672}
    # Fix: this attribute was `(True,)` — a stray trailing comma turned the bool into
    # a one-element tuple. A non-empty tuple is truthy so resizing still ran, but the
    # attribute is declared (and consumed) as a plain bool.
    do_resize: bool = True
    do_normalize: bool = True
    do_thumbnail: bool = True
    do_align_long_axis: bool = False
    do_pad: bool = True
    do_rescale = True
    do_crop_margin: bool = True
    valid_kwargs = NougatImageProcessorKwargs

    def __init__(self, **kwargs: Unpack[NougatImageProcessorKwargs]):
        super().__init__(**kwargs)

    @auto_docstring
    def preprocess(self, images: ImageInput, **kwargs: Unpack[NougatImageProcessorKwargs]) -> BatchFeature:
        return super().preprocess(images, **kwargs)

    def python_find_non_zero(
        self,
        image: "torch.Tensor",
    ):
        """This is a reimplementation of a findNonZero function equivalent to cv2."""
        non_zero_indices = torch.nonzero(image, as_tuple=False)
        # cv2 returns (x, y) = (col, row) pairs with shape (num_points, 1, 2).
        idxvec = non_zero_indices[:, [2, 1]]
        idxvec = idxvec.reshape(-1, 1, 2)
        return idxvec

    def python_bounding_rect(self, coordinates):
        """This is a reimplementation of a BoundingRect function equivalent to cv2."""
        min_values = torch.amin(coordinates, axis=(0, 1)).to(torch.int)
        max_values = torch.amax(coordinates, axis=(0, 1)).to(torch.int)
        x_min, y_min = min_values[0], min_values[1]
        width = max_values[0] - x_min + 1
        height = max_values[1] - y_min + 1
        return x_min, y_min, width, height

    def crop_margin(
        self,
        image: "torch.Tensor",
        gray_threshold: int = 200,
    ) -> "torch.Tensor":
        """
        Crops the margin of the image. Gray pixels are considered margin (i.e., pixels with a value below the
        threshold).

        Args:
            image (`torch.Tensor`):
                The image to be cropped.
            gray_threshold (`int`, *optional*, defaults to `200`):
                Value below which pixels are considered to be gray.
        """
        data = tvF.rgb_to_grayscale(image, num_output_channels=1)

        max_val = torch.max(data)
        min_val = torch.min(data)
        if max_val == min_val:
            # Uniform image: there is no content to crop around.
            return image
        # Normalize intensities to [0, 255] before applying the gray threshold.
        data = (data - min_val) / (max_val - min_val) * 255
        gray = data < gray_threshold
        coords = self.python_find_non_zero(gray)
        x_min, y_min, width, height = self.python_bounding_rect(coords)
        image = image[:, y_min : y_min + height, x_min : x_min + width]
        return image

    def align_long_axis(
        self,
        image: "torch.Tensor",
        size: SizeDict,
    ) -> "torch.Tensor":
        """
        Align the long axis of the image to the longest axis of the specified size.

        Args:
            image (`torch.Tensor`):
                The image to be aligned.
            size (`Dict[str, int]`):
                The size `{"height": h, "width": w}` to align the long axis to.

        Returns:
            `torch.Tensor`: The aligned image.
        """
        input_height, input_width = image.shape[-2:]
        output_height, output_width = size.height, size.width

        if (output_width < output_height and input_width > input_height) or (
            output_width > output_height and input_width < input_height
        ):
            # Fix: rotate in the (height, width) plane, i.e. the last two dims.
            # The previous `dims=[1, 2]` rotated the (channels, height) plane for the
            # batched 4-D input this receives from `_preprocess`; for unbatched 3-D
            # input `[-2, -1]` is equivalent to the old behavior.
            image = torch.rot90(image, 3, dims=[-2, -1])

        return image

    def thumbnail(
        self,
        image: "torch.Tensor",
        size: SizeDict,
    ) -> "torch.Tensor":
        """
        Resize the image to make a thumbnail. The image is resized so that no dimension is larger than any
        corresponding dimension of the specified size.

        Args:
            image (`torch.tensor`):
                The image to be resized.
            size (`Dict[str, int]`):
                The size `{"height": h, "width": w}` to resize the image to.
        """
        input_height, input_width = image.shape[-2:]
        output_height, output_width = size.height, size.width

        # We always resize to the smallest of either the input or output size.
        height = min(input_height, output_height)
        width = min(input_width, output_width)

        if height == input_height and width == input_width:
            return image

        # Shrink the larger dimension proportionally to preserve the aspect ratio.
        if input_height > input_width:
            width = int(input_width * height / input_height)
        elif input_width > input_height:
            height = int(input_height * width / input_width)

        new_size = (height, width)

        return tvF.resize(image, new_size, interpolation=tvF.InterpolationMode.BICUBIC)

    def pad_images(
        self,
        image: "torch.Tensor",
        size: SizeDict,
    ) -> "torch.Tensor":
        """
        Pads a batch of images to the specified size at the top, bottom, left and right.

        Args:
            image (`torch.tensor`):
                The image to be padded.
            size (`Dict[str, int]`):
                The size `{"height": h, "width": w}` to pad the image to.
        """
        input_height, input_width = image.shape[-2:]
        output_height, output_width = size.height, size.width

        delta_width = output_width - input_width
        delta_height = output_height - input_height

        # Split the padding as evenly as possible between the two sides.
        # NOTE(review): assumes the input is no larger than the target (non-negative
        # deltas); in the pipeline `thumbnail` guarantees this — confirm for direct use.
        pad_top = delta_height // 2
        pad_left = delta_width // 2

        pad_bottom = delta_height - pad_top
        pad_right = delta_width - pad_left

        padding = (pad_left, pad_top, pad_right, pad_bottom)
        return tvF.pad(image, padding)

    def resize(
        self,
        image: "torch.Tensor",
        size: SizeDict,
        interpolation: Optional["tvF.InterpolationMode"] = None,
        antialias: bool = True,
        **kwargs,
    ) -> "torch.Tensor":
        """
        Resize an image so that its shortest edge matches `min(size["height"], size["width"])`, keeping the
        aspect ratio.

        Args:
            image (`torch.Tensor`):
                Image to resize.
            size (`SizeDict`):
                Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image.
            interpolation (`InterpolationMode`, *optional*, defaults to `InterpolationMode.BICUBIC`):
                `InterpolationMode` filter to use when resizing the image e.g. `InterpolationMode.BICUBIC`.

        Returns:
            `torch.Tensor`: The resized image.
        """
        interpolation = interpolation if interpolation is not None else tvF.InterpolationMode.BICUBIC
        shortest_edge = min(size["height"], size["width"])
        new_size = get_resize_output_image_size(
            image, size=shortest_edge, default_to_square=False, input_data_format=ChannelDimension.FIRST
        )
        return tvF.resize(image, new_size, interpolation=interpolation, antialias=antialias)

    def _preprocess(
        self,
        images: list["torch.Tensor"],
        do_resize: bool,
        size: SizeDict,
        do_align_long_axis: bool,
        do_thumbnail: bool,
        do_pad: bool,
        interpolation: Optional["tvF.InterpolationMode"],
        do_center_crop: bool,
        crop_size: SizeDict,
        do_rescale: bool,
        rescale_factor: float,
        do_normalize: bool,
        do_crop_margin: bool,
        image_mean: float | list[float] | None,
        image_std: float | list[float] | None,
        disable_grouping: bool,
        return_tensors: str | TensorType | None,
        **kwargs,
    ) -> BatchFeature:
        """
        Run the Nougat pipeline: crop margins, then (per shape group) align the long
        axis, resize, thumbnail and pad, and finally fused rescale/normalize.
        """
        # Fix: honor the `do_crop_margin` flag — it was accepted but ignored, so
        # margin cropping always ran. Crop first since it changes per-image shapes.
        if do_crop_margin:
            images = [self.crop_margin(image) for image in images]

        # Group images by size for batched resizing
        grouped_images, grouped_images_index = group_images_by_shape(images, disable_grouping=disable_grouping)
        resized_images_grouped = {}
        for shape, stacked_images in grouped_images.items():
            if do_align_long_axis:
                stacked_images = self.align_long_axis(image=stacked_images, size=size)
            if do_resize:
                stacked_images = self.resize(image=stacked_images, size=size)
            if do_thumbnail:
                stacked_images = self.thumbnail(image=stacked_images, size=size)
            if do_pad:
                stacked_images = self.pad_images(image=stacked_images, size=size)
            resized_images_grouped[shape] = stacked_images
        resized_images = reorder_images(resized_images_grouped, grouped_images_index)

        # Group images by size for further processing
        # Needed in case do_resize is False, or resize returns images with different sizes
        grouped_images, grouped_images_index = group_images_by_shape(resized_images, disable_grouping=disable_grouping)
        processed_images_grouped = {}
        for shape, stacked_images in grouped_images.items():
            # Fused rescale and normalize
            stacked_images = self.rescale_and_normalize(
                stacked_images, do_rescale, rescale_factor, do_normalize, image_mean, image_std
            )
            processed_images_grouped[shape] = stacked_images

        processed_images = reorder_images(processed_images_grouped, grouped_images_index)

        return BatchFeature(data={"pixel_values": processed_images}, tensor_type=return_tensors)
# Explicit public API of this module.
__all__ = ["NougatImageProcessorFast"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/nougat/image_processing_nougat_fast.py",
"license": "Apache License 2.0",
"lines": 249,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/eomt/convert_eomt_to_hf.py | # Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import snapshot_download
from transformers import EomtConfig, EomtForUniversalSegmentation, EomtImageProcessorFast
# fmt: off
# Regex table mapping original checkpoint key patterns (left) to their HF
# equivalents (right). Applied with `re.sub` over the newline-joined key list in
# `convert_old_keys_to_new_keys`; a `None` replacement would drop the key.
MAPPINGS = {
    # Embeddings
    r"network.encoder.backbone.cls_token"                : r"embeddings.cls_token",
    r"network.encoder.backbone.reg_token"                : r"embeddings.register_tokens",
    r"network.encoder.backbone.pos_embed"                : r"embeddings.position_embeddings.weight",
    r"network.encoder.backbone.patch_embed.proj"         : r"embeddings.patch_embeddings.projection",

    # Encoder Block
    r"network.encoder.backbone.blocks.(\d+).norm1"       : r"layers.\1.norm1",
    r"network.encoder.backbone.blocks.(\d+).attn.proj"   : r"layers.\1.attention.out_proj",
    r"network.encoder.backbone.blocks.(\d+).ls1.gamma"   : r"layers.\1.layer_scale1.lambda1",
    r"network.encoder.backbone.blocks.(\d+).norm2"       : r"layers.\1.norm2",
    r"network.encoder.backbone.blocks.(\d+).ls2.gamma"   : r"layers.\1.layer_scale2.lambda1",
    r"network.encoder.backbone.blocks.(\d+).attn"        : r"layers.\1.attention",

    # Others
    r"network.q.weight"                                  : r"query.weight",
    r"network.class_head"                                : r"class_predictor",
    r"network.upscale.(\d+).conv1"                       : r"upscale_block.block.\1.conv1",
    r"network.upscale.(\d+).conv2"                       : r"upscale_block.block.\1.conv2",
    r"network.upscale.(\d+).norm"                        : r"upscale_block.block.\1.layernorm2d",
    r"network.mask_head.0"                               : r"mask_head.fc1",
    r"network.mask_head.2"                               : r"mask_head.fc2",
    r"network.mask_head.4"                               : r"mask_head.fc3",
    r"network.encoder.backbone.norm"                     : r"layernorm",
    r"network.attn_mask_probs"                           : r"attn_mask_probs",
}
# fmt: on

# Mappings for MLP layers, depending on the type of MLP used in ckpts.
# `convert_model` merges exactly one of these into MAPPINGS before conversion.
MLP_MAPPINGS = {
    "swiglu_ffn": {
        r"network.encoder.backbone.blocks.(\d+).mlp.fc1": r"layers.\1.mlp.weights_in",
        r"network.encoder.backbone.blocks.(\d+).mlp.fc2": r"layers.\1.mlp.weights_out",
    },
    "vanilla_mlp": {
        r"network.encoder.backbone.blocks.(\d+).mlp": r"layers.\1.mlp",
    },
}
def convert_old_keys_to_new_keys(state_dict):
    """Return a dict mapping every original checkpoint key to its HF name via `MAPPINGS`."""
    original_keys = list(state_dict.keys())
    # Run every regex over the whole newline-joined key list in one pass each.
    renamed_text = "\n".join(original_keys)
    for pattern, replacement in MAPPINGS.items():
        renamed_text = re.sub(pattern, "" if replacement is None else replacement, renamed_text)
    return dict(zip(original_keys, renamed_text.split("\n")))
def split_qkv_tensor(key, tensor):
    """Splits a fused qkv tensor into q, k and v tensors, renaming the key for each part."""
    chunk = tensor.shape[0] // 3
    q, k, v = torch.split(tensor, chunk, dim=0)
    return {
        key.replace("qkv", "q_proj"): q,
        key.replace("qkv", "k_proj"): k,
        key.replace("qkv", "v_proj"): v,
    }
def convert_state_dict_to_hf(state_dict):
    """Convert state dict keys to HF format."""
    key_map = convert_old_keys_to_new_keys(state_dict)

    hf_state_dict = {}
    for source_key, target_key in key_map.items():
        if not target_key:
            continue
        if "qkv" in target_key:
            # Fused attention weights must be split into separate q/k/v projections.
            hf_state_dict.update(split_qkv_tensor(target_key, state_dict[source_key]))
        else:
            hf_state_dict[target_key] = state_dict[source_key]

    # Normalization statistics live in the image processor, not the model.
    for stale_key in ("network.encoder.pixel_mean", "network.encoder.pixel_std"):
        hf_state_dict.pop(stale_key)

    # HF stores position embeddings without the leading batch dimension.
    pos_embed_key = "embeddings.position_embeddings.weight"
    hf_state_dict[pos_embed_key] = hf_state_dict[pos_embed_key].squeeze(0)
    return hf_state_dict
def ensure_model_downloaded(
    repo_id: str | None = None, revision: str | None = None, local_dir: str | None = None
) -> str:
    """
    Ensures model files are downloaded locally, downloads them if not.
    Returns path to local files.

    Args:
        repo_id: The Hugging Face model repo ID (required if local_dir not provided)
        revision: Optional git revision to use
        local_dir: Optional local directory path where model files should be stored/found

    Returns:
        Path to the directory containing the model files.

    Raises:
        ValueError: If neither `repo_id` nor `local_dir` is provided.
    """
    if local_dir is not None:
        if os.path.exists(local_dir):
            print(f"Using provided local directory: {local_dir}")
        else:
            # Create the local directory if it doesn't exist
            os.makedirs(local_dir, exist_ok=True)
            print(f"Created local directory: {local_dir}")

    if repo_id is None:
        raise ValueError("Either repo_id or local_dir must be provided")

    print(f"Ensuring {repo_id} (revision: {revision or 'latest'}) is downloaded...")

    try:
        # First try to find files locally
        download_dir = snapshot_download(repo_id, revision=revision, local_files_only=True, local_dir=local_dir)
        print(f"Found model files locally at {download_dir}")
        return download_dir
    except Exception:
        # Best-effort local lookup: any failure to resolve the files locally
        # falls through to a fresh download from the Hub.
        print(f"Downloading model files for {repo_id}...")
        download_dir = snapshot_download(repo_id, revision=revision, local_files_only=False, local_dir=local_dir)
        print(f"Downloaded model files to {download_dir}")
        return download_dir
def load_model_state_dict(input_path: str) -> dict:
    """
    Load model state dict, handling both single and sharded files.

    Args:
        input_path: Directory containing either `pytorch_model.bin` or a sharded
            checkpoint (`pytorch_model.bin.index.json` plus its shard files).

    Raises:
        ValueError: If neither a single-file nor a sharded checkpoint is found.
    """
    # NOTE(security): `torch.load` unpickles arbitrary Python objects; only run this
    # conversion script on checkpoints from a trusted source.
    index_path = os.path.join(input_path, "pytorch_model.bin.index.json")
    single_file_path = os.path.join(input_path, "pytorch_model.bin")

    # Check if we have a sharded model
    if os.path.exists(index_path):
        print("Loading sharded model...")
        state_dict = {}
        with open(index_path, "r") as f:
            index = json.load(f)

        # Get unique shard files and load each one only once
        unique_shard_files = sorted(set(index["weight_map"].values()))
        for shard_file in unique_shard_files:
            print(f"Loading shard {shard_file}...")
            shard_path = os.path.join(input_path, shard_file)
            shard_dict = torch.load(shard_path, map_location="cpu")
            state_dict.update(shard_dict)

        return state_dict

    # Single file model
    elif os.path.exists(single_file_path):
        print("Loading single file model...")
        return torch.load(single_file_path, map_location="cpu")
    else:
        raise ValueError(f"No model files found in {input_path}")
def convert_model(
    repo_id=None,
    local_dir=None,
    output_dir=None,
    output_hub_path=None,
    revision=None,
):
    """Convert and save the model weights, processor, and configuration.

    Args:
        repo_id: Hub repo ID of the original checkpoint (required if `local_dir` is not given).
        local_dir: Local directory holding the original checkpoint files.
        output_dir: Local directory to write the converted model to.
        output_hub_path: Hub repo ID to push the converted model to.
        revision: Optional git revision of the source repo.

    Raises:
        ValueError: If no output target or no input source is specified.
    """
    if output_dir is None and output_hub_path is None:
        raise ValueError("At least one of output_dir or output_hub_path must be specified")
    if repo_id is None and local_dir is None:
        raise ValueError("Either repo_id or local_dir must be specified")

    # Create output directory if specified
    if output_dir:
        os.makedirs(output_dir, exist_ok=True)
        print(f"Created/verified output directory: {output_dir}")

    torch.set_default_dtype(torch.float16)

    # Download or locate model files
    input_path = ensure_model_downloaded(repo_id=repo_id, revision=revision, local_dir=local_dir)

    with open(os.path.join(input_path, "config.json"), "r", encoding="utf-8") as f:
        config_data = json.load(f)

    # Pop off unwanted keys
    _ = config_data.pop("backbone", None)

    config = EomtConfig(
        **{
            **config_data,
            "layerscale_value": 1e-5,
        }
    )

    # Bugfix: `repo_id` may be None when only `local_dir` is provided (the validation
    # above allows that), so guard before inspecting the checkpoint name instead of
    # crashing on `None.split`.
    repo_name_parts = repo_id.split("_") if repo_id is not None else []

    if "semantic" in repo_name_parts:
        # Semantic checkpoints keep the aspect ratio and split images into patches.
        size = {"shortest_edge": config.image_size, "longest_edge": None}
        do_split_image = True
        do_pad = False
    else:
        size = {"shortest_edge": config.image_size, "longest_edge": config.image_size}
        do_split_image = False
        do_pad = True

    if "giant" in repo_name_parts:
        # "giant" checkpoints use a larger backbone with SwiGLU MLPs.
        config.use_swiglu_ffn = True
        config.hidden_size = 1536
        config.num_hidden_layers = 40
        config.num_attention_heads = 24
        # Update MAPPINGS for ckpts depending on the MLP type
        MAPPINGS.update(MLP_MAPPINGS["swiglu_ffn"])
    else:
        MAPPINGS.update(MLP_MAPPINGS["vanilla_mlp"])

    processor = EomtImageProcessorFast(size=size, do_split_image=do_split_image, do_pad=do_pad)

    # Save the config and processor
    if output_dir:
        config.save_pretrained(output_dir)
        processor.save_pretrained(output_dir)
    if output_hub_path:
        config.push_to_hub(output_hub_path)
        processor.push_to_hub(output_hub_path)

    # Initialize model with empty weights
    print("Creating empty model...")
    with torch.device("meta"):
        model = EomtForUniversalSegmentation(config)

    # Load and convert state dict
    print("Loading state dict...")
    state_dict = load_model_state_dict(input_path)
    state_dict = convert_state_dict_to_hf(state_dict)

    # Load converted state dict
    print("Loading converted weights into model...")
    model.load_state_dict(state_dict, strict=True, assign=True)

    # Save the model
    if output_dir:
        print(f"Saving model to {output_dir}...")
        model.save_pretrained(output_dir)
    if output_hub_path:
        print(f"Pushing model to hub at {output_hub_path}...")
        model.push_to_hub(output_hub_path)

    # Free the (potentially very large) weights before the reload check below.
    del state_dict, model
    gc.collect()

    # Validate the saved model if saved locally
    if output_dir:
        print("Reloading the local model to check if it's saved correctly...")
        EomtForUniversalSegmentation.from_pretrained(output_dir, device_map="auto")
        print("Local model reloaded successfully.")
def main():
    """CLI entry point: parse arguments, validate them, then run the conversion."""
    parser = argparse.ArgumentParser()
    cli_options = [
        ("--hf_repo_id", "HuggingFace Hub repo ID for the model"),
        ("--local_dir", "Local directory containing the model files"),
        ("--revision", "Specific revision to download from the Hub"),
        ("--output_dir", "Location to write HF model locally"),
        ("--output_hub_path", "Repository ID to push model to hub (e.g. 'username/model-name')"),
    ]
    for flag, help_text in cli_options:
        parser.add_argument(flag, help=help_text, default=None)
    args = parser.parse_args()

    if args.output_dir is None and args.output_hub_path is None:
        raise ValueError("At least one of --output_dir or --output_hub_path must be specified")
    if args.hf_repo_id is None and args.local_dir is None:
        raise ValueError("Either --hf_repo_id or --local_dir must be specified")

    convert_model(
        repo_id=args.hf_repo_id,
        local_dir=args.local_dir,
        output_dir=args.output_dir,
        output_hub_path=args.output_hub_path,
        revision=args.revision,
    )


if __name__ == "__main__":
    main()
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/eomt/convert_eomt_to_hf.py",
"license": "Apache License 2.0",
"lines": 272,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/eomt/image_processing_eomt.py | # Copyright 2025 Mobile Perception Systems Lab at TU/e and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Image processor class for EoMT."""
import math
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
PaddingMode,
get_size_with_aspect_ratio,
pad,
resize,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
infer_channel_dimension_format,
make_flat_list_of_images,
to_numpy_array,
valid_images,
validate_preprocess_arguments,
)
from ...processing_utils import ImagesKwargs
from ...utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
TensorType,
filter_out_non_signature_kwargs,
is_torch_available,
logging,
)
logger = logging.get_logger(__name__)
if is_torch_available():
import torch
import torch.nn.functional as F
# Extra keyword arguments accepted by the EoMT image processor on top of the
# shared `ImagesKwargs`; `total=False` makes both keys optional.
class EomtImageProcessorKwargs(ImagesKwargs, total=False):
    """
    do_split_image (`bool`, *optional*, defaults to `False`):
        Whether to split the input images into overlapping patches for semantic segmentation. If set to `True`, the
        input images will be split into patches of size `size["shortest_edge"]` with an overlap between patches.
        Otherwise, the input images will be padded to the target size.
    ignore_index (`int`, *optional*):
        Label to be assigned to background pixels in segmentation maps. If provided, segmentation map pixels
        denoted with 0 (background) will be replaced with `ignore_index`.
    """

    do_split_image: bool
    ignore_index: int | None
# Adapted from transformers.models.maskformer.image_processing_maskformer.convert_segmentation_map_to_binary_masks
def convert_segmentation_map_to_binary_masks(
segmentation_map: np.ndarray,
instance_id_to_semantic_id: dict[int, int] | None = None,
ignore_index: int | None = None,
):
if ignore_index is not None:
segmentation_map = np.where(segmentation_map == 0, ignore_index, segmentation_map - 1)
# Get unique ids (class or instance ids based on input)
all_labels = np.unique(segmentation_map)
# Drop background label if applicable
if ignore_index is not None:
all_labels = all_labels[all_labels != ignore_index]
# Generate a binary mask for each object instance
binary_masks = [(segmentation_map == i) for i in all_labels]
# Stack the binary masks
if binary_masks:
binary_masks = np.stack(binary_masks, axis=0)
else:
binary_masks = np.zeros((0, *segmentation_map.shape))
# Convert instance ids to class ids
if instance_id_to_semantic_id is not None:
labels = np.zeros(all_labels.shape[0])
for label in all_labels:
class_id = instance_id_to_semantic_id[label + 1 if ignore_index is not None else label]
labels[all_labels == label] = class_id - 1 if ignore_index is not None else class_id
else:
labels = all_labels
return binary_masks.astype(np.float32), labels.astype(np.int64)
def remove_low_and_no_objects(masks, scores, labels, object_mask_threshold, num_labels):
    """
    Filter out queries that are either the "no object" class or below the score threshold.

    Args:
        masks (`torch.Tensor`):
            A tensor of shape `(num_queries, height, width)`.
        scores (`torch.Tensor`):
            A tensor of shape `(num_queries)`.
        labels (`torch.Tensor`):
            A tensor of shape `(num_queries)`.
        object_mask_threshold (`float`):
            Minimum score a query must have to be kept.
        num_labels (`int`):
            Label id reserved for the "no object" class.

    Raises:
        `ValueError`: Raised when the first dimension doesn't match in all input tensors.

    Returns:
        `tuple[torch.Tensor, torch.Tensor, torch.Tensor]`: The filtered `masks`, `scores` and `labels`.
    """
    query_count = masks.shape[0]
    if scores.shape[0] != query_count or labels.shape[0] != query_count:
        raise ValueError("mask, scores and labels must have the same shape!")

    keep = (labels != num_labels) & (scores > object_mask_threshold)
    return masks[keep], scores[keep], labels[keep]
def check_segment_validity(mask_labels, mask_probs, k, mask_threshold=0.5, overlap_mask_area_threshold=0.8):
    """Decide whether query `k` forms a valid segment and return its final binary mask."""
    # Pixels assigned to query k by the per-pixel argmax.
    assigned_mask = mask_labels == k
    assigned_area = assigned_mask.sum()

    # Pixels where query k's own probability clears the threshold.
    thresholded_mask = mask_probs[k] >= mask_threshold
    thresholded_area = thresholded_mask.sum()

    final_mask = assigned_mask & thresholded_mask

    mask_exists = assigned_area > 0 and thresholded_area > 0 and final_mask.sum() > 0
    # Reject segments whose assigned area covers too small a fraction of the
    # query's own thresholded mask.
    if mask_exists and not (assigned_area / thresholded_area).item() > overlap_mask_area_threshold:
        mask_exists = False

    return mask_exists, final_mask
def compute_segments(
    mask_probs,
    pred_scores,
    pred_labels,
    stuff_classes,
    mask_threshold: float = 0.5,
    overlap_mask_area_threshold: float = 0.8,
    target_size: tuple[int, int] | None = None,
):
    """Build a panoptic segmentation map and its segment metadata from per-query masks."""
    if target_size is not None:
        height, width = target_size
    else:
        height, width = mask_probs.shape[1], mask_probs.shape[2]

    # -1 marks pixels that end up in no segment.
    segmentation = torch.full((height, width), -1, dtype=torch.long, device=mask_probs.device)
    segments: list[dict] = []

    # Per-pixel assignment: each pixel goes to the query with the highest
    # score-weighted mask probability.
    mask_probs = mask_probs.sigmoid()
    mask_labels = (pred_scores[:, None, None] * mask_probs).argmax(0)

    current_segment_id = 0
    stuff_memory_list: dict[int, int] = {}  # class id -> segment id, to merge "stuff" classes
    for k in range(pred_labels.shape[0]):
        pred_class = pred_labels[k].item()

        # Check if mask exists and large enough to be a segment
        mask_exists, final_mask = check_segment_validity(
            mask_labels, mask_probs, k, mask_threshold, overlap_mask_area_threshold
        )
        if not mask_exists:
            continue

        if stuff_classes and pred_class in stuff_classes:
            if pred_class in stuff_memory_list:
                # Merge with the earlier segment of the same "stuff" class.
                segmentation[final_mask] = stuff_memory_list[pred_class]
                continue
            stuff_memory_list[pred_class] = current_segment_id

        segmentation[final_mask] = current_segment_id
        segments.append(
            {
                "id": current_segment_id,
                "label_id": pred_class,
                "score": round(pred_scores[k].item(), 6),
            }
        )
        current_segment_id += 1

    return segmentation, segments
def get_target_size(size_dict: dict[str, int]) -> tuple[int, int]:
    """Returns the (height, width) target from a size dict, squaring when no longest edge is set."""
    height = size_dict["shortest_edge"]
    # A missing or falsy longest edge (None/0) falls back to a square target.
    return height, size_dict.get("longest_edge") or height
class EomtImageProcessor(BaseImageProcessor):
r"""
Constructs a EoMT image processor. The image processor can be used to prepare image(s) and optional targets
for the model.
This image processor inherits from [`BaseImageProcessor`] which contains most of the main methods. Users should
refer to this superclass for more information regarding those methods.
Args:
do_resize (`bool`, *optional*, defaults to `True`):
Whether to resize the input to a certain `size`.
size (`int`, *optional*, defaults to 640):
Resize the input to the given size. Only has an effect if `do_resize` is set to `True`. If size is a
sequence like `(width, height)`, output size will be matched to this. If size is an int, smaller edge of
the image will be matched to this number. i.e, if `height > width`, then image will be rescaled to `(size *
height / width, size)`.
resample (`int`, *optional*, defaults to `Resampling.BILINEAR`):
An optional resampling filter. This can be one of `PIL.Image.Resampling.NEAREST`,
`PIL.Image.Resampling.BOX`, `PIL.Image.Resampling.BILINEAR`, `PIL.Image.Resampling.HAMMING`,
`PIL.Image.Resampling.BICUBIC` or `PIL.Image.Resampling.LANCZOS`. Only has an effect if `do_resize` is set
to `True`.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the input to a certain `scale`.
rescale_factor (`float`, *optional*, defaults to `1/ 255`):
Rescale the input by the given factor. Only has an effect if `do_rescale` is set to `True`.
do_normalize (`bool`, *optional*, defaults to `True`):
Whether or not to normalize the input with mean and standard deviation.
do_split_image (`bool`, *optional*, defaults to `False`):
Whether to split the input images into overlapping patches for semantic segmentation. If set to `True`, the
input images will be split into patches of size `size["shortest_edge"]` with an overlap between patches.
Otherwise, the input images will be padded to the target size.
do_pad (`bool`, *optional*, defaults to `False`):
Whether to pad the image. If `True`, will pad the patch dimension of the images in the batch to the largest
number of patches in the batch. Padding will be applied to the bottom and right with zeros.
image_mean (`int`, *optional*, defaults to `[0.485, 0.456, 0.406]`):
The sequence of means for each channel, to be used when normalizing images. Defaults to the ImageNet mean.
image_std (`int`, *optional*, defaults to `[0.229, 0.224, 0.225]`):
The sequence of standard deviations for each channel, to be used when normalizing images. Defaults to the
ImageNet std.
ignore_index (`int`, *optional*):
Label to be assigned to background pixels in segmentation maps. If provided, segmentation map pixels
denoted with 0 (background) will be replaced with `ignore_index`.
num_labels (`int`, *optional*):
The number of labels in the segmentation map.
"""
model_input_names = ["pixel_values"]
def __init__(
    self,
    do_resize: bool = True,
    size: dict[str, int] | None = None,
    resample: PILImageResampling = PILImageResampling.BILINEAR,
    do_rescale: bool = True,
    rescale_factor: float = 1 / 255,
    do_normalize: bool = True,
    do_split_image: bool = False,
    do_pad: bool = False,
    image_mean: float | list[float] | None = None,
    image_std: float | list[float] | None = None,
    ignore_index: int | None = None,
    num_labels: int | None = None,
    **kwargs,
):
    """Builds the EoMT image processor, resolving the default size and normalization stats.

    `size` defaults to a 640x640 square; unset `image_mean`/`image_std` fall back to the
    ImageNet defaults. See the class docstring for the meaning of every flag.
    """
    super().__init__(**kwargs)
    if size is None:
        size = {"shortest_edge": 640, "longest_edge": 640}
    self.size = get_size_dict(size, default_to_square=False)
    self.do_resize = do_resize
    self.resample = resample
    self.do_rescale = do_rescale
    self.rescale_factor = rescale_factor
    self.do_normalize = do_normalize
    self.do_split_image = do_split_image
    self.do_pad = do_pad
    self.image_mean = IMAGENET_DEFAULT_MEAN if image_mean is None else image_mean
    self.image_std = IMAGENET_DEFAULT_STD if image_std is None else image_std
    self.ignore_index = ignore_index
    self.num_labels = num_labels
def resize(
    self,
    image: np.ndarray,
    size: dict,
    resample: PILImageResampling = PILImageResampling.BILINEAR,
    data_format=None,
    input_data_format: str | ChannelDimension | None = None,
    **kwargs,
) -> np.ndarray:
    """
    Resize an image. The shortest edge of the image is resized to size["shortest_edge"], with the longest edge
    resized to keep the input aspect ratio.
    Args:
        image (`np.ndarray`):
            Image to resize.
        size (`dict`):
            Size dict with `"shortest_edge"` and `"longest_edge"` keys controlling the output size.
        resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
            Resampling filter to use when resizing the image.
        data_format (`str` or `ChannelDimension`, *optional*):
            The channel dimension format of the image. If not provided, it will be the same as the input image.
        input_data_format (`ChannelDimension` or `str`, *optional*):
            The channel dimension format of the input image. If not provided, it will be inferred.
    """
    image_size = get_image_size(image)
    # Aspect-ratio-preserving target: shortest edge hits size["shortest_edge"], capped by longest_edge.
    output_size = get_size_with_aspect_ratio(image_size, size["shortest_edge"], size["longest_edge"])
    image = resize(
        image=image,
        size=output_size,
        resample=resample,
        data_format=data_format,
        input_data_format=input_data_format,
        return_numpy=True,
        **kwargs,
    )
    return image
def _split_image(self, image: ImageInput, size: dict, image_index: int) -> tuple[list, list]:
    """Slices an image into overlapping square patches along its longer spatial side.

    Returns the patches plus matching `[image_index, start, end]` offset records so the
    patches can later be stitched back together.
    """
    patches, patch_offsets = [], []
    height, width = get_image_size(image)
    patch_size = size["shortest_edge"]
    longer_side = max(height, width)
    num_patches = math.ceil(longer_side / patch_size)
    # Distribute the total overlap evenly between consecutive patches so they tile exactly.
    if num_patches > 1:
        overlap_per_patch = (num_patches * patch_size - longer_side) / (num_patches - 1)
    else:
        overlap_per_patch = 0
    slice_height = height > width
    for i in range(num_patches):
        start = int(i * (patch_size - overlap_per_patch))
        end = start + patch_size
        # Slice along whichever spatial dimension is longer (channels-last layout assumed
        # for the width case — TODO confirm against callers).
        if slice_height:
            patches.append(image[:, start:end, :])
        else:
            patches.append(image[:, :, start:end])
        patch_offsets.append([image_index, start, end])
    return patches, patch_offsets
def _pad(self, image: ImageInput, size: dict) -> np.ndarray:
    """Zero-pads the image on the bottom and right up to the target size."""
    height, width = get_image_size(image)
    target_height, target_width = get_target_size(size)
    bottom = max(0, target_height - height)
    right = max(0, target_width - width)
    # Channel axis is last; `pad`'s default padding format handles it.
    return pad(
        image=image,
        padding=((0, bottom), (0, right)),
        mode=PaddingMode.CONSTANT,
        constant_values=0.0,
    )
def _preprocess_images(
    self,
    images: ImageInput,
    do_resize: bool | None = None,
    size: dict[str, int] | None = None,
    resample: PILImageResampling | None = None,
    do_split_image: bool | None = None,
    do_pad: bool | None = None,
    do_rescale: bool | None = None,
    rescale_factor: float | None = None,
    do_normalize: bool | None = None,
    image_mean: float | list[float] | None = None,
    image_std: float | list[float] | None = None,
    data_format: str | ChannelDimension | None = None,
    input_data_format: str | ChannelDimension | None = None,
) -> np.ndarray:
    """Runs the resize -> split -> pad -> rescale -> normalize pipeline over a batch.

    Returns the processed images together with `[image_index, start, end]` patch offsets
    (empty unless `do_split_image` is set).
    """
    images = [to_numpy_array(img) for img in images]
    if do_resize:
        images = [
            self.resize(
                img,
                size=size,
                resample=resample,
                data_format=data_format,
                input_data_format=input_data_format,
            )
            for img in images
        ]
    patch_offsets = []
    if do_split_image:
        # Replace each image by its overlapping patches, remembering where each came from.
        split_images = []
        for idx, img in enumerate(images):
            patches, offsets = self._split_image(img, size, idx)
            split_images += patches
            patch_offsets += offsets
        images = split_images
    if do_pad:
        images = [self._pad(img, size) for img in images]
    if do_rescale:
        images = [self.rescale(img, scale=rescale_factor, input_data_format=input_data_format) for img in images]
    if do_normalize:
        images = [
            self.normalize(img, mean=image_mean, std=image_std, input_data_format=input_data_format)
            for img in images
        ]
    return images, patch_offsets
def _preprocess_mask(
    self,
    segmentation_map: ImageInput,
    do_resize: bool | None = False,
    do_pad: bool | None = False,
    size: dict[str, int] | None = None,
    resample: PILImageResampling | None = None,
    data_format: str | ChannelDimension = None,
    input_data_format: str | ChannelDimension | None = None,
) -> torch.Tensor:
    """Preprocesses a single mask: optional resize/pad, returned as a torch tensor."""
    # Add channel dimension if missing - needed for certain transformations
    if segmentation_map.ndim == 2:
        added_channel_dim = True
        segmentation_map = segmentation_map[None, ...]
        input_data_format = ChannelDimension.FIRST
    else:
        added_channel_dim = False
        if input_data_format is None:
            input_data_format = infer_channel_dimension_format(segmentation_map)
    if do_resize:
        # NOTE(review): the `input_data_format` resolved above is not forwarded here, so
        # `self.resize` re-infers it — presumably consistent; confirm for 3-dim masks.
        segmentation_map = self.resize(
            segmentation_map,
            size=size,
            resample=resample,
            data_format=data_format,
        )
    if do_pad:
        segmentation_map = self._pad(segmentation_map, size)
    # Remove extra channel dimension if added for processing
    if added_channel_dim:
        segmentation_map = segmentation_map.squeeze(0)
    return torch.from_numpy(segmentation_map)
@filter_out_non_signature_kwargs()
def preprocess(
    self,
    images: ImageInput,
    segmentation_maps: ImageInput | None = None,
    instance_id_to_semantic_id: dict[int, int] | None = None,
    do_split_image: bool | None = None,
    do_resize: bool | None = None,
    size: dict[str, int] | None = None,
    resample: PILImageResampling | None = None,
    do_rescale: bool | None = None,
    rescale_factor: float | None = None,
    do_normalize: bool | None = None,
    do_pad: bool | None = None,
    image_mean: float | list[float] | None = None,
    image_std: float | list[float] | None = None,
    ignore_index: int | None = None,
    return_tensors: str | TensorType | None = None,
    data_format: str | ChannelDimension = ChannelDimension.FIRST,
    input_data_format: str | ChannelDimension | None = None,
) -> BatchFeature:
    """
    Preprocesses images or a batch of images.
    Args:
        images (`ImageInput`):
            Image or batch of images to preprocess.
        segmentation_maps (`ImageInput`, *optional*):
            The corresponding semantic segmentation maps with the pixel-wise annotations.
        instance_id_to_semantic_id (`list[dict[int, int]]` or `dict[int, int]`, *optional*):
            A mapping between object instance ids and class ids.
        do_split_image (`bool`, *optional*, defaults to `self.do_split_image`):
            Whether to split the input images into overlapping patches for semantic segmentation.
        do_resize (`bool`, *optional*, defaults to `self.do_resize`):
            Whether to resize the input images.
        size (`dict[str, int]`, *optional*, defaults to `self.size`):
            Target size as a dictionary with `"shortest_edge"` and `"longest_edge"` keys.
        resample (`PILImageResampling`, *optional*, defaults to `self.resample`):
            Resampling filter to use when resizing.
        do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
            Whether to rescale the input images by `rescale_factor`.
        rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
            Factor to scale image pixel values.
        do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
            Whether to normalize the input images.
        do_pad (`bool`, *optional*, defaults to `False`):
            Whether to pad the image. If `True`, will pad the patch dimension of the images in the batch to the largest
            number of patches in the batch. Padding will be applied to the bottom and right with zeros.
        image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
            Mean for normalization. Single value or list for each channel.
        image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
            Standard deviation for normalization. Single value or list for each channel.
        ignore_index (`int`, *optional*):
            Label to be assigned to background pixels in segmentation maps. If provided, segmentation map pixels
            denoted with 0 (background) will be replaced with `ignore_index`.
        return_tensors (`str` or `TensorType`, *optional*):
            The type of tensors to return. Can be `"pt"` or `"np"`.
        data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
            Channel format of the output image. Either `"channels_first"` or `"channels_last"`.
        input_data_format (`ChannelDimension` or `str`, *optional*):
            Channel format of the input image.
    """
    # Resolve every per-call override against the instance-level defaults.
    do_split_image = do_split_image if do_split_image is not None else self.do_split_image
    do_resize = do_resize if do_resize is not None else self.do_resize
    size = size if size is not None else self.size
    size = get_size_dict(size, default_to_square=False)
    resample = resample if resample is not None else self.resample
    do_rescale = do_rescale if do_rescale is not None else self.do_rescale
    rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
    do_normalize = do_normalize if do_normalize is not None else self.do_normalize
    do_pad = do_pad if do_pad is not None else self.do_pad
    image_mean = image_mean if image_mean is not None else self.image_mean
    image_std = image_std if image_std is not None else self.image_std
    ignore_index = ignore_index if ignore_index is not None else self.ignore_index
    images = self.fetch_images(images)
    images = make_flat_list_of_images(images)
    if not valid_images(images):
        raise ValueError("Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor")
    validate_preprocess_arguments(
        do_rescale=do_rescale,
        rescale_factor=rescale_factor,
        do_normalize=do_normalize,
        image_mean=image_mean,
        image_std=image_std,
        do_resize=do_resize,
        size=size,
        resample=resample,
    )
    pixel_values_list, patch_offsets = self._preprocess_images(
        images=images,
        do_resize=do_resize,
        size=size,
        resample=resample,
        do_split_image=do_split_image,
        do_pad=do_pad,
        do_rescale=do_rescale,
        rescale_factor=rescale_factor,
        do_normalize=do_normalize,
        image_mean=image_mean,
        image_std=image_std,
        data_format=data_format,
        input_data_format=input_data_format,
    )
    if segmentation_maps is not None:
        segmentation_maps = make_flat_list_of_images(segmentation_maps, expected_ndims=2)
        segmentation_maps = [to_numpy_array(mask) for mask in segmentation_maps]
        segmentation_maps = [
            self._preprocess_mask(
                segmentation_map,
                do_resize=do_resize,
                do_pad=do_pad,
                size=size,
                # Nearest-neighbor keeps label ids intact (no interpolation between classes).
                resample=PILImageResampling.NEAREST,
                data_format=data_format,
                input_data_format=input_data_format,
            )
            for segmentation_map in segmentation_maps
        ]
    encoded_inputs = self.encode_inputs(
        pixel_values_list,
        segmentation_maps,
        instance_id_to_semantic_id,
        ignore_index,
        return_tensors,
        input_data_format=data_format,
    )
    if do_split_image and patch_offsets:
        encoded_inputs["patch_offsets"] = [torch.tensor(offsets) for offsets in patch_offsets]
    return encoded_inputs
def encode_inputs(
    self,
    pixel_values_list: list[ImageInput],
    segmentation_maps: ImageInput | None = None,
    instance_id_to_semantic_id: list[dict[int, int]] | dict[int, int] | None = None,
    ignore_index: int | None = None,
    return_tensors: str | TensorType | None = None,
    input_data_format: str | ChannelDimension | None = None,
):
    """
    Pad images up to the largest image in a batch and create a corresponding `pixel_mask`.
    EoMT addresses semantic segmentation with a mask classification paradigm, thus input segmentation maps
    will be converted to lists of binary masks and their respective labels. Let's see an example, assuming
    `segmentation_maps = [[2,6,7,9]]`, the output will contain `mask_labels =
    [[1,0,0,0],[0,1,0,0],[0,0,1,0],[0,0,0,1]]` (four binary masks) and `class_labels = [2,6,7,9]`, the labels for
    each mask.
    Args:
        pixel_values_list (`list[ImageInput]`):
            list of images (pixel values) to be padded. Each image should be a tensor of shape `(channels, height,
            width)`.
        segmentation_maps (`ImageInput`, *optional*):
            The corresponding semantic segmentation maps with the pixel-wise annotations.
        instance_id_to_semantic_id (`list[dict[int, int]]` or `dict[int, int]`, *optional*):
            A mapping between object instance ids and class ids. If passed, `segmentation_maps` is treated as an
            instance segmentation map where each pixel represents an instance id. Can be provided as a single
            dictionary with a global/dataset-level mapping or as a list of dictionaries (one per image), to map
            instance ids in each image separately.
        ignore_index (`int`, *optional*, defaults to `self.ignore_index`):
            Label assigned to background pixels when converting segmentation maps to binary masks.
        return_tensors (`str` or [`~file_utils.TensorType`], *optional*):
            If set, will return tensors instead of NumPy arrays. If set to `'pt'`, return PyTorch `torch.Tensor`
            objects.
        input_data_format (`ChannelDimension` or `str`, *optional*):
            The channel dimension format of the input image. If not provided, it will be inferred.
    Returns:
        [`BatchFeature`]: A [`BatchFeature`] with the following fields:
        - **pixel_values** -- Pixel values to be fed to a model.
        - **mask_labels** -- Optional list of mask labels of shape `(labels, height, width)` to be fed to a model
          (when `annotations` are provided).
        - **class_labels** -- Optional list of class labels of shape `(labels)` to be fed to a model (when
          `annotations` are provided). They identify the labels of `mask_labels`, e.g. the label of
          `mask_labels[i][j]` is `class_labels[i][j]`.
    """
    ignore_index = self.ignore_index if ignore_index is None else ignore_index
    pixel_values_list = [to_numpy_array(pixel_values) for pixel_values in pixel_values_list]
    if input_data_format is None:
        input_data_format = infer_channel_dimension_format(pixel_values_list[0])
    encoded_inputs = BatchFeature({"pixel_values": pixel_values_list}, tensor_type=return_tensors)
    if segmentation_maps is not None:
        mask_labels = []
        class_labels = []
        # Convert to list of binary masks and labels
        for idx, segmentation_map in enumerate(segmentation_maps):
            segmentation_map = to_numpy_array(segmentation_map)
            if isinstance(instance_id_to_semantic_id, list):
                # One mapping per image.
                instance_id = instance_id_to_semantic_id[idx]
            else:
                # A single, dataset-level mapping (or None).
                instance_id = instance_id_to_semantic_id
            # Use instance2class_id mapping per image
            masks, classes = convert_segmentation_map_to_binary_masks(
                segmentation_map,
                instance_id,
                ignore_index=ignore_index,
            )
            mask_labels.append(torch.from_numpy(masks))
            class_labels.append(torch.from_numpy(classes))
        # we cannot batch them since they don't share a common class size
        encoded_inputs["mask_labels"] = mask_labels
        encoded_inputs["class_labels"] = class_labels
    return encoded_inputs
def merge_image_patches(
    self,
    segmentation_logits: torch.Tensor,
    patch_offsets: list[tuple[int, int, int]],
    target_sizes: list[tuple[int, int]],
    size: dict[str, int],
) -> list[torch.Tensor]:
    """
    Reconstructs full-size semantic segmentation logits from patch predictions.
    Args:
        segmentation_logits (`torch.Tensor`):
            A tensor of shape `(num_patches, num_classes, patch_height, patch_width)` representing predicted logits
            for each image patch.
        patch_offsets (`list[tuple[int, int, int]]`):
            A list of tuples where each tuple contains:
            - `image_index` (int): Index of the original image this patch belongs to.
            - `start` (int): Start pixel index of the patch along the long dimension (height or width).
            - `end` (int): End pixel index of the patch along the long dimension.
        target_sizes (`list[tuple[int, int]]`):
            list of original (height, width) dimensions for each image before preprocessing.
        size (`dict[str, int]`):
            A size dict which was used to resize.
    """
    num_classes = segmentation_logits.shape[1]
    aggregated_logits = []
    patch_counts = []
    # One accumulator (and overlap counter) per image, at the preprocessed resolution.
    for image_size in target_sizes:
        height, width = get_size_with_aspect_ratio(image_size, size["shortest_edge"], size["longest_edge"])
        aggregated_logits.append(torch.zeros((num_classes, height, width), device=segmentation_logits.device))
        patch_counts.append(torch.zeros((num_classes, height, width), device=segmentation_logits.device))
    # Stitch patches back into full-sized logit maps
    for patch_idx, (image_idx, patch_start, patch_end) in enumerate(patch_offsets):
        # Patches were cut along the longer spatial dimension; add them back the same way.
        if target_sizes[image_idx][0] > target_sizes[image_idx][1]:
            aggregated_logits[image_idx][:, patch_start:patch_end, :] += segmentation_logits[patch_idx]
            patch_counts[image_idx][:, patch_start:patch_end, :] += 1
        else:
            aggregated_logits[image_idx][:, :, patch_start:patch_end] += segmentation_logits[patch_idx]
            patch_counts[image_idx][:, :, patch_start:patch_end] += 1
    # Normalize and resize logits to original image size
    reconstructed_logits = []
    for idx, (logit_sum, count) in enumerate(zip(aggregated_logits, patch_counts)):
        # Average overlapping regions; clamp avoids division by zero for uncovered pixels.
        averaged_logits = logit_sum / count.clamp(min=1)
        resized_logits = F.interpolate(
            averaged_logits[None, ...],
            size=target_sizes[idx],
            mode="bilinear",
            align_corners=False,
        )[0]
        reconstructed_logits.append(resized_logits)
    return reconstructed_logits
def unpad_image(
    self,
    segmentation_logits: torch.Tensor,
    target_sizes: list[tuple[int, int]],
    size: dict[str, int],
) -> list[torch.Tensor]:
    """Crops away the zero-padding, then upsamples each logit map to its original resolution."""
    restored = []
    for logits, original_size in zip(segmentation_logits, target_sizes):
        valid_height, valid_width = get_size_with_aspect_ratio(
            original_size, size["shortest_edge"], size["longest_edge"]
        )
        # Only the top-left region holds real content; the rest is padding from `_pad`.
        cropped = logits[:, :valid_height, :valid_width]
        upsampled = F.interpolate(cropped[None, ...], size=original_size, mode="bilinear", align_corners=False)[0]
        restored.append(upsampled)
    return restored
def post_process_semantic_segmentation(
    self,
    outputs,
    target_sizes: list[tuple[int, int]],
    size: dict[str, int] | None = None,
) -> list[torch.Tensor]:
    """Post-processes model outputs into final semantic segmentation prediction.

    Args:
        outputs:
            Model outputs carrying `masks_queries_logits`, `class_queries_logits` and `patch_offsets`.
        target_sizes (`list[tuple[int, int]]`):
            Original (height, width) of each input image before preprocessing.
        size (`dict[str, int]`, *optional*, defaults to `self.size`):
            The size dict used during preprocessing.

    Returns:
        `list[torch.Tensor]`: Per-image semantic maps of shape `(height, width)` with class indices.
    """
    size = size if size is not None else self.size
    masks_queries_logits = outputs.masks_queries_logits  # [batch_size, num_queries, height, width]
    class_queries_logits = outputs.class_queries_logits  # [batch_size, num_queries, num_classes+1]
    patch_offsets = outputs.patch_offsets
    output_size = get_target_size(size)
    masks_queries_logits = F.interpolate(
        masks_queries_logits,
        size=output_size,
        mode="bilinear",
    )
    # Remove the null class `[..., :-1]`
    masks_classes = class_queries_logits.softmax(dim=-1)[..., :-1]
    masks_probs = masks_queries_logits.sigmoid()  # [batch_size, num_queries, height, width]
    # Combine per-query class scores with per-query masks into per-class logit maps.
    segmentation_logits = torch.einsum("bqc, bqhw -> bchw", masks_classes, masks_probs)
    if patch_offsets:
        # Patched inference: stitch overlapping patch logits back to full-size maps.
        output_logits = self.merge_image_patches(segmentation_logits, patch_offsets, target_sizes, size)
    else:
        # Whole-image inference: resize each logit map directly to its original resolution.
        # Use `F.interpolate` for consistency with the rest of this class.
        output_logits = [
            F.interpolate(
                segmentation_logits[idx].unsqueeze(dim=0),
                size=target_sizes[idx],
                mode="bilinear",
                align_corners=False,
            )[0]
            for idx in range(len(segmentation_logits))
        ]
    preds = [logit.argmax(dim=0) for logit in output_logits]
    return preds
def post_process_panoptic_segmentation(
    self,
    outputs,
    target_sizes: list[tuple[int, int]],
    threshold: float = 0.8,
    mask_threshold: float = 0.5,
    overlap_mask_area_threshold: float = 0.8,
    stuff_classes: list[int] | None = None,
    size: dict[str, int] | None = None,
):
    """Post-processes model outputs into final panoptic segmentation prediction.

    Args:
        outputs: Model outputs carrying `masks_queries_logits` and `class_queries_logits`.
        target_sizes (`list[tuple[int, int]]`): Original (height, width) of each input image.
        threshold (`float`, *optional*, defaults to 0.8): Minimum class score to keep a query.
        mask_threshold (`float`, *optional*, defaults to 0.5): Threshold used when binarizing masks.
        overlap_mask_area_threshold (`float`, *optional*, defaults to 0.8): Minimum overlap ratio
            for a mask to survive segment merging.
        stuff_classes (`list[int]`, *optional*): Class ids whose instances are fused into one segment.
        size (`dict[str, int]`, *optional*, defaults to `self.size`): Size dict used during preprocessing.

    Returns:
        `list[dict]`: Per image, a dict with a `segmentation` id map and `segments_info` metadata.
    """
    size = size if size is not None else self.size
    masks_queries_logits = outputs.masks_queries_logits  # [batch_size, num_queries, height, width]
    class_queries_logits = outputs.class_queries_logits  # [batch_size, num_queries, num_classes+1]
    batch_size = class_queries_logits.shape[0]
    num_labels = class_queries_logits.shape[-1] - 1
    output_size = get_target_size(size)
    masks_queries_logits = F.interpolate(
        masks_queries_logits,
        size=output_size,
        mode="bilinear",
    )
    # Undo the preprocessing padding before computing per-image segments.
    mask_probs_batch = self.unpad_image(masks_queries_logits, target_sizes, size)
    pred_scores_batch, pred_labels_batch = class_queries_logits.softmax(dim=-1).max(-1)
    results: list = []
    for i in range(batch_size):
        mask_probs, pred_scores, pred_labels = remove_low_and_no_objects(
            mask_probs_batch[i], pred_scores_batch[i], pred_labels_batch[i], threshold, num_labels
        )
        # No mask found
        if mask_probs.shape[0] <= 0:
            height, width = target_sizes[i] if target_sizes is not None else mask_probs.shape[1:]
            # -1 marks pixels with no assigned segment.
            segmentation = torch.zeros((height, width)) - 1
            results.append({"segmentation": segmentation, "segments_info": []})
            continue
        segmentation, segments = compute_segments(
            mask_probs=mask_probs,
            pred_scores=pred_scores,
            pred_labels=pred_labels,
            stuff_classes=stuff_classes,
            mask_threshold=mask_threshold,
            overlap_mask_area_threshold=overlap_mask_area_threshold,
            target_size=target_sizes[i] if target_sizes is not None else None,
        )
        results.append({"segmentation": segmentation, "segments_info": segments})
    return results
@filter_out_non_signature_kwargs()
def post_process_instance_segmentation(
    self,
    outputs,
    target_sizes: list[tuple[int, int]],
    threshold: float = 0.5,
    size: dict[str, int] | None = None,
):
    """Post-processes model outputs into Instance Segmentation Predictions.

    Args:
        outputs: Model outputs carrying `masks_queries_logits` and `class_queries_logits`.
        target_sizes (`list[tuple[int, int]]`): Original (height, width) of each input image.
        threshold (`float`, *optional*, defaults to 0.5): Minimum combined score to keep an instance.
        size (`dict[str, int]`, *optional*, defaults to `self.size`): Size dict used during preprocessing.

    Returns:
        `list[dict]`: Per image, a dict with a `segmentation` instance-id map and `segments_info` metadata.
    """
    size = size if size is not None else self.size
    class_queries_logits = outputs.class_queries_logits
    masks_queries_logits = outputs.masks_queries_logits
    output_size = get_target_size(size)
    masks_queries_logits = F.interpolate(
        masks_queries_logits,
        size=output_size,
        mode="bilinear",
    )
    # Undo the preprocessing padding before scoring per-query masks.
    mask_probs_batch = self.unpad_image(masks_queries_logits, target_sizes, size)
    device = masks_queries_logits.device
    batch_size = class_queries_logits.shape[0]
    num_queries = class_queries_logits.shape[-2]
    results = []
    for i in range(batch_size):
        mask_pred = mask_probs_batch[i]
        mask_class = class_queries_logits[i]
        # Remove the null class `[..., :-1]`
        scores, pred_classes = mask_class.softmax(dim=-1)[..., :-1].max(-1)
        pred_masks = (mask_pred > 0).float()
        # Calculate average mask prob
        mask_scores = (mask_pred.sigmoid().flatten(1) * pred_masks.flatten(1)).sum(1) / (
            pred_masks.flatten(1).sum(1) + 1e-6
        )
        # Final instance score = class confidence * average in-mask probability.
        pred_scores = scores * mask_scores
        # -1 marks pixels with no assigned instance.
        segmentation = torch.zeros(target_sizes[i], device=device) - 1
        instance_maps, segments = [], []
        current_segment_id = 0
        for j in range(num_queries):
            score = pred_scores[j].item()
            if not torch.all(pred_masks[j] == 0) and score >= threshold:
                # Later queries overwrite earlier ones where masks overlap.
                segmentation[pred_masks[j] == 1] = current_segment_id
                segments.append(
                    {
                        "id": current_segment_id,
                        "label_id": pred_classes[j].item(),
                        "score": round(score, 6),
                    }
                )
                current_segment_id += 1
                instance_maps.append(pred_masks[j])
        results.append({"segmentation": segmentation, "segments_info": segments})
    return results
__all__ = ["EomtImageProcessor"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/eomt/image_processing_eomt.py",
"license": "Apache License 2.0",
"lines": 800,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/eomt/image_processing_eomt_fast.py | # Copyright 2025 Mobile Perception Systems Lab at TU/e and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fast Image processor class for EoMT."""
import math
from typing import Optional, Union
import numpy as np
import torch
import torchvision.transforms.v2.functional as tvF
from ...image_processing_utils import BatchFeature
from ...image_processing_utils_fast import (
BaseImageProcessorFast,
group_images_by_shape,
reorder_images,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
SizeDict,
)
from ...processing_utils import Unpack
from ...utils import (
TensorType,
auto_docstring,
filter_out_non_signature_kwargs,
)
from .image_processing_eomt import (
EomtImageProcessorKwargs,
compute_segments,
get_size_with_aspect_ratio,
remove_low_and_no_objects,
)
# Adapted from transformers.models.maskformer.image_processing_maskformer_fast.convert_segmentation_map_to_binary_masks_fast
def convert_segmentation_map_to_binary_masks_fast(
segmentation_map: "torch.Tensor",
instance_id_to_semantic_id: dict[int, int] | None = None,
ignore_index: int | None = None,
):
if ignore_index is not None:
segmentation_map = torch.where(segmentation_map == 0, ignore_index, segmentation_map - 1)
all_labels = torch.unique(segmentation_map)
if ignore_index is not None:
all_labels = all_labels[all_labels != ignore_index] # drop background label if applicable
binary_masks = [(segmentation_map == i) for i in all_labels]
if binary_masks:
binary_masks = torch.stack(binary_masks, dim=0)
else:
binary_masks = torch.zeros((0, *segmentation_map.shape), device=segmentation_map.device)
# Convert instance ids to class ids
if instance_id_to_semantic_id is not None:
labels = torch.zeros(all_labels.shape[0], device=segmentation_map.device)
for i, label in enumerate(all_labels):
class_id = instance_id_to_semantic_id[(label.item() + 1 if ignore_index is not None else label.item())]
labels[i] = class_id - 1 if ignore_index is not None else class_id
else:
labels = all_labels
return binary_masks.float(), labels.long()
def get_target_size(size_dict: dict[str, int]) -> tuple[int, int]:
    """Returns the (height, width) target implied by a size dict.

    The width falls back to the shortest edge when `longest_edge` is falsy (`None` or 0).
    """
    height = size_dict["shortest_edge"]
    width = size_dict["longest_edge"] or height
    return height, width
def reorder_patches_and_offsets(
    patches: list[torch.Tensor], offsets: list[list[int]]
) -> tuple[list[torch.Tensor], list[list[int]]]:
    """Sorts patches and offsets by the original image index stored in `offsets[i][0]`.

    Args:
        patches (`list[torch.Tensor]`): Image patches, in split order.
        offsets (`list[list[int]]`): Matching `[image_index, start, end]` entries, one per patch.

    Returns:
        `tuple[list[torch.Tensor], list[list[int]]]`: Patches and offsets sorted (stably) by image index.
    """
    if not patches:
        # `zip(*combined)` below cannot unpack an empty sequence; nothing to reorder anyway.
        return [], []
    combined = sorted(zip(offsets, patches), key=lambda pair: pair[0][0])
    sorted_offsets, sorted_patches = zip(*combined)
    return list(sorted_patches), list(sorted_offsets)
@auto_docstring
class EomtImageProcessorFast(BaseImageProcessorFast):
resample = PILImageResampling.BILINEAR
image_mean = IMAGENET_DEFAULT_MEAN
image_std = IMAGENET_DEFAULT_STD
size = {"shortest_edge": 640, "longest_edge": 640}
default_to_square = False
do_resize = True
do_rescale = True
do_normalize = True
do_split_image = False
do_pad = False
ignore_index = None
valid_kwargs = EomtImageProcessorKwargs
def __init__(self, **kwargs: Unpack[EomtImageProcessorKwargs]):
    """Initializes the fast EoMT processor; defaults come from the class attributes, kwargs override them."""
    super().__init__(**kwargs)
def _split_image(self, images: torch.Tensor, size: dict, image_indices: list[int]) -> tuple[list, list]:
    """Slices a batch of same-sized images into overlapping patches for semantic segmentation.

    Args:
        images (`torch.Tensor`): Batch of shape `(batch, channels, height, width)`.
        size (`dict`): Size dict whose `"shortest_edge"` gives the square patch side.
        image_indices (`list[int]`): Original image index for each item in the batch.
            (The annotation was previously `int`, but the code indexes it per batch element.)

    Returns:
        `tuple[list, list]`: Single-image patches and matching `[image_index, start, end]` offsets.
    """
    patches, patch_offsets = [], []
    _, _, height, width = images.shape
    patch_size = size["shortest_edge"]
    longer_side = max(height, width)
    num_patches = math.ceil(longer_side / patch_size)
    total_overlap = num_patches * patch_size - longer_side
    # Spread the overlap evenly so the patches tile the longer side exactly.
    overlap_per_patch = total_overlap / (num_patches - 1) if num_patches > 1 else 0
    for i in range(num_patches):
        start = int(i * (patch_size - overlap_per_patch))
        end = start + patch_size
        # Slice along whichever spatial dimension is longer.
        if height > width:
            batch_patch = images[:, :, start:end, :]
        else:
            batch_patch = images[:, :, :, start:end]
        for batch_idx, single in enumerate(torch.unbind(batch_patch, dim=0)):
            patches.append(single)
            patch_offsets.append([image_indices[batch_idx], start, end])
    return patches, patch_offsets
def _pad(self, images: torch.Tensor, size: dict) -> torch.Tensor:
    """Zero-pads a batch of images on the bottom and right up to the target size."""
    _, _, height, width = images.shape
    target_height, target_width = get_target_size(size)
    bottom = max(0, target_height - height)
    right = max(0, target_width - width)
    # F.pad's tuple is (left, right, top, bottom) for the last two dimensions.
    return torch.nn.functional.pad(images, (0, right, 0, bottom), mode="constant", value=0.0)
@auto_docstring
def preprocess(
    self,
    images: ImageInput,
    segmentation_maps: list[torch.Tensor] | None = None,
    instance_id_to_semantic_id: dict[int, int] | None = None,
    **kwargs: Unpack[EomtImageProcessorKwargs],
) -> BatchFeature:
    r"""
    segmentation_maps (`ImageInput`, *optional*):
        The segmentation maps to preprocess for corresponding images.
    instance_id_to_semantic_id (`list[dict[int, int]]` or `dict[int, int]`, *optional*):
        A mapping between object instance ids and class ids.
    """
    # Thin wrapper: the EoMT-specific work happens in `_preprocess_image_like_inputs`
    # and `_preprocess`, driven by the shared fast-processor pipeline.
    return super().preprocess(images, segmentation_maps, instance_id_to_semantic_id, **kwargs)
def _preprocess_image_like_inputs(
    self,
    images: ImageInput,
    segmentation_maps: ImageInput | None,
    instance_id_to_semantic_id: dict[int, int] | None,
    do_convert_rgb: bool,
    input_data_format: ChannelDimension,
    device: Union[str, "torch.device"] | None = None,
    **kwargs: Unpack[EomtImageProcessorKwargs],
) -> BatchFeature:
    """
    Preprocess image-like inputs.

    Runs the shared `_preprocess` pipeline on the images and, if `segmentation_maps` are
    given, on the maps too (with rescale/normalize off and nearest interpolation), then
    converts each map into per-instance binary masks and class labels.
    """
    images = self._prepare_image_like_inputs(
        images=images, do_convert_rgb=do_convert_rgb, input_data_format=input_data_format, device=device
    )
    # `ignore_index` is consumed here, not by `_preprocess`.
    ignore_index = kwargs.pop("ignore_index", None)
    images_kwargs = kwargs.copy()
    outputs = self._preprocess(images, **images_kwargs)
    if segmentation_maps is not None:
        processed_segmentation_maps = self._prepare_image_like_inputs(
            images=segmentation_maps,
            expected_ndims=2,
            do_convert_rgb=False,
            input_data_format=ChannelDimension.FIRST,
        )
        segmentation_maps_kwargs = kwargs.copy()
        segmentation_maps_kwargs.update(
            {
                # Masks carry label ids, so they must not be rescaled or normalized.
                "do_normalize": False,
                "do_rescale": False,
                # Nearest interpolation is used for segmentation maps instead of BILINEAR.
                "interpolation": tvF.InterpolationMode.NEAREST_EXACT,
            }
        )
        processed_segmentation_maps = self._preprocess(
            images=processed_segmentation_maps, **segmentation_maps_kwargs
        ).pixel_values
        # Drop the singleton channel dimension and keep integer label ids.
        processed_segmentation_maps = processed_segmentation_maps.squeeze(1).to(torch.int64)
        # Convert to list of binary masks and labels
        mask_labels, class_labels = [], []
        for idx, segmentation_map in enumerate(processed_segmentation_maps):
            if isinstance(instance_id_to_semantic_id, list):
                instance_id = instance_id_to_semantic_id[idx]
            else:
                instance_id = instance_id_to_semantic_id
            # Use instance2class_id mapping per image
            masks, classes = convert_segmentation_map_to_binary_masks_fast(
                segmentation_map,
                instance_id,
                ignore_index=ignore_index,
            )
            mask_labels.append(masks)
            class_labels.append(classes)
        # we cannot batch them since they don't share a common class size
        outputs["mask_labels"] = mask_labels
        outputs["class_labels"] = class_labels
    if outputs.patch_offsets:
        outputs["patch_offsets"] = [torch.tensor(offsets) for offsets in outputs.patch_offsets]
    return outputs
def _preprocess(
    self,
    images: list["torch.Tensor"],
    do_resize: bool,
    size: SizeDict,
    interpolation: Optional["tvF.InterpolationMode"],
    do_rescale: bool,
    rescale_factor: float,
    do_normalize: bool,
    do_split_image: bool,
    do_pad: bool,
    image_mean: float | list[float] | None,
    image_std: float | list[float] | None,
    disable_grouping: bool | None,
    return_tensors: str | TensorType | None,
    **kwargs,
):
    """Preprocesses the input images and masks if provided.

    Pipeline: resize -> (split into patches OR pad) -> rescale/normalize.
    Images are regrouped by shape before each stage so that same-shaped images can be
    processed as one batched tensor; `reorder_images` restores the original ordering after
    each grouped pass.

    Returns a `BatchFeature` with `pixel_values` and `patch_offsets` (the latter is only
    non-empty when `do_split_image` is set; offsets locate each patch in its source image).
    """
    processed_images, patch_offsets = [], []
    grouped_images, grouped_images_index = group_images_by_shape(images, disable_grouping=disable_grouping)
    resized_images_grouped = {}
    for shape, stacked_images in grouped_images.items():
        if do_resize:
            stacked_images = self.resize(image=stacked_images, size=size, interpolation=interpolation)
        resized_images_grouped[shape] = stacked_images
    images = reorder_images(resized_images_grouped, grouped_images_index)
    # Group images by size for batched splitting/padding. Needed in case do_resize is False.
    grouped_images, grouped_images_index = group_images_by_shape(images, disable_grouping=disable_grouping)
    processed_images_grouped = {}
    for shape, stacked_images in grouped_images.items():
        # Original positions of the images in this shape group, in their original order.
        original_indices = [
            original_idx for original_idx, (img_shape, _) in grouped_images_index.items() if img_shape == shape
        ]
        if do_split_image:
            # Tile each image into patches and record (image_index, start, end) offsets
            # so predictions can be stitched back together later (semantic mode).
            patches, offsets = self._split_image(stacked_images, size, original_indices)
            processed_images.extend(patches)
            patch_offsets.extend(offsets)
        if do_pad:
            # Pad each image up to the target size instead of splitting (panoptic/instance mode).
            stacked_images = self._pad(stacked_images, size)
            processed_images_grouped[shape] = stacked_images
    if do_split_image:
        images, patch_offsets = reorder_patches_and_offsets(processed_images, patch_offsets)
    if do_pad:
        images = reorder_images(processed_images_grouped, grouped_images_index)
    # Regroup once more (splitting/padding may have changed shapes) before pixel-level ops.
    grouped_images, grouped_images_index = group_images_by_shape(images, disable_grouping=disable_grouping)
    processed_images_grouped = {}
    for shape, stacked_images in grouped_images.items():
        stacked_images = self.rescale_and_normalize(
            stacked_images, do_rescale, rescale_factor, do_normalize, image_mean, image_std
        )
        processed_images_grouped[shape] = stacked_images
    processed_images = reorder_images(processed_images_grouped, grouped_images_index)
    # patch_offsets are per-patch metadata, not image tensors: keep them out of tensor conversion.
    return BatchFeature(
        data={"pixel_values": processed_images, "patch_offsets": patch_offsets},
        tensor_type=return_tensors,
        skip_tensor_conversion=["patch_offsets"],
    )
def merge_image_patches(
    self,
    segmentation_logits: torch.Tensor,
    patch_offsets: list[tuple[int, int, int]],
    target_sizes: list[tuple[int, int]],
    size: dict[str, int],
) -> list[torch.Tensor]:
    """Stitch per-patch semantic logits back into one full-resolution logit map per image.

    Args:
        segmentation_logits (`torch.Tensor`):
            Tensor of shape `(num_patches, num_classes, patch_height, patch_width)` holding the
            predicted logits for every patch.
        patch_offsets (`list[tuple[int, int, int]]`):
            `(image_index, start, end)` triples locating each patch along the long dimension
            (height or width) of its source image.
        target_sizes (`list[tuple[int, int]]`):
            Original `(height, width)` of each image before preprocessing.
        size (`dict[str, int]`):
            The size dict that was used when resizing.
    """
    num_classes = segmentation_logits.shape[1]
    device = segmentation_logits.device

    # One zero-initialized canvas (plus a matching coverage counter) per image, at resized resolution.
    canvases, coverage = [], []
    for original_size in target_sizes:
        height, width = get_size_with_aspect_ratio(original_size, size["shortest_edge"], size["longest_edge"])
        canvases.append(torch.zeros((num_classes, height, width), device=device))
        coverage.append(torch.zeros((num_classes, height, width), device=device))

    # Accumulate every patch into its image's canvas along the long dimension.
    for patch_idx, (image_idx, patch_start, patch_end) in enumerate(patch_offsets):
        img_height, img_width = target_sizes[image_idx]
        if img_height > img_width:
            canvases[image_idx][:, patch_start:patch_end, :] += segmentation_logits[patch_idx]
            coverage[image_idx][:, patch_start:patch_end, :] += 1
        else:
            canvases[image_idx][:, :, patch_start:patch_end] += segmentation_logits[patch_idx]
            coverage[image_idx][:, :, patch_start:patch_end] += 1

    # Average overlapping regions, then upsample each map back to its original image size.
    merged_logits = []
    for image_idx, (canvas, counts) in enumerate(zip(canvases, coverage)):
        mean_logits = canvas / counts.clamp(min=1)
        restored = torch.nn.functional.interpolate(
            mean_logits.unsqueeze(0),
            size=target_sizes[image_idx],
            mode="bilinear",
            align_corners=False,
        ).squeeze(0)
        merged_logits.append(restored)
    return merged_logits
def unpad_image(
    self,
    segmentation_logits: torch.Tensor,
    target_sizes: list[tuple[int, int]],
    size: dict[str, int],
) -> list[torch.Tensor]:
    """Crop padded logits back to their valid extent, then upsample to each original resolution."""
    restored_logits = []
    for idx, original_size in enumerate(target_sizes):
        valid_height, valid_width = get_size_with_aspect_ratio(
            original_size, size["shortest_edge"], size["longest_edge"]
        )
        # Drop the padded border first, then rescale the valid region to the original image size.
        valid_region = segmentation_logits[idx][:, :valid_height, :valid_width]
        upsampled = torch.nn.functional.interpolate(
            valid_region.unsqueeze(0), size=original_size, mode="bilinear", align_corners=False
        ).squeeze(0)
        restored_logits.append(upsampled)
    return restored_logits
def post_process_semantic_segmentation(
    self,
    outputs,
    target_sizes: list[tuple[int, int]],
    size: dict[str, int] | None = None,
) -> list["torch.Tensor"]:
    """Post-processes model outputs into final semantic segmentation prediction.

    Args:
        outputs:
            Model outputs carrying `masks_queries_logits` of shape
            `(batch_size, num_queries, height, width)`, `class_queries_logits` of shape
            `(batch_size, num_queries, num_classes + 1)` and optional `patch_offsets`.
        target_sizes (`list[tuple[int, int]]`):
            Original `(height, width)` of each input image.
        size (`dict[str, int]`, *optional*):
            Size dict used at preprocessing time; defaults to `self.size`.

    Returns:
        `list[torch.Tensor]`: One `(height, width)` map per image holding the predicted class id
        for every pixel.

    NOTE(review): the original annotated the return as `np.ndarray`, but the function returns a
    list of per-image `torch.Tensor` maps — the annotation is corrected here; behavior unchanged.
    """
    size = size if size is not None else self.size
    masks_queries_logits = outputs.masks_queries_logits  # [batch_size, num_queries, height, width]
    class_queries_logits = outputs.class_queries_logits  # [batch_size, num_queries, num_classes+1]
    patch_offsets = outputs.patch_offsets
    output_size = get_target_size(size)
    masks_queries_logits = torch.nn.functional.interpolate(
        masks_queries_logits,
        size=output_size,
        mode="bilinear",
    )
    # Remove the null class `[..., :-1]`
    masks_classes = class_queries_logits.softmax(dim=-1)[..., :-1]
    masks_probs = masks_queries_logits.sigmoid()  # [batch_size, num_queries, height, width]
    # Weight each query's mask by its class probabilities -> per-class logit maps.
    segmentation_logits = torch.einsum("bqc, bqhw -> bchw", masks_classes, masks_probs)
    if patch_offsets:
        # Inference ran on image patches: stitch them back to full resolution.
        output_logits = self.merge_image_patches(segmentation_logits, patch_offsets, target_sizes, size)
    else:
        output_logits = []
        for idx in range(len(segmentation_logits)):
            resized_logits = torch.nn.functional.interpolate(
                segmentation_logits[idx].unsqueeze(dim=0),
                size=target_sizes[idx],
                mode="bilinear",
                align_corners=False,
            )
            output_logits.append(resized_logits[0])
    preds = [logit.argmax(dim=0) for logit in output_logits]
    return preds
def post_process_panoptic_segmentation(
    self,
    outputs,
    target_sizes: list[tuple[int, int]],
    threshold: float = 0.8,
    mask_threshold: float = 0.5,
    overlap_mask_area_threshold: float = 0.8,
    stuff_classes: list[int] | None = None,
    size: dict[str, int] | None = None,
):
    """Post-processes model outputs into final panoptic segmentation prediction."""
    if size is None:
        size = self.size
    masks_queries_logits = outputs.masks_queries_logits  # [batch_size, num_queries, height, width]
    class_queries_logits = outputs.class_queries_logits  # [batch_size, num_queries, num_classes+1]
    batch_size = class_queries_logits.shape[0]
    num_labels = class_queries_logits.shape[-1] - 1

    # Bring mask logits to the padded preprocessing resolution, then crop/upsample per image.
    masks_queries_logits = torch.nn.functional.interpolate(
        masks_queries_logits,
        size=get_target_size(size),
        mode="bilinear",
    )
    mask_probs_batch = self.unpad_image(masks_queries_logits, target_sizes, size)

    # Per-query best class and its probability.
    pred_scores_batch, pred_labels_batch = class_queries_logits.softmax(dim=-1).max(-1)

    results: list = []
    for batch_idx in range(batch_size):
        mask_probs, pred_scores, pred_labels = remove_low_and_no_objects(
            mask_probs_batch[batch_idx],
            pred_scores_batch[batch_idx],
            pred_labels_batch[batch_idx],
            threshold,
            num_labels,
        )
        if mask_probs.shape[0] <= 0:
            # No query survived filtering: emit an all -1 (unlabeled) canvas.
            height, width = target_sizes[batch_idx] if target_sizes is not None else mask_probs.shape[1:]
            results.append({"segmentation": torch.zeros((height, width)) - 1, "segments_info": []})
            continue
        segmentation, segments = compute_segments(
            mask_probs=mask_probs,
            pred_scores=pred_scores,
            pred_labels=pred_labels,
            stuff_classes=stuff_classes,
            mask_threshold=mask_threshold,
            overlap_mask_area_threshold=overlap_mask_area_threshold,
            target_size=target_sizes[batch_idx] if target_sizes is not None else None,
        )
        results.append({"segmentation": segmentation, "segments_info": segments})
    return results
@filter_out_non_signature_kwargs()
def post_process_instance_segmentation(
    self,
    outputs,
    target_sizes: list[tuple[int, int]],
    threshold: float = 0.8,
    size: dict[str, int] | None = None,
):
    """Post-processes model outputs into Instance Segmentation Predictions.

    Args:
        outputs:
            Model outputs with `masks_queries_logits` `(batch_size, num_queries, height, width)`
            and `class_queries_logits` `(batch_size, num_queries, num_classes + 1)`.
        target_sizes (`list[tuple[int, int]]`):
            Original `(height, width)` of each input image.
        threshold (`float`, *optional*, defaults to 0.8):
            Minimum combined (class x mask) score for an instance to be kept.
        size (`dict[str, int]`, *optional*):
            Size dict used at preprocessing time; defaults to `self.size`.

    Returns:
        `list[dict]`: One dict per image with `"segmentation"` (an `(H, W)` tensor of segment ids,
        `-1` for unassigned pixels) and `"segments_info"` (list of `{"id", "label_id", "score"}`).

    NOTE(review): the original accumulated every kept mask into an `instance_maps` list that was
    never read or returned; that dead code is removed here — results are unchanged.
    """
    size = size if size is not None else self.size
    masks_queries_logits = outputs.masks_queries_logits
    class_queries_logits = outputs.class_queries_logits
    output_size = get_target_size(size)
    masks_queries_logits = torch.nn.functional.interpolate(
        masks_queries_logits,
        size=output_size,
        mode="bilinear",
    )
    mask_probs_batch = self.unpad_image(masks_queries_logits, target_sizes, size)
    device = masks_queries_logits.device
    batch_size = class_queries_logits.shape[0]
    num_queries = class_queries_logits.shape[-2]
    results = []
    for i in range(batch_size):
        mask_pred = mask_probs_batch[i]
        mask_class = class_queries_logits[i]
        # Remove the null class `[..., :-1]`
        scores, pred_classes = mask_class.softmax(dim=-1)[..., :-1].max(-1)
        pred_masks = (mask_pred > 0).float()
        # Calculate average mask prob over the predicted foreground region.
        mask_scores = (mask_pred.sigmoid().flatten(1) * pred_masks.flatten(1)).sum(1) / (
            pred_masks.flatten(1).sum(1) + 1e-6
        )
        pred_scores = scores * mask_scores
        segmentation = torch.zeros(target_sizes[i], device=device) - 1
        segments = []
        current_segment_id = 0
        for j in range(num_queries):
            score = pred_scores[j].item()
            # Keep queries with a non-empty binary mask and a sufficiently confident score.
            # Later instances overwrite earlier ones where their masks overlap.
            if not torch.all(pred_masks[j] == 0) and score >= threshold:
                segmentation[pred_masks[j] == 1] = current_segment_id
                segments.append(
                    {
                        "id": current_segment_id,
                        "label_id": pred_classes[j].item(),
                        "score": round(score, 6),
                    }
                )
                current_segment_id += 1
        results.append({"segmentation": segmentation, "segments_info": segments})
    return results
# Public API of this module.
__all__ = ["EomtImageProcessorFast"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/eomt/image_processing_eomt_fast.py",
"license": "Apache License 2.0",
"lines": 462,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/eomt/modular_eomt.py | # Copyright 2025 Mobile Perception Systems Lab at TU/e and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch EoMT model."""
import math
from dataclasses import dataclass
import torch
import torch.nn.functional as F
from torch import Tensor, nn
from ... import initialization as init
from ...activations import ACT2FN
from ...file_utils import (
ModelOutput,
)
from ...modeling_utils import PreTrainedModel
from ...processing_utils import Unpack
from ...utils import (
TransformersKwargs,
auto_docstring,
logging,
)
from ...utils.generic import merge_with_config_defaults
from ...utils.output_capturing import capture_outputs
from ..dinov2.modeling_dinov2 import (
Dinov2Embeddings,
Dinov2Layer,
Dinov2LayerScale,
Dinov2PatchEmbeddings,
)
from ..mask2former.modeling_mask2former import Mask2FormerForUniversalSegmentation, Mask2FormerLoss
from ..siglip.modeling_siglip import SiglipAttention
from ..vit.configuration_vit import ViTConfig
logger = logging.get_logger(__name__)
class EomtConfig(ViTConfig):
    r"""
    This is the configuration class to store the configuration of a [`EomtForUniversalSegmentation`]. It is used to instantiate an EoMT model
    according to the specified arguments, defining the model architecture. Instantiating a configuration with the
    defaults will yield a similar configuration to that of the EoMT
    [tue-mps/coco_panoptic_eomt_large_640](https://huggingface.co/tue-mps/coco_panoptic_eomt_large_640)
    architecture.

    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.

    Args:
        hidden_size (`int`, *optional*, defaults to 1024):
            Dimensionality of the hidden representations.
        num_hidden_layers (`int`, *optional*, defaults to 24):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 16):
            Number of attention heads in each attention layer.
        mlp_ratio (`int`, *optional*, defaults to 4):
            Ratio of the MLP hidden dimensionality to the hidden size.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder.
        hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
            The dropout probability for all fully connected layers in the embeddings and encoder.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-06):
            The epsilon used by the layer normalization layers.
        image_size (`int`, *optional*, defaults to 640):
            The size (resolution) of each input image.
        patch_size (`int`, *optional*, defaults to 16):
            The size (resolution) of each patch.
        num_channels (`int`, *optional*, defaults to 3):
            The number of input channels.
        layerscale_value (`float`, *optional*, defaults to 1.0):
            Initial value for the LayerScale parameter.
        drop_path_rate (`float`, *optional*, defaults to 0.0):
            The stochastic depth rate (drop path) used during training.
        num_upscale_blocks (`int`, *optional*, defaults to 2):
            Number of upsampling blocks used in the decoder or segmentation head.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            Dropout probability applied after attention projection.
        use_swiglu_ffn (`bool`, *optional*, defaults to `False`):
            Whether to use the SwiGLU feedforward neural network.
        num_blocks (`int`, *optional*, defaults to 4):
            Number of feature blocks or stages in the architecture.
        no_object_weight (`float`, *optional*, defaults to 0.1):
            Loss weight for the 'no object' class in panoptic/instance segmentation.
        class_weight (`float`, *optional*, defaults to 2.0):
            Loss weight for classification targets.
        mask_weight (`float`, *optional*, defaults to 5.0):
            Loss weight for mask prediction.
        dice_weight (`float`, *optional*, defaults to 5.0):
            Loss weight for the dice loss component.
        train_num_points (`int`, *optional*, defaults to 12544):
            Number of points to sample for mask loss computation during training.
        oversample_ratio (`float`, *optional*, defaults to 3.0):
            Oversampling ratio used in point sampling for mask training.
        importance_sample_ratio (`float`, *optional*, defaults to 0.75):
            Ratio of points to sample based on importance during training.
        num_queries (`int`, *optional*, defaults to 200):
            Number of object queries in the Transformer.
        num_register_tokens (`int`, *optional*, defaults to 4):
            Number of learnable register tokens added to the transformer input.

    Example:

    ```python
    >>> from transformers import EomtConfig, EomtForUniversalSegmentation

    >>> # Initialize configuration
    >>> config = EomtConfig()

    >>> # Initialize model
    >>> model = EomtForUniversalSegmentation(config)

    >>> # Access config
    >>> config = model.config
    ```"""

    model_type = "eomt"

    def __init__(
        self,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        mlp_ratio=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        image_size=640,
        patch_size=16,
        num_channels=3,
        layerscale_value=1.0,
        drop_path_rate=0.0,
        num_upscale_blocks=2,
        attention_dropout=0.0,
        use_swiglu_ffn=False,
        num_blocks=4,
        no_object_weight: float = 0.1,
        class_weight: float = 2.0,
        mask_weight: float = 5.0,
        dice_weight: float = 5.0,
        train_num_points: int = 12544,
        oversample_ratio: float = 3.0,
        importance_sample_ratio: float = 0.75,
        num_queries=200,
        num_register_tokens=4,
        **kwargs,
    ):
        # EoMT-specific architecture and loss hyper-parameters.
        self.mlp_ratio = mlp_ratio
        self.attention_dropout = attention_dropout
        self.layerscale_value = layerscale_value
        self.drop_path_rate = drop_path_rate
        self.num_upscale_blocks = num_upscale_blocks
        self.use_swiglu_ffn = use_swiglu_ffn
        self.num_blocks = num_blocks
        self.no_object_weight = no_object_weight
        self.class_weight = class_weight
        self.mask_weight = mask_weight
        self.dice_weight = dice_weight
        self.train_num_points = train_num_points
        self.oversample_ratio = oversample_ratio
        self.importance_sample_ratio = importance_sample_ratio
        self.num_queries = num_queries
        self.num_register_tokens = num_register_tokens
        # Shared ViT hyper-parameters are stored by the parent config.
        super().__init__(
            hidden_size=hidden_size,
            num_hidden_layers=num_hidden_layers,
            num_attention_heads=num_attention_heads,
            hidden_dropout_prob=hidden_dropout_prob,
            hidden_act=hidden_act,
            initializer_range=initializer_range,
            layer_norm_eps=layer_norm_eps,
            image_size=image_size,
            patch_size=patch_size,
            num_channels=num_channels,
            **kwargs,
        )
        # Drop ViT attributes that EoMT does not use, so they never appear in serialized configs.
        del self.intermediate_size
        del self.qkv_bias
        del self.pooler_act
        del self.pooler_output_size
        del self.encoder_stride
        del self.attention_probs_dropout_prob
@dataclass
@auto_docstring(
    custom_intro="""
    Class for outputs of [`EomtForUniversalSegmentationOutput`].

    This output can be directly passed to [`~EomtImageProcessor.post_process_semantic_segmentation`] or
    [`~EomtImageProcessor.post_process_instance_segmentation`] or
    [`~EomtImageProcessor.post_process_panoptic_segmentation`] to compute final segmentation maps. Please, see
    [`~EomtImageProcessor] for details regarding usage.
    """
)
class EomtForUniversalSegmentationOutput(ModelOutput):
    r"""
    loss (`torch.Tensor`, *optional*):
        The computed loss, returned when labels are present.
    class_queries_logits (`torch.FloatTensor`):
        A tensor of shape `(batch_size, num_queries, num_labels + 1)` representing the proposed classes for each
        query. Note the `+ 1` is needed because we incorporate the null class.
    masks_queries_logits (`torch.FloatTensor`):
        A tensor of shape `(batch_size, num_queries, height, width)` representing the proposed masks for each
        query.
    last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
        Last hidden states (final feature map) of the last layer.
    hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
        Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
        shape `(batch_size, sequence_length, hidden_size)`. Hidden-states all layers of the model.
    attentions (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
        Tuple of `tuple(torch.FloatTensor)` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
        sequence_length)`. Self and Cross Attentions weights from transformer decoder.
    patch_offsets (`list[torch.Tensor]`, *optional*):
        list of tuples indicating the image index and start and end positions of patches for semantic segmentation.
    """

    # All fields default to None so partially-populated outputs stay valid; field order defines
    # tuple order when the output is converted to a tuple (ModelOutput convention).
    loss: torch.FloatTensor | None = None
    class_queries_logits: torch.FloatTensor | None = None
    masks_queries_logits: torch.FloatTensor | None = None
    last_hidden_state: torch.FloatTensor | None = None
    hidden_states: tuple[torch.FloatTensor] | None = None
    attentions: tuple[torch.FloatTensor] | None = None
    patch_offsets: list[torch.Tensor] | None = None
class EomtLoss(Mask2FormerLoss):
    """EoMT segmentation training loss — inherits `Mask2FormerLoss` with no changes."""

    pass
class EomtPatchEmbeddings(Dinov2PatchEmbeddings):
    """Image-to-patch embedding — inherits `Dinov2PatchEmbeddings` with no changes."""

    pass
class EomtEmbeddings(Dinov2Embeddings):
    """Builds the EoMT token sequence: [CLS] + register tokens + position-encoded patch tokens."""

    def __init__(self, config: EomtConfig) -> None:
        nn.Module.__init__(self)
        self.config = config
        self.patch_size = config.patch_size
        self.cls_token = nn.Parameter(torch.randn(1, 1, config.hidden_size))
        self.register_tokens = nn.Parameter(torch.zeros(1, config.num_register_tokens, config.hidden_size))
        self.patch_embeddings = EomtPatchEmbeddings(config)
        num_patches = self.patch_embeddings.num_patches
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.num_prefix_tokens = 1 + config.num_register_tokens  # 1 for [CLS]
        self.position_embeddings = nn.Embedding(num_patches, config.hidden_size)
        self.register_buffer("position_ids", torch.arange(num_patches).expand((1, -1)), persistent=False)

    def interpolate_pos_encoding(self):
        raise AttributeError("Not needed for Eomt Model")

    def forward(self, pixel_values: torch.Tensor) -> torch.Tensor:
        batch_size = pixel_values.shape[0]
        projection_dtype = self.patch_embeddings.projection.weight.dtype
        patch_embeds = self.patch_embeddings(pixel_values.to(dtype=projection_dtype))
        # Positional embeddings cover patch tokens only; prefix tokens are prepended afterwards.
        patch_embeds = patch_embeds + self.position_embeddings(self.position_ids)
        prefix = [
            self.cls_token.expand(batch_size, -1, -1),
            self.register_tokens.expand(batch_size, -1, -1),
        ]
        token_sequence = torch.cat(prefix + [patch_embeds], dim=1)
        return self.dropout(token_sequence)
class EomtAttention(SiglipAttention):
    """Multi-head self-attention — inherits `SiglipAttention` with no changes."""

    pass
class EomtLayerScale(Dinov2LayerScale):
    """Per-channel learnable residual scaling — inherits `Dinov2LayerScale` with no changes."""

    pass
class EomtLayer(Dinov2Layer):
    """Pre-norm transformer layer with LayerScale and stochastic depth on both residual paths."""

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: torch.Tensor | None = None,
    ) -> torch.Tensor:
        # Attention sub-block: norm -> self-attention -> LayerScale -> DropPath + residual.
        attention_output, _ = self.attention(self.norm1(hidden_states), attention_mask)
        hidden_states = self.drop_path(self.layer_scale1(attention_output)) + hidden_states

        # MLP sub-block: norm (applied after attention in EoMT) -> MLP -> LayerScale -> DropPath + residual.
        mlp_output = self.mlp(self.norm2(hidden_states))
        return self.drop_path(self.layer_scale2(mlp_output)) + hidden_states
class EomtLayerNorm2d(nn.LayerNorm):
    """LayerNorm over the channel dimension of NCHW feature maps."""

    def __init__(self, num_channels, eps=1e-6, affine=True):
        super().__init__(num_channels, eps=eps, elementwise_affine=affine)

    def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
        # Move channels last so normalization runs over the channel axis, then restore NCHW.
        channels_last = hidden_state.movedim(1, -1)
        normalized = F.layer_norm(channels_last, self.normalized_shape, self.weight, self.bias, self.eps)
        return normalized.movedim(-1, 1)
class EomtScaleLayer(nn.Module):
    """Upsamples a feature map 2x with a transposed conv, then refines it with a depthwise conv."""

    def __init__(self, config: EomtConfig):
        super().__init__()
        hidden_size = config.hidden_size
        self.conv1 = nn.ConvTranspose2d(hidden_size, hidden_size, kernel_size=2, stride=2)
        self.activation = ACT2FN[config.hidden_act]
        # Depthwise convolution (groups == channels) keeps the channel count fixed.
        self.conv2 = nn.Conv2d(
            hidden_size,
            hidden_size,
            kernel_size=3,
            padding=1,
            groups=hidden_size,
            bias=False,
        )
        self.layernorm2d = EomtLayerNorm2d(hidden_size)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        upsampled = self.activation(self.conv1(hidden_states))
        return self.layernorm2d(self.conv2(upsampled))
class EomtScaleBlock(nn.Module):
    """Sequentially applies `num_upscale_blocks` `EomtScaleLayer` modules."""

    def __init__(self, config: EomtConfig):
        super().__init__()
        self.num_blocks = config.num_upscale_blocks
        self.block = nn.ModuleList(EomtScaleLayer(config) for _ in range(self.num_blocks))

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        for scale_layer in self.block:
            hidden_states = scale_layer(hidden_states)
        return hidden_states
class EomtMaskHead(nn.Module):
    """Three-layer MLP projecting query embeddings into mask-embedding space."""

    def __init__(self, config: EomtConfig):
        super().__init__()
        hidden_size = config.hidden_size
        self.fc1 = nn.Linear(hidden_size, hidden_size)
        self.fc2 = nn.Linear(hidden_size, hidden_size)
        self.fc3 = nn.Linear(hidden_size, hidden_size)
        self.activation = ACT2FN[config.hidden_act]

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        # Activation after the first two projections; the final projection stays linear.
        first = self.activation(self.fc1(hidden_states))
        second = self.activation(self.fc2(first))
        return self.fc3(second)
@auto_docstring
class EomtPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config: EomtConfig
    base_model_prefix = "eomt"
    main_input_name = "pixel_values"
    input_modalities = ("image",)
    supports_gradient_checkpointing = False
    _no_split_modules = ["EomtLayer"]
    _supports_sdpa = True
    # Module types whose outputs are captured when hidden states / attentions are requested.
    _can_record_outputs = {
        "hidden_states": EomtLayer,
        "attentions": EomtAttention,
    }

    @torch.no_grad()
    def _init_weights(self, module: nn.Module) -> None:
        """Initialize the weights of a single `module`; applied recursively over the model."""
        std = self.config.initializer_range
        if isinstance(module, (nn.Linear, nn.Conv2d, nn.ConvTranspose2d)):
            # Kaiming-uniform fan-in init with matching uniform bias bound.
            init.kaiming_uniform_(module.weight, a=math.sqrt(5))
            if module.bias is not None:
                fan_in, _ = torch.nn.init._calculate_fan_in_and_fan_out(module.weight)
                bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0
                init.uniform_(module.bias, -bound, bound)
        elif isinstance(module, nn.LayerNorm):
            init.ones_(module.weight)
            init.zeros_(module.bias)
        elif isinstance(module, nn.Embedding):
            init.normal_(module.weight, mean=0.0, std=1)
            # Here we need the check explicitly, as we slice the weight in the `zeros_` call, so it looses the flag
            if module.padding_idx is not None and not getattr(module.weight, "_is_hf_initialized", False):
                init.zeros_(module.weight[module.padding_idx])
        elif isinstance(module, EomtLayerScale):
            if hasattr(module, "lambda1"):
                init.constant_(module.lambda1, self.config.layerscale_value)
        elif isinstance(module, EomtEmbeddings):
            init.trunc_normal_(module.cls_token, mean=0.0, std=std)
            init.zeros_(module.register_tokens)
            init.copy_(module.position_ids, torch.arange(module.position_ids.shape[-1]).expand((1, -1)))
        elif isinstance(module, EomtLoss):
            # Cross-entropy class weights: down-weight the trailing "no object" class by eos_coef.
            empty_weight = torch.ones(module.num_labels + 1)
            empty_weight[-1] = module.eos_coef
            init.copy_(module.empty_weight, empty_weight)
        elif isinstance(module, EomtForUniversalSegmentation):
            init.ones_(module.attn_mask_probs)
@auto_docstring(
    custom_intro="""
    The EoMT Model with head on top for instance/semantic/panoptic segmentation.
    """
)
class EomtForUniversalSegmentation(Mask2FormerForUniversalSegmentation):
    def __init__(self, config: EomtConfig):
        PreTrainedModel.__init__(self, config)
        self.config = config
        self.num_hidden_layers = config.num_hidden_layers
        self.embeddings = EomtEmbeddings(config)
        self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        # Learnable object queries, concatenated to the token sequence in the last num_blocks layers.
        self.query = nn.Embedding(config.num_queries, config.hidden_size)
        self.layers = nn.ModuleList([EomtLayer(config) for _ in range(config.num_hidden_layers)])
        self.upscale_block = EomtScaleBlock(config)
        self.mask_head = EomtMaskHead(config)
        self.class_predictor = nn.Linear(config.hidden_size, config.num_labels + 1)
        self.grid_size = (config.image_size // config.patch_size, config.image_size // config.patch_size)
        self.weight_dict: dict[str, float] = {
            "loss_cross_entropy": config.class_weight,
            "loss_mask": config.mask_weight,
            "loss_dice": config.dice_weight,
        }
        self.criterion = EomtLoss(config=config, weight_dict=self.weight_dict)
        # Per-block probability of applying the query attention mask; initialized to 1 (always on).
        # Presumably annealed externally during training — TODO confirm against the training recipe.
        self.register_buffer("attn_mask_probs", torch.ones(config.num_blocks))
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.patch_embeddings

    def get_auxiliary_logits(self):
        raise AttributeError("Note needed for Eomt Model.")

    def predict(self, logits: torch.Tensor):
        """Compute `(mask_logits, class_logits)` from a token sequence.

        Sequence layout along dim 1: [query tokens | CLS/register prefix | patch tokens].
        """
        query_tokens = logits[:, : self.config.num_queries, :]
        class_logits = self.class_predictor(query_tokens)
        # Patch tokens (everything after the queries and the CLS/register prefix),
        # reshaped back into a 2D feature map of `grid_size`.
        prefix_tokens = logits[:, self.config.num_queries + self.embeddings.num_prefix_tokens :, :]
        prefix_tokens = prefix_tokens.transpose(1, 2)
        prefix_tokens = prefix_tokens.reshape(prefix_tokens.shape[0], -1, *self.grid_size)
        query_tokens = self.mask_head(query_tokens)
        prefix_tokens = self.upscale_block(prefix_tokens)
        # Dot product of each query embedding with every spatial location -> per-query mask logits.
        mask_logits = torch.einsum("bqc, bchw -> bqhw", query_tokens, prefix_tokens)
        return mask_logits, class_logits

    @staticmethod
    def _disable_attention_mask(attn_mask, prob, num_query_tokens, encoder_start_tokens, device):
        """Re-enable full patch attention for each query independently with probability `1 - prob`."""
        if prob < 1:
            # Generate random queries to disable based on the probs
            random_queries = torch.rand(attn_mask.shape[0], num_query_tokens, device=device) > prob
            # Disable attention to the query tokens, considering the prefix tokens
            attn_mask[:, :num_query_tokens, encoder_start_tokens:][random_queries] = 1
        return attn_mask

    @merge_with_config_defaults
    @capture_outputs
    @auto_docstring
    def forward(
        self,
        pixel_values: Tensor,
        mask_labels: list[Tensor] | None = None,
        class_labels: list[Tensor] | None = None,
        patch_offsets: list[Tensor] | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> EomtForUniversalSegmentationOutput:
        r"""
        mask_labels (`list[torch.Tensor]`, *optional*):
            list of mask labels of shape `(num_labels, height, width)` to be fed to a model
        class_labels (`list[torch.LongTensor]`, *optional*):
            list of target class labels of shape `(num_labels, height, width)` to be fed to a model. They identify the
            labels of `mask_labels`, e.g. the label of `mask_labels[i][j]` if `class_labels[i][j]`.
        patch_offsets (`list[torch.Tensor]`, *optional*):
            list of tuples indicating the image index and start and end positions of patches for semantic segmentation.
        """
        masks_queries_logits_per_layer, class_queries_logits_per_layer = (), ()
        attention_mask = None
        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")
        hidden_states = self.embeddings(pixel_values)
        for idx, layer_module in enumerate(self.layers):
            # The last `num_blocks` layers run with the object queries prepended to the sequence.
            if idx == self.num_hidden_layers - self.config.num_blocks:
                query = self.query.weight[None, :, :].expand(hidden_states.shape[0], -1, -1).to(hidden_states.device)
                hidden_states = torch.cat((query, hidden_states), dim=1)
            # Inside the query blocks (and when masking is active for this block), make an
            # intermediate prediction and use it to restrict query->patch attention.
            if idx >= self.num_hidden_layers - self.config.num_blocks and (
                self.training or self.attn_mask_probs[idx - self.num_hidden_layers + self.config.num_blocks] > 0
            ):
                norm_hidden_states = self.layernorm(hidden_states)
                masks_queries_logits, class_queries_logits = self.predict(norm_hidden_states)
                masks_queries_logits_per_layer += (masks_queries_logits,)
                class_queries_logits_per_layer += (class_queries_logits,)
                attention_mask = torch.ones(
                    hidden_states.shape[0],
                    hidden_states.shape[1],
                    hidden_states.shape[1],
                    device=hidden_states.device,
                    dtype=torch.bool,
                )
                interpolated_logits = F.interpolate(masks_queries_logits, size=self.grid_size, mode="bilinear")
                interpolated_logits = interpolated_logits.view(
                    interpolated_logits.size(0), interpolated_logits.size(1), -1
                )
                num_query_tokens = self.config.num_queries
                encoder_start_tokens = num_query_tokens + self.embeddings.num_prefix_tokens
                # Set attention mask for queries to focus on encoder tokens based on interpolated logits
                attention_mask[:, :num_query_tokens, encoder_start_tokens:] = interpolated_logits > 0
                # Disable attention mask for random query tokens.
                attention_mask = self._disable_attention_mask(
                    attention_mask,
                    prob=self.attn_mask_probs[idx - self.num_hidden_layers + self.config.num_blocks],
                    num_query_tokens=num_query_tokens,
                    encoder_start_tokens=encoder_start_tokens,
                    device=attention_mask.device,
                )
                # Expand attention mask to 4d mask.
                attention_mask = attention_mask[:, None, ...].expand(-1, self.config.num_attention_heads, -1, -1)
                # Allowed positions become 1.0 and blocked ones -1e9; presumably added to the
                # attention logits inside EomtAttention, where the uniform +1.0 offset on allowed
                # entries cancels under softmax — TODO confirm in the attention implementation.
                attention_mask = attention_mask.float().masked_fill(~attention_mask, -1e9)
            hidden_states = layer_module(hidden_states, attention_mask)
        sequence_output = self.layernorm(hidden_states)
        # Final prediction from the last layer's output.
        masks_queries_logits, class_queries_logits = self.predict(sequence_output)
        masks_queries_logits_per_layer += (masks_queries_logits,)
        class_queries_logits_per_layer += (class_queries_logits,)
        loss = None
        if mask_labels is not None and class_labels is not None:
            # Sum the loss over every intermediate prediction plus the final one.
            # After the loop, the loop variables hold the final-layer logits again, which is
            # what the returned output exposes.
            loss = 0.0
            for masks_queries_logits, class_queries_logits in zip(
                masks_queries_logits_per_layer, class_queries_logits_per_layer
            ):
                loss_dict = self.get_loss_dict(
                    masks_queries_logits=masks_queries_logits,
                    class_queries_logits=class_queries_logits,
                    mask_labels=mask_labels,
                    class_labels=class_labels,
                    auxiliary_predictions=None,
                )
                loss += self.get_loss(loss_dict)
        return EomtForUniversalSegmentationOutput(
            loss=loss,
            masks_queries_logits=masks_queries_logits,
            class_queries_logits=class_queries_logits,
            last_hidden_state=sequence_output,
            patch_offsets=patch_offsets,
        )
# Public API of this module (consumed by the modular-model converter).
__all__ = ["EomtConfig", "EomtPreTrainedModel", "EomtForUniversalSegmentation"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/eomt/modular_eomt.py",
"license": "Apache License 2.0",
"lines": 505,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:tests/models/eomt/test_image_processing_eomt.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch EoMT Image Processor."""
import unittest
import numpy as np
from datasets import load_dataset
from transformers.image_utils import load_image
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_torchvision_available, is_vision_available
from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs
from ...test_processing_common import url_to_local_path
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import EomtImageProcessor
if is_torchvision_available():
from transformers import EomtImageProcessorFast
from transformers.models.eomt.modeling_eomt import EomtForUniversalSegmentationOutput
class EomtImageProcessingTester:
    """Builds image-processor kwargs and synthetic inputs/outputs for the EoMT image-processing tests.

    Holds both the preprocessing parameters (resize/pad/normalize settings) and the
    dimensions used to fabricate fake model outputs for the post-processing tests.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_resize=True,
        do_pad=True,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        num_labels=10,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.do_pad = do_pad
        # Default resizes both the shortest and longest edge to 18 pixels.
        self.size = size if size is not None else {"shortest_edge": 18, "longest_edge": 18}
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        # for the post_process_functions
        # NOTE(review): this deliberately overwrites the `batch_size` constructor
        # argument assigned above; the post-processing tests always use a batch of 2.
        self.batch_size = 2
        self.num_queries = 3
        self.num_classes = 2
        self.height = 18
        self.width = 18
        self.num_labels = num_labels

    def prepare_image_processor_dict(self):
        """Return the kwargs dict used to instantiate the image processor under test."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_pad": self.do_pad,
            "num_labels": self.num_labels,
        }

    def prepare_fake_eomt_outputs(self, batch_size, patch_offsets=None):
        """Fabricate a random `EomtForUniversalSegmentationOutput` for post-processing tests.

        Mask logits have shape (batch_size, num_queries, height, width); class logits
        have shape (batch_size, num_queries, num_classes + 1) — the extra slot is the
        "no object" class.
        """
        return EomtForUniversalSegmentationOutput(
            masks_queries_logits=torch.randn((batch_size, self.num_queries, self.height, self.width)),
            class_queries_logits=torch.randn((batch_size, self.num_queries, self.num_classes + 1)),
            patch_offsets=patch_offsets,
        )

    def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        """Delegate to the shared test helper to create random PIL/numpy/torch images."""
        return prepare_image_inputs(
            batch_size=self.batch_size,
            num_channels=self.num_channels,
            min_resolution=self.min_resolution,
            max_resolution=self.max_resolution,
            equal_resolution=equal_resolution,
            numpify=numpify,
            torchify=torchify,
        )
def prepare_semantic_single_inputs():
    """Load one ADE20K fixture example and return its (image, segmentation map) pair."""
    dataset = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
    sample = dataset[0]
    return sample["image"], sample["map"]
def prepare_semantic_batch_inputs():
    """Return the first two ADE20K fixture images together with their segmentation maps."""
    dataset = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
    images = list(dataset["image"][:2])
    maps = list(dataset["map"][:2])
    return images, maps
@require_torch
@require_vision
class EomtImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase):
    """Shared + EoMT-specific tests for the slow (`EomtImageProcessor`) and fast
    (`EomtImageProcessorFast`) image processors."""

    image_processing_class = EomtImageProcessor if is_vision_available() else None
    fast_image_processing_class = EomtImageProcessorFast if is_torchvision_available() else None

    def setUp(self):
        super().setUp()
        self.image_processor_tester = EomtImageProcessingTester(self)
        self.model_id = "tue-mps/coco_panoptic_eomt_large_640"

    @property
    def image_processor_dict(self):
        # Kwargs used to build every processor instance in this suite.
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        """Both processor classes expose the expected configuration attributes."""
        for image_processing_class in self.image_processor_list:
            image_processing = image_processing_class(**self.image_processor_dict)
            self.assertTrue(hasattr(image_processing, "image_mean"))
            self.assertTrue(hasattr(image_processing, "image_std"))
            self.assertTrue(hasattr(image_processing, "do_normalize"))
            self.assertTrue(hasattr(image_processing, "do_resize"))
            self.assertTrue(hasattr(image_processing, "size"))
            self.assertTrue(hasattr(image_processing, "do_rescale"))
            self.assertTrue(hasattr(image_processing, "rescale_factor"))
            self.assertTrue(hasattr(image_processing, "resample"))

    def test_image_processor_from_dict_with_kwargs(self):
        """`from_dict` keeps the configured size and lets a `size` kwarg override it."""
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 18})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})

    def test_call_numpy(self):
        """numpy inputs are accepted and resized to the configured (18, 18) output."""
        for image_processing_class in self.image_processor_list:
            # Initialize image_processing
            image_processing = image_processing_class(**self.image_processor_dict)
            # create random numpy tensors
            image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=True, numpify=True)
            for image in image_inputs:
                self.assertIsInstance(image, np.ndarray)
            # Test not batched input
            encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
            expected_output_image_shape = (1, 3, 18, 18)
            self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape)
            # Test batched
            encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
            expected_output_image_shape = (2, 3, 18, 18)
            self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape)

    @unittest.skip(reason="Not supported")
    def test_call_numpy_4_channels(self):
        pass

    def test_call_pil(self):
        """PIL inputs are accepted and resized to the configured (18, 18) output."""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=True)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test Non batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_output_image_shape = (1, 3, 18, 18)
        self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape)
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_output_image_shape = (2, 3, 18, 18)
        self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape)

    def test_call_pytorch(self):
        """torch tensor inputs are accepted and resized to the configured (18, 18) output."""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=True, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_output_image_shape = (1, 3, 18, 18)
        self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_output_image_shape = (2, 3, 18, 18)
        self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape)

    def test_slow_fast_equivalence(self):
        """Slow and fast processors agree (within tolerance) on one image + map."""
        if not self.test_slow_image_processor or not self.test_fast_image_processor:
            self.skipTest(reason="Skipping slow/fast equivalence test")
        if self.image_processing_class is None or self.fast_image_processing_class is None:
            self.skipTest(reason="Skipping slow/fast equivalence test as one of the image processors is not defined")
        dummy_image, dummy_map = prepare_semantic_single_inputs()
        image_processor_slow = self.image_processing_class(**self.image_processor_dict)
        image_processor_fast = self.fast_image_processing_class(**self.image_processor_dict)
        image_encoding_slow = image_processor_slow(dummy_image, segmentation_maps=dummy_map, return_tensors="pt")
        image_encoding_fast = image_processor_fast(dummy_image, segmentation_maps=dummy_map, return_tensors="pt")
        self.assertTrue(torch.allclose(image_encoding_slow.pixel_values, image_encoding_fast.pixel_values, atol=1e-1))
        self.assertLessEqual(
            torch.mean(torch.abs(image_encoding_slow.pixel_values - image_encoding_fast.pixel_values)).item(), 1e-3
        )
        # Lets check whether 99.9% of mask_labels values match or not.
        match_ratio = (image_encoding_slow.mask_labels[0] == image_encoding_fast.mask_labels[0]).float().mean().item()
        self.assertGreaterEqual(match_ratio, 0.999, "Mask labels do not match between slow and fast image processor.")

    def test_slow_fast_equivalence_batched(self):
        """Slow and fast processors agree (within tolerance) on a batch of two images + maps."""
        if not self.test_slow_image_processor or not self.test_fast_image_processor:
            self.skipTest(reason="Skipping slow/fast equivalence test")
        if self.image_processing_class is None or self.fast_image_processing_class is None:
            self.skipTest(reason="Skipping slow/fast equivalence test as one of the image processors is not defined")
        if hasattr(self.image_processor_tester, "do_center_crop") and self.image_processor_tester.do_center_crop:
            self.skipTest(
                reason="Skipping as do_center_crop is True and center_crop functions are not equivalent for fast and slow processors"
            )
        dummy_images, dummy_maps = prepare_semantic_batch_inputs()
        image_processor_slow = self.image_processing_class(**self.image_processor_dict)
        image_processor_fast = self.fast_image_processing_class(**self.image_processor_dict)
        encoding_slow = image_processor_slow(dummy_images, segmentation_maps=dummy_maps, return_tensors="pt")
        encoding_fast = image_processor_fast(dummy_images, segmentation_maps=dummy_maps, return_tensors="pt")
        self.assertTrue(torch.allclose(encoding_slow.pixel_values, encoding_fast.pixel_values, atol=1e-1))
        self.assertLessEqual(
            torch.mean(torch.abs(encoding_slow.pixel_values - encoding_fast.pixel_values)).item(), 1e-3
        )
        for idx in range(len(dummy_maps)):
            match_ratio = (encoding_slow.mask_labels[idx] == encoding_fast.mask_labels[idx]).float().mean().item()
            self.assertGreaterEqual(
                match_ratio, 0.999, "Mask labels do not match between slow and fast image processors."
            )

    def test_post_process_semantic_segmentation(self):
        """Semantic post-processing merges the per-patch outputs back to full image size."""
        processor = self.image_processing_class(**self.image_processor_dict)
        # Set longest_edge to None to test for semantic segmentatiom.
        processor.size = {"shortest_edge": 18, "longest_edge": None}
        image = load_image(url_to_local_path("http://images.cocodataset.org/val2017/000000039769.jpg"))
        inputs = processor(images=image, do_split_image=True, return_tensors="pt")
        patch_offsets = inputs["patch_offsets"]
        target_sizes = [image.size[::-1]]
        # For semantic segmentation, the BS of output is 2 coz, two patches are created for the image.
        outputs = self.image_processor_tester.prepare_fake_eomt_outputs(inputs["pixel_values"].shape[0], patch_offsets)
        segmentation = processor.post_process_semantic_segmentation(outputs, target_sizes)
        self.assertEqual(segmentation[0].shape, (image.height, image.width))

    def test_post_process_panoptic_segmentation(self):
        """Panoptic post-processing returns one segmentation + segments_info per batch item."""
        processor = self.image_processing_class(**self.image_processor_dict)
        image = load_image(url_to_local_path("http://images.cocodataset.org/val2017/000000039769.jpg"))
        original_sizes = [image.size[::-1], image.size[::-1]]
        # lets test for batched input of 2
        outputs = self.image_processor_tester.prepare_fake_eomt_outputs(2)
        segmentation = processor.post_process_panoptic_segmentation(outputs, original_sizes)
        self.assertTrue(len(segmentation) == 2)
        for el in segmentation:
            self.assertTrue("segmentation" in el)
            self.assertTrue("segments_info" in el)
            self.assertEqual(type(el["segments_info"]), list)
            self.assertEqual(el["segmentation"].shape, (image.height, image.width))

    def test_post_process_instance_segmentation(self):
        """Instance post-processing returns one segmentation + segments_info per batch item."""
        processor = self.image_processing_class(**self.image_processor_dict)
        image = load_image(url_to_local_path("http://images.cocodataset.org/val2017/000000039769.jpg"))
        original_sizes = [image.size[::-1], image.size[::-1]]
        # lets test for batched input of 2
        outputs = self.image_processor_tester.prepare_fake_eomt_outputs(2)
        segmentation = processor.post_process_instance_segmentation(outputs, original_sizes)
        self.assertTrue(len(segmentation) == 2)
        for el in segmentation:
            self.assertTrue("segmentation" in el)
            self.assertTrue("segments_info" in el)
            self.assertEqual(type(el["segments_info"]), list)
            self.assertEqual(el["segmentation"].shape, (image.height, image.width))
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/eomt/test_image_processing_eomt.py",
"license": "Apache License 2.0",
"lines": 246,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:tests/models/eomt/test_modeling_eomt.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch EoMT model."""
import unittest
import requests
from transformers import AutoImageProcessor, EomtConfig, EomtForUniversalSegmentation, pipeline
from transformers.testing_utils import require_torch, require_torch_accelerator, require_torch_fp16, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
class EomtForUniversalSegmentationTester:
    """Provides a tiny EoMT config plus random pixel/mask/class inputs for the model tests."""

    def __init__(
        self,
        parent,
        batch_size=2,
        is_training=True,
        image_size=40,
        patch_size=2,
        num_queries=5,
        num_register_tokens=19,
        num_labels=4,
        hidden_size=8,
        num_attention_heads=2,
        num_hidden_layers=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.num_queries = num_queries
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_labels = num_labels
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.num_hidden_layers = num_hidden_layers
        self.num_register_tokens = num_register_tokens
        # Sequence length = image patches + 1 (CLS token, presumably) + register tokens.
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1 + self.num_register_tokens

    def get_config(self):
        """Build an `EomtConfig` with the tester's small dimensions."""
        config = {
            "image_size": self.image_size,
            "patch_size": self.patch_size,
            "num_labels": self.num_labels,
            "hidden_size": self.hidden_size,
            "num_attention_heads": self.num_attention_heads,
            "num_hidden_layers": self.num_hidden_layers,
            "num_register_tokens": self.num_register_tokens,
            "num_queries": self.num_queries,
            "num_blocks": 1,
        }
        return EomtConfig(**config)

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, binary mask_labels, binary class_labels) on torch_device."""
        pixel_values = floats_tensor([self.batch_size, 3, self.image_size, self.image_size]).to(torch_device)
        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.image_size, self.image_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()
        config = self.get_config()
        return config, pixel_values, mask_labels, class_labels

    def prepare_config_and_inputs_for_common(self):
        """Inputs for the common tests: pixel_values only (no labels — see test_training)."""
        config, pixel_values, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict

    def prepare_config_and_inputs_for_training(self):
        """Inputs for training tests: pixel_values plus mask/class labels."""
        config, pixel_values, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "mask_labels": mask_labels, "class_labels": class_labels}
        return config, inputs_dict
@require_torch
class EomtForUniversalSegmentationTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model tests for `EomtForUniversalSegmentation`."""

    all_model_classes = (EomtForUniversalSegmentation,) if is_torch_available() else ()
    pipeline_model_mapping = {"image-segmentation": EomtForUniversalSegmentation} if is_torch_available() else {}
    is_encoder_decoder = False
    test_missing_keys = False
    test_torch_exportable = False

    def setUp(self):
        self.model_tester = EomtForUniversalSegmentationTester(self)
        self.config_tester = ConfigTester(self, config_class=EomtConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model_with_labels(self):
        """A forward pass with mask/class labels produces a loss."""
        size = (self.model_tester.image_size,) * 2
        inputs = {
            "pixel_values": torch.randn((2, 3, *size), device=torch_device),
            "mask_labels": torch.randn((2, 10, *size), device=torch_device),
            "class_labels": torch.zeros(2, 10, device=torch_device).long(),
        }
        config = self.model_tester.get_config()
        model = EomtForUniversalSegmentation(config).to(torch_device)
        outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)

    @unittest.skip(reason="EoMT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="EoMT does not have a get_input_embeddings method")
    def test_model_get_set_embeddings(self):
        pass

    @unittest.skip(reason="EoMT is not a generative model")
    def test_generate_without_input_ids(self):
        pass

    @unittest.skip(reason="EoMT does not use token embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    def test_training(self):
        # We override this test because EoMT requires `mask_labels` and `class_labels` for training,
        # which are not standard labels that `_prepare_for_class` can generate. We can't include
        # these labels in `prepare_config_and_inputs_for_common` because that would break determinism
        # tests (the Hungarian matching in the loss computation is non-deterministic).
        if not self.model_tester.is_training:
            self.skipTest(reason="ModelTester is not configured to run training tests")
        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_training()
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
@require_torch
class EomtForUniversalSegmentationIntegrationTest(unittest.TestCase):
    """Slow integration tests against released EoMT checkpoints, pinning exact logits
    and post-processed segmentations."""

    def setUp(self):
        self.model_id = "tue-mps/coco_panoptic_eomt_large_640"

    @slow
    def test_inference(self):
        """Panoptic checkpoint: output shapes and logits slices match recorded values."""
        model = EomtForUniversalSegmentation.from_pretrained(self.model_id, device_map="auto")
        processor = AutoImageProcessor.from_pretrained(self.model_id)
        image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
        inputs = processor(images=image, return_tensors="pt").to(model.device)
        with torch.inference_mode():
            outputs = model(**inputs)
        self.assertTrue(outputs.class_queries_logits.shape == (1, 200, 134))
        self.assertTrue(outputs.masks_queries_logits.shape == (1, 200, 160, 160))
        # fmt: off
        EXPECTED_SLICE = torch.tensor([
            [ 13.2540,   8.9279,   8.6631,  12.3760,  10.1429],
            [ -3.4815, -36.4630, -45.5604, -46.8404, -37.5099],
            [ -6.8689, -44.4206, -62.7591, -59.2928, -47.7035],
            [ -2.9380, -42.0659, -57.4382, -55.1537, -43.5142],
            [ -8.4387, -38.5275, -53.1383, -47.0064, -38.9667],
        ]).to(model.device)
        # fmt: on
        output_slice = outputs.masks_queries_logits[0, 0, :5, :5]
        torch.testing.assert_close(output_slice, EXPECTED_SLICE, rtol=1e-2, atol=1e-2)
        # fmt: off
        EXPECTED_SLICE = torch.tensor([
            [-0.6977, -6.4907, -4.1178, -6.5554, -6.6529],
            [-0.3650, -6.6560, -4.0143, -6.5776, -6.5879],
            [-0.8820, -6.7175, -3.5334, -6.8569, -6.2415],
            [ 0.4502, -5.3911, -3.0232, -5.9411, -6.3243],
            [ 0.3157, -5.6321, -2.6716, -5.5740, -5.5607],
        ]).to(model.device)
        # fmt: on
        output_slice = outputs.class_queries_logits[0, :5, :5]
        torch.testing.assert_close(output_slice, EXPECTED_SLICE, rtol=1e-2, atol=1e-2)

    @require_torch_accelerator
    @require_torch_fp16
    @slow
    def test_inference_fp16(self):
        """fp16 inference runs and preserves output shapes."""
        model = EomtForUniversalSegmentation.from_pretrained(self.model_id, dtype=torch.float16, device_map="auto")
        processor = AutoImageProcessor.from_pretrained(self.model_id)
        image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
        inputs = processor(images=image, return_tensors="pt").to(model.device)
        with torch.inference_mode():
            outputs = model(**inputs)
        self.assertTrue(outputs.class_queries_logits.shape == (1, 200, 134))
        self.assertTrue(outputs.masks_queries_logits.shape == (1, 200, 160, 160))

    @slow
    def test_semantic_segmentation_inference(self):
        """Semantic checkpoint: post-processed prediction matches a recorded slice.

        NOTE: batch dim of the raw outputs is 2 because the processor splits the
        image into two patches for semantic inference.
        """
        model_id = "tue-mps/ade20k_semantic_eomt_large_512"
        model = EomtForUniversalSegmentation.from_pretrained(model_id, device_map="auto")
        processor = AutoImageProcessor.from_pretrained(model_id)
        image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
        inputs = processor(images=image, return_tensors="pt").to(model.device)
        with torch.inference_mode():
            outputs = model(**inputs)
        self.assertTrue(outputs.class_queries_logits.shape == (2, 100, 151))
        self.assertTrue(outputs.masks_queries_logits.shape == (2, 100, 128, 128))
        preds = processor.post_process_semantic_segmentation(outputs, target_sizes=[(image.size[1], image.size[0])])[0]
        self.assertTrue(preds.shape == (image.size[1], image.size[0]))
        # fmt: off
        EXPECTED_SLICE = torch.tensor([
            [39, 39, 39, 39, 39, 39, 39, 39, 39, 39],
            [39, 39, 39, 39, 39, 39, 39, 39, 39, 39],
            [39, 39, 39, 39, 39, 39, 39, 39, 39, 39],
            [39, 39, 39, 39, 39, 39, 39, 39, 39, 39],
            [39, 39, 39, 39, 39, 39, 39, 39, 39, 39],
            [39, 39, 39, 39, 39, 39, 39, 39, 39, 39],
            [39, 39, 39, 39, 39, 39, 39, 39, 39, 39],
            [39, 39, 39, 39, 39, 39, 39, 39, 39, 39],
            [39, 39, 39, 39, 39, 39, 39, 39, 39, 39],
            [39, 39, 39, 39, 39, 39, 39, 39, 39, 39]
        ], device=model.device)
        # fmt: on
        output_slice = preds[:10, :10]
        torch.testing.assert_close(output_slice, EXPECTED_SLICE, rtol=1e-2, atol=1e-2)

    @slow
    def test_panoptic_segmentation_inference(self):
        """Panoptic checkpoint: segmentation map and segments_info match recorded values."""
        model = EomtForUniversalSegmentation.from_pretrained(self.model_id, device_map="auto")
        processor = AutoImageProcessor.from_pretrained(self.model_id)
        image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
        inputs = processor(images=image, return_tensors="pt").to(model.device)
        with torch.inference_mode():
            outputs = model(**inputs)
        self.assertTrue(outputs.class_queries_logits.shape == (1, 200, 134))
        self.assertTrue(outputs.masks_queries_logits.shape == (1, 200, 160, 160))
        preds = processor.post_process_panoptic_segmentation(outputs, target_sizes=[(image.size[1], image.size[0])])[0]
        segmentation, segments_info = preds["segmentation"], preds["segments_info"]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([
            [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
            [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
            [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
            [-1, -1, -1, -1, -1,  2,  2,  2,  2,  2],
            [-1, -1, -1,  2,  2,  2,  2,  2,  2,  2],
            [ 2,  2,  2,  2,  2,  2,  2,  2,  2,  2],
            [ 2,  2,  2,  2,  2,  2,  2,  2,  2,  2],
            [ 2,  2,  2,  2,  2,  2,  2,  2,  2,  2],
            [ 2,  2,  2,  2,  2,  2,  2,  2,  2,  2],
            [ 2,  2,  2,  2,  2,  2,  2,  2,  2,  2]
        ], device=model.device)
        EXPECTED_SEGMENTS_INFO = [
            {"id": 0, "label_id": 15, "score": 0.99935},
            {"id": 1, "label_id": 15, "score": 0.998688},
            {"id": 2, "label_id": 57, "score": 0.954325},
            {"id": 3, "label_id": 65, "score": 0.997285},
            {"id": 4, "label_id": 65, "score": 0.99711}
        ]
        # fmt: on
        output_slice = segmentation[:10, :10]
        torch.testing.assert_close(output_slice, EXPECTED_SLICE, rtol=1e-2, atol=1e-2)
        for actual, expected in zip(segments_info, EXPECTED_SEGMENTS_INFO):
            self.assertEqual(actual["id"], expected["id"])
            self.assertEqual(actual["label_id"], expected["label_id"])
            self.assertAlmostEqual(actual["score"], expected["score"], delta=1e-3)

    @slow
    def test_instance_segmentation_inference(self):
        """Instance checkpoint: segmentation map and segments_info match recorded values."""
        model_id = "tue-mps/coco_instance_eomt_large_640"
        model = EomtForUniversalSegmentation.from_pretrained(model_id, device_map="auto")
        processor = AutoImageProcessor.from_pretrained(model_id)
        image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
        inputs = processor(images=image, return_tensors="pt").to(model.device)
        with torch.inference_mode():
            outputs = model(**inputs)
        self.assertTrue(outputs.class_queries_logits.shape == (1, 200, 81))
        self.assertTrue(outputs.masks_queries_logits.shape == (1, 200, 160, 160))
        preds = processor.post_process_instance_segmentation(outputs, target_sizes=[(image.size[1], image.size[0])])[0]
        segmentation, segments_info = preds["segmentation"], preds["segments_info"]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([
            [-1., -1., -1., -1., -1., -1., -1., -1., -1., -1.],
            [-1., -1., -1., -1., -1., -1., -1., -1., -1., -1.],
            [-1., -1., -1., -1., -1., -1., -1., -1., -1., -1.],
            [-1., -1., -1.,  0.,  0.,  1.,  1.,  1.,  1.,  1.],
            [ 0.,  0.,  1.,  1.,  1.,  1.,  1.,  1.,  1.,  1.],
            [ 1.,  1.,  1.,  1.,  1.,  1.,  1.,  1.,  1.,  1.],
            [ 1.,  1.,  1.,  1.,  1.,  1.,  1.,  1.,  1.,  1.],
            [ 1.,  1.,  1.,  1.,  1.,  1.,  1.,  1.,  1.,  1.],
            [ 1.,  1.,  1.,  1.,  1.,  1.,  1.,  1.,  1.,  1.],
            [ 1.,  1.,  1.,  1.,  1.,  1.,  1.,  1.,  1.,  1.]
        ], device=model.device)
        EXPECTED_SEGMENTS_INFO = [
            {'id': 0, 'label_id': 57, 'score': 0.871247},
            {'id': 1, 'label_id': 57, 'score': 0.821225},
            {'id': 2, 'label_id': 15, 'score': 0.976252},
            {'id': 3, 'label_id': 65, 'score': 0.972960},
            {'id': 4, 'label_id': 65, 'score': 0.981109},
            {'id': 5, 'label_id': 15, 'score': 0.972689}
        ]
        # fmt: on
        output_slice = segmentation[:10, :10]
        torch.testing.assert_close(output_slice, EXPECTED_SLICE, rtol=1e-2, atol=1e-2)
        for actual, expected in zip(segments_info, EXPECTED_SEGMENTS_INFO):
            self.assertEqual(actual["id"], expected["id"])
            self.assertEqual(actual["label_id"], expected["label_id"])
            self.assertAlmostEqual(actual["score"], expected["score"], delta=1e-3)

    @slow
    def test_segmentation_pipeline(self):
        """The image-segmentation pipeline yields the expected panoptic labels."""
        image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
        pipe = pipeline(model=self.model_id, subtask="panoptic", device=torch_device)
        output = pipe(image)
        EXPECTED_OUTPUT_LABELS = ["cat", "cat", "couch", "remote", "remote"]
        output_labels = [segment["label"] for segment in output]
        self.assertEqual(output_labels, EXPECTED_OUTPUT_LABELS)
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/eomt/test_modeling_eomt.py",
"license": "Apache License 2.0",
"lines": 299,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:src/transformers/models/gemma3n/convert_gemma3n_weights.py | # Copyright 2025 Google Inc. HuggingFace Inc. team. All rights reserved.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Utility to convert Gemma models from Orbax to HF Transformers checkpoint.
python src/transformers/models/gemma3n/convert_gemma3n_weights.py \
--variant='gemma3n_e4b' \
--tokenizer_path="$HOME/tokenizers/gemma-3n-tokenizer.model" \
--checkpoint_path="$HOME/checkpoints/gemma-3n-orbax/" \
--output_path="$HOME/checkpoints/gemma-3n-safetensors/"
"""
import json
import os
import re
from collections.abc import Iterable, Mapping
from typing import Any
import accelerate
import numpy as np
import torch
import tree
from absl import app, flags, logging
from orbax import checkpoint as obc
from transformers import (
Gemma3nAudioConfig,
Gemma3nAudioFeatureExtractor,
Gemma3nConfig,
Gemma3nForConditionalGeneration,
Gemma3nProcessor,
Gemma3nTextConfig,
Gemma3nVisionConfig,
GemmaTokenizerFast,
GenerationConfig,
SiglipImageProcessorFast,
)
from transformers.image_utils import PILImageResampling
# ==== Internal Constants and Classes ====
_CHAT_TEMPLATE = """{{ bos_token }}
{%- if messages[0]['role'] == 'system' -%}
{%- if messages[0]['content'] is string -%}
{%- set first_user_prefix = messages[0]['content'] + '\n\n' -%}
{%- else -%}
{%- set first_user_prefix = messages[0]['content'][0]['text'] + '\n\n' -%}
{%- endif -%}
{%- set loop_messages = messages[1:] -%}
{%- else -%}
{%- set first_user_prefix = "" -%}
{%- set loop_messages = messages -%}
{%- endif -%}
{%- for message in loop_messages -%}
{%- if (message['role'] == 'user') != (loop.index0 % 2 == 0) -%}
{{ raise_exception("Conversation roles must alternate user/assistant/user/assistant/...") }}
{%- endif -%}
{%- if (message['role'] == 'assistant') -%}
{%- set role = "model" -%}
{%- else -%}
{%- set role = message['role'] -%}
{%- endif -%}
{{ '<start_of_turn>' + role + '\n' + (first_user_prefix if loop.first else "") }}
{%- if message['content'] is string -%}
{{ message['content'] | trim }}
{%- elif message['content'] is iterable -%}
{%- for item in message['content'] -%}
{%- if item['type'] == 'audio' -%}
{{ '<audio_soft_token>' }}
{%- elif item['type'] == 'image' -%}
{{ '<image_soft_token>' }}
{%- elif item['type'] == 'text' -%}
{{ item['text'] | trim }}
{%- endif -%}
{%- endfor -%}
{%- else -%}
{{ raise_exception("Invalid content type") }}
{%- endif -%}
{{ '<end_of_turn>\n' }}
{%- endfor -%}
{%- if add_generation_prompt -%}
{{'<start_of_turn>model\n'}}
{%- endif -%}
"""
# Supported floating-point precisions for the conversion flags below.
_DTYPES = {"float32", "bfloat16", "float16"}
_SLIDING_WINDOW_PATTERN = 5
# Orbax checkpoint path prefixes for the audio encoder parameters.
_AUDIO_ENCODER_PARAMETER = "AudioEncoder/encoder"
_AUDIO_ENCODER_CONFORMER = f"{_AUDIO_ENCODER_PARAMETER}/conformer/stacked_layers"
_AUDIO_ENCODER_SSCP = f"{_AUDIO_ENCODER_PARAMETER}/feature"
# Orbax checkpoint path prefixes for the text transformer parameters.
_TRANSFORMER_PARAMETER = "transformer"
_TRANSFORMER_ALTUP_PROJ = f"{_TRANSFORMER_PARAMETER}/altup_projection_"
_TRANSFORMER_ALTUP_UNEMB = f"{_TRANSFORMER_PARAMETER}/altup_unembed_projection_"
_TRANSFORMER_DECODER_BLOCK = f"{_TRANSFORMER_PARAMETER}/stacked_layers/attention_type_"
_TRANSFORMER_DECODER_BLOCK_LEN = len(_TRANSFORMER_DECODER_BLOCK)
_TRANSFORMER_EMBEDDER = f"{_TRANSFORMER_PARAMETER}/embedder"
_TRANSFORMER_FINAL_NORM = "transformer/final_norm"
# Prefix added to parameter paths by post-training (RL) checkpoints; stripped during conversion.
_TRANSFORMER_POST_TRAINING_PREFIX = "rlx_networks/policy_network/"
_TRANSFORMER_POST_TRAINING_PREFIX_LEN = len(_TRANSFORMER_POST_TRAINING_PREFIX)
# _MOBILE_NET_CONFIG = Gemma3nVisionConfig.from_pretrained("")
# MobileNet (vision tower) parameter-path prefixes.
_MOBILE_NET_PREFIX = "mobilenet"
_MOBILE_NET_TIMM_SUMMED_BLOCK_SIZES = [3, 8, 45, 84]
_MOBILE_NET_CONV = "block_group_conv2d_"
_MOBILE_NET_FIB = "block_group_fused_ib_"
_MOBILE_NET_MQA = "block_group_mmqa_"
_MOBILE_NET_MSFA = "block_adapter_"
_MOBILE_NET_UIB = "block_group_uib_"
# (block_group, block_index) pairs of UIB blocks that carry a depthwise conv at the
# start / in the middle — presumably mirrors the timm MobileNet architecture; verify
# against the timm model definition if the layout changes.
_MOBILE_NET_UIB_HAS_DW_START = {
    (1, 0),
    (1, 1),
    (1, 2),
    (1, 3),
    (1, 4),
    (2, 0),
    (2, 1),
    (2, 2),
    (2, 3),
    (2, 4),
    (2, 5),
    (2, 6),
    (2, 7),
    (3, 0),
}
_MOBILE_NET_UIB_HAS_DW_MID = {
    (1, 0),
    (2, 0),
    (3, 0),
}
# Known model variants and their default configurations.
_VARIANT_GEMMA_3_2B = "gemma3n_e2b"
_VARIANT_GEMMA_3_4B = "gemma3n_e4b"
_VARIANTS: Mapping[str, Gemma3nConfig] = {
    _VARIANT_GEMMA_3_2B: Gemma3nConfig(
        text_config=Gemma3nTextConfig(
            intermediate_size=2048 * 4,
            num_hidden_layers=30,
            activation_sparsity_pattern=(0.95,) * 10 + (0.0,) * 20,
            num_kv_shared_layers=10,
        ),
        vision_config=Gemma3nVisionConfig(),
        audio_config=Gemma3nAudioConfig(),
    ),
    _VARIANT_GEMMA_3_4B: Gemma3nConfig(
        text_config=Gemma3nTextConfig(),
        vision_config=Gemma3nVisionConfig(),
        audio_config=Gemma3nAudioConfig(),
    ),
}
# ==== Flags ====
# Command-line flags (absl). Dtypes can be chosen independently per modality.
_AUDIO_DTYPE = flags.DEFINE_enum(
    name="audio_dtype",
    default="bfloat16",
    help="The floating point precision (aka dtype) of the model.",
    enum_values=_DTYPES,
)
_CHECKPOINT_PATH = flags.DEFINE_string(
    name="checkpoint_path",
    default=None,
    help="Path to the Orbax checkpoint.",
    required=True,
)
_INCLUDE_CHAT_TEMPLATE = flags.DEFINE_bool(
    name="include_chat_template", default=False, help="If true, will save the default chat template with the tokenizer"
)
_OUTPUT_PATH = flags.DEFINE_string(
    name="output_path",
    default=None,
    help="Path to store the HF checkpoint.",
    required=True,
)
_TRANSFORMER_DTYPE = flags.DEFINE_enum(
    name="text_dtype",
    default="bfloat16",
    help="The floating point precision (aka dtype) of the model.",
    enum_values=_DTYPES,
)
_TOKENIZER_PATH = flags.DEFINE_string(
    name="tokenizer_path",
    default=None,
    help="Path to the SentencePiece model file.",
    required=True,
)
_VARIANT = flags.DEFINE_enum(
    name="variant",
    default=_VARIANT_GEMMA_3_4B,
    help="The model variant to convert.",
    enum_values=set(_VARIANTS.keys()),
)
_VERBOSE = flags.DEFINE_bool(
    name="verbose",
    default=False,
    help="If true, log the path, shape, and dtype of every converted layer.",
)
_VISION_DTYPE = flags.DEFINE_enum(
    name="vision_dtype",
    default="bfloat16",
    help="The floating point precision (aka dtype) of the model.",
    enum_values=_DTYPES,
)
def convert_audio_encoder_weights(
    config: Gemma3nAudioConfig,
    path: str,
    param: str,
    weights: np.ndarray,
) -> Iterable[tuple[str, np.ndarray]]:
    """Converts one Orbax audio-encoder array into Hugging Face tensors.

    Args:
        config: Audio tower configuration; supplies layer counts and hidden sizes.
        path: Slash-delimited Orbax parameter path.
        param: Name of the leaf parameter stored at ``path``.
        weights: The raw source array.

    Returns:
        Pairs of ``(hf_path, array)``. A single source array may yield several
        targets (e.g. a fused QKV projection splits into q/k/v weights), or
        none if the path is not recognized.

    Raises:
        ValueError: if the number of produced paths and weights diverge.
    """
    converted_paths: list[str] = []
    converted_weights: list[Any] = []
    if path.startswith(_AUDIO_ENCODER_CONFORMER):
        # Conformer parameters are stacked with the layer index as the leading dim.
        assert weights.shape[0] == config.conf_num_hidden_layers
        for i, matrix in enumerate(weights):
            if "fflayer_end" in path:
                base = f"conformer.{i}.ffw_layer_end"
                if path.endswith("ffn_layer1"):
                    converted_paths.append(f"{base}.ffw_layer_1.weight")
                    converted_weights.append(matrix.transpose())
                elif path.endswith("ffn_layer2"):
                    converted_paths.append(f"{base}.ffw_layer_2.weight")
                    converted_weights.append(matrix.transpose())
                elif path.endswith("post_layer_norm"):
                    converted_paths.append(f"{base}.post_layer_norm.weight")
                    converted_weights.append(matrix)
                elif path.endswith("pre_layer_norm"):
                    converted_paths.append(f"{base}.pre_layer_norm.weight")
                    converted_weights.append(matrix)
            elif "fflayer_start" in path:
                base = f"conformer.{i}.ffw_layer_start"
                if path.endswith("ffn_layer1"):
                    converted_paths.append(f"{base}.ffw_layer_1.weight")
                    converted_weights.append(matrix.transpose())
                elif path.endswith("ffn_layer2"):
                    converted_paths.append(f"{base}.ffw_layer_2.weight")
                    converted_weights.append(matrix.transpose())
                elif path.endswith("post_layer_norm"):
                    converted_paths.append(f"{base}.post_layer_norm.weight")
                    converted_weights.append(matrix)
                elif path.endswith("pre_layer_norm"):
                    converted_paths.append(f"{base}.pre_layer_norm.weight")
                    converted_weights.append(matrix)
            elif path.endswith("final_ln"):
                converted_paths.append(f"conformer.{i}.norm.weight")
                converted_weights.append(matrix)
            elif "lconv" in path:
                base = f"conformer.{i}.lconv1d"
                if path.endswith("conv_norm"):
                    converted_paths.append(f"{base}.conv_norm.weight")
                    converted_weights.append(matrix)
                elif path.endswith("depthwise_conv1d"):
                    converted_paths.append(f"{base}.depthwise_conv1d.weight")
                    converted_weights.append(matrix.transpose())
                elif path.endswith("linear_end"):
                    converted_paths.append(f"{base}.linear_end.weight")
                    converted_weights.append(matrix.transpose())
                elif path.endswith("linear_start"):
                    converted_paths.append(f"{base}.linear_start.weight")
                    converted_weights.append(matrix.transpose())
                elif path.endswith("ln"):
                    converted_paths.append(f"{base}.pre_layer_norm.weight")
                    converted_weights.append(matrix)
            elif "trans_atten" in path:
                base = f"conformer.{i}.attention"
                if param == "per_dim_scale":
                    converted_paths.append(f"{base}.attn.per_dim_scale")
                    converted_weights.append(matrix)
                # NOTE(review): `if`, not `elif` — the param check above is
                # independent of the path-suffix checks below; presumably the
                # per_dim_scale param never shares a path with these suffixes.
                if path.endswith("query_key_value_projection"):
                    # Fused QKV: split along the projection axis into q/k/v.
                    converted_paths.extend(
                        [f"{base}.attn.q_proj.weight", f"{base}.attn.k_proj.weight", f"{base}.attn.v_proj.weight"]
                    )
                    converted_weights.extend(
                        [
                            m.reshape(config.hidden_size, config.hidden_size).transpose()
                            for m in matrix.transpose(1, 0, 2, 3)
                        ]
                    )
                elif path.endswith("pos_proj"):
                    converted_paths.append(f"{base}.attn.relative_position_embedding.pos_proj.weight")
                    converted_weights.append(matrix.reshape(config.hidden_size, config.hidden_size).transpose())
                elif path.endswith("post"):
                    converted_paths.append(f"{base}.post.weight")
                    converted_weights.append(matrix.transpose(2, 0, 1).reshape(config.hidden_size, config.hidden_size))
                elif path.endswith("post_norm"):
                    converted_paths.append(f"{base}.post_norm.weight")
                    converted_weights.append(matrix)
                elif path.endswith("pre_norm"):
                    converted_paths.append(f"{base}.pre_attn_norm.weight")
                    converted_weights.append(matrix)
    elif path.startswith(_AUDIO_ENCODER_SSCP):
        # Sub-sample conv projection (feature frontend before the conformer).
        if path.endswith("input_proj"):
            converted_paths.append("subsample_conv_projection.input_proj_linear.weight")
            converted_weights.append(
                weights.transpose(2, 0, 1).reshape(config.hidden_size, config.sscp_conv_channel_size[1] ** 2)
            )
        elif "norm_" in path:
            # The trailing digit selects which subsampling conv block the norm belongs to.
            index = int(path[-1])
            converted_paths.append(f"subsample_conv_projection.conv_{index}.norm.weight")
            converted_weights.append(weights)
        elif "subsampling_" in path:
            index = int(path[-1])
            converted_paths.append(f"subsample_conv_projection.conv_{index}.conv.weight")
            converted_weights.append(weights.transpose(3, 2, 0, 1))
    # Sanity check: the two parallel lists must stay in lockstep.
    if (cpl := len(converted_paths)) != (cwl := len(converted_weights)):
        raise ValueError(
            "The `converted_paths` and `converted_weights` should be the same "
            f"length. Got {cpl} and {cwl}, respectively, for {path}."
        )
    return zip(converted_paths, converted_weights)
def convert_transformer_weights(
    config: Gemma3nTextConfig,
    path: str,
    param: str,
    weights: np.ndarray,
) -> Iterable[tuple[str, np.ndarray]]:
    """Converts one Orbax language-model array into Hugging Face tensors.

    Args:
        config: Text model configuration; supplies layer counts and sizes.
        path: Slash-delimited Orbax parameter path.
        param: Name of the leaf parameter stored at ``path``.
        weights: The raw source array.

    Returns:
        Pairs of ``(hf_path, array)``; one source array may map to several
        target tensors (e.g. fused KV or gating projections), or none if the
        path is not recognized.

    Raises:
        ValueError: if the number of produced paths and weights diverge.
    """
    if path.startswith(_TRANSFORMER_POST_TRAINING_PREFIX):
        # Strip the post-training wrapper so pre- and post-training exports
        # share the same path handling below.
        path = path[_TRANSFORMER_POST_TRAINING_PREFIX_LEN:]
    converted_paths: list[str] = []
    converted_weights: list[Any] = []
    if path.startswith(_TRANSFORMER_ALTUP_PROJ):
        # Trailing digit is the projection index.
        index = int(path[-1])
        converted_paths.append(f"altup_projections.{index}.weight")
        converted_weights.append(weights.transpose())
    elif path.startswith(_TRANSFORMER_ALTUP_UNEMB):
        index = int(path[-1])
        converted_paths.append(f"altup_unembed_projections.{index}.weight")
        converted_weights.append(weights.transpose())
    elif path.startswith(_TRANSFORMER_DECODER_BLOCK):
        # Layers are stored grouped by attention type: the digit after the block
        # prefix is the position within the sliding-window pattern, and the
        # leading array dim stacks every layer of that type.
        attention_type_index = int(path[_TRANSFORMER_DECODER_BLOCK_LEN])
        # NOTE(review): float division in an assert — relies on the comparison
        # holding despite the float RHS; confirm num_hidden_layers is always a
        # multiple of _SLIDING_WINDOW_PATTERN.
        assert weights.shape[0] == config.num_hidden_layers / _SLIDING_WINDOW_PATTERN
        for i, matrix in enumerate(weights):
            # Re-interleave into the flat HF layer ordering.
            layer_idx = _SLIDING_WINDOW_PATTERN * i + attention_type_index
            base_path = f"layers.{layer_idx}"
            if "altup" in path:
                altup_path = f"{base_path}.altup"
                if param == "correct_output_scale":
                    converted_paths.append(f"{altup_path}.correct_output_scale")
                    converted_weights.append(matrix)
                elif param == "correction_coefs":
                    converted_paths.append(f"{altup_path}.correction_coefs.weight")
                    converted_weights.append(matrix.transpose())
                elif param == "prediction_coefs":
                    converted_paths.append(f"{altup_path}.prediction_coefs.weight")
                    # Clip coefficients to the configured range, matching the
                    # clamp applied at training time.
                    converted_weights.append(
                        np.clip(
                            matrix.reshape(config.altup_num_inputs, config.altup_num_inputs**2).transpose(),
                            -config.altup_coef_clip,
                            config.altup_coef_clip,
                        )
                    )
                # NOTE(review): `if`, not `elif` — the path-suffix checks are
                # independent of the param checks above.
                if path.endswith("modality_router"):
                    converted_paths.append(f"{altup_path}.modality_router.weight")
                    converted_weights.append(matrix.transpose())
                elif path.endswith("router_norm_layer"):
                    converted_paths.append(f"{altup_path}.router_norm.weight")
                    converted_weights.append(matrix)
            elif path.endswith("attn/attn_vec_einsum"):
                converted_paths.append(f"{base_path}.self_attn.o_proj.weight")
                converted_weights.append(
                    matrix.transpose(2, 0, 1).reshape(config.hidden_size, config.num_attention_heads * config.head_dim)
                )
            elif path.endswith("attn/kv_einsum"):
                # Fused KV einsum: split into separate k/v projections.
                converted_paths.extend(
                    [
                        f"{base_path}.self_attn.k_proj.weight",
                        f"{base_path}.self_attn.v_proj.weight",
                    ]
                )
                k_proj_weights, v_proj_weights = matrix.transpose(0, 2, 1, 3)
                kv_proj_shape = (config.hidden_size, config.num_key_value_heads * config.head_dim)
                converted_weights.extend(
                    [
                        k_proj_weights.reshape(kv_proj_shape).transpose(),
                        v_proj_weights.reshape(kv_proj_shape).transpose(),
                    ]
                )
            elif path.endswith("attn/q_einsum"):
                converted_paths.append(f"{base_path}.self_attn.q_proj.weight")
                converted_weights.append(
                    matrix.transpose(1, 0, 2)
                    .reshape(config.hidden_size, config.num_attention_heads * config.head_dim)
                    .transpose()
                )
            elif path.endswith("attn/query_norm"):
                converted_paths.append(f"{base_path}.self_attn.q_norm.weight")
                converted_weights.append(matrix)
            elif path.endswith("attn/key_norm"):
                converted_paths.append(f"{base_path}.self_attn.k_norm.weight")
                converted_weights.append(matrix)
            elif path.endswith("laurel_block/linear_left"):
                converted_paths.append(f"{base_path}.laurel.linear_left.weight")
                converted_weights.append(matrix.transpose())
            elif path.endswith("laurel_block/linear_right"):
                converted_paths.append(f"{base_path}.laurel.linear_right.weight")
                converted_weights.append(matrix.transpose())
            elif path.endswith("mlp/gating_einsum"):
                # Fused gate/up projection: the leading dim separates the two.
                converted_paths.extend([f"{base_path}.mlp.gate_proj.weight", f"{base_path}.mlp.up_proj.weight"])
                gate_proj_weight, up_proj_weight = matrix
                converted_weights.extend([gate_proj_weight, up_proj_weight])
            elif path.endswith("mlp/linear"):
                converted_paths.append(f"{base_path}.mlp.down_proj.weight")
                converted_weights.append(matrix.transpose())
            elif path.endswith("per_layer_input_gate"):
                converted_paths.append(f"{base_path}.per_layer_input_gate.weight")
                converted_weights.append(matrix.transpose())
            elif path.endswith("per_layer_projection"):
                converted_paths.append(f"{base_path}.per_layer_projection.weight")
                converted_weights.append(matrix.transpose())
            elif path.endswith("post_attention_norm"):
                converted_paths.append(f"{base_path}.post_attention_layernorm.weight")
                converted_weights.append(matrix)
            elif path.endswith("post_ffw_norm"):
                converted_paths.append(f"{base_path}.post_feedforward_layernorm.weight")
                converted_weights.append(matrix)
            elif path.endswith("post_laurel_norm"):
                converted_paths.append(f"{base_path}.laurel.post_laurel_norm.weight")
                converted_weights.append(matrix)
            elif path.endswith("post_per_layer_input_norm"):
                converted_paths.append(f"{base_path}.post_per_layer_input_norm.weight")
                converted_weights.append(matrix)
            elif path.endswith("pre_attention_norm"):
                converted_paths.append(f"{base_path}.input_layernorm.weight")
                converted_weights.append(matrix)
            elif path.endswith("pre_ffw_norm"):
                converted_paths.append(f"{base_path}.pre_feedforward_layernorm.weight")
                converted_weights.append(matrix)
    elif path == _TRANSFORMER_EMBEDDER:
        if param == "input_embedding":
            converted_paths.append("embed_tokens.weight")
            # Gemma 3n model doesn't have soft tokens or "end of" tokens for images and audio in its input and output
            # embeddings, so we resize to avoid bugs observed with Mllama
            pre_expansion_embeddings = weights
            pad_token_slice = slice(config.pad_token_id, config.pad_token_id + 1)
            new_embeddings = np.repeat(pre_expansion_embeddings[pad_token_slice], 256, axis=0)
            weights = np.vstack([pre_expansion_embeddings, new_embeddings])
            converted_weights.append(weights)
        elif param == "per_layer_embeddings":
            converted_paths.append("embed_tokens_per_layer.weight")
            # Flatten (vocab, layers, per-layer-dim) into a single 2-D embedding.
            converted_weights.append(
                weights.reshape(
                    config.vocab_size_per_layer_input, config.num_hidden_layers * config.hidden_size_per_layer_input
                )
            )
    elif path.startswith(_TRANSFORMER_EMBEDDER):
        # TODO: ryanmullins - support multimodal norms and projections
        if path.endswith("per_layer_model_projection"):
            converted_paths.append("per_layer_model_projection.weight")
            converted_weights.append(
                weights.reshape(
                    config.hidden_size, config.num_hidden_layers * config.hidden_size_per_layer_input
                ).transpose()
            )
        elif path.endswith("per_layer_projection_norm"):
            converted_paths.append("per_layer_projection_norm.weight")
            converted_weights.append(weights)
    elif path == _TRANSFORMER_FINAL_NORM:
        converted_paths = ["norm.weight"]
        converted_weights = [weights]
    # Sanity check: the two parallel lists must stay in lockstep.
    if (cpl := len(converted_paths)) != (cwl := len(converted_weights)):
        raise ValueError(
            "The `converted_paths` and `converted_weights` should be the same "
            f"length. Got {cpl} and {cwl}, respectively, for {path}."
        )
    return zip(converted_paths, converted_weights)
def convert_vision_weights(
    config: Gemma3nVisionConfig,
    path: str,
    param: str,
    weights: np.ndarray,
) -> Iterable[tuple[str, np.ndarray]]:
    """Converts one Orbax vision-tower (MobileNet) array into HF tensors.

    Args:
        config: Vision tower configuration (unused by the mappings below, kept
            for signature parity with the other converters).
        path: Slash-delimited Orbax parameter path rooted at the MobileNet prefix.
        param: Name of the leaf parameter stored at ``path``.
        weights: The raw source array.

    Returns:
        Pairs of ``(hf_path, array)``; the stem conv maps to a (weight, bias)
        pair, everything else to a single tensor.

    Raises:
        ValueError: if ``path`` does not correspond to any known MobileNet
            layer. (Previously an unmatched path left `converted_path` /
            `converted_weight` unbound and crashed with an opaque
            `UnboundLocalError` at the return statement.)
    """

    def generate_base_path(path: str, block_type: str) -> tuple[str, tuple[int, int]]:
        """Maps the flat, 1-based layer index in `path` to a timm-style `blocks.{i}.{j}` prefix."""
        re_str = rf"{block_type}(\d+)/"
        re_pattern = re.compile(re_str)
        match = re.search(re_pattern, path).group(1)
        idx = abs(int(match)) - 1
        # _MOBILE_NET_TIMM_SUMMED_BLOCK_SIZES holds cumulative block sizes, so
        # the first entry exceeding idx identifies the enclosing block.
        for block_idx, v in enumerate(_MOBILE_NET_TIMM_SUMMED_BLOCK_SIZES):
            if v > idx:
                offset = _MOBILE_NET_TIMM_SUMMED_BLOCK_SIZES[block_idx - 1] if block_idx > 0 else 0
                layer_idx = idx - offset
                return f"blocks.{block_idx}.{layer_idx}", (block_idx, layer_idx)
        raise ValueError(f"could not extract a base path from {path}")

    # Initialize so unmatched paths are reported explicitly below instead of
    # raising UnboundLocalError at the return statement.
    converted_path = None
    converted_weight = None
    if _MOBILE_NET_MSFA in path:
        # Multi-scale fusion adapter.
        converted_path = "msfa"
        if "ffn/Normalize_0" in path:
            converted_path += ".ffn.pw_exp.bn.weight"
            converted_weight = weights
        elif "ffn/Normalize_1" in path:
            converted_path += ".ffn.pw_proj.bn.weight"
            converted_weight = weights
        elif "ffn/expand" in path:
            converted_path += ".ffn.pw_exp.conv.weight"
            converted_weight = weights.transpose()[:, :, None, None]
        elif "ffn/project" in path:
            converted_path += ".ffn.pw_proj.conv.weight"
            converted_weight = weights.transpose()[:, :, None, None]
        elif "Normalize_0" in path:
            converted_path += ".norm.weight"
            converted_weight = weights
        else:
            converted_path = None
    elif _MOBILE_NET_CONV in path:
        # Stem convolution; timm expects an explicit (zero) bias as well.
        if "Conv_0" in path:
            converted_path = ("conv_stem.conv.weight", "conv_stem.conv.bias")
            converted_weight = weights.transpose(3, 2, 0, 1)
            converted_weight = (converted_weight, np.zeros(converted_weight.shape[0]))
        elif "Normalize_0" in path:
            converted_path = "conv_stem.bn.weight"
            converted_weight = weights
    elif _MOBILE_NET_FIB in path:
        # Fused inverted bottleneck.
        converted_path, _ = generate_base_path(path, _MOBILE_NET_FIB)
        if "Normalize_0" in path:
            converted_path += ".bn1.weight"
            converted_weight = weights
        elif "Normalize_1" in path:
            converted_path += ".bn2.weight"
            converted_weight = weights
        elif "expand_conv" in path:
            converted_path += ".conv_exp.weight"
            converted_weight = weights.transpose(3, 2, 0, 1)
        else:
            converted_path += ".conv_pwl.weight"
            converted_weight = weights.transpose()[:, :, None, None]
    elif _MOBILE_NET_MQA in path:
        # Multi-query attention block.
        converted_path, _ = generate_base_path(path, _MOBILE_NET_MQA)
        if "LayerScale_0" in path:
            converted_path += ".layer_scale.gamma"
            converted_weight = weights
        elif "Normalize_0" in path:
            converted_path += ".norm.weight"
            converted_weight = weights
        elif "Normalize_1" in path:
            converted_path += ".attn.key.norm.weight"
            converted_weight = weights
        elif "Normalize_2" in path:
            converted_path += ".attn.value.norm.weight"
            converted_weight = weights
        elif "key_dwconv" in path:
            converted_path += ".attn.key.down_conv.weight"
            converted_weight = weights.transpose(3, 2, 0, 1)
        elif "key_proj" in path:
            converted_path += ".attn.key.proj.weight"
            converted_weight = weights.transpose()[:, :, None, None]
        elif "output_proj" in path:
            converted_path += ".attn.output.proj.weight"
            converted_weight = weights.transpose()[:, :, None, None]
        elif "query_proj" in path:
            converted_path += ".attn.query.proj.weight"
            converted_weight = weights.transpose()[:, :, None, None]
        elif "value_dwconv" in path:
            converted_path += ".attn.value.down_conv.weight"
            converted_weight = weights.transpose(3, 2, 0, 1)
        elif "value_proj" in path:
            converted_path += ".attn.value.proj.weight"
            converted_weight = weights.transpose()[:, :, None, None]
    elif _MOBILE_NET_UIB in path:
        # Universal inverted bottleneck. Which Normalize_N maps to which norm
        # depends on whether this block has start/mid depthwise convolutions.
        converted_path, idx_key = generate_base_path(path, _MOBILE_NET_UIB)
        has_dw_start = idx_key in _MOBILE_NET_UIB_HAS_DW_START
        has_dw_mid = idx_key in _MOBILE_NET_UIB_HAS_DW_MID
        if "LayerScale_0" in path:
            converted_path += ".layer_scale.gamma"
            converted_weight = weights
        elif "Normalize_0" in path:
            converted_path += ".dw_start.bn.weight" if has_dw_start else ".pw_exp.bn.weight"
            converted_weight = weights
        elif "Normalize_1" in path:
            converted_path += ".pw_exp.bn.weight" if has_dw_start else ".pw_proj.bn.weight"
            converted_weight = weights
        elif "Normalize_2" in path:
            converted_path += ".dw_mid.bn.weight" if has_dw_mid else ".pw_proj.bn.weight"
            converted_weight = weights
        elif "Normalize_3" in path:
            converted_path += ".pw_proj.bn.weight"
            converted_weight = weights
        elif "expand" in path:
            converted_path += ".pw_exp.conv.weight"
            converted_weight = weights.transpose()[:, :, None, None]
        elif "middle_dwconv" in path:
            converted_path += ".dw_mid.conv.weight"
            converted_weight = weights.transpose(3, 2, 0, 1)
        elif "project" in path:
            converted_path += ".pw_proj.conv.weight"
            converted_weight = weights.transpose()[:, :, None, None]
        elif "start_dwconv" in path:
            converted_path += ".dw_start.conv.weight"
            converted_weight = weights.transpose(3, 2, 0, 1)
    if converted_path is None or converted_weight is None:
        raise ValueError(f"Unrecognized vision tower path: {path} (param: {param})")
    if isinstance(converted_path, (tuple, list)):
        return zip(converted_path, converted_weight)
    else:
        return [(converted_path, converted_weight)]
def convert(checkpoint_path: str, config: Gemma3nConfig) -> dict[str, torch.Tensor]:
    """Restores the Orbax checkpoint at `checkpoint_path` and converts it to an HF state dict."""
    ckpt = obc.PyTreeCheckpointer().restore(checkpoint_path)
    hf_tree: dict[str, torch.Tensor] = {}

    def update_tree(path: str, weights: np.ndarray, target_dtype: torch.dtype) -> None:
        # Round-trip through float32 before narrowing to the requested dtype.
        hf_tree[path] = torch.from_numpy(weights.astype("float32")).type(target_dtype)
        if _VERBOSE.value:
            logging.info(
                "%s converted shape=%s with dtype=%s",
                path,
                weights.shape,
                target_dtype,
            )

    for (path, param), value in tree.flatten_with_path(ckpt):
        # Direct multimodal embedding/norm/projection parameters first, then
        # dispatch everything else to the per-tower converters.
        if param == "audio_input_embedding_extra":
            update_tree("model.embed_audio.embedding.weight", value, config.audio_config.dtype)
        elif path.endswith("audio_embedding_norm"):
            update_tree("model.embed_audio.hard_embedding_norm.weight", value, config.audio_config.dtype)
        elif path.endswith("audio_input_projection"):
            update_tree("model.embed_audio.embedding_projection.weight", value.transpose(), config.audio_config.dtype)
        elif path.endswith("audio_soft_embedding_norm"):
            update_tree("model.embed_audio.soft_embedding_norm.weight", value, config.audio_config.dtype)
        elif param == "mm_input_embedding_extra":
            update_tree("model.embed_vision.embedding.weight", value, config.vision_config.dtype)
        elif path.endswith("mm_hard_embedding_norm"):
            update_tree("model.embed_vision.hard_embedding_norm.weight", value, config.vision_config.dtype)
        elif path.endswith("mm_input_projection"):
            update_tree(
                "model.embed_vision.embedding_projection.weight", value.transpose(), config.vision_config.dtype
            )
        elif path.endswith("mm_soft_embedding_norm"):
            update_tree("model.embed_vision.soft_embedding_norm.weight", value, config.vision_config.dtype)
        elif path.startswith(_TRANSFORMER_PARAMETER):
            for hf_path, hf_weights in convert_transformer_weights(config.text_config, path, param, value):
                update_tree(f"model.language_model.{hf_path}", hf_weights, config.text_config.dtype)
        elif _MOBILE_NET_PREFIX in path:
            # Trim everything before the MobileNet prefix so the vision
            # converter sees a path rooted at the tower.
            trimmed = path[path.index(_MOBILE_NET_PREFIX) :]
            for hf_path, hf_weights in convert_vision_weights(config.vision_config, trimmed, param, value):
                update_tree(f"model.vision_tower.timm_model.{hf_path}", hf_weights, config.vision_config.dtype)
        elif path.startswith(_AUDIO_ENCODER_PARAMETER):
            for hf_path, hf_weights in convert_audio_encoder_weights(config.audio_config, path, param, value):
                update_tree(f"model.audio_tower.{hf_path}", hf_weights, config.audio_config.dtype)

    # The LM head is tied to the input token embeddings.
    hf_tree["lm_head.weight"] = hf_tree["model.language_model.embed_tokens.weight"]
    return hf_tree
def main(*args):
    """Converts the Orbax checkpoint and saves model, tokenizer, processor, and generation config."""
    del args
    output_path = _OUTPUT_PATH.value
    variant = _VARIANT.value
    config = _VARIANTS[variant]
    # Record the requested per-modality precision on the config.
    config.audio_config.dtype = getattr(torch, _AUDIO_DTYPE.value)
    config.text_config.dtype = getattr(torch, _TRANSFORMER_DTYPE.value)
    config.vision_config.dtype = getattr(torch, _VISION_DTYPE.value)
    if _INCLUDE_CHAT_TEMPLATE.value:
        # Chat template is included for instruction tuned models, which treat
        # both "<eos>" and "<end_of_turn>" as generation stoppers.
        config.eos_token_id = [1, 106]
    logging.info(
        "Converting Gemma 3 (%s) @ %s (language) and %s (vision)",
        variant,
        _TRANSFORMER_DTYPE.value,
        _VISION_DTYPE.value,
    )
    state_tree = convert(_CHECKPOINT_PATH.value, config)
    logging.info("Converted Gemma 3 (%s) state tree from Orbax to Hugging Face.", variant)
    # Build the model without allocating weights, then attach the converted
    # tensors in place via assign=True.
    with accelerate.init_empty_weights():
        model = Gemma3nForConditionalGeneration(config=config)
    model.load_state_dict(state_tree, assign=True, strict=True)
    logging.info(
        "Loaded Gemma 3 (%s) in Hugging Face Transformers as a %s instance.",
        variant,
        type(model).__name__,
    )
    model.save_pretrained(output_path, state_dict=state_tree)
    logging.info(
        "Saved Gemma 3 (%s) to SafeTensors in %s using %s",
        variant,
        output_path,
        type(model).__name__,
    )
    # Free the large tensors before building tokenizer/processor artifacts.
    del model
    del state_tree
    chat_template_kwargs = {"chat_template": _CHAT_TEMPLATE} if _INCLUDE_CHAT_TEMPLATE.value else {}
    tokenizer = GemmaTokenizerFast(
        _TOKENIZER_PATH.value,
        add_bos_token=True,
        extra_special_tokens={
            "image_token": "<image_soft_token>",  # Should be ID=262_145
            "boi_token": "<start_of_image>",  # Should be ID=255_999
            "eoi_token": "<end_of_image>",  # Should be ID=262_144
            "audio_token": "<audio_soft_token>",  # Should be ID=262_273
            "boa_token": "<start_of_audio>",  # Should be ID=256_000
            "eoa_token": "<end_of_audio>",  # Should be ID=262_272
        },
        **chat_template_kwargs,
    )
    tokenizer.save_pretrained(output_path)
    logging.info("Saved GemmaTokenizer for %s to %s", variant, output_path)
    feature_extractor = Gemma3nAudioFeatureExtractor()
    image_processor = SiglipImageProcessorFast(
        image_seq_length=256,
        image_mean=(0.5,) * 3,
        image_std=(0.5,) * 3,
        size={"height": 768, "width": 768},
        resample=PILImageResampling.BILINEAR,
        do_normalize=False,
    )
    processor = Gemma3nProcessor(
        feature_extractor=feature_extractor,
        image_processor=image_processor,
        tokenizer=tokenizer,
        **chat_template_kwargs,
    )
    processor.save_pretrained(output_path)
    logging.info("Saved Gemma3nProcessor for %s to %s", variant, output_path)
    # NOTE: feature_extractor and image_processor both use the same filename, preprocessor_config.json, when saved to
    # disk, but the files are overwritten by processor.save_pretrained(). However, the configs can be unioned, saved,
    # and loaded from the same preprocessor_config.json file, so we do that explicitly here.
    feature_extractor_config = json.loads(feature_extractor.to_json_string())
    image_processor_config = json.loads(image_processor.to_json_string())
    preprocessor_config = {**feature_extractor_config, **image_processor_config}
    with open(os.path.join(output_path, "preprocessor_config.json"), "w", encoding="utf-8") as writer:
        writer.write(json.dumps(preprocessor_config, indent=2, sort_keys=True) + "\n")
    logging.info("Saved joint preprocessor_config.json for %s to %s", variant, output_path)
    del feature_extractor, image_processor, processor, tokenizer
    generation_config = GenerationConfig(
        pad_token_id=config.text_config.pad_token_id,
        bos_token_id=config.text_config.bos_token_id,
        # Instruction-tuned checkpoints also stop on <end_of_turn> (106).
        eos_token_id=(
            [config.text_config.eos_token_id, 106] if _INCLUDE_CHAT_TEMPLATE.value else config.text_config.eos_token_id
        ),
        cache_implementation="hybrid",
        temperature=1.0,
        do_sample=True,
        top_k=64,
        top_p=0.95,
    )
    generation_config.save_pretrained(output_path)


if __name__ == "__main__":
    app.run(main)
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/gemma3n/convert_gemma3n_weights.py",
"license": "Apache License 2.0",
"lines": 727,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/gemma3n/feature_extraction_gemma3n.py | # Copyright 2025 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from collections.abc import Sequence
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)
def create_fb_matrix(
n_freqs: int,
f_min: float,
f_max: float,
n_mels: int,
sample_rate: int,
fft_length: int,
norm: str | None = None,
) -> np.ndarray:
r"""Create a frequency bin conversion matrix (NumPy version).
Args:
n_freqs (int): Number of frequencies to highlight/apply
f_min (float): Minimum frequency (Hz)
f_max (float): Maximum frequency (Hz)
n_mels (int): Number of mel filterbanks
sample_rate (int): Sample rate of the audio waveform
fft_length (int): FFT length
norm (Optional[str]): If 'slaney', divide the triangular mel weights by
the width of the mel band (area normalization). (Default: ``None``)
Returns:
np.ndarray: Triangular filter banks (fb matrix) of size (``n_freqs``,
``n_mels``)
meaning number of frequencies to highlight/apply to x the number of
filterbanks.
Each column is a filterbank so that assuming there is a matrix A of
size (..., ``n_freqs``), the applied result would be
``A @ create_fb_matrix_numpy(A.shape[-1], ...)``.
"""
if norm is not None and norm != "slaney":
raise ValueError("norm must be one of None or 'slaney'")
# freq bins
all_freqs = np.arange(n_freqs, dtype=np.float32) * (sample_rate / fft_length)
# calculate mel freq bins
# hertz to mel(f) is 2595. * math.log10(1. + (f / 700.))
m_min = 2595.0 * math.log10(1.0 + (f_min / 700.0))
m_max = 2595.0 * math.log10(1.0 + (f_max / 700.0))
m_pts = np.linspace(m_min, m_max, n_mels + 2)
# mel to hertz(mel) is 700. * (10**(mel / 2595.) - 1.)
f_pts = 700.0 * (10 ** (m_pts / 2595.0) - 1.0)
# calculate difference between each mel point and each stft freq point in Hz
f_diff = f_pts[1:] - f_pts[:-1] # (n_mels + 1)
slopes = np.expand_dims(f_pts, 0) - np.expand_dims(all_freqs, 1) # (n_freqs, n_mels + 2)
# create overlapping triangles
zero = np.zeros(1, dtype=np.float32)
down_slopes = (-1.0 * slopes[:, :-2]) / f_diff[:-1] # (n_freqs, n_mels)
up_slopes = slopes[:, 2:] / f_diff[1:] # (n_freqs, n_mels)
fb = np.maximum(zero, np.minimum(down_slopes, up_slopes))
if norm is not None and norm == "slaney":
# Slaney-style mel is scaled to be approx constant energy per channel
enorm = 2.0 / (f_pts[2 : n_mels + 2] - f_pts[:n_mels])
fb *= np.expand_dims(enorm, 0)
return fb
def _unfold(array: np.ndarray, dimension: int, size: int, step: int) -> np.ndarray:
"""A basic NumPy equivalent of PyTorch's unfold for 2D arrays along the last dim."""
if array.ndim != 2:
raise ValueError("This unfold implementation currently supports 2D arrays (batch, time).")
if dimension != -1 and dimension != array.ndim - 1:
raise ValueError("This unfold implementation only supports unfolding the last dimension.")
batch_size, original_length = array.shape
num_frames = (original_length - size) // step + 1
if num_frames <= 0:
return np.zeros((batch_size, 0, size), dtype=array.dtype)
output_shape = (batch_size, num_frames, size)
output_strides = (array.strides[0], array.strides[1] * step, array.strides[1])
return np.lib.stride_tricks.as_strided(array, shape=output_shape, strides=output_strides)
class Gemma3nAudioFeatureExtractor(SequenceFeatureExtractor):
"""An audio feature extractor Universal Speech Models https://huggingface.co/papers/2303.01037.
Args:
feature_size (`int`, *optional*, defaults to 128):
The feature dimension of the extracted features.
sampling_rate (`int`, *optional*, defaults to 16000):
The sampling rate at which the audio files should be digitalized expressed in hertz (Hz).
padding_value (`float`, *optional*, defaults to 0.0):
Padding value used to pad the audio. Should correspond to silences.
return_attention_mask (`bool`, *optional*, defaults to `True`):
Whether to return the attention mask for the generated MEL spectrograms.
frame_length_ms (`float`, *optional*, defaults to 32.0):
The length of a frame in milliseconds.
hop_length_ms (`float`, *optional*, defaults to 10.0):
Length of the overlapping windows for the STFT used to obtain the Mel Frequency coefficients.
min_frequency (`float`, *optional*, defaults to 125.0):
The minimum frequency (in Hz) for the Mel filterbank.
max_frequency (`float`, *optional*, defaults to 7600.0):
The maximum frequency (in Hz) for the Mel filterbank.
preemphasis (`float`, *optional*, defaults to 0.97):
The preemphasis coefficient.
preemphasis_htk_flavor (`bool`, *optional*, defaults to `True`):
Whether to use HTK-style preemphasis.
fft_overdrive (`bool`, *optional*, defaults to `True`):
Whether to use FFT overdrive.
dither (`float`, *optional*, defaults to 0.0):
Adds dithering. In other words, adds a small Gaussian noise to each frame.
E.g. use 0.0001 to add dithering with a normal distribution centered
around 0.0 with standard deviation 0.0001 (assuming [-1,+1] range of raw_speech).
The value 0.0 means no dithering.
Dithering has similar effect as `spectrogram(mel_floor=...)`. It reduces
the high log_mel_fbank values for signals with hard-zero sections,
when VAD cutoff is present in the signal.
input_scale_factor (`float`, *optional*, defaults to 1.0):
Scaling factor applied to the input waveform.
mel_floor (`float`, *optional*, defaults to 1e-05):
Minimum value for Mel spectrograms to avoid log(0).
per_bin_mean (`Optional[Sequence[float]]`, *optional*):
Mean values for per-bin normalization.
per_bin_stddev (`Optional[Sequence[float]]`, *optional*):
Standard deviation values for per-bin normalization.
"""
model_input_names = ["input_features", "input_features_mask"]
def __init__(
    self,
    feature_size: int = 128,
    sampling_rate: int = 16_000,
    padding_value: float = 0.0,
    return_attention_mask: bool = True,
    frame_length_ms: float = 32.0,
    hop_length_ms: float = 10.0,
    min_frequency: float = 125.0,
    max_frequency: float = 7600.0,
    preemphasis: float = 0.97,
    preemphasis_htk_flavor: bool = True,
    fft_overdrive: bool = True,
    dither: float = 0.0,
    input_scale_factor: float = 1.0,
    mel_floor: float = 1e-5,
    per_bin_mean: Sequence[float] | None = None,
    per_bin_stddev: Sequence[float] | None = None,
    **kwargs,
):
    """Initializes the extractor and precomputes the Hann window and mel filterbank."""
    super().__init__(
        feature_size=feature_size,
        sampling_rate=sampling_rate,
        padding_value=padding_value,
        return_attention_mask=return_attention_mask,
        **kwargs,
    )
    # Spectral shaping options.
    self.min_frequency = min_frequency
    self.max_frequency = max_frequency
    self.preemphasis = preemphasis
    self.preemphasis_htk_flavor = preemphasis_htk_flavor
    self.fft_overdrive = fft_overdrive
    self.dither = dither
    self.input_scale_factor = input_scale_factor
    # Convert window sizes from milliseconds to samples.
    self.frame_length = int(round(sampling_rate * frame_length_ms / 1000.0))
    self.hop_length = int(round(sampling_rate * hop_length_ms / 1000.0))
    self.mel_floor = np.array(mel_floor, dtype=np.float64)
    # Next power of two >= frame_length; "overdrive" doubles it again for
    # finer frequency resolution.
    n_fft = 2 ** math.ceil(math.log2(self.frame_length))
    if self.fft_overdrive:
        n_fft *= 2
    self.fft_length = n_fft
    # Hann window matching the frame length.
    positions = np.arange(self.frame_length, dtype=np.float32)
    self.window = (0.5 * (1 - np.cos(2 * np.pi * positions / self.frame_length))).astype(np.float32)
    self.mel_filters = create_fb_matrix(
        n_freqs=self.fft_length // 2 + 1,
        f_min=min_frequency,
        f_max=max_frequency,
        n_mels=feature_size,
        sample_rate=self.sampling_rate,
        norm=None,
        fft_length=n_fft,
    )
    # Optional per-mel-bin normalization statistics, stored broadcastable as
    # (1, 1, feature_size); None disables the corresponding normalization.
    self.per_bin_mean = (
        np.array(per_bin_mean).reshape(1, 1, feature_size) if per_bin_mean is not None else None
    )
    self.per_bin_stddev = (
        np.array(per_bin_stddev).reshape(1, 1, feature_size) if per_bin_stddev is not None else None
    )
def _extract_spectrogram(self, waveform: np.ndarray, attention_mask: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    """Computes the log-mel spectrogram of one waveform and its frame-level mask.

    Args:
        waveform: 1-D mono waveform, or a 2-D (batch, time) array.
        attention_mask: Per-sample validity mask aligned with ``waveform``.

    Returns:
        A tuple of the (num_frames, n_mels) log-mel spectrogram and the
        boolean per-frame mask truncated to the same number of frames.
    """
    if waveform.ndim == 1:  # If single waveform, add batch dimension
        waveform = np.expand_dims(waveform, axis=0)
    if self.dither > 0.0:
        # Small Gaussian noise breaks up hard-zero regions before the log.
        waveform = waveform + self.dither * np.random.randn(*waveform.shape).astype(waveform.dtype)
    if self.input_scale_factor != 1.0:
        waveform = waveform * self.input_scale_factor
    # One extra sample per frame so preemphasis can reference the previous sample.
    frame_size_for_unfold = self.frame_length + 1
    # NumPy equivalent of unfold for [B, NumFrames, frame_size_for_unfold]
    frames_to_process = _unfold(waveform, dimension=-1, size=frame_size_for_unfold, step=self.hop_length)
    if self.preemphasis > 0.0:
        if self.preemphasis_htk_flavor:
            # HTK flavor: scale the first sample instead of differencing it.
            first_in_frame = frames_to_process[..., :1] * (1.0 - self.preemphasis)
            rest_in_frame = frames_to_process[..., 1:-1] - self.preemphasis * frames_to_process[..., :-2]
            frames = np.concatenate([first_in_frame, rest_in_frame], axis=-1)
        else:
            frames = frames_to_process[..., 1:] - self.preemphasis * frames_to_process[..., :-1]
    else:
        # No preemphasis: drop the extra trailing sample.
        frames = frames_to_process[..., :-1]
    frames = frames * self.window  # Broadcasting window
    stft = np.fft.rfft(frames, n=self.fft_length, axis=-1)
    magnitude_spec = np.abs(stft)
    mel_spec = np.matmul(magnitude_spec, self.mel_filters)
    # Floor before the log to avoid log(0).
    log_mel_spec = np.log(np.maximum(mel_spec, self.mel_floor))
    if self.per_bin_mean is not None:
        log_mel_spec = log_mel_spec - self.per_bin_mean  # Broadcasting
    if self.per_bin_stddev is not None:
        log_mel_spec = log_mel_spec / self.per_bin_stddev  # Broadcasting
    mel_spectrogram = log_mel_spec.squeeze(0)
    # Downsample the sample-level mask to frame rate by striding by hop_length.
    mask = attention_mask[:: self.hop_length].astype(bool)
    # TODO: The filtered mask is always exactly 3 elements longer than the mel_spectrogram. Why???
    return mel_spectrogram, mask[: mel_spectrogram.shape[0]]
def __call__(
self,
raw_speech: np.ndarray | list[float] | list[np.ndarray] | list[list[float]],
padding: bool | str | PaddingStrategy = "longest",
max_length: int | None = 480_000,
truncation: bool = True,
pad_to_multiple_of: int | None = 128,
return_tensors: str | TensorType | None = None,
return_attention_mask: bool | None = True,
**kwargs,
) -> BatchFeature:
"""Creates a batch of MEL spectrograms from the provided raw speech.
This implementation uses a different algorithm for windowing and preemphasis compared to the built-in
`transformers.audio_utils.spectrogram()` function that _will_ result in different outputs. Consider this
carefully when selecting an audio feature extractor, especially with pre-trained models.
Args:
raw_speech:
The audio for which MEL spectrograms are created.
padding (`Union[bool, str, PaddingStrategy]`, *optional*, defaults to `"longest"`):
The padding strategy to use for batches of audio with different lengths.
max_length (`int`, *optional*, defaults to 480000):
If provided, defines the maximum length of the audio to allow. Audio longer than this will be
truncated if `truncation=True`.
truncation (`bool`, *optional*, defaults to `True`):
Whether or not to truncate audio above `max_length`.
pad_to_multiple_of (`int`, *optional*, defaults to 128):
When padding, pad to a multiple of this value. The default value is defined for optimal TPU support.
return_tensors (`Union[str, TensorType]`, *optional*, defaults to `None`):
The type of tensors to return (e.g., NumPy, or Torch).
return_attention_mask (`bool`, *optional*, defaults to `True`):
Whether to return the attention mask for the generated MEL spectrograms.
"""
is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
is_batched_sequence = isinstance(raw_speech, Sequence) and isinstance(raw_speech[0], (np.ndarray, Sequence))
is_batched = is_batched_numpy or is_batched_sequence
# Always return a batch
if not is_batched:
raw_speech = [raw_speech]
raw_speech = [np.asarray([rs]).T for rs in raw_speech]
batched_speech = self.pad(
BatchFeature({"input_features": raw_speech}),
padding=padding,
max_length=max_length,
truncation=truncation,
pad_to_multiple_of=pad_to_multiple_of,
return_attention_mask=return_attention_mask,
)
prepared_speech = []
prepared_speech_mask = []
for speech, mask in zip(batched_speech.input_features, batched_speech.attention_mask):
speech, mask = self._extract_spectrogram(speech.T, mask)
prepared_speech.append(speech.astype(np.float32))
prepared_speech_mask.append(mask)
return BatchFeature(
{"input_features": prepared_speech, "input_features_mask": prepared_speech_mask},
tensor_type=return_tensors,
)
# Public API of this module.
__all__ = ["Gemma3nAudioFeatureExtractor"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/gemma3n/feature_extraction_gemma3n.py",
"license": "Apache License 2.0",
"lines": 280,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/gemma3n/modular_gemma3n.py | # Copyright 2025 Google Inc. HuggingFace Inc. team. All rights reserved.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from collections.abc import Callable, Sequence
from dataclasses import dataclass
from typing import Any, Literal
import torch
import torch.nn as nn
import torch.nn.functional as F
from ... import initialization as init
from ...activations import ACT2FN
from ...cache_utils import Cache, DynamicCache
from ...configuration_utils import PreTrainedConfig, layer_type_validation
from ...masking_utils import create_causal_mask, create_sliding_window_causal_mask
from ...modeling_outputs import BaseModelOutputWithPast, BaseModelOutputWithPooling
from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS, RopeParameters
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, logging, torch_compilable_check
from ...utils.generic import merge_with_config_defaults
from ...utils.output_capturing import capture_outputs
from ..auto import AutoModel
from ..gemma2.configuration_gemma2 import Gemma2Config
from ..gemma2.modeling_gemma2 import (
Gemma2MLP,
Gemma2PreTrainedModel,
eager_attention_forward,
rotate_half,
)
from ..gemma3.modeling_gemma3 import (
Gemma3Attention,
Gemma3DecoderLayer,
Gemma3ForCausalLM,
Gemma3RMSNorm,
Gemma3RotaryEmbedding,
Gemma3TextModel,
Gemma3TextScaledWordEmbedding,
)
from ..paligemma.modeling_paligemma import (
PaliGemmaCausalLMOutputWithPast,
PaliGemmaForConditionalGeneration,
PaliGemmaModel,
PaligemmaModelOutputWithPast,
)
from ..timm_wrapper.configuration_timm_wrapper import TimmWrapperConfig
# Module-level logger shared by the classes defined in this file.
logger = logging.get_logger(__name__)
class Gemma3nTextConfig(Gemma2Config, PreTrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`Gemma3nTextModel`]. It is used to instantiate an
    Gemma3nTextModel model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the Gemma 3n E4B, e.g.
    [google/gemma-3n-E4B](https://huggingface.co/google/gemma-3n-E4B).

    Configuration objects that inherit from [`Gemma3nTextConfig`] and can be used to control the model outputs. Read
    the documentation from [`Gemma3nTextConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 262400):
            Vocabulary size of the Gemma3nText model. Defines the number of different tokens that can be represented by
            the `inputs_ids` passed when calling [`Gemma3nTextModel`]
        vocab_size_per_layer_input (`int`, *optional*, defaults to 262144):
            Vocabulary size of the per-layer text embeddings that augment the standard embeddings.
        hidden_size (`int`, *optional*, defaults to 2048):
            Dimension of the hidden representations.
        hidden_size_per_layer_input (`int`, *optional*, defaults to 256):
            Dimension of the hidden representations for per-layer emebeddings.
        intermediate_size (`int` or `Sequence[int]`, *optional*, defaults to 16384):
            Dimension of the MLP representations. MatFormer configurations may wish to provide a sequence of integers
            to account for variable intermediate_size values across layers. In such cases,
            `len(intermediate_size) == num_hidden_layers`.
        num_hidden_layers (`int`, *optional*, defaults to 35):
            Number of hidden layers in the Transformer decoder.
        num_attention_heads (`int`, *optional*, defaults to 8):
            Number of attention heads for each attention layer in the Transformer decoder.
        num_key_value_heads (`int`, *optional*, defaults to 2):
            This is the number of key_value heads that should be used to implement Grouped Query Attention. If
            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
            `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
            by meanpooling all the original heads within that group. For more details checkout this
            [paper](https://huggingface.co/papers/2305.13245). If not specified, will default to `num_attention_heads`.
        head_dim (`int`, *optional*, defaults to 256):
            The attention head dimension.
        hidden_activation (`str` or `function`, *optional*, defaults to `"gelu_pytorch_tanh"`):
            The non-linear activation function (function or string) in the decoder. Will default to
            `"gelu_pytorch_tanh"` if not specified. `"gelu_pytorch_tanh"` uses an approximation of the `"gelu"`
            activation function.
        max_position_embeddings (`int`, *optional*, defaults to 32768):
            The maximum sequence length that this model might ever be used with.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        rms_norm_eps (`float`, *optional*, defaults to 1e-06):
            The epsilon used by the rms normalization layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        pad_token_id (`int`, *optional*, defaults to 0):
            Padding token id.
        eos_token_id (`int`, *optional*, defaults to 1):
            End of stream token id.
        bos_token_id (`int`, *optional*, defaults to 2):
            Beginning of stream token id.
        rope_parameters (`dict`, *optional*):
            Dictionary mapping attention patterns (`"full_attention"`, `"sliding_attention"`) to `RopeParameters`.
            Each value should be a dictionary containing `rope_type` and optional scaling parameters.
        attention_bias (`bool`, defaults to `False`, *optional*, defaults to `False`):
            Whether to use a bias in the query, key, value and output projection layers during self-attention.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        sliding_window (`int`, *optional*, defaults to 512):
            This is the size of the sliding window used by local attention layers.
        layer_types (`Optional`, *optional*):
            A sequence of strings defining the attention type for that layer as either "sliding_attention" or
            "full_attention". If not provided, `layer_types` will de inferred from `num_hidden_layers` using a pattern
            of four "sliding_attention" layers followed one "full_attention". The last layer in the model should always
            be a "full_attention" layer.
        final_logit_softcapping (`float`, *optional*, defaults to 30.0):
            Scaling factor when applying tanh softcapping on the logits.
        altup_active_idx (`int`, *optional*, defaults to 0):
            The index of the prediction from which AltUp will compute additional predictions or correct
        altup_coef_clip (`float`, *optional*, defaults to 120.0):
            The maximum amplitude of an AltUp prediction or correction coefficient weight.
        altup_correct_scale (`bool`, *optional*, defaults to `True`):
            If True, apply the `AltUp.correct_output_scale` to the corrected prediction at `altup_active_idx`.
        altup_num_inputs (`int`, *optional*, defaults to 4):
            The number of predictions that AltUp should be make given the input sequence.
        num_kv_shared_layers (`int`, *optional*, defaults to 15):
            The number of layer that share KV cache values. During the forward pass, the last `num_kv_shared_layers`
            layers in the model "share" the KV values in that each local and global layer in this range uses the KV
            cache values computed for the last local or global layer, respectively, before entering this range. The
            value should be a multiple of the attention pattern size (see `layer_types` parameter).
        laurel_rank (int, *optional*, defaults to 64):
            The intermediate size for the linear projections in the Learned Augmented Residual Layer.
        activation_sparsity_pattern (Sequence[float], *optional*):
            The sparsity factor used to extract the top-k activations for a given layer. The provided Sequence must
            explicitly provide a sparsity value for each layer in the model. By default, the first 10 layers are
            sparse with a sparsity factor of 0.95 and the rest are dense.
        tie_word_embeddings (`bool`, *optional*, defaults to `True`):
            Whether to tie weight embeddings

    ```python
    >>> from transformers import Gemma3nTextModel, Gemma3nTextConfig

    >>> # Initializing a Gemma3nText gemma3n_text-E4B style configuration
    >>> configuration = Gemma3nTextConfig()

    >>> # Initializing a model from the gemma3n_text-E4B style configuration
    >>> model = Gemma3nTextModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```
    """

    model_type = "gemma3n_text"
    default_theta = {"global": 1_000_000.0, "local": 10_000.0}

    def __init__(
        self,
        vocab_size: int = 262_400,
        vocab_size_per_layer_input: int = 262_144,
        hidden_size: int = 2048,
        hidden_size_per_layer_input: int = 256,
        intermediate_size: int | Sequence[int] = 16_384,
        num_hidden_layers: int = 35,
        num_attention_heads: int = 8,
        num_key_value_heads: int = 2,
        head_dim: int = 256,
        hidden_activation: str = "gelu_pytorch_tanh",
        max_position_embeddings: int = 32_768,
        initializer_range: float = 0.02,
        rms_norm_eps: float = 1e-6,
        use_cache: bool = True,
        pad_token_id: int = 0,
        eos_token_id: int = 1,
        bos_token_id: int = 2,
        rope_parameters: dict[Literal["sliding_attention", "full_attention"], RopeParameters] | None = None,
        attention_bias: bool = False,
        attention_dropout: float = 0.0,
        sliding_window: int = 512,
        layer_types: Sequence[str] | None = None,
        final_logit_softcapping: float = 30.0,
        altup_active_idx: int = 0,
        altup_coef_clip: float = 120.0,
        altup_correct_scale: bool = True,
        altup_num_inputs: int = 4,
        num_kv_shared_layers: int = 15,
        laurel_rank: int = 64,
        activation_sparsity_pattern: float | Sequence[float] | None = None,
        tie_word_embeddings: bool | None = True,
        **kwargs,
    ):
        # Normalize intermediate_size to one entry per layer; a MatFormer config may pass
        # an explicit per-layer sequence, which must then cover every layer.
        if isinstance(intermediate_size, Sequence) and (intsize_len := len(intermediate_size)) != num_hidden_layers:
            raise ValueError(
                "intermediate_size must have an explicit intermediate size for every layer or one for all layers. "
                f"Expected {num_hidden_layers} values but got {intsize_len}."
            )
        elif not isinstance(intermediate_size, Sequence):
            intermediate_size = [intermediate_size] * num_hidden_layers
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.vocab_size_per_layer_input = vocab_size_per_layer_input
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.head_dim = head_dim
        self.num_key_value_heads = num_key_value_heads
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.attention_bias = attention_bias
        self.attention_dropout = attention_dropout
        self.hidden_activation = hidden_activation
        self.sliding_window = sliding_window
        self.final_logit_softcapping = final_logit_softcapping
        if layer_types is None:
            # Default pattern: four "sliding_attention" layers followed by one "full_attention".
            self.layer_types = [
                "full_attention" if (i + 1) % 5 == 0 else "sliding_attention" for i in range(self.num_hidden_layers)
            ]
        else:
            self.layer_types = layer_types
        layer_type_validation(self.layer_types, self.num_hidden_layers)
        self.hidden_size_per_layer_input = hidden_size_per_layer_input
        self.num_kv_shared_layers = num_kv_shared_layers
        self.altup_active_idx = altup_active_idx
        self.altup_coef_clip = altup_coef_clip
        self.altup_correct_scale = altup_correct_scale
        self.altup_num_inputs = altup_num_inputs
        self.laurel_rank = laurel_rank
        if activation_sparsity_pattern is None:
            # Default: the first 10 layers are sparse (0.95), the rest dense; small models
            # (<= 10 layers) are fully dense.
            num_sparse_layers = 10 if num_hidden_layers > 10 else 0
            activation_sparsity_pattern = [0.95] * num_sparse_layers + [0.0] * (num_hidden_layers - num_sparse_layers)
        if (len_asp := len(activation_sparsity_pattern)) != num_hidden_layers:
            raise ValueError(
                "activation_sparsity_pattern must have an explicit activation sparsity value for every layer."
                f"Expected {num_hidden_layers} values but got {len_asp}."
            )
        self.activation_sparsity_pattern = activation_sparsity_pattern
        self.rope_parameters = rope_parameters
        self.tie_word_embeddings = tie_word_embeddings
        # Fix: the unbound call `PreTrainedConfig.__init__(**kwargs)` was missing `self`
        # and would raise a TypeError. The explicit base-class call (instead of `super()`)
        # deliberately bypasses Gemma2Config.__init__.
        PreTrainedConfig.__init__(self, **kwargs)

    def convert_rope_params_to_dict(self, ignore_keys_at_rope_validation=None, **kwargs):
        """Normalize legacy rope kwargs into the per-attention-pattern `rope_parameters` dict.

        Accepts the old-style `rope_scaling`, `rope_theta` and `rope_local_base_freq` kwargs,
        fills in defaults for both attention patterns, then standardizes and validates the
        result. Returns the remaining (consumed-of-rope) kwargs.
        """
        rope_scaling = kwargs.pop("rope_scaling", None)
        # Try to set `rope_scaling` if available, otherwise use `rope_parameters`. If we find `rope_parameters`
        # as arg in the inputs, we can safely assume that it is in the new format. New naming used -> new format
        default_rope_params = {
            "sliding_attention": {"rope_type": "default"},
            "full_attention": {"rope_type": "default"},
        }
        self.rope_parameters = self.rope_parameters if self.rope_parameters is not None else default_rope_params
        if rope_scaling is not None:
            # Legacy `rope_scaling` only ever applied to the global (full) attention pattern.
            self.rope_parameters["full_attention"].update(rope_scaling)

        # Set default values if not present
        if self.rope_parameters.get("full_attention") is None:
            self.rope_parameters["full_attention"] = {"rope_type": "default"}
        self.rope_parameters["full_attention"].setdefault(
            "rope_theta", kwargs.pop("rope_theta", self.default_theta["global"])
        )
        if self.rope_parameters.get("sliding_attention") is None:
            self.rope_parameters["sliding_attention"] = {"rope_type": "default"}
        self.rope_parameters["sliding_attention"].setdefault(
            "rope_theta", kwargs.pop("rope_local_base_freq", self.default_theta["local"])
        )

        # Standardize and validate the correctness of rotary position embeddings parameters
        self.standardize_rope_params()
        self.validate_rope(ignore_keys=ignore_keys_at_rope_validation)
        return kwargs
class Gemma3nAudioConfig(PreTrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`Gemma3nAudioEncoder`]. It is used to instantiate
    an `Gemma3nAudioEncoder` model according to the specified arguments, defining the model architecture. Instantiating
    a configuration with the defaults will yield a similar configuration to that of the Gemma 3n E4B, e.g.,
    [google/gemma-3n-E4B](https://huggingface.co/google/gemma-3n-E4B).

    Configuration objects that inherit from [`Gemma3nAudioConfig`] and can be used to control the model outputs. Read
    the documentation from [`Gemma3nAudioConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 128):
            Vocabulary size of the additional hard-token embeddings for audio model. These augment the embeddings
            included in the `Gemma3nTextModel` to provide, e.g., the end of audio and audio soft token placeholder
            tokens when converting `input_ids` to embeddings in the `Gemma3nForConditionalGeneration` model.
        vocab_offset (`int`, *optional*, defaults to 262272):
            Offset between the tokenizer vocab index for the token ids embedded by `Gemma3nMultimodalEmbedder` and the
            0-indexed `Gemma3nMultimodalEmbedder.embedding` table.
        input_feat_size (`int`, *optional*, defaults to 128):
            The number of channels in each mel-spectrogram frame.
        hidden_size (`int`, *optional*, defaults to 1536):
            Dimension of the hidden representations.
        rms_norm_eps (`float`, *optional*, defaults to 1e-06):
            The epsilon used by the rms normalization layers.
        gradient_clipping (`float`, *optional*, defaults to 10000000000.0):
            Clipping value used to stabilize extremely large gradient values.
        conf_attention_chunk_size (`int`, *optional*, defaults to 12):
            The sub-sequence size for local attention processing inside the Conformer ("conf") section of the
            Universal Speech Model.
        conf_attention_context_left (`int`, *optional*, defaults to 13):
            The left context size of the local attention inside the Conformer ("conf") section of the
            Universal Speech Model.
        conf_attention_context_right (`int`, *optional*, defaults to 0):
            The right context size of the local attention inside the Conformer ("conf") section of the
            Universal Speech Model.
        conf_attention_logit_cap (`float`, *optional*, defaults to 50.0):
            Logit cap applied during local attention inside the Conformer ("conf") section of the
            Universal Speech Model.
        conf_num_attention_heads (`int`, *optional*, defaults to 8):
            The number of attention heads in local attention inside the Conformer ("conf") section of the
            Universal Speech Model.
        conf_num_hidden_layers (`int`, *optional*, defaults to 12):
            The number of layers that use local attention inside the Conformer ("conf") section of the
            Universal Speech Model.
        conf_conv_kernel_size (`int`, *optional*, defaults to 5):
            Convolution kernel size for the conformer block inside the Conformer ("conf") section of the
            Universal Speech Model.
        conf_reduction_factor (`int`, *optional*, defaults to 4):
            Reduction factor used in the conformer block inside the Conformer ("conf") section of the
            Universal Speech Model.
        conf_residual_weight (`float`, *optional*, defaults to 0.5):
            Residual connection weight inside the Conformer ("conf") section of the
            Universal Speech Model.
        sscp_conv_channel_size (`tuple(int, int)`, *optional*, defaults to `(128, 32)`):
            The channel sizes for the first and second convolutional layers in the Sub-sample Convolution Projection
            ("sscp") section of the Universal Speech Model.
        sscp_conv_group_norm_eps (`float`, *optional*, defaults to 0.001):
            Epsilon used in group normalization in the subsample convolution projection in the Sub-sample Convolution
            Projection ("sscp") section of the Universal Speech Model.
        sscp_conv_kernel_size (`tuple(tuple(int, int), tuple(int, int))`, *optional*, defaults to `((3, 3), (3, 3))`):
            Kernel sizes of the two convolutional layers in the subsample convolution projection in the Sub-sample
            Convolution Projection ("sscp") section of the Universal Speech Model. The kernel sizes are specified as a
            tuple of height and width for each layer, where the height corresponds to the time dimension and the width
            corresponds to the frequency dimension.
        sscp_conv_stride_size (`tuple(tuple(int, int), tuple(int, int))`, *optional*, defaults to `((2, 2), (2, 2))`):
            Stride sizes of the two convolutional layers in the subsample convolution projection in the Sub-sample
            Convolution Projection ("sscp") section of the Universal Speech Model. The stride sizes are specified as a
            tuple of height and width for each layer, where the height corresponds to the time dimension and the width
            corresponds to the frequency dimension.

    Example:

    ```python
    >>> from transformers import Gemma3nAudioConfig, Gemma3nAudioEncoder

    >>> # Initializing a Gemma3nAudioEncoder gemma3n_audio-E4B-style configuration
    >>> configuration = Gemma3nAudioConfig()

    >>> # Initializing a model from the gemma3n_audio-E4B style configuration
    >>> model = Gemma3nAudioEncoder(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```
    """

    model_type = "gemma3n_audio"

    def __init__(
        self,
        vocab_size: int = 128,
        vocab_offset: int = 262_144 + 128,  # text vocab size + vision vocab size
        input_feat_size: int = 128,
        hidden_size: int = 1536,
        rms_norm_eps: float = 1e-6,
        gradient_clipping: float = 10_000_000_000.0,
        conf_attention_chunk_size: int = 12,
        conf_attention_context_left: int = 13,
        conf_attention_context_right: int = 0,
        conf_attention_logit_cap: float = 50.0,
        conf_num_attention_heads: int = 8,
        conf_num_hidden_layers: int = 12,
        conf_conv_kernel_size: int = 5,
        conf_reduction_factor: int = 4,
        conf_residual_weight: float = 0.5,
        sscp_conv_channel_size: tuple[int, int] = (128, 32),
        sscp_conv_group_norm_eps: float = 1e-3,
        sscp_conv_kernel_size: tuple[tuple[int, int], tuple[int, int]] = (
            (3, 3),
            (3, 3),
        ),
        sscp_conv_stride_size: tuple[tuple[int, int], tuple[int, int]] = (
            (2, 2),
            (2, 2),
        ),
        **kwargs,
    ):
        super().__init__(**kwargs)
        # Embedding / tokenizer-facing parameters.
        self.vocab_size = vocab_size
        self.vocab_offset = vocab_offset
        # Core encoder dimensions and numerics.
        self.input_feat_size = input_feat_size
        self.hidden_size = hidden_size
        self.rms_norm_eps = rms_norm_eps
        self.gradient_clipping = gradient_clipping
        # Conformer ("conf") section hyper-parameters.
        self.conf_attention_chunk_size = conf_attention_chunk_size
        self.conf_attention_context_left = conf_attention_context_left
        self.conf_attention_context_right = conf_attention_context_right
        self.conf_attention_logit_cap = conf_attention_logit_cap
        self.conf_num_attention_heads = conf_num_attention_heads
        self.conf_num_hidden_layers = conf_num_hidden_layers
        self.conf_conv_kernel_size = conf_conv_kernel_size
        self.conf_reduction_factor = conf_reduction_factor
        self.conf_residual_weight = conf_residual_weight
        # Sub-sample Convolution Projection ("sscp") section hyper-parameters.
        self.sscp_conv_channel_size = sscp_conv_channel_size
        self.sscp_conv_group_norm_eps = sscp_conv_group_norm_eps
        self.sscp_conv_kernel_size = sscp_conv_kernel_size
        self.sscp_conv_stride_size = sscp_conv_stride_size
class Gemma3nVisionConfig(TimmWrapperConfig):
    r"""
    This is the configuration class to store the configuration for a timm backbone [`TimmWrapper`]. It is used to
    instantiate an timm model model according to the specified arguments, defining the model architecture.
    Instantiating a configuration with the defaults will yield a similar configuration to that of the Gemma 3n E4B
    vision tower, e.g. [google/gemma-3n-E4B](https://huggingface.co/google/gemma-3n-E4B).

    Configuration objects inherit from [`Gemma3nVisionConfig`] and can be used to control the model outputs. Read the
    documentation from [`Gemma3nVisionConfig`] for more information.

    Config loads imagenet label descriptions and stores them in `id2label` attribute, `label2id` attribute for default
    imagenet models is set to `None` due to occlusions in the label descriptions.

    Args:
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        do_pooling (`bool`, *optional*, defaults to `False`):
            Whether to do pooling for the last_hidden_state in `TimmWrapper` or not.
        architecture (`str`, *optional*, defaults to `"mobilenetv5_300m_enc"`):
            Determines vision architecture for TimmWrapper.
        hidden_size (`int`, *optional*, defaults to 2048):
            Dimension of the hidden representations.
        vocab_size (`int`, *optional*, defaults to 128):
            Vocabulary size of the additional hard-token embeddings for vision model.
        vocab_offset (`int`, *optional*, defaults to 262144):
            Offset between the tokenizer vocab index for the token ids embedded by `Gemma3nMultimodalEmbedder` and the
            0-indexed `Gemma3nMultimodalEmbedder.embedding` table.
        rms_norm_eps (`float`, *optional*, defaults to 1e-06):
            The epsilon used by the rms normalization layers.
        model_args (`dict`, *optional*):
            Additional keyword arguments forwarded to the timm backbone constructor (see `TimmWrapperConfig`).

    Example:

    ```python
    >>> from transformers import Gemma3nVisionConfig, TimmWrapper

    >>> # Initializing a TimmWrapper gemma3n_vision-E4B-style configuration
    >>> configuration = Gemma3nVisionConfig()

    >>> # Initializing a gemma3n_vision-E4B-style TimmWrapper from the configuration
    >>> model = TimmWrapper(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```
    """

    model_type = "gemma3n_vision"

    def __init__(
        self,
        initializer_range: float = 0.02,
        do_pooling: bool = False,
        architecture: str = "mobilenetv5_300m_enc",
        hidden_size: int = 2048,
        vocab_size: int = 128,
        vocab_offset: int = 262_144,
        rms_norm_eps: float = 1e-06,
        model_args: dict | None = None,
        **kwargs,
    ):
        self.architecture = architecture
        self.initializer_range = initializer_range
        self.do_pooling = do_pooling
        self.hidden_size = hidden_size
        self.vocab_size = vocab_size
        self.vocab_offset = vocab_offset
        self.rms_norm_eps = rms_norm_eps
        super().__init__(**kwargs)
        # Fix: `model_args` was accepted but never stored, silently dropping any
        # caller-supplied timm constructor arguments. Assigned after super().__init__ so the
        # base class cannot clobber it with its own default.
        self.model_args = model_args
class Gemma3nConfig(PreTrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`Gemma3nForConditionalGeneration`]. It is used to
    instantiate a Gemma3nForConditionalGeneration according to the specified arguments, defining the model
    architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of
    Gemma3n-E4B.

    e.g. [google/gemma-3n-E4B](https://huggingface.co/google/gemma-3n-E4B)

    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.

    Args:
        text_config (`Union[Gemma3nTextConfig, dict]`, *optional*):
            The config object of the text backbone.
        vision_config (`Union[AutoConfig, dict]`, *optional*):
            Custom vision config or dict.
        audio_config (`Union[AutoConfig, dict]`, *optional*):
            Custom audio config or dict.
        audio_soft_tokens_per_image (`int`, *optional*, defaults to 188):
            The number of soft tokens per audio clip.
        vision_soft_tokens_per_image (`int`, *optional*, defaults to 256):
            The number of soft tokens per image.
        boi_token_id (`int`, *optional*, defaults to 255999):
            The begin-of-image token index to wrap the image prompt.
        eoi_token_id (`int`, *optional*, defaults to 262144):
            The end-of-image token index to wrap the image prompt.
        image_token_id (`int`, *optional*, defaults to 262145):
            The image token index to encode the image prompt.
        boa_token_id (`int`, *optional*, defaults to 256000):
            The begin-of-audio token index to wrap the audio prompt.
        eoa_token_id (`int`, *optional*, defaults to 262272):
            The end-of-audio token index to wrap the audio prompt.
        audio_token_id (`int`, *optional*, defaults to 262273):
            The audio token index to encode the audio prompt.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        tie_word_embeddings (`bool`, *optional*, defaults to `True`):
            Whether to tie weight embeddings

    Example:

    ```python
    >>> from transformers import Gemma3nForConditionalGeneration, Gemma3nConfig, Gemma3nTextConfig

    >>> # Initializing a MobileNet vision config, which is loaded from TIMM
    >>> vision_config = Gemma3nVisionConfig()

    >>> # Initializing a Gemma3n Audio config
    >>> audio_config = Gemma3nAudioConfig()

    >>> # Initializing a Gemma3n Text config
    >>> text_config = Gemma3nTextConfig()

    >>> # Initializing a Gemma3n gemma-3-4b style configuration
    >>> configuration = Gemma3nConfig(text_config, vision_config, audio_config)

    >>> # Initializing a model from the gemma-3-4b style configuration
    >>> model = Gemma3nTextConfig(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "gemma3n"
    sub_configs = {
        "text_config": Gemma3nTextConfig,
        "vision_config": Gemma3nVisionConfig,
        "audio_config": Gemma3nAudioConfig,
    }

    def __init__(
        self,
        text_config: Gemma3nTextConfig | dict[str, Any] | None = None,
        vision_config: Gemma3nVisionConfig | dict[str, Any] | None = None,
        audio_config: Gemma3nAudioConfig | dict[str, Any] | None = None,
        audio_soft_tokens_per_image: int | None = 188,
        vision_soft_tokens_per_image: int | None = 256,
        boi_token_id: int | None = 255_999,
        eoi_token_id: int | None = 262_144,
        image_token_id: int | None = 262_145,
        boa_token_id: int | None = 256_000,
        eoa_token_id: int | None = 262_272,
        audio_token_id: int | None = 262_273,
        initializer_range: float | None = 0.02,
        tie_word_embeddings: bool | None = True,
        **kwargs,
    ):
        # Coerce each sub-config: a ready config object passes through, a plain dict is
        # expanded into the matching config class, and None falls back to defaults.
        if text_config is None:
            text_config = Gemma3nTextConfig()
            logger.info("text_config is None. Using default Gemma3nTextConfig.")
        elif isinstance(text_config, dict):
            text_config = Gemma3nTextConfig(**text_config)

        if vision_config is None:
            vision_config = Gemma3nVisionConfig()
            logger.info("vision_config is None. Using default Gemma3nVisionConfig.")
        elif isinstance(vision_config, dict):
            vision_config = Gemma3nVisionConfig(**vision_config)

        if audio_config is None:
            audio_config = Gemma3nAudioConfig()
            logger.info("audio_config is None. Using default Gemma3nAudioConfig.")
        elif isinstance(audio_config, dict):
            audio_config = Gemma3nAudioConfig(**audio_config)

        self.text_config = text_config
        self.vision_config = vision_config
        self.audio_config = audio_config

        # Soft-token budgets per modality item.
        self.audio_soft_tokens_per_image = audio_soft_tokens_per_image
        self.vision_soft_tokens_per_image = vision_soft_tokens_per_image
        # Special token ids delimiting/encoding image and audio spans in the prompt.
        self.boi_token_id = boi_token_id
        self.eoi_token_id = eoi_token_id
        self.image_token_id = image_token_id
        self.boa_token_id = boa_token_id
        self.eoa_token_id = eoa_token_id
        self.audio_token_id = audio_token_id

        self.initializer_range = initializer_range
        self.tie_word_embeddings = tie_word_embeddings
        super().__init__(**kwargs)
@dataclass
@auto_docstring
class Gemma3nAudioEncoderModelOutput(BaseModelOutputWithPooling):
    r"""
    audio_mel_mask (`torch.BoolTensor`, *optional*):
        A torch.BoolTensor of shape `(batch_size, num_frames)`
    """

    # Per-frame boolean mask over the mel frames; None when not provided by the encoder.
    audio_mel_mask: torch.BoolTensor | None = None
class Gemma3nModelOutputWithPast(PaligemmaModelOutputWithPast):
    r"""
    past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
        It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
        Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
        `past_key_values` input) to speed up sequential decoding.
    image_hidden_states (`torch.FloatTensor`, *optional*):
        A `torch.FloatTensor` of size `(batch_size, num_images, sequence_length, hidden_size)`.
        image_hidden_states of the model produced by the vision encoder and after projecting the last hidden state.
    audio_hidden_states (`torch.FloatTensor`, *optional*):
        A `torch.FloatTensor` of size `(batch_size, num_images, sequence_length, hidden_size)`.
        audio_hidden_states of the model produced by the audio encoder and after projecting the last hidden state.
    """

    # Projected audio-encoder features, the audio counterpart of the inherited
    # `image_hidden_states` field. NOTE(review): the `num_images` dimension in the
    # docstring above looks copy-pasted from the vision field — verify the actual
    # shape against the audio tower before relying on it.
    audio_hidden_states: torch.FloatTensor | None = None
class Gemma3nCausalLMOutputWithPast(PaliGemmaCausalLMOutputWithPast):
    r"""
    loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
        Language modeling loss (for next-token prediction).
    logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.text_config.vocab_size)`):
        Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
    past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
        It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
        Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
        `past_key_values` input) to speed up sequential decoding.
    image_hidden_states (`torch.FloatTensor`, *optional*):
        A `torch.FloatTensor` of size `(batch_size, num_images, sequence_length, hidden_size)`.
        image_hidden_states of the model produced by the vision encoder after projecting last hidden state.
    audio_hidden_states (`torch.FloatTensor`, *optional*):
        A `torch.FloatTensor` of size `(batch_size, num_images, sequence_length, hidden_size)`.
        audio_hidden_states of the model produced by the audio encoder and after projecting the last hidden state.
    """

    # Projected audio-encoder features, the audio counterpart of the inherited
    # `image_hidden_states` field. NOTE(review): `num_images` in the docstring
    # above is presumably a copy-paste from the vision field — confirm shape.
    audio_hidden_states: torch.FloatTensor | None = None
class Gemma3nRMSNorm(Gemma3RMSNorm):
    """RMS normalization used throughout Gemma3n.

    Unlike the parent class, the learnable per-dimension scale can be disabled
    (`with_scale=False`); a constant scalar `1.0` buffer then stands in for the
    weight, so `forward` is identical in both configurations.
    """

    def __init__(self, dim: int, eps: float = 1e-6, with_scale: bool = True):
        super().__init__(dim, eps=eps)
        # Drop the parent's weight; it is re-created below depending on `with_scale`.
        del self.weight
        self.with_scale = with_scale
        if with_scale:
            self.weight = nn.Parameter(torch.ones(dim))
        else:
            # Non-learnable unit scale keeps forward() branch-free.
            self.register_buffer("weight", torch.tensor(1.0), persistent=False)

    def _norm(self, x):
        # Root-mean-square normalization over the last dimension.
        rms = torch.sqrt(x.pow(2).mean(-1, keepdim=True) + self.eps)
        return x / rms

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Llama does x.to(float16) * w whilst Gemma2 is (x * w).to(float16)
        # See https://github.com/huggingface/transformers/pull/29402
        normed = self._norm(x.float())
        scaled = normed * self.weight.float()
        return scaled.type_as(x)
# ==== Audio Encoder ====
class Gemma3nAudioRelativePositionEmbedding(nn.Module):
    """Relative-position bias for the chunked audio self-attention.

    Combines a content term (query x key) with a learned relative-position term
    built from sinusoidal embeddings of the offsets in
    ``[max_backward, ..., -max_forward]``, using a Transformer-XL-style
    "relative shift" to align the position term with the key context.
    """

    def __init__(self, config: Gemma3nAudioConfig):
        super().__init__()
        self.config = config

        self.num_heads = self.config.conf_num_attention_heads
        self.channels = self.config.hidden_size
        self.head_dim = self.channels // self.num_heads
        # Left context excludes the current position, hence the -1.
        self.max_backward = max(0, self.config.conf_attention_context_left - 1)
        self.max_forward = self.config.conf_attention_context_right

        # Projects a sinusoidal position embedding to one vector per head.
        self.pos_proj = nn.Linear(self.channels, self.num_heads * self.head_dim, bias=False)

        # Standard sinusoidal timescales, precomputed once as a non-persistent buffer.
        min_timescale = 1.0
        max_timescale = 1.0e4
        num_timescales = self.channels // 2
        log_timescale_increment = math.log(float(max_timescale) / float(min_timescale)) / max(num_timescales - 1, 1)
        inv_timescales = min_timescale * torch.exp(torch.arange(num_timescales) * -log_timescale_increment)
        self.register_buffer(
            "inv_timescales",
            inv_timescales.float().unsqueeze(0).unsqueeze(0),
            persistent=False,
        )

    def _get_timing_signal_1d_pos(self, position: torch.Tensor, dtype: torch.dtype) -> torch.Tensor:
        """Sinusoidal embedding of (possibly negative) positions.

        Args:
            position: Integer positions of shape [1, F_span].
            dtype: Output dtype.

        Returns:
            Tensor of shape [1, F_span, channels] (sin half then cos half).
        """
        position = position.float().unsqueeze(-1)
        scaled_time = position * self.inv_timescales.to(device=position.device, dtype=torch.float32)
        timing_signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], dim=-1)
        return timing_signal.type(dtype)

    def _relative_shift(
        self,
        term_bd_before_shift: torch.Tensor,
        batch_size: int,
        num_heads: int,
        num_query_blocks: int,
        query_block_size: int,
        key_context_size: int,
        max_span_plus_1: int,
    ) -> torch.Tensor:
        """Performs the relative shift.

        Realigns per-offset position scores to per-key-position scores via the
        classic pad-reshape-slice trick (no gather needed).

        Args:
            term_bd_before_shift: Tensor of shape [B, N, U, W, F_span]. batch_size
              (B), num_heads (N), num_query_blocks (U), query_block_size (W),
              key_context_size (C = W+L+R), max_span_plus_1 (F_span = L+R+1).

        Returns:
            Tensor of shape [B, N, U, W, C].
        """
        # term_bd_before_shift shape: [B, N, U, W, F_span]
        # Target shape after shift: [B, N, U, W, C]
        # Padding amount for the last dimension (F_span) to become (C + 1)
        # C = key_context_size
        # F_span = max_span_plus_1
        pad_amount_last_dim = (key_context_size + 1) - max_span_plus_1

        # PyTorch F.pad expects (pad_left, pad_right, pad_top, pad_bottom ...)
        # We only pad the last dimension on the right.
        padding_tuple = (0, pad_amount_last_dim)

        term_bd_padded = nn.functional.pad(term_bd_before_shift, padding_tuple)
        # Shape after pad: [B, N, U, W, C+1]

        # Reshape for slicing (emulating JAX's behavior)
        # [B, N, U, W * (C+1)]
        term_bd_reshaped = term_bd_padded.reshape(
            (
                batch_size,
                num_heads,
                num_query_blocks,
                query_block_size * (key_context_size + 1),
            )
        )

        # Slice to effective [B, N, U, W * C]
        term_bd_sliced = term_bd_reshaped[:, :, :, : query_block_size * key_context_size]

        # Reshape back to [B, N, U, W, C]
        term_bd_shifted = term_bd_sliced.reshape(
            (
                batch_size,
                num_heads,
                num_query_blocks,
                query_block_size,
                key_context_size,
            )
        )
        return term_bd_shifted

    def forward(self, queries: torch.Tensor, keys: torch.Tensor) -> torch.Tensor:
        """Return attention logits (content term + shifted position term), shape [B, N, U, W, C]."""
        # queries: [B, U, W, N, H] (batch, num_query_blocks, query_block_size, num_heads, head_dim)
        # keys: [B, U, C, N, H] (batch, num_query_blocks, key_context_size, num_heads, head_dim)
        # C = W + L + R (key_context_size)
        # F_span = L + R + 1 (max_span + 1)
        batch_size, num_query_blocks, query_block_size, num_heads, head_dim = queries.shape
        _, _, key_context_size, _, _ = keys.shape

        # Relative positions for sinusoidal embeddings: [L, L-1, ..., -R]
        # Length is L+R+1 = self.max_span + 1
        pos_indices = torch.arange(self.max_backward, -self.max_forward - 1, -1, device=queries.device).unsqueeze(
            0
        )  # Shape [1, F_span]

        max_span_plus_1 = pos_indices.shape[1]  # F_span

        sin_emb_timing_signal = self._get_timing_signal_1d_pos(
            pos_indices, dtype=queries.dtype
        )  # Shape [1, F_span, self.channels]

        # Project sinusoidal embeddings: [1, F_span, self.channels] -> [1, F_span, N*H]
        projected_sin_emb = self.pos_proj(sin_emb_timing_signal)
        # Reshape to [1, F_span, N, H] then squeeze to [F_span, N, H]
        sin_emb = projected_sin_emb.reshape(1, max_span_plus_1, self.num_heads, self.head_dim).squeeze(
            0
        )  # Shape [F, N, H]

        # term_ac: Query-Key content interaction
        # queries: [B, U, W, N, H] -> permute to [B, N, U, W, H] for matmul
        # keys: [B, U, C, N, H] -> permute to [B, N, U, H, C] for matmul
        queries_p = queries.permute(0, 3, 1, 2, 4)  # [B, N, U, W, H]
        keys_p_t = keys.permute(0, 3, 1, 4, 2)  # [B, N, U, H, C]
        term_ac = torch.matmul(queries_p, keys_p_t)  # [B, N, U, W, C]

        # term_bd: Query-Position interaction
        # Original einsum: term_bd_unshifed = torch.einsum('buwnh,fnh->bnuwf', queries, sin_emb)
        # queries shape: [B, U, W, N, H]
        # sin_emb shape: [F, N, H]
        # Target output shape: [B, N, U, W, F]

        # Permute queries to [B, N, U, W, H] for easier broadcasting with sin_emb
        q_permuted = queries.permute(0, 3, 1, 2, 4)

        # Permute sin_emb to [N, H, F] to prepare for matmul
        # sin_emb original is [F, N, H]
        s_permuted = sin_emb.permute(1, 2, 0)  # Shape: [N, H, F]

        # Reshape queries for matmul: [B, N, U*W, H]
        q_reshaped = q_permuted.reshape(batch_size, num_heads, num_query_blocks * query_block_size, head_dim)

        # Perform matmul: [B, N, U*W, H] @ [N, H, F]
        # s_permuted ([N, H, F]) will be broadcast to [B, N, H, F]
        # Result: [B, N, U*W, F]
        term_bd_unshifed_matmul = torch.matmul(q_reshaped, s_permuted)

        # Reshape to target [B, N, U, W, F]
        term_bd_unshifed = term_bd_unshifed_matmul.reshape(
            batch_size,
            num_heads,
            num_query_blocks,
            query_block_size,
            max_span_plus_1,
        )

        # Apply relative shift to term_bd_unshifed
        term_bd_shifted = self._relative_shift(
            term_bd_unshifed,
            batch_size,
            num_heads,
            num_query_blocks,
            query_block_size,
            key_context_size,
            max_span_plus_1,
        )  # Shape [B, N, U, W, C]

        return term_ac + term_bd_shifted
class Gemma3nAudioAttention(nn.Module):
    """Local (chunk-wise) self-attention over audio frames.

    The sequence is split into non-overlapping chunks of
    ``conf_attention_chunk_size`` frames; each chunk attends to a fixed context
    of ``max_past_horizon`` frames to the left and ``max_future_horizon`` frames
    to the right, with relative-position biases and soft-capped logits.
    """

    def __init__(self, config: Gemma3nAudioConfig):
        super().__init__()
        self.config = config

        self.num_heads = self.config.conf_num_attention_heads
        self.hidden_size = self.config.hidden_size
        self.head_dim = self.hidden_size // self.num_heads

        self.chunk_size = self.config.conf_attention_chunk_size
        self.max_future_horizon = self.config.conf_attention_context_right
        # Left context excludes the current frame, hence the -1.
        self.max_past_horizon = max(0, self.config.conf_attention_context_left - 1)
        self.attention_logits_soft_cap = self.config.conf_attention_logit_cap
        self.context_size = self.chunk_size + self.max_past_horizon + self.max_future_horizon

        self.relative_position_embedding = Gemma3nAudioRelativePositionEmbedding(config)
        # Learned per-dimension query scale, passed through softplus in forward().
        self.per_dim_scale = nn.Parameter(torch.zeros((self.head_dim,)))

        self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
        self.k_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
        self.v_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)

        # Normalized so that with per_dim_scale at its zero init,
        # q_scale * softplus(0) equals exactly head_dim**-0.5.
        q_scale = self.head_dim**-0.5
        r_softplus_0 = 1.0 / torch.nn.functional.softplus(torch.tensor(0.0))
        self.register_buffer("q_scale", (q_scale * r_softplus_0).clone().detach(), persistent=False)

        local_causal_valid_mask = self.create_local_causal_valid_mask()
        self.register_buffer("local_causal_valid_mask", local_causal_valid_mask, persistent=False)

        self.register_buffer(
            "softcap",
            torch.tensor(self.attention_logits_soft_cap).float(),
            persistent=False,
        )

    def create_local_causal_valid_mask(self):
        """Build the [chunk_size, context_size] window mask; True marks positions a query may attend to."""
        lower_causal_mask = torch.tril(
            torch.ones((self.context_size, self.chunk_size), dtype=torch.bool),
            diagonal=0,
        ).T
        upper_causal_mask = torch.tril(
            torch.ones((self.chunk_size, self.context_size), dtype=torch.bool),
            diagonal=self.max_past_horizon + self.max_future_horizon,
        )
        local_causal_valid_mask = torch.ones((self.chunk_size, self.context_size), dtype=torch.bool)
        local_causal_valid_mask = local_causal_valid_mask * lower_causal_mask * upper_causal_mask
        return local_causal_valid_mask

    def _pad_dim1(self, x: torch.Tensor, pad_left: int, pad_right: int) -> torch.Tensor:
        """Zero-pad `x` along dim 1 (time) by `pad_left` / `pad_right` positions."""
        batch, _, *tail_shape = x.shape
        left = x.new_zeros((batch, pad_left, *tail_shape))
        right = x.new_zeros((batch, pad_right, *tail_shape))
        x = torch.cat([left, x, right], dim=1)
        return x

    def _convert_to_block(self, hidden_states: torch.Tensor) -> torch.Tensor:
        """Turns a sequence to non overlapping blocks.

        Args:
            hidden_states: a tensor of [batch, time, ...].

        Returns:
            A tensor of [batch, num_blocks, block_size, ...], with necessary
            paddings,
            where output[:, i, ...] are x[:, i*block_size:(i+1)*block_size, ...].
        """
        shape = hidden_states.shape
        b, t = shape[:2]
        num_blocks = (t + self.chunk_size - 1) // self.chunk_size

        # Pad the tail so the time dimension divides evenly into chunks.
        if (padding_len := num_blocks * self.chunk_size - t) > 0:
            hidden_states = self._pad_dim1(hidden_states, 0, padding_len)

        permute_dims = (b, num_blocks, self.chunk_size) + shape[2:]
        hidden_states = hidden_states.reshape(permute_dims).contiguous()
        return hidden_states

    def _extract_block_context(self, hidden_states: torch.Tensor) -> torch.Tensor:
        """Extracts temporal context for every block.

        Args:
            hidden_states: a tensor of [batch, time, ...].

        Returns:
            A tensor of [batch, num_blocks, context_size, ...], with necessary
            paddings,
            where context_size = block_size + left_context + right_context,
            and output[:, i, ...] are x[:, start-left_context:end+right_context,
            ...],
            start = i * block_size, end = (i + 1) * block_size.
        """
        pad_left = self.max_past_horizon
        # The JAX equivalent padding for signal.frame with pad_mode='valid' is
        # (left_context, right_context + block_size - 1) on the time dimension.
        # PyTorch's _pad_dim1 applies padding symmetrically if only one value is given,
        # or (pad_dim_start, pad_dim_end) if two are given.
        # Our _pad_dim1(x, pad_left, pad_right) pads dim -2 (time for [B,T,N,H])
        # or dim 1 (time for [B,T]).
        # The current pad_right calculation matches the JAX effective padding.
        pad_right = self.max_future_horizon + self.chunk_size - 1
        hidden_states = self._pad_dim1(hidden_states, pad_left, pad_right)

        frame_len = self.context_size
        frame_step = self.chunk_size

        # Directly use unfold without the subframe_factor logic
        # x.unfold(dimension, size, step)
        # dimension=1 (time dimension, assuming x is [B, T_padded, ...])
        # size=frame_len (context_size)
        # step=frame_step (chunk_size)
        x_unfolded = hidden_states.unfold(dimension=1, size=frame_len, step=frame_step)

        # If x was [B, T_padded], x_unfolded is [B, num_blocks, frame_len]
        # If x was [B, T_padded, N, H], x_unfolded is [B, num_blocks, N, H, frame_len]
        # We want to match JAX's typical output for such operations which might be
        # [B, num_blocks, frame_len, N, H] if N, H are present.
        # The relative_position_embedding expects keys as [B, U, C, N, H].
        # If x_unfolded is [B, U, N, H, C(frame_len)], we need to move C.
        if hidden_states.ndim > 2 and x_unfolded.ndim > 3:  # Check if inner dimensions (like N, H) exist
            # Current shape after unfold for [B, T_pad, N, H] is [B, U, N, H, C]
            # Target shape for keys in RPE: [B, U, C, N, H]
            x_unfolded = torch.movedim(x_unfolded, source=-1, destination=2)

        return x_unfolded.contiguous()

    def forward(self, hidden_states: torch.Tensor, mask: torch.BoolTensor) -> torch.Tensor:
        """Chunked local attention.

        Args:
            hidden_states: [B, T, hidden_size] audio frames.
            mask: [B, T] boolean mask; True marks padded (invalid) frames.

        Returns:
            Per-head context vectors of shape [B, T, num_heads, head_dim].
        """
        # sl.Dense uses jax.numpy.einsum("...a,abcd->...bcd") and jax.numpy.select()
        qkv_shape = (*hidden_states.shape[:-1], self.num_heads, self.head_dim)
        query_states = self.q_proj(hidden_states).reshape(qkv_shape).contiguous()
        key_states = self.k_proj(hidden_states).reshape(qkv_shape).contiguous()
        value_states = self.v_proj(hidden_states).reshape(qkv_shape).contiguous()

        # Scale queries by the base scale times the learned per-dimension softplus scale.
        per_dim_scale_sp = torch.nn.functional.softplus(self.per_dim_scale)

        broadcast_shape = (1, 1, 1, self.head_dim)
        per_dim_scale_sp_broadcast = per_dim_scale_sp.view(broadcast_shape)
        query_states = query_states * self.q_scale * per_dim_scale_sp_broadcast

        batch_size, q_time = query_states.shape[:2]

        query_blocks = self._convert_to_block(query_states)
        key_blocks = self._extract_block_context(key_states)
        value_blocks = self._extract_block_context(value_states)
        num_query_blocks = query_blocks.shape[1]

        # 1. Create a mask indicating originally valid positions.
        original_valid_mask = ~mask  # True for valid, False for padded

        # 2. Extract blocks from this validity mask.
        extracted_valid_mask_blocks = self._extract_block_context(original_valid_mask)

        # If subframe_factor was used in _extract_block_context for a [B, T] input mask,
        # the shape might be [B, U, C/SF, SF]. Reshape to [B, U, C].
        # batch_size and num_query_blocks are known from query_blocks.
        # self.context_size is C.
        if (
            extracted_valid_mask_blocks.ndim == 4
            and extracted_valid_mask_blocks.shape[2] * extracted_valid_mask_blocks.shape[3] == self.context_size
        ):
            extracted_valid_mask_blocks = extracted_valid_mask_blocks.reshape(
                batch_size, num_query_blocks, self.context_size
            )
        # After potential reshape, ensure it's [B, U, C] if it was from a [B,T] mask.
        # This assertion might be too strict if _extract_block_context handles higher-rank inputs differently,
        # but for the mask case, this should hold.
        if extracted_valid_mask_blocks.shape != (
            batch_size,
            num_query_blocks,
            self.context_size,
        ):
            raise ValueError(
                "Shape of extracted_valid_mask_blocks"
                f" {extracted_valid_mask_blocks.shape} is not ({batch_size},"
                f" {num_query_blocks}, {self.context_size}) after potential reshape."
            )

        # 3. Expand dimensions for broadcasting with logits and causal mask.
        # Target shape for broadcasting with logits [B,N,U,W,C]
        # extracted_valid_mask_blocks to [B, 1, U, 1, C]
        condition_from_input_validity = extracted_valid_mask_blocks.unsqueeze(1).unsqueeze(-2)

        # self.local_causal_valid_mask is [W, C], True where allowed by local window.
        # Expand to [1, 1, 1, W, C]
        condition_from_causality = self.local_causal_valid_mask.unsqueeze(0).unsqueeze(0).unsqueeze(0)

        # 4. Combine the two conditions.
        # final_condition will be True where a key is *both* originally valid *and* causally accessible.
        # Broadcasts to [B, 1, U, W, C]
        final_condition_for_where = torch.logical_and(
            condition_from_input_validity,
            condition_from_causality.to(condition_from_input_validity.device),  # Ensure same device
        )

        # Embed queries and keys
        logits = self.relative_position_embedding(query_blocks, key_blocks)

        # Apply attention logit softcap
        # Ensure softcap is on the same device as logits
        softcap_val = self.softcap.to(logits.device)
        logits = logits / softcap_val
        logits = torch.tanh(logits)
        logits = logits * softcap_val

        # Apply the combined mask.
        # final_condition_for_where will broadcast with logits [B,N,U,W,C]
        logits = torch.where(final_condition_for_where, logits, torch.finfo(logits.dtype).min)
        probabilities = torch.nn.functional.softmax(logits, dim=-1, dtype=torch.float32).to(dtype=value_blocks.dtype)

        # context_vectors is adapted from jax.numpy.einsum("BNuwc,BucNH->BuwNH", ...)
        b_dim, n_dim, u_dim, w_dim, c_dim = probabilities.shape
        h_dim = value_blocks.shape[-1]
        prob_bun = probabilities.permute(0, 2, 1, 3, 4).reshape(-1, w_dim, c_dim)
        v_bun = value_blocks.permute(0, 1, 3, 2, 4).reshape(-1, c_dim, h_dim)
        result_bmm = torch.bmm(prob_bun, v_bun)
        context_vectors = result_bmm.reshape(b_dim, u_dim, n_dim, w_dim, h_dim).permute(0, 1, 3, 2, 4)
        context_vectors = context_vectors.reshape(
            (
                batch_size,
                num_query_blocks * self.chunk_size,
                self.num_heads,
                self.head_dim,
            )
        )
        # Trim the tail padding added by _convert_to_block back to the true length.
        context_vectors = context_vectors[:, :q_time]

        return context_vectors
class Gemma3nAudioCumulativeGroupNorm(nn.Module):
    """Group normalization with statistics accumulated along the time axis.

    For input `[B, T, *feature_dims, C]`, the mean and variance used at time
    step `t` are computed over all feature elements of steps `0..t`, i.e. a
    cumulative variant of GroupNorm with a single group — mirroring JAX's
    `GroupNormalization` with `num_groups=1` and `cumulative=True`.

    A learnable per-channel scale is applied after normalization. Note that
    this implementation treats every element as valid: it keeps the all-ones
    "mask" of the original masked JAX formulation but exposes no mask input.
    """

    def __init__(
        self,
        num_channels: int,  # Size of the trailing channel dimension.
        feature_dims: Sequence[int],  # Non-channel feature dims, e.g. (H, W) for [B,T,H,W,C].
        eps: float = 1e-3,
    ):
        super().__init__()
        self.num_channels = num_channels
        self.feature_dims = tuple(feature_dims)
        self.eps = eps

        # Per-channel scale applied on the last dimension.
        self.weight = nn.Parameter(torch.ones(num_channels))

        # Statistics are reduced over every dimension except batch (0) and time (1).
        self.reduction_axes = tuple(range(2, 2 + len(self.feature_dims) + 1))

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        """Normalize `hidden_states` ([B, T, *feature_dims, C]) cumulatively over time.

        Returns:
            Tensor of the same shape and dtype as the input.

        Raises:
            ValueError: if the trailing dimensions do not match
                `feature_dims + (num_channels,)`.
        """
        expected_input_suffix = self.feature_dims + (self.num_channels,)
        if hidden_states.shape[2:] != expected_input_suffix:
            raise ValueError(
                f"Input tensor shape suffix {hidden_states.shape[2:]} does not match expected"
                f" suffix (feature_dims + num_channels) {expected_input_suffix}"
            )

        input_dtype = hidden_states.dtype
        # Accumulate statistics in float32 for numerical stability.
        values = hidden_states.to(torch.float32)

        # Every position counts as valid; the mask is kept only for parity with
        # the masked JAX implementation (it is always all-ones here).
        valid = torch.ones_like(values, dtype=torch.float32)

        # Cumulative first moment: per-step feature sums, then a running sum over time.
        step_sums = values.sum(dim=self.reduction_axes, keepdim=True)
        running_sums = step_sums.cumsum(dim=1)

        # Running element counts, clamped to avoid division by zero.
        step_counts = valid.sum(dim=self.reduction_axes, keepdim=True)
        running_counts = step_counts.cumsum(dim=1).clamp(min=1.0)

        running_mean = running_sums / running_counts

        # Cumulative second moment around the per-step running mean.
        step_sq_diffs = (values - running_mean).pow(2).sum(dim=self.reduction_axes, keepdim=True)
        running_var = step_sq_diffs.cumsum(dim=1) / running_counts

        # (x - E[x]) / sqrt(Var[x] + eps)
        normed = (values - running_mean) * torch.rsqrt(running_var + self.eps)

        # Per-channel affine scale, broadcast over all leading dimensions.
        scale_shape = [1] * (hidden_states.dim() - 1) + [self.num_channels]
        normed = normed * self.weight.to(torch.float32).view(scale_shape)

        # With an all-ones mask this multiplication is a no-op; kept to mirror
        # the masked variant, which zeroes out padded time steps.
        return (normed * valid).to(input_dtype)
class Gemma3nAudioSSCPConvBlock(nn.Module):
    """A single convolution block of the sub-sample conv projection.

    Pipeline: manual zero-padding -> 2D convolution -> cumulative group norm
    -> ReLU. Padding is applied by hand (rather than by the conv) to match the
    JAX reference behavior.
    """

    def __init__(
        self,
        config: Gemma3nAudioConfig,
        idx: int,
        input_freq_dim: int,  # Unpadded frequency (feature) dimension entering this block.
        manual_padding: tuple[int, int, int, int] = (0, 0, 0, 0),
    ):
        super().__init__()
        self.config = config
        # Layout: (pad_F_left, pad_F_right, pad_T_top, pad_T_bottom).
        self.manual_padding = manual_padding

        # The first block consumes the raw single-channel spectrogram; later
        # blocks consume the previous block's output channels.
        in_channels = 1 if idx == 0 else self.config.sscp_conv_channel_size[idx - 1]
        out_channels = self.config.sscp_conv_channel_size[idx]
        kernel_h, kernel_w = self.config.sscp_conv_kernel_size[idx]
        stride_h, stride_w = self.config.sscp_conv_stride_size[idx]

        self.conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=(kernel_h, kernel_w),  # (kH, kW) operating on (time, frequency)
            stride=(stride_h, stride_w),
            padding=(0, 0),  # padding is applied manually in forward()
            bias=False,
        )

        # Frequency size after convolving the manually padded input; the norm
        # needs it as its single non-channel feature dimension.
        padded_freq = input_freq_dim + self.manual_padding[0] + self.manual_padding[1]
        conv_out_freq = (padded_freq - kernel_w) // stride_w + 1
        self.norm = Gemma3nAudioCumulativeGroupNorm(
            num_channels=out_channels,
            feature_dims=(conv_out_freq,),
            eps=self.config.sscp_conv_group_norm_eps,
        )

        self.activation = nn.ReLU()

    def forward(self, audio_encodings: torch.Tensor) -> torch.Tensor:
        """Apply pad -> conv -> cumulative norm -> ReLU; layout is [B, C, T, F] in and out."""
        # F.pad pads the last two dims in order: frequency first, then time.
        padded = F.pad(audio_encodings, self.manual_padding, mode="constant", value=0.0)
        convolved = self.conv(padded.to(self.conv.weight.dtype))
        # The cumulative norm expects channels last: [B, T, F, C].
        normed = self.norm(convolved.permute(0, 2, 3, 1).contiguous())
        # Back to [B, C, T, F] for the next block.
        return self.activation(normed.permute(0, 3, 1, 2).contiguous())
class Gemma3nAudioSubSampleConvProjection(nn.Module):
    """Two SSCP conv blocks followed by a linear projection into hidden_size.

    Sub-samples the mel spectrogram in time (and frequency), then flattens
    (channels, frequency) per time step and projects to the model width.
    """

    def __init__(self, config: Gemma3nAudioConfig):
        super().__init__()
        self.config = config

        # Pre-compute, per conv layer, the manual padding tuple and the
        # resulting frequency dimension.
        freq_dim = config.input_feat_size
        paddings = []
        freq_outs = []
        for i in range(2):  # two conv layers, per the sscp_conv_* config arrays
            kernel_h, kernel_w = config.sscp_conv_kernel_size[i]
            stride_h, stride_w = config.sscp_conv_stride_size[i]
            # Time axis uses JAX-style 'reverse_causal' padding: (0, kernel - 1).
            # Frequency axis uses a fixed (1, 1), matching the JAX effective
            # padding for the shipped kernel/stride/input configuration; this
            # may need re-evaluation to match generic JAX 'SAME' behavior if
            # those parameters change.
            pad = (1, 1, 0, kernel_h - 1)
            paddings.append(pad)
            # Output frequency dim given the applied padding, kernel and stride
            # (dilation assumed to be 1).
            freq_out = (freq_dim + pad[0] + pad[1] - kernel_w) // stride_w + 1
            freq_outs.append(freq_out)
            freq_dim = freq_out

        self.conv_0 = Gemma3nAudioSSCPConvBlock(
            idx=0,
            input_freq_dim=config.input_feat_size,  # original feature dim
            config=config,
            manual_padding=paddings[0],
        )
        self.conv_1 = Gemma3nAudioSSCPConvBlock(
            idx=1,
            input_freq_dim=freq_outs[0],  # frequency dim produced by conv_0
            config=config,
            manual_padding=paddings[1],
        )
        # Final projection flattens (channels, frequency) into the model width.
        self.input_proj_in_features = config.sscp_conv_channel_size[-1] * freq_outs[-1]
        self.input_proj_linear = nn.Linear(self.input_proj_in_features, self.config.hidden_size, bias=False)

    def forward(self, audio_encodings: torch.Tensor) -> torch.Tensor:
        """[B, T, F_in] mel features -> [B, T_out, hidden_size]."""
        # Add a channel axis: [B, 1, T, F_in].
        features = self.conv_1(self.conv_0(audio_encodings.unsqueeze(1)))
        # features: [B, C_out, T_out, F_out]
        batch, channels, time, freq = features.shape
        # [B, T_out, F_out, C_out] -> flatten (F_out, C_out) per step -> project.
        flattened = features.permute(0, 2, 3, 1).contiguous().view(batch, time, freq * channels)
        return self.input_proj_linear(flattened)
class Gemma3nAudioConformerAttention(nn.Module):
    """Conformer attention sub-block.

    Pipeline: clamp -> pre-norm -> local attention -> output projection ->
    clamp -> post-norm, wrapped in a residual connection around the whole
    sub-block.
    """

    def __init__(self, config: Gemma3nAudioConfig):
        super().__init__()
        self.config = config
        self.post_in_features = self.config.hidden_size
        self.register_buffer("gradient_clipping", torch.tensor(self.config.gradient_clipping), persistent=False)
        self.pre_attn_norm = Gemma3nRMSNorm(self.config.hidden_size)
        self.attn = Gemma3nAudioAttention(config)
        self.post = nn.Linear(self.post_in_features, self.config.hidden_size, bias=False)
        self.post_norm = Gemma3nRMSNorm(self.config.hidden_size)

    def forward(self, audio_encodings: torch.Tensor, audio_mel_mask: torch.BoolTensor) -> torch.Tensor:
        residual = audio_encodings
        clamped = torch.clamp(audio_encodings, -self.gradient_clipping, self.gradient_clipping)
        # Attention returns per-head outputs [B, T, num_heads, head_dim];
        # fold the head dimensions back into hidden_size before projecting.
        attn_out = self.attn(self.pre_attn_norm(clamped), audio_mel_mask)
        batch, time, num_heads, head_dim = attn_out.shape
        projected = self.post(attn_out.reshape(batch, time, num_heads * head_dim))
        projected = torch.clamp(projected, -self.gradient_clipping, self.gradient_clipping)
        return residual + self.post_norm(projected)
class Gemma3nAudioConformerFeedForward(nn.Module):
    """Conformer feed-forward sub-block.

    Pipeline: clamp -> pre-norm -> 4x-wide SiLU MLP -> clamp -> post-norm,
    added back to the residual with the configurable `conf_residual_weight`.
    """

    def __init__(self, config: Gemma3nAudioConfig):
        super().__init__()
        self.config = config
        self.register_buffer("gradient_clipping", torch.tensor(self.config.gradient_clipping), persistent=False)
        self.pre_layer_norm = Gemma3nRMSNorm(self.config.hidden_size)
        self.ffw_layer_1 = nn.Linear(self.config.hidden_size, self.config.hidden_size * 4, bias=False)
        self.ffw_layer_2 = nn.Linear(self.config.hidden_size * 4, self.config.hidden_size, bias=False)
        self.post_layer_norm = Gemma3nRMSNorm(self.config.hidden_size)
        # The sub-block output is weighted by this factor before the residual add.
        self.post_layer_scale = self.config.conf_residual_weight

    def forward(self, audio_encodings: torch.Tensor) -> torch.Tensor:
        residual = audio_encodings
        hidden = torch.clamp(audio_encodings, -self.gradient_clipping, self.gradient_clipping)
        hidden = self.pre_layer_norm(hidden)
        hidden = nn.functional.silu(self.ffw_layer_1(hidden))
        hidden = self.ffw_layer_2(hidden)
        hidden = torch.clamp(hidden, -self.gradient_clipping, self.gradient_clipping)
        return residual + self.post_layer_norm(hidden) * self.post_layer_scale
class Gemma3nAudioConformerLightConv1d(nn.Module):
    """Conformer lightweight-convolution sub-block.

    Pipeline: pre-norm -> GLU gate -> causal depthwise Conv1d -> clamp ->
    norm -> SiLU -> linear, with a residual connection around the whole block.
    """

    def __init__(self, config: Gemma3nAudioConfig):
        super().__init__()
        self.config = config
        self.pre_layer_norm = Gemma3nRMSNorm(self.config.hidden_size, eps=self.config.rms_norm_eps)
        # GLU halves the width again, so the entry projection doubles it first.
        self.linear_start = nn.Linear(self.config.hidden_size, self.config.hidden_size * 2, bias=False)
        self.depthwise_conv1d = nn.Conv1d(
            in_channels=self.config.hidden_size,
            out_channels=self.config.hidden_size,
            kernel_size=self.config.conf_conv_kernel_size,
            stride=1,
            padding=0,  # causal padding is applied manually in forward()
            groups=self.config.hidden_size,  # depthwise
            bias=False,
        )
        self.register_buffer("gradient_clipping", torch.tensor(self.config.gradient_clipping), persistent=False)
        self.conv_norm = Gemma3nRMSNorm(self.config.hidden_size, eps=self.config.rms_norm_eps)
        self.linear_end = nn.Linear(self.config.hidden_size, self.config.hidden_size, bias=False)
        # Left-only padding keeps the convolution causal in time.
        self.causal_padding = self.config.conf_conv_kernel_size - 1

    def forward(self, audio_encodings: torch.Tensor) -> torch.Tensor:
        residual = audio_encodings
        gated = torch.nn.functional.glu(self.linear_start(self.pre_layer_norm(audio_encodings)), dim=-1)
        # Conv1d wants [B, D, T]; pad only on the left (causal), then convolve.
        conv_in = F.pad(gated.permute(0, 2, 1), (self.causal_padding, 0))
        conv_out = self.depthwise_conv1d(conv_in).permute(0, 2, 1)  # back to [B, T, D]
        conv_out = torch.clamp(conv_out, -self.gradient_clipping, self.gradient_clipping)
        hidden = self.linear_end(nn.functional.silu(self.conv_norm(conv_out)))
        return hidden + residual
class Gemma3nAudioConformerBlock(nn.Module):
    """One full conformer block: feed-forward, attention, light conv, feed-forward, final norm."""

    def __init__(self, config: Gemma3nAudioConfig):
        super().__init__()
        self.config = config
        # Creation order matches upstream so parameter-init RNG draws are identical.
        self.ffw_layer_start = Gemma3nAudioConformerFeedForward(self.config)
        self.attention = Gemma3nAudioConformerAttention(self.config)
        self.lconv1d = Gemma3nAudioConformerLightConv1d(self.config)
        self.ffw_layer_end = Gemma3nAudioConformerFeedForward(self.config)
        self.register_buffer("gradient_clipping", torch.tensor(self.config.gradient_clipping), persistent=False)
        self.norm = Gemma3nRMSNorm(self.config.hidden_size)

    def forward(self, audio_encodings: torch.Tensor, audio_mel_mask: torch.BoolTensor) -> torch.Tensor:
        """Apply the block; `audio_mel_mask` is True at padded frames."""
        x = self.ffw_layer_start(audio_encodings)
        x = self.attention(x, audio_mel_mask)
        # Zero out padded frames before the causal conv so padding cannot leak into
        # neighbouring timesteps through the convolution kernel.
        keep = (~audio_mel_mask).unsqueeze(-1).to(x.dtype)
        x = self.lconv1d(x * keep)
        x = self.ffw_layer_end(x)
        x = torch.clamp(x, -self.gradient_clipping, self.gradient_clipping)
        return self.norm(x)
# ==== Language Model ====
class Gemma3nTextScaledWordEmbedding(Gemma3TextScaledWordEmbedding):
    # Inherits the Gemma 3 scaled word embedding unchanged; subclassed only so the
    # modular conversion tooling emits a Gemma3n-named class.
    pass
class Gemma3nTextLaurelBlock(nn.Module):
    """Learned Augmented Residual Layer (LAuReL): a low-rank, normed residual branch."""

    def __init__(self, config: Gemma3nTextConfig):
        super().__init__()
        self.config = config
        # Down-project to `laurel_rank`, then back up to the hidden size.
        self.linear_left = nn.Linear(self.config.hidden_size, self.config.laurel_rank, bias=False)
        self.linear_right = nn.Linear(self.config.laurel_rank, self.config.hidden_size, bias=False)
        self.post_laurel_norm = Gemma3nRMSNorm(self.config.hidden_size, eps=self.config.rms_norm_eps)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        """Return `hidden_states` plus the normed low-rank projection of itself."""
        low_rank = self.linear_right(self.linear_left(hidden_states))
        return hidden_states + self.post_laurel_norm(low_rank)
class Gemma3nTextMLP(Gemma2MLP):
    """Gemma 2 gated MLP with an optional per-layer activation-sparsity gate."""

    def __init__(self, config: Gemma3nTextConfig, layer_idx: int = 0):
        super().__init__(config)
        # Both values are per-layer lists on the Gemma3n text config.
        self.intermediate_size = config.intermediate_size[layer_idx]
        self.activation_sparsity = config.activation_sparsity_pattern[layer_idx]

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        gate = self.gate_proj(hidden_states)
        if self.activation_sparsity > 0.0:
            gate = self._gaussian_topk(gate)
        return self.down_proj(self.act_fn(gate) * self.up_proj(hidden_states))

    def _gaussian_topk(self, inputs: torch.Tensor) -> torch.Tensor:
        """Zero out activations below the Gaussian quantile implied by `activation_sparsity`.

        Mirrors jax.scipy.stats.norm.ppf(): treating each row's activations as roughly
        normal, everything below mean + std * icdf(sparsity) is clipped to zero, keeping
        approximately a (1 - sparsity) fraction of the activations.
        """
        sparsity = torch.tensor(self.activation_sparsity, dtype=torch.float32, device=inputs.device)
        std_multiplier = torch.distributions.normal.Normal(0, 1).icdf(sparsity).type(inputs.dtype)
        mean = torch.mean(inputs, dim=-1, keepdim=True)
        std = torch.std(inputs, dim=-1, keepdim=True, unbiased=False)
        return nn.functional.relu(inputs - (mean + std * std_multiplier))
class Gemma3nTextAltUp(nn.Module):
    """Alternating Updates (AltUp)
    The AltUp module wraps transformer layers. The `predict` step modifies the
    input to the transformer layer, and the `correct` step propagates the output
    of the transformer layer to the sparsely updated dimensions.
    See more in the research paper:
    https://proceedings.neurips.cc/paper_files/paper/2023/file/f2059277ac6ce66e7e5543001afa8bb5-Paper-Conference.pdf
    """
    def __init__(self, config: Gemma3nTextConfig):
        super().__init__()
        self.config = config
        # Learned per-channel scale applied to the corrected active output (see forward()).
        self.correct_output_scale = nn.Parameter(torch.zeros(self.config.hidden_size))
        # Router modalities -> one scalar coefficient per altup input (used in correct()).
        self.correction_coefs = nn.Linear(self.config.altup_num_inputs, self.config.altup_num_inputs, bias=False)
        # Router modalities -> full (num_inputs x num_inputs) mixing matrix (used in predict()).
        self.prediction_coefs = nn.Linear(self.config.altup_num_inputs, self.config.altup_num_inputs**2, bias=False)
        self.modality_router = nn.Linear(self.config.hidden_size, self.config.altup_num_inputs, bias=False)
        self.router_norm = Gemma3nRMSNorm(self.config.hidden_size, eps=self.config.rms_norm_eps)
        self.register_buffer("router_input_scale", torch.tensor(self.config.hidden_size**-1.0), persistent=False)
    def compute_router_modalities(self, x: torch.Tensor) -> torch.Tensor:
        # Norm + scale the input, project to num_inputs logits, squash to (-1, 1).
        # tanh is computed in float32 for stability, then cast back to x's dtype.
        router_inputs = self.router_norm(x) * self.router_input_scale
        routed = self.modality_router(router_inputs)
        return torch.tanh(routed.float()).type_as(x)
    def predict(self, hidden_states: torch.Tensor) -> torch.Tensor:
        """Predicts the output of a layer using a trainable map.
        Args:
            hidden_states: A 4D tensor of shape `[num_altup_inputs, batch_size, num_tokens, hidden_size]` derived by
                stacking the input embeddings and preprocessing the last `num_altup_inputs - 1` matrices.
        Returns:
            A 4D tensor of shape `[num_altup_inputs, batch_size, num_tokens, hidden_size]` containing the predictions.
        """
        # Modalities are computed from the single "active" altup input only.
        modalities = self.compute_router_modalities(hidden_states[self.config.altup_active_idx])
        if self.training and self.config.altup_coef_clip is not None:
            self.prediction_coefs.weight.data.clamp_(-self.config.altup_coef_clip, self.config.altup_coef_clip)
        # Project and then transpose all 2D matrices contained so that mulmat gives the correct result
        all_coefs: torch.Tensor = (
            self.prediction_coefs(modalities)
            .reshape(*modalities.shape[:-1], self.config.altup_num_inputs, self.config.altup_num_inputs)
            .permute(0, 1, 3, 2)
        )
        # permute hidden_states to [batch_size, num_tokens, hidden_size, altup_num_inputs]
        predictions = torch.matmul(hidden_states.permute(1, 2, 3, 0), all_coefs)
        predictions = predictions.permute(3, 0, 1, 2)  # undo the permute
        predictions += hidden_states  # add the original input
        return predictions.contiguous().type_as(hidden_states)
    def correct(self, predictions: torch.Tensor, activated: torch.Tensor) -> torch.Tensor:
        """Corrects the predictions relative to the
        Args:
            predictions: A 4D tensor of shape `[num_altup_inputs, batch_size, num_tokens, hidden_size]` derived by
                stacking the input embeddings and preprocessing the last `num_altup_inputs - 1` matrices.
            activated: A 3D tensor of shape `[batch_size, num_tokens, hidden_size]` containing the activated inputs.
        Returns:
            A 4D tensor of shape `[num_altup_inputs, batch_size, num_tokens, hidden_size]` correcting the original
            predictions relative to the activated input embeddings.
        """
        modalities = self.compute_router_modalities(activated)
        # "innovation" = how far the layer's actual output drifted from the prediction.
        innovation = activated - predictions[self.config.altup_active_idx]  # (batch, num_tokens, hidden_size)
        innovation = innovation.repeat(self.config.altup_num_inputs, 1, 1, 1)  # Repeat on dim0 to match predictions
        if self.training and self.config.altup_coef_clip is not None:
            # Clamp a non-persistent copy of the weight (no in-place data mutation here,
            # unlike predict()) so gradients still flow through the clamp.
            weight = self.correction_coefs.weight.clamp(-self.config.altup_coef_clip, self.config.altup_coef_clip)
            all_coefs = torch.nn.functional.linear(modalities, weight, bias=None) + 1.0
        else:
            all_coefs = self.correction_coefs(modalities) + 1.0
        # all_coefs adapted from jax.numpy.einsum("...p,pi->...i", ...)
        # Permute to (altup_num_inputs, batch_size, num_tokens) as the last dim is a scalar applied to each altup input
        # and expand on dim1 for broadcastability
        all_coefs = all_coefs.permute(2, 0, 1).unsqueeze(-1)
        corrected = torch.mul(innovation, all_coefs)
        corrected += predictions  # add the original input
        return corrected.contiguous().type_as(activated)
    def forward(self, corrected: torch.Tensor) -> torch.Tensor:
        """
        This is only defined as the `forward` so that accelerate hooks can move correctly `correct_output_scale`
        (which is a nn.Parameter, not a Module) between devices when offloading. It is otherwise only used in
        `scale_corrected_output`
        """
        return (corrected.type_as(self.correct_output_scale) * self.correct_output_scale).type_as(corrected)
    def scale_corrected_output(self, corrected: torch.Tensor) -> torch.Tensor:
        """Scales the provided 3D tensor of shape [batch_size, num_tokens, hidden_size]."""
        return self.forward(corrected)
def apply_rotary_pos_emb(x: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor, unsqueeze_dim: int = 1):
    """Applies Rotary Position Embedding to a single tensor.

    Unlike the usual `apply_rotary_pos_emb(q, k, ...)` helper, this variant rotates one
    tensor at a time (Gemma3n queries and keys are rotated separately).

    Args:
        x (`torch.Tensor`): The query or key tensor to rotate.
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            The dimension along which `cos` and `sin` are unsqueezed so they broadcast
            against `x`. Use 1 when `x` is `[batch, heads, seq_len, head_dim]` and 2 when
            `x` is `[batch, seq_len, heads, head_dim]`.

    Returns:
        `torch.Tensor`: `x` rotated by the position embedding.
    """
    cos_b = cos.unsqueeze(unsqueeze_dim)
    sin_b = sin.unsqueeze(unsqueeze_dim)
    return x * cos_b + rotate_half(x) * sin_b
class Gemma3nTextAttention(Gemma3Attention):
    # Gemma 3 attention with (a) a value RMSNorm, (b) no logit soft-capping and unit
    # scaling, and (c) KV sharing: the last `num_kv_shared_layers` layers reuse the
    # keys/values produced by the last non-shared layer of the same attention type.
    def __init__(self, config: Gemma3nTextConfig, layer_idx: int):
        super().__init__(config, layer_idx)
        self.is_causal = True
        del self.attn_logit_softcapping
        self.scaling = 1.0
        # Values are normalized without a learned scale before caching/attention.
        self.v_norm = Gemma3nRMSNorm(dim=config.head_dim, eps=config.rms_norm_eps, with_scale=False)
        first_kv_shared_layer_idx = self.config.num_hidden_layers - self.config.num_kv_shared_layers
        # `> 0` guards the degenerate configs where no layer computes its own KV.
        self.is_kv_shared_layer = layer_idx >= first_kv_shared_layer_idx > 0
        prev_layers = config.layer_types[:first_kv_shared_layer_idx]
        if self.is_kv_shared_layer:
            # For shared layers, find the last non-shared layer of the same type before sharing starts
            self.kv_shared_layer_index = len(prev_layers) - 1 - prev_layers[::-1].index(config.layer_types[layer_idx])
            self.store_full_length_kv = False
        else:
            self.kv_shared_layer_index = None
            # For non-shared layers, store full-length kv if this is the last non-shared layer of its type
            self.store_full_length_kv = layer_idx == len(prev_layers) - 1 - prev_layers[::-1].index(
                config.layer_types[layer_idx]
            )
    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: torch.Tensor = None,
        attention_mask: torch.Tensor | None = None,
        past_key_values: Cache | None = None,
        cache_position: torch.LongTensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple[torch.Tensor, torch.Tensor | None, tuple[torch.Tensor] | None]:
        # Returns (attn_output, attn_weights); attn_weights may be None depending on the
        # attention backend.
        input_shape = hidden_states.shape[:-1]
        hidden_shape = (*input_shape, -1, self.config.head_dim)
        cos, sin = position_embeddings
        query_states = self.q_proj(hidden_states).view(hidden_shape)
        query_states = self.q_norm(query_states)
        # RoPE is applied in [B, T, H, D] layout (unsqueeze_dim=2), then heads are moved
        # to dim 1 for the attention kernel.
        query_states = apply_rotary_pos_emb(query_states, cos, sin, unsqueeze_dim=2)
        query_states = query_states.transpose(1, 2)
        # For layers with shared KV (from kv sharing point onwards), we reuse the same keys/values states as the last non-sharing layer
        if self.is_kv_shared_layer and past_key_values is not None:
            key_states, value_states = past_key_values.shared_layers[self.kv_shared_layer_index]
            # Device of past layer may be different from current one
            key_states = key_states.to(query_states.device)
            value_states = value_states.to(query_states.device)
        else:
            key_states = self.k_proj(hidden_states).view(hidden_shape)
            key_states = self.k_norm(key_states)
            key_states = apply_rotary_pos_emb(key_states, cos, sin, unsqueeze_dim=2)
            key_states = key_states.transpose(1, 2)
            value_states = self.v_proj(hidden_states).view(hidden_shape)
            value_states = self.v_norm(value_states)
            value_states = value_states.transpose(1, 2)
        if past_key_values is not None:
            # sin and cos are specific to RoPE models; cache_position needed for the static cache
            cache_kwargs = {
                "sin": sin,
                "cos": cos,
                "cache_position": cache_position,
                "sliding_window": self.sliding_window,
            }
            # Shared layers never write to the regular cache; they only read shared KV.
            if not self.is_kv_shared_layer:
                key_states, value_states = past_key_values.update(
                    key_states, value_states, self.layer_idx, cache_kwargs
                )
                # Publish full-length KV for downstream shared layers of the same type.
                if self.store_full_length_kv:
                    if not hasattr(past_key_values, "shared_layers"):
                        past_key_values.shared_layers = {}
                    past_key_values.shared_layers[self.layer_idx] = key_states, value_states
        attention_interface: Callable = ALL_ATTENTION_FUNCTIONS.get_interface(
            self.config._attn_implementation, eager_attention_forward
        )
        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=self.attention_dropout if self.training else 0.0,
            scaling=self.scaling,
            sliding_window=self.sliding_window,
            **kwargs,
        )
        attn_output = attn_output.reshape(*input_shape, -1).contiguous()
        attn_output = self.o_proj(attn_output)
        return attn_output, attn_weights
class Gemma3nTextDecoderLayer(Gemma3DecoderLayer):
    # Gemma 3 decoder layer wrapped in AltUp (predict -> attention/MLP on the active
    # slice -> correct), augmented with a LAuReL residual branch and gated per-layer
    # input embeddings.
    def __init__(self, config: Gemma3nTextConfig, layer_idx: int):
        super().__init__(config, layer_idx)
        self.mlp = Gemma3nTextMLP(config, layer_idx=layer_idx)
        self.hidden_size_per_layer_input = config.hidden_size_per_layer_input
        self.act_fn = ACT2FN[config.hidden_activation]
        self.altup = Gemma3nTextAltUp(config)
        self.laurel = Gemma3nTextLaurelBlock(config)
        self.self_attn = Gemma3nTextAttention(config, layer_idx)
        # Gate/project the per-layer input embedding into and out of the hidden stream.
        self.per_layer_input_gate = nn.Linear(self.hidden_size, self.hidden_size_per_layer_input, bias=False)
        self.per_layer_projection = nn.Linear(self.hidden_size_per_layer_input, self.hidden_size, bias=False)
        self.post_per_layer_input_norm = Gemma3nRMSNorm(self.hidden_size, eps=config.rms_norm_eps)
    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: torch.Tensor = None,
        per_layer_input: torch.Tensor = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: Cache | None = None,
        cache_position: torch.LongTensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple[torch.Tensor, tuple[torch.FloatTensor, torch.FloatTensor] | None]:
        # `hidden_states` is the stacked AltUp state: [num_altup_inputs, B, T, D].
        # Returns the corrected stack with the same shape.
        predictions = self.altup.predict(hidden_states)
        active_prediction = predictions[self.config.altup_active_idx]
        active_prediction_normed = self.input_layernorm(active_prediction)
        laurel_output = self.laurel(active_prediction_normed)
        attn, _ = self.self_attn(
            hidden_states=active_prediction_normed,
            attention_mask=attention_mask,
            position_ids=position_ids,
            position_embeddings=position_embeddings,
            past_key_values=past_key_values,
            cache_position=cache_position,
            **kwargs,
        )
        attn = self.post_attention_layernorm(attn)
        attn_gated = active_prediction + attn
        # Average the attention residual with the LAuReL branch (1/sqrt(2) keeps scale).
        attn_laurel = (attn_gated + laurel_output) / math.sqrt(2)
        attn_norm = self.pre_feedforward_layernorm(attn_laurel)
        attn_ffw = self.mlp(attn_norm)
        attn_ffw_norm = self.post_feedforward_layernorm(attn_ffw)
        attn_ffw_laurel_gated = attn_laurel + attn_ffw_norm
        corrected_predictions = self.altup.correct(predictions, attn_ffw_laurel_gated)
        # clone() so scaling/gating below does not mutate corrected_predictions[active_idx].
        first_prediction = corrected_predictions[self.config.altup_active_idx].clone()
        if self.config.altup_correct_scale:
            first_prediction = self.altup.scale_corrected_output(first_prediction)
        # per_layer_input_gate adapted from jax.numpy.einsum("btd,dp->btp", ...)
        first_prediction = self.per_layer_input_gate(first_prediction)
        first_prediction = self.act_fn(first_prediction)
        first_prediction = torch.multiply(first_prediction, per_layer_input)
        # per_layer_projection adapted from jax.numpy.einsum("btp,pd->btd", ...)
        first_prediction = self.per_layer_projection(first_prediction)
        first_prediction = self.post_per_layer_input_norm(first_prediction)
        # Only the non-active altup slices receive the per-layer-input update.
        corrected_predictions[1:] += first_prediction
        return corrected_predictions
class Gemma3nPreTrainedModel(Gemma2PreTrainedModel):
    # Base class wiring Gemma3n-specific weight initialization on top of the generic
    # transformers init; also declares modules whose outputs can be recorded.
    config: Gemma3nConfig
    input_modalities = ("image", "text", "audio")
    _no_split_modules = ["Gemma3nTextDecoderLayer"]
    _can_record_outputs = {
        "hidden_states": Gemma3nTextDecoderLayer,
        "attentions": Gemma3nTextAttention,
    }
    @torch.no_grad()
    def _init_weights(self, module):
        # Run the default init first, then overwrite the Gemma3n-specific parameters
        # and (non-persistent) buffers that have deterministic initial values.
        PreTrainedModel._init_weights(self, module)
        if isinstance(module, Gemma3nAudioCumulativeGroupNorm):
            init.ones_(module.weight)
        elif isinstance(module, Gemma3nAudioAttention):
            init.zeros_(module.per_dim_scale)
            q_scale = module.head_dim**-0.5
            # Compensates for the softplus applied to per_dim_scale at runtime.
            r_softplus_0 = 1.0 / torch.nn.functional.softplus(torch.tensor(0.0))
            init.copy_(module.q_scale, q_scale * r_softplus_0)
            init.constant_(module.softcap, module.attention_logits_soft_cap)
            init.copy_(module.local_causal_valid_mask, module.create_local_causal_valid_mask())
        elif isinstance(module, Gemma3nTextScaledWordEmbedding):
            init.constant_(module.embed_scale, module.scalar_embed_scale)
        elif isinstance(module, Gemma3nTextAltUp):
            init.zeros_(module.correct_output_scale)
            init.constant_(module.router_input_scale, self.config.hidden_size**-1.0)
        elif isinstance(module, Gemma3nAudioRelativePositionEmbedding):
            # Standard sinusoidal timescale table (transformer-style relative positions).
            min_timescale, max_timescale = 1.0, 1.0e4
            num_timescales = module.channels // 2
            log_timescale_increment = math.log(float(max_timescale) / float(min_timescale)) / max(
                num_timescales - 1, 1
            )
            inv_timescales = min_timescale * torch.exp(torch.arange(num_timescales) * -log_timescale_increment)
            init.copy_(module.inv_timescales, inv_timescales.float().unsqueeze(0).unsqueeze(0))
        elif isinstance(module, Gemma3nTextModel):
            init.constant_(module.per_layer_projection_scale, self.hidden_size**-0.5)
            init.constant_(module.per_layer_input_scale, 1 / math.sqrt(2.0))
        elif isinstance(module, Gemma3nRotaryEmbedding):
            # One inv_freq buffer pair per layer type (e.g. full vs. sliding attention).
            for layer_type in module.layer_types:
                rope_init_fn = module.compute_default_rope_parameters
                if module.rope_type[layer_type] != "default":
                    rope_init_fn = ROPE_INIT_FUNCTIONS[module.rope_type[layer_type]]
                curr_inv_freq, _ = rope_init_fn(module.config, layer_type=layer_type)
                init.copy_(getattr(module, f"{layer_type}_inv_freq"), curr_inv_freq)
                init.copy_(getattr(module, f"{layer_type}_original_inv_freq"), curr_inv_freq)
        # Many audio submodules register a gradient_clipping buffer; set them all here.
        if hasattr(module, "gradient_clipping"):
            init.constant_(module.gradient_clipping, self.config.gradient_clipping)
class Gemma3nAudioEncoder(Gemma3nPreTrainedModel):
    """
    An audio encoder based on the [Universal Speech Model](https://huggingface.co/papers/2303.01037) architecture.
    """
    config: Gemma3nAudioConfig
    main_input_name = "audio_mel"
    input_modalities = "audio"
    def __init__(self, config: Gemma3nAudioConfig):
        super().__init__(config)
        self.config = config
        # Conv front-end that subsamples the mel spectrogram in time before the conformer stack.
        self.subsample_conv_projection = Gemma3nAudioSubSampleConvProjection(config)
        self.conformer = nn.ModuleList(
            [Gemma3nAudioConformerBlock(config) for _ in range(config.conf_num_hidden_layers)]
        )
        self.post_init()
    @merge_with_config_defaults
    @capture_outputs
    def forward(
        self, audio_mel: torch.Tensor, audio_mel_mask: torch.BoolTensor, **kwargs: Unpack[TransformersKwargs]
    ) -> tuple | Gemma3nAudioEncoderModelOutput:
        """Encodes a batch of MELs.
        Args:
            audio_mel: a torch.Tensor of shape [batch, num_frames, num_channels,
                mel_bins].
            audio_mel_mask: a torch.BoolTensor of shape [batch, num_frames];
                True marks padded frames.
        Returns:
            audio_encodings: a torch.Tensor of shape
                `[batch_size, self.config.audio_soft_tokens_per_image,
                self.config.audio_config.hidden_size]`
            audio_mel_mask: a torch.BoolTensor of shape [batch, num_frames].
        """
        audio_encodings = self.subsample_conv_projection(audio_mel)  # audio_encodings: [B, T_sub, D]
        # Subsample the input audio_mel_mask to match the time dimension of audio_encodings (T_sub)
        t_sub = audio_encodings.shape[1]
        # Total time downsampling factor = product of the conv time strides.
        time_stride_product = 1
        for stride_pair_idx in range(len(self.config.sscp_conv_stride_size)):
            time_stride_product *= self.config.sscp_conv_stride_size[stride_pair_idx][0]
        # Create indices for gathering from the original mask.
        # These indices map to original time steps corresponding to the start of each
        # receptive field in the subsampled output.
        indices = torch.arange(t_sub, device=audio_mel_mask.device) * time_stride_product
        indices = torch.clamp(indices, max=audio_mel_mask.shape[1] - 1)  # Ensure indices are valid
        # Expand indices for batch compatibility if B > 1 and indices is 1D.
        if audio_mel_mask.ndim > 1 and indices.ndim == 1:
            indices = indices.unsqueeze(0).expand(audio_mel_mask.shape[0], -1)  # [B, T_sub]
        elif (
            audio_mel_mask.ndim == indices.ndim
            and audio_mel_mask.shape[0] == 1
            and indices.shape[0] != 1
            and t_sub == indices.shape[0]
        ):
            # Handle case where B=1 but indices became [T_sub] instead of [1, T_sub]
            indices = indices.unsqueeze(0)
        current_mask = torch.gather(audio_mel_mask, 1, indices)  # [B, T_sub]
        for block in self.conformer:
            audio_encodings = block(audio_encodings, current_mask)  # Pass the processed mask
        # Optional additional time reduction after the conformer stack.
        if self.config.conf_reduction_factor > 1:
            audio_encodings = audio_encodings[:, :: self.config.conf_reduction_factor]
            # Reduce the mask as well
            current_mask = current_mask[:, :: self.config.conf_reduction_factor]
        # Zero out encodings at padded (mask=True) positions.
        audio_encodings = audio_encodings.masked_fill(current_mask.unsqueeze(-1), 0.0)
        return Gemma3nAudioEncoderModelOutput(
            last_hidden_state=audio_encodings,
            audio_mel_mask=current_mask,
        )
class Gemma3nRotaryEmbedding(Gemma3RotaryEmbedding):
    # Identical to the Gemma 3 rotary embedding; subclassed only so the modular
    # conversion tooling emits a Gemma3n-named class (its buffers are re-initialized
    # per layer type in Gemma3nPreTrainedModel._init_weights).
    pass
@auto_docstring(custom_intro="The base Gemma 3n language model without a language modeling head.")
class Gemma3nTextModel(Gemma3TextModel):
    config: Gemma3nTextConfig
    def __init__(self, config: Gemma3nTextConfig):
        super().__init__(config)
        self.hidden_size = config.hidden_size
        self.hidden_size_per_layer_input = config.hidden_size_per_layer_input
        # Separate (smaller-vocab) embedding table providing one extra embedding per
        # decoder layer and token; reshaped in get_per_layer_inputs().
        self.embed_tokens_per_layer = Gemma3nTextScaledWordEmbedding(
            config.vocab_size_per_layer_input,
            config.num_hidden_layers * config.hidden_size_per_layer_input,
            self.padding_idx,
            embed_scale=config.hidden_size_per_layer_input**0.5,
        )
        # Projects the main token embedding into the per-layer input space.
        self.per_layer_model_projection = nn.Linear(
            self.hidden_size,
            config.num_hidden_layers * config.hidden_size_per_layer_input,
            bias=False,
        )
        self.per_layer_projection_norm = Gemma3nRMSNorm(config.hidden_size_per_layer_input, eps=config.rms_norm_eps)
        self.layers = nn.ModuleList(
            [Gemma3nTextDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        self.norm = Gemma3nRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        # AltUp: projections that derive the extra (num_inputs - 1) stacked hidden
        # states before the decoder stack, and un-embed them afterwards.
        self.altup_projections = nn.ModuleList(
            [nn.Linear(self.hidden_size, self.hidden_size, bias=False) for _ in range(1, self.config.altup_num_inputs)]
        )
        self.altup_unembed_projections = nn.ModuleList(
            [nn.Linear(self.hidden_size, self.hidden_size, bias=False) for _ in range(1, self.config.altup_num_inputs)]
        )
        self.register_buffer("per_layer_projection_scale", torch.tensor(self.hidden_size**-0.5), persistent=False)
        self.register_buffer("per_layer_input_scale", torch.rsqrt(torch.tensor(2.0)), persistent=False)
    def get_per_layer_inputs(self, input_ids: torch.LongTensor) -> torch.Tensor:
        # [*input_shape] -> [*input_shape, num_layers, hidden_size_per_layer_input]
        return self.embed_tokens_per_layer(input_ids).reshape(
            *input_ids.shape,
            self.config.num_hidden_layers,
            self.hidden_size_per_layer_input,
        )
    def project_per_layer_inputs(
        self,
        inputs_embeds: torch.Tensor,
        per_layer_inputs: torch.Tensor | None = None,
    ) -> torch.Tensor:
        # Combine the projected main embedding with the token-derived per-layer inputs
        # (when available), averaged via per_layer_input_scale (= 1/sqrt(2)).
        per_layer_projection: torch.Tensor = self.per_layer_model_projection(inputs_embeds)
        per_layer_projection *= self.per_layer_projection_scale.to(
            dtype=inputs_embeds.dtype, device=per_layer_projection.device
        )
        per_layer_projection = per_layer_projection.reshape(
            *inputs_embeds.shape[:-1],
            self.config.num_hidden_layers,
            self.hidden_size_per_layer_input,
        )
        per_layer_projection = self.per_layer_projection_norm(per_layer_projection)
        if per_layer_inputs is None:
            return per_layer_projection
        if per_layer_projection.shape != per_layer_inputs.shape:
            # per-layer inputs are sometimes padded with zeros, slice the relevant embeddings.
            per_layer_inputs = per_layer_inputs[..., : self.config.num_hidden_layers, :]
        return (per_layer_projection + per_layer_inputs) * self.per_layer_input_scale.to(
            dtype=inputs_embeds.dtype, device=per_layer_projection.device
        )
    # Last hidden states should be before reprojecting, to stay consistent with the other layer outputs
    @merge_with_config_defaults
    @capture_outputs(tie_last_hidden_states=False)
    @auto_docstring
    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        per_layer_inputs: torch.Tensor | None = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: Cache | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        use_cache: bool | None = None,
        cache_position: torch.LongTensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> BaseModelOutputWithPast:
        r"""
        per_layer_inputs (torch.Tensor, *optional*, defaults to None):
            Pre-computed per-layer embeddings. If None, they are derived from input_ids if provided.
        """
        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
        if input_ids is not None:
            inputs_embeds = self.embed_tokens(input_ids)
            per_layer_inputs = self.get_per_layer_inputs(input_ids)
        per_layer_inputs = self.project_per_layer_inputs(inputs_embeds, per_layer_inputs)
        if use_cache and past_key_values is None:
            past_key_values = DynamicCache(config=self.config)
        if cache_position is None:
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position = torch.arange(inputs_embeds.shape[1], device=inputs_embeds.device) + past_seen_tokens
        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)
        # It may already have been prepared by e.g. `generate`
        if not isinstance(causal_mask_mapping := attention_mask, dict):
            # Prepare mask arguments
            mask_kwargs = {
                "config": self.config,
                "inputs_embeds": inputs_embeds,
                "attention_mask": attention_mask,
                "cache_position": cache_position,
                "past_key_values": past_key_values,
                "position_ids": position_ids,
            }
            # Create the masks
            causal_mask_mapping = {
                "full_attention": create_causal_mask(**mask_kwargs),
                "sliding_attention": create_sliding_window_causal_mask(**mask_kwargs),
            }
        # embed positions
        hidden_states_0 = inputs_embeds
        # Expand hidden_states to support per-layer inputs
        # Each extra AltUp slice is a projection of the embedding rescaled to match the
        # embedding's RMS magnitude (clamped below by epsilon to avoid div-by-zero).
        target_magnitude = torch.mean(hidden_states_0**2, dim=-1, keepdim=True) ** 0.5
        epsilon_tensor = torch.tensor(1e-5)
        temp_hidden_states = [hidden_states_0]
        for i in range(1, self.config.altup_num_inputs):
            # altup_proj adapted from jax.numpy.einsum("btp,pd->btd", ...)
            altup_proj = self.altup_projections[i - 1](hidden_states_0)
            current_hidden_state = altup_proj.to(dtype=hidden_states_0.dtype, device=target_magnitude.device)
            new_magnitude = torch.mean(current_hidden_state**2, dim=-1, keepdim=True)
            new_magnitude = torch.sqrt(torch.maximum(new_magnitude, epsilon_tensor.to(target_magnitude.device)))
            current_hidden_state = current_hidden_state * target_magnitude / new_magnitude
            temp_hidden_states.append(current_hidden_state)
        hidden_states = torch.stack(temp_hidden_states, dim=0)  # [num_altup_inputs, batch, seq_len, hidden_size]
        # One rotary embedding per attention layer type (full vs. sliding).
        position_embeddings = {}
        for layer_type in self.config.layer_types:
            position_embeddings[layer_type] = self.rotary_emb(hidden_states, position_ids, layer_type)
        for decoder_layer in self.layers[: self.config.num_hidden_layers]:
            causal_mask = causal_mask_mapping[decoder_layer.attention_type]
            per_layer_input = per_layer_inputs[:, :, decoder_layer.layer_idx, :]
            hidden_states = decoder_layer(
                hidden_states,
                position_embeddings[decoder_layer.attention_type],
                per_layer_input,
                attention_mask=causal_mask,
                position_ids=position_ids,
                past_key_values=past_key_values,
                cache_position=cache_position,
                **kwargs,
            )
        # Per-layer inputs to single output
        # Un-embed the extra AltUp slices (re-matching the active slice's magnitude)
        # and average all slices back into a single hidden state.
        target_magnitude = torch.mean(hidden_states[0] ** 2, dim=-1, keepdim=True) ** 0.5
        temp_hidden_states = [hidden_states[0]]
        for i in range(1, self.config.altup_num_inputs):
            # altup_unembed_projections adapted from jax.numpy.einsum("btp,pd->btd", ...)
            altup_unemb_proj: torch.Tensor = self.altup_unembed_projections[i - 1](hidden_states[i])
            current_hidden_state = altup_unemb_proj.to(dtype=hidden_states_0.dtype, device=target_magnitude.device)
            new_magnitude = torch.mean(current_hidden_state**2, dim=-1, keepdim=True)
            new_magnitude = torch.sqrt(torch.maximum(new_magnitude, epsilon_tensor.to(target_magnitude.device)))
            current_hidden_state = current_hidden_state * target_magnitude / new_magnitude
            temp_hidden_states.append(current_hidden_state)
        hidden_states = torch.stack(temp_hidden_states)
        hidden_states = torch.mean(hidden_states, dim=0)
        hidden_states = self.norm(hidden_states)
        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values,
        )
@auto_docstring(custom_intro="The base Gemma 3n language model with a language modeling head.")
class Gemma3nForCausalLM(Gemma3ForCausalLM):
    # Remap checkpoints saved from the multimodal wrapper ("model.language_model") onto
    # this text-only model's "model" attribute when loading.
    _checkpoint_conversion_mapping = {"model.language_model": "model"}
class Gemma3nMultimodalEmbedder(nn.Module):
    """Embeds token ids or soft tokens for multimodal content into language model space."""

    def __init__(
        self,
        multimodal_config: Gemma3nAudioConfig | Gemma3nVisionConfig,
        text_config: Gemma3nTextConfig,
    ):
        super().__init__()
        self.multimodal_hidden_size = multimodal_config.hidden_size
        self.eps = multimodal_config.rms_norm_eps
        # Ids for this modality live at [vocab_offset, vocab_offset + vocab_size) in the
        # combined vocabulary; the offset is subtracted before the local lookup.
        self.vocab_offset = multimodal_config.vocab_offset
        self.vocab_size = multimodal_config.vocab_size
        self.text_hidden_size = text_config.hidden_size
        # Submodule creation order matches upstream (identical init RNG sequence).
        self.embedding = nn.Embedding(self.vocab_size, self.multimodal_hidden_size)
        self.hard_embedding_norm = Gemma3nRMSNorm(self.multimodal_hidden_size, eps=self.eps)
        self.soft_embedding_norm = Gemma3nRMSNorm(self.multimodal_hidden_size, eps=self.eps)
        self.embedding_projection = nn.Linear(self.multimodal_hidden_size, self.text_hidden_size, bias=False)
        self.embedding_post_projection_norm = Gemma3nRMSNorm(self.text_hidden_size, eps=self.eps, with_scale=False)

    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        inputs_embeds: torch.Tensor | None = None,
    ) -> torch.Tensor:
        """Embed hard token ids or soft tokens into language-model space.

        Exactly one of `input_ids` (values in `[vocab_offset, vocab_offset + vocab_size)`)
        or `inputs_embeds` (soft tokens) must be provided. Returns a tensor of shape
        `[batch_size, seq_len, text_hidden_size]`.
        """
        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
        if inputs_embeds is None:
            # Hard path: shift ids into the local table's range, look up, then norm.
            normed = self.hard_embedding_norm(self.embedding(input_ids - self.vocab_offset))
        else:
            normed = self.soft_embedding_norm(inputs_embeds)
        projected = self.embedding_projection(normed)
        return self.embedding_post_projection_norm(projected)
@auto_docstring(
    custom_intro="""
    The base Gemma 3n model comprising a vision backbone, an audio backbone, and a language model without a
    language modeling head.
    """
)
class Gemma3nModel(PaliGemmaModel):
    _checkpoint_conversion_mapping = {}

    def __init__(self, config: Gemma3nConfig):
        super().__init__(config)
        del self.multi_modal_projector  # Replaced by Gemma3nVisionEmbedder
        del self.text_config_dtype
        # Token ids below this threshold also feed Gemma3n's per-layer input embeddings.
        self.vocab_size_per_layer_input = config.text_config.vocab_size_per_layer_input
        self.audio_tower = AutoModel.from_config(config.audio_config)
        # Embedders mapping vision/audio tokens (hard ids or soft features) into text hidden space.
        self.embed_vision = Gemma3nMultimodalEmbedder(config.vision_config, config.text_config)
        self.embed_audio = Gemma3nMultimodalEmbedder(config.audio_config, config.text_config)

    @can_return_tuple
    @auto_docstring(custom_intro="Projects the last hidden state from the vision model into language model space.")
    def get_image_features(
        self,
        pixel_values: torch.FloatTensor,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple | BaseModelOutputWithPooling:
        """Run the vision tower and project its output into language-model space.

        Returns the vision tower output with `pooler_output` replaced by the
        projected soft tokens.
        """
        vision_outputs = self.vision_tower(pixel_values=pixel_values, do_pooling=False, return_dict=True, **kwargs)
        last_hidden_state = vision_outputs.last_hidden_state
        # Convert from (batch, channels, height, width) to (batch, height * width, channels) where:
        # height == width and height * width == Gemma3nConfig.vision_soft_tokens_per_image.
        last_hidden_state = last_hidden_state.reshape(
            last_hidden_state.shape[0],
            self.config.vision_config.hidden_size,
            self.config.vision_soft_tokens_per_image,
        ).permute(0, 2, 1)
        # Scale then normalize and embed the soft tokens into language model space.
        last_hidden_state *= self.config.vision_config.hidden_size**0.5
        vision_outputs.pooler_output = self.embed_vision(inputs_embeds=last_hidden_state)
        return vision_outputs

    def get_placeholder_mask(
        self,
        input_ids: torch.LongTensor | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        image_features: torch.FloatTensor | None = None,
        audio_features: torch.FloatTensor | None = None,
    ):
        """
        Obtains multimodal placeholder mask from `input_ids` or `inputs_embeds`, and checks that the placeholder token count is
        equal to the length of multimodal features. If the lengths are different, an error is raised.
        """
        if input_ids is None:
            # Without ids, detect placeholder positions by comparing each embedding
            # to the embedding of the special image/audio token.
            special_image_mask = inputs_embeds == self.get_input_embeddings()(
                torch.tensor(self.config.image_token_id, dtype=torch.long, device=inputs_embeds.device)
            )
            special_image_mask = special_image_mask.all(-1)
            special_audio_mask = (
                inputs_embeds
                == self.get_input_embeddings()(
                    torch.tensor(self.config.audio_token_id, dtype=torch.long, device=inputs_embeds.device)
                )
            ).all(-1)
        else:
            special_image_mask = input_ids == self.config.image_token_id
            special_audio_mask = input_ids == self.config.audio_token_id
        n_image_tokens = special_image_mask.sum()
        # Expand to embedding dimension so the mask can be used with masked_scatter.
        special_image_mask = special_image_mask.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device)
        if image_features is not None:
            torch_compilable_check(
                inputs_embeds[special_image_mask].numel() == image_features.numel(),
                f"Image features and image tokens do not match, tokens: {n_image_tokens}, features: {image_features.shape[0] * image_features.shape[1]}",
            )
        n_audio_tokens = special_audio_mask.sum()
        special_audio_mask = special_audio_mask.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device)
        if audio_features is not None:
            torch_compilable_check(
                inputs_embeds[special_audio_mask].numel() == audio_features.numel(),
                f"Audio features and audio tokens do not match, tokens: {n_audio_tokens}, features: {audio_features.shape[0] * audio_features.shape[1]}",
            )
        return special_image_mask, special_audio_mask

    @can_return_tuple
    def forward(
        self,
        input_ids: torch.LongTensor | None = None,  # text inputs
        pixel_values: torch.FloatTensor | None = None,  # vision inputs
        input_features: torch.FloatTensor | None = None,  # audio inputs
        attention_mask: torch.Tensor | None = None,
        input_features_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: Cache | None = None,
        token_type_ids: torch.LongTensor | None = None,
        cache_position: torch.LongTensor | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        labels: torch.LongTensor | None = None,
        use_cache: bool | None = None,
        output_attentions: bool | None = None,
        output_hidden_states: bool | None = None,
        **lm_kwargs: Unpack[TransformersKwargs],
    ) -> Gemma3nModelOutputWithPast:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.text_config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.text_config.vocab_size]`.
        Example:
        ```python
        >>> from PIL import Image
        >>> import httpx
        >>> from io import BytesIO
        >>> from transformers import AutoProcessor, Gemma3nForConditionalGeneration
        >>> model = Gemma3nForConditionalGeneration.from_pretrained("google/gemma3n2-3b-mix-224")
        >>> processor = AutoProcessor.from_pretrained("google/gemma3n2-3b-mix-224")
        >>> prompt = "Where is the cat standing?"
        >>> url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg"
        >>> with httpx.stream("GET", url) as response:
        ...     image = Image.open(BytesIO(response.read()))
        >>> inputs = processor(images=image, text=prompt, return_tensors="pt")
        >>> # Generate
        >>> generate_ids = model.generate(**inputs,)
        >>> processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "Where is the cat standing?\nsnow"
        ```
        """
        # Exactly one of input_ids / inputs_embeds must be provided.
        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        if input_ids is not None:
            inputs_embeds = self.get_input_embeddings()(input_ids)
            # Prepare per-layer inputs from input_ids: only ids within the
            # per-layer vocabulary contribute; others are mapped to 0.
            per_layer_inputs_mask = torch.logical_and(input_ids >= 0, input_ids < self.vocab_size_per_layer_input)
            per_layer_inputs_tokens = torch.where(per_layer_inputs_mask, input_ids, torch.zeros_like(input_ids))
            per_layer_inputs = self.language_model.get_per_layer_inputs(per_layer_inputs_tokens)
            # Handle vision tokens (>= embed_vision.vocab_offset and < embed_audio.vocab_offset)
            vision_mask = torch.logical_and(
                input_ids >= self.embed_vision.vocab_offset, input_ids < self.embed_audio.vocab_offset
            )
            # Non-vision positions get a dummy in-range id so the embedder never
            # sees out-of-range indices; their embeddings are discarded below.
            dummy_vision_token_id = self.embed_vision.vocab_offset + self.embed_vision.vocab_size - 1
            vision_input_ids = torch.where(vision_mask, input_ids, dummy_vision_token_id).to(inputs_embeds.device)
            vision_embeds = self.embed_vision(input_ids=vision_input_ids)
            vision_embeds = vision_embeds.to(inputs_embeds.device, inputs_embeds.dtype)
            expanded_vision_mask = vision_mask.unsqueeze(-1).expand_as(inputs_embeds)
            inputs_embeds = torch.where(expanded_vision_mask, vision_embeds, inputs_embeds)
            # Handle audio tokens (>= embed_audio.vocab_offset) the same way.
            audio_mask = input_ids >= self.embed_audio.vocab_offset
            dummy_audio_token_id = self.embed_audio.vocab_offset + self.embed_audio.vocab_size - 1
            audio_input_ids = torch.where(audio_mask, input_ids, dummy_audio_token_id).to(inputs_embeds.device)
            audio_embeds = self.embed_audio(input_ids=audio_input_ids)
            audio_embeds = audio_embeds.to(inputs_embeds.device, inputs_embeds.dtype)
            expanded_audio_mask = audio_mask.unsqueeze(-1).expand_as(inputs_embeds)
            inputs_embeds = torch.where(expanded_audio_mask, audio_embeds, inputs_embeds)
        else:
            per_layer_inputs = None
        # Merge text and images: scatter projected image features into the
        # placeholder positions.
        if pixel_values is not None:
            image_features = self.get_image_features(pixel_values, return_dict=True).pooler_output
            image_features = image_features.to(inputs_embeds.device, inputs_embeds.dtype)
            special_image_mask, _ = self.get_placeholder_mask(
                input_ids, inputs_embeds=inputs_embeds, image_features=image_features
            )
            inputs_embeds = inputs_embeds.masked_scatter(special_image_mask, image_features)
        # Merge text and audio
        if input_features is not None and input_features_mask is not None:
            # Note: the mask is inverted before being handed to the audio encoder.
            audio_outputs = self.get_audio_features(input_features, ~input_features_mask, return_dict=True)
            audio_features = audio_outputs.pooler_output
            audio_mask = audio_outputs.audio_mel_mask
            # The Gemma3nProcessor expects all audio will be 30s in length and inserts 188 audio soft tokens into the
            # text to account for this. However, the audio preprocessing and encoder do not guarantee they will
            # produce 188 soft tokens; they will produce at most that many tokens, but they may produce fewer tokens
            # depending on the length of the longest audio input in the batch. When we encounter this situation, we pad
            # the audio feature out to 188 soft tokens with the embedding of the last token in the embed_audio vocab.
            audio_padding_toks = torch.tensor([[self.vocab_size - 1]], dtype=torch.long, device=audio_features.device)
            audio_padding_embs = self.embed_audio(input_ids=audio_padding_toks)
            audio_features = torch.where(audio_mask.unsqueeze(-1), audio_padding_embs, audio_features)
            audio_batch_size, audio_seq_len, audio_embed_dim = audio_features.shape
            extra_padding_tokens = self.config.audio_soft_tokens_per_image - audio_seq_len
            extra_padding_features = audio_padding_embs.expand(audio_batch_size, extra_padding_tokens, audio_embed_dim)
            audio_features = torch.cat((audio_features, extra_padding_features), dim=1)
            audio_features = audio_features.to(inputs_embeds.device, inputs_embeds.dtype)
            _, special_audio_mask = self.get_placeholder_mask(
                input_ids, inputs_embeds=inputs_embeds, audio_features=audio_features
            )
            inputs_embeds = inputs_embeds.masked_scatter(special_audio_mask, audio_features)
        outputs = self.language_model(
            input_ids=None,
            per_layer_inputs=per_layer_inputs,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=True,
            cache_position=cache_position,
            **lm_kwargs,
        )
        return Gemma3nModelOutputWithPast(
            last_hidden_state=outputs.last_hidden_state,
            past_key_values=outputs.past_key_values if use_cache else None,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            image_hidden_states=image_features if pixel_values is not None else None,
            audio_hidden_states=audio_features if input_features is not None else None,
        )

    @can_return_tuple
    @auto_docstring(custom_intro="Projects the last hidden state from the audio encoder into language model space.")
    def get_audio_features(
        self,
        input_features: torch.Tensor,
        input_features_mask: torch.Tensor,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple | Gemma3nAudioEncoderModelOutput:
        r"""
        input_features (`torch.FloatTensor]` of shape `(num_images, seq_length, num_features)`):
            The tensors corresponding to the input audio.
        input_features_mask (`torch.FloatTensor]` of shape `(num_images, seq_length)`):
            The attention mask for the input audio.
        """
        audio_outputs: Gemma3nAudioEncoderModelOutput = self.audio_tower(
            input_features, input_features_mask, return_dict=True, **kwargs
        )
        # Project the encoder output into language-model space and expose it as
        # the pooler output, mirroring get_image_features above.
        audio_embeds = self.embed_audio(inputs_embeds=audio_outputs.last_hidden_state)
        audio_outputs.pooler_output = audio_embeds
        return audio_outputs
@auto_docstring(
    custom_intro="""
    The base Gemma 3n model comprising a vision backbone, an audio backbone, a language model, and a language modeling
    head.
    """
)
class Gemma3nForConditionalGeneration(PaliGemmaForConditionalGeneration):
    _checkpoint_conversion_mapping = {}

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: torch.LongTensor | None = None,  # text inputs
        pixel_values: torch.FloatTensor | None = None,  # vision inputs
        input_features: torch.FloatTensor | None = None,  # audio inputs
        attention_mask: torch.Tensor | None = None,
        input_features_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: Cache | None = None,
        token_type_ids: torch.LongTensor | None = None,
        cache_position: torch.LongTensor | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        labels: torch.LongTensor | None = None,
        use_cache: bool | None = None,
        output_attentions: bool | None = None,
        output_hidden_states: bool | None = None,
        logits_to_keep: int | torch.Tensor = 0,
        **lm_kwargs: Unpack[TransformersKwargs],
    ) -> Gemma3nCausalLMOutputWithPast:
        r"""
        input_features_mask (torch.Tensor, *optional*, defaults to None):
            The attention mask for the input audio.
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.text_config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are
            ignored (masked), the loss is only computed for the tokens with labels in
            `[0, ..., config.text_config.vocab_size]`.
        Example:
        ```python
        >>> from PIL import Image
        >>> import httpx
        >>> from io import BytesIO
        >>> from transformers import AutoProcessor, Gemma3ForConditionalGeneration
        >>> model = Gemma3ForConditionalGeneration.from_pretrained("google/gemma-3-4b-it")
        >>> processor = AutoProcessor.from_pretrained("google/gemma-3-4b-it")
        >>> messages = [
        ...     {
        ...         "role": "system",
        ...         "content": [
        ...             {"type": "text", "text": "You are a helpful assistant."}
        ...         ]
        ...     },
        ...     {
        ...         "role": "user", "content": [
        ...             {"type": "image", "url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg"},
        ...             {"type": "text", "text": "Where is the cat standing?"},
        ...         ]
        ...     },
        ... ]
        >>> inputs = processor.apply_chat_template(
        ...     messages,
        ...     tokenizer=True,
        ...     return_dict=True,
        ...     return_tensors="pt",
        ...     add_generation_prompt=True
        ... )
        >>> # Generate
        >>> generate_ids = model.generate(**inputs)
        >>> processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "user\nYou are a helpful assistant.\n\n\n\n\n\nWhere is the cat standing?\nmodel\nBased on the image, the cat is standing in a snowy area, likely outdoors. It appears to"
        ```
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        # Delegate all multimodal merging to the base model.
        outputs = self.model(
            input_ids=input_ids,
            pixel_values=pixel_values,
            input_features=input_features,
            attention_mask=attention_mask,
            input_features_mask=input_features_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            token_type_ids=token_type_ids,
            cache_position=cache_position,
            inputs_embeds=inputs_embeds,
            labels=labels,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=True,
            **lm_kwargs,
        )
        hidden_states = outputs.last_hidden_state
        # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
        slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
        logits = self.lm_head(hidden_states[:, slice_indices, :])
        # Soft-cap the logits via tanh when the text config requests it.
        if (final_logit_softcapping := self.config.get_text_config().final_logit_softcapping) is not None:
            logits = logits / final_logit_softcapping
            logits = torch.tanh(logits)
            logits = logits * final_logit_softcapping
        loss = None
        if labels is not None:
            # Upcast to float if we need to compute the loss to avoid potential precision issues
            logits = logits.float()
            # Standard causal shift: predict token t+1 from position t.
            shift_logits = logits[..., :-1, :]
            shift_labels = labels[..., 1:]
            if attention_mask is not None:
                # we use the input attention mask to shift the logits and labels, because it is 2D.
                # we also crop attn mask in case it is longer, which happens in PrefixTuning with peft
                shift_attention_mask = attention_mask[:, -shift_logits.shape[1] :].to(logits.device)
                shift_logits = shift_logits[shift_attention_mask.to(logits.device) != 0].contiguous()
                shift_labels = shift_labels[shift_attention_mask.to(shift_labels.device) != 0].contiguous()
            else:
                shift_logits = shift_logits.contiguous()
                shift_labels = shift_labels.contiguous()
            # Flatten the tokens
            loss_fct = nn.CrossEntropyLoss()
            flat_logits = shift_logits.view(-1, self.config.text_config.vocab_size)
            flat_labels = shift_labels.view(-1).to(shift_logits.device)
            loss = loss_fct(flat_logits, flat_labels)
        return Gemma3nCausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            image_hidden_states=outputs.image_hidden_states,
            audio_hidden_states=outputs.audio_hidden_states,
        )

    def prepare_inputs_for_generation(
        self,
        input_ids,
        past_key_values=None,
        inputs_embeds=None,
        cache_position=None,
        position_ids=None,
        pixel_values=None,
        input_features=None,
        attention_mask=None,
        input_features_mask=None,
        token_type_ids=None,
        use_cache=True,
        logits_to_keep=None,
        labels=None,
        is_first_iteration=False,
        **kwargs,
    ):
        # Overwritten -- custom `position_ids` and `pixel_values` handling
        model_inputs = super().prepare_inputs_for_generation(
            input_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            attention_mask=attention_mask,
            position_ids=position_ids,
            cache_position=cache_position,
            use_cache=use_cache,
            logits_to_keep=logits_to_keep,
            token_type_ids=token_type_ids,
            is_first_iteration=is_first_iteration,
            **kwargs,
        )
        # If we're in cached decoding stage, multimodal inputs should be None because input ids do not contain special
        # tokens anymore. Otherwise multimodal inputs should be passed to model.
        # NOTE: use_cache=False always needs pixel_values, input_features, and input_features_mask
        if is_first_iteration or not use_cache:
            model_inputs["pixel_values"] = pixel_values
            model_inputs["input_features"] = input_features
            model_inputs["input_features_mask"] = input_features_mask
        return model_inputs

    def create_masks_for_generate(self, **super_kwargs):
        # Deliberately blocks the inherited PaliGemma implementation so callers
        # fall back to the default mask-creation path for this model.
        raise AttributeError("Do not inherit create_masks_for_generate from PaliGemma")
# Names exported from this module.
__all__ = [
    "Gemma3nAudioConfig",
    "Gemma3nAudioEncoder",
    "Gemma3nConfig",
    "Gemma3nForCausalLM",
    "Gemma3nForConditionalGeneration",
    "Gemma3nModel",
    "Gemma3nPreTrainedModel",
    "Gemma3nTextConfig",
    "Gemma3nTextModel",
    "Gemma3nVisionConfig",
]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/gemma3n/modular_gemma3n.py",
"license": "Apache License 2.0",
"lines": 2266,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/gemma3n/processing_gemma3n.py | # Copyright 2025 Google Inc. HuggingFace Inc. team. All rights reserved.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...image_utils import ImageInput, make_nested_list_of_images
from ...processing_utils import ProcessingKwargs, ProcessorMixin, Unpack
from ...tokenization_utils_base import PreTokenizedInput, TextInput
from ...utils import auto_docstring
class Gemma3nProcessorKwargs(ProcessingKwargs, total=False):
    # Defaults merged into every processor call; text is not padded unless the
    # caller overrides `padding`.
    _defaults = {
        "text_kwargs": {"padding": False},
    }
@auto_docstring
class Gemma3nProcessor(ProcessorMixin):
    def __init__(
        self,
        feature_extractor,
        image_processor,
        tokenizer,
        chat_template=None,
        audio_seq_length: int = 188,
        image_seq_length: int = 256,
        **kwargs,
    ):
        r"""
        audio_seq_length (int, *optional*, defaults to 188):
            The number of audio soft tokens that will be added to the text prompt
        image_seq_length (int, *optional*, defaults to 256):
            The number of image soft tokens that will be added to the text prompt
        """
        self.audio_seq_length = audio_seq_length
        self.audio_token_id = tokenizer.audio_token_id
        self.boa_token = tokenizer.boa_token
        self.audio_token = tokenizer.audio_token
        # Pre-build the full placeholder sequence each audio token expands to:
        # begin-of-audio, `audio_seq_length` soft tokens, end-of-audio.
        audio_tokens_expanded = "".join([tokenizer.audio_token] * audio_seq_length)
        self.full_audio_sequence = f"\n\n{tokenizer.boa_token}{audio_tokens_expanded}{tokenizer.eoa_token}\n\n"
        self.image_seq_length = image_seq_length
        self.image_token_id = tokenizer.image_token_id
        self.boi_token = tokenizer.boi_token
        self.image_token = tokenizer.image_token
        # Same expansion for images: begin-of-image, soft tokens, end-of-image.
        image_tokens_expanded = "".join([tokenizer.image_token] * image_seq_length)
        self.full_image_sequence = f"\n\n{tokenizer.boi_token}{image_tokens_expanded}{tokenizer.eoi_token}\n\n"
        super().__init__(
            feature_extractor=feature_extractor,
            image_processor=image_processor,
            tokenizer=tokenizer,
            chat_template=chat_template,
            **kwargs,
        )

    @auto_docstring
    def __call__(
        self,
        images: ImageInput | None = None,
        text: TextInput | PreTokenizedInput | list[TextInput] | list[PreTokenizedInput] = None,
        audio: np.ndarray | list[float] | list[np.ndarray] | list[list[float]] | None = None,
        **kwargs: Unpack[Gemma3nProcessorKwargs],
    ) -> BatchFeature:
        if text is None and images is None and audio is None:
            raise ValueError("Provide at least one of `text`, `images`, or `audio`.")
        output_kwargs = self._merge_kwargs(
            Gemma3nProcessorKwargs,
            tokenizer_init_kwargs=self.tokenizer.init_kwargs,
            **kwargs,
        )
        if isinstance(text, str):
            text = [text]
        # NOTE(review): with `and`, this only raises when `text` is neither a
        # list nor has a `str` first element; if `text` is None (e.g. an
        # audio-only call) `text[0]` raises TypeError here before the fallback
        # below can build placeholder text — possibly `or` was intended; confirm.
        elif not isinstance(text, list) and not isinstance(text[0], str):
            raise TypeError("Invalid input text. Please provide a string, or a list of strings")
        if audio is not None:
            audio_inputs = self.feature_extractor(audio, **output_kwargs["audio_kwargs"])
            # With no text, synthesize one audio placeholder per audio clip.
            if not text:
                text = [self.audio_token for _ in audio]
            # Expand placeholder audio tokens to the full audio token sequence
            text = [prompt.replace(self.audio_token, self.full_audio_sequence) for prompt in text]
        else:
            audio_inputs = {}
        if images is not None:
            images = self.image_processor.fetch_images(images)
            batched_images = make_nested_list_of_images(images)
            image_inputs = self.image_processor(batched_images, **output_kwargs["images_kwargs"])
            # Create empty text to be replaced with placeholders
            if not text:
                text = [" ".join([self.image_token] * len(images)) for images in batched_images]
            if len(batched_images) != len(text):
                raise ValueError(
                    f"Received inconsistently sized batches of images ({len(batched_images)}) and text ({len(text)})."
                )
            # Expand placeholder image tokens to the full image token sequence
            text = [prompt.replace(self.image_token, self.full_image_sequence) for prompt in text]
        else:
            image_inputs = {}
        # Tokenize to numpy first so token_type_ids can be derived below; the
        # requested tensor type is applied once at the end by BatchFeature.
        return_tensors = output_kwargs["text_kwargs"].pop("return_tensors", None)
        text_inputs = self.tokenizer(text=text, **output_kwargs["text_kwargs"], return_tensors="np")
        self._check_special_mm_tokens(text, text_inputs, modalities=["image"])
        # Add token type ids manually, as tokenizer can't do arbitrary position token types
        array_ids = text_inputs["input_ids"]
        token_type_ids = np.zeros_like(array_ids)
        token_type_ids[array_ids == self.image_token_id] = 1
        token_type_ids[array_ids == self.audio_token_id] = 3
        text_inputs = {k: v.tolist() for k, v in text_inputs.items()}  # in case user requested list inputs
        text_inputs["token_type_ids"] = token_type_ids.tolist()
        return BatchFeature(data={**text_inputs, **image_inputs, **audio_inputs}, tensor_type=return_tensors)

    @property
    def model_input_names(self):
        # Union of the sub-processors' input names, minus image-processor
        # bookkeeping keys the model does not consume.
        tokenizer_input_names = self.tokenizer.model_input_names + ["token_type_ids"]
        image_processor_input_names = self.image_processor.model_input_names
        audio_processor_input_names = self.feature_extractor.model_input_names
        image_processor_input_names = [name for name in image_processor_input_names if name != "num_crops"]
        return list(tokenizer_input_names + image_processor_input_names + audio_processor_input_names)
__all__ = ["Gemma3nProcessor"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/gemma3n/processing_gemma3n.py",
"license": "Apache License 2.0",
"lines": 122,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:tests/models/gemma3n/test_feature_extraction_gemma3n.py | # Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import os
import random
import tempfile
import unittest
from collections.abc import Sequence
import numpy as np
from parameterized import parameterized
from transformers.models.gemma3n import Gemma3nAudioFeatureExtractor
from transformers.testing_utils import (
check_json_file_has_correct_format,
require_torch,
)
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
# NOTE(review): this availability check is a no-op (`pass` body) — presumably a
# leftover from a conditional import; confirm before removing.
if is_torch_available():
    pass

# Shared RNG used by helpers below when no explicit `rng` is supplied.
global_rng = random.Random()

# Maximum sequence length exercised by the truncation tests.
MAX_LENGTH_FOR_TESTING = 512
def floats_list(shape, scale=1.0, rng=None):
"""Creates a random float32 tensor"""
if rng is None:
rng = global_rng
values = []
for _ in range(shape[0]):
values.append([])
for _ in range(shape[1]):
values[-1].append(rng.random() * scale)
return values
class Gemma3nAudioFeatureExtractionTester:
def __init__(
self,
parent,
batch_size=7,
min_seq_length=400,
max_seq_length=2000,
feature_size: int = 128,
sampling_rate: int = 16_000,
padding_value: float = 0.0,
return_attention_mask: bool = False,
# ignore hop_length / frame_length for now, as ms -> length conversion causes issues with serialization tests
# frame_length_ms: float = 32.0,
# hop_length: float = 10.0,
min_frequency: float = 125.0,
max_frequency: float = 7600.0,
preemphasis: float = 0.97,
preemphasis_htk_flavor: bool = True,
fft_overdrive: bool = True,
dither: float = 0.0,
input_scale_factor: float = 1.0,
mel_floor: float = 1e-5,
per_bin_mean: Sequence[float] | None = None,
per_bin_stddev: Sequence[float] | None = None,
):
self.parent = parent
self.batch_size = batch_size
self.min_seq_length = min_seq_length
self.max_seq_length = max_seq_length
self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
self.feature_size = feature_size
self.sampling_rate = sampling_rate
self.padding_value = padding_value
self.return_attention_mask = return_attention_mask
# ignore hop_length / frame_length for now, as ms -> length conversion causes issues with serialization tests
# self.frame_length_ms = frame_length_ms
# self.hop_length = hop_length
self.min_frequency = min_frequency
self.max_frequency = max_frequency
self.preemphasis = preemphasis
self.preemphasis_htk_flavor = preemphasis_htk_flavor
self.fft_overdrive = fft_overdrive
self.dither = dither
self.input_scale_factor = input_scale_factor
self.mel_floor = mel_floor
self.per_bin_mean = per_bin_mean
self.per_bin_stddev = per_bin_stddev
def prepare_feat_extract_dict(self):
return {
"feature_size": self.feature_size,
"sampling_rate": self.sampling_rate,
"padding_value": self.padding_value,
"return_attention_mask": self.return_attention_mask,
"min_frequency": self.min_frequency,
"max_frequency": self.max_frequency,
"preemphasis": self.preemphasis,
"preemphasis_htk_flavor": self.preemphasis_htk_flavor,
"fft_overdrive": self.fft_overdrive,
"dither": self.dither,
"input_scale_factor": self.input_scale_factor,
"mel_floor": self.mel_floor,
"per_bin_mean": self.per_bin_mean,
"per_bin_stddev": self.per_bin_stddev,
}
def prepare_inputs_for_common(self, equal_length=False, numpify=False):
def _flatten(list_of_lists):
return list(itertools.chain(*list_of_lists))
if equal_length:
speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
else:
# make sure that inputs increase in size
speech_inputs = [
floats_list((x, self.feature_size))
for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
]
if numpify:
speech_inputs = [np.asarray(x) for x in speech_inputs]
return speech_inputs
class Gemma3nAudioFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
feature_extraction_class = Gemma3nAudioFeatureExtractor
def setUp(self):
self.feat_extract_tester = Gemma3nAudioFeatureExtractionTester(self)
def test_feat_extract_from_and_save_pretrained(self):
feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
with tempfile.TemporaryDirectory() as tmpdirname:
saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
check_json_file_has_correct_format(saved_file)
feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)
dict_first = feat_extract_first.to_dict()
dict_second = feat_extract_second.to_dict()
mel_1 = feat_extract_first.mel_filters
mel_2 = feat_extract_second.mel_filters
self.assertTrue(np.allclose(mel_1, mel_2))
self.assertEqual(dict_first, dict_second)
def test_feat_extract_to_json_file(self):
feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
with tempfile.TemporaryDirectory() as tmpdirname:
json_file_path = os.path.join(tmpdirname, "feat_extract.json")
feat_extract_first.to_json_file(json_file_path)
feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)
dict_first = feat_extract_first.to_dict()
dict_second = feat_extract_second.to_dict()
mel_1 = feat_extract_first.mel_filters
mel_2 = feat_extract_second.mel_filters
self.assertTrue(np.allclose(mel_1, mel_2))
self.assertEqual(dict_first, dict_second)
def test_feat_extract_from_pretrained_kwargs(self):
feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
with tempfile.TemporaryDirectory() as tmpdirname:
saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
check_json_file_has_correct_format(saved_file)
feat_extract_second = self.feature_extraction_class.from_pretrained(
tmpdirname, feature_size=2 * self.feat_extract_dict["feature_size"]
)
mel_1 = feat_extract_first.mel_filters
mel_2 = feat_extract_second.mel_filters
self.assertTrue(2 * mel_1.shape[1] == mel_2.shape[1])
@parameterized.expand(
[
([floats_list((1, x))[0] for x in range(800, 1400, 200)],),
([floats_list((1, x))[0] for x in (800, 800, 800)],),
([floats_list((1, x))[0] for x in range(200, (MAX_LENGTH_FOR_TESTING + 500), 200)], True),
]
)
def test_call(self, audio_inputs, test_truncation=False):
feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
np_audio_inputs = [np.asarray(audio_input) for audio_input in audio_inputs]
input_features = feature_extractor(np_audio_inputs, padding="max_length", return_tensors="np").input_features
self.assertTrue(input_features.ndim == 3)
# input_features.shape should be (batch, num_frames, n_mels) ~= (batch, num_frames, feature_size)
# 480_000 is the max_length that inputs are padded to. we use that to calculate num_frames
expected_num_frames = (480_000 - feature_extractor.frame_length) // (feature_extractor.hop_length) + 1
self.assertTrue(
input_features.shape[-2] == expected_num_frames,
f"no match: {input_features.shape[-1]} vs {expected_num_frames}",
)
self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size)
encoded_sequences_1 = feature_extractor(audio_inputs, return_tensors="np").input_features
encoded_sequences_2 = feature_extractor(np_audio_inputs, return_tensors="np").input_features
for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
if test_truncation:
audio_inputs_truncated = [x[:MAX_LENGTH_FOR_TESTING] for x in audio_inputs]
np_audio_inputs_truncated = [np.asarray(audio_input) for audio_input in audio_inputs_truncated]
encoded_sequences_1 = feature_extractor(
audio_inputs_truncated, max_length=MAX_LENGTH_FOR_TESTING, return_tensors="np"
).input_features
encoded_sequences_2 = feature_extractor(
np_audio_inputs_truncated, max_length=MAX_LENGTH_FOR_TESTING, return_tensors="np"
).input_features
for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
def test_call_unbatched(self):
feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
np_audio = floats_list((1, 800))[0]
input_features = feature_extractor(np_audio, return_tensors="np").input_features
expected_input_features = feature_extractor([np_audio], return_tensors="np").input_features
np.testing.assert_allclose(input_features, expected_input_features)
def test_audio_features_attn_mask_consistent(self):
# regression test for https://github.com/huggingface/transformers/issues/39911
# Test input_features and input_features_mask have consistent shape
np.random.seed(42)
feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
for i in [512, 640, 1024]:
audio = np.random.randn(i)
mm_data = {
"raw_speech": [audio],
"sampling_rate": 16000,
}
inputs = feature_extractor(**mm_data, return_tensors="np")
out = inputs["input_features"]
mask = inputs["input_features_mask"]
assert out.ndim == 3
assert mask.ndim == 2
assert out.shape[:2] == mask.shape[:2]
def test_dither(self):
np.random.seed(42) # seed the dithering randn()
# Tests that features with and without little dithering are similar, but not the same
dict_no_dither = self.feat_extract_tester.prepare_feat_extract_dict()
dict_no_dither["dither"] = 0.0
dict_dither = self.feat_extract_tester.prepare_feat_extract_dict()
dict_dither["dither"] = 0.00003 # approx. 1/32k
feature_extractor_no_dither = self.feature_extraction_class(**dict_no_dither)
feature_extractor_dither = self.feature_extraction_class(**dict_dither)
# create three inputs of length 800, 1000, and 1200
speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]
# compute features
input_features_no_dither = feature_extractor_no_dither(
np_speech_inputs, padding=True, return_tensors="np", sampling_rate=dict_no_dither["sampling_rate"]
).input_features
input_features_dither = feature_extractor_dither(
np_speech_inputs, padding=True, return_tensors="np", sampling_rate=dict_dither["sampling_rate"]
).input_features
# test there is a difference between features (there's added noise to input signal)
diff = input_features_dither - input_features_no_dither
# features are not identical
assert np.abs(diff).mean() > 1e-6
# features are not too different
# the heuristic value `7e-4` is obtained by running 50000 times (maximal value is around 3e-4).
assert np.abs(diff).mean() < 7e-4
# the heuristic value `8e-1` is obtained by running 50000 times (maximal value is around 5e-1).
assert np.abs(diff).max() < 8e-1
@require_torch
def test_double_precision_pad(self):
import torch
feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
py_speech_inputs = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
self.assertTrue(np_processed.input_features.dtype == np.float32)
pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
self.assertTrue(pt_processed.input_features.dtype == torch.float32)
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/gemma3n/test_feature_extraction_gemma3n.py",
"license": "Apache License 2.0",
"lines": 256,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:tests/models/gemma3n/test_modeling_gemma3n.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch Gemma3n model."""
import copy
import inspect
import unittest
import numpy as np
import pytest
from datasets import load_dataset
from parameterized import parameterized
from transformers import (
AutoModelForCausalLM,
AutoProcessor,
AutoTokenizer,
Gemma3nAudioConfig,
Gemma3nAudioFeatureExtractor,
Gemma3nConfig,
StaticCache,
is_torch_available,
)
from transformers.testing_utils import (
Expectations,
cleanup,
require_deterministic_for_xpu,
require_torch,
require_torch_accelerator,
set_config_for_less_flaky_test,
set_model_for_less_flaky_test,
slow,
torch_device,
)
from ...causal_lm_tester import CausalLMModelTest, CausalLMModelTester
from ...generation.test_utils import GenerationTesterMixin, assert_similar_generate_outputs
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import (
TEST_EAGER_MATCHES_SDPA_INFERENCE_PARAMETERIZATION,
ModelTesterMixin,
_test_eager_matches_sdpa_inference,
floats_tensor,
ids_tensor,
)
if is_torch_available():
import torch
from transformers import (
Gemma3nAudioEncoder,
Gemma3nForCausalLM,
Gemma3nForConditionalGeneration,
Gemma3nModel,
Gemma3nTextModel,
)
class Gemma3nAudioModelTester:
    """Helper that builds a tiny Gemma3n audio-encoder config together with matching,
    deterministic feature-extractor inputs for the audio tests."""

    def __init__(
        self,
        parent,
        batch_size=2,
        num_channels=32,  # feature_size / input_feat_size
        sampling_rate=16_000,
        raw_audio_length=8_000,
        is_training=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.sampling_rate = sampling_rate
        self.raw_audio_length = raw_audio_length
        self.is_training = is_training

    def get_feature_extractor_config(self):
        """Kwargs for `Gemma3nAudioFeatureExtractor`."""
        config = {
            "feature_size": self.num_channels,
            "sampling_rate": self.sampling_rate,
            "padding_value": 0.0,
            "return_attention_mask": True,
            "frame_length_ms": 32.0,
            "hop_length_ms": 10.0,
        }
        config["dither"] = 0.0  # Important for determinism
        return config

    def get_audio_encoder_config(self):
        """A deliberately tiny `Gemma3nAudioConfig` so tests stay fast."""
        return Gemma3nAudioConfig(
            input_feat_size=self.num_channels,
            hidden_size=32,
            conf_num_attention_heads=4,
            conf_num_hidden_layers=2,
            sscp_conv_channel_size=(16, 8),
            conf_conv_kernel_size=3,
            conf_attention_chunk_size=4,
            conf_attention_context_left=5,
        )

    def prepare_config_and_inputs_for_common(self):
        """Return `(config, inputs_dict)` for `Gemma3nAudioEncoder` built from deterministic audio."""
        encoder_config = self.get_audio_encoder_config()
        np.random.seed(0)
        # one pure 440 Hz tone plus one shorter white-noise clip (exercises padding)
        tone = np.sin(2 * np.pi * 440 * np.linspace(0, 1, self.raw_audio_length)).astype(np.float32)
        noise = np.random.randn(self.raw_audio_length // 2).astype(np.float32)
        extractor = Gemma3nAudioFeatureExtractor(**self.get_feature_extractor_config())
        audio_inputs = extractor([tone, noise], return_tensors="pt")
        # The encoder expects a padding mask (True for padding), while the feature extractor
        # returns an attention mask (True for valid tokens). We must invert it.
        padding_mask = ~audio_inputs["input_features_mask"].to(torch.bool)
        inputs_dict = {
            "audio_mel": audio_inputs["input_features"],
            "audio_mel_mask": padding_mask,
        }
        return encoder_config, inputs_dict
@unittest.skip("Skipped for now!")
@require_torch
class Gemma3nAudioModelTest(ModelTesterMixin, unittest.TestCase):
    """Tests for the standalone Gemma3n audio encoder and its feature extractor.

    Most common checks are skipped because `Gemma3nAudioEncoder` is not a standard text
    model: it has no token embeddings, returns a plain tuple, and does not expose
    attentions or hidden states.
    """

    all_model_classes = (Gemma3nAudioEncoder,) if is_torch_available() else ()
    test_missing_keys = False
    is_generative = False
    _is_stateful = True
    # the encoder consumes mel features, not `input_ids`
    main_input_name = "audio_mel"

    def setUp(self):
        self.model_tester = Gemma3nAudioModelTester(self)
        self.config_tester = ConfigTester(self, config_class=Gemma3nAudioConfig, hidden_size=37)
        torch.manual_seed(0)
        # The following values are golden outputs from a deterministic run of the components.
        # They are used to ensure that changes to the code do not alter the numerical output.
        # Generated with seeds np.random.seed(0) and torch.manual_seed(0).
        self.expected_input_features_shape = (2, 48, 32)
        self.expected_input_features_slice = np.array([-5.733152, -5.337127, -4.916284, -4.378989, -3.7622747])
        self.expected_input_features_mask_shape = (2, 48)
        self.expected_input_features_mask_slice = np.array([True, True, True, True, False])
        self.expected_encoder_output_shape = (2, 3, 32)
        self.expected_encoder_output_slice = torch.tensor([-0.4159, 0.6459, 0.6305, 2.2902, 0.9683])
        self.expected_encoder_mask_shape = (2, 3)
        self.expected_encoder_mask_slice = torch.tensor([False, False, True])
        # Prepare a shared feature extractor and raw audio for the tests
        self.feature_extractor = Gemma3nAudioFeatureExtractor(**self.model_tester.get_feature_extractor_config())
        np.random.seed(0)
        # a pure 440 Hz tone and a shorter random-noise clip (the short clip exercises padding)
        raw_speech_1 = np.sin(2 * np.pi * 440 * np.linspace(0, 1, self.model_tester.raw_audio_length)).astype(
            np.float32
        )
        raw_speech_2 = np.random.randn(self.model_tester.raw_audio_length // 2).astype(np.float32)
        self.raw_speech = [raw_speech_1, raw_speech_2]

    @unittest.skip("Audio encoder does not support attention output")
    def test_attention_outputs(self):
        pass

    @unittest.skip("Audio encoder does not support hidden state output")
    def test_hidden_states_output(self):
        pass

    @unittest.skip("Audio encoder returns a tuple, not a ModelOutput object, skipping equivalence test.")
    def test_model_outputs_equivalence(self):
        pass

    @unittest.skip("Audio encoder does not support retaining gradients on hidden states/attentions.")
    def test_retain_grad_hidden_states_attentions(self):
        pass

    @unittest.skip("Audio encoder does not have a concept of token embeddings")
    def test_model_get_set_embeddings(self):
        pass

    @unittest.skip("Audio encoder does not have a concept of token embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    @unittest.skip("This model has a complex downsampling scheme that is hard to test with the generic batching test.")
    def test_batching_equivalence(self):
        pass

    def test_feature_extractor(self):
        """
        Tests the feature extractor's output against pre-computed golden values.
        This ensures the NumPy-based audio preprocessing is correct and consistent.
        """
        audio_inputs = self.feature_extractor(
            self.raw_speech, padding="longest", pad_to_multiple_of=128, return_tensors="np"
        )
        input_features = audio_inputs["input_features"]
        self.assertEqual(input_features.shape, self.expected_input_features_shape)
        np.testing.assert_allclose(input_features[0, 0, :5], self.expected_input_features_slice, rtol=1e-5, atol=1e-5)
        input_features_mask = audio_inputs["input_features_mask"]
        self.assertEqual(input_features_mask.shape, self.expected_input_features_mask_shape)
        # The second audio sample is shorter (22 frames vs 48), so its mask should become False at index 22
        np.testing.assert_array_equal(input_features_mask[1, 21:26], self.expected_input_features_mask_slice)

    def test_audio_encoder(self):
        """
        Tests the audio encoder's forward pass against pre-computed golden values.
        This ensures the PyTorch-based audio encoding model is correct and consistent.
        """
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        model = Gemma3nAudioEncoder(config).to(torch_device).eval()
        with torch.no_grad():
            encoder_output, encoder_mask = model(**inputs_dict)
        # Check output encodings
        self.assertEqual(encoder_output.shape, self.expected_encoder_output_shape)
        torch.testing.assert_close(
            encoder_output[0, 0, :5], self.expected_encoder_output_slice.to(torch_device), rtol=1e-4, atol=1e-4
        )
        # Check output mask (True means padded)
        # Second sample has 22 feature frames. After downsampling by 4 (conv) -> 5 frames. After downsampling by 4 (reduction) -> 1 frame.
        # So the mask should be [False, True, True]
        self.assertEqual(encoder_mask.shape, self.expected_encoder_mask_shape)
        torch.testing.assert_close(encoder_mask[1, :], self.expected_encoder_mask_slice.to(torch_device))
class Gemma3nTextModelTester(CausalLMModelTester):
    """Config/inputs builder for Gemma3n text-only tests: a tiny model that still
    exercises KV-cache sharing across both full- and sliding-attention layers."""

    if is_torch_available():
        base_model_class = Gemma3nTextModel
        causal_lm_class = Gemma3nForCausalLM

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        vocab_size_per_layer_input=99,
        hidden_size=16,
        hidden_size_per_layer_input=16,
        num_hidden_layers=4,  # override to correctly test sharing cache pattern
        num_kv_shared_layers=2,  # important to override
        layer_types=[
            "full_attention",
            "sliding_attention",
            "full_attention",
            "sliding_attention",
        ],  # similarly we want to test sharing on both types
        num_attention_heads=2,
        num_key_value_heads=2,
        altup_num_inputs=2,
        intermediate_size=21,
        hidden_activation="gelu_pytorch_tanh",
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        is_decoder=False,
    ):
        self._verify_and_infer_model_attributes()
        # Mirror every constructor argument onto the instance verbatim (snapshot of
        # `locals()` here contains exactly the parameters, nothing else).
        for arg_name, arg_value in list(locals().items()):
            if arg_name != "self":
                setattr(self, arg_name, arg_value)
        # derived attribute, not a constructor argument
        self.head_dim = hidden_size // num_attention_heads
@require_torch
class Gemma3nTextModelTest(CausalLMModelTest, unittest.TestCase):
    """Text-only test suite for Gemma3n, overriding common checks that must account for
    Gemma3n specifics: altup projections, shared KV-cache layers, and per-layer-type RoPE."""

    model_tester_class = Gemma3nTextModelTester
    _is_stateful = True
    model_split_percents = [0.5, 0.6]

    def _check_hidden_states_for_generate(
        self, batch_size, hidden_states, prompt_length, output_length, config, use_cache=False
    ):
        "Gemma3n has special hidden states shape with 1 additional dim (which is then reduced with projections)"
        self.assertIsInstance(hidden_states, tuple)
        # every element is itself a tuple of per-layer hidden states for one generation step
        self.assertListEqual(
            [isinstance(iter_hidden_states, tuple) for iter_hidden_states in hidden_states],
            [True] * len(hidden_states),
        )
        self.assertEqual(len(hidden_states), (output_length - prompt_length))
        # When `output_hidden_states=True`, each iteration of generate appends the hidden states corresponding to the
        # new token(s)
        for generated_length, iter_hidden_states in enumerate(hidden_states):
            # regardless of using cache, the first forward pass will have the full prompt as input
            if use_cache and generated_length > 0:
                model_input_length = 1
            else:
                model_input_length = prompt_length + generated_length
            # the leading `altup_num_inputs` is the Gemma3n-specific extra dimension
            expected_shape = (config.altup_num_inputs, batch_size, model_input_length, config.hidden_size)
            # check hidden size
            self.assertListEqual(
                [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states],
                [expected_shape] * len(iter_hidden_states),
            )

    @parameterized.expand(TEST_EAGER_MATCHES_SDPA_INFERENCE_PARAMETERIZATION)
    def test_eager_matches_sdpa_inference(
        self,
        name,
        dtype,
        padding_side,
        use_attention_mask,
        output_attentions,
        enable_kernels,
    ):
        "We need to relax a bit the `atols` and `rtols` for fp32 here due to the altup projections"
        # NOTE(review): tolerance keys appear to be (device, enable_kernels, dtype) — confirm
        # against `_test_eager_matches_sdpa_inference` before relying on this
        atols = {
            ("cpu", False, torch.float32): 5e-2, # this was relaxed
            ("cpu", False, torch.float16): 5e-3,
            ("cpu", False, torch.bfloat16): 1e-2,
            ("cpu", True, torch.float32): 5e-2, # this was relaxed
            ("cpu", True, torch.float16): 5e-3,
            ("cpu", True, torch.bfloat16): 1e-2,
            ("cuda", False, torch.float32): 5e-2, # this was relaxed
            ("cuda", False, torch.bfloat16): 1e-2,
            ("cuda", False, torch.float16): 5e-3,
            ("cuda", True, torch.float32): 5e-2, # this was relaxed
            ("cuda", True, torch.bfloat16): 1e-2,
            ("cuda", True, torch.float16): 5e-3,
        }
        rtols = {
            ("cpu", False, torch.float32): 1e-2, # this was relaxed
            ("cpu", False, torch.float16): 5e-3,
            ("cpu", False, torch.bfloat16): 1e-2,
            ("cpu", True, torch.float32): 1e-2, # this was relaxed
            ("cpu", True, torch.float16): 5e-3,
            ("cpu", True, torch.bfloat16): 1e-2,
            ("cuda", False, torch.float32): 1e-2, # this was relaxed
            ("cuda", False, torch.bfloat16): 1e-2,
            ("cuda", False, torch.float16): 5e-3,
            ("cuda", True, torch.float32): 1e-2, # this was relaxed
            ("cuda", True, torch.bfloat16): 3e-2,
            ("cuda", True, torch.float16): 5e-3,
        }
        _test_eager_matches_sdpa_inference(
            self,
            name,
            dtype,
            padding_side,
            use_attention_mask,
            output_attentions,
            enable_kernels,
            atols=atols,
            rtols=rtols,
        )

    @pytest.mark.generate
    @unittest.skip("Gemma3n does not support QuantizedCache as it performs cache manipulation in the forward pass")
    def test_generate_with_quant_cache(self):
        pass

    @unittest.skip("Gemma3n applies key/query norm which doesn't work with packing")
    def test_eager_padding_matches_padding_free_with_position_ids(self):
        pass

    @unittest.skip("Gemma3n applies key/query norm which doesn't work with packing")
    def test_sdpa_padding_matches_padding_free_with_position_ids(self):
        pass

    @unittest.skip("Gemma3n only support fp16 and bf16 data type")
    def test_flash_attn_2_fp32_ln(self):
        pass

    @pytest.mark.generate
    def test_generate_from_inputs_embeds_with_static_cache(self):
        """
        Test that StaticCache can generate from inputs_embeds and calculates max_cache_length
        correctly in `generate()`. We force the model to not stop generation until max-length is reached
        to verify that the cache length is indeed set correctly and we don't run out of index when slicing the cache.
        """
        for model_class in self.all_generative_model_classes:
            # Here, we should ideally not skip any model, and test them all. However, some old models cannot correctly
            # use a static cache because they don't create the causal masks correctly.
            # TODO: cyril -> relax this by adding a `_support_static_cache` attribute
            if not model_class._can_compile_fullgraph:
                self.skipTest(reason="This model does not support the static cache format")
            config, inputs_dict = self.prepare_config_and_inputs_for_generate()
            if config.is_encoder_decoder:
                self.skipTest(reason="This model is encoder-decoder and has Encoder-Decoder Cache")
            model = model_class(config).to(torch_device).eval()
            if "inputs_embeds" not in inspect.signature(model.prepare_inputs_for_generation).parameters:
                self.skipTest(reason="This model does not support `inputs_embeds` in generation")
            input_ids = inputs_dict.pop("input_ids")
            model.config.use_cache = True
            model.config.is_decoder = True
            batch_size = input_ids.shape[0]
            max_new_tokens = 10
            # here we force to not stop at eos and go until max-length
            model.generation_config.eos_token_id = model.config.get_text_config().eos_token_id = -1
            generation_kwargs = {
                "max_new_tokens": max_new_tokens,
                "cache_implementation": "static",
                "return_dict_in_generate": True, # Required to return `past_key_values`
            }
            text_config = model.config.get_text_config()
            head_dim = (
                getattr(text_config, "head_dim", None) or text_config.hidden_size // text_config.num_attention_heads
            )
            num_key_value_heads = (
                text_config.num_attention_heads
                if getattr(text_config, "num_key_value_heads", None) is None
                else text_config.num_key_value_heads
            )
            num_hidden_layers = text_config.num_hidden_layers
            inputs_embeds = model.get_input_embeddings()(input_ids)
            outputs = model.generate(inputs_embeds=inputs_embeds, **generation_kwargs, **inputs_dict)
            # we should get `max_length - 1` in shape, not `max_length - embeds_length`.
            # -1 because the last generated token isn't yet in the cache.
            max_length = max_new_tokens + inputs_embeds.shape[1] - 1
            cache_shape = [batch_size, num_key_value_heads, max_length, head_dim]
            self.assertIsInstance(outputs.past_key_values, StaticCache)
            # Gemma3n shares the KV cache of the last `num_kv_shared_layers` layers, so fewer cache layers exist
            self.assertEqual(len(outputs.past_key_values), num_hidden_layers - text_config.num_kv_shared_layers)
            self.assertListEqual(list(outputs.past_key_values.layers[0].keys.shape), cache_shape)

    @pytest.mark.generate
    def test_generate_with_static_cache(self):
        """
        Tests that generating with static cache give almost same results as with dynamic cache, and the output cache
        has the expected shapes
        """
        for model_class in self.all_generative_model_classes:
            # Here, we should ideally not skip any model, and test them all. However, some old models cannot correctly
            # use a static cache because they don't create the causal masks correctly.
            # TODO: cyril -> relax this by adding a `_support_static_cache` attribute
            if not model_class._can_compile_fullgraph:
                self.skipTest(reason="This model does not support the static cache format")
            config, inputs_dict = self.prepare_config_and_inputs_for_generate()
            set_config_for_less_flaky_test(config)
            main_input = inputs_dict[model_class.main_input_name]
            if config.is_encoder_decoder:
                self.skipTest(reason="This model is encoder-decoder and has Encoder-Decoder Cache")
            config.is_decoder = True
            batch_size = main_input.shape[0]
            seq_length = self.model_tester.seq_length
            max_new_tokens = 20
            for dtype in (torch.float32, torch.bfloat16):
                model = model_class(copy.deepcopy(config)).to(torch_device).to(dtype).eval()
                inputs_dict = {
                    k: v.to(dtype) if isinstance(v, torch.Tensor) and torch.is_floating_point(v) else v
                    for k, v in inputs_dict.items()
                }
                set_model_for_less_flaky_test(model)
                generation_kwargs = {
                    "max_new_tokens": max_new_tokens,
                    "return_dict_in_generate": True, # Required to return `past_key_values`
                    "output_scores": True,
                    "use_cache": True,
                }
                static_cache_generation = model.generate(
                    **generation_kwargs, **inputs_dict, cache_implementation="static"
                )
                # Check 1: The cache shapes must match the expected shapes
                max_cache_len = seq_length + max_new_tokens - 1 # cache len = gen len - 1, the last token has no cache
                text_config = config.text_config if hasattr(config, "text_config") else config
                head_dim = (
                    getattr(text_config, "head_dim", None)
                    or text_config.hidden_size // text_config.num_attention_heads
                )
                num_key_value_heads = (
                    text_config.num_attention_heads
                    if getattr(text_config, "num_key_value_heads", None) is None
                    else text_config.num_key_value_heads
                )
                num_hidden_layers = text_config.num_hidden_layers
                cache_shape = (batch_size, num_key_value_heads, max_cache_len, head_dim)
                self.assertTrue(isinstance(static_cache_generation.past_key_values, StaticCache))
                # Gemma3n shares KV caches for the last `num_kv_shared_layers` layers
                self.assertTrue(
                    len(static_cache_generation.past_key_values)
                    == num_hidden_layers - text_config.num_kv_shared_layers
                )
                self.assertTrue(static_cache_generation.past_key_values.layers[0].keys.shape == cache_shape)
                # Check 2: The outputs must be similar to the case with dynamic cache
                dynamic_cache_generation = model.generate(**generation_kwargs, **inputs_dict)
                assert_similar_generate_outputs(dynamic_cache_generation, static_cache_generation)

    def test_model_rope_scaling_frequencies(self):
        """Tests the frequency properties of the different RoPE scaling types on the model RoPE layer."""
        # Gemma3n has different RoPE configs per layer type
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        # Retrieves the RoPE layer class from the base model class. Uses `.named_modules()` to avoid hardcoding the
        # named location of the RoPE layer class.
        base_model = self.model_tester.base_model_class(config)
        possible_rope_attributes = [
            "pos_emb",
            "rotary_emb", # most common case
            "global_rotary_emb",
            "local_rotary_emb",
        ]
        for name, module in base_model.named_modules():
            if any(potential_name in name for potential_name in possible_rope_attributes):
                rope_class = type(module)
                break
        scaling_factor = 10
        short_input_length = 10
        long_input_length = int(config.max_position_embeddings * 1.5)
        # Inputs
        x = torch.randn(
            1, dtype=torch.float32, device=torch_device
        ) # used exclusively to get the dtype and the device
        position_ids_short = torch.arange(short_input_length, dtype=torch.long, device=torch_device)
        position_ids_short = position_ids_short.unsqueeze(0)
        position_ids_long = torch.arange(long_input_length, dtype=torch.long, device=torch_device)
        position_ids_long = position_ids_long.unsqueeze(0)
        # Sanity check original RoPE
        rope_params = {"rope_type": "default", "rope_theta": 10_000.0}
        config.rope_parameters = {"sliding_attention": rope_params, "full_attention": rope_params}
        original_rope = rope_class(config=config).to(torch_device)
        original_cos_short, original_sin_short = original_rope(x, position_ids_short, layer_type="sliding_attention")
        original_cos_long, original_sin_long = original_rope(x, position_ids_long, layer_type="sliding_attention")
        # a prefix of a longer position range must produce identical embeddings
        torch.testing.assert_close(original_cos_short, original_cos_long[:, :short_input_length, :])
        torch.testing.assert_close(original_sin_short, original_sin_long[:, :short_input_length, :])
        # Sanity check linear RoPE scaling
        # New position "x" should match original position with index "x/scaling_factor"
        rope_params = {"rope_type": "linear", "factor": scaling_factor, "rope_theta": 10_000.0}
        config.rope_parameters = {"sliding_attention": rope_params, "full_attention": rope_params}
        linear_scaling_rope = rope_class(config=config).to(torch_device)
        linear_cos_short, linear_sin_short = linear_scaling_rope(x, position_ids_short, layer_type="sliding_attention")
        linear_cos_long, linear_sin_long = linear_scaling_rope(x, position_ids_long, layer_type="sliding_attention")
        torch.testing.assert_close(linear_cos_short, linear_cos_long[:, :short_input_length, :])
        torch.testing.assert_close(linear_sin_short, linear_sin_long[:, :short_input_length, :])
        for new_position in range(0, long_input_length, scaling_factor):
            original_position = int(new_position // scaling_factor)
            torch.testing.assert_close(linear_cos_long[:, new_position, :], original_cos_long[:, original_position, :])
            torch.testing.assert_close(linear_sin_long[:, new_position, :], original_sin_long[:, original_position, :])
        # Sanity check Dynamic NTK RoPE scaling
        # Scaling should only be observed after a long input is fed. We can observe that the frequencies increase
        # with scaling_factor (or that `inv_freq` decreases)
        rope_params = {"rope_type": "dynamic", "factor": scaling_factor, "rope_theta": 10_000.0}
        config.rope_parameters = {"sliding_attention": rope_params, "full_attention": rope_params}
        ntk_scaling_rope = rope_class(config=config).to(torch_device)
        ntk_cos_short, ntk_sin_short = ntk_scaling_rope(x, position_ids_short, layer_type="sliding_attention")
        ntk_cos_long, ntk_sin_long = ntk_scaling_rope(x, position_ids_long, layer_type="sliding_attention")
        torch.testing.assert_close(ntk_cos_short, original_cos_short)
        torch.testing.assert_close(ntk_sin_short, original_sin_short)
        with self.assertRaises(AssertionError):
            torch.testing.assert_close(ntk_cos_long, original_cos_long)
        with self.assertRaises(AssertionError):
            torch.testing.assert_close(ntk_sin_long, original_sin_long)
        self.assertTrue(
            (ntk_scaling_rope.sliding_attention_inv_freq <= original_rope.sliding_attention_inv_freq).all()
        )
        # Sanity check Yarn RoPE scaling
        # Scaling should be over the entire input
        rope_params = {"rope_type": "yarn", "factor": scaling_factor, "rope_theta": 10_000.0}
        config.rope_parameters = {"sliding_attention": rope_params, "full_attention": rope_params}
        yarn_scaling_rope = rope_class(config=config).to(torch_device)
        yarn_cos_short, yarn_sin_short = yarn_scaling_rope(x, position_ids_short, layer_type="sliding_attention")
        yarn_cos_long, yarn_sin_long = yarn_scaling_rope(x, position_ids_long, layer_type="sliding_attention")
        torch.testing.assert_close(yarn_cos_short, yarn_cos_long[:, :short_input_length, :])
        torch.testing.assert_close(yarn_sin_short, yarn_sin_long[:, :short_input_length, :])
        with self.assertRaises(AssertionError):
            torch.testing.assert_close(yarn_cos_short, original_cos_short)
        with self.assertRaises(AssertionError):
            torch.testing.assert_close(yarn_sin_short, original_sin_short)
        with self.assertRaises(AssertionError):
            torch.testing.assert_close(yarn_cos_long, original_cos_long)
        with self.assertRaises(AssertionError):
            torch.testing.assert_close(yarn_sin_long, original_sin_long)
class Gemma3nVision2TextModelTester:
    """Builds a full multimodal `Gemma3nConfig` (text + tiny ViT vision + audio) together
    with dummy pixel/token inputs for the vision-to-text tests."""

    text_config = {"activation_sparsity_pattern": None}
    forced_config_args = ["text_config"]

    def __init__(
        self,
        parent,
        mm_tokens_per_image=2,
        image_token_id=3,
        boi_token_id=4,
        eoi_token_id=5,
        boa_token_id=6,
        eoa_token_id=7,
        audio_token_id=8,
        seq_length=25,
        is_training=True,
        vision_config=None,
        use_cache=False,
        vision_soft_tokens_per_image=4,
        audio_soft_tokens_per_image=4,
    ):
        self.parent = parent
        # `image_token_id` is set to 0 to pass "resize_embeddings" test, do not modify
        # NOTE(review): the default above is 3, not 0 — this comment looks stale; confirm before relying on it
        self.mm_tokens_per_image = mm_tokens_per_image
        self.image_token_id = image_token_id
        self.boi_token_id = boi_token_id
        self.eoi_token_id = eoi_token_id
        self.boa_token_id = boa_token_id
        self.eoa_token_id = eoa_token_id
        self.audio_token_id = audio_token_id
        # reuse the text and audio sub-testers to build their respective sub-configs
        self.llm_tester = Gemma3nTextModelTester(self.parent)
        self.text_config = self.llm_tester.get_config()
        self.audio_tester = Gemma3nAudioModelTester(self.parent)
        self.audio_config = self.audio_tester.get_audio_encoder_config()
        # NOTE: gemma3n uses mobilenet backbone but timm doesn't let us
        # create a tiny MobileNet. So we use a random ViT backbone for testing!
        if vision_config is None:
            vision_config = {
                "architecture": "vit_pe_core_large_patch14_336",
                "use_labels": True,
                "image_size": 20,
                "patch_size": 5,
                "num_channels": 3,
                "is_training": True,
                "hidden_size": 32,
                "num_key_value_heads": 1,
                "num_hidden_layers": 2,
                "num_attention_heads": 4,
                "intermediate_size": 37,
                "model_args": {
                    "embed_dim": 64,
                    "img_size": (20, 20),
                    "depth": 2,
                    "global_pool": "",
                    "use_post_transformer_norm": False,
                    "init_values": 0.1,
                    "ref_feat_shape": (1, 1),
                },
            }
        self.vision_config = vision_config
        self.seq_length = seq_length
        self.pad_token_id = self.text_config.pad_token_id
        self.vision_soft_tokens_per_image = vision_soft_tokens_per_image
        self.audio_soft_tokens_per_image = audio_soft_tokens_per_image
        # mirror a few text-config values that the common test mixins read off the tester
        self.num_hidden_layers = self.text_config.num_hidden_layers
        self.vocab_size = self.text_config.vocab_size
        self.hidden_size = self.text_config.hidden_size
        self.num_attention_heads = self.text_config.num_attention_heads
        self.is_training = is_training
        self.batch_size = 3
        self.num_channels = vision_config["num_channels"]
        self.image_size = vision_config["image_size"]
        self.encoder_seq_length = seq_length
        self.use_cache = use_cache

    def get_config(self):
        """Assemble the full multimodal `Gemma3nConfig` from the three sub-configs."""
        return Gemma3nConfig(
            text_config=self.text_config,
            vision_config=self.vision_config,
            audio_config=self.audio_config,
            image_token_id=self.image_token_id,
            boi_token_id=self.boi_token_id,
            eoi_token_id=self.eoi_token_id,
            boa_token_id=self.boa_token_id,
            eoa_token_id=self.eoa_token_id,
            audio_token_id=self.audio_token_id,
            mm_tokens_per_image=self.mm_tokens_per_image,
            vision_soft_tokens_per_image=self.vision_soft_tokens_per_image,
            audio_soft_tokens_per_image=self.audio_soft_tokens_per_image,
        )

    def prepare_config_and_inputs(self):
        """Return `(config, pixel_values)` with random pixel values of the configured size."""
        pixel_values = floats_tensor(
            [
                self.batch_size,
                self.vision_config["num_channels"],
                self.vision_config["image_size"],
                self.vision_config["image_size"],
            ]
        )
        config = self.get_config()
        return config, pixel_values

    def prepare_config_and_inputs_for_common(self):
        """Return `(config, inputs_dict)` with input ids whose leading positions are image tokens."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        input_ids = ids_tensor([self.batch_size, self.seq_length], config.text_config.vocab_size - 1) + 1
        attention_mask = input_ids.ne(self.pad_token_id).to(torch_device)
        # set the 3 first tokens to be image, and ensure that no other tokens are image tokens
        # do not change this unless you modified image size or patch size
        input_ids[input_ids == config.image_token_id] = self.pad_token_id
        input_ids[:, : self.vision_soft_tokens_per_image] = config.image_token_id
        # token_type_ids: 1 marks image-token positions, 0 marks text
        token_type_ids = torch.zeros_like(input_ids)
        token_type_ids[input_ids == config.image_token_id] = 1
        inputs_dict = {
            "pixel_values": pixel_values,
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }
        return config, inputs_dict
@require_torch
class Gemma3nVision2TextModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
    """Common model/generation test suite run against the Gemma3n vision+text stack.

    Many inherited tests are skipped below: the timm/SigLIP vision tower cannot set
    `output_attentions` and has no gradients, and Gemma3n's key/query norm is incompatible
    with packed (padding-free) inputs.
    """

    all_model_classes = (Gemma3nModel, Gemma3nForConditionalGeneration) if is_torch_available() else ()
    all_generative_model_classes = (Gemma3nForConditionalGeneration,) if is_torch_available() else ()
    test_missing_keys = False
    _is_stateful = True
    model_split_percents = [0.5, 0.6]

    # MP works but offload doesn't work when the SigLIP MultiheadAttention is offloaded
    # TODO: One potential solution would be to add to set preload_module_classes = ["SiglipMultiheadAttentionPoolingHead"]
    # in the dispatch_model function
    test_cpu_offload = False
    test_disk_offload_safetensors = False
    test_disk_offload_bin = False

    def setUp(self):
        # Tiny config (hidden_size=37, no activation sparsity) keeps config tests fast.
        self.model_tester = Gemma3nVision2TextModelTester(self)
        self.config_tester = ConfigTester(
            self,
            config_class=Gemma3nConfig,
            hidden_size=37,
            text_config={"activation_sparsity_pattern": None},
        )

    @unittest.skip(
        reason="Siglip has no FLEX attention, and we don't have a proper way to set/test attn in VLMs. TODO @raushan"
    )
    def test_flex_attention_with_grads(self):
        pass

    @unittest.skip("Gemma3n applies key/query norm which doesn't work with packing")
    def test_eager_padding_matches_padding_free_with_position_ids(self):
        pass

    @unittest.skip("Gemma3n applies key/query norm which doesn't work with packing")
    def test_sdpa_padding_matches_padding_free_with_position_ids(self):
        pass

    @unittest.skip("Cannot set `output_attentions` for timm models.")
    def test_attention_outputs(self):
        pass

    @unittest.skip("Cannot set `output_attentions` for timm models.")
    def test_retain_grad_hidden_states_attentions(self):
        pass

    @unittest.skip("Cannot set `output_attentions` on timm models.")
    def test_get_image_features_attentions(self):
        pass

    @unittest.skip("timm model has no gradient")
    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip("timm model has no gradient")
    def test_training_gradient_checkpointing_use_reentrant_true(self):
        pass

    @unittest.skip("timm model has no gradient")
    def test_training_gradient_checkpointing_use_reentrant_false(self):
        pass

    def _image_features_get_expected_num_hidden_states(self, model_tester=None):
        # Number of hidden states the common `get_image_features` test should expect
        # from this model's vision tower.
        return 2

    @parameterized.expand([True, False, None])
    @unittest.skip("Audio modality is not tested here")
    def test_get_audio_features_output(self, return_dict: bool | None):
        pass

    @unittest.skip("Audio modality is not tested here")
    def test_get_audio_features_hidden_states(self, return_dict: bool | None):
        pass

    @unittest.skip("Audio modality is not tested here")
    def test_get_audio_features_attentions(self, return_dict: bool | None):
        pass

    @pytest.mark.generate
    @unittest.skip("Gemma3n does not support QuantizedCache as it performs cache manipulation in the forward pass")
    def test_generate_with_quant_cache(self):
        pass

    def _check_hidden_states_for_generate(
        self, batch_size, hidden_states, prompt_length, output_length, config, use_cache=False
    ):
        """
        NOTE: Gemma3n has special hidden states shape with 1 additional dim (which is
        then reduced with projections)
        """
        self.assertIsInstance(hidden_states, tuple)
        self.assertListEqual(
            [isinstance(iter_hidden_states, tuple) for iter_hidden_states in hidden_states],
            [True] * len(hidden_states),
        )
        self.assertEqual(len(hidden_states), (output_length - prompt_length))
        # When `output_hidden_states=True`, each iteration of generate appends the hidden states corresponding to the
        # new token(s)
        for generated_length, iter_hidden_states in enumerate(hidden_states):
            # regardless of using cache, the first forward pass will have the full prompt as input
            if use_cache and generated_length > 0:
                model_input_length = 1
            else:
                model_input_length = prompt_length + generated_length
            # Leading `altup_num_inputs` dim is the Gemma3n-specific extra dimension.
            expected_shape = (config.altup_num_inputs, batch_size, model_input_length, config.hidden_size)
            # check hidden size
            self.assertListEqual(
                [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states],
                [expected_shape] * len(iter_hidden_states),
            )

    @parameterized.expand(TEST_EAGER_MATCHES_SDPA_INFERENCE_PARAMETERIZATION)
    def test_eager_matches_sdpa_inference(
        self,
        name,
        dtype,
        padding_side,
        use_attention_mask,
        output_attentions,
        enable_kernels,
    ):
        """We need to relax a bit the `atols` and `rtols` for fp32 here due to the altup projections."""
        # Keys are (device, enable_kernels, dtype).
        atols = {
            ("cpu", False, torch.float32): 5e-2,  # this was relaxed
            ("cpu", False, torch.float16): 5e-3,
            ("cpu", False, torch.bfloat16): 1e-2,
            ("cpu", True, torch.float32): 5e-2,  # this was relaxed
            ("cpu", True, torch.float16): 5e-3,
            ("cpu", True, torch.bfloat16): 1e-2,
            ("cuda", False, torch.float32): 5e-2,  # this was relaxed
            ("cuda", False, torch.bfloat16): 1e-2,
            ("cuda", False, torch.float16): 5e-3,
            ("cuda", True, torch.float32): 5e-2,  # this was relaxed
            ("cuda", True, torch.bfloat16): 1e-2,
            ("cuda", True, torch.float16): 5e-3,
        }
        rtols = {
            ("cpu", False, torch.float32): 1e-2,  # this was relaxed
            ("cpu", False, torch.float16): 5e-3,
            ("cpu", False, torch.bfloat16): 1e-2,
            ("cpu", True, torch.float32): 1e-2,  # this was relaxed
            ("cpu", True, torch.float16): 5e-3,
            ("cpu", True, torch.bfloat16): 1e-2,
            ("cuda", False, torch.float32): 1e-2,  # this was relaxed
            ("cuda", False, torch.bfloat16): 1e-2,
            ("cuda", False, torch.float16): 5e-3,
            ("cuda", True, torch.float32): 1e-2,  # this was relaxed
            ("cuda", True, torch.bfloat16): 3e-2,
            ("cuda", True, torch.float16): 5e-3,
        }
        _test_eager_matches_sdpa_inference(
            self,
            name,
            dtype,
            padding_side,
            use_attention_mask,
            output_attentions,
            enable_kernels,
            atols=atols,
            rtols=rtols,
        )
@slow
@require_torch_accelerator
class Gemma3nIntegrationTest(unittest.TestCase):
    """Slow end-to-end generation tests against released Gemma3n checkpoints.

    Expected outputs are stored per accelerator via `Expectations`, since greedy decoding
    can differ slightly between CUDA, ROCm and XPU.
    """

    def setUp(self):
        self.processor = AutoProcessor.from_pretrained("Google/gemma-3n-E4B-it", padding_side="left")
        url = "https://huggingface.co/datasets/hf-internal-testing/fixtures-captioning/resolve/main/cow_beach_1.png"
        # Shared image+text chat used by several tests below.
        self.messages = [
            {"role": "system", "content": [{"type": "text", "text": "You are a helpful assistant."}]},
            {
                "role": "user",
                "content": [
                    {"type": "image", "url": url},
                    {"type": "text", "text": "What is shown in this image?"},
                ],
            },
        ]
        audio_ds = load_dataset(
            "etechgrid/28.5k_wavfiles_dataset", "default", data_files="wav_dataset/103-1240-0000.wav"
        )
        self.audio_file_path = audio_ds["train"][0]["audio"].metadata.path
        # Free accelerator memory left over from previous tests.
        cleanup(torch_device, gc_collect=True)

    def tearDown(self):
        cleanup(torch_device, gc_collect=True)

    def test_model_4b_bf16(self):
        """Greedy image+text generation with the E4B checkpoint in bfloat16."""
        model_id = "Google/gemma-3n-E4B-it"
        model = Gemma3nForConditionalGeneration.from_pretrained(model_id, dtype=torch.bfloat16).to(torch_device)
        inputs = self.processor.apply_chat_template(
            self.messages,
            tokenize=True,
            return_dict=True,
            return_tensors="pt",
            add_generation_prompt=True,
        ).to(torch_device, dtype=torch.bfloat16)
        output = model.generate(**inputs, max_new_tokens=30, do_sample=False)
        output_text = self.processor.batch_decode(output, skip_special_tokens=True)
        EXPECTED_TEXTS = Expectations({
            ("cuda", None): ['user\nYou are a helpful assistant.\n\n\n\n\n\nWhat is shown in this image?\nmodel\nThe image shows a brown and white cow standing on a sandy beach next to a clear blue ocean. The cow is facing the viewer with its head slightly'],
            ("rocm", (9, 4)): ['user\nYou are a helpful assistant.\n\n\n\n\n\nWhat is shown in this image?\nmodel\nThe image shows a brown and white cow standing on a sandy beach next to a turquoise ocean. The sky is blue with a few white clouds. The'],
        }).get_expectation()  # fmt: skip
        self.assertEqual(output_text, EXPECTED_TEXTS)

    def test_model_with_audio(self):
        """
        Tests the full model pipeline with batched audio inputs provided as file paths.
        This ensures the processor correctly loads and processes audio files.
        """
        model_id = "Google/gemma-3n-E4B-it"
        model = Gemma3nForConditionalGeneration.from_pretrained(
            model_id, dtype=torch.bfloat16, device_map=torch_device
        )
        messages = [
            [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "Transcribe the following speech segment in English:"},
                        {"type": "audio", "audio": str(self.audio_file_path)},
                    ],
                }
            ],
        ]
        inputs = self.processor.apply_chat_template(
            messages,
            add_generation_prompt=True,
            tokenize=True,
            return_dict=True,
            padding=True,
            return_tensors="pt",
        ).to(torch_device, dtype=model.dtype)
        input_len = inputs["input_ids"].shape[-1]
        output = model.generate(**inputs, max_new_tokens=16, do_sample=False)
        # Only compare the newly generated continuation, not the prompt.
        output = output[:, input_len:]
        output_text = self.processor.batch_decode(output, skip_special_tokens=True)
        EXPECTED_TEXTS = ["Chapter 1. Mrs. Rachel Lind is surprised.\n\nMrs. Rachel Lind"]
        self.assertEqual(output_text, EXPECTED_TEXTS)

    def test_model_4b_batch(self):
        """Batched generation mixing a single-image chat and a two-image chat."""
        model_id = "Google/gemma-3n-E4B-it"
        model = Gemma3nForConditionalGeneration.from_pretrained(
            model_id, dtype=torch.bfloat16, device_map=torch_device
        )
        messages_2 = [
            {"role": "system", "content": [{"type": "text", "text": "You are a helpful assistant."}]},
            {
                "role": "user",
                "content": [
                    {
                        "type": "image",
                        "url": "https://huggingface.co/datasets/hf-internal-testing/fixtures-captioning/resolve/main/cow_beach_1.png",
                    },
                    {
                        "type": "image",
                        "url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg",
                    },
                    {"type": "text", "text": "Are these images identical?"},
                ],
            },
        ]
        inputs = self.processor.apply_chat_template(
            [self.messages, messages_2],
            tokenize=True,
            return_dict=True,
            return_tensors="pt",
            padding=True,
            add_generation_prompt=True,
        ).to(torch_device, dtype=torch.bfloat16)
        output = model.generate(**inputs, max_new_tokens=30, do_sample=False)
        output_text = self.processor.batch_decode(output, skip_special_tokens=True)
        EXPECTED_TEXTS = Expectations({
            ("cuda", None): ['user\nYou are a helpful assistant.\n\n\n\n\n\nWhat is shown in this image?\nmodel\nThe image shows a brown and white cow standing on a sandy beach next to a clear blue ocean. The cow is facing the viewer with its head slightly', "user\nYou are a helpful assistant.\n\n\n\n\n\n\n\n\n\nAre these images identical?\nmodel\nNo, the images are not identical. \n\nHere's a breakdown of the differences:\n\n* **Subject:** The first image features a cow"],
            ("rocm", (9, 4)): ['user\nYou are a helpful assistant.\n\n\n\n\n\nWhat is shown in this image?\nmodel\nThe image shows a brown and white cow standing on a sandy beach next to a clear blue ocean. The cow is facing the viewer with its head slightly', "user\nYou are a helpful assistant.\n\n\n\n\n\n\n\n\n\nAre these images identical?\nmodel\nNo, the images are not identical. \n\nHere's a breakdown of the differences:\n\n* **Subject Matter:** The first image shows a"],
            ("xpu", None): ['user\nYou are a helpful assistant.\n\n\n\n\n\nWhat is shown in this image?\nmodel\nThe image shows a brown and white cow standing on a sandy beach next to a turquoise ocean. The cow is facing the viewer with its head slightly turned', "user\nYou are a helpful assistant.\n\n\n\n\n\n\n\n\n\nAre these images identical?\nmodel\nNo, the images are not identical. \n\nHere's a breakdown of the differences:\n\n* **Subject:** The first image features a cow"],
        }).get_expectation()  # fmt: skip
        self.assertEqual(output_text, EXPECTED_TEXTS)

    def test_model_4b_image(self):
        """Single-image generation; also checks that no image crops are produced."""
        model_id = "Google/gemma-3n-E4B-it"
        model = Gemma3nForConditionalGeneration.from_pretrained(
            model_id, dtype=torch.bfloat16, device_map=torch_device
        )
        inputs = self.processor.apply_chat_template(
            self.messages,
            tokenize=True,
            return_dict=True,
            return_tensors="pt",
            add_generation_prompt=True,
        ).to(torch_device, dtype=torch.bfloat16)
        output = model.generate(**inputs, max_new_tokens=30, do_sample=False)
        output_text = self.processor.batch_decode(output, skip_special_tokens=True)
        EXPECTED_NUM_IMAGES = 1  # Gemma3n does not support crops
        EXPECTED_TEXTS = Expectations({
            ("cuda", None): ['user\nYou are a helpful assistant.\n\n\n\n\n\nWhat is shown in this image?\nmodel\nThe image shows a brown and white cow standing on a sandy beach next to a clear blue ocean. The cow is facing the viewer with its head slightly'],
            ("xpu", None): ['user\nYou are a helpful assistant.\n\n\n\n\n\nWhat is shown in this image?\nmodel\nThe image shows a brown and white cow standing on a sandy beach next to a clear blue ocean. The cow is facing the viewer with its head slightly'],
            ("rocm", (9, 4)): ['user\nYou are a helpful assistant.\n\n\n\n\n\nWhat is shown in this image?\nmodel\nThe image shows a brown and white cow standing on a sandy beach next to a turquoise ocean. The sky is blue with a few white clouds. The'],
        }).get_expectation()  # fmt: skip
        self.assertEqual(len(inputs["pixel_values"]), EXPECTED_NUM_IMAGES)
        self.assertEqual(output_text, EXPECTED_TEXTS)

    @require_deterministic_for_xpu
    def test_model_4b_multiimage(self):
        """Generation from a chat containing an image plus free-form question."""
        model_id = "Google/gemma-3n-E4B-it"
        model = Gemma3nForConditionalGeneration.from_pretrained(
            model_id, dtype=torch.bfloat16, device_map=torch_device
        )
        messages = [
            {"role": "system", "content": [{"type": "text", "text": "You are a helpful assistant."}]},
            {
                "role": "user",
                "content": [
                    {
                        "type": "image",
                        "url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg",
                    },
                    {"type": "text", "text": "What do you see here?"},
                ],
            },
        ]
        inputs = self.processor.apply_chat_template(
            messages,
            tokenize=True,
            return_dict=True,
            return_tensors="pt",
            padding=True,
            add_generation_prompt=True,
        ).to(torch_device, dtype=torch.bfloat16)
        output = model.generate(**inputs, max_new_tokens=30, do_sample=False)
        output_text = self.processor.batch_decode(output, skip_special_tokens=True)
        EXPECTED_TEXTS = Expectations({
            ("cuda", None): ['user\nYou are a helpful assistant.\n\n\n\n\n\nWhat do you see here?\nmodel\nIn the image, I see a street scene in what appears to be a Chinatown district. Here are some of the key elements:\n\n* **A'],
            ("xpu", None): ['user\nYou are a helpful assistant.\n\n\n\n\n\nWhat do you see here?\nmodel\nIn the image, I see a street scene in what appears to be a Chinatown district. Here are the key elements:\n\n* **A prominent red'],
            ("rocm", (9, 4)): ['user\nYou are a helpful assistant.\n\n\n\n\n\nWhat do you see here?\nmodel\nIn the image, I see a street scene in what appears to be a Chinatown district. \n\nHere are some key elements:\n\n* **A'],
        }).get_expectation()  # fmt: skip
        self.assertEqual(output_text, EXPECTED_TEXTS)

    @unittest.skip("For now, using a gemma model with the 3n class is not supported")
    def test_model_1b_text_only(self):
        """Text-only generation through the Gemma3n causal LM class (currently unsupported)."""
        model_id = "google/gemma-3-1b-it"
        model = Gemma3nForCausalLM.from_pretrained(model_id, dtype=torch.bfloat16, device_map=torch_device)
        tokenizer = AutoTokenizer.from_pretrained(model_id, padding_side="left")
        inputs = tokenizer("Write a poem about Machine Learning.", return_tensors="pt").to(torch_device)
        output = model.generate(**inputs, max_new_tokens=30, do_sample=False)
        output_text = tokenizer.batch_decode(output, skip_special_tokens=True)
        EXPECTED_TEXTS = ['Write a poem about Machine Learning.\n\n---\n\nThe data flows, a river deep,\nWith patterns hidden, secrets sleep.\nA neural net, a watchful eye,\nLearning'] # fmt: skip
        self.assertEqual(output_text, EXPECTED_TEXTS)

    def test_generation_beyond_sliding_window(self):
        """Test that we can correctly generate beyond the sliding window. This is non trivial as
        we need to correctly slice the attention mask in all cases (because we use a hybrid cache).
        Outputs for every attention functions should be coherent and identical.
        """
        model_id = "google/gemma-3n-E2B-it"
        input_text = [
            "This is a nice place. " * 800 + "I really enjoy the scenery,",  # This is larger than 4096 tokens
            "A list of colors: red, blue",  # This will almost all be padding tokens
        ]
        tokenizer = AutoTokenizer.from_pretrained(model_id, padding="left")
        inputs = tokenizer(input_text, padding=True, return_tensors="pt").to(torch_device)
        model = AutoModelForCausalLM.from_pretrained(
            model_id, attn_implementation="eager", dtype=torch.bfloat16, device_map=torch_device
        )
        # Make sure prefill is larger than sliding window
        input_size = inputs.input_ids.shape[-1]
        self.assertTrue(input_size > model.config.get_text_config().sliding_window)
        out = model.generate(**inputs, max_new_tokens=20, do_sample=False)[:, input_size:]
        output_text = tokenizer.batch_decode(out)
        EXPECTED_COMPLETIONS = [" and the people are so friendly. I'm so glad I came here. I'm so", ", green, yellow, orange, purple, pink, brown, black, white.\n\nHere'"] # fmt: skip
        self.assertEqual(output_text, EXPECTED_COMPLETIONS)

    @require_deterministic_for_xpu
    def test_generation_beyond_sliding_window_with_generation_config(self):
        """Same as `test_generation_beyond_sliding_window`, but passing a GenerationConfig. Regression test for #36684 --
        ensures `cache_implementation='hybrid'` is correctly inherited from the base `model.generation_config`.
        """
        model_id = "google/gemma-3n-E2B-it"
        input_text = [
            "This is a nice place. " * 800 + "I really enjoy the scenery,",  # This is larger than 4096 tokens
            "A list of colors: red, blue",  # This will almost all be padding tokens
        ]
        tokenizer = AutoTokenizer.from_pretrained(model_id, padding="left")
        inputs = tokenizer(input_text, padding=True, return_tensors="pt").to(torch_device)
        model = AutoModelForCausalLM.from_pretrained(model_id, dtype=torch.bfloat16, device_map=torch_device)
        # Make sure prefill is larger than sliding window
        input_size = inputs.input_ids.shape[-1]
        self.assertTrue(input_size > model.config.get_text_config().sliding_window)
        out = model.generate(**inputs, max_new_tokens=20, do_sample=False)[:, input_size:]
        output_text = tokenizer.batch_decode(out)
        EXPECTED_COMPLETIONS = Expectations({
            # FIXME: This test is VERY flaky on ROCm
            ("cuda", None): [" and I'm glad I came here. This is a nice place. This is a nice place", ", green, yellow, orange, purple, pink, brown, black, white.\n\nHere'"],
            ("rocm", (9, 4)): [' and I think it makes this place special. This is a nice place. This is a nice place', ', green, yellow, purple, orange, pink, brown, black, white.\n\nHere are'],
            ("xpu", None): [" and I think it's a nice place to visit. This is a nice place. This is", ", green, yellow, orange, purple, pink, brown, black, white.\n\nHere'"],
        }).get_expectation()  # fmt: skip
        self.assertEqual(output_text, EXPECTED_COMPLETIONS)
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/gemma3n/test_modeling_gemma3n.py",
"license": "Apache License 2.0",
"lines": 1046,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:tests/models/gemma3n/test_processing_gemma3n.py | # Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers.models.gemma3n import Gemma3nProcessor
from transformers.testing_utils import require_sentencepiece, require_torch, require_torchaudio, require_vision
from ...test_processing_common import ProcessorTesterMixin
from .test_feature_extraction_gemma3n import floats_list
# TODO: omni-modal processor can't run tests from `ProcessorTesterMixin`
@require_torch
@require_torchaudio
@require_vision
@require_sentencepiece
class Gemma3nProcessorTest(ProcessorTesterMixin, unittest.TestCase):
    """Processor tests for Gemma3n, reusing the shared processor test suite."""

    processor_class = Gemma3nProcessor
    model_id = "hf-internal-testing/namespace-google-repo_name-gemma-3n-E4B-it"

    def prepare_image_inputs(self, batch_size: int | None = None, nested: bool = False):
        # Gemma3n works on per-sample image lists, so always request nested inputs.
        return super().prepare_image_inputs(batch_size=batch_size, nested=True)

    @classmethod
    def _setup_test_attributes(cls, processor):
        # The begin-of-image token is what the mixin's image tests look for.
        cls.image_token = processor.boi_token

    def test_audio_feature_extractor(self):
        """The processor must yield the same audio features as the bare feature extractor."""
        processor = self.get_processor()
        extractor = self.get_component("feature_extractor")
        speech_batch = floats_list((3, 1000))
        extractor_outputs = extractor(speech_batch, return_tensors="pt")
        processor_outputs = processor(text="Transcribe:", audio=speech_batch, return_tensors="pt")
        for feature_name in extractor_outputs:
            self.assertAlmostEqual(
                extractor_outputs[feature_name].sum(), processor_outputs[feature_name].sum(), delta=1e-2
            )
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/gemma3n/test_processing_gemma3n.py",
"license": "Apache License 2.0",
"lines": 39,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:src/transformers/models/dia/configuration_dia.py | # Copyright 2025 The Nari Labs and HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dia model configuration"""
from ...configuration_utils import PreTrainedConfig
from ...modeling_rope_utils import RopeParameters
from ...utils import logging
logger = logging.get_logger(__name__)
class DiaEncoderConfig(PreTrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`DiaEncoder`]. It is used to instantiate a Dia
    encoder according to the specified arguments, defining the encoder architecture.
    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.
    Args:
        max_position_embeddings (`int`, *optional*, defaults to 1024):
            The maximum sequence length that this model might ever be used with.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        hidden_size (`int`, *optional*, defaults to 1024):
            Dimensionality of the encoder layers and the pooler layer.
        num_attention_heads (`int`, *optional*, defaults to 16):
            Number of attention heads for each attention layer in the Transformer encoder.
        num_key_value_heads (`int`, *optional*, defaults to 16):
            Number of key and value heads for each attention layer in the Transformer encoder.
        head_dim (`int`, *optional*, defaults to 128):
            Dimensionality of the attention head.
        intermediate_size (`int`, *optional*, defaults to 4096):
            Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
        norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon used by the normalization layers.
        vocab_size (`int`, *optional*, defaults to 256):
            Vocabulary size of the Dia model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`DiaModel`].
        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"swish"` and `"gelu_new"` are supported.
        rope_parameters (`RopeParameters`, *optional*):
            Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain
            a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE
            with longer `max_position_embeddings`.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
    """

    model_type = "dia_encoder"

    def __init__(
        self,
        max_position_embeddings: int = 1024,
        num_hidden_layers: int = 12,
        hidden_size: int = 1024,
        num_attention_heads: int = 16,
        num_key_value_heads: int = 16,
        head_dim: int = 128,
        intermediate_size: int = 4096,
        norm_eps: float = 1e-5,
        vocab_size: int = 256,
        hidden_act: str = "silu",
        rope_parameters: RopeParameters | None = None,
        initializer_range: float = 0.02,
        **kwargs,
    ):
        # Values are stored verbatim; serialization/validation is handled by the base class.
        self.max_position_embeddings = max_position_embeddings
        self.num_hidden_layers = num_hidden_layers
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_attention_heads = num_attention_heads
        self.head_dim = head_dim
        self.norm_eps = norm_eps
        self.vocab_size = vocab_size
        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        # NOTE(review): may be None here — presumably normalized by the RoPE utilities
        # downstream; confirm in the modeling code.
        self.rope_parameters = rope_parameters
        # Remaining kwargs (e.g. token ids) are forwarded to the PreTrainedConfig base.
        super().__init__(**kwargs)
class DiaDecoderConfig(PreTrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`DiaDecoder`]. It is used to instantiate a Dia
    decoder according to the specified arguments, defining the decoder architecture.
    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.
    Args:
        max_position_embeddings (`int`, *optional*, defaults to 3072):
            The maximum sequence length that this model might ever be used with.
        num_hidden_layers (`int`, *optional*, defaults to 18):
            Number of hidden layers in the Transformer decoder.
        hidden_size (`int`, *optional*, defaults to 2048):
            Dimensionality of the decoder layers and the pooler layer.
        intermediate_size (`int`, *optional*, defaults to 8192):
            Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer decoder.
        num_attention_heads (`int`, *optional*, defaults to 16):
            Number of attention heads for each attention layer in the Transformer decoder.
        num_key_value_heads (`int`, *optional*, defaults to 4):
            Number of key and value heads for each attention layer in the Transformer decoder.
        head_dim (`int`, *optional*, defaults to 128):
            Dimensionality of the attention head.
        cross_num_attention_heads (`int`, *optional*, defaults to 16):
            Number of attention heads for each cross-attention layer in the Transformer decoder.
        cross_head_dim (`int`, *optional*, defaults to 128):
            Dimensionality of the cross-attention head.
        cross_num_key_value_heads (`int`, *optional*, defaults to 16):
            Number of key and value heads for each cross-attention layer in the Transformer decoder.
        cross_hidden_size (`int`, *optional*, defaults to 1024):
            Dimensionality of the cross-attention layers.
        norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon used by the normalization layers.
        vocab_size (`int`, *optional*, defaults to 1028):
            Vocabulary size of the Dia model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`DiaModel`].
        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the decoder. If string, `"gelu"`, `"relu"`,
            `"swish"` and `"gelu_new"` are supported.
        num_channels (`int`, *optional*, defaults to 9):
            Number of channels for the Dia decoder.
        rope_parameters (`RopeParameters`, *optional*):
            Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain
            a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE
            with longer `max_position_embeddings`.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models).
        is_encoder_decoder (`bool`, *optional*, defaults to `True`):
            Indicating that this model is part of an encoder-decoder architecture.
        pad_token_id (`int`, *optional*, defaults to 1025):
            The token id used for padding sequences to the same length within a batch.
        eos_token_id (`int`, *optional*, defaults to 1024):
            The token id representing the end-of-sequence token, indicating that generation should stop.
        bos_token_id (`int`, *optional*, defaults to 1026):
            The token id representing the beginning-of-sequence token, used to initialize decoding.
    """

    model_type = "dia_decoder"

    def __init__(
        self,
        max_position_embeddings: int = 3072,
        num_hidden_layers: int = 18,
        hidden_size: int = 2048,
        intermediate_size: int = 8192,
        num_attention_heads: int = 16,
        num_key_value_heads: int = 4,
        head_dim: int = 128,
        cross_num_attention_heads: int = 16,
        cross_head_dim: int = 128,
        cross_num_key_value_heads: int = 16,
        cross_hidden_size: int = 1024,
        norm_eps: float = 1e-5,
        vocab_size: int = 1028,
        hidden_act: str = "silu",
        num_channels: int = 9,
        rope_parameters: RopeParameters | None = None,
        initializer_range: float = 0.02,
        use_cache: bool = True,
        is_encoder_decoder: bool = True,
        pad_token_id: int = 1025,
        eos_token_id: int = 1024,
        bos_token_id: int = 1026,
        **kwargs,
    ):
        # Values are stored verbatim; serialization/validation is handled by the base class.
        self.max_position_embeddings = max_position_embeddings
        self.num_hidden_layers = num_hidden_layers
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_attention_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.head_dim = head_dim
        self.cross_num_key_value_heads = cross_num_key_value_heads
        self.cross_num_attention_heads = cross_num_attention_heads
        self.cross_head_dim = cross_head_dim
        self.cross_hidden_size = cross_hidden_size
        self.norm_eps = norm_eps
        self.vocab_size = vocab_size
        self.hidden_act = hidden_act
        self.num_channels = num_channels
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        # NOTE(review): may be None here — presumably normalized by the RoPE utilities
        # downstream; confirm in the modeling code.
        self.rope_parameters = rope_parameters
        # Token ids are stored directly so the base class does not need them via kwargs.
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id
        self.bos_token_id = bos_token_id
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
class DiaConfig(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`DiaModel`]. It is used to instantiate a
Dia model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the
[nari-labs/Dia-1.6B](https://huggingface.co/nari-labs/Dia-1.6B) architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
encoder_config (`DiaEncoderConfig`, *optional*):
Configuration for the encoder part of the model. If not provided, a default `DiaEncoderConfig` will be used.
decoder_config (`DiaDecoderConfig`, *optional*):
Configuration for the decoder part of the model. If not provided, a default `DiaDecoderConfig` will be used.
norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the normalization layers.
is_encoder_decoder (`bool`, *optional*, defaults to `True`):
Indicating that this model uses an encoder-decoder architecture.
pad_token_id (`int`, *optional*):
Deprecated. Please set this on `DiaDecoderConfig` directly. If provided, it will be forwarded
to `decoder_config`.
eos_token_id (`int`, *optional*):
Deprecated. Please set this on `DiaDecoderConfig` directly. If provided, it will be forwarded
to `decoder_config`.
bos_token_id (`int`, *optional*):
Deprecated. Please set this on `DiaDecoderConfig` directly. If provided, it will be forwarded
to `decoder_config`.
delay_pattern (`list[int]`, *optional*, defaults to `[0, 8, 9, 10, 11, 12, 13, 14, 15]`):
The delay pattern for the decoder. The length of this list must match `decoder_config.num_channels`.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
Example:
```python
>>> from transformers import DiaConfig, DiaModel
>>> # Initializing a DiaConfig with default values
>>> configuration = DiaConfig()
>>> # Initializing a DiaModel (with random weights) from the configuration
>>> model = DiaModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```
"""
model_type = "dia"
keys_to_ignore_at_inference = ["past_key_values"]
sub_configs = {"encoder_config": DiaEncoderConfig, "decoder_config": DiaDecoderConfig}
def __init__(
    self,
    encoder_config: DiaEncoderConfig | None = None,
    decoder_config: DiaDecoderConfig | None = None,
    norm_eps: float = 1e-5,
    is_encoder_decoder: bool = True,
    pad_token_id: int | None = None,
    eos_token_id: int | None = None,
    bos_token_id: int | None = None,
    delay_pattern: list[int] | None = None,
    initializer_range: float = 0.02,
    use_cache: bool = True,
    **kwargs,
):
    """Initialize the composite Dia config; see the class docstring for argument details."""
    # Sub-configs may arrive as plain dicts (e.g. when deserialized from JSON); coerce them.
    if isinstance(encoder_config, dict):
        encoder_config = DiaEncoderConfig(**encoder_config)
    if isinstance(decoder_config, dict):
        decoder_config = DiaDecoderConfig(**decoder_config)
    self.encoder_config = encoder_config if encoder_config is not None else DiaEncoderConfig()
    self.decoder_config = decoder_config if decoder_config is not None else DiaDecoderConfig()
    self.norm_eps = norm_eps
    self.delay_pattern = delay_pattern if delay_pattern is not None else [0, 8, 9, 10, 11, 12, 13, 14, 15]
    self.initializer_range = initializer_range
    self.use_cache = use_cache

    # TODO: Remove token ID forwarding once the `nari-labs/Dia-1.6B`
    # checkpoint is updated
    if pad_token_id is not None:
        logger.warning_once(
            "Passing `pad_token_id` to `DiaConfig` is deprecated. "
            "Please set it directly on `DiaDecoderConfig` instead."
        )
        self.decoder_config.pad_token_id = pad_token_id
    if eos_token_id is not None:
        logger.warning_once(
            "Passing `eos_token_id` to `DiaConfig` is deprecated. "
            "Please set it directly on `DiaDecoderConfig` instead."
        )
        self.decoder_config.eos_token_id = eos_token_id
    if bos_token_id is not None:
        logger.warning_once(
            "Passing `bos_token_id` to `DiaConfig` is deprecated. "
            "Please set it directly on `DiaDecoderConfig` instead."
        )
        self.decoder_config.bos_token_id = bos_token_id

    # FIX: raise an explicit exception instead of using `assert` — asserts are
    # stripped under `python -O`, which would silently skip this validation.
    if self.decoder_config.num_channels != len(self.delay_pattern):
        raise ValueError("Number of channels must match delay pattern length.")

    super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
def get_text_config(self, *args, **kwargs):
    """Return the decoder (audio) config, which acts as the text backbone for Dia."""
    # Extra positional/keyword arguments are accepted for API compatibility and ignored.
    text_backbone_config = self.decoder_config
    return text_backbone_config
__all__ = ["DiaConfig", "DiaEncoderConfig", "DiaDecoderConfig"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/dia/configuration_dia.py",
"license": "Apache License 2.0",
"lines": 287,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/dia/convert_dia_to_hf.py | # Copyright 2025 The Nari Labs and HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Converts a Dia model in Nari Labs format to Hugging Face format."""
import argparse
import os
import re
import torch
from huggingface_hub import snapshot_download
from safetensors.torch import load_file
from transformers import (
DacModel,
DiaConfig,
DiaFeatureExtractor,
DiaForConditionalGeneration,
DiaProcessor,
DiaTokenizer,
GenerationConfig,
)
from transformers.utils.import_utils import is_tiktoken_available
# Provide just the list of layer keys you want to fix.
# "*" stands for the numeric layer index. Keys listed here are stored with a
# transposed/flattened layout in the original checkpoint and are reshaped to the
# HF layout during conversion (see `convert_dia_model_to_hf`).
shape_mappings = [
    "encoder.layers.*.mlp.gate_up_proj.weight",
    "encoder.layers.*.mlp.down_proj.weight",
    "encoder.layers.*.self_attention.q_proj.weight",
    "encoder.layers.*.self_attention.k_proj.weight",
    "encoder.layers.*.self_attention.v_proj.weight",
    "encoder.layers.*.self_attention.o_proj.weight",
    "decoder.layers.*.mlp.gate_up_proj.weight",
    "decoder.layers.*.mlp.down_proj.weight",
    "decoder.layers.*.self_attention.q_proj.weight",
    "decoder.layers.*.self_attention.k_proj.weight",
    "decoder.layers.*.self_attention.v_proj.weight",
    "decoder.layers.*.self_attention.o_proj.weight",
    "decoder.layers.*.cross_attention.q_proj.weight",
    "decoder.layers.*.cross_attention.k_proj.weight",
    "decoder.layers.*.cross_attention.v_proj.weight",
    "decoder.layers.*.cross_attention.o_proj.weight",
    "decoder.logits_dense.weight",
]

# Provide renamings here: original Nari Labs submodule name -> HF submodule name.
rename_mapping = {
    "mlp.wo": "mlp.down_proj",
    "mlp.wi_fused": "mlp.gate_up_proj",
}
def get_generation_config(config):
    """Derive a `GenerationConfig` from `config` and apply Dia's tuned sampling defaults."""
    gen_config = GenerationConfig.from_model_config(config)
    # Mark the config as hand-tuned rather than auto-derived from the model config.
    gen_config._from_model_config = False
    tuned_settings = {
        "do_sample": True,
        "top_k": 45,
        "top_p": 0.95,
        "temperature": 1.2,
        "guidance_scale": 3.0,
        "max_length": 3072,  # Decoder max length
    }
    for setting_name, setting_value in tuned_settings.items():
        setattr(gen_config, setting_name, setting_value)
    return gen_config
def convert_dia_model_to_hf(checkpoint_path, verbose=False):
    """
    Converts a Dia model in Nari Labs format to Hugging Face format.

    Args:
        checkpoint_path (`str`):
            Hub repo id (or identifier accepted by `snapshot_download`) of the original checkpoint.
        verbose (`bool`, *optional*):
            Whether to print information during conversion.

    Returns:
        `DiaForConditionalGeneration`: The converted model with the weights loaded.
    """
    # Download from HF Hub if checkpoint_path is None
    checkpoint_path = snapshot_download(repo_id=checkpoint_path, allow_patterns=["*.pth", "*.safetensors"])
    print(f"Downloaded checkpoint from Hugging Face Hub: {checkpoint_path}")

    # Initialize base model with default config == 1.6B model
    with torch.device("meta"):
        hf_model = DiaForConditionalGeneration(config=DiaConfig())
    hf_model_dict = hf_model.state_dict()
    hf_model_keys = hf_model_dict.keys()

    # Pick the checkpoint file, preferring safetensors over pickle (.pth).
    # FIX: the previous loop chose `load_function` from whichever file the directory
    # listing yielded (possibly leaving it unbound) but then always loaded `files[0]`,
    # so the loader and the loaded file could mismatch.
    files = os.listdir(checkpoint_path)
    safetensors_files = [f for f in files if f.endswith(".safetensors")]
    pth_files = [f for f in files if f.endswith(".pth")]
    if safetensors_files:
        load_function, checkpoint_file = load_file, safetensors_files[0]
    elif pth_files:
        load_function, checkpoint_file = torch.load, pth_files[0]
    else:
        raise FileNotFoundError(f"No `.safetensors` or `.pth` checkpoint found in {checkpoint_path}")
    checkpoint_path = os.path.join(checkpoint_path, checkpoint_file)
    nari_state_dict = load_function(checkpoint_path, "cpu")

    # Conversion starts here
    converted_state_dict = {}
    embeddings = {}
    for key, tensor in nari_state_dict.items():
        # add prefix
        key = "model." + key

        # rename some weights
        for original, rename in rename_mapping.items():
            if original in key:
                key = re.sub(original, rename, key)

        # decoder multi channel: collect the per-channel embedding tables and stack them later
        if "embeddings" in key:
            embeddings_key = key.rsplit(".", 2)[0] + ".embed.weight"
            if embeddings_key in embeddings:
                embeddings[embeddings_key] += [tensor]
            else:
                embeddings[embeddings_key] = [tensor]
            continue
        elif re.sub(r"\d+", "*", key).removeprefix("model.") in shape_mappings:
            # add exception to the head
            if "logits_dense" in key:
                key = re.sub("decoder.logits_dense", "logits_dense", key).removeprefix("model.")
            # dense general: original layout is the transpose of the HF layout
            if key in hf_model_keys:
                tensor_shape = tensor.shape
                target_shape = hf_model_dict[key].shape
                try:
                    tensor = tensor.reshape(target_shape[1], target_shape[0]).T
                    if verbose:
                        print(f"{key}: transpose reshaped from {tensor_shape} to {target_shape}")
                except Exception as e:
                    print(f"WARNING: Could not reshape {key}: {e}")

        converted_state_dict[key] = tensor

    # Combining the embeddings as last step
    embeddings = {k: torch.cat(v, dim=0) for k, v in embeddings.items()}
    converted_state_dict.update(embeddings)

    # Load converted weights into HF model
    hf_model.load_state_dict(converted_state_dict, assign=True)

    # Overwrite generation config
    hf_model.generation_config = get_generation_config(DiaConfig())

    return hf_model
if __name__ == "__main__":

    def _str_to_bool(value):
        # FIX: `type=bool` in argparse treats any non-empty string (even "False") as
        # True, so `--verbose False` silently kept verbose on. Parse explicitly.
        if isinstance(value, bool):
            return value
        if value.lower() in ("true", "t", "1", "yes", "y"):
            return True
        if value.lower() in ("false", "f", "0", "no", "n"):
            return False
        raise argparse.ArgumentTypeError(f"Expected a boolean value, got {value!r}")

    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_path", type=str, default="nari-labs/Dia-1.6B", help="Path to the downloaded checkpoints"
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default="AntonV/Dia-1.6B", type=str, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--convert_preprocessor",
        type=_str_to_bool,
        default=True,
        help="Whether or not the preprocessor (tokenizer + feature extractor) should be converted along with the model.",
    )
    parser.add_argument(
        "--verbose",
        type=_str_to_bool,
        default=True,
        help="Whether or not to log information during conversion.",
    )
    args = parser.parse_args()

    model = convert_dia_model_to_hf(args.checkpoint_path, args.verbose)
    if args.convert_preprocessor:
        # The tokenizer conversion needs `tiktoken`; degrade gracefully if missing.
        try:
            if not is_tiktoken_available(with_blobfile=False):
                raise ModuleNotFoundError(
                    """`tiktoken` is not installed, use `pip install tiktoken` to convert the tokenizer"""
                )
        except Exception as e:
            print(e)
        else:
            processor = DiaProcessor(
                DiaFeatureExtractor(sampling_rate=44100, hop_length=512),
                DiaTokenizer(),
                DacModel.from_pretrained("descript/dac_44khz"),
            )
            processor.save_pretrained(args.pytorch_dump_folder_path)

    model.save_pretrained(args.pytorch_dump_folder_path)
    print(f"Saved converted checkpoint to {args.pytorch_dump_folder_path}")
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/dia/convert_dia_to_hf.py",
"license": "Apache License 2.0",
"lines": 172,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/dia/feature_extraction_dia.py | # Copyright 2025 The Nari Labs and HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Feature extractor class for Dia"""
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)
class DiaFeatureExtractor(SequenceFeatureExtractor):
    r"""
    Constructs a Dia feature extractor.

    This feature extractor inherits from [`~feature_extraction_sequence_utils.SequenceFeatureExtractor`] which contains
    most of the main methods. Users should refer to this superclass for more information regarding those methods.

    Args:
        feature_size (`int`, *optional*, defaults to 1):
            The feature dimension of the extracted features. Use 1 for mono, 2 for stereo.
        sampling_rate (`int`, *optional*, defaults to 16000):
            The sampling rate at which the audio waveform should be digitalized, expressed in hertz (Hz).
        padding_value (`float`, *optional*, defaults to 0.0):
            The value that is used for padding.
        hop_length (`int`, *optional*, defaults to 512):
            Overlap length between successive windows.
    """

    model_input_names = ["input_values", "n_quantizers"]

    def __init__(
        self,
        feature_size: int = 1,
        sampling_rate: int = 16000,
        padding_value: float = 0.0,
        hop_length: int = 512,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.hop_length = hop_length

    def __call__(
        self,
        raw_audio: np.ndarray | list[float] | list[np.ndarray] | list[list[float]],
        padding: bool | str | PaddingStrategy | None = None,
        truncation: bool | None = False,
        max_length: int | None = None,
        return_tensors: str | TensorType | None = None,
        sampling_rate: int | None = None,
    ) -> BatchFeature:
        """
        Main method to featurize and prepare for the model one or several sequence(s).

        Args:
            raw_audio (`np.ndarray`, `list[float]`, `list[np.ndarray]`, `list[list[float]]`):
                The sequence or batch of sequences to be processed. Each sequence can be a numpy array, a list of float
                values, a list of numpy arrays or a list of list of float values. The numpy array must be of shape
                `(num_samples,)` for mono audio (`feature_size = 1`), or `(2, num_samples)` for stereo audio
                (`feature_size = 2`).
            padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):
                Select a strategy to pad the returned sequences (according to the model's padding side and padding
                index) among:

                - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
                  sequence if provided).
                - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
                  acceptable input length for the model if that argument is not provided.
                - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
                  lengths).
            truncation (`bool`, *optional*, defaults to `False`):
                Activates truncation to cut input sequences longer than `max_length` to `max_length`.
            max_length (`int`, *optional*):
                Maximum length of the returned list and optionally padding length (see above).
            return_tensors (`str` or [`~utils.TensorType`], *optional*):
                If set, will return tensors instead of list of python integers. Acceptable values are:

                - `'pt'`: Return PyTorch `torch.Tensor` objects.
                - `'np'`: Return Numpy `np.ndarray` objects.
            sampling_rate (`int`, *optional*):
                The sampling rate at which the `audio` input was sampled. It is strongly recommended to pass
                `sampling_rate` at the forward call to prevent silent errors.
        """
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                f"It is strongly recommended to pass the `sampling_rate` argument to `{self.__class__.__name__}()`. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        if padding and truncation:
            raise ValueError("Both padding and truncation were set. Make sure you only set one.")
        elif padding is None:
            # by default let's pad the inputs
            padding = True

        is_batched = bool(
            isinstance(raw_audio, (list, tuple)) and (isinstance(raw_audio[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_audio = [np.asarray(audio, dtype=np.float32).T for audio in raw_audio]
        elif not is_batched and not isinstance(raw_audio, np.ndarray):
            raw_audio = np.asarray(raw_audio, dtype=np.float32)
        elif isinstance(raw_audio, np.ndarray) and raw_audio.dtype is np.dtype(np.float64):
            raw_audio = raw_audio.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_audio = [np.asarray(raw_audio).T]

        # convert stereo to mono if necessary, unique to Dia
        for idx, example in enumerate(raw_audio):
            if self.feature_size == 2 and example.ndim == 2:
                raw_audio[idx] = np.mean(example, -1)

        # verify inputs are valid
        for idx, example in enumerate(raw_audio):
            if example.ndim > 2:
                raise ValueError(f"Expected input shape (channels, length) but got shape {example.shape}")
            if self.feature_size == 1 and example.ndim != 1:
                raise ValueError(f"Expected mono audio but example has {example.shape[-1]} channels")
            if self.feature_size == 2 and example.ndim != 1:  # note the conversion before
                raise ValueError(f"Expected stereo audio but example has {example.shape[-1]} channels")

        input_values = BatchFeature({"input_values": raw_audio})

        # temporarily treat it as if we were mono as we also convert stereo to mono
        original_feature_size = self.feature_size
        self.feature_size = 1
        try:
            # normal padding on batch
            padded_inputs = self.pad(
                input_values,
                max_length=max_length,
                truncation=truncation,
                padding=padding,
                return_attention_mask=True,
                pad_to_multiple_of=self.hop_length,
            )
            padded_inputs["padding_mask"] = padded_inputs.pop("attention_mask")

            input_values = []
            for example in padded_inputs.pop("input_values"):
                if self.feature_size == 1:
                    example = example[..., None]
                input_values.append(example.T)
            padded_inputs["input_values"] = input_values

            if return_tensors is not None:
                padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
        finally:
            # FIX: restore in a `finally` so an exception during padding/conversion
            # does not leave the extractor permanently forced to mono.
            self.feature_size = original_feature_size

        return padded_inputs
__all__ = ["DiaFeatureExtractor"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/dia/feature_extraction_dia.py",
"license": "Apache License 2.0",
"lines": 150,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/dia/generation_dia.py | # Copyright 2025 The Nari Labs and HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections.abc import Callable
from typing import Any, Optional
import torch
import torch.distributed as dist
from ...generation.logits_process import (
DiaClassifierFreeGuidanceLogitsProcessor,
DiaEOSChannelFilterLogitsProcessor,
DiaEOSDelayPatternLogitsProcessor,
LogitsProcessorList,
TemperatureLogitsWarper,
)
from ...generation.stopping_criteria import StoppingCriteriaList
from ...generation.streamers import BaseStreamer
from ...generation.utils import GenerateOutput, GenerationConfig, GenerationMixin, GenerationMode
from ...integrations.deepspeed import is_deepspeed_zero3_enabled
from ...integrations.fsdp import is_fsdp_managed_module
from ...modeling_utils import PreTrainedModel
from ...utils import logging
logger = logging.get_logger(__name__)
class DiaGenerationMixin(GenerationMixin):
# Indicates CFG which needs preparation to be properly handled by repeats
_uses_cfg = None
def _get_logits_processor(
self,
generation_config: GenerationConfig,
input_ids_seq_length: int | None = None,
encoder_input_ids: torch.LongTensor | None = None,
prefix_allowed_tokens_fn: Callable[[int, torch.Tensor], list[int]] | None = None,
logits_processor: LogitsProcessorList | None = None,
device: str | None = None,
model_kwargs: dict[str, Any] | None = None,
negative_prompt_ids: torch.Tensor | None = None,
negative_prompt_attention_mask: torch.Tensor | None = None,
) -> LogitsProcessorList:
# Need either custom order or custom processor instead
# (Temporarily disabling those for the super function)
original_guidance_scale = generation_config.guidance_scale
original_temperature = generation_config.temperature
generation_config.guidance_scale = None
generation_config.temperature = None
# Get base processors and those we can integrate easily
custom_processors = LogitsProcessorList()
if original_temperature is not None and original_temperature != 1.0:
custom_processors.append(TemperatureLogitsWarper(original_temperature))
custom_processors.append(
DiaEOSChannelFilterLogitsProcessor(
num_channels=len(self.config.delay_pattern),
eos_token_id=self.config.decoder_config.eos_token_id,
)
)
merged_processors = super()._get_logits_processor(
generation_config=generation_config,
input_ids_seq_length=input_ids_seq_length,
encoder_input_ids=encoder_input_ids,
prefix_allowed_tokens_fn=None,
logits_processor=custom_processors,
device=device,
model_kwargs=model_kwargs,
negative_prompt_ids=negative_prompt_ids,
negative_prompt_attention_mask=negative_prompt_attention_mask,
)
# Custom processors we need at specific positions
if original_guidance_scale is not None and original_guidance_scale != 1:
cfg_processor = DiaClassifierFreeGuidanceLogitsProcessor(
guidance_scale=original_guidance_scale,
guidance_top_k=generation_config.top_k,
)
merged_processors.insert(0, cfg_processor)
merged_processors.append(
DiaEOSDelayPatternLogitsProcessor(
delay_pattern=self.config.delay_pattern,
eos_token_id=self.config.decoder_config.eos_token_id,
max_generation_len=generation_config.max_length,
device=device,
)
)
# Enable temporarily disabled values back
generation_config.guidance_scale = original_guidance_scale
generation_config.temperature = original_temperature
return merged_processors
def _prepare_generation_config(
self, generation_config: GenerationConfig | None, **kwargs: Any
) -> tuple[GenerationConfig, dict]:
generation_config, model_kwargs = super()._prepare_generation_config(generation_config, **kwargs)
if generation_config.temperature is not None and generation_config.temperature < 1.0:
logger.warning_once(
f"temperature < 1.0 is not supported for Dia; clamping to 1.0 (got {generation_config.temperature})"
)
generation_config.temperature = 1.0
# We allow generation up to max length + max delay pattern
# (will revert back to max length after generation)
generation_config.max_length += max(self.config.delay_pattern)
# Internal flag to indicate CFG that needs to prepare unconditioned input
self._uses_cfg = generation_config.guidance_scale is not None and generation_config.guidance_scale != 1
return generation_config, model_kwargs
def _prepare_model_inputs(
self,
inputs: torch.Tensor | None = None,
bos_token_id: torch.Tensor | None = None,
model_kwargs: dict[str, torch.Tensor] | None = None,
) -> tuple[torch.Tensor, str | None, dict[str, torch.Tensor]]:
inputs, input_name, model_kwargs = super()._prepare_model_inputs(
inputs=inputs,
bos_token_id=bos_token_id,
model_kwargs=model_kwargs,
)
# If CFG is requested we fill in the unconditioned parts
if self._uses_cfg:
unconditioned_inputs = torch.zeros_like(inputs)
inputs = torch.cat([inputs, unconditioned_inputs], dim=0)
if model_kwargs.get("attention_mask", None) is not None:
model_kwargs["attention_mask"] = model_kwargs["attention_mask"].repeat(2, 1)
return inputs, input_name, model_kwargs
def _prepare_decoder_input_ids_for_generation(
self,
batch_size: int,
model_input_name: str,
model_kwargs: dict[str, torch.Tensor],
decoder_start_token_id: torch.Tensor,
device: torch.device | None = None,
) -> tuple[torch.LongTensor, dict[str, torch.Tensor]]:
"""Prepares `decoder_input_ids` for generation with encoder-decoder models"""
# 1. Check whether the user has defined `decoder_input_ids` and `decoder_attention_mask`; if not error out
decoder_input_ids = decoder_attention_mask = None
if model_kwargs is not None and "decoder_input_ids" in model_kwargs:
decoder_input_ids = model_kwargs.pop("decoder_input_ids")
if model_kwargs is not None and "decoder_attention_mask" in model_kwargs:
decoder_attention_mask = model_kwargs.pop("decoder_attention_mask")
# We allow generating without preparation (no proper delay) but discourage it
if decoder_input_ids is None or decoder_attention_mask is None:
logger.warning_once(
"In order to generate with Dia, we need the processed audio input: Got `decoder_input_ids`:"
f" {decoder_input_ids is not None} and got `decoder_attention_mask`={decoder_attention_mask is not None}."
f" This can be achieved via the [`DiaProcessor`] but now defaulting to non-delayed generation."
)
num_channels = self.config.decoder_config.num_channels
real_batch_size = batch_size // 2 if self._uses_cfg else batch_size
if decoder_input_ids is None:
decoder_input_ids = torch.full(
(real_batch_size, 1, num_channels), decoder_start_token_id, dtype=torch.long, device=device
)
decoder_attention_mask = torch.ones(
size=(real_batch_size, decoder_input_ids.shape[1]), dtype=torch.long, device=device
)
# 2. Determine the valid input and what works as mask within the input
delay_mask = decoder_input_ids.long()
valid_input_size = (
decoder_input_ids.shape[1]
- (decoder_input_ids[:, :, 0] == self.config.decoder_config.pad_token_id).sum(dim=-1).max()
)
decoder_input_ids = delay_mask[:, :valid_input_size].transpose(1, 2).long()
decoder_attention_mask = decoder_attention_mask[:, :valid_input_size].long()
# 3. Overwrite into model kwargs
model_kwargs["decoder_attention_mask"] = decoder_attention_mask
model_kwargs["decoder_delay_mask"] = delay_mask
return decoder_input_ids, model_kwargs
def prepare_inputs_for_generation(
self,
input_ids,
encoder_outputs=None, # Using this to easily get the batch size
decoder_delay_mask=None,
**kwargs,
):
# Reshape decoder input_ids to 3D to be compile friendly and to fit the expected model input shape
batch_size = encoder_outputs[0].shape[0] // 2 if self._uses_cfg else encoder_outputs[0].shape[0]
input_ids = input_ids.reshape(batch_size, self.config.decoder_config.num_channels, -1).transpose(1, 2)
# Base method handles most things except CFG and the delay pattern mask
model_inputs = super().prepare_inputs_for_generation(input_ids, encoder_outputs=encoder_outputs, **kwargs)
# Post processing for CFG and overwriting via delay pattern mask
# 1. Delay pattern mask -- force tokens if not allowed to predict (!= pad_token in mask)
model_inputs["decoder_input_ids"] = self.apply_delay_mask(
input_ids, self.config.decoder_config.pad_token_id, decoder_delay_mask
)
# Depending on cache usage we need to pass all or just one
if model_inputs.get("use_cache", False) and model_inputs["cache_position"][0] > 0:
model_inputs["decoder_input_ids"] = model_inputs["decoder_input_ids"][:, -1, :][:, None, :]
# Be compile friendly
model_inputs["decoder_input_ids"] = model_inputs["decoder_input_ids"].contiguous()
# 2. Apply CFG duplication if needed
if self._uses_cfg:
for key in ["decoder_input_ids", "decoder_attention_mask", "decoder_position_ids"]:
if model_inputs.get(key, None) is not None:
# double first dimension and keep everything else the same
repeat_pattern = tuple([2] + [1] * (model_inputs[key].ndim - 1))
model_inputs[key] = model_inputs[key].repeat(*repeat_pattern)
return model_inputs
@staticmethod
def apply_delay_mask(input_ids: torch.Tensor, pad_id: int, delay_mask: torch.Tensor | None) -> torch.Tensor:
if delay_mask is None:
return input_ids
mask_len = min(input_ids.shape[1], delay_mask.shape[1])
valid_mask = delay_mask[:, :mask_len, :]
valid_input = input_ids[:, :mask_len, :]
# Overwrite the respective parts of the input
input_ids[:, :mask_len, :] = torch.where(valid_mask == pad_id, valid_input, valid_mask)
return input_ids
def _main_generate_loop(
self,
inputs: torch.Tensor | None = None,
generation_config: GenerationConfig | None = None,
logits_processor: LogitsProcessorList | None = None,
stopping_criteria: StoppingCriteriaList | None = None,
prefix_allowed_tokens_fn: Callable[[int, torch.Tensor], list[int]] | None = None,
synced_gpus: bool | None = None,
assistant_model: Optional["PreTrainedModel"] = None,
streamer: Optional["BaseStreamer"] = None,
negative_prompt_ids: torch.Tensor | None = None,
negative_prompt_attention_mask: torch.Tensor | None = None,
custom_generate: str | None = None,
**kwargs,
):
# ********** mostly taken from main generate function up to calling the different methods (see NOTE) **********
# 1. Handle `generation_config` and kwargs that might update it, and validate the `.generate()` call
generation_mode_kwargs = self._extract_generation_mode_kwargs(
custom_generate,
kwargs,
synced_gpus,
assistant_model,
streamer,
)
generation_config, model_kwargs = self._prepare_generation_config(generation_config, **kwargs)
generation_mode = generation_config.get_generation_mode(assistant_model)
if generation_mode not in (GenerationMode.SAMPLE, GenerationMode.GREEDY_SEARCH):
raise ValueError(
"Got incompatible mode for generation, should be one of greedy or sampling. "
"Ensure that beam search is de-activated by setting `num_beams=1`."
)
self._validate_model_kwargs(model_kwargs.copy())
self._validate_generation_mode(generation_mode, generation_config, generation_mode_kwargs)
# 2. Set generation parameters if not already defined
if synced_gpus is None:
synced_gpus = (is_deepspeed_zero3_enabled() or is_fsdp_managed_module(self)) and dist.get_world_size() > 1
logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList()
stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList()
# 3. Define model inputs
kwargs_has_attention_mask = model_kwargs.get("attention_mask", None) is not None
inputs_tensor, model_input_name, model_kwargs = self._prepare_model_inputs(
inputs, generation_config.bos_token_id, model_kwargs
)
batch_size = inputs_tensor.shape[0]
device = inputs_tensor.device
self._prepare_special_tokens(generation_config, kwargs_has_attention_mask, device=device)
# 4. Define other model kwargs
if "encoder_outputs" not in model_kwargs:
# if model is encoder decoder encoder_outputs are created and added to `model_kwargs`
model_kwargs = self._prepare_encoder_decoder_kwargs_for_generation(
inputs_tensor, model_kwargs, model_input_name, generation_config
)
# 5. Prepare `input_ids` which will be used for auto-regressive generation
input_ids, model_kwargs = self._prepare_decoder_input_ids_for_generation(
batch_size=batch_size,
model_input_name=model_input_name,
model_kwargs=model_kwargs,
decoder_start_token_id=generation_config._decoder_start_token_tensor,
device=inputs_tensor.device,
)
if generation_config.token_healing:
input_ids = self.heal_tokens(input_ids, generation_mode_kwargs.get("tokenizer"))
if streamer is not None:
streamer.put(input_ids.cpu())
# 6. Prepare `max_length` depending on other stopping criteria.
# NOTE: incorrect `input_ids.shape[1]` previously
input_ids_length = input_ids.shape[-1]
has_default_max_length = kwargs.get("max_length") is None and generation_config.max_length is not None
has_default_min_length = kwargs.get("min_length") is None and generation_config.min_length is not None
generation_config = self._prepare_generated_length(
generation_config=generation_config,
has_default_max_length=has_default_max_length,
has_default_min_length=has_default_min_length,
model_input_name=model_input_name,
inputs_tensor=inputs_tensor,
input_ids_length=input_ids_length,
)
# If the model supports `logits_to_keep` in forward(), set it to 1 to avoid computing the whole
# logit matrix. This can save a lot of memory during the first forward pass. Note that assisted decoding
# dynamically overrides this value as it can need more than the last token logits
if self._supports_logits_to_keep() and "logits_to_keep" not in model_kwargs:
model_kwargs["logits_to_keep"] = 1
self._validate_generated_length(generation_config, input_ids_length, has_default_max_length)
# 7. Prepare the cache.
# - `model_kwargs` may be updated in place with a cache as defined by the parameters in `generation_config`.
# - different models have a different cache name expected by the model (default = "past_key_values")
# - `max_length`, prepared above, is used to determine the maximum cache length
max_cache_length = generation_config.max_length - 1
if (
inputs_tensor.shape[1] != input_ids_length
and model_input_name == "inputs_embeds"
and not self.config.is_encoder_decoder
):
max_cache_length += inputs_tensor.shape[1]
self._prepare_cache_for_generation(
generation_config, model_kwargs, generation_mode, batch_size, max_cache_length
)
# 8. prepare logits processors and stopping criteria
prepared_logits_processor = self._get_logits_processor(
generation_config=generation_config,
input_ids_seq_length=input_ids_length,
encoder_input_ids=inputs_tensor,
prefix_allowed_tokens_fn=prefix_allowed_tokens_fn,
logits_processor=logits_processor,
device=inputs_tensor.device,
model_kwargs=model_kwargs,
negative_prompt_ids=negative_prompt_ids,
negative_prompt_attention_mask=negative_prompt_attention_mask,
)
prepared_stopping_criteria = self._get_stopping_criteria(
generation_config=generation_config,
stopping_criteria=stopping_criteria,
tokenizer=generation_mode_kwargs.get("tokenizer"),
)
# Set model_kwargs `use_cache` so we can use it later in forward runs
model_kwargs["use_cache"] = generation_config.use_cache
# ******************* taken from main generate function up to calling the different methods *******************
# Prepare inner 2D logic in generation loop
input_ids = input_ids.reshape(-1, input_ids.shape[-1])
# 10. expand input_ids with `num_return_sequences` additional sequences per batch
if generation_config.num_return_sequences > 1:
raise ValueError("`num_return_sequences>1` is incompatible with Dia.")
# 11. run sample (it degenerates to greedy search when `generation_config.do_sample=False`)
return self._sample(
input_ids,
logits_processor=prepared_logits_processor,
stopping_criteria=prepared_stopping_criteria,
generation_config=generation_config,
**generation_mode_kwargs,
**model_kwargs,
)
@torch.no_grad()
def generate(
    self,
    inputs: torch.Tensor | None = None,
    generation_config: GenerationConfig | None = None,
    logits_processor: LogitsProcessorList | None = None,
    stopping_criteria: StoppingCriteriaList | None = None,
    prefix_allowed_tokens_fn: Callable[[int, torch.Tensor], list[int]] | None = None,
    synced_gpus: bool | None = None,
    assistant_model: Optional["PreTrainedModel"] = None,
    streamer: Optional["BaseStreamer"] = None,
    negative_prompt_ids: torch.Tensor | None = None,
    negative_prompt_attention_mask: torch.Tensor | None = None,
    custom_generate: str | None = None,
    **kwargs,
) -> GenerateOutput | torch.LongTensor:
    """Generate audio token sequences for Dia.

    Wraps `_main_generate_loop` and post-processes its output: the loop works on a
    2D layout where the codebook channels are folded into the batch dimension, so
    the sequences are reshaped back to 3D `(bsz, seq_len, channels)` and the delay
    mask is re-applied before returning.

    Returns either a `GenerateOutput`-style object (with its `sequences` attribute
    rewritten in 3D) or a plain tensor, mirroring whatever the inner loop produced.
    """
    # We expect the initial input ids to be the complete mask (delayed input)
    # NOTE: cloned so post-processing sees the prompt before the loop mutates it.
    delay_mask = kwargs.get("decoder_input_ids")
    if delay_mask is not None:
        delay_mask = delay_mask.clone()
    output = self._main_generate_loop(
        inputs=inputs,
        generation_config=generation_config,
        logits_processor=logits_processor,
        stopping_criteria=stopping_criteria,
        prefix_allowed_tokens_fn=prefix_allowed_tokens_fn,
        synced_gpus=synced_gpus,
        assistant_model=assistant_model,
        streamer=streamer,
        negative_prompt_ids=negative_prompt_ids,
        negative_prompt_attention_mask=negative_prompt_attention_mask,
        custom_generate=custom_generate,
        **kwargs,
    )
    # The inner loop returns either a raw tensor or a structured output object.
    return_dict_in_generate = not isinstance(output, torch.Tensor)
    if return_dict_in_generate:
        output_sequences = output.sequences
    else:
        output_sequences = output
    # Reshape from 2D (bsz * channels, seq_len) to 3D (bsz, seq_len, channels)
    num_channels = self.config.decoder_config.num_channels
    bsz = output_sequences.shape[0] // num_channels
    output_sequences = output_sequences.reshape(bsz, num_channels, -1).transpose(1, 2)
    # Apply delay mask
    output_sequences = self.apply_delay_mask(output_sequences, self.config.decoder_config.pad_token_id, delay_mask)
    if return_dict_in_generate:
        output.sequences = output_sequences
    else:
        output = output_sequences
    return output
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/dia/generation_dia.py",
"license": "Apache License 2.0",
"lines": 390,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/dia/modular_dia.py | # Copyright 2025 The Nari Labs and HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch Dia model."""
from collections.abc import Callable
import torch
from torch import nn
from ... import initialization as init
from ...cache_utils import DynamicCache, EncoderDecoderCache
from ...masking_utils import create_bidirectional_mask, create_causal_mask
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import (
BaseModelOutput,
BaseModelOutputWithPastAndCrossAttentions,
Seq2SeqLMOutput,
Seq2SeqModelOutput,
)
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...utils import auto_docstring, can_return_tuple, is_torchdynamo_compiling, logging
from ..llama.modeling_llama import (
LlamaAttention,
LlamaRMSNorm,
LlamaRotaryEmbedding,
eager_attention_forward,
)
from ..phi3.modeling_phi3 import Phi3MLP
from .configuration_dia import DiaConfig, DiaDecoderConfig, DiaEncoderConfig
from .generation_dia import DiaGenerationMixin
logger = logging.get_logger(__name__)
@auto_docstring
class DiaPreTrainedModel(PreTrainedModel):
    # Shared base class: declares capabilities/attributes used by the
    # transformers loading and dispatch machinery for all Dia models.
    config: DiaConfig
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    _supports_flash_attn = True
    _supports_sdpa = True
    _supports_flex_attn = True
    _can_compile_fullgraph = True
    main_input_name = "input_ids"
    _no_split_modules = ["DiaEncoderLayer", "DiaDecoderLayer"]

    def _init_weights(self, module):
        """Initialize weights; additionally re-materialize the channel-offset buffer.

        `DiaMultiChannelEmbedding.offsets` is registered as a non-persistent buffer,
        so it is recomputed here (channel_index * vocab_size per channel) to make
        sure it holds the right values after (de)serialization paths that reset it.
        """
        super()._init_weights(module)
        if isinstance(module, DiaMultiChannelEmbedding):
            offsets = torch.arange(self.config.num_channels, dtype=torch.long) * self.config.vocab_size
            init.copy_(module.offsets, offsets)
class DiaMultiChannelEmbedding(nn.Module):
    """Vectorized embedding over all audio codebook channels.

    Instead of one `nn.Embedding` per channel, a single table of size
    `vocab_size * num_channels` is used: each channel's codes are shifted by
    `channel_index * vocab_size` before lookup, and the per-channel embeddings
    are summed into a single hidden vector per frame.

    Example with vocab_size=8, num_channels=3: offsets = [0, 8, 16], so codes
    [[0, 1, 2, 3], [1, 3, 4, 7], [5, 6, 7, 8]] become flattened-table tokens
    [0, 1, 2, 3, 9, 11, 12, 15, 21, 22, 23, 24].
    """

    def __init__(self, config: DiaDecoderConfig):
        super().__init__()
        self.hidden_size = config.hidden_size
        self.num_channels = config.num_channels
        self.embed = nn.Embedding(config.vocab_size * config.num_channels, config.hidden_size)
        # Per-channel shift into the flattened vocabulary, shape (num_channels,).
        # Non-persistent: recomputed by `_init_weights` rather than serialized.
        channel_shift = torch.arange(config.num_channels, dtype=torch.long) * config.vocab_size
        self.register_buffer("offsets", channel_shift, persistent=False)

    def forward(self, audio_codes: torch.Tensor) -> torch.Tensor:
        # Shift each channel's codes into its slice of the shared table.
        shifted = audio_codes + self.offsets.to(audio_codes.device)
        flat_tokens = shifted.squeeze(1)
        per_channel = self.embed(flat_tokens).view(
            flat_tokens.shape[0], audio_codes.shape[1], -1, self.hidden_size
        )
        # Collapse the channel axis into one embedding per time step.
        return per_channel.sum(dim=2)
class DiaMLP(Phi3MLP):
    # Modular-transformers alias: Dia reuses Phi3's MLP implementation unchanged.
    pass
class DiaRMSNorm(LlamaRMSNorm):
    # Modular-transformers alias: Dia reuses Llama's RMSNorm implementation unchanged.
    pass
class DiaRotaryEmbedding(LlamaRotaryEmbedding):
    # Modular-transformers alias: Dia reuses Llama's rotary embedding unchanged.
    pass
class DiaSelfAttention(LlamaAttention):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: DiaEncoderConfig | DiaDecoderConfig, layer_idx: int, is_causal: bool = False):
        # Deliberately skip LlamaAttention.__init__: Dia sets up its own
        # projection sizes and scaling below and only inherits the forward pass.
        nn.Module.__init__(self)
        self.config = config
        self.layer_idx = layer_idx
        self.is_causal = is_causal

        hidden_size = config.hidden_size
        n_heads = config.num_attention_heads
        # Fall back to MHA when no dedicated KV head count is configured.
        n_kv_heads = config.num_key_value_heads or n_heads

        self.hidden_size = hidden_size
        self.num_heads = n_heads
        self.num_key_value_heads = n_kv_heads
        self.num_key_value_groups = n_heads // n_kv_heads
        self.head_dim = getattr(config, "head_dim", hidden_size // n_heads)
        # Dia uses neither 1/sqrt(d) attention scaling nor attention dropout.
        self.scaling = 1
        self.attention_dropout = 0.0

        self.q_proj = nn.Linear(hidden_size, n_heads * self.head_dim, bias=False)
        self.k_proj = nn.Linear(hidden_size, n_kv_heads * self.head_dim, bias=False)
        self.v_proj = nn.Linear(hidden_size, n_kv_heads * self.head_dim, bias=False)
        self.o_proj = nn.Linear(n_heads * self.head_dim, hidden_size, bias=False)
class DiaCrossAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: DiaDecoderConfig, layer_idx: int):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        # Queries come from the decoder stream, keys/values from the encoder
        # stream, which may have a different width (`cross_hidden_size`).
        self.hidden_size = config.hidden_size
        self.cross_hidden_size = config.cross_hidden_size
        self.num_heads = self.config.cross_num_attention_heads
        self.num_key_value_heads = self.config.cross_num_key_value_heads
        self.num_key_value_groups = self.num_heads // self.num_key_value_heads
        self.head_dim = config.cross_head_dim
        # No 1/sqrt(d) scaling and no attention dropout in Dia.
        self.scaling = 1
        self.attention_dropout = 0.0
        self.is_causal = False
        self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
        self.k_proj = nn.Linear(self.cross_hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
        self.v_proj = nn.Linear(self.cross_hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
        self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)

    def forward(
        self,
        hidden_states: torch.Tensor,
        cross_attention_states: torch.Tensor,
        attention_mask: torch.Tensor | None = None,
        past_key_values: EncoderDecoderCache | None = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> tuple[torch.Tensor, torch.Tensor | None]:
        """Cross-attend decoder `hidden_states` to encoder `cross_attention_states`.

        The encoder K/V are computed once per generation and cached: the
        `past_key_values.is_updated[layer_idx]` flag marks whether this layer's
        cross-attention cache is already filled, so subsequent decode steps skip
        the k/v projections entirely.
        """
        input_shape = hidden_states.shape[:-1]
        hidden_shape = (*input_shape, -1, self.head_dim)
        cross_shape = (*cross_attention_states.shape[:-1], -1, self.head_dim)
        query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        is_updated = past_key_values.is_updated.get(self.layer_idx) if past_key_values is not None else False
        if past_key_values is not None and is_updated:
            # reuse k,v, cross_attentions
            key_states = past_key_values.cross_attention_cache.layers[self.layer_idx].keys
            value_states = past_key_values.cross_attention_cache.layers[self.layer_idx].values
        else:
            key_states = self.k_proj(cross_attention_states).view(cross_shape).transpose(1, 2)
            value_states = self.v_proj(cross_attention_states).view(cross_shape).transpose(1, 2)
            if past_key_values is not None:
                # save all states to the cache
                key_states, value_states = past_key_values.cross_attention_cache.update(
                    key_states,
                    value_states,
                    self.layer_idx,
                )
                # set flag that curr layer for cross-attn is already updated so we can re-use in subsequent calls
                past_key_values.is_updated[self.layer_idx] = True
        # Dispatch to the configured attention backend (eager/SDPA/flash/flex).
        attention_interface: Callable = ALL_ATTENTION_FUNCTIONS.get_interface(
            self.config._attn_implementation, eager_attention_forward
        )
        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            scaling=self.scaling,
            **kwargs,
        )
        attn_output = attn_output.reshape((*input_shape, -1)).contiguous()
        attn_output = self.o_proj(attn_output)
        return attn_output, attn_weights
class DiaEncoderLayer(GradientCheckpointingLayer):
    """One Dia encoder block: pre-norm self-attention followed by a pre-norm MLP."""

    def __init__(self, config: DiaEncoderConfig, layer_idx: int):
        super().__init__()
        self.pre_sa_norm = DiaRMSNorm(config.hidden_size, eps=config.norm_eps)
        self.self_attention = DiaSelfAttention(config, layer_idx, is_causal=False)
        self.post_sa_norm = DiaRMSNorm(config.hidden_size, eps=config.norm_eps)
        self.mlp = DiaMLP(config)

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: tuple[torch.Tensor, torch.Tensor] | None = None,
        attention_mask: torch.Tensor | None = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> tuple[torch.Tensor, torch.Tensor | None]:
        # Self-attention sub-block (pre-norm + residual).
        attn_out, attn_weights = self.self_attention(
            self.pre_sa_norm(hidden_states),
            position_embeddings=position_embeddings,
            attention_mask=attention_mask,
            **kwargs,
        )
        hidden_states = hidden_states + attn_out

        # Feed-forward sub-block (pre-norm + residual).
        hidden_states = hidden_states + self.mlp(self.post_sa_norm(hidden_states))

        return hidden_states, attn_weights
class DiaEncoder(DiaPreTrainedModel):
    # Byte/text encoder stack: token embedding -> N encoder layers -> final RMSNorm.
    def __init__(self, config: DiaEncoderConfig):
        super().__init__(config)
        self.config = config
        self.embedding = nn.Embedding(config.vocab_size, config.hidden_size)
        self.layers = nn.ModuleList(
            [DiaEncoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        self.norm = DiaRMSNorm(config.hidden_size, eps=config.norm_eps)
        self.rotary_emb = DiaRotaryEmbedding(config=config)
        self.post_init()

    @auto_docstring
    @can_return_tuple
    def forward(
        self,
        input_ids: torch.Tensor,
        attention_mask: torch.Tensor | None = None,
        output_attentions: bool | None = False,
        output_hidden_states: bool | None = False,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> BaseModelOutput | tuple:
        hidden_states = self.embedding(input_ids)
        # RoPE
        # Note: We expect right padding and hence always generate
        # the position ids on the fly to reduce preparation overhead
        position_ids = torch.arange(input_ids.shape[-1], device=input_ids.device)[None, :]
        # Build a full (non-causal) attention mask for the encoder.
        attention_mask = create_bidirectional_mask(
            config=self.config,
            inputs_embeds=hidden_states,
            attention_mask=attention_mask,
        )
        position_embeddings = self.rotary_emb(hidden_states, position_ids=position_ids)
        # Optionally accumulate per-layer hidden states / attention maps.
        encoder_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None
        for encoder_layer in self.layers:
            if output_hidden_states:
                encoder_states = encoder_states + (hidden_states,)
            layer_outputs = encoder_layer(
                hidden_states,
                attention_mask=attention_mask,
                position_ids=position_ids,
                position_embeddings=position_embeddings,
                **kwargs,
            )
            hidden_states = layer_outputs[0]
            if output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)
        hidden_states = self.norm(hidden_states)
        if output_hidden_states:
            encoder_states += (hidden_states,)
        return BaseModelOutput(
            last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
        )
class DiaDecoderLayer(GradientCheckpointingLayer):
    # One Dia decoder block: pre-norm causal self-attention, pre-norm
    # cross-attention to the encoder, then a pre-norm MLP — each with residual.
    def __init__(self, config: DiaDecoderConfig, layer_idx: int):
        super().__init__()
        self.embed_dim = config.hidden_size
        self.self_attention = DiaSelfAttention(config, layer_idx, is_causal=True)
        self.cross_attention = DiaCrossAttention(config, layer_idx)
        self.pre_sa_norm = DiaRMSNorm(config.hidden_size, eps=config.norm_eps)
        self.pre_ca_norm = DiaRMSNorm(config.hidden_size, eps=config.norm_eps)
        self.pre_mlp_norm = DiaRMSNorm(config.hidden_size, eps=config.norm_eps)
        self.mlp = DiaMLP(config)

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: tuple[torch.Tensor, torch.Tensor] | None = None,
        attention_mask: torch.Tensor | None = None,
        encoder_hidden_states: torch.Tensor | None = None,
        encoder_attention_mask: torch.Tensor | None = None,
        past_key_values: EncoderDecoderCache | None = None,
        cache_position: torch.LongTensor | None = None,
        **kwargs,
    ) -> tuple[torch.Tensor, torch.Tensor | None, torch.Tensor | None]:
        # Self-attention only ever uses the self-attention half of the
        # encoder-decoder cache; cross-attention receives the full cache object.
        self_attn_cache = past_key_values
        if isinstance(self_attn_cache, EncoderDecoderCache):
            self_attn_cache = self_attn_cache.self_attention_cache
        residual = hidden_states
        normed_states = self.pre_sa_norm(hidden_states)
        self_attn_output, self_attn_weights = self.self_attention(
            normed_states,
            position_embeddings,
            attention_mask,
            # Needs to be an arg in order to function properly
            # on inplace operations to be carried (e.g. compile)
            self_attn_cache,
            cache_position=cache_position,
            **kwargs,
        )
        hidden_states = residual + self_attn_output
        residual = hidden_states
        normed_states = self.pre_ca_norm(hidden_states)
        cross_states, cross_attn_weights = self.cross_attention(
            normed_states,
            encoder_hidden_states,
            attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            **kwargs,
        )
        hidden_states = residual + cross_states
        residual = hidden_states
        normed_states = self.pre_mlp_norm(hidden_states)
        mlp_out = self.mlp(normed_states)
        hidden_states = residual + mlp_out
        return hidden_states, self_attn_weights, cross_attn_weights
class DiaDecoder(DiaPreTrainedModel):
    """Transformer Decoder Stack using DenseGeneral."""

    def __init__(self, config: DiaDecoderConfig):
        super().__init__(config)
        self.num_channels = config.num_channels
        self.vocab_size = config.vocab_size
        # Multi-codebook embedding that sums all channels into one hidden stream.
        self.embeddings = DiaMultiChannelEmbedding(config)
        self.layers = nn.ModuleList(
            [DiaDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        self.norm = DiaRMSNorm(config.hidden_size, eps=config.norm_eps)
        self.rotary_emb = DiaRotaryEmbedding(config=config)
        self.post_init()

    @auto_docstring
    @can_return_tuple
    def forward(
        self,
        input_ids: torch.Tensor,
        position_ids: torch.LongTensor | None = None,
        attention_mask: torch.Tensor | None = None,
        encoder_hidden_states: torch.FloatTensor | None = None,
        encoder_attention_mask: torch.LongTensor | None = None,
        past_key_values: EncoderDecoderCache | None = None,
        output_attentions: bool | None = False,
        output_hidden_states: bool | None = False,
        cache_position: torch.LongTensor | None = None,
        **kwargs,
    ) -> BaseModelOutputWithPastAndCrossAttentions | tuple:
        r"""
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length, num_codebooks)`):
            The original `decoder_input_ids` in 3D shape to facilitate more efficient computations.

            [What are input IDs?](../glossary#input-ids)
        """
        # input_ids is 3D (bsz, seq, channels); drop the channel dim for shapes.
        batch_size, seq_length = input_ids.size()[:-1]
        past_key_values_length = past_key_values.get_seq_length() if past_key_values is not None else 0
        if cache_position is None:
            cache_position = torch.arange(
                past_key_values_length, past_key_values_length + seq_length, device=input_ids.device
            )
        if position_ids is None:
            position_ids = cache_position[None, :]
        # RoPE
        hidden_states = self.embeddings(input_ids)
        if attention_mask is None and not is_torchdynamo_compiling():
            # required mask seq length can be calculated via length of past cache
            mask_seq_length = past_key_values_length + seq_length
            attention_mask = torch.ones(batch_size, mask_seq_length, device=input_ids.device)
        attention_mask = create_causal_mask(
            config=self.config,
            inputs_embeds=hidden_states,
            attention_mask=attention_mask,
            cache_position=cache_position,
            past_key_values=past_key_values,
        )
        encoder_attention_mask = create_bidirectional_mask(
            config=self.config,
            inputs_embeds=hidden_states,
            attention_mask=encoder_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        position_embeddings = self.rotary_emb(hidden_states, position_ids=position_ids)
        # Optionally accumulate per-layer states / attention maps.
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None
        all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None
        for layer in self.layers:
            if output_hidden_states:
                all_hidden_states += (hidden_states,)
            layer_outputs = layer(
                hidden_states,
                # Needs to be an arg in order to function properly
                # on inplace operations to be carried (e.g. compile)
                position_embeddings,
                attention_mask,
                encoder_hidden_states,
                encoder_attention_mask=encoder_attention_mask,
                past_key_values=past_key_values,
                cache_position=cache_position,
                position_ids=position_ids,
                **kwargs,
            )
            hidden_states = layer_outputs[0]
            if output_attentions:
                all_self_attns = all_self_attns + (layer_outputs[1],)
                if encoder_hidden_states is not None:
                    all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
        hidden_states = self.norm(hidden_states)
        if output_hidden_states:
            all_hidden_states += (hidden_states,)
        return BaseModelOutputWithPastAndCrossAttentions(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
            cross_attentions=all_cross_attentions,
        )
@auto_docstring(
    custom_intro="""
    The bare Dia model outputting raw hidden-states without any specific head on top.
    """
)
class DiaModel(DiaPreTrainedModel):
    def __init__(self, config: DiaConfig):
        super().__init__(config)
        self.config = config
        self.encoder = DiaEncoder(config.encoder_config)
        self.decoder = DiaDecoder(config.decoder_config)
        self.post_init()

    @auto_docstring
    @can_return_tuple
    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        attention_mask: torch.LongTensor | None = None,
        decoder_input_ids: torch.LongTensor | None = None,
        decoder_position_ids: torch.LongTensor | None = None,
        decoder_attention_mask: torch.LongTensor | None = None,
        encoder_outputs: BaseModelOutput | tuple | None = None,
        past_key_values: EncoderDecoderCache | None = None,
        use_cache: bool | None = None,
        output_attentions: bool | None = None,
        output_hidden_states: bool | None = None,
        cache_position: torch.LongTensor | None = None,
        **kwargs,
    ) -> tuple | Seq2SeqModelOutput:
        r"""
        decoder_input_ids (`torch.LongTensor` of shape `(batch_size * num_codebooks, target_sequence_length)
        or (batch_size, target_sequence_length, num_codebooks)`, *optional*):
            1. (batch_size * num_codebooks, target_sequence_length): corresponds to the general use case where
            the audio input codebooks are flattened into the batch dimension. This also aligns with the flat-
            tened audio logits which are used to calculate the loss.

            2. (batch_size, sequence_length, num_codebooks): corresponds to the internally used shape of
            Dia to calculate embeddings and subsequent steps more efficiently.

            If no `decoder_input_ids` are provided, it will create a tensor of `bos_token_id` with shape
            `(batch_size, 1, num_codebooks)`. Indices can be obtained using the [`DiaProcessor`]. See
            [`DiaProcessor.__call__`] for more details.

            [What are decoder input IDs?](../glossary#decoder-input-ids)
        decoder_position_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`):
            Indices of positions of each input sequence tokens in the position embeddings.
            Used to calculate the position embeddings up to `config.decoder_config.max_position_embeddings`.

            [What are position IDs?](../glossary#position-ids)
        """
        if input_ids is None and encoder_outputs is None:
            raise ValueError(
                "You should either provide text ids or the cached text encodings. Neither has been found."
            )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        if self.is_gradient_checkpointing and self.training:
            if use_cache:
                logger.warning_once(
                    "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
                )
                use_cache = False
        # Lazily create the joint self-/cross-attention cache when caching is on.
        if use_cache and past_key_values is None:
            past_key_values = EncoderDecoderCache(DynamicCache(config=self.config), DynamicCache(config=self.config))
        if encoder_outputs is None:
            encoder_outputs = self.encoder(
                input_ids=input_ids,
                attention_mask=attention_mask,
                output_attentions=output_attentions,
                output_hidden_states=output_hidden_states,
                **kwargs,
            )
        # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput
        elif not isinstance(encoder_outputs, BaseModelOutput):
            encoder_outputs = BaseModelOutput(
                last_hidden_state=encoder_outputs[0],
                hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
                attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
            )
        # On default we initialize the decoder with bos tokens if nothing has been provided
        bsz, seq_len, channels = (encoder_outputs[0].shape[0], -1, self.config.decoder_config.num_channels)
        if decoder_input_ids is None:
            decoder_input_ids = torch.full(
                size=(bsz, 1, channels), fill_value=self.config.decoder_config.bos_token_id, device=self.device
            )
        # Ensure 3D
        if decoder_input_ids.ndim == 2:
            decoder_input_ids = decoder_input_ids.reshape(bsz, channels, seq_len).transpose(1, 2)
        decoder_outputs = self.decoder(
            input_ids=decoder_input_ids,
            position_ids=decoder_position_ids,
            attention_mask=decoder_attention_mask,
            encoder_hidden_states=encoder_outputs[0],
            encoder_attention_mask=attention_mask,
            past_key_values=past_key_values,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            use_cache=use_cache,
            cache_position=cache_position,
            **kwargs,
        )
        return Seq2SeqModelOutput(
            last_hidden_state=decoder_outputs.last_hidden_state,
            past_key_values=decoder_outputs.past_key_values,
            decoder_hidden_states=decoder_outputs.hidden_states,
            decoder_attentions=decoder_outputs.attentions,
            cross_attentions=decoder_outputs.cross_attentions,
            encoder_last_hidden_state=encoder_outputs[0],
            encoder_hidden_states=encoder_outputs.hidden_states,
            encoder_attentions=encoder_outputs.attentions,
        )
@auto_docstring(
    custom_intro="""
    The Dia model consisting of a (byte) text encoder and audio decoder with a prediction head on top.
    """
)
class DiaForConditionalGeneration(DiaPreTrainedModel, DiaGenerationMixin):
    base_model_prefix = "model"
    output_modalities = ("audio",)

    def __init__(self, config: DiaConfig):
        super().__init__(config)
        self.config = config
        self.model = DiaModel(config)
        self.num_channels = config.decoder_config.num_channels
        self.vocab_size = config.decoder_config.vocab_size
        # Single flat projection producing logits for every codebook channel at once.
        self.logits_dense = nn.Linear(
            config.decoder_config.hidden_size, (self.num_channels * self.vocab_size), bias=False
        )
        self.loss_type = "ForMaskedLM"
        # Initialize weights and apply final processing
        self.post_init()

    @auto_docstring
    @can_return_tuple
    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        attention_mask: torch.LongTensor | None = None,
        decoder_input_ids: torch.LongTensor | None = None,
        decoder_position_ids: torch.LongTensor | None = None,
        decoder_attention_mask: torch.LongTensor | None = None,
        encoder_outputs: BaseModelOutput | tuple | None = None,
        past_key_values: EncoderDecoderCache | None = None,
        use_cache: bool | None = None,
        output_attentions: bool | None = None,
        output_hidden_states: bool | None = None,
        labels: torch.LongTensor | None = None,
        cache_position: torch.LongTensor | None = None,
        **kwargs,
    ) -> tuple | Seq2SeqLMOutput:
        r"""
        decoder_input_ids (`torch.LongTensor` of shape `(batch_size * num_codebooks, target_sequence_length)
        or (batch_size, target_sequence_length, num_codebooks)`, *optional*):
            1. (batch_size * num_codebooks, target_sequence_length): corresponds to the general use case where
            the audio input codebooks are flattened into the batch dimension. This also aligns with the flat-
            tened audio logits which are used to calculate the loss.

            2. (batch_size, sequence_length, num_codebooks): corresponds to the internally used shape of
            Dia to calculate embeddings and subsequent steps more efficiently.

            If no `decoder_input_ids` are provided, it will create a tensor of `bos_token_id` with shape
            `(batch_size, 1, num_codebooks)`. Indices can be obtained using the [`DiaProcessor`]. See
            [`DiaProcessor.__call__`] for more details.

            [What are decoder input IDs?](../glossary#decoder-input-ids)
        decoder_position_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`):
            Indices of positions of each input sequence tokens in the position embeddings.
            Used to calculate the position embeddings up to `config.decoder_config.max_position_embeddings`.

            [What are position IDs?](../glossary#position-ids)
        labels (`torch.LongTensor` of shape `(batch_size * num_codebooks,)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in
            `[0, ..., config.decoder_config.vocab_size - 1]` or -100. Tokens with indices set to `-100`
            are ignored (masked).
        """
        outputs = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            decoder_input_ids=decoder_input_ids,
            decoder_position_ids=decoder_position_ids,
            decoder_attention_mask=decoder_attention_mask,
            encoder_outputs=encoder_outputs,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            cache_position=cache_position,
            **kwargs,
        )
        last_hidden_state = outputs[0]
        batch_size = last_hidden_state.shape[0]

        # Project to (bsz, seq, channels * vocab), split the channel axis out, then
        # fold it into the batch dimension to match the flattened label layout.
        projected = self.logits_dense(last_hidden_state)
        per_channel_logits = projected.view((batch_size, -1, self.num_channels, self.vocab_size))
        audio_logits = (
            per_channel_logits.transpose(1, 2)
            .contiguous()
            .view(batch_size * self.num_channels, -1, self.vocab_size)
        )

        loss = None
        if labels is not None:
            loss = self.loss_function(logits=audio_logits, labels=labels, vocab_size=self.vocab_size, **kwargs)

        return Seq2SeqLMOutput(
            loss=loss,
            logits=audio_logits,
            past_key_values=outputs.past_key_values,
            decoder_hidden_states=outputs.decoder_hidden_states,
            decoder_attentions=outputs.decoder_attentions,
            cross_attentions=outputs.cross_attentions,
            encoder_last_hidden_state=outputs.encoder_last_hidden_state,
            encoder_hidden_states=outputs.encoder_hidden_states,
            encoder_attentions=outputs.encoder_attentions,
        )
# Public API of the Dia modular definition file.
__all__ = ["DiaModel", "DiaPreTrainedModel", "DiaForConditionalGeneration"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/dia/modular_dia.py",
"license": "Apache License 2.0",
"lines": 604,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/dia/processing_dia.py | # Copyright 2025 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Processor class for Dia"""
import math
from pathlib import Path
from ...audio_utils import AudioInput, make_list_of_audio
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import AudioKwargs, ProcessingKwargs, ProcessorMixin, Unpack
from ...utils import auto_docstring, is_soundfile_available, is_torch_available
if is_torch_available():
import torch
if is_soundfile_available():
import soundfile as sf
class DiaAudioKwargs(AudioKwargs, total=False):
    """
    bos_token_id (`int`, *optional*, defaults to `1026`):
        The token ID used as the beginning-of-sequence token for audio codebooks. This token is prepended to each
        audio sequence during encoding.
    eos_token_id (`int`, *optional*, defaults to `1024`):
        The token ID used as the end-of-sequence token for audio codebooks. This token is appended to audio sequences
        during training (when `generation=False`) to mark the end of the audio.
    pad_token_id (`int`, *optional*, defaults to `1025`):
        The token ID used for padding audio codebook sequences. This token is used to fill positions in the delay
        pattern where no valid audio token exists.
    delay_pattern (`list[int]`, *optional*, defaults to `[0, 8, 9, 10, 11, 12, 13, 14, 15]`):
        A list of delay values (in frames) for each codebook channel. The delay pattern creates temporal offsets
        between different codebook channels, allowing the model to capture dependencies across channels. Each value
        represents the number of frames to delay that specific channel.
    generation (`bool`, *optional*, defaults to `True`):
        Whether the processor is being used for generation (text-to-speech) or training. When `True`, the processor
        prepares inputs for generation mode where audio is generated from text. When `False`, it prepares inputs for
        training where both text and audio are provided.
    """

    # TypedDict-style keys; defaults documented above live in DiaProcessorKwargs._defaults.
    bos_token_id: int
    eos_token_id: int
    pad_token_id: int
    delay_pattern: list[int]
    generation: bool
class DiaProcessorKwargs(ProcessingKwargs, total=False):
    # Groups text/audio/common kwargs for DiaProcessor and pins their defaults.
    audio_kwargs: DiaAudioKwargs
    _defaults = {
        "text_kwargs": {
            "padding": True,
            "padding_side": "right",
            "add_special_tokens": False,
        },
        # Special audio token ids sit just above the 0..1023 codebook range.
        "audio_kwargs": {
            "eos_token_id": 1024,
            "pad_token_id": 1025,
            "bos_token_id": 1026,
            "delay_pattern": [0, 8, 9, 10, 11, 12, 13, 14, 15],
            "generation": True,
            "sampling_rate": 44100,
        },
        "common_kwargs": {
            "return_tensors": "pt",
        },
    }
@auto_docstring
class DiaProcessor(ProcessorMixin):
audio_tokenizer_class = "DacModel"
def __init__(self, feature_extractor, tokenizer, audio_tokenizer):
    r"""
    audio_tokenizer (`DacModel`):
        An instance of [`DacModel`] used to encode/decode audio into/from codebooks. It is a required input.
    """
    # Register the audio tokenizer alongside the standard processor components.
    super().__init__(feature_extractor, tokenizer, audio_tokenizer=audio_tokenizer)
@auto_docstring
def __call__(
self,
text: str | list[str],
audio: AudioInput | None = None,
output_labels: bool | None = False,
**kwargs: Unpack[DiaProcessorKwargs],
):
r"""
output_labels (`bool`, *optional*, defaults to `False`):
Whether to return labels for training. When `True`, the processor generates labels from the decoder input
sequence by shifting it by one position. Labels use special values: `-100` for tokens to ignore in loss
computation (padding and BOS tokens), and `-101` for audio frames used only for the backbone model (when
`depth_decoder_labels_ratio < 1.0`). Cannot be used together with `generation=True`.
"""
if not is_torch_available():
raise ValueError(
"The `DiaProcessor` relies on the `audio_tokenizer` which requires `torch` but we couldn't "
"find it in your environment. You can install torch via `pip install torch`."
)
if text is None:
raise ValueError("You need to specify the `text` input to process.")
output_kwargs = self._merge_kwargs(
DiaProcessorKwargs,
**kwargs,
)
text_kwargs = output_kwargs["text_kwargs"]
audio_kwargs = output_kwargs["audio_kwargs"]
return_tensors = text_kwargs.get("return_tensors", None)
if return_tensors != "pt":
raise ValueError(f"{self.__class__.__name__} only supports `return_tensors='pt'`.")
data = {}
# Text
if isinstance(text, str):
text = [text]
elif not (isinstance(text, (list, tuple)) and all(isinstance(t, str) for t in text)):
raise ValueError("Invalid input text. Please provide a string, or a list of strings")
encodings = self.tokenizer(text, **text_kwargs)
data.update(encodings)
# Audio
delay_pattern = audio_kwargs.pop("delay_pattern", None)
audio_bos_token_id = audio_kwargs.pop("bos_token_id", None)
audio_eos_token_id = audio_kwargs.pop("eos_token_id", None)
audio_pad_token_id = audio_kwargs.pop("pad_token_id", None)
generation = audio_kwargs.pop("generation", True)
if (
audio_bos_token_id is None
or audio_eos_token_id is None
or audio_pad_token_id is None
or delay_pattern is None
):
raise ValueError(
"To enable processing for Dia, we need the `bos_token_id`, `eos_token_id`, "
"`pad_token_id`, and `delay_pattern`. You may have accidentally overwritten one of those."
)
if generation and output_labels:
raise ValueError(
f"Labels with `generation` is incompatible, got generation={generation}, output_labels={output_labels}."
)
batch_size = data["input_ids"].shape[0]
num_channels = len(delay_pattern)
max_delay = max(delay_pattern)
# Voice cloning generation / general training
if audio is not None:
audio = make_list_of_audio(audio)
input_audios = self.feature_extractor(audio, **audio_kwargs)
compression_rate = math.prod(self.audio_tokenizer.config.downsampling_ratios)
max_encoded_sequence_len = input_audios["padding_mask"][0].shape[-1] // compression_rate
decoder_input_ids = []
decoder_attention_mask = []
# TODO: dac with batching is currently broken, but non-batch is working
# refer to https://gist.github.com/vasqu/643a45b680cf39fd7467271ee2eb6f80 for a validation script
for padding_mask, audio in zip(input_audios["padding_mask"], input_audios["input_values"]):
# get current length with hop length in mind (as if it were sampled as a single audio)
base_pad_len = self.feature_extractor.hop_length
current_audio_len = math.ceil(padding_mask.sum(dim=-1) / base_pad_len) * base_pad_len
encoded_sequence_len = current_audio_len // compression_rate
padding_len = max_encoded_sequence_len - encoded_sequence_len
# compute non-padded forward pass; one extra bos (and eos if training) is added
with torch.no_grad():
audio = audio[None, ..., :current_audio_len].to(self.audio_tokenizer.device)
input_ids = self.audio_tokenizer.encode(audio).audio_codes.transpose(1, 2)
if not generation:
input_ids = torch.nn.functional.pad(
input_ids, pad=(0, 0, 0, 1, 0, 0), mode="constant", value=audio_eos_token_id
)
# apply padding
# +1 for the bos within the real sequence
input_ids = torch.nn.functional.pad(
input_ids, pad=(0, 0, padding_len + 1, 0, 0, 0), mode="constant", value=audio_bos_token_id
)
num_valid_inputs = encoded_sequence_len + 1 + max_delay # sequence + bos + delay
num_valid_inputs += 0 if generation else 1 # eos if training
attention_mask = torch.tensor([0] * padding_len + [1] * num_valid_inputs, dtype=torch.long)[None, :]
decoder_input_ids.append(input_ids)
decoder_attention_mask.append(attention_mask)
decoder_input_ids = torch.cat(decoder_input_ids, dim=0)
decoder_attention_mask = torch.cat(decoder_attention_mask, dim=0)
# TTS generation
elif generation:
# all bos to start with TTS
decoder_input_ids = torch.full((batch_size, 1, num_channels), audio_bos_token_id, dtype=torch.long)
# we preemptively add the delay
decoder_attention_mask = torch.ones(size=(batch_size, 1 + max_delay), dtype=torch.long)
else:
raise ValueError("If you try to train, you should provide audio data as well.")
if batch_size != decoder_input_ids.shape[0]:
raise ValueError(
f"Need the same amount of samples for both text and audio, but got text samples={batch_size} and "
f"audio samples = {decoder_input_ids.shape[0]} instead."
)
# prepare shift indices per delay
max_seq_len = decoder_attention_mask.shape[-1]
max_audio_len = max_seq_len - max_delay
precomputed_idx = self.build_indices(
bsz=batch_size,
seq_len=max_seq_len,
num_channels=num_channels,
delay_pattern=delay_pattern,
revert=False,
)
# create delay pattern input
# the pad token will be used for masking which input is valid for prediction during generation
prefill = torch.full(
(batch_size, max_seq_len, num_channels),
fill_value=audio_pad_token_id,
dtype=torch.int,
)
prefill[:, :max_audio_len] = decoder_input_ids
delayed_decoder_input_ids = self.apply_audio_delay(
audio=prefill,
pad_token_id=audio_pad_token_id,
bos_token_id=audio_bos_token_id,
precomputed_idx=precomputed_idx,
)
data.update({"decoder_input_ids": delayed_decoder_input_ids, "decoder_attention_mask": decoder_attention_mask})
if output_labels:
# Base idea is to shift on the sequence dim
labels = data["decoder_input_ids"].clone()[:, 1:]
labels[labels == audio_pad_token_id] = -100
labels[labels == audio_bos_token_id] = -100
data["labels"] = labels.transpose(1, 2).reshape(batch_size * num_channels, -1).contiguous().long()
data["decoder_input_ids"] = data["decoder_input_ids"][:, :-1]
data["decoder_attention_mask"] = data["decoder_attention_mask"][:, :-1]
return BatchFeature(data=data, tensor_type=return_tensors)
def batch_decode(
self,
decoder_input_ids: "torch.Tensor",
audio_prompt_len: int | None = None,
**kwargs: Unpack[DiaProcessorKwargs],
) -> list["torch.Tensor"]:
"""
Decodes a batch of audio codebook sequences into their respective audio waveforms via the
`audio_tokenizer`. See [`~DacModel.decode`] for more information.
Args:
decoder_input_ids (`torch.Tensor`): The complete output sequence of the decoder.
audio_prompt_len (`int`): The audio prefix length (e.g. when using voice cloning).
"""
output_kwargs = self._merge_kwargs(
DiaProcessorKwargs,
**kwargs,
)
audio_kwargs = output_kwargs["audio_kwargs"]
delay_pattern = audio_kwargs.pop("delay_pattern", None)
audio_bos_token_id = audio_kwargs.pop("bos_token_id", None)
audio_pad_token_id = audio_kwargs.pop("pad_token_id", None)
if audio_bos_token_id is None or audio_pad_token_id is None or delay_pattern is None:
raise ValueError(
"To enable decoding for Dia, we need the `bos_token_id`, `pad_token_id`, "
"and `delay_pattern`. You may have accidentally overwritten one of those."
)
# either decode the whole audio sequence or only the generated parts
if audio_prompt_len is not None:
audio_prompt_len = torch.tensor(audio_prompt_len, device=decoder_input_ids.device, dtype=torch.long)
start_of_generation_idx = audio_prompt_len[None].expand(decoder_input_ids.shape[0])
else:
start_of_generation_idx = (decoder_input_ids[:, :, 0] == audio_bos_token_id).sum(dim=-1)
# -1 for the eos token
end_of_generation_idx = (
decoder_input_ids.shape[1] - (decoder_input_ids[:, :, 0] == audio_pad_token_id).sum(dim=-1) - 1
)
# revert delay
bsz, seq_len, num_channels = decoder_input_ids.shape
precomputed_idx = self.build_indices(
bsz=bsz,
seq_len=seq_len,
num_channels=num_channels,
delay_pattern=delay_pattern,
revert=True,
)
output_sequences = self.apply_audio_delay(
audio=decoder_input_ids,
# We do not care about these values as we cut them out
# with `start_of_generation_idx` and `end_of_generation_idx`
pad_token_id=-1,
bos_token_id=-1,
precomputed_idx=precomputed_idx,
).transpose(1, 2)
# retrieve the correct sequences each
audios = []
# TODO: see above, dac doesn't work in batches yet
with torch.no_grad():
for i in range(start_of_generation_idx.shape[0]):
output_i = output_sequences[i, :, start_of_generation_idx[i] : end_of_generation_idx[i]][None, ...]
output_i = output_i.to(self.audio_tokenizer.device)
audio_i = self.audio_tokenizer.decode(audio_codes=output_i).audio_values.cpu().squeeze()
audios.append(audio_i)
return audios
def decode(
self,
decoder_input_ids: "torch.Tensor",
audio_prompt_len: int | None = None,
**kwargs: Unpack[DiaProcessorKwargs],
) -> "torch.Tensor":
"""
Decodes a single sequence of audio codebooks into the respective audio waveform via the
`audio_tokenizer`. See [`~DacModel.decode`] and [`~DiaProcessor.batch_decode`] for more information.
"""
if decoder_input_ids.shape[0] != 1:
raise ValueError(
f"Expecting a single output to be decoded but received {decoder_input_ids.shape[0]} samples instead."
)
return self.batch_decode(decoder_input_ids, audio_prompt_len, **kwargs)[0]
def get_audio_prompt_len(
self,
decoder_attention_mask: "torch.Tensor",
**kwargs: Unpack[DiaProcessorKwargs],
) -> int:
"""Utility function to get the audio prompt length."""
output_kwargs = self._merge_kwargs(
DiaProcessorKwargs,
**kwargs,
)
audio_kwargs = output_kwargs["audio_kwargs"]
delay_pattern = audio_kwargs.pop("delay_pattern", None)
if delay_pattern is None:
raise ValueError(
"To enable the utility of retrieving the prompt length for Dia, we need the "
"`delay_pattern`. You may have accidentally overwritten this."
)
return decoder_attention_mask.shape[1] - max(delay_pattern)
# Copied from transformers.models.csm.processing_csm.CsmProcessor.save_audio with Csm->Dia
def save_audio(
self,
audio: AudioInput,
saving_path: str | Path | list[str | Path],
**kwargs: Unpack[DiaProcessorKwargs],
):
# TODO: @eustlb, this should be in AudioProcessor
if not is_soundfile_available():
raise ImportError("Please install `soundfile` to save audio files.")
# ensure correct audio input
audio = make_list_of_audio(audio)
# ensure correct saving path
if isinstance(saving_path, (str, Path)):
saving_path = [saving_path]
elif not (isinstance(saving_path, (list, tuple)) and all(isinstance(p, (str, Path)) for p in saving_path)):
raise ValueError("Invalid input path. Please provide a string, or a list of strings")
if len(audio) != len(saving_path):
raise ValueError("The number of audio and saving paths must be the same")
output_kwargs = self._merge_kwargs(
DiaProcessorKwargs,
**kwargs,
)
audio_kwargs = output_kwargs["audio_kwargs"]
sampling_rate = audio_kwargs["sampling_rate"]
for audio_value, p in zip(audio, saving_path):
if isinstance(audio_value, torch.Tensor):
audio_value = audio_value.cpu().float().numpy()
sf.write(p, audio_value, sampling_rate)
@staticmethod
def build_indices(
bsz: int,
seq_len: int,
num_channels: int,
delay_pattern: list[int],
revert: bool = False,
) -> tuple["torch.Tensor", "torch.Tensor"]:
"""
Precompute (sequence_idx, all_idx) so that out[seq, channel] = in[seq - delay[channel], channel]
or in[seq, channel] = out[seq + delay[channel], channel] if `revert`.
Negative sequence_idx => BOS; sequence_idx >= seq_len => PAD.
"""
delay_array = torch.tensor(delay_pattern, dtype=torch.int32)
# (0..seq_len-1)
sequence_idx = torch.arange(seq_len, dtype=torch.int32)[None, :].expand(bsz, seq_len)[..., None]
# + or - delay depending if we delay or revert the delay
if not revert:
sequence_idx = sequence_idx - delay_array[None, None, :]
else:
sequence_idx = sequence_idx + delay_array[None, None, :]
# if delay goes over the range we clamp back to valid values
valid_sequence_idx = torch.clamp(sequence_idx, 0, seq_len - 1)
batch_idx = torch.arange(bsz, dtype=torch.int32)[:, None, None].expand(bsz, seq_len, num_channels)
channel_idx = torch.arange(num_channels, dtype=torch.int32)[None, None, :].expand(bsz, seq_len, num_channels)
all_idx = torch.stack(
[batch_idx.reshape(-1), valid_sequence_idx.reshape(-1), channel_idx.reshape(-1)],
dim=1,
).long()
return sequence_idx, all_idx
@staticmethod
def apply_audio_delay(
audio: "torch.Tensor",
pad_token_id: int,
bos_token_id: int,
precomputed_idx: tuple["torch.Tensor", "torch.Tensor"],
) -> "torch.Tensor":
"""
Applies or reverts the delay pattern to batched audio tokens using precomputed indices,
inserting BOS where sequence_idx < 0 and PAD where sequence_idx >= seq_len.
Args:
audio: audio tokens of shape [bsz, seq_len, num_channels]
pad_token_id: the PAD token
bos_token_id: the BOS token
precomputed_idx: from `build_indices`
Returns:
final_audio: delayed or reverted audio tokens of shape [bsz, seq_len, num_channels]
"""
# Move everything to the same device
device = audio.device
sequence_idx, all_idx = precomputed_idx
sequence_idx = sequence_idx.to(device)
all_idx = all_idx.to(device)
# Gather per precomputed indices
batch_idx, valid_sequence_idx, channel_idx = torch.unbind(all_idx, dim=-1)
gathered_audio = audio[batch_idx, valid_sequence_idx, channel_idx].view(audio.size())
# Mask according to negative sequence_idx => BOS; sequence_idx >= seq_len => PAD
mask_bos = sequence_idx < 0
mask_pad = sequence_idx >= audio.shape[1]
final_audio = torch.where(mask_bos, bos_token_id, torch.where(mask_pad, pad_token_id, gathered_audio))
return final_audio
__all__ = ["DiaProcessor"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/dia/processing_dia.py",
"license": "Apache License 2.0",
"lines": 410,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/dia/tokenization_dia.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization class for Dia."""
from ...tokenization_python import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
class DiaTokenizer(PreTrainedTokenizer):
    """
    Construct a Dia tokenizer. Dia simply uses raw bytes utf-8 encoding except for special tokens `[S1]` and `[S2]`.

    This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should
    refer to this superclass for more information regarding those methods.

    Args:
        pad_token (`str`, *optional*, defaults to `"<pad>"`):
            The token used for padding, for example when batching sequences of different lengths.
        unk_token (`str`, *optional*, defaults to `"<pad>"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
            token instead.
        max_length (`int`, *optional*, defaults to 1024):
            The maximum length of the sequences when encoding. Sequences longer than this will be truncated.
        offset (`int`, *optional*, defaults to 0):
            The offset of the tokenizer, i.e. the shift applied to a byte value to obtain its token id.
    """
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        pad_token: str | None = "<pad>",
        unk_token: str | None = "<pad>",
        max_length: int | None = 1024,
        offset: int = 0,
        **kwargs,
    ):
        # We have no eos/bos tokens but allow padding -- no l/r strip as we treat them as tokens as well
        pad_token = AddedToken(pad_token) if isinstance(pad_token, str) else pad_token
        unk_token = AddedToken(unk_token) if isinstance(unk_token, str) else unk_token
        self._utf_vocab_size = 2**8  # utf is 8 bits
        # NOTE(review): assigned before `super().__init__` — presumably consumed by the base-class
        # init to register the special tokens; keep this ordering.
        self._added_tokens_decoder = {0: pad_token, 1: AddedToken("[S1]"), 2: AddedToken("[S2]")}
        self.offset = offset
        super().__init__(
            unk_token=unk_token,
            pad_token=pad_token,
            max_length=max_length,
            offset=offset,
            token_type_ids_pattern="all_zeros",
            token_type_ids_include_special_tokens=True,
            special_tokens_pattern="none",
            **kwargs,
        )
    @property
    def vocab_size(self):
        # Byte-level vocabulary: one token per possible byte value (256).
        return self._utf_vocab_size
    def get_vocab(self):
        """Return the byte vocabulary (shifted by `offset`) merged with the added special tokens."""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size + self.offset)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def _tokenize(self, text: str) -> list[str]:
        """Take as input a string and return a list of strings (tokens) for words/sub-words"""
        # One token per utf-8 byte, each represented as the chr() of the byte value.
        tokens = [chr(i) for i in text.encode("utf-8")]
        return tokens
    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        # Multi-character strings have no byte id; added/special tokens are resolved by the base class.
        if len(token) != 1:
            token_id = None
        else:
            token_id = ord(token) + self.offset
        return token_id
    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        token = chr(index - self.offset)
        return token
    def convert_tokens_to_string(self, tokens: list[str]) -> str:
        """Converts a sequence of tokens (string) in a single string."""
        bstring = b""
        for token in tokens:
            # NOTE(review): `added_tokens_decoder` maps int ids -> AddedToken, so a `str` token can
            # never match the first branch; all three branches effectively encode the token text.
            # Confirm whether the first branch is intentionally dead.
            if token in self.added_tokens_decoder:
                added_token_obj = self.added_tokens_decoder[token]
                tok_string = str(added_token_obj).encode("utf-8")
            elif token in self.added_tokens_encoder:
                tok_string = token.encode("utf-8")
            else:
                tok_string = token.encode("utf-8")  # Assume general string token
            bstring += tok_string
        # Invalid utf-8 byte sequences are dropped rather than raising.
        string = bstring.decode("utf-8", errors="ignore")
        return string
__all__ = ["DiaTokenizer"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/dia/tokenization_dia.py",
"license": "Apache License 2.0",
"lines": 95,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:tests/models/dia/test_feature_extraction_dia.py | # Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the Dia feature extractor."""
import itertools
import random
import unittest
import numpy as np
from transformers import DiaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
global_rng = random.Random()
# Copied from tests.models.whisper.test_feature_extraction_whisper.floats_list
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Return a ``shape[0]``-by-``shape[1]`` nested list of uniform floats in ``[0, scale)``.

    Falls back to the module-level ``global_rng`` when no generator is supplied; ``name`` is
    accepted for signature compatibility with sibling test helpers and is ignored.
    """
    source = global_rng if rng is None else rng
    return [[source.random() * scale for _ in range(shape[1])] for _ in range(shape[0])]
@require_torch
class DiaFeatureExtractionTester:
    """Helper that produces feature-extractor settings and synthetic audio inputs for the tests below."""
    # Copied from tests.models.dac.test_feature_extraction_dac.DacFeatureExtractionTester.__init__
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        hop_length=512,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.hop_length = hop_length
        # Step between consecutive sample lengths so the batch covers min..max lengths.
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
    # Copied from tests.models.dac.test_feature_extraction_dac.DacFeatureExtractionTester.prepare_feat_extract_dict
    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "hop_length": self.hop_length,
        }
    # Copied from tests.models.encodec.test_feature_extraction_encodec.EnCodecFeatureExtractionTester.prepare_inputs_for_common
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))
        if equal_length:
            audio_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            audio_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            audio_inputs = [np.asarray(x) for x in audio_inputs]
        return audio_inputs
@require_torch
class DiaFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    """Unit tests for `DiaFeatureExtractor` (padding, truncation, dtype and integration values)."""
    feature_extraction_class = DiaFeatureExtractor
    def setUp(self):
        self.feat_extract_tester = DiaFeatureExtractionTester(self)
    # Copied from tests.models.dac.test_feature_extraction_dac.DacFeatureExtractionTest.test_call
    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        audio_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_audio_inputs = [np.asarray(audio_input) for audio_input in audio_inputs]
        # Test not batched input
        encoded_sequences_1 = feat_extract(audio_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_audio_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))
        # Test batched
        encoded_sequences_1 = feat_extract(audio_inputs, padding=True, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_audio_inputs, padding=True, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    # Copied from tests.models.dac.test_feature_extraction_dac.DacFeatureExtractionTest.test_double_precision_pad
    def test_double_precision_pad(self):
        # float64 inputs must be downcast to float32 by `pad` for both numpy and torch outputs.
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_audio_inputs = np.random.rand(100).astype(np.float64)
        py_audio_inputs = np_audio_inputs.tolist()
        for inputs in [py_audio_inputs, np_audio_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)
    # Copied from tests.models.dac.test_feature_extraction_dac.DacFeatureExtractionTest._load_datasamples
    def _load_datasamples(self, num_samples):
        from datasets import load_dataset
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        audio_samples = ds.sort("id")[:num_samples]["audio"]
        return [x["array"] for x in audio_samples]
    # Copied from tests.models.dac.test_feature_extraction_dac.DacFeatureExtractionTest.test_integration with Dac->Dia
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [ 2.3803711e-03, 2.0751953e-03, 1.9836426e-03, 2.1057129e-03,
              1.6174316e-03, 3.0517578e-04, 9.1552734e-05, 3.3569336e-04,
              9.7656250e-04, 1.8310547e-03, 2.0141602e-03, 2.1057129e-03,
              1.7395020e-03, 4.5776367e-04, -3.9672852e-04, 4.5776367e-04,
              1.0070801e-03, 9.1552734e-05, 4.8828125e-04, 1.1596680e-03,
              7.3242188e-04, 9.4604492e-04, 1.8005371e-03, 1.8310547e-03,
              8.8500977e-04, 4.2724609e-04, 4.8828125e-04, 7.3242188e-04,
              1.0986328e-03, 2.1057129e-03]
        )
        # fmt: on
        input_audio = self._load_datasamples(1)
        feature_extractor = DiaFeatureExtractor()
        input_values = feature_extractor(input_audio, return_tensors="pt")["input_values"]
        self.assertEqual(input_values.shape, (1, 1, 93696))
        torch.testing.assert_close(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, rtol=1e-4, atol=1e-4)
        # The last 16 positions are right padding added to reach a multiple of the hop length.
        audio_input_end = torch.tensor(input_audio[0][-30:], dtype=torch.float32)
        torch.testing.assert_close(input_values[0, 0, -46:-16], audio_input_end, rtol=1e-4, atol=1e-4)
    def test_integration_stereo(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [2.3804e-03, 2.0752e-03, 1.9836e-03, 2.1057e-03, 1.6174e-03,
             3.0518e-04, 9.1553e-05, 3.3569e-04, 9.7656e-04, 1.8311e-03,
             2.0142e-03, 2.1057e-03, 1.7395e-03, 4.5776e-04, -3.9673e-04,
             4.5776e-04, 1.0071e-03, 9.1553e-05, 4.8828e-04, 1.1597e-03,
             7.3242e-04, 9.4604e-04, 1.8005e-03, 1.8311e-03, 8.8501e-04,
             4.2725e-04, 4.8828e-04, 7.3242e-04, 1.0986e-03, 2.1057e-03]
        )
        # fmt: on
        input_audio = self._load_datasamples(1)
        input_audio = [np.tile(input_audio[0][None], reps=(2, 1))]
        feature_extractor = DiaFeatureExtractor(feature_size=2)
        input_values = feature_extractor(input_audio, return_tensors="pt").input_values
        # NOTE(review): with `feature_size=2` and duplicated channels, a (1, 2, 93696) shape might be
        # expected here — confirm against the extractor's channel handling.
        self.assertEqual(input_values.shape, (1, 1, 93696))
        torch.testing.assert_close(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, rtol=1e-4, atol=1e-4)
    # Copied from tests.models.dac.test_feature_extraction_dac.DacFeatureExtractionTest.test_truncation_and_padding with Dac->Dia
    def test_truncation_and_padding(self):
        input_audio = self._load_datasamples(2)
        # would be easier if the stride was like
        feature_extractor = DiaFeatureExtractor()
        # pad and trunc raise an error ?
        with self.assertRaisesRegex(
            ValueError,
            "^Both padding and truncation were set. Make sure you only set one.$",
        ):
            truncated_outputs = feature_extractor(
                input_audio, padding="max_length", truncation=True, return_tensors="pt"
            ).input_values
        # force truncate to max_length
        truncated_outputs = feature_extractor(
            input_audio, truncation=True, max_length=48000, return_tensors="pt"
        ).input_values
        # 48128 = 48000 rounded up to the next multiple of the hop length (512).
        self.assertEqual(truncated_outputs.shape, (2, 1, 48128))
        # pad:
        padded_outputs = feature_extractor(input_audio, padding=True, return_tensors="pt").input_values
        self.assertEqual(padded_outputs.shape, (2, 1, 93696))
        # force pad to max length
        truncated_outputs = feature_extractor(
            input_audio, padding="max_length", max_length=100000, return_tensors="pt"
        ).input_values
        self.assertEqual(truncated_outputs.shape, (2, 1, 100352))
        # force no pad
        with self.assertRaisesRegex(
            ValueError,
            r"Unable to convert output[\s\S]*padding=True",
        ):
            truncated_outputs = feature_extractor(input_audio, padding=False, return_tensors="pt").input_values
        truncated_outputs = feature_extractor(input_audio[0], padding=False, return_tensors="pt").input_values
        self.assertEqual(truncated_outputs.shape, (1, 1, 93680))
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/dia/test_feature_extraction_dia.py",
"license": "Apache License 2.0",
"lines": 192,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:tests/models/dia/test_modeling_dia.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch Dia model."""
import copy
import pathlib
import tempfile
import unittest
import pytest
from transformers.models.dia import DiaConfig, DiaDecoderConfig, DiaEncoderConfig
from transformers.testing_utils import (
cleanup,
is_flaky,
require_torch,
require_torch_accelerator,
slow,
torch_device,
)
from transformers.utils import is_soundfile_available, is_torch_available, is_torchaudio_available
from transformers.utils.import_utils import is_datasets_available
from ...generation.test_utils import GenerationTesterMixin, assert_similar_generate_outputs
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_datasets_available():
from datasets import Audio, load_dataset
if is_torch_available():
import torch
from transformers import (
DiaForConditionalGeneration,
DiaModel,
DiaProcessor,
PreTrainedConfig,
PreTrainedModel,
)
from transformers.cache_utils import (
StaticCache,
)
from transformers.models.dia.modeling_dia import DiaDecoder, DiaEncoder
if is_torchaudio_available():
import torchaudio
if is_soundfile_available():
import soundfile as sf
@require_torch
class DiaModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,  # need batch_size != num_hidden_layers
        seq_length=7,
        max_length=50,
        is_training=True,
        vocab_size=100,
        hidden_size=16,
        intermediate_size=37,
        num_hidden_layers=2,
        num_attention_heads=2,
        head_dim=8,
        decoder_hidden_size=32,  # typically larger than encoder
        hidden_act="silu",
        eos_token_id=97,  # special tokens all occur after eos
        pad_token_id=98,
        bos_token_id=99,
        delay_pattern=None,
    ):
        """Store the hyperparameters used to build tiny Dia configs and random inputs for testing."""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.max_length = max_length
        self.is_training = is_training
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.head_dim = head_dim
        self.decoder_hidden_size = decoder_hidden_size
        self.hidden_act = hidden_act
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        # Set default delay pattern if not provided
        self.delay_pattern = delay_pattern if delay_pattern is not None else [0, 1, 2]
        # One decoder codebook channel per delay entry.
        self.num_channels = len(self.delay_pattern)
    def get_config(self):
        """Assemble a small `DiaConfig` from matching encoder/decoder sub-configs."""
        encoder_config = DiaEncoderConfig(
            max_position_embeddings=self.max_length,
            num_hidden_layers=self.num_hidden_layers,
            hidden_size=self.hidden_size,
            num_attention_heads=self.num_attention_heads,
            num_key_value_heads=self.num_attention_heads,  # same as num_attention_heads for testing
            head_dim=self.head_dim,
            intermediate_size=self.intermediate_size,
            vocab_size=self.vocab_size,
            hidden_act=self.hidden_act,
        )
        decoder_config = DiaDecoderConfig(
            max_position_embeddings=self.max_length,
            num_hidden_layers=self.num_hidden_layers,
            hidden_size=self.decoder_hidden_size,
            intermediate_size=self.intermediate_size,
            num_attention_heads=self.num_attention_heads,
            num_key_value_heads=1,  # GQA
            head_dim=self.head_dim,
            cross_num_attention_heads=self.num_attention_heads,
            cross_head_dim=self.head_dim,
            cross_num_key_value_heads=1,  # GQA
            cross_hidden_size=self.hidden_size,  # match encoder hidden size
            vocab_size=self.vocab_size,
            hidden_act=self.hidden_act,
            num_channels=self.num_channels,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            bos_token_id=self.bos_token_id,
        )
        config = DiaConfig(
            encoder_config=encoder_config,
            decoder_config=decoder_config,
            delay_pattern=self.delay_pattern,
        )
        return config
def prepare_config_and_inputs(self) -> tuple[DiaConfig, dict]:
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
attention_mask = input_ids.ne(self.pad_token_id)
decoder_input_ids = ids_tensor([self.batch_size, self.seq_length, self.num_channels], self.vocab_size)
decoder_attention_mask = decoder_input_ids[..., 0].ne(self.pad_token_id)
config = self.get_config()
inputs_dict = {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
}
return config, inputs_dict
def prepare_config_and_inputs_for_common(self) -> tuple[DiaConfig, dict]:
config, inputs_dict = self.prepare_config_and_inputs()
return config, inputs_dict
def create_and_check_model_forward(self, config, inputs_dict):
model = DiaModel(config=config).to(torch_device).eval()
input_ids = inputs_dict["input_ids"]
decoder_input_ids = inputs_dict["decoder_input_ids"]
# first forward pass
last_hidden_state = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).last_hidden_state
self.parent.assertTrue(
last_hidden_state.shape, (self.batch_size, self.seq_length, config.decoder_config.hidden_size)
)
    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        """Check that the encoder/decoder, saved and reloaded as standalone models,
        reproduce the composite model's hidden states (up to small numerical drift)."""
        model = DiaModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)

        # Reference activations from the composite model.
        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state

        # Round-trip the encoder through save_pretrained / from_pretrained and
        # compare against the composite run.
        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = DiaEncoder.from_pretrained(tmpdirname).to(torch_device)

        encoder_last_hidden_state_2 = encoder(
            input_ids=inputs_dict["input_ids"], attention_mask=inputs_dict["attention_mask"]
        )[0]

        # Loose tolerance (3e-3): reloaded weights may differ slightly in dtype/layout.
        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 3e-3)

        # Same round-trip for the decoder, fed with the reference encoder states.
        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = DiaDecoder.from_pretrained(tmpdirname).to(torch_device)

        last_hidden_state_2 = decoder(
            input_ids=inputs_dict["decoder_input_ids"],
            attention_mask=inputs_dict["decoder_attention_mask"],
            encoder_hidden_states=encoder_last_hidden_state,
        )[0]

        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 3e-3)
@require_torch
class DiaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model/generation tests for Dia, with overrides for its composite
    encoder-decoder config and its flattened (batch * channels) output layout."""

    all_model_classes = (DiaModel, DiaForConditionalGeneration) if is_torch_available() else ()
    # We only allow greedy search / sampling with one sequence; see `skip_non_greedy_generate`
    all_generative_model_classes = (DiaForConditionalGeneration,)
    # TODO: support new pipeline behavior in tests
    pipeline_model_mapping = {}
    # pipeline_model_mapping = {"text-to-audio": DiaForConditionalGeneration} if is_torch_available() else {}
    test_resize_embeddings = False
    is_encoder_decoder = True
    # Indicates VLMs usually but there are many audio models which are also composite
    _is_composite = True

    def setUp(self):
        self.model_tester = DiaModelTester(self)
        # Skipping `has_text_modality` but manually testing down below
        self.config_tester = ConfigTester(self, has_text_modality=False, config_class=DiaConfig)
        self.skip_non_greedy_generate()

    def prepare_config_and_inputs_for_generate(self, batch_size=2):
        # DIA should not have a `None` eos token id because it uses certain LogitsProcessors
        # so we overwrite preparation
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        # We don't want a few model inputs in our model input dictionary for generation tests
        input_keys_to_ignore = [
            # we don't want encoder-decoder models to start from filled decoder ids
            "decoder_input_ids",
            "decoder_attention_mask",
            # we'll set cache use in each test differently
            "use_cache",
            # Ignore labels if it is in the input dict
            "labels",
            # model-specific exceptions should overload/overwrite this function
        ]
        filtered_inputs_dict = {
            k: v[:batch_size, ...] if isinstance(v, torch.Tensor) else v
            for k, v in inputs_dict.items()
            if k not in input_keys_to_ignore
        }

        return config, filtered_inputs_dict

    def skip_non_greedy_generate(self):
        # Dia's generation only supports greedy search / sampling with a single
        # sequence; any inherited test matching these prefixes is skipped.
        skippable_tests = [
            "test_sample_generate_dict_output",  # return sequences > 1
            "test_beam",
            "test_contrastive",
            "test_assisted",
            "test_prompt_lookup",
            "test_model_parallel_beam_search",
            "test_generate_without_input_ids",
        ]

        for test in skippable_tests:
            if self._testMethodName.startswith(test):
                self.skipTest(reason="Dia only supports greedy search / sampling with one sequence.")

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        """Overridden to account for the 2D flattened structure"""
        inputs_dict = copy.deepcopy(inputs_dict)

        if return_labels:
            # Labels are flattened to (batch * channels, seq) to match Dia's logits layout.
            inputs_dict["labels"] = torch.ones(
                (
                    self.model_tester.batch_size * self.model_tester.num_channels,
                    self.model_tester.seq_length,
                ),
                dtype=torch.long,
                device=torch_device,
            )

        return inputs_dict

    def test_config(self):
        self.config_tester.run_common_tests()

        # Manual testing because of composite configs
        config = self.model_tester.prepare_config_and_inputs()[0]
        self.assertTrue(hasattr(config.encoder_config, "vocab_size"), msg="Encoder `vocab_size` does not exist")
        self.assertTrue(hasattr(config.decoder_config, "vocab_size"), msg="Decoder `vocab_size` does not exist")

    def test_model_forward(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_forward(*config_and_inputs)

    @is_flaky
    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)

    # Overriding shape checks as Dia has different shapes on encoder/decoder using a composite config
    # + additional special cases where 3D x 2D meshes confuse the expected shape
    def _check_logits(self, batch_size, logits, config):
        batch_size *= len(config.delay_pattern)  # Account for flattening
        vocab_size = config.decoder_config.vocab_size

        self.assertIsInstance(logits, tuple)
        self.assertListEqual([iter_logits.shape[0] for iter_logits in logits], [batch_size] * len(logits))
        # vocabulary difference equal to one (imagegptmodel?) or zero (all other models)
        vocab_diff = vocab_size - logits[0].shape[-1]
        self.assertTrue(vocab_diff in [0, 1])
        self.assertListEqual([vocab_size - score.shape[-1] for score in logits], [vocab_diff] * len(logits))

    def _check_attentions_for_generate(
        self, batch_size, attentions, prompt_length, output_length, config, decoder_past_key_values
    ):
        # One tuple of per-layer attentions per generated token.
        self.assertIsInstance(attentions, tuple)
        self.assertListEqual(
            [isinstance(iter_attentions, tuple) for iter_attentions in attentions], [True] * len(attentions)
        )
        self.assertEqual(len(attentions), (output_length - prompt_length))

        use_cache = decoder_past_key_values is not None
        has_static_cache = isinstance(decoder_past_key_values, StaticCache)

        # When `output_attentions=True`, each iteration of generate appends the attentions corresponding to the new
        # token(s)
        for generated_length, iter_attentions in enumerate(attentions):
            # regardless of using cache, the first forward pass will have the full prompt as input
            if use_cache and generated_length > 0:
                model_input_length = 1
            else:
                model_input_length = prompt_length + generated_length
            # Static caches keep the key dimension at the max cache shape.
            query_length = (
                prompt_length + generated_length
                if not has_static_cache
                else decoder_past_key_values.get_max_cache_shape()
            )

            expected_shape = (
                batch_size,
                config.decoder_config.num_attention_heads,  # Decoder config
                model_input_length,
                query_length,
            )
            # check attn size
            self.assertListEqual(
                [layer_attention.shape for layer_attention in iter_attentions], [expected_shape] * len(iter_attentions)
            )

    def _check_encoder_attention_for_generate(self, attentions, batch_size, config, prompt_length):
        # Encoder config
        encoder_expected_shape = (batch_size, config.encoder_config.num_attention_heads, prompt_length, prompt_length)
        self.assertIsInstance(attentions, tuple)
        self.assertListEqual(
            [layer_attentions.shape for layer_attentions in attentions],
            [encoder_expected_shape] * len(attentions),
        )

    def _check_hidden_states_for_generate(
        self, batch_size, hidden_states, prompt_length, output_length, config, use_cache=False
    ):
        # One tuple of per-layer hidden states per generated token.
        self.assertIsInstance(hidden_states, tuple)
        self.assertListEqual(
            [isinstance(iter_hidden_states, tuple) for iter_hidden_states in hidden_states],
            [True] * len(hidden_states),
        )
        self.assertEqual(len(hidden_states), (output_length - prompt_length))

        # When `output_hidden_states=True`, each iteration of generate appends the hidden states corresponding to the
        # new token(s)
        for generated_length, iter_hidden_states in enumerate(hidden_states):
            # regardless of using cache, the first forward pass will have the full prompt as input
            if use_cache and generated_length > 0:
                model_input_length = 1
            else:
                model_input_length = prompt_length + generated_length

            # check hidden size
            # we can have different hidden sizes between encoder and decoder --> check both
            expected_shape_encoder = (batch_size, model_input_length, config.encoder_config.hidden_size)
            expected_shape_decoder = (batch_size, model_input_length, config.decoder_config.hidden_size)
            self.assertTrue(
                [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states]
                == [expected_shape_encoder] * len(iter_hidden_states)
                or [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states]
                == [expected_shape_decoder] * len(iter_hidden_states)
            )

    def _check_encoder_hidden_states_for_generate(self, hidden_states, batch_size, config, prompt_length):
        # Encoder config
        encoder_expected_shape = (batch_size, prompt_length, config.encoder_config.hidden_size)
        self.assertIsInstance(hidden_states, tuple)
        self.assertListEqual(
            [layer_hidden_states.shape for layer_hidden_states in hidden_states],
            [encoder_expected_shape] * len(hidden_states),
        )

    def _check_scores(self, batch_size, scores, generated_length, config):
        # Special case where Dia keeps score in a 2D mesh of (bsz * channels, vocab)
        vocab_size = config.decoder_config.vocab_size
        expected_shape = (batch_size * len(config.delay_pattern), vocab_size)
        self.assertIsInstance(scores, tuple)
        self.assertEqual(len(scores), generated_length)
        self.assertListEqual([iter_scores.shape for iter_scores in scores], [expected_shape] * len(scores))

    def test_sdpa_can_dispatch_composite_models(self):
        """
        Overwritten as it relies on hardcoded namings atm - checking for our case here specifically
        """
        for model_class in self.all_model_classes:
            config, _ = self.model_tester.prepare_config_and_inputs_for_common()
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model = model_class.from_pretrained(tmpdirname)

                # Collect SDPA support across all composite sub-models.
                sub_models_supporting_sdpa = [
                    (module._supports_sdpa or module._supports_attention_backend)
                    for name, module in model.named_modules()
                    if isinstance(module, PreTrainedModel) and name != ""
                ]
                supports_sdpa_all_modules = (
                    all(sub_models_supporting_sdpa)
                    if len(sub_models_supporting_sdpa) > 0
                    else (model._supports_sdpa or model._supports_attention_backend)
                )

                if not supports_sdpa_all_modules:
                    # Requesting sdpa explicitly must fail if any sub-model lacks support.
                    with self.assertRaises(ValueError):
                        model_sdpa = model_class.from_pretrained(tmpdirname, attn_implementation="sdpa")
                else:
                    model_sdpa = model_class.from_pretrained(tmpdirname, attn_implementation="sdpa")
                    # Every sub-config must have been switched to sdpa as well.
                    for key in model_sdpa.config:
                        if isinstance(getattr(model_sdpa.config, key), PreTrainedConfig):
                            sub_config = getattr(model_sdpa.config, key)
                            self.assertTrue(sub_config._attn_implementation == "sdpa")

    @pytest.mark.generate
    @unittest.skip(reason="Custom processor `DiaEOSDelayPatternLogitsProcessor` forces eos token.")
    def test_generate_continue_from_past_key_values(self):
        """Only a small change due to the expected shapes"""
        # Tests that we can continue generating from past key values, returned from a previous `generate` call
        for model_class in self.all_generative_model_classes:
            config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

            # Let's make it always:
            # 1. use cache (for obvious reasons)
            # 2. generate to max length (which can be achieved by setting the eos token to an invalid value), which
            #    would make the test flaky (e.g. EOS is generated on iteration 1 on both generations, but the
            #    continuation would force it to generate beyond an EOS token)
            # 3. ignore `token_type_ids` for simplicity
            # 4. ignore `forced_eos_token_id`, which requires further manipulation of the continuation inputs and is
            #    active by default on some models
            # 5. ignore `encoder_no_repeat_ngram_size`, which is set by default in some encoder-decoder models. When
            #    we use their decoder as a stand-alone model, `encoder_no_repeat_ngram_size` actually prevents
            #    repetition exclusively from the prompt. This test relies on comparing one call vs 2 calls
            #    with cache, what is considered a prompt is different in the two cases.

            if "token_type_ids" in inputs:
                del inputs["token_type_ids"]

            model = model_class(config).to(torch_device)
            model.eval()

            generate_kwargs = {
                "pad_token_id": -1,
                "eos_token_id": -1,
                "forced_eos_token_id": None,
                "encoder_no_repeat_ngram_size": 0,
                "use_cache": True,
                "do_sample": False,
                "return_dict_in_generate": True,
                "output_scores": True,
            }

            # Traditional way of generating text, with `return_dict_in_generate` to return the past key values
            outputs = model.generate(**inputs, **generate_kwargs, max_new_tokens=4)

            # Let's generate again, but passing the past key values in between (3 + 1 = 4 tokens). Note that the
            # inputs may need to be tweaked across `generate` calls (like the attention mask).
            outputs_cached = model.generate(**inputs, **generate_kwargs, max_new_tokens=3)

            # Continue from the tokens generated above, preparing the inputs accordingly
            inputs["past_key_values"] = outputs_cached.past_key_values
            new_attention_len = outputs_cached.sequences.shape[1]  # the only real modification in this test
            inputs["decoder_input_ids"] = outputs_cached.sequences
            if "decoder_attention_mask" in inputs:
                # Pad the decoder mask to the already-generated length before continuing.
                inputs["decoder_attention_mask"] = torch.nn.functional.pad(
                    inputs["decoder_attention_mask"],
                    (0, new_attention_len - inputs["decoder_attention_mask"].shape[1]),
                    mode="constant",
                    value=1,
                )

            first_caches_scores = outputs_cached.scores
            outputs_cached = model.generate(**inputs, **generate_kwargs, max_new_tokens=1)
            full_cached_scores = first_caches_scores + outputs_cached.scores
            outputs_cached.scores = full_cached_scores

            # The two sets of generated text and past kv should be equal to each other
            assert_similar_generate_outputs(outputs, outputs_cached)
            self._check_caches_are_equal(outputs.past_key_values, outputs_cached.past_key_values)

    @pytest.mark.generate
    def test_prepare_inputs_for_generation_kwargs_forwards(self):
        # Encoder-decoder: forwarding of precomputed `encoder_outputs` must be supported.
        super().test_prepare_inputs_for_generation_kwargs_forwards(encoder_outputs=torch.randn(2, 2, 32))

    @unittest.skip(reason="Indirectly checked in Dia through the generate methods.")
    def test_hidden_states_output(self):
        pass

    @unittest.skip(
        reason="Dia has too many mixed embedding types which would cause unintentional side effects, e.g. attempts at tying embeddings"
    )
    def test_model_get_set_embeddings(self):
        pass

    @unittest.skip(reason="Encoder-Decoder cache can not be initialized.")
    def test_multi_gpu_data_parallel_forward(self):
        pass
class DiaForConditionalGenerationIntegrationTest(unittest.TestCase):
    """
    See https://gist.github.com/vasqu/0e3b06360373a4e612aa3b9a7c09185e for generating the integration tests

    NOTE: We add a single `eos` line for the last channel which is skipped in the original Dia
    (It doesn't change the behaviour as we cut by the eos token position)
    """

    def setUp(self):
        # it's a dummy ckpt but should suffice for testing purposes
        self.model_checkpoint = "AntonV/Dia-1.6B"
        self.sampling_rate = 44100

        # prepare audio
        librispeech_dummy = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        librispeech_dummy = librispeech_dummy.cast_column("audio", Audio(sampling_rate=self.sampling_rate))
        audio_sample_1 = librispeech_dummy[-1]["audio"]["array"]
        audio_sample_2 = librispeech_dummy[-2]["audio"]["array"]

        # 10 and 5 codebooks as prefix - saved as files as we need wav files for the original Dia
        # NOTE(review): files carry an .mp3 extension; `sf.write` infers the format from it — confirm
        # soundfile/libsndfile mp3 support in CI.
        dac_chunk_len = 512
        self.audio_prompt_1_path = "/tmp/dia_test_sample_1.mp3"
        self.audio_prompt_2_path = "/tmp/dia_test_sample_2.mp3"
        sf.write(self.audio_prompt_1_path, audio_sample_1[: (dac_chunk_len * 10)], self.sampling_rate)
        sf.write(self.audio_prompt_2_path, audio_sample_2[: (dac_chunk_len * 5)], self.sampling_rate)

    def tearDown(self):
        # Remove the temporary audio prompts and free accelerator memory.
        pathlib.Path(self.audio_prompt_1_path).unlink()
        pathlib.Path(self.audio_prompt_2_path).unlink()
        cleanup(torch_device, gc_collect=True)

    @slow
    @require_torch_accelerator
    def test_dia_model_integration_generate_tts(self):
        # Greedy text-to-speech generation from text only; output tokens must match the recorded reference.
        text = ["[S1] Dia is an open weights text to dialogue model.", "This is a test"]
        processor = DiaProcessor.from_pretrained(self.model_checkpoint)
        inputs = processor(text=text, padding=True, return_tensors="pt").to(torch_device)

        model = DiaForConditionalGeneration.from_pretrained(self.model_checkpoint).to(torch_device)
        outputs = model.generate(**inputs, max_new_tokens=32, do_sample=False)

        # fmt: off
        EXPECTED_OUTPUT_TOKENS = torch.tensor([[[1026, 1026, 1026, 1026, 1026, 1026, 1026, 1026, 1026],
         [ 568, 1026, 1026, 1026, 1026, 1026, 1026, 1026, 1026],
         [ 568, 1026, 1026, 1026, 1026, 1026, 1026, 1026, 1026],
         [ 568, 1026, 1026, 1026, 1026, 1026, 1026, 1026, 1026],
         [ 568, 1026, 1026, 1026, 1026, 1026, 1026, 1026, 1026],
         [ 568, 1026, 1026, 1026, 1026, 1026, 1026, 1026, 1026],
         [ 568, 1026, 1026, 1026, 1026, 1026, 1026, 1026, 1026],
         [ 568, 1026, 1026, 1026, 1026, 1026, 1026, 1026, 1026],
         [ 568, 1026, 1026, 1026, 1026, 1026, 1026, 1026, 1026],
         [ 568,  778, 1026, 1026, 1026, 1026, 1026, 1026, 1026],
         [ 568,  778,  338, 1026, 1026, 1026, 1026, 1026, 1026],
         [ 568,  804,   10,  524, 1026, 1026, 1026, 1026, 1026],
         [ 568,  804,   10,  674,  967, 1026, 1026, 1026, 1026],
         [ 568,  804,   10,  674,  364,  360, 1026, 1026, 1026],
         [ 568,  804,   10,  674,  364,  981,  728, 1026, 1026],
         [ 568,  804,   10,  674,  364,  981,  741,  550, 1026],
         [ 568,  804,   10,  674,  364,  981,  568,  378,   90],
         [1024,  804,   10,  674,  364,  981,  568,  378,  731],
         [1025,  804,   10,  674,  364,  981,  568,  378,  731],
         [1025,  804,   10,  674,  364,  981,  568,  378,  731],
         [1025,  804,   10,  674,  364,  981,  568,  378,  731],
         [1025,  804,   10,  674,  364,  981,  568,  378,  731],
         [1025,  804,   10,  674,  364,  981,  568,  378,  731],
         [1025,  804,   10,  674,  364,  981,  568,  378,  731],
         [1025,  804,   10,  674,  364,  981,  568,  378,  731],
         [1025, 1024,   10,  674,  364,  981,  568,  378,  731],
         [1025, 1025, 1024,  674,  364,  981,  568,  378,  731],
         [1025, 1025, 1025, 1024,  364,  981,  568,  378,  731],
         [1025, 1025, 1025, 1025, 1024,  981,  568,  378,  731],
         [1025, 1025, 1025, 1025, 1025, 1024,  568,  378,  731],
         [1025, 1025, 1025, 1025, 1025, 1025, 1024,  378,  731],
         [1025, 1025, 1025, 1025, 1025, 1025, 1025, 1024,  731],
         [1025, 1025, 1025, 1025, 1025, 1025, 1025, 1025, 1024]],

        [[1026, 1026, 1026, 1026, 1026, 1026, 1026, 1026, 1026],
         [ 568, 1026, 1026, 1026, 1026, 1026, 1026, 1026, 1026],
         [ 568, 1026, 1026, 1026, 1026, 1026, 1026, 1026, 1026],
         [ 698, 1026, 1026, 1026, 1026, 1026, 1026, 1026, 1026],
         [ 592, 1026, 1026, 1026, 1026, 1026, 1026, 1026, 1026],
         [ 592, 1026, 1026, 1026, 1026, 1026, 1026, 1026, 1026],
         [ 592, 1026, 1026, 1026, 1026, 1026, 1026, 1026, 1026],
         [ 592, 1026, 1026, 1026, 1026, 1026, 1026, 1026, 1026],
         [ 592, 1026, 1026, 1026, 1026, 1026, 1026, 1026, 1026],
         [ 592,  778, 1026, 1026, 1026, 1026, 1026, 1026, 1026],
         [ 592,  778,  338, 1026, 1026, 1026, 1026, 1026, 1026],
         [ 592,  697,   10,  524, 1026, 1026, 1026, 1026, 1026],
         [ 592,  288,  476,  649,  967, 1026, 1026, 1026, 1026],
         [ 592,  740,  386,  674,  364,  360, 1026, 1026, 1026],
         [ 592,  402,  386,  347,  362,  981,  728, 1026, 1026],
         [ 592,  402,  721,  728,  327,  981,  741,  550, 1026],
         [ 592,  402,  721,  728,  460,   62,  676,  378,   90],
         [1024,  402,  721,  728,  837,  595,  195,  982,  784],
         [1025,  402,  721,  677,  497,  102,  692,   24,  330],
         [1025,  402,  721,  677,  511,  102,  503,  871,  609],
         [1025,  402,  721,  677,  511,   96,  801,  871,  894],
         [1025,  402,  721,  677,  511,  745,  314,  498,  775],
         [1025,  402,  721,  677,  511,  745,  314,  498,  105],
         [1025,  402,  721,  677,  511,  745,  314,  861,  889],
         [1025,  893,  721,  677,  511,  744,  314,  871,  353],
         [1025, 1024,  888,  677,  511,  744,  314,  871,  332],
         [1025, 1025, 1024,  518,  511,  744,  314,  871,  366],
         [1025, 1025, 1025, 1024,  611,  744,  314,  871,  366],
         [1025, 1025, 1025, 1025, 1024,  980,  314,  871,  366],
         [1025, 1025, 1025, 1025, 1025, 1024,   45,  124,  366],
         [1025, 1025, 1025, 1025, 1025, 1025, 1024,  871,  366],
         [1025, 1025, 1025, 1025, 1025, 1025, 1025, 1024,  719],
         [1025, 1025, 1025, 1025, 1025, 1025, 1025, 1025, 1024]]])
        # fmt: on

        torch.testing.assert_close(outputs.cpu(), EXPECTED_OUTPUT_TOKENS)

    @slow
    @require_torch_accelerator
    def test_dia_model_integration_generate_audio_context(self):
        # Same as above, but with audio prefixes (voice cloning) prepended to each prompt.
        text = ["[S1] Dia is an open weights text to dialogue model.", "This is a test"]
        audio_sample_1 = (
            torchaudio.load(self.audio_prompt_1_path, channels_first=True, backend="soundfile")[0].squeeze().numpy()
        )
        audio_sample_2 = (
            torchaudio.load(self.audio_prompt_2_path, channels_first=True, backend="soundfile")[0].squeeze().numpy()
        )
        audio = [audio_sample_1, audio_sample_2]

        processor = DiaProcessor.from_pretrained(self.model_checkpoint)
        inputs = processor(text=text, audio=audio, padding=True, return_tensors="pt").to(torch_device)

        model = DiaForConditionalGeneration.from_pretrained(self.model_checkpoint).to(torch_device)
        # dia has right padding while we have left padding (for faster prefill)
        # additionally we have new tokens vs dia's max tokens (hence we compare each in the respective settings)
        outputs_1 = model.generate(**inputs, max_new_tokens=22, do_sample=False)
        outputs_2 = model.generate(**inputs, max_new_tokens=27, do_sample=False)

        # fmt: off
        EXPECTED_OUTPUT_TOKENS_1 = torch.tensor([[1026, 1026, 1026, 1026, 1026, 1026, 1026, 1026, 1026],
        [ 578, 1026, 1026, 1026, 1026, 1026, 1026, 1026, 1026],
        [ 592, 1026, 1026, 1026, 1026, 1026, 1026, 1026, 1026],
        [ 494, 1026, 1026, 1026, 1026, 1026, 1026, 1026, 1026],
        [ 330, 1026, 1026, 1026, 1026, 1026, 1026, 1026, 1026],
        [ 330, 1026, 1026, 1026, 1026, 1026, 1026, 1026, 1026],
        [ 330, 1026, 1026, 1026, 1026, 1026, 1026, 1026, 1026],
        [ 330, 1026, 1026, 1026, 1026, 1026, 1026, 1026, 1026],
        [ 330, 1026, 1026, 1026, 1026, 1026, 1026, 1026, 1026],
        [ 330,  501, 1026, 1026, 1026, 1026, 1026, 1026, 1026],
        [ 330,  204,   34, 1026, 1026, 1026, 1026, 1026, 1026],
        [ 330,  254,  915,  863, 1026, 1026, 1026, 1026, 1026],
        [ 330,  215,  458,  313,   50, 1026, 1026, 1026, 1026],
        [ 330,  615,  529,  216,  801,  237, 1026, 1026, 1026],
        [ 330,  580,  563,  233,  337,   37, 1018, 1026, 1026],
        [ 330,  567,  530,  753,  607,  179,  954,  242, 1026],
        [ 330,  627,    6, 1010,  500,  189,  598,  858,  247],
        [1024,  432,  480,  530,  122,    3,  788,  149,  814],
        [1025,  875,  826,  458,   98,  540,  181,  122,  608],
        [1025,  495,  840,  413,  337,  784,  591,  150, 1017],
        [1025,  808,  189,  137,  445,    0,  227,  658,  345],
        [1025,  397,   89,  753, 1016,  173,  984,    0,  910],
        [1025,  875,  460,  934,   50,  335,  670,  818,  722],
        [1025,  875,  460,  762,  119,  372,  503,  858,  584],
        [1025,  348,  555,  475,  469,  458,  963,   41,  664],
        [1025, 1024,  852,  683,  761,  193,  595,  895,  885],
        [1025, 1025, 1024,  135,  761,  902,  163,  623,  385],
        [1025, 1025, 1025, 1024,  852,  282,  581,  623,   70],
        [1025, 1025, 1025, 1025, 1024,   41,  661,  790,  977],
        [1025, 1025, 1025, 1025, 1025, 1024,  580,  401,  464],
        [1025, 1025, 1025, 1025, 1025, 1025, 1024,  756,   61],
        [1025, 1025, 1025, 1025, 1025, 1025, 1025, 1024,  752],
        [1025, 1025, 1025, 1025, 1025, 1025, 1025, 1025, 1024]])

        EXPECTED_OUTPUT_TOKENS_2 = torch.tensor([[1026, 1026, 1026, 1026, 1026, 1026, 1026, 1026, 1026],
        [ 619, 1026, 1026, 1026, 1026, 1026, 1026, 1026, 1026],
        [ 315, 1026, 1026, 1026, 1026, 1026, 1026, 1026, 1026],
        [ 315, 1026, 1026, 1026, 1026, 1026, 1026, 1026, 1026],
        [ 315, 1026, 1026, 1026, 1026, 1026, 1026, 1026, 1026],
        [ 315, 1026, 1026, 1026, 1026, 1026, 1026, 1026, 1026],
        [ 315, 1026, 1026, 1026, 1026, 1026, 1026, 1026, 1026],
        [ 315, 1026, 1026, 1026, 1026, 1026, 1026, 1026, 1026],
        [ 315, 1026, 1026, 1026, 1026, 1026, 1026, 1026, 1026],
        [ 315,  968, 1026, 1026, 1026, 1026, 1026, 1026, 1026],
        [ 315, 1007,  458, 1026, 1026, 1026, 1026, 1026, 1026],
        [ 315,   35,  266,   68, 1026, 1026, 1026, 1026, 1026],
        [ 315,  359,  285,  811,  154, 1026, 1026, 1026, 1026],
        [ 315,  906,  407,  297,  785,  649, 1026, 1026, 1026],
        [ 315,  249,  678,  868,  899,  257,  950, 1026, 1026],
        [ 315,  249,  217,  471,  292,  908,  196,  469, 1026],
        [ 315,  249,  825,  771,  839,  802,  633,  590,  531],
        [1024,  249,  150,   53,  126,   76,  794,  626,  442],
        [1025,  249,  825,  218,  359,  864,  526,  626,  770],
        [1025,  249,  150,  137,  530,  845,  877,  600,  111],
        [1025,  249,  150,  287,  730,  991,  135,  259,   39],
        [1025,  249,  825,  104,  198, 1020,  719,  625,  208],
        [1025,  249,  825,  997,  602,  256,  859,  322,  518],
        [1025,  668,  825,  979,  584,  256,   98,  665,  589],
        [1025,  954,  458,   54,  206,   52,  244,  822,  599],
        [1025, 1024,  104,  914,  435,  579,  860,   92,  661],
        [1025, 1025, 1024,  848,  126,   74,  304,   92,  753],
        [1025, 1025, 1025, 1024,  362,  376,  304,  586,  753],
        [1025, 1025, 1025, 1025, 1024,  633,  996,  586,   83],
        [1025, 1025, 1025, 1025, 1025, 1024,  179,  898,  928],
        [1025, 1025, 1025, 1025, 1025, 1025, 1024,  506,  102],
        [1025, 1025, 1025, 1025, 1025, 1025, 1025, 1024,   79],
        [1025, 1025, 1025, 1025, 1025, 1025, 1025, 1025, 1024]])
        # fmt: on

        torch.testing.assert_close(outputs_1[0].cpu(), EXPECTED_OUTPUT_TOKENS_1)
        torch.testing.assert_close(outputs_2[1, 5:].cpu(), EXPECTED_OUTPUT_TOKENS_2)  # left padding
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/dia/test_modeling_dia.py",
"license": "Apache License 2.0",
"lines": 639,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:src/transformers/models/smollm3/modular_smollm3.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections.abc import Callable
import torch
from ...cache_utils import Cache
from ...configuration_utils import PreTrainedConfig, layer_type_validation
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from ...modeling_rope_utils import RopeParameters
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS
from ...processing_utils import Unpack
from ...utils import logging
from ..llama.modeling_llama import (
LlamaAttention,
LlamaDecoderLayer,
LlamaForCausalLM,
LlamaForQuestionAnswering,
LlamaForSequenceClassification,
LlamaForTokenClassification,
LlamaPreTrainedModel,
apply_rotary_pos_emb,
eager_attention_forward,
)
from ..qwen2.modeling_qwen2 import Qwen2Model, Qwen2RotaryEmbedding
logger = logging.get_logger(__name__)
class SmolLM3Config(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`SmolLM3Model`]. It is used to instantiate a
SmolLM3 model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the SmolLM3 3B.
e.g. [HuggingFaceTB/SmolLM3-3B](https://huggingface.co/HuggingFaceTB/SmolLM3-3B)
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 128256):
Vocabulary size of the SmolLM3 model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`SmolLM3Model`]
hidden_size (`int`, *optional*, defaults to 2048):
Dimension of the hidden representations.
intermediate_size (`int`, *optional*, defaults to 11008):
Dimension of the MLP representations.
num_hidden_layers (`int`, *optional*, defaults to 36):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer encoder.
num_key_value_heads (`int`, *optional*, defaults to 4):
This is the number of key_value heads that should be used to implement Grouped Query Attention. If
`num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
`num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
by meanpooling all the original heads within that group. For more details checkout [this
paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to `16`.
hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
The non-linear activation function (function or string) in the decoder.
max_position_embeddings (`int`, *optional*, defaults to 32768):
The maximum sequence length that this model might ever be used with.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
rms_norm_eps (`float`, *optional*, defaults to 1e-06):
The epsilon used by the rms normalization layers.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
pad_token_id (`int`, *optional*, defaults to 128004):
The id of the padding token.
bos_token_id (`int`, *optional*, defaults to 128000):
The id of the beginning of sentence token.
eos_token_id (`int`, *optional*, defaults to 128001):
The id of the end of sentence token.
rope_parameters (`RopeParameters`, *optional*):
Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain
a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE
with longer `max_position_embeddings`.
use_sliding_window (`bool`, *optional*, defaults to `False`):
Whether to use sliding window attention.
sliding_window (`int`, *optional*):
Sliding window attention (SWA) window size. If not specified, will default to `None`.
no_rope_layers (`List[int]`, *optional*):
List with at least the same length as the number of layers in the model.
A `1` at an index position indicates that the corresponding layer will use RoPE,
while a `0` indicates that it's a NoPE layer.
no_rope_layer_interval (`int`, *optional*, defaults to 4):
If `no_rope_layers` is `None`, it will be created using a NoPE layer every
`no_rope_layer_interval` layers.
layer_types (`list`, *optional*):
Attention pattern for each layer. Automatically computed based on sliding window and NoPE settings.
attention_bias (`bool`, *optional*, defaults to `False`):
Whether to use a bias in the query, key, value and output projection layers during self-attention.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
```python
>>> from transformers import SmolLM3Model, SmolLM3Config
>>> # Initializing a SmolLM3 style configuration
>>> configuration = SmolLM3Config()
>>> # Initializing a model from the SmolLM3 style configuration
>>> model = SmolLM3Model(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "smollm3"
keys_to_ignore_at_inference = ["past_key_values"]
default_theta = 2000000.0
base_model_tp_plan = {
"layers.*.self_attn.q_proj": "colwise",
"layers.*.self_attn.k_proj": "colwise",
"layers.*.self_attn.v_proj": "colwise",
"layers.*.self_attn.o_proj": "rowwise",
"layers.*.mlp.gate_proj": "colwise",
"layers.*.mlp.up_proj": "colwise",
"layers.*.mlp.down_proj": "rowwise",
}
base_model_pp_plan = {
"embed_tokens": (["input_ids"], ["inputs_embeds"]),
"layers": (["hidden_states", "attention_mask"], ["hidden_states"]),
"norm": (["hidden_states"], ["hidden_states"]),
}
def __init__(
self,
vocab_size: int | None = 128256,
hidden_size: int | None = 2048,
intermediate_size: int | None = 11008,
num_hidden_layers: int | None = 36,
num_attention_heads: int | None = 16,
num_key_value_heads: int | None = 4,
hidden_act: str | None = "silu",
max_position_embeddings: int | None = 32768,
initializer_range: float | None = 0.02,
rms_norm_eps: int | None = 1e-6,
use_cache: bool | None = True,
pad_token_id: int | None = 128004,
bos_token_id: int | None = 128000,
eos_token_id: int | None = 128001,
rope_parameters: RopeParameters | dict[str, RopeParameters] | None = None,
use_sliding_window: bool | None = False,
sliding_window: int | None = None,
no_rope_layers: int | None = None,
no_rope_layer_interval: int | None = 4,
layer_types: int | None = None,
attention_bias: bool | None = False,
attention_dropout: float | None = 0.0,
mlp_bias: bool | None = False,
tie_word_embeddings: bool | None = True,
**kwargs,
):
self.pad_token_id = pad_token_id
self.bos_token_id = bos_token_id
self.eos_token_id = eos_token_id
self.tie_word_embeddings = tie_word_embeddings
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.mlp_bias = mlp_bias
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.use_sliding_window = use_sliding_window
self.sliding_window = sliding_window
# for backward compatibility
if num_key_value_heads is None:
num_key_value_heads = num_attention_heads
self.num_key_value_heads = num_key_value_heads
self.hidden_act = hidden_act
self.initializer_range = initializer_range
self.rms_norm_eps = rms_norm_eps
self.use_cache = use_cache
self.attention_bias = attention_bias
self.attention_dropout = attention_dropout
if no_rope_layers is None:
self.no_rope_layers = [
int((layer_idx + 1) % no_rope_layer_interval != 0) for layer_idx in range(num_hidden_layers)
]
else:
self.no_rope_layers = no_rope_layers
self.no_rope_layer_interval = no_rope_layer_interval
# Update layer_types based on sliding window and NoPE pattern
if layer_types is None:
layer_types = []
for layer_idx in range(num_hidden_layers):
has_rope = self.no_rope_layers[layer_idx]
if use_sliding_window and sliding_window is not None and not has_rope:
layer_types.append("sliding_attention")
else:
layer_types.append("full_attention")
self.layer_types = layer_types
layer_type_validation(self.layer_types, self.num_hidden_layers)
self.rope_parameters = rope_parameters
super().__init__(**kwargs)
class SmolLM3RotaryEmbedding(Qwen2RotaryEmbedding):
    """Rotary position embedding for SmolLM3; identical to Qwen2's implementation."""

    pass
class SmolLM3Attention(LlamaAttention):
    """Llama-style attention with two SmolLM3 twists: per-layer RoPE (NoPE layers skip
    rotary embeddings) and an optional per-layer sliding window."""

    def __init__(self, config: SmolLM3Config, layer_idx: int):
        super().__init__(config, layer_idx)
        # 1 -> this layer applies rotary embeddings; 0 -> NoPE layer (skips them in forward).
        self.use_rope = config.no_rope_layers[layer_idx]
        # Window size is only set when SWA is globally enabled AND this layer is marked sliding.
        self.sliding_window = (
            config.sliding_window
            if config.use_sliding_window and config.layer_types[layer_idx] == "sliding_attention"
            else None
        )

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: tuple[torch.Tensor, torch.Tensor],
        attention_mask: torch.Tensor | None,
        past_key_values: Cache | None = None,
        cache_position: torch.LongTensor | None = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> tuple[torch.Tensor, torch.Tensor | None]:
        """Self-attention forward pass.

        Returns `(attn_output, attn_weights)`; `attn_weights` may be `None` depending on
        the selected attention backend.
        """
        input_shape = hidden_states.shape[:-1]
        hidden_shape = (*input_shape, -1, self.head_dim)

        # Project and reshape to (batch, num_heads, seq_len, head_dim).
        query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)

        # NoPE layers skip rotary embedding entirely.
        if self.use_rope:
            cos, sin = position_embeddings
            query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)

        # Append this step's K/V to the cache (must happen after RoPE so cached keys are rotated).
        if past_key_values is not None:
            cache_kwargs = {"cache_position": cache_position}
            key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)

        # Dispatch to the configured attention backend (eager/sdpa/flash-attention/...).
        attention_interface: Callable = ALL_ATTENTION_FUNCTIONS.get_interface(
            self.config._attn_implementation, eager_attention_forward
        )

        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=0.0 if not self.training else self.attention_dropout,
            scaling=self.scaling,
            sliding_window=self.sliding_window,
            **kwargs,
        )

        # Merge heads back into the hidden dimension and apply the output projection.
        attn_output = attn_output.reshape(*input_shape, -1).contiguous()
        attn_output = self.o_proj(attn_output)
        return attn_output, attn_weights
class SmolLM3DecoderLayer(LlamaDecoderLayer):
    """Llama decoder layer that records its attention kind for mask selection."""

    def __init__(self, config: SmolLM3Config, layer_idx: int):
        super().__init__(config, layer_idx)
        # "sliding_attention" or "full_attention", taken from the per-layer config pattern.
        self.attention_type = config.layer_types[layer_idx]
class SmolLM3PreTrainedModel(LlamaPreTrainedModel):
    """Base class providing weight init and pretrained-loading; unchanged from Llama."""

    pass
class SmolLM3Model(Qwen2Model):
    """Bare SmolLM3 decoder stack; reuses the Qwen2 model implementation."""

    pass
class SmolLM3ForCausalLM(LlamaForCausalLM):
    """SmolLM3 with a language-modeling head; reuses the Llama causal-LM implementation."""

    pass
class SmolLM3ForSequenceClassification(LlamaForSequenceClassification):
    """SmolLM3 with a sequence-classification head on top; unchanged from Llama."""

    pass
class SmolLM3ForTokenClassification(LlamaForTokenClassification):
    """SmolLM3 with a token-classification head on top; unchanged from Llama."""

    pass
class SmolLM3ForQuestionAnswering(LlamaForQuestionAnswering):
    """SmolLM3 with an extractive question-answering head; unchanged from Llama."""

    pass
# Public API of this module, picked up by the modular-model converter and auto classes.
__all__ = [
    "SmolLM3Config",
    "SmolLM3PreTrainedModel",
    "SmolLM3Model",
    "SmolLM3ForCausalLM",
    "SmolLM3ForSequenceClassification",
    "SmolLM3ForTokenClassification",
    "SmolLM3ForQuestionAnswering",
]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/smollm3/modular_smollm3.py",
"license": "Apache License 2.0",
"lines": 270,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:tests/models/smollm3/test_modeling_smollm3.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch SmolLM3 model."""
import gc
import unittest
import pytest
from parameterized import parameterized
from transformers import AutoTokenizer, BitsAndBytesConfig, SmolLM3Config, is_torch_available
from transformers.generation.configuration_utils import GenerationConfig
from transformers.testing_utils import (
backend_empty_cache,
is_flaky,
require_bitsandbytes,
require_flash_attn,
require_torch,
slow,
torch_device,
)
from transformers.utils.import_utils import is_torch_greater_or_equal
if is_torch_available():
import torch
from transformers import (
SmolLM3ForCausalLM,
SmolLM3ForQuestionAnswering,
SmolLM3ForSequenceClassification,
SmolLM3ForTokenClassification,
SmolLM3Model,
)
from ...causal_lm_tester import CausalLMModelTest, CausalLMModelTester
from ...test_modeling_common import (
TEST_EAGER_MATCHES_SDPA_INFERENCE_PARAMETERIZATION,
ModelTesterMixin,
)
class SmolLM3ModelTester(CausalLMModelTester):
    """Wires the shared causal-LM test harness to the SmolLM3 model family."""

    # Config class used to instantiate tiny randomly-initialized test models.
    config_class = SmolLM3Config
    if is_torch_available():
        base_model_class = SmolLM3Model
        causal_lm_class = SmolLM3ForCausalLM
        question_answering_class = SmolLM3ForQuestionAnswering
        sequence_classification_class = SmolLM3ForSequenceClassification
        token_classification_class = SmolLM3ForTokenClassification
@require_torch
class SmolLM3ModelTest(CausalLMModelTest, unittest.TestCase):
    """Runs the common causal-LM test suite against SmolLM3."""

    model_tester_class = SmolLM3ModelTester

    @parameterized.expand(TEST_EAGER_MATCHES_SDPA_INFERENCE_PARAMETERIZATION)
    @is_flaky()
    def test_eager_matches_sdpa_inference(self, *args):
        # flaky test_eager_matches_sdpa_inference_24_fp32_pad_left_output_attentions
        # Re-dispatch to the mixin implementation so the @is_flaky retry wrapper applies.
        return getattr(ModelTesterMixin, self._testMethodName)(self)
@require_torch
class SmolLM3IntegrationTest(unittest.TestCase):
    """Slow integration tests running the real HuggingFaceTB/SmolLM3-3B checkpoint."""

    # Hub checkpoint shared by every test in this class.
    model_id = "HuggingFaceTB/SmolLM3-3B"

    @slow
    def test_model_3b_logits(self):
        """Compare model logits against golden values (per-token mean and a raw slice)."""
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = SmolLM3ForCausalLM.from_pretrained(self.model_id, device_map="auto")
        input_ids = torch.tensor([input_ids]).to(model.model.embed_tokens.weight.device)
        with torch.no_grad():
            out = model(input_ids).logits.float().cpu()
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[9.3306, 8.1721, 6.4764, 7.6011, 11.1218, 7.5343, 7.1195, 8.0956]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, rtol=1e-2, atol=1e-2)
        # slicing logits[0, 0, 0:30]
        EXPECTED_SLICE = torch.tensor(
            [15.7759, 17.6274, 16.3404, 14.5543, 13.1366, 14.2475, 15.8710, 15.6753, 12.3856, 13.0386, 14.0792, 12.7253,
             13.9634, 12.1271, 12.4320, 16.0329, 17.3975, 17.1396, 17.8666, 17.0103, 17.2962, 16.8777, 16.7144, 16.3023,
             16.6084, 12.4649, 12.0723, 14.1148, 14.8239, 15.2733])  # fmt: skip
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, rtol=1e-4, atol=1e-4)

        del model
        backend_empty_cache(torch_device)
        gc.collect()

    @slow
    def test_model_3b_generation(self):
        """Greedy generation must reproduce the recorded completion exactly."""
        EXPECTED_TEXT_COMPLETION = """Gravity is the force that pulls objects toward the center of the Earth. It is a force that is always present, even"""
        prompt = "Gravity is the force"
        tokenizer = AutoTokenizer.from_pretrained(self.model_id)
        model = SmolLM3ForCausalLM.from_pretrained(self.model_id, device_map="auto")
        input_ids = tokenizer.encode(prompt, return_tensors="pt").to(model.model.embed_tokens.weight.device)

        # greedy generation outputs
        generated_ids = model.generate(input_ids, max_new_tokens=20, temperature=0)
        text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
        self.assertEqual(EXPECTED_TEXT_COMPLETION, text)

        del model
        backend_empty_cache(torch_device)
        gc.collect()

    @require_bitsandbytes
    @slow
    @require_flash_attn
    @pytest.mark.flash_attn_test
    def test_model_3b_long_prompt(self):
        """Exercise flash-attention with a 4097-token prompt (longer than the sliding window)."""
        EXPECTED_OUTPUT_TOKEN_IDS = [306, 338]
        # An input with 4097 tokens that is above the size of the sliding window
        input_ids = [1] + [306, 338] * 2048
        model = SmolLM3ForCausalLM.from_pretrained(
            self.model_id,
            device_map="auto",
            quantization_config=BitsAndBytesConfig(load_in_4bit=True),
            attn_implementation="flash_attention_2",
        )
        input_ids = torch.tensor([input_ids]).to(model.model.embed_tokens.weight.device)
        generated_ids = model.generate(input_ids, max_new_tokens=4, temperature=0)
        self.assertEqual(EXPECTED_OUTPUT_TOKEN_IDS, generated_ids[0][-2:].tolist())

        # Assisted generation
        assistant_model = model
        assistant_model.generation_config.num_assistant_tokens = 2
        assistant_model.generation_config.num_assistant_tokens_schedule = "constant"
        # NOTE(review): `assistant_model` is configured but never passed to `generate`, so this call
        # repeats plain greedy decoding; presumably it should pass `assistant_model=assistant_model`
        # — confirm intent before changing what the test exercises.
        generated_ids = model.generate(input_ids, max_new_tokens=4, temperature=0)
        self.assertEqual(EXPECTED_OUTPUT_TOKEN_IDS, generated_ids[0][-2:].tolist())

        del assistant_model
        del model
        backend_empty_cache(torch_device)
        gc.collect()

    @pytest.mark.torch_export_test
    @slow
    def test_export_static_cache(self):
        """Export the model with a static cache via torch.export and verify generation parity."""
        from transformers.integrations.executorch import (
            TorchExportableModuleWithStaticCache,
            convert_and_export_with_cache,
        )

        tokenizer = AutoTokenizer.from_pretrained(
            self.model_id, pad_token="<|finetune_right_pad_id|>", padding_side="right"
        )
        EXPECTED_TEXT_COMPLETION = "Gravity is the force that pulls objects toward the center of the Earth. It is a force that is always present, and"
        max_generation_length = tokenizer(EXPECTED_TEXT_COMPLETION, return_tensors="pt", padding=True)[
            "input_ids"
        ].shape[-1]

        # Load model
        device = "cpu"  # TODO (joao / export experts): should be on `torch_device`, but causes GPU OOM
        dtype = torch.bfloat16
        cache_implementation = "static"
        attn_implementation = "sdpa"
        batch_size = 1
        model = SmolLM3ForCausalLM.from_pretrained(
            self.model_id,
            device_map=device,
            dtype=dtype,
            attn_implementation=attn_implementation,
            generation_config=GenerationConfig(
                use_cache=True,
                cache_implementation=cache_implementation,
                max_length=max_generation_length,
                cache_config={
                    "batch_size": batch_size,
                    "max_cache_len": max_generation_length,
                },
            ),
        )

        prompt = ["Gravity is the force"]
        prompt_tokens = tokenizer(prompt, return_tensors="pt", padding=True).to(model.device)
        prompt_token_ids = prompt_tokens["input_ids"]
        max_new_tokens = max_generation_length - prompt_token_ids.shape[-1]

        # Static Cache + export
        strict = is_torch_greater_or_equal("2.7.0")  # Due to https://github.com/pytorch/pytorch/issues/150994
        exported_program = convert_and_export_with_cache(model, strict=strict)
        ep_generated_ids = TorchExportableModuleWithStaticCache.generate(
            exported_program=exported_program, prompt_token_ids=prompt_token_ids, max_new_tokens=max_new_tokens
        )
        ep_generated_text = tokenizer.batch_decode(ep_generated_ids, skip_special_tokens=True)
        # `batch_decode` returns a list of strings; compare the single decoded sequence to the
        # expected string (comparing the list itself to a string could never be equal).
        self.assertEqual(EXPECTED_TEXT_COMPLETION, ep_generated_text[0])
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/smollm3/test_modeling_smollm3.py",
"license": "Apache License 2.0",
"lines": 170,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:tests/generation/test_flash_attention_parity.py | # Copyright 2025 Eduard Durech and SGLang team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Usage:
# RUN_SLOW=1 pytest -s tests/generation/test_flash_attention_parity.py
import unittest
import pytest
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.testing_utils import require_flash_attn, require_flash_attn_3, require_torch_gpu, slow
class FlashAttentionParityTest(unittest.TestCase):
    """Checks that Flash Attention 3 matches Flash Attention 2 in logits and generated text,
    and reports a latency comparison between the two backends."""

    # From https://github.com/sgl-project/sglang/blob/main/python/sglang/test/test_utils.py
    def _lcs(self, X, Y):
        """Length of the longest common subsequence of X and Y (classic O(m*n) DP table)."""
        m = len(X)
        n = len(Y)
        L = [[0] * (n + 1) for _ in range(m + 1)]
        for i in range(m + 1):
            for j in range(n + 1):
                if i == 0 or j == 0:
                    L[i][j] = 0
                elif X[i - 1] == Y[j - 1]:
                    L[i][j] = L[i - 1][j - 1] + 1
                else:
                    L[i][j] = max(L[i - 1][j], L[i][j - 1])
        return L[m][n]

    # From https://github.com/sgl-project/sglang/blob/main/python/sglang/test/test_utils.py
    def _calculate_rouge_l(self, output_strs_list1, output_strs_list2):
        """Pairwise ROUGE-L F-measure between two lists of strings (LCS-based precision/recall)."""
        rouge_l_scores = []

        for s1, s2 in zip(output_strs_list1, output_strs_list2):
            lcs_len = self._lcs(s1, s2)
            precision = lcs_len / len(s1) if len(s1) > 0 else 0
            recall = lcs_len / len(s2) if len(s2) > 0 else 0
            if precision + recall > 0:
                fmeasure = (2 * precision * recall) / (precision + recall)
            else:
                fmeasure = 0.0
            rouge_l_scores.append(fmeasure)

        return rouge_l_scores

    def _benchmark_generation(self, model, inputs, n_warmup=3, n_runs=5):
        """Mean per-call generation latency in ms, timed with CUDA events after warmup runs."""
        for _ in range(n_warmup):
            model.generate(**inputs, max_new_tokens=20, do_sample=False)
        torch.cuda.synchronize()

        start_time = torch.cuda.Event(enable_timing=True)
        end_time = torch.cuda.Event(enable_timing=True)

        start_time.record()
        for _ in range(n_runs):
            model.generate(**inputs, max_new_tokens=20, do_sample=False)
        end_time.record()
        torch.cuda.synchronize()

        return start_time.elapsed_time(end_time) / n_runs

    @pytest.mark.flash_attn_3_test
    @require_torch_gpu
    @require_flash_attn
    @require_flash_attn_3
    @slow
    def test_flash_attention_2_3_parity(self):
        """End-to-end parity test: logits, generated text (ROUGE-L > 0.99), and latency."""
        model_id = "meta-llama/Llama-3.2-1B-Instruct"
        prompt = ["The ETH AI Center is", "What is life?"]

        # 1. Load model and tokenizer
        model = AutoModelForCausalLM.from_pretrained(
            model_id,
            dtype=torch.bfloat16,
            attn_implementation="flash_attention_2",
        ).to("cuda")
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        tokenizer.pad_token_id = tokenizer.eos_token_id

        # 2. Generate with both models
        inputs = tokenizer(prompt, padding=True, padding_side="left", return_tensors="pt").to("cuda")
        with torch.no_grad():
            output_2 = model.generate(
                **inputs, max_new_tokens=20, do_sample=False, output_scores=True, return_dict_in_generate=True
            )
            # Swap the attention backend in place and rerun the exact same greedy generation.
            model.set_attn_implementation("flash_attention_3")
            output_3 = model.generate(
                **inputs, max_new_tokens=20, do_sample=False, output_scores=True, return_dict_in_generate=True
            )

        # 3. Correctness check
        # 3a. Logits
        logits_2 = torch.stack(output_2.scores)
        logits_3 = torch.stack(output_3.scores)
        torch.testing.assert_close(logits_2, logits_3, atol=1e-3, rtol=1e-3)
        logprobs_2 = torch.nn.functional.log_softmax(logits_2, dim=-1)
        logprobs_3 = torch.nn.functional.log_softmax(logits_3, dim=-1)
        max_logprob_diff = torch.max(torch.abs(logprobs_2 - logprobs_3)).item()
        # 3b. Generated text
        text_2s, text_3s = [], []
        for i in range(len(prompt)):
            text_2s.append(tokenizer.decode(output_2.sequences[i], skip_special_tokens=True))
            text_3s.append(tokenizer.decode(output_3.sequences[i], skip_special_tokens=True))
        rouge_scores = self._calculate_rouge_l(text_2s, text_3s)
        for i in range(len(rouge_scores)):
            assert rouge_scores[i] > 0.99, f"Generated texts at prompt {i} do not match (ROUGE-L: {rouge_scores[i]})"

        # 4. Performance check
        # NOTE: the model is currently on FA3, so FA3 is benchmarked first, then FA2 after the swap.
        with torch.no_grad():
            time_3 = self._benchmark_generation(model, inputs)
            model.set_attn_implementation("flash_attention_2")
            time_2 = self._benchmark_generation(model, inputs)

        print(f"\n--- Flash Attention {2, 3} Parity Test on {model_id} ---")
        print(f"Prompt: '{prompt}'")
        print(f"Generated text with Flash Attention 2: {text_2s}")
        print(f"Generated text with Flash Attention 3: {text_3s}")
        print(f"ROUGE-L: {rouge_scores}")
        print(f"Max absolute difference in logprobs: {max_logprob_diff:.5e}")
        print(f"Flash Attention 2 latency: {time_2:.2f} ms")
        print(f"Flash Attention 3 latency: {time_3:.2f} ms")
        print(f"Speed-up: {time_2 / time_3:.2f}x")
        print("---")
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/generation/test_flash_attention_parity.py",
"license": "Apache License 2.0",
"lines": 118,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:src/transformers/models/dots1/configuration_dots1.py | # Copyright 2025 The rednote-hilab team and the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ...configuration_utils import PreTrainedConfig, layer_type_validation
from ...modeling_rope_utils import RopeParameters
from ...utils import logging
logger = logging.get_logger(__name__)
class Dots1Config(PreTrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`Dots1Model`]. It is used to instantiate a
    `dots.llm1` model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of
    [rednote-hilab/dots.llm1.base](https://huggingface.co/rednote-hilab/dots.llm1.base).

    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 152064):
            Vocabulary size of the model. Defines the number of different tokens that can be represented by the
            `input_ids` passed when calling [`Dots1Model`].
        hidden_size (`int`, *optional*, defaults to 4608):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 10944):
            Dimension of the MLP representations.
        moe_intermediate_size (`int`, *optional*, defaults to 1408):
            Dimension of the MoE representations.
        num_hidden_layers (`int`, *optional*, defaults to 62):
            Number of hidden layers in the Transformer decoder.
        num_attention_heads (`int`, *optional*, defaults to 32):
            Number of attention heads for each attention layer in the Transformer decoder.
        num_key_value_heads (`int`, *optional*, defaults to 32):
            Number of key/value heads for Grouped Query Attention. If `num_key_value_heads=num_attention_heads`, Multi
            Head Attention (MHA) is used. If `num_key_value_heads=1`, Multi Query Attention (MQA) is used. Otherwise,
            Grouped Query Attention (GQA) is used. If not specified, defaults to `num_attention_heads`.
        n_shared_experts (`int`, *optional*, defaults to `None`):
            Number of shared experts. None means dense model.
        n_routed_experts (`int`, *optional*, defaults to `None`):
            Number of routed experts. None means dense model.
        n_group (`int`, *optional*, defaults to 1):
            Number of groups for routed experts.
        topk_group (`int`, *optional*, defaults to 1):
            Number of selected groups for each token (selected experts only within `topk_group` groups).
        num_experts_per_tok (`int`, *optional*, defaults to `None`):
            Number of selected experts. None means dense model.
        first_k_dense_replace (`int`, *optional*, defaults to 0):
            Number of dense layers at the beginning of the model before the first MoE layer.
        norm_topk_prob (`bool`, *optional*, defaults to `False`):
            Whether to normalize the weights of the routed experts.
        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string).
        max_position_embeddings (`int`, *optional*, defaults to 2048):
            Maximum sequence length the model might ever be used with.
        initializer_range (`float`, *optional*, defaults to 0.02):
            Standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        rms_norm_eps (`float`, *optional*, defaults to 1e-06):
            Epsilon used by the RMS normalization layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions. Only relevant if `config.is_decoder=True`.
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether to tie the input and output word embeddings.
        rope_parameters (`RopeParameters`, *optional*):
            Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain
            a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE
            with longer `max_position_embeddings`.
        attention_bias (`bool`, *optional*, defaults to `False`):
            Whether to use a bias in the self-attention projections.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            Dropout ratio for the attention probabilities.
        routed_scaling_factor (`float`, *optional*, defaults to 1.0):
            Scaling factor for routed experts.
        sliding_window (`int`, *optional*, defaults to 4096):
            Size of the sliding window for attention. If not specified, defaults to `4096`.
        max_window_layers (`int`, *optional*, defaults to 62):
            The number of layers using full attention. The first `max_window_layers` layers will use full attention, while any
            additional layer afterwards will use SWA (Sliding Window Attention).
        layer_types (`list`, *optional*):
            Attention pattern for each layer.
        pad_token_id (`int`, *optional*):
            Padding token id.
        bos_token_id (`int`, *optional*):
            Beginning of stream token id.
        eos_token_id (`int`, *optional*):
            End of stream token id.

    Examples:

    ```python
    >>> from transformers import Dots1Model, Dots1Config

    >>> # Initializing a Dots1 style configuration
    >>> configuration = Dots1Config()

    >>> # Initializing a model from the configuration
    >>> model = Dots1Model(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```
    """

    model_type = "dots1"
    keys_to_ignore_at_inference = ["past_key_values"]
    # Tensor-parallel sharding plan: column-wise input projections, row-wise output projections.
    base_model_tp_plan = {
        "layers.*.self_attn.q_proj": "colwise",
        "layers.*.self_attn.k_proj": "colwise",
        "layers.*.self_attn.v_proj": "colwise",
        "layers.*.self_attn.o_proj": "rowwise",
        "layers.*.mlp.experts.gate_up_proj": "rowwise",
        "layers.*.mlp.experts.down_proj": "rowwise",
        "layers.*.mlp.shared_experts.gate_proj": "colwise",
        "layers.*.mlp.shared_experts.up_proj": "colwise",
        "layers.*.mlp.shared_experts.down_proj": "rowwise",
        "layers.*.mlp.gate_proj": "colwise",
        "layers.*.mlp.up_proj": "colwise",
        "layers.*.mlp.down_proj": "rowwise",
    }
    # Pipeline-parallel plan: module name -> (input tensor names, output tensor names).
    base_model_pp_plan = {
        "embed_tokens": (["input_ids"], ["inputs_embeds"]),
        "layers": (["hidden_states", "attention_mask"], ["hidden_states"]),
        "norm": (["hidden_states"], ["hidden_states"]),
    }
    # Generic attribute name -> dots1-specific attribute name.
    attribute_map = {
        "num_local_experts": "n_routed_experts",
    }

    def __init__(
        self,
        vocab_size: int | None = 152064,
        hidden_size: int | None = 4608,
        intermediate_size: int | None = 10944,
        moe_intermediate_size: int | None = 1408,
        num_hidden_layers: int | None = 62,
        num_attention_heads: int | None = 32,
        num_key_value_heads: int | None = 32,
        n_shared_experts: int | None = None,
        n_routed_experts: int | None = None,
        n_group: int | None = 1,
        topk_group: int | None = 1,
        num_experts_per_tok: int | None = None,
        first_k_dense_replace: int | None = 0,
        norm_topk_prob: bool | None = False,
        hidden_act: str | None = "silu",
        max_position_embeddings: int | None = 2048,
        initializer_range: float | None = 0.02,
        rms_norm_eps: float | None = 1e-6,
        use_cache: bool | None = True,
        tie_word_embeddings: bool | None = False,
        rope_parameters: RopeParameters | dict[str, RopeParameters] | None = None,
        attention_bias: bool | None = False,
        attention_dropout: float | None = 0.0,
        routed_scaling_factor: float | None = 1.0,
        sliding_window: int | None = 4096,
        max_window_layers: int | None = 62,
        layer_types: list[str] | None = None,
        pad_token_id: int | None = None,
        bos_token_id: int | None = None,
        eos_token_id: int | None = None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.moe_intermediate_size = moe_intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.n_shared_experts = n_shared_experts
        self.n_routed_experts = n_routed_experts
        self.num_experts_per_tok = num_experts_per_tok
        self.first_k_dense_replace = first_k_dense_replace
        self.norm_topk_prob = norm_topk_prob
        # Backward compatibility: configs without num_key_value_heads fall back to MHA.
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads
        self.n_group = n_group
        self.topk_group = topk_group
        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.attention_bias = attention_bias
        self.attention_dropout = attention_dropout
        self.routed_scaling_factor = routed_scaling_factor
        self.sliding_window = sliding_window
        self.max_window_layers = max_window_layers
        self.layer_types = layer_types
        # Derive the per-layer attention pattern: the first `max_window_layers` layers use
        # full attention, the rest sliding-window attention (when a window is configured).
        if self.layer_types is None:
            self.layer_types = [
                "sliding_attention"
                if self.sliding_window is not None and i >= self.max_window_layers
                else "full_attention"
                for i in range(self.num_hidden_layers)
            ]
        layer_type_validation(self.layer_types, self.num_hidden_layers)
        self.tie_word_embeddings = tie_word_embeddings
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.rope_parameters = rope_parameters
        super().__init__(**kwargs)
# Public API of this module, picked up by transformers' auto classes.
__all__ = ["Dots1Config"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/dots1/configuration_dots1.py",
"license": "Apache License 2.0",
"lines": 201,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/dots1/modular_dots1.py | # Copyright 2025 The rednote-hilab team and the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ...modeling_outputs import CausalLMOutputWithPast
from ...processing_utils import Unpack
from ...utils import logging
from ..deepseek_v3.modeling_deepseek_v3 import (
DeepseekV3DecoderLayer,
DeepseekV3MLP,
DeepseekV3MoE,
DeepseekV3PreTrainedModel,
DeepseekV3TopkRouter,
)
from ..qwen3.modeling_qwen3 import (
Qwen3Attention,
Qwen3ForCausalLM,
Qwen3Model,
Qwen3RMSNorm,
Qwen3RotaryEmbedding,
TransformersKwargs,
)
from .configuration_dots1 import Dots1Config
logger = logging.get_logger(__name__)
class Dots1RMSNorm(Qwen3RMSNorm):
    """RMS normalization for dots.llm1; identical to Qwen3's implementation."""

    pass
class Dots1RotaryEmbedding(Qwen3RotaryEmbedding):
    """Rotary position embedding for dots.llm1; identical to Qwen3's implementation."""

    pass
class Dots1Attention(Qwen3Attention):
    """Self-attention for dots.llm1; reuses Qwen3 attention unchanged."""

    pass
class Dots1MLP(DeepseekV3MLP):
    """Dense feed-forward block; reuses the DeepseekV3 MLP unchanged."""

    pass
class Dots1TopkRouter(DeepseekV3TopkRouter):
    """Top-k expert router; reuses the DeepseekV3 router unchanged."""

    pass
class Dots1MoE(DeepseekV3MoE):
    """DeepseekV3-style mixture-of-experts whose router uses sigmoid scores instead of softmax."""

    def route_tokens_to_experts(self, router_logits):
        """Grouped top-k routing: pick the best `topk_group` expert groups per token, then the
        top `top_k` experts within them. Returns `(topk_indices, topk_weights)`."""
        router_logits = router_logits.sigmoid()  # main diff with deepseekv3
        # Bias-corrected scores are used only for expert *selection*; the returned weights
        # are gathered from the uncorrected sigmoid scores below.
        router_logits_for_choice = router_logits + self.gate.e_score_correction_bias
        # Score each group of experts by the sum of its two best expert scores.
        group_scores = (
            router_logits_for_choice.view(-1, self.n_group, self.n_routed_experts // self.n_group)
            .topk(2, dim=-1)[0]
            .sum(dim=-1)
        )
        group_idx = torch.topk(group_scores, k=self.topk_group, dim=-1, sorted=False)[1]
        # Build a per-expert mask that keeps only experts belonging to the selected groups.
        group_mask = torch.zeros_like(group_scores)
        group_mask.scatter_(1, group_idx, 1)
        score_mask = (
            group_mask.unsqueeze(-1)
            .expand(-1, self.n_group, self.n_routed_experts // self.n_group)
            .reshape(-1, self.n_routed_experts)
        )
        # Zero out experts outside the selected groups, then take the per-token top-k.
        scores_for_choice = router_logits_for_choice.masked_fill(~score_mask.bool(), 0.0)
        topk_indices = torch.topk(scores_for_choice, k=self.top_k, dim=-1, sorted=False)[1]
        topk_weights = router_logits.gather(1, topk_indices)
        if self.norm_topk_prob:
            # Normalize selected weights to sum to 1 (epsilon guards against division by zero).
            denominator = topk_weights.sum(dim=-1, keepdim=True) + 1e-20
            topk_weights /= denominator
        topk_weights = topk_weights * self.routed_scaling_factor
        return topk_indices, topk_weights
class Dots1DecoderLayer(DeepseekV3DecoderLayer):
    """DeepseekV3 decoder layer that records its attention kind for mask selection."""

    def __init__(self, config: Dots1Config, layer_idx: int):
        super().__init__(config, layer_idx)
        # "sliding_attention" or "full_attention", taken from the per-layer config pattern.
        self.attention_type = config.layer_types[layer_idx]
class Dots1PreTrainedModel(DeepseekV3PreTrainedModel):
    # Reset the parent's ignore list: dots.llm1 checkpoints have no unexpected keys to skip.
    _keys_to_ignore_on_load_unexpected = None
class Dots1Model(Qwen3Model):
    """Bare dots.llm1 decoder stack; reuses the Qwen3 model implementation."""

    pass
class Dots1ForCausalLM(Qwen3ForCausalLM):
    """dots.llm1 with a language-modeling head; inherits the Qwen3 causal-LM implementation
    and only overrides `forward` to attach model-specific documentation."""

    def forward(
        self,
        **super_kwargs: Unpack[TransformersKwargs],
    ) -> CausalLMOutputWithPast:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Example:

        ```python
        >>> from transformers import AutoTokenizer, Dots1ForCausalLM

        >>> model = Dots1ForCausalLM.from_pretrained("rednote-hilab/dots1.llm1.inst")
        >>> tokenizer = AutoTokenizer.from_pretrained("rednote-hilab/dots1.llm1.inst")

        >>> prompt = "Hey, are you conscious? Can you talk to me?"
        >>> inputs = tokenizer(prompt, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
        ```"""
        # Delegate entirely to the parent implementation; this override exists for the docstring.
        return super().forward(**super_kwargs)
# Public API of this module, picked up by the modular-model converter and auto classes.
__all__ = [
    "Dots1PreTrainedModel",
    "Dots1Model",
    "Dots1ForCausalLM",
]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/dots1/modular_dots1.py",
"license": "Apache License 2.0",
"lines": 105,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:tests/models/dots1/test_modeling_dots1.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch dots1 model."""
import gc
import unittest
from transformers import AutoTokenizer, is_torch_available
from transformers.testing_utils import (
backend_empty_cache,
cleanup,
require_torch,
require_torch_accelerator,
slow,
torch_device,
)
from ...causal_lm_tester import CausalLMModelTest, CausalLMModelTester
if is_torch_available():
import torch
from transformers import (
Dots1ForCausalLM,
Dots1Model,
)
class Dots1ModelTester(CausalLMModelTester):
    """Configures a tiny dots1 model for the shared causal-LM test suite."""

    if is_torch_available():
        base_model_class = Dots1Model

    def __init__(
        self,
        parent,
        n_routed_experts=8,
        n_shared_experts=1,
        n_group=1,
        topk_group=1,
        num_experts_per_tok=8,
    ):
        # The generic tester only understands `num_experts_per_tok`; the other
        # MoE knobs are stored on the instance for config construction.
        super().__init__(parent=parent, num_experts_per_tok=num_experts_per_tok)
        moe_overrides = {
            "n_routed_experts": n_routed_experts,
            "n_shared_experts": n_shared_experts,
            "n_group": n_group,
            "topk_group": topk_group,
        }
        for attribute, value in moe_overrides.items():
            setattr(self, attribute, value)
@require_torch
class Dots1ModelTest(CausalLMModelTest, unittest.TestCase):
    # Reuses the common causal-LM test suite wholesale; only the tester class differs.
    model_tester_class = Dots1ModelTester
@require_torch_accelerator
class Dots1IntegrationTest(unittest.TestCase):
    """Slow integration tests that run a real dots1 checkpoint on an accelerator."""
    # This variable is used to determine which CUDA device are we using for our runners (A10 or T4)
    # Depending on the hardware we get different logits / generations
    cuda_compute_capability_major_version = None
    @classmethod
    def setUpClass(cls):
        # Record the compute capability once per class so tests can pick hardware-specific expectations.
        if is_torch_available() and torch.cuda.is_available():
            # 8 is for A100 / A10 and 7 for T4
            cls.cuda_compute_capability_major_version = torch.cuda.get_device_capability()[0]
    def tearDown(self):
        # See LlamaIntegrationTest.tearDown(). Can be removed once LlamaIntegrationTest.tearDown() is removed.
        cleanup(torch_device, gc_collect=False)
    @slow
    def test_model_15b_a2b_generation(self):
        # Greedy decoding from the pinned checkpoint must reproduce this exact completion.
        EXPECTED_TEXT_COMPLETION = (
            """To be or not to be, that is the question:\nWhether 'tis nobler in the mind to suffer\nThe"""
        )
        prompt = "To be or not to"
        tokenizer = AutoTokenizer.from_pretrained("redmoe-ai-v1/dots.llm1.test", use_fast=False)
        model = Dots1ForCausalLM.from_pretrained("redmoe-ai-v1/dots.llm1.test", device_map="auto")
        # `device_map="auto"` may shard the model; place inputs where the embedding weights live.
        input_ids = tokenizer.encode(prompt, return_tensors="pt").to(model.model.embed_tokens.weight.device)
        # greedy generation outputs
        generated_ids = model.generate(input_ids, max_new_tokens=20, do_sample=False)
        text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
        self.assertEqual(EXPECTED_TEXT_COMPLETION, text)
        # Release accelerator memory so later tests are not starved.
        del model
        backend_empty_cache(torch_device)
        gc.collect()
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/dots1/test_modeling_dots1.py",
"license": "Apache License 2.0",
"lines": 81,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:src/transformers/models/t5gemma/modular_t5gemma.py | # Copyright 2025 Google Inc. HuggingFace Inc. team. All rights reserved.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections.abc import Callable
from typing import Any
import torch
import torch.nn as nn
from ... import initialization as init
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from ...configuration_utils import PreTrainedConfig
from ...generation import GenerationMixin
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import (
BaseModelOutput,
BaseModelOutputWithPastAndCrossAttentions,
Seq2SeqLMOutput,
Seq2SeqModelOutput,
SequenceClassifierOutput,
TokenClassifierOutput,
)
from ...modeling_rope_utils import RopeParameters
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...utils import (
TransformersKwargs,
auto_docstring,
can_return_tuple,
logging,
)
from ...utils.generic import merge_with_config_defaults
from ...utils.output_capturing import OutputRecorder, capture_outputs
from ..gemma2.configuration_gemma2 import Gemma2Config
from ..gemma2.modeling_gemma2 import (
Gemma2Attention,
Gemma2MLP,
Gemma2PreTrainedModel,
Gemma2RMSNorm,
Gemma2RotaryEmbedding,
create_causal_mask,
create_sliding_window_causal_mask,
eager_attention_forward,
)
_CHECKPOINT_FOR_DOC = "google/t5gemma-2b-2b-prefixlm-it"
logger = logging.get_logger(__name__)
class T5GemmaModuleConfig(Gemma2Config):
    r"""
    This is the configuration class to store the configuration of a [`T5GemmaModuleModel`]. It is used to instantiate an T5GemmaModule
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
    defaults will yield a similar configuration to that of the T5GemmaModule-7B.
    e.g. [google/t5_gemma_module-7b](https://huggingface.co/google/t5_gemma_module-7b)
    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.
    Args:
        vocab_size (`int`, *optional*, defaults to 256000):
            Vocabulary size of the T5GemmaModule model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`T5GemmaModuleModel`]
        hidden_size (`int`, *optional*, defaults to 2304):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 9216):
            Dimension of the MLP representations.
        num_hidden_layers (`int`, *optional*, defaults to 26):
            Number of hidden layers in the Transformer decoder.
        num_attention_heads (`int`, *optional*, defaults to 8):
            Number of attention heads for each attention layer in the Transformer decoder.
        num_key_value_heads (`int`, *optional*, defaults to 4):
            This is the number of key_value heads that should be used to implement Grouped Query Attention. If
            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
            `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
            by meanpooling all the original heads within that group. For more details, check out [this
            paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to
            `num_attention_heads`.
        head_dim (`int`, *optional*, defaults to 256):
            The attention head dimension.
        hidden_activation (`str` or `function`, *optional*, defaults to `"gelu_pytorch_tanh"`):
            The non-linear activation function (function or string) in the decoder. Will default to `"gelu_pytorch_tanh"`
            if not specified. `"gelu_pytorch_tanh"` uses an approximation of the `"gelu"` activation function.
        max_position_embeddings (`int`, *optional*, defaults to 8192):
            The maximum sequence length that this model might ever be used with.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        rms_norm_eps (`float`, *optional*, defaults to 1e-06):
            The epsilon used by the rms normalization layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        pad_token_id (`int`, *optional*, defaults to 0):
            Padding token id.
        eos_token_id (`int`, *optional*, defaults to 1):
            End of stream token id.
        bos_token_id (`int`, *optional*, defaults to 2):
            Beginning of stream token id.
        tie_word_embeddings (`bool`, *optional*, defaults to `True`):
            Whether to tie weight embeddings
        rope_parameters (`RopeParameters`, *optional*):
            Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain
            a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE
            with longer `max_position_embeddings`.
        attention_bias (`bool`, *optional*, defaults to `False`):
            Whether to use a bias in the query, key, value and output projection layers during self-attention.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        query_pre_attn_scalar (`float`, *optional*, defaults to 256):
            scaling factor used on the attention scores
        sliding_window (`int`, *optional*, defaults to 4096):
            in T5GemmaModule, every other layer uses sliding window attention. This is the size of the sliding window.
        layer_types (`list`, *optional*):
            Attention pattern for each layer.
        final_logit_softcapping (`float`, *optional*, defaults to 30.0):
            scaling factor when applying tanh softcapping on the logits.
        attn_logit_softcapping (`float`, *optional*, defaults to 50.0):
            scaling factor when applying tanh softcapping on the attention scores.
        is_decoder (`bool`, *optional*, defaults to `False`):
            Whether to only use the decoder in an encoder-decoder architecture, otherwise it has no effect on
            decoder-only or encoder-only architectures.
    ```python
    >>> from transformers import T5GemmaModuleModel, T5GemmaModuleConfig
    >>> # Initializing a T5GemmaModule t5_gemma_module-7b style configuration
    >>> configuration = T5GemmaModuleConfig()
    >>> # Initializing a model from the t5_gemma_module-7b style configuration
    >>> model = T5GemmaModuleModel(configuration)
    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""
    def __init__(
        self,
        vocab_size: int | None = 256000,
        hidden_size: int | None = 2304,
        intermediate_size: int | None = 9216,
        num_hidden_layers: int | None = 26,
        num_attention_heads: int | None = 8,
        num_key_value_heads: int | None = 4,
        head_dim: int | None = 256,
        hidden_activation: str | None = "gelu_pytorch_tanh",
        max_position_embeddings: int | None = 8192,
        initializer_range: float | None = 0.02,
        rms_norm_eps: float | None = 1e-6,
        use_cache: bool | None = True,
        pad_token_id: int | None = 0,
        eos_token_id: int | None = 1,
        bos_token_id: int | None = 2,
        tie_word_embeddings: bool | None = True,
        rope_parameters: RopeParameters | dict[str, RopeParameters] | None = None,
        attention_bias: bool | None = False,
        attention_dropout: float | None = 0.0,
        query_pre_attn_scalar: float | None = 256,
        sliding_window: int | None = 4096,
        layer_types: list[str] | None = None,
        final_logit_softcapping: float | None = 30.0,
        attn_logit_softcapping: float | None = 50.0,
        is_decoder: bool | None = False,
        **kwargs,
    ):
        # Extra field on top of Gemma2Config: whether this module acts as the decoder half.
        self.is_decoder = is_decoder
        super().__init__(
            vocab_size=vocab_size,
            hidden_size=hidden_size,
            intermediate_size=intermediate_size,
            num_hidden_layers=num_hidden_layers,
            num_attention_heads=num_attention_heads,
            num_key_value_heads=num_key_value_heads,
            head_dim=head_dim,
            hidden_activation=hidden_activation,
            max_position_embeddings=max_position_embeddings,
            initializer_range=initializer_range,
            rms_norm_eps=rms_norm_eps,
            use_cache=use_cache,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            bos_token_id=bos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            rope_parameters=rope_parameters,
            attention_bias=attention_bias,
            attention_dropout=attention_dropout,
            query_pre_attn_scalar=query_pre_attn_scalar,
            sliding_window=sliding_window,
            layer_types=layer_types,
            final_logit_softcapping=final_logit_softcapping,
            attn_logit_softcapping=attn_logit_softcapping,
            **kwargs,
        )
        # Gemma2-only attribute that has no meaning here: T5Gemma controls
        # directionality via `is_decoder` and its own mask functions instead.
        del self.use_bidirectional_attention
class T5GemmaConfig(PreTrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`T5GemmaModel`]. It is used to instantiate an T5Gemma
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
    defaults will yield a similar configuration to a hypothetical balanced Gemma2 encoder-decoder model.
    e.g. [google/t5gemma-2b-2b-prefixlm-it](https://huggingface.co/google/t5gemma-2b-2b-prefixlm-it)
    ```python
    >>> from transformers import T5GemmaConfig, T5GemmaModel
    >>> t5gemma_config = T5GemmaConfig.from_pretrained("google/t5gemma-2b-2b-prefixlm-it")
    >>> model = T5GemmaModel(t5gemma_config)
    ```
    Configuration objects inherit from [PreTrainedConfig] and can be used to control the model outputs. Read the
    documentation from [PreTrainedConfig] for more information.
    Args:
        encoder (`Union[T5GemmaModuleConfig, dict]`, *optional*):
            Configuration for the encoder.
        decoder (`Union[T5GemmaModuleConfig, dict]`, *optional*):
            Configuration for the decoder.
        is_encoder_decoder (`bool`, *optional*, defaults to `True`):
            Whether the model is used as an encoder/decoder or not.
        dropout_rate (`float`, *optional*, defaults to 0.0):
            The ratio for all dropout layers (following T5).
        classifier_dropout_rate (`float`, *optional*, defaults to 0.0):
            The dropout ratio for classifier (following T5).
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for attention.
        tie_word_embeddings (`bool`, *optional*, defaults to `True`):
            Whether tie input and output embeddings.
        vocab_size (`int`, *optional*, defaults to 256000):
            Vocabulary size of the T5Gemma model (the same as Gemma 2).
        kwargs (additional keyword arguments, *optional*):
            Will be passed to the PreTrainedConfig base class.

    Raises:
        TypeError: If `encoder` or `decoder` is neither `None`, a `dict`, nor a `T5GemmaModuleConfig`.
    """
    model_type = "t5gemma"
    keys_to_ignore_at_inference = ["past_key_values"]
    sub_configs = {"encoder": T5GemmaModuleConfig, "decoder": T5GemmaModuleConfig}
    def __init__(
        self,
        encoder: T5GemmaModuleConfig | dict[Any, Any] | None = None,
        decoder: T5GemmaModuleConfig | dict[Any, Any] | None = None,
        is_encoder_decoder: bool | None = True,
        dropout_rate: float | None = 0.0,
        classifier_dropout_rate: float | None = 0.0,
        attention_dropout: float | None = 0.0,
        tie_word_embeddings: bool | None = True,
        vocab_size: int | None = 256000,
        **kwargs,
    ):
        # Normalize `encoder` into a T5GemmaModuleConfig. Validation raises instead
        # of using `assert`, so it is not stripped when running under `python -O`.
        if isinstance(encoder, dict):
            encoder = T5GemmaModuleConfig(**encoder)
        elif encoder is None:
            encoder = T5GemmaModuleConfig()
        elif not isinstance(encoder, T5GemmaModuleConfig):
            raise TypeError(f"{type(encoder)} is not supported.")
        # Same for `decoder`; by default it mirrors the encoder architecture.
        if isinstance(decoder, dict):
            decoder = T5GemmaModuleConfig(**decoder)
        elif decoder is None:
            decoder = encoder
        elif not isinstance(decoder, T5GemmaModuleConfig):
            raise TypeError(f"{type(decoder)} is not supported.")
        # Re-instantiate fresh copies so the caller's objects are not mutated by the
        # adjustments below (and so encoder/decoder are decoupled when the decoder
        # defaulted to the encoder instance).
        encoder = T5GemmaModuleConfig(**encoder.to_dict())
        decoder = T5GemmaModuleConfig(**decoder.to_dict())
        encoder.is_decoder = False
        encoder.dropout_rate = dropout_rate
        encoder.attention_dropout = attention_dropout
        self.encoder = encoder
        decoder.is_decoder = True
        decoder.use_cache = True
        decoder.dropout_rate = dropout_rate
        decoder.attention_dropout = attention_dropout
        # Cross-attention projects keys/values from the encoder's hidden size.
        decoder.cross_attention_hidden_size = encoder.hidden_size
        self.decoder = decoder
        # Inherit special tokens from the decoder unless explicitly overridden.
        for special_token_key in ["bos_token_id", "pad_token_id", "eos_token_id"]:
            if special_token_key not in kwargs:
                kwargs[special_token_key] = getattr(decoder, special_token_key)
        super().__init__(**kwargs)
        self.is_encoder_decoder = is_encoder_decoder
        self.initializer_range = kwargs.get("initializer_range", decoder.initializer_range)
        self.classifier_dropout_rate = classifier_dropout_rate
        self.tie_word_embeddings = tie_word_embeddings
        # Used in pipeline generation.
        self.vocab_size = vocab_size
class T5GemmaRMSNorm(Gemma2RMSNorm):
    # Identical to Gemma2's RMSNorm; `_init_weights` zero-initializes the weight
    # because this norm scales by (1 + weight).
    pass
class T5GemmaMLP(Gemma2MLP):
    """Gemma2 gated MLP with an extra dropout applied before the down projection (T5-style)."""

    def __init__(self, config):
        super().__init__(config)
        self.dropout = nn.Dropout(config.dropout_rate)

    def forward(self, x):
        # Gated activation: act(gate(x)) * up(x), then dropout, then project back down.
        gated = self.act_fn(self.gate_proj(x)) * self.up_proj(x)
        return self.down_proj(self.dropout(gated))
class T5GemmaRotaryEmbedding(Gemma2RotaryEmbedding):
    # Identical to Gemma2's rotary position embedding.
    pass
class T5GemmaSelfAttention(Gemma2Attention):
    """Self-attention that is causal only when the owning module is a decoder."""
    def __init__(self, config: T5GemmaModuleConfig, layer_idx: int):
        super().__init__(config, layer_idx)
        # Required by flash attention: encoder selfattention is non-causal
        self.is_causal = config.is_decoder
class T5GemmaCrossAttention(Gemma2Attention):
    """Cross-attention: queries from the decoder, keys/values from the encoder output."""
    def __init__(self, config: T5GemmaModuleConfig, layer_idx: int):
        super().__init__(config, layer_idx)
        # Sliding windows / layer typing only apply to self-attention.
        del self.sliding_window
        del self.layer_type
        self.is_causal = False
        if config.cross_attention_hidden_size is None:
            raise ValueError("Cross-attention needs cross_attention_hidden_size to be specified.")
        # K/V project from the encoder hidden size, which may differ from the decoder's.
        self.k_proj = nn.Linear(
            config.cross_attention_hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
        )
        self.v_proj = nn.Linear(
            config.cross_attention_hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
        )
    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: torch.Tensor | None,
        encoder_hidden_states: torch.Tensor | None,
        past_key_values: Cache | None = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> tuple[torch.Tensor, torch.Tensor | None, tuple[torch.Tensor] | None]:
        """Attend from `hidden_states` (queries) over `encoder_hidden_states` (keys/values)."""
        if encoder_hidden_states is None:
            raise ValueError("Encoder hidden state is required for cross attention.")
        input_shape = hidden_states.shape[:-1]
        hidden_shape = (*input_shape, -1, self.head_dim)
        query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        # The encoder output never changes during generation, so its K/V are
        # computed once, stored in the cross-attention sub-cache, and replayed
        # on subsequent decoding steps (`is_updated` tracks this per layer).
        if past_key_values is not None:
            is_updated = past_key_values.is_updated.get(self.layer_idx)
            curr_past_key_values = past_key_values.cross_attention_cache
        if past_key_values is None or not is_updated:
            encoder_input_shape = encoder_hidden_states.shape[:-1]
            encoder_hidden_shape = (*encoder_input_shape, -1, self.head_dim)
            key_states = self.k_proj(encoder_hidden_states).view(encoder_hidden_shape).transpose(1, 2)
            value_states = self.v_proj(encoder_hidden_states).view(encoder_hidden_shape).transpose(1, 2)
            if past_key_values is not None:
                key_states, value_states = curr_past_key_values.update(key_states, value_states, self.layer_idx)
                past_key_values.is_updated[self.layer_idx] = True
        else:
            # Cache hit: reuse the encoder K/V stored on the first decoding step.
            key_states = curr_past_key_values.layers[self.layer_idx].keys
            value_states = curr_past_key_values.layers[self.layer_idx].values
        attention_interface: Callable = ALL_ATTENTION_FUNCTIONS.get_interface(
            self.config._attn_implementation, eager_attention_forward
        )
        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=self.attention_dropout if self.training else 0.0,
            scaling=self.scaling,
            sliding_window=None,
            softcap=self.attn_logit_softcapping,
            **kwargs,
        )
        attn_output = attn_output.reshape(*input_shape, -1).contiguous()
        attn_output = self.o_proj(attn_output)
        return attn_output, attn_weights
def bidirectional_mask_function(attention_mask: torch.Tensor | None) -> Callable:
"""
This creates bidirectional attention mask.
"""
def inner_mask(batch_idx: int, head_idx: int, q_idx: int, kv_idx: int) -> bool:
if attention_mask is None:
return torch.ones((), dtype=torch.bool)
return attention_mask[batch_idx, kv_idx].to(torch.bool)
return inner_mask
def sliding_window_bidirectional_mask_function(sliding_window: int) -> Callable:
    """
    This creates bidirectional attention mask with sliding window.
    """

    def within_window(batch_idx: int, head_idx: int, q_idx: int, kv_idx: int) -> bool:
        # Key must lie strictly inside (q_idx - sliding_window, q_idx + sliding_window).
        below_upper = kv_idx < q_idx + sliding_window
        above_lower = q_idx - sliding_window < kv_idx
        return above_lower & below_upper

    return within_window
class T5GemmaEncoderLayer(GradientCheckpointingLayer):
    """Encoder sub-layer: non-causal self-attention followed by a gated MLP."""

    def __init__(self, config, layer_idx: int):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        self.hidden_size = config.hidden_size
        # Mask type ("full_attention"/"sliding_attention") used to pick this layer's mask.
        self.attention_type = config.layer_types[layer_idx]
        self.dropout = nn.Dropout(config.dropout_rate)
        self.self_attn = T5GemmaSelfAttention(config=config, layer_idx=layer_idx)
        self.pre_self_attn_layernorm = T5GemmaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.post_self_attn_layernorm = T5GemmaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.mlp = T5GemmaMLP(config)
        self.pre_feedforward_layernorm = T5GemmaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.post_feedforward_layernorm = T5GemmaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: tuple[torch.Tensor, torch.Tensor] | None = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        **kwargs,
    ) -> tuple[torch.FloatTensor,]:
        # Self-attention sub-block: pre-norm, attention, post-norm, dropout, residual.
        skip = hidden_states
        attn_out, _ = self.self_attn(
            hidden_states=self.pre_self_attn_layernorm(hidden_states),
            position_embeddings=position_embeddings,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=None,
            **kwargs,
        )
        hidden_states = skip + self.dropout(self.post_self_attn_layernorm(attn_out))
        # Feed-forward sub-block with the same norm/dropout/residual sandwich.
        skip = hidden_states
        ff_out = self.post_feedforward_layernorm(self.mlp(self.pre_feedforward_layernorm(hidden_states)))
        hidden_states = skip + self.dropout(ff_out)
        return hidden_states
class T5GemmaDecoderLayer(GradientCheckpointingLayer):
    """Decoder sub-layer: an extra cross-attention layer."""
    def __init__(self, config, layer_idx: int):
        super().__init__()
        self.hidden_size = config.hidden_size
        self.config = config
        self.layer_idx = layer_idx
        # Mask type ("full_attention"/"sliding_attention") used to pick this layer's self-attention mask.
        self.attention_type = config.layer_types[layer_idx]
        self.self_attn = T5GemmaSelfAttention(
            config=config,
            layer_idx=layer_idx,
        )
        self.pre_self_attn_layernorm = T5GemmaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.post_self_attn_layernorm = T5GemmaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.mlp = T5GemmaMLP(config)
        self.pre_feedforward_layernorm = T5GemmaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.post_feedforward_layernorm = T5GemmaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.dropout = nn.Dropout(config.dropout_rate)
        self.cross_attn = T5GemmaCrossAttention(config=config, layer_idx=layer_idx)
        self.pre_cross_attn_layernorm = T5GemmaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.post_cross_attn_layernorm = T5GemmaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: tuple[torch.Tensor, torch.Tensor] | None = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: EncoderDecoderCache | None = None,
        use_cache: bool | None = False,
        cache_position: torch.LongTensor | None = None,
        encoder_hidden_states: torch.Tensor | None = None,
        encoder_attention_mask: torch.Tensor | None = None,
        **kwargs,
    ) -> torch.FloatTensor:
        """Run self-attention, cross-attention over the encoder output, then the MLP (each pre/post-normed with a residual)."""
        residual = hidden_states
        hidden_states = self.pre_self_attn_layernorm(hidden_states)
        # Self-attention only sees the self-attention half of the EncoderDecoderCache.
        hidden_states, _ = self.self_attn(
            hidden_states=hidden_states,
            position_embeddings=position_embeddings,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values.self_attention_cache if past_key_values is not None else None,
            use_cache=use_cache,
            cache_position=cache_position,
            **kwargs,
        )
        hidden_states = self.post_self_attn_layernorm(hidden_states)
        hidden_states = residual + self.dropout(hidden_states)
        residual = hidden_states
        hidden_states = self.pre_cross_attn_layernorm(hidden_states)
        # Cross-attention receives the full EncoderDecoderCache and manages its
        # own cross-attention sub-cache internally (see T5GemmaCrossAttention).
        hidden_states, _ = self.cross_attn(
            hidden_states=hidden_states,
            encoder_hidden_states=encoder_hidden_states,
            attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            use_cache=use_cache,
            **kwargs,
        )
        hidden_states = self.post_cross_attn_layernorm(hidden_states)
        hidden_states = residual + self.dropout(hidden_states)
        residual = hidden_states
        hidden_states = self.pre_feedforward_layernorm(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = self.post_feedforward_layernorm(hidden_states)
        hidden_states = residual + self.dropout(hidden_states)
        return hidden_states
class T5GemmaClassificationHead(nn.Module):
    """Head for sentence-level classification tasks."""

    def __init__(self, hidden_size: int, num_labels: int, classifier_dropout_rate: float = 0.0):
        super().__init__()
        self.dropout = nn.Dropout(p=classifier_dropout_rate)
        self.out_proj = nn.Linear(hidden_size, num_labels)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        # Dropout, then one linear projection onto the label space.
        return self.out_proj(self.dropout(hidden_states))
class T5GemmaLMHead(nn.Module):
    """Head for language modeling (generation) tasks."""

    def __init__(self, hidden_size: int, vocab_size: int, bias: bool = False):
        super().__init__()
        self.out_proj = nn.Linear(hidden_size, vocab_size, bias=bias)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        # Project hidden states onto vocabulary logits.
        return self.out_proj(hidden_states)
@auto_docstring
class T5GemmaPreTrainedModel(Gemma2PreTrainedModel):
    config: T5GemmaConfig
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    _no_split_modules = ["T5GemmaEncoderLayer", "T5GemmaDecoderLayer"]
    _can_record_outputs = {
        "hidden_states": T5GemmaDecoderLayer,
        "attentions": [
            OutputRecorder(T5GemmaSelfAttention, index=1, layer_name="self_attn"),
            OutputRecorder(T5GemmaSelfAttention, index=1, layer_name="cross_attn"),
            OutputRecorder(T5GemmaCrossAttention, index=1, layer_name="cross_attn"),
        ],
    }
    @torch.no_grad()
    def _init_weights(self, module):
        """Initialize the modules T5Gemma adds on top of the base Gemma2 scheme (heads, RMSNorms)."""
        # TODO: support initialization for encoders and decoders separately(?)
        PreTrainedModel._init_weights(self, module)
        std = self.config.initializer_range
        if isinstance(module, T5GemmaClassificationHead):
            # out_proj.weight.shape[0] is num_labels: scale std by 1/sqrt(num_labels).
            scale = module.out_proj.weight.shape[0] ** -0.5
            init.normal_(module.out_proj.weight, mean=0.0, std=std * scale)
            if hasattr(module.out_proj, "bias") and module.out_proj.bias is not None:
                init.zeros_(module.out_proj.bias)
        elif isinstance(module, T5GemmaLMHead):
            # When embeddings are tied, the LM head shares the embedding weights
            # and must not be re-initialized here.
            if not self.config.tie_word_embeddings:
                scale = module.out_proj.weight.shape[0] ** -0.5
                init.normal_(module.out_proj.weight, mean=0.0, std=std * scale)
        # We initialize with 0s to be 1 centered as the RMSNorm here does (1 + weight)
        elif "RMSNorm" in module.__class__.__name__:
            init.zeros_(module.weight)
    def _shift_right(self, input_ids):
        """
        Shifts input_ids to the right, prepends the decoder_start_token_id, and handles
        pad_token_id replacement for labels that were -100.
        This is a common preparation step for decoder inputs in sequence-to-sequence models.
        """
        decoder_start_token_id = self.config.decoder.bos_token_id
        pad_token_id = self.config.decoder.pad_token_id
        if decoder_start_token_id is None:
            raise ValueError("self.model.config.decoder.bos_token_id has to be defined. ")
        # shift inputs to the right
        shifted_input_ids = input_ids.new_zeros(input_ids.shape)
        shifted_input_ids[..., 1:] = input_ids[..., :-1].clone()
        shifted_input_ids[..., 0] = decoder_start_token_id
        if pad_token_id is None:
            raise ValueError("self.model.config.decoder.pad_token_id has to be defined.")
        # Is this T5 specific?
        # replace possible -100 values in labels by `pad_token_id`
        shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)
        return shifted_input_ids
def make_default_2d_attention_mask(
token_ids: torch.LongTensor | None,
hidden_states: torch.Tensor,
pad_token_id: int | None,
) -> torch.Tensor:
"""Construct the default attention mask."""
if token_ids is not None:
if pad_token_id is None:
raise ValueError("`pad_token_id` is required for padding information.")
attention_mask = (token_ids != pad_token_id).to(hidden_states.device, torch.long)
else:
attention_mask = torch.ones(
(hidden_states.shape[0], hidden_states.shape[1]), device=hidden_states.device, dtype=torch.long
)
return attention_mask
class T5GemmaEncoder(T5GemmaPreTrainedModel):
    """Stack of bidirectional encoder layers (no KV cache, no causality)."""
    _can_record_outputs = {
        "attentions": T5GemmaSelfAttention,
        "hidden_states": T5GemmaEncoderLayer,
    }
    def __init__(self, config):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size
        self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        self.norm = T5GemmaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.gradient_checkpointing = False
        self.layers = nn.ModuleList(
            [T5GemmaEncoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        self.dropout = nn.Dropout(config.dropout_rate)
        self.rotary_emb = T5GemmaRotaryEmbedding(config=config)
        # Initialize weights and apply final processing
        self.post_init()
    @merge_with_config_defaults
    @capture_outputs
    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple | BaseModelOutput:
        """Encode `input_ids` (or `inputs_embeds`) into contextual hidden states."""
        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
        # As we want to pass `past_key_values=None` explicitly everywhere, we need to pop them from kwargs if present
        kwargs.pop("past_key_values", None)
        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)
        # The encoder has no cache, so positions always start at 0.
        cache_position = torch.arange(0, inputs_embeds.shape[1], device=inputs_embeds.device)
        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)
        if attention_mask is None:
            attention_mask = make_default_2d_attention_mask(input_ids, inputs_embeds, self.config.pad_token_id)
        # Build one mask per attention type, unless the caller already supplied the mapping dict.
        if not isinstance(self_attn_mask_mapping := attention_mask, dict):
            mask_kwargs = {
                "config": self.config,
                "inputs_embeds": inputs_embeds,
                "attention_mask": attention_mask,
                "cache_position": cache_position,
                "past_key_values": None,
                "position_ids": position_ids,
            }
            # The or-/and-mask functions neutralize causality: the encoder attends
            # in both directions (within the sliding window for sliding layers).
            self_attn_mask_mapping = {
                "full_attention": create_causal_mask(
                    **mask_kwargs,
                    or_mask_function=bidirectional_mask_function(attention_mask),
                ),
                "sliding_attention": create_sliding_window_causal_mask(
                    **mask_kwargs,
                    or_mask_function=sliding_window_bidirectional_mask_function(self.config.sliding_window),
                    and_mask_function=bidirectional_mask_function(attention_mask),
                ),
            }
        hidden_states = inputs_embeds
        # Gemma-style embedding scaling by sqrt(hidden_size).
        normalizer = torch.tensor(self.config.hidden_size**0.5, dtype=hidden_states.dtype)
        hidden_states = hidden_states * normalizer
        hidden_states = self.dropout(hidden_states)
        position_embeddings = self.rotary_emb(hidden_states, position_ids)
        for layer_module in self.layers[: self.config.num_hidden_layers]:
            hidden_states = layer_module(
                hidden_states,
                position_embeddings,
                self_attn_mask_mapping[layer_module.attention_type],
                position_ids,
                **kwargs,
            )
        hidden_states = self.norm(hidden_states)
        hidden_states = self.dropout(hidden_states)
        return BaseModelOutput(
            last_hidden_state=hidden_states,
        )
class T5GemmaDecoder(T5GemmaPreTrainedModel):
    """Stack of decoder layers (causal self-attention plus cross-attention over the encoder output)."""
    _can_record_outputs = {
        "attentions": OutputRecorder(T5GemmaSelfAttention, index=1),
        "cross_attentions": OutputRecorder(T5GemmaCrossAttention, index=1),
        "hidden_states": T5GemmaDecoderLayer,
    }
    def __init__(self, config):
        # Mirrors T5GemmaEncoder.__init__ but stacks decoder layers instead.
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size
        self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        self.norm = T5GemmaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.gradient_checkpointing = False
        self.layers = nn.ModuleList(
            [T5GemmaDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        self.dropout = nn.Dropout(config.dropout_rate)
        self.rotary_emb = T5GemmaRotaryEmbedding(config=config)
        # Initialize weights and apply final processing
        self.post_init()
@merge_with_config_defaults
@capture_outputs
def forward(
self,
input_ids: torch.LongTensor | None = None,
attention_mask: torch.Tensor | None = None,
position_ids: torch.LongTensor | None = None,
past_key_values: EncoderDecoderCache | None = None,
inputs_embeds: torch.FloatTensor | None = None,
use_cache: bool | None = None,
cache_position: torch.LongTensor | None = None,
encoder_hidden_states: torch.Tensor | None = None,
encoder_attention_mask: torch.Tensor | None = None,
**kwargs: Unpack[TransformersKwargs],
) -> tuple | BaseModelOutputWithPastAndCrossAttentions:
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
if encoder_hidden_states is None:
raise ValueError("`encoder_hidden_states` must be given in decoder")
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids)
if not self.training and use_cache and past_key_values is None:
# We do not pass the config to the cross attn cache to avoid initializing SWA
# --> we use full attention between our cross attentions
past_key_values = EncoderDecoderCache(DynamicCache(config=self.config), DynamicCache())
if cache_position is None:
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
cache_position = torch.arange(
past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
)
if position_ids is None:
position_ids = cache_position.unsqueeze(0)
if attention_mask is None and past_key_values is None:
attention_mask = make_default_2d_attention_mask(input_ids, inputs_embeds, self.config.pad_token_id)
if not isinstance(self_attn_mask_mapping := attention_mask, dict):
mask_kwargs = {
"config": self.config,
"inputs_embeds": inputs_embeds,
"attention_mask": attention_mask,
"cache_position": cache_position,
"past_key_values": past_key_values.self_attention_cache if past_key_values is not None else None,
"position_ids": position_ids,
}
self_attn_mask_mapping = {
"full_attention": create_causal_mask(**mask_kwargs),
"sliding_attention": create_sliding_window_causal_mask(**mask_kwargs),
}
if not isinstance(cross_attn_mask_mapping := encoder_attention_mask, dict):
mask_kwargs = {
"config": self.config,
"inputs_embeds": encoder_hidden_states,
"attention_mask": encoder_attention_mask,
"cache_position": cache_position,
"past_key_values": None,
"position_ids": None,
}
cross_attn_mask_mapping = {
"full_attention": create_causal_mask(
**mask_kwargs,
or_mask_function=bidirectional_mask_function(encoder_attention_mask),
),
}
hidden_states = inputs_embeds
normalizer = torch.tensor(self.config.hidden_size**0.5, dtype=hidden_states.dtype)
hidden_states = hidden_states * normalizer
hidden_states = self.dropout(hidden_states)
position_embeddings = self.rotary_emb(hidden_states, position_ids)
for layer_module in self.layers[: self.config.num_hidden_layers]:
hidden_states = layer_module(
hidden_states,
position_embeddings,
self_attn_mask_mapping[layer_module.attention_type],
position_ids,
past_key_values,
use_cache,
cache_position,
encoder_hidden_states,
cross_attn_mask_mapping["full_attention"],
**kwargs,
)
hidden_states = self.norm(hidden_states)
hidden_states = self.dropout(hidden_states)
return BaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
past_key_values=past_key_values,
)
@auto_docstring
class T5GemmaModel(T5GemmaPreTrainedModel):
    """Bare encoder-decoder T5Gemma model returning raw hidden states."""

    def __init__(self, config: T5GemmaConfig):
        super().__init__(config)
        # This wrapper is strictly seq2seq; encoder-only configs belong to T5GemmaEncoderModel.
        if not config.is_encoder_decoder:
            raise ValueError("T5GemmaModel only support encoder-decoder modeling. Use `T5GemmaEncoderModel` instead.")
        self.encoder = T5GemmaEncoder(config.encoder)
        self.decoder = T5GemmaDecoder(config.decoder)
        self.post_init()

    def get_input_embeddings(self):
        # Input token embeddings live on the encoder side.
        return self.encoder.get_input_embeddings()

    def set_input_embeddings(self, new_embeddings):
        return self.encoder.set_input_embeddings(new_embeddings)

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        attention_mask: torch.FloatTensor | None = None,
        position_ids: torch.LongTensor | None = None,
        decoder_input_ids: torch.LongTensor | None = None,
        decoder_attention_mask: torch.BoolTensor | None = None,
        decoder_position_ids: torch.LongTensor | None = None,
        encoder_outputs: BaseModelOutput | None = None,
        past_key_values: EncoderDecoderCache | None = None,
        inputs_embeds: torch.Tensor | None = None,
        decoder_inputs_embeds: torch.Tensor | None = None,
        use_cache: bool | None = None,
        cache_position: torch.LongTensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> Seq2SeqModelOutput:
        r"""
        decoder_position_ids (`torch.LongTensor` of shape `(batch_size, decoder_sequence_length)`, *optional*):
            Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the range `[0,
            config.decoder.n_positions - 1]`. [What are position IDs?](../glossary#position-ids)
        """
        # Run the encoder only when the caller did not supply precomputed outputs.
        if encoder_outputs is None:
            encoder_outputs = self.encoder(
                input_ids=input_ids,
                attention_mask=attention_mask,
                position_ids=position_ids,
                inputs_embeds=inputs_embeds,
                **kwargs,
            )

        memory = encoder_outputs.last_hidden_state
        decoder_outputs = self.decoder(
            input_ids=decoder_input_ids,
            attention_mask=decoder_attention_mask,
            position_ids=decoder_position_ids,
            inputs_embeds=decoder_inputs_embeds,
            past_key_values=past_key_values,
            encoder_hidden_states=memory,
            encoder_attention_mask=attention_mask,
            use_cache=use_cache,
            cache_position=cache_position,
            **kwargs,
        )

        if kwargs.get("output_hidden_states", False):
            decoder_hidden_states = decoder_outputs.hidden_states
        else:
            # Expose at least the final layer so downstream heads always find something here.
            decoder_hidden_states = (decoder_outputs.last_hidden_state,)

        return Seq2SeqModelOutput(
            last_hidden_state=decoder_outputs.last_hidden_state,
            past_key_values=decoder_outputs.past_key_values,
            decoder_hidden_states=decoder_hidden_states,
            decoder_attentions=decoder_outputs.attentions,
            cross_attentions=decoder_outputs.cross_attentions,
            encoder_last_hidden_state=encoder_outputs.last_hidden_state,
            encoder_hidden_states=encoder_outputs.hidden_states,
            encoder_attentions=encoder_outputs.attentions,
        )
@auto_docstring
class T5GemmaEncoderModel(T5GemmaPreTrainedModel):
    """Encoder-only T5Gemma model (thin wrapper around `T5GemmaEncoder`)."""

    def __init__(self, config: T5GemmaConfig):
        super().__init__(config)
        # Reject configs meant for the full seq2seq model.
        if config.is_encoder_decoder:
            raise ValueError("T5GemmaEncoderModel only supports encoder-only model. Use `T5GemmaModel` instead.")
        self.encoder = T5GemmaEncoder(config.encoder)
        self.post_init()

    def get_input_embeddings(self):
        return self.encoder.get_input_embeddings()

    def set_input_embeddings(self, new_embeddings):
        return self.encoder.set_input_embeddings(new_embeddings)

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        attention_mask: torch.FloatTensor | None = None,
        position_ids: torch.LongTensor | None = None,
        inputs_embeds: torch.Tensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> BaseModelOutput:
        # Pure delegation: masking and embedding work all happens inside the encoder stack.
        return self.encoder(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            inputs_embeds=inputs_embeds,
            **kwargs,
        )
class T5GemmaForConditionalGeneration(T5GemmaPreTrainedModel, GenerationMixin):
    """T5Gemma seq2seq model with a language-modeling head, usable with `generate()`."""

    _tied_weights_keys = {"lm_head.out_proj.weight": "model.decoder.embed_tokens.weight"}
    _tp_plan = {"lm_head.out_proj": "colwise_gather_output"}
    _pp_plan = {"lm_head.out_proj": (["hidden_states"], ["logits"])}

    def __init__(self, config: T5GemmaConfig):
        # Force seq2seq mode before the base class consumes the config.
        config.is_encoder_decoder = True
        super().__init__(config)
        self.model = T5GemmaModel(config)
        self.vocab_size = config.decoder.vocab_size
        self.lm_head = T5GemmaLMHead(config.decoder.hidden_size, self.vocab_size)
        # Labels are right-shifted before the loss, so a masked-LM style loss applies directly.
        self.loss_type = "ForMaskedLM"
        self.post_init()

    def set_output_embeddings(self, new_embeddings):
        self.lm_head.out_proj = new_embeddings

    def get_output_embeddings(self):
        return self.lm_head.out_proj

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        attention_mask: torch.FloatTensor | None = None,
        position_ids: torch.LongTensor | None = None,
        decoder_input_ids: torch.LongTensor | None = None,
        decoder_attention_mask: torch.BoolTensor | None = None,
        decoder_position_ids: torch.LongTensor | None = None,
        encoder_outputs: BaseModelOutput | None = None,
        past_key_values: EncoderDecoderCache | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        decoder_inputs_embeds: torch.FloatTensor | None = None,
        labels: torch.LongTensor | None = None,
        use_cache: bool | None = None,
        cache_position: torch.LongTensor | None = None,
        logits_to_keep: int | torch.Tensor = 0,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple[torch.FloatTensor] | Seq2SeqLMOutput:
        r"""
        decoder_position_ids (`torch.LongTensor` of shape `(batch_size, decoder_sequence_length)`, *optional*):
            Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the range `[0,
            config.decoder.n_positions - 1]`. [What are position IDs?](../glossary#position-ids)
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
        """
        if labels is not None and decoder_input_ids is None and decoder_inputs_embeds is None:
            # get decoder inputs from shifting lm labels to the right
            decoder_input_ids = self._shift_right(labels)

        seq2seq_out: Seq2SeqModelOutput = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            decoder_input_ids=decoder_input_ids,
            decoder_attention_mask=decoder_attention_mask,
            decoder_position_ids=decoder_position_ids,
            encoder_outputs=encoder_outputs,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            decoder_inputs_embeds=decoder_inputs_embeds,
            use_cache=use_cache,
            cache_position=cache_position,
            **kwargs,
        )
        final_hidden = seq2seq_out.last_hidden_state

        # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
        if isinstance(logits_to_keep, int):
            slice_indices = slice(-logits_to_keep, None)
        else:
            slice_indices = logits_to_keep
        logits = self.lm_head(final_hidden[:, slice_indices, :])

        # Gemma-style soft-capping: squash logits into (-cap, cap) via tanh.
        softcap = self.get_decoder().config.final_logit_softcapping
        if softcap is not None:
            logits = softcap * torch.tanh(logits / softcap)

        loss = None
        if labels is not None:
            # Input has right-shifted so we directly perform masked lm loss
            loss = self.loss_function(logits, labels, self.vocab_size, **kwargs)

        return Seq2SeqLMOutput(
            loss=loss,
            logits=logits,
            past_key_values=seq2seq_out.past_key_values,
            decoder_hidden_states=seq2seq_out.decoder_hidden_states,
            decoder_attentions=seq2seq_out.decoder_attentions,
            cross_attentions=seq2seq_out.cross_attentions,
            encoder_last_hidden_state=seq2seq_out.encoder_last_hidden_state,
            encoder_hidden_states=seq2seq_out.encoder_hidden_states,
            encoder_attentions=seq2seq_out.encoder_attentions,
        )

    def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):
        # Used by Trainer utilities to derive decoder inputs from labels.
        return self._shift_right(labels)
@auto_docstring
class T5GemmaForSequenceClassification(T5GemmaPreTrainedModel):
    """T5Gemma with a sequence-classification head pooled from the last non-padding token."""

    def __init__(self, config: T5GemmaConfig, is_encoder_decoder: bool | None = None):
        r"""
        is_encoder_decoder (`Optional`, *optional*):
            Whether use encoder_decoder for sequence classification. When set to False, only encoder is used.
        """
        if is_encoder_decoder is not None:
            config.is_encoder_decoder = is_encoder_decoder
        super().__init__(config)
        self.num_labels = config.num_labels
        # Backbone choice: full seq2seq model or encoder-only, per the config flag.
        if config.is_encoder_decoder:
            self.model = T5GemmaModel(config)
        else:
            self.model = T5GemmaEncoderModel(config)

        # The classifier consumes decoder states in seq2seq mode, encoder states otherwise.
        hidden_size = config.encoder.hidden_size
        if config.is_encoder_decoder:
            hidden_size = config.decoder.hidden_size

        classifier_dropout = getattr(config, "classifier_dropout_rate", 0.1)
        self.score = T5GemmaClassificationHead(hidden_size, self.num_labels, classifier_dropout)

        self.post_init()

    def get_input_embeddings(self):
        return self.model.get_input_embeddings()

    def set_input_embeddings(self, value):
        self.model.set_input_embeddings(value)

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        decoder_input_ids: torch.LongTensor | None = None,
        decoder_attention_mask: torch.Tensor | None = None,
        decoder_position_ids: torch.LongTensor | None = None,
        encoder_outputs: BaseModelOutput | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        decoder_inputs_embeds: torch.FloatTensor | None = None,
        labels: torch.LongTensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> SequenceClassifierOutput:
        r"""
        decoder_position_ids (`torch.LongTensor` of shape `(batch_size, decoder_sequence_length)`, *optional*):
            Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the range `[0,
            config.decoder.n_positions - 1]`. [What are position IDs?](../glossary#position-ids)
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        if self.config.is_encoder_decoder and (input_ids is None and inputs_embeds is not None):
            raise NotImplementedError(
                f"Passing input embeddings is currently not supported for {self.__class__.__name__} in encoder-decoder mode."
            )

        # Following T5, we automatically creates decoder_input_ids from input_ids if no decoder_input_ids are provided
        if self.config.is_encoder_decoder and (decoder_input_ids is None and decoder_inputs_embeds is None):
            if input_ids is None:
                raise ValueError(
                    "If no `decoder_input_ids` or `decoder_inputs_embeds` are "
                    "passed, `input_ids` cannot be `None`. Please pass either "
                    "`input_ids` or `decoder_input_ids` or `decoder_inputs_embeds`."
                )
            decoder_input_ids = self._shift_right(input_ids)

        if self.config.is_encoder_decoder:
            outputs: Seq2SeqModelOutput = self.model(
                input_ids,
                attention_mask=attention_mask,
                position_ids=position_ids,
                decoder_input_ids=decoder_input_ids,
                decoder_attention_mask=decoder_attention_mask,
                decoder_position_ids=decoder_position_ids,
                encoder_outputs=encoder_outputs,
                inputs_embeds=inputs_embeds,
                decoder_inputs_embeds=decoder_inputs_embeds,
                use_cache=False,  # no generation here, caching is pointless
                **kwargs,
            )
            last_hidden_state = outputs.last_hidden_state
            hidden_states = outputs.decoder_hidden_states
            attentions = outputs.decoder_attentions
        else:
            outputs: BaseModelOutput = self.model(
                input_ids,
                attention_mask=attention_mask,
                position_ids=position_ids,
                inputs_embeds=inputs_embeds,
                **kwargs,
            )
            last_hidden_state = outputs.last_hidden_state
            hidden_states = outputs.hidden_states
            attentions = outputs.attentions

        # Per-token logits; a single position is pooled out below.
        logits = self.score(last_hidden_state)

        if input_ids is not None:
            batch_size = input_ids.shape[0]
        else:
            batch_size = inputs_embeds.shape[0]

        # Without a pad token we cannot locate the last real token per row.
        if self.config.pad_token_id is None and batch_size != 1:
            raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
        if self.config.pad_token_id is None:
            last_non_pad_token = -1
        elif input_ids is not None:
            # To handle both left- and right- padding, we take the rightmost token that is not equal to pad_token_id
            non_pad_mask = (input_ids != self.config.pad_token_id).to(logits.device, torch.int32)
            token_indices = torch.arange(input_ids.shape[-1], device=logits.device, dtype=torch.int32)
            last_non_pad_token = (token_indices * non_pad_mask).argmax(-1)
            if self.config.is_encoder_decoder:
                last_non_pad_token += 1  # due to the right shift.
                # Clamp in case the shifted index would fall past the decoder sequence.
                last_non_pad_token = torch.clamp(last_non_pad_token, max=decoder_input_ids.shape[-1] - 1)
        else:
            last_non_pad_token = -1
            logger.warning_once(
                f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
                "unexpected if using padding tokens in conjunction with `inputs_embeds.`"
            )

        # Pool exactly one position per batch row.
        pooled_logits = logits[torch.arange(batch_size, device=logits.device), last_non_pad_token]

        loss = None
        if labels is not None:
            loss = self.loss_function(logits=logits, labels=labels, pooled_logits=pooled_logits, config=self.config)

        return SequenceClassifierOutput(
            loss=loss,
            logits=pooled_logits,
            hidden_states=hidden_states,
            attentions=attentions,
        )
@auto_docstring
class T5GemmaForTokenClassification(T5GemmaPreTrainedModel):
    """T5Gemma with a per-token classification head (e.g. for NER-style tasks)."""

    def __init__(self, config: T5GemmaConfig, is_encoder_decoder: bool | None = None):
        r"""
        is_encoder_decoder (`Optional`, *optional*):
            Whether use encoder_decoder for token classification. When set to False, only encoder is used.
        """
        if is_encoder_decoder is not None:
            config.is_encoder_decoder = is_encoder_decoder
        super().__init__(config)
        self.num_labels = config.num_labels
        # Pick the backbone and the width feeding the classifier in one step: the head
        # consumes decoder states in seq2seq mode, encoder states otherwise.
        if config.is_encoder_decoder:
            self.model = T5GemmaModel(config)
            hidden_size = config.decoder.hidden_size
        else:
            self.model = T5GemmaEncoderModel(config)
            hidden_size = config.encoder.hidden_size
        classifier_dropout = getattr(config, "classifier_dropout_rate", 0.1)
        self.score = T5GemmaClassificationHead(hidden_size, self.num_labels, classifier_dropout)
        self.post_init()

    def get_input_embeddings(self):
        return self.model.get_input_embeddings()

    def set_input_embeddings(self, value):
        self.model.set_input_embeddings(value)

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        decoder_input_ids: torch.LongTensor | None = None,
        decoder_attention_mask: torch.Tensor | None = None,
        decoder_position_ids: torch.LongTensor | None = None,
        encoder_outputs: BaseModelOutput | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        decoder_inputs_embeds: torch.FloatTensor | None = None,
        labels: torch.LongTensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> TokenClassifierOutput:
        r"""
        decoder_position_ids (`torch.LongTensor` of shape `(batch_size, decoder_sequence_length)`, *optional*):
            Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the range `[0,
            config.decoder.n_positions - 1]`. [What are position IDs?](../glossary#position-ids)
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        enc_dec = self.config.is_encoder_decoder
        if enc_dec and (input_ids is None and inputs_embeds is not None):
            raise NotImplementedError(
                f"Passing input embeddings is currently not supported for {self.__class__.__name__} in encoder-decoder mode."
            )
        if enc_dec and (decoder_input_ids is None and decoder_inputs_embeds is None):
            if input_ids is None:
                raise ValueError(
                    "If no `decoder_input_ids` or `decoder_inputs_embeds` are "
                    "passed, `input_ids` cannot be `None`. Please pass either "
                    "`input_ids` or `decoder_input_ids` or `decoder_inputs_embeds`."
                )
            # Mirror T5: derive decoder inputs by right-shifting the encoder inputs.
            decoder_input_ids = self._shift_right(input_ids)

        if enc_dec:
            outputs: Seq2SeqModelOutput = self.model(
                input_ids,
                attention_mask=attention_mask,
                position_ids=position_ids,
                decoder_input_ids=decoder_input_ids,
                decoder_attention_mask=decoder_attention_mask,
                decoder_position_ids=decoder_position_ids,
                encoder_outputs=encoder_outputs,
                inputs_embeds=inputs_embeds,
                decoder_inputs_embeds=decoder_inputs_embeds,
                use_cache=False,
                **kwargs,
            )
            last_hidden_state = outputs.last_hidden_state
            hidden_states, attentions = outputs.decoder_hidden_states, outputs.decoder_attentions
        else:
            outputs: BaseModelOutput = self.model(
                input_ids,
                attention_mask=attention_mask,
                position_ids=position_ids,
                inputs_embeds=inputs_embeds,
                **kwargs,
            )
            last_hidden_state = outputs.last_hidden_state
            hidden_states, attentions = outputs.hidden_states, outputs.attentions

        # One logit vector per token position.
        logits = self.score(last_hidden_state)

        loss = None
        if labels is not None:
            loss = self.loss_function(logits, labels, self.config)

        return TokenClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=hidden_states,
            attentions=attentions,
        )
# Public symbols re-exported by the t5gemma module (consumed by the lazy import machinery).
__all__ = [
    "T5GemmaConfig",
    "T5GemmaModuleConfig",
    "T5GemmaForConditionalGeneration",
    "T5GemmaModel",
    "T5GemmaEncoderModel",
    "T5GemmaPreTrainedModel",
    "T5GemmaForSequenceClassification",
    "T5GemmaForTokenClassification",
]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/t5gemma/modular_t5gemma.py",
"license": "Apache License 2.0",
"lines": 1169,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:tests/models/t5gemma/test_modeling_t5gemma.py | # Copyright 2025 Google Inc. HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch T5Gemma model."""
import copy
import inspect
import unittest
import pytest
from parameterized import parameterized
from pytest import mark
from transformers import T5GemmaConfig, T5GemmaModuleConfig, is_torch_available
from transformers.testing_utils import (
require_flash_attn,
require_torch,
require_torch_accelerator,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin, assert_similar_generate_outputs
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
import torch.nn.functional as F
from transformers import (
T5GemmaEncoderModel,
T5GemmaForConditionalGeneration,
T5GemmaForSequenceClassification,
T5GemmaForTokenClassification,
T5GemmaModel,
)
class T5GemmaModelTester:
config_class = T5GemmaConfig
module_config_class = T5GemmaModuleConfig
if is_torch_available():
model_class = T5GemmaModel
causal_lm_class = T5GemmaForConditionalGeneration
sequence_classification_class = T5GemmaForSequenceClassification
token_classification_class = T5GemmaForTokenClassification
    def __init__(
        self,
        parent,
        batch_size=13,
        is_training=True,
        use_attention_mask=True,
        use_labels=True,
        vocab_size=99,
        # decoder-specific
        seq_length=7,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        num_key_value_heads=2,
        intermediate_size=37,
        # encoder-specific
        encoder_seq_length=7,
        encoder_hidden_size=32,
        encoder_num_hidden_layers=2,
        encoder_num_attention_heads=4,
        encoder_num_key_value_heads=2,
        encoder_intermediate_size=37,
        # common
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        # special ids
        eos_token_id=1,
        pad_token_id=0,
        bos_token_id=2,
    ):
        """Store the (tiny) model hyperparameters used by the T5Gemma test suite.

        `parent` is the unittest TestCase, used for its assert* methods in the
        `create_and_check_*` helpers below.
        """
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        # decoder
        self.seq_length = seq_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.intermediate_size = intermediate_size
        # encoder
        self.encoder_seq_length = encoder_seq_length
        self.encoder_hidden_size = encoder_hidden_size
        self.encoder_num_hidden_layers = encoder_num_hidden_layers
        self.encoder_num_attention_heads = encoder_num_attention_heads
        self.encoder_num_key_value_heads = encoder_num_key_value_heads
        self.encoder_intermediate_size = encoder_intermediate_size
        # common
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        # NOTE: head_dim is derived from decoder dims but shared with the encoder config.
        self.head_dim = self.hidden_size // self.num_attention_heads
        # assume encoder and decoder have the same head dimension.
        assert self.head_dim == self.encoder_hidden_size // self.encoder_num_attention_heads
        # special ids
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        # assume the number of attention heads are the same across encoder and decoder
        # only used for generation testing purpose.
        assert self.num_attention_heads == self.encoder_num_attention_heads
def get_encoder_config(self):
return self.module_config_class(
vocab_size=self.vocab_size,
hidden_size=self.encoder_hidden_size,
num_hidden_layers=self.encoder_num_hidden_layers,
num_attention_heads=self.encoder_num_attention_heads,
num_key_value_heads=self.encoder_num_key_value_heads,
intermediate_size=self.encoder_intermediate_size,
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
type_vocab_size=self.type_vocab_size,
is_decoder=False,
initializer_range=self.initializer_range,
head_dim=self.head_dim,
bos_token_id=self.bos_token_id,
eos_token_id=self.eos_token_id,
pad_token_id=self.pad_token_id,
)
def get_decoder_config(self):
return self.module_config_class(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
num_key_value_heads=self.num_key_value_heads,
intermediate_size=self.intermediate_size,
cross_attention_hidden_size=self.encoder_hidden_size,
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
type_vocab_size=self.type_vocab_size,
is_decoder=True,
initializer_range=self.initializer_range,
head_dim=self.head_dim,
bos_token_id=self.bos_token_id,
eos_token_id=self.eos_token_id,
pad_token_id=self.pad_token_id,
)
def get_config(self, is_encoder_decoder=True):
return self.config_class(
encoder=self.get_encoder_config(),
decoder=self.get_decoder_config(),
is_encoder_decoder=is_encoder_decoder,
# Used for generation test.
num_attention_heads=self.num_attention_heads,
num_key_value_heads=self.num_key_value_heads,
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
)
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size)
decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
# Remove BOS symbols from inputs.
input_ids = torch.where(input_ids == self.bos_token_id, 42, input_ids)
decoder_input_ids = torch.where(decoder_input_ids == self.bos_token_id, 42, decoder_input_ids)
# Avoid leading PAD tokens from inputs.
# `T5GemmaForTokenClassification` and `T5GemmaForSequenceClassification` specify `use_cache=False` when
# calling `self.model`. For `self.use_attention_mask=False` case below, the model goes through
# `make_default_2d_attention_mask`. When there are some pad tokens at the beginning of a sequence, it can't
# attend to any place, and the computed mask `[-3.4028e+38, -3.4028e+38, -3.4028e+38, -3.4028e+38, -3.4028e+38, -3.4028e+38, -3.4028e+38]`
# causes larger differences in some equivalence tests.
# Let's avoid such leading PAD tokens.
decoder_input_ids[:, 0] = self.pad_token_id + 1
attention_mask = None
decoder_attention_mask = None
if self.use_attention_mask:
attention_mask = ids_tensor([self.batch_size, self.encoder_seq_length], vocab_size=2)
decoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
lm_labels = None
if self.use_labels:
lm_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
config = self.get_config()
return (
config,
input_ids,
decoder_input_ids,
attention_mask,
decoder_attention_mask,
lm_labels,
)
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
decoder_input_ids,
attention_mask,
decoder_attention_mask,
lm_labels,
) = config_and_inputs
inputs_dict = {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
}
return config, inputs_dict
def create_and_check_model(
self,
config,
input_ids,
decoder_input_ids,
attention_mask,
decoder_attention_mask,
lm_labels,
):
model = self.model_class(config=config).to(torch_device).eval()
result = model(
input_ids=input_ids,
decoder_input_ids=decoder_input_ids,
attention_mask=attention_mask,
decoder_attention_mask=decoder_attention_mask,
)
decoder_output = result.last_hidden_state
decoder_past = result.past_key_values
encoder_output = result.encoder_last_hidden_state
self.parent.assertEqual(
encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.encoder_hidden_size)
)
self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertIsNotNone(decoder_past)
self.parent.assertEqual(len(decoder_past.self_attention_cache), config.decoder.num_hidden_layers)
self.parent.assertEqual(len(decoder_past.cross_attention_cache), config.decoder.num_hidden_layers)
def check_prepare_lm_labels_via_shift_left(
self,
config,
input_ids,
decoder_input_ids,
attention_mask,
decoder_attention_mask,
lm_labels,
):
model = self.model_class(config=config).to(torch_device).eval()
# _shift_right should be called on labels
shifted_labels = model._shift_right(lm_labels)
# first token should be decoder_start_token_id
self.parent.assertTrue(torch.all(shifted_labels[:, 0] == config.decoder.bos_token_id))
# the rest should be the labels shifted by one, with -100 replaced by pad_token_id
labels_without_ignore_index = lm_labels.masked_fill(lm_labels == -100, config.decoder.pad_token_id)
self.parent.assertTrue(torch.all(shifted_labels[:, 1:] == labels_without_ignore_index[:, :-1]))
def create_and_check_with_lm_head(
self,
config,
input_ids,
decoder_input_ids,
attention_mask,
decoder_attention_mask,
lm_labels,
):
model = self.causal_lm_class(config=config).to(torch_device).eval()
outputs = model(
input_ids=input_ids,
decoder_input_ids=decoder_input_ids,
attention_mask=attention_mask,
decoder_attention_mask=decoder_attention_mask,
labels=lm_labels,
)
self.parent.assertEqual(len(outputs), 5)
self.parent.assertEqual(outputs["logits"].size(), (self.batch_size, self.seq_length, self.vocab_size))
self.parent.assertEqual(outputs["loss"].size(), ())
def create_and_check_with_sequence_classification_head(
self,
config,
input_ids,
decoder_input_ids,
attention_mask,
decoder_attention_mask,
lm_labels,
):
labels = torch.tensor([1] * self.batch_size, dtype=torch.long, device=torch_device)
model = self.sequence_classification_class(config=config).to(torch_device).eval()
outputs = model(
input_ids=input_ids,
decoder_input_ids=input_ids,
labels=labels,
)
self.parent.assertEqual(outputs["logits"].size(), (self.batch_size, config.num_labels))
self.parent.assertEqual(outputs["loss"].size(), ())
def create_and_check_encoderonly_for_sequence_classification_head(
self,
config,
input_ids,
decoder_input_ids,
attention_mask,
decoder_attention_mask,
lm_labels,
is_encoder_decoder,
):
labels = torch.tensor([1] * self.batch_size, dtype=torch.long, device=torch_device)
model = self.sequence_classification_class(config=config, is_encoder_decoder=is_encoder_decoder)
model = model.to(torch_device).eval()
outputs = model(
input_ids=input_ids,
decoder_input_ids=input_ids,
labels=labels,
)
self.parent.assertEqual(outputs["logits"].size(), (self.batch_size, config.num_labels))
self.parent.assertEqual(outputs["loss"].size(), ())
def create_and_check_encoderonly_for_token_classification_head(
self,
config,
input_ids,
decoder_input_ids,
attention_mask,
decoder_attention_mask,
lm_labels,
is_encoder_decoder,
):
labels = torch.tensor([1] * self.seq_length * self.batch_size, dtype=torch.long, device=torch_device)
model = self.token_classification_class(config=config, is_encoder_decoder=is_encoder_decoder)
model = model.to(torch_device).eval()
outputs = model(
input_ids=input_ids,
decoder_input_ids=input_ids,
labels=labels,
)
self.parent.assertEqual(outputs["logits"].size(), (self.batch_size, self.seq_length, config.num_labels))
self.parent.assertEqual(outputs["loss"].size(), ())
    def create_and_check_decoder_model_past(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask,
        decoder_attention_mask,
        lm_labels,
    ):
        """Check that decoding one new token with a KV cache matches full re-decoding."""
        model = self.model_class(config=config).get_decoder().to(torch_device).eval()
        # Dummy all-ones encoder output for cross-attention keeps the run deterministic.
        encoder_hidden_states = torch.ones(
            (self.batch_size, self.encoder_seq_length, self.encoder_hidden_size), dtype=torch.float32
        ).to(torch_device)
        # first forward pass
        outputs = model(input_ids, encoder_hidden_states=encoder_hidden_states, use_cache=True)
        outputs_use_cache_conf = model(input_ids, encoder_hidden_states=encoder_hidden_states)
        outputs_no_past = model(input_ids, encoder_hidden_states=encoder_hidden_states, use_cache=False)
        # With use_cache=False one output entry (the cache) is absent, hence the +1.
        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        # Uncached path re-runs the full sequence; cached path feeds only the new token.
        output_from_no_past = model(next_input_ids, encoder_hidden_states=encoder_hidden_states)["last_hidden_state"]
        output_from_past = model(
            next_tokens, encoder_hidden_states=encoder_hidden_states, past_key_values=past_key_values
        )["last_hidden_state"]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def create_and_check_decoder_model_attention_mask_past(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask,
        decoder_attention_mask,
        lm_labels,
    ):
        """Check cached vs. uncached decoding agree when an attention mask hides tokens."""
        model = self.model_class(config=config).get_decoder().to(torch_device).eval()
        # Dummy all-ones encoder output for cross-attention.
        encoder_hidden_states = torch.ones(
            (self.batch_size, self.encoder_seq_length, self.encoder_hidden_size), dtype=torch.float32
        ).to(torch_device)
        # create attention mask
        attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
        half_seq_length = input_ids.shape[-1] // 2
        # Mask out the second half of every sequence.
        attn_mask[:, half_seq_length:] = 0
        # first forward pass
        output, past_key_values = model(
            input_ids, encoder_hidden_states=encoder_hidden_states, attention_mask=attn_mask, use_cache=True
        ).to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)
        # change a random masked slice from input_ids
        # (altering masked positions must not change the outputs, since they are hidden)
        random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1
        random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1)
        input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens
        # append to next input_ids and attn_mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        attn_mask = torch.cat(
            [attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)],
            dim=1,
        )
        # get two different outputs
        output_from_no_past = model(
            next_input_ids, encoder_hidden_states=encoder_hidden_states, attention_mask=attn_mask
        )["last_hidden_state"]
        output_from_past = model(
            next_tokens,
            encoder_hidden_states=encoder_hidden_states,
            past_key_values=past_key_values,
            attention_mask=attn_mask,
        )["last_hidden_state"]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask,
        decoder_attention_mask,
        lm_labels,
    ):
        """Check cached decoding of three new tokens at once matches full re-decoding."""
        model = self.model_class(config=config).get_decoder().to(torch_device).eval()
        # Dummy all-ones encoder output for cross-attention.
        encoder_hidden_states = torch.ones(
            (self.batch_size, self.encoder_seq_length, self.encoder_hidden_size), dtype=torch.float32
        ).to(torch_device)
        # first forward pass
        outputs = model(
            input_ids, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask, use_cache=True
        )
        output, past_key_values = outputs.to_tuple()
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([attention_mask, next_mask], dim=-1)
        output_from_no_past = model(
            next_input_ids, encoder_hidden_states=encoder_hidden_states, attention_mask=next_attention_mask
        )["last_hidden_state"]
        output_from_past = model(
            next_tokens,
            encoder_hidden_states=encoder_hidden_states,
            attention_mask=next_attention_mask,
            past_key_values=past_key_values,
        )["last_hidden_state"]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        # Compare the three newly decoded positions only.
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
def create_and_check_generate_with_past_key_values(
self,
config,
input_ids,
decoder_input_ids,
attention_mask,
decoder_attention_mask,
lm_labels,
):
model = self.causal_lm_class(config=config).to(torch_device).eval()
torch.manual_seed(0)
output_without_past_cache = model.generate(
input_ids[:1], num_beams=2, max_length=5, do_sample=True, use_cache=False
)
torch.manual_seed(0)
output_with_past_cache = model.generate(input_ids[:1], num_beams=2, max_length=5, do_sample=True)
self.parent.assertTrue(torch.all(output_with_past_cache == output_without_past_cache))
def create_and_check_model_fp16_forward(
self,
config,
input_ids,
decoder_input_ids,
attention_mask,
decoder_attention_mask,
lm_labels,
):
model = self.model_class(config=config).to(torch_device).half().eval()
output = model(input_ids, decoder_input_ids=input_ids, attention_mask=attention_mask)["last_hidden_state"]
self.parent.assertFalse(torch.isnan(output).any().item())
@require_torch
class T5GemmaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (
(
T5GemmaModel,
T5GemmaForConditionalGeneration,
T5GemmaForSequenceClassification,
T5GemmaForTokenClassification,
)
if is_torch_available()
else ()
)
pipeline_model_mapping = (
{
"feature-extraction": T5GemmaModel,
"text-classification": T5GemmaForSequenceClassification,
"zero-shot": T5GemmaForSequenceClassification,
}
if is_torch_available()
else {}
)
_is_stateful = True
is_encoder_decoder = True
# used in `test_torch_compile_for_training`
_torch_compile_train_cls = T5GemmaForConditionalGeneration if is_torch_available() else None
# `t5gemma` will give warning or raise error if it is not `eager` during training.
_torch_compile_train_attn_implementation = "eager"
# won't fix
def setUp(self):
self.model_tester = T5GemmaModelTester(self)
self.config_tester = ConfigTester(
self,
config_class=T5GemmaConfig,
# For faking the testing.
hidden_size=37,
vocab_size=self.model_tester.vocab_size,
num_attention_heads=self.model_tester.num_attention_heads,
num_hidden_layers=self.model_tester.num_hidden_layers,
)
def is_pipeline_test_to_skip(
self,
pipeline_test_case_name,
config_class,
model_architecture,
tokenizer_name,
image_processor_name,
feature_extractor_name,
processor_name,
):
if tokenizer_name is None:
return True
if pipeline_test_case_name == "QAPipelineTests" and not tokenizer_name.endswith("Fast"):
return True
return False
def test_config(self):
# Skip `create_and_test_config_from_and_save_pretrained_composite` because the config has twice the same subconfig
self.config_tester.create_and_test_config_from_and_save_pretrained_composite = lambda: None
self.config_tester.run_common_tests()
def test_shift_right(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_prepare_lm_labels_via_shift_left(*config_and_inputs)
@unittest.skip("This was not properly written, submodules need the attribute to be overwritten")
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
    # Based on tests.models.t5.test_modeling_t5.T5ModelTest.test_inputs_embeds
    def test_inputs_embeds(self):
        """Models should accept `inputs_embeds` (and `decoder_inputs_embeds`) instead of ids."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in (T5GemmaModel, T5GemmaForConditionalGeneration):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))
            if not self.is_encoder_decoder:
                input_ids = inputs["input_ids"]
                del inputs["input_ids"]
            else:
                encoder_input_ids = inputs["input_ids"]
                # Fall back to the encoder ids when no explicit decoder ids were prepared.
                decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids)
                del inputs["input_ids"]
                inputs.pop("decoder_input_ids", None)
            wte = model.get_input_embeddings()
            if not self.is_encoder_decoder:
                inputs["inputs_embeds"] = wte(input_ids)
            else:
                inputs["inputs_embeds"] = wte(encoder_input_ids)
                inputs["decoder_inputs_embeds"] = wte(decoder_input_ids)
            # The forward pass only needs to succeed; no numerical check here.
            with torch.no_grad():
                model(**inputs)[0]
@unittest.skip("This was not properly written, submodules need the attribute to be overwritten")
def test_config_and_model_silu_gated(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
config = config_and_inputs[0]
config.feed_forward_proj = "gated-silu"
self.model_tester.create_and_check_model(*config_and_inputs)
def test_with_lm_head(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_with_lm_head(*config_and_inputs)
def test_with_sequence_classification_head(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_with_sequence_classification_head(*config_and_inputs)
@parameterized.expand([(True,), (False,)])
def test_encoderonly_sequence_classification_head(self, is_encoder_decoder):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_encoderonly_for_sequence_classification_head(
*config_and_inputs, is_encoder_decoder
)
@parameterized.expand([(True,), (False,)])
def test_encoderonly_token_classification_head(self, is_encoder_decoder):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_encoderonly_for_token_classification_head(
*config_and_inputs, is_encoder_decoder
)
@unittest.skip("This was not properly written, submodules need the attribute to be overwritten")
def test_decoder_model_past(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past(*config_and_inputs)
def test_decoder_model_past_with_attn_mask(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_attention_mask_past(*config_and_inputs)
# Based on tests.models.t5.test_modeling_t5.T5ModelTest.test_decoder_model_past_with_3d_attn_mask
def test_decoder_model_past_with_3d_attn_mask(self):
(
config,
input_ids,
decoder_input_ids,
attention_mask,
decoder_attention_mask,
lm_labels,
) = self.model_tester.prepare_config_and_inputs()
attention_mask = ids_tensor(
[self.model_tester.batch_size, self.model_tester.encoder_seq_length, self.model_tester.encoder_seq_length],
vocab_size=2,
)
decoder_attention_mask = ids_tensor(
[self.model_tester.batch_size, self.model_tester.seq_length, self.model_tester.seq_length],
vocab_size=2,
)
self.model_tester.create_and_check_decoder_model_attention_mask_past(
config,
input_ids,
decoder_input_ids,
attention_mask,
decoder_attention_mask,
lm_labels,
)
def test_decoder_model_past_with_large_inputs(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)
def test_generate_with_past_key_values(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_generate_with_past_key_values(*config_and_inputs)
@unittest.skipIf(torch_device == "cpu", "Can't do half precision")
def test_model_fp16_forward(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fp16_forward(*config_and_inputs)
# Based on tests.models.gemma.test_modeling_gemma.GemmaModelTest.test_Gemma_sequence_classification_model with Gemma -> T5Gemma (Add is_encoder_decoder option)
def test_T5Gemma_sequence_classification_model(self):
config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.num_labels = 3
input_ids = input_dict["input_ids"]
attention_mask = input_ids.ne(1).to(torch_device)
sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
for is_encoder_decoder in [True, False]:
model = (
self.model_tester.sequence_classification_class(config, is_encoder_decoder=is_encoder_decoder)
.to(torch_device)
.eval()
)
result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
# Based on tests.models.gemma.test_modeling_gemma.GemmaModelTest.test_Gemma_sequence_classification_model_for_single_label with Gemma -> T5Gemma (Add is_encoder_decoder option)
def test_T5Gemma_sequence_classification_model_for_single_label(self):
config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.num_labels = 3
config.problem_type = "single_label_classification"
input_ids = input_dict["input_ids"]
attention_mask = input_ids.ne(1).to(torch_device)
sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
for is_encoder_decoder in [True, False]:
model = (
self.model_tester.sequence_classification_class(config, is_encoder_decoder=is_encoder_decoder)
.to(torch_device)
.eval()
)
result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
# Based on tests.models.gemma.test_modeling_gemma.GemmaModelTest.test_Gemma_sequence_classification_model_for_multi_label with Gemma -> T5Gemma (Add is_encoder_decoder option)
def test_T5Gemma_sequence_classification_model_for_multi_label(self):
config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.num_labels = 3
config.problem_type = "multi_label_classification"
input_ids = input_dict["input_ids"]
attention_mask = input_ids.ne(1).to(torch_device)
sequence_labels = ids_tensor(
[self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
).to(torch.float)
for is_encoder_decoder in [True, False]:
model = (
self.model_tester.sequence_classification_class(config, is_encoder_decoder=is_encoder_decoder)
.to(torch_device)
.eval()
)
result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
# Based on tests.models.gemma.test_modeling_gemma.GemmaModelTest.test_Gemma_token_classification_model with Gemma -> T5Gemma (Add is_encoder_decoder option)
def test_T5Gemma_token_classification_model(self):
config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.num_labels = 3
input_ids = input_dict["input_ids"]
attention_mask = input_ids.ne(1).to(torch_device)
token_labels = ids_tensor([self.model_tester.batch_size, self.model_tester.seq_length], config.num_labels)
for is_encoder_decoder in [True, False]:
model = (
self.model_tester.token_classification_class(config, is_encoder_decoder=is_encoder_decoder)
.to(torch_device)
.eval()
)
result = model(input_ids, attention_mask=attention_mask, labels=token_labels)
self.assertEqual(
result.logits.shape,
(self.model_tester.batch_size, self.model_tester.seq_length, self.model_tester.num_labels),
)
    # Based on tests.models.gemma.test_modeling_gemma.GemmaModelTest.test_sdpa_equivalence
    # Add decoder_input_ids and adjust hidden states.
    @require_torch_accelerator
    def test_sdpa_equivalence(self):
        """SDPA and eager attention should produce matching final hidden states."""
        for model_class in self.all_model_classes:
            if not model_class._supports_sdpa:
                self.skipTest(reason="Model does not support SDPA")
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            model = model_class(config).to(torch_device)
            dummy_input = inputs_dict[model_class.main_input_name].to(torch_device)
            decoder_dummy_input = torch.ones_like(dummy_input)
            # Same weights, two attention backends.
            model.config._attn_implementation = "sdpa"
            states_sdpa = model(dummy_input, decoder_input_ids=decoder_dummy_input, output_hidden_states=True)
            model.config._attn_implementation = "eager"
            states_eager = model(dummy_input, decoder_input_ids=decoder_dummy_input, output_hidden_states=True)
            # Encoder-decoder outputs expose decoder_hidden_states; other heads expose hidden_states.
            if hasattr(states_sdpa, "decoder_hidden_states"):
                states_sdpa = states_sdpa.decoder_hidden_states[-1]
                states_eager = states_eager.decoder_hidden_states[-1]
            else:
                states_sdpa = states_sdpa.hidden_states[-1]
                states_eager = states_eager.hidden_states[-1]
            torch.testing.assert_close(states_sdpa, states_eager, atol=1e-5, rtol=1e-5)
    @unittest.skip("T5Gemma eager/FA2 attention outputs are expected to be different")
    def test_flash_attn_2_equivalence(self):
        # Deliberately disabled: FA2 and eager outputs are not numerically comparable here.
        pass
    # Based on tests.test_modeling_common.ModelTesterMixin.test_attention_outputs
    # Skip token classification
    @unittest.skip("This was not properly written, submodules need the attribute to be overwritten")
    def test_attention_outputs(self):
        """Check attention tensors are returned with the right counts and shapes (currently skipped)."""
        if not self.has_attentions:
            self.skipTest(reason="Model does not output attentions")
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        # force eager attention to support output attentions
        config._attn_implementation = "eager"
        seq_len = getattr(self.model_tester, "seq_length", None)
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
        decoder_key_length = getattr(self.model_tester, "decoder_key_length", decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)
        chunk_length = getattr(self.model_tester, "chunk_length", None)
        if chunk_length is not None and hasattr(self.model_tester, "num_hashes"):
            encoder_seq_length = encoder_seq_length * self.model_tester.num_hashes
        for model_class in self.all_model_classes:
            # Skip token and sequence classification.
            if model_class in [
                self.model_tester.token_classification_class,
                self.model_tester.sequence_classification_class,
            ]:
                continue
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            model = model_class._from_config(config, attn_implementation="eager")
            config = model.config
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config._attn_implementation = "eager"
            config.output_attentions = True
            model = model_class._from_config(config, attn_implementation="eager")
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            if chunk_length is not None:
                self.assertListEqual(
                    list(attentions[0].shape[-4:]),
                    [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length],
                )
            else:
                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
                )
            out_len = len(outputs)
            if self.is_encoder_decoder:
                correct_outlen = 5
                # loss is at first position
                if "labels" in inputs_dict:
                    correct_outlen += 1  # loss is added to beginning
                if "past_key_values" in outputs:
                    correct_outlen += 1  # past_key_values have been returned
                self.assertEqual(out_len, correct_outlen)
                # decoder attentions
                decoder_attentions = outputs.decoder_attentions
                self.assertIsInstance(decoder_attentions, (list, tuple))
                self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(decoder_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length],
                )
                # cross attentions
                cross_attentions = outputs.cross_attentions
                self.assertIsInstance(cross_attentions, (list, tuple))
                self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(cross_attentions[0].shape[-3:]),
                    [
                        self.model_tester.num_attention_heads,
                        decoder_seq_length,
                        encoder_key_length,
                    ],
                )
            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            if hasattr(self.model_tester, "num_hidden_states_types"):
                added_hidden_states = self.model_tester.num_hidden_states_types
            elif self.is_encoder_decoder:
                added_hidden_states = 2
            else:
                added_hidden_states = 1
            self.assertEqual(out_len + added_hidden_states, len(outputs))
            self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            if chunk_length is not None:
                self.assertListEqual(
                    list(self_attentions[0].shape[-4:]),
                    [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length],
                )
            else:
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
                )
    @unittest.skip("Mismatch issue doesn't exist in T5Gemma.")
    def test_load_with_mismatched_shapes(self):
        # Common-test override: the mismatched-shape loading scenario does not apply here.
        pass
    # Based on tests.generation.test_utils.GenerationTesterMixin.test_generate_continue_from_past_key_values
    # Updated decoder_attention_mask to consider the appended bos token
    @pytest.mark.generate
    def test_generate_continue_from_past_key_values(self):
        """Generating 3 tokens then 1 more via a resumed cache must match generating 4 at once."""
        # Tests that we can continue generating from past key values, returned from a previous `generate` call
        for model_class in self.all_generative_model_classes:
            if model_class == self.model_tester.token_classification_class:
                continue
            if any(model_name in model_class.__name__.lower() for model_name in ["imagegpt", "mllama"]):
                self.skipTest(reason="Won't fix: old model with unique inputs/caches/other")
            if any(model_name in model_class.__name__.lower() for model_name in ["umt5"]):
                self.skipTest(reason="TODO: needs modeling or test input preparation fixes for compatibility")
            config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
            if not hasattr(config.get_text_config(), "use_cache"):
                self.skipTest(reason=f"{model_class.__name__} doesn't support caching")
            # Let's make it always:
            # 1. use cache (for obvious reasons)
            # 2. generate to max length (which can be achieved by setting the eos token to an invalid value), which
            #    would make the test flaky (e.g. EOS is generated on iteration 1 on both generations, but the
            #    continuation would force it to generate beyond an EOS token)
            # 3. ignore `token_type_ids` for simplicity
            # 4. ignore `forced_eos_token_id`, which requires further manipulation of the continuation inputs and is
            #    active by default on some models
            # 5. ignore `encoder_no_repeat_ngram_size`, which is set by default in some encoder-decoder models. When
            #    we use their decoder as a stand-alone model, `encoder_no_repeat_ngram_size` actually prevents
            #    repetition exclusively from the prompt. This test relies on comparing one call vs 2 calls
            #    with cache, what is considered a prompt is different in the two cases.
            if "token_type_ids" in inputs:
                del inputs["token_type_ids"]
            model = model_class(config).to(torch_device)
            model.eval()
            # If "past_key_values" is not returned, skip the test (e.g. RWKV uses a different cache name and format)
            outputs = model(**inputs)
            if "past_key_values" not in outputs:
                self.skipTest(reason="This model doesn't return `past_key_values`")
            generate_kwargs = {
                "pad_token_id": -1,
                "eos_token_id": -1,
                "forced_eos_token_id": None,
                "encoder_no_repeat_ngram_size": 0,
                "use_cache": True,
                "do_sample": False,
                "return_dict_in_generate": True,
                "output_scores": True,
            }
            # Traditional way of generating text, with `return_dict_in_generate` to return the past key values
            outputs = model.generate(**inputs, **generate_kwargs, max_new_tokens=4)
            # Let's generate again, but passing the past key values in between (3 + 1 = 4 tokens). Note that the
            # inputs may need to be tweaked across `generate` calls (like the attention mask).
            outputs_cached = model.generate(**inputs, **generate_kwargs, max_new_tokens=3)
            # Continue from the tokens generated above, preparing the inputs accordingly
            inputs["past_key_values"] = outputs_cached.past_key_values
            new_attention_len = outputs_cached.sequences.shape[-1]
            # It must be encoder-decoder models
            self.assertTrue(config.is_encoder_decoder)
            inputs["decoder_input_ids"] = outputs_cached.sequences
            if "decoder_attention_mask" in inputs:
                decoder_attention_mask = inputs["decoder_attention_mask"]
                # Add BOS mask: the new sequence comes with a new BOS token, which is not included in the original inputs
                padding_tensor = torch.ones_like(decoder_attention_mask[:, :1])
                decoder_attention_mask = torch.cat([padding_tensor, decoder_attention_mask], dim=1)
                # Extend the mask to cover the three tokens generated in the first call.
                inputs["decoder_attention_mask"] = torch.nn.functional.pad(
                    decoder_attention_mask,
                    (0, new_attention_len - decoder_attention_mask.shape[1]),
                    mode="constant",
                    value=1,
                )
            first_caches_scores = outputs_cached.scores
            outputs_cached = model.generate(**inputs, **generate_kwargs, max_new_tokens=1)
            # Concatenate both calls' scores so they compare against the single 4-token call.
            full_cached_scores = first_caches_scores + outputs_cached.scores
            outputs_cached.scores = full_cached_scores
            # The two sets of generated text and past kv should be equal to each other
            assert_similar_generate_outputs(outputs, outputs_cached)
            self._check_caches_are_equal(outputs.past_key_values, outputs_cached.past_key_values)
    # Based on tests.test_modeling_common.ModelTesterMixin.test_inputs_embeds_matches_input_ids
    # Update encoder and decoder embeddings
    def test_inputs_embeds_matches_input_ids(self):
        """Passing embeddings directly must produce the same output as passing the ids."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        model_class = self.model_tester.model_class
        model = model_class(config)
        model.to(torch_device)
        model.eval()
        model_forward_args = inspect.signature(model.forward).parameters
        if "inputs_embeds" not in model_forward_args:
            self.skipTest(reason="This model doesn't use `inputs_embeds`")
        inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))
        pad_token_id = config.pad_token_id if config.pad_token_id is not None else 1
        encoder_embedding = model.get_encoder().get_input_embeddings()
        decoder_embedding = model.get_decoder().get_input_embeddings()
        encoder_input_ids = inputs["input_ids"]
        decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids)
        # Replace pad ids so padding-specific handling cannot make the two paths diverge.
        encoder_input_ids[encoder_input_ids == pad_token_id] = max(0, pad_token_id + 1)
        decoder_input_ids[decoder_input_ids == pad_token_id] = max(0, pad_token_id + 1)
        del inputs["input_ids"]
        inputs.pop("decoder_input_ids", None)
        inputs_embeds = encoder_embedding(encoder_input_ids)
        decoder_inputs_embeds = decoder_embedding(decoder_input_ids)
        with torch.no_grad():
            out_ids = model(input_ids=encoder_input_ids, decoder_input_ids=decoder_input_ids, **inputs)[0]
            out_embeds = model(inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, **inputs)[0]
        torch.testing.assert_close(out_embeds, out_ids)
    # Based on tests.test_modeling_common.ModelTesterMixin.test_hidden_states_output
    # Adjust token classification
    @unittest.skip("This was not properly written, submodules need the attribute to be overwritten")
    def test_hidden_states_output(self):
        """Hidden states are exposed per layer with the expected (seq, hidden) shapes (currently skipped)."""
        def check_hidden_states_output(inputs_dict, config, model_class):
            # Classification heads are instantiated encoder-only here.
            if model_class in [
                self.model_tester.token_classification_class,
                self.model_tester.sequence_classification_class,
            ]:
                model = model_class(config, is_encoder_decoder=False)
            else:
                model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
            )
            self.assertEqual(len(hidden_states), expected_num_layers)
            if hasattr(self.model_tester, "encoder_seq_length"):
                seq_length = self.model_tester.encoder_seq_length
                if hasattr(self.model_tester, "chunk_length") and self.model_tester.chunk_length > 1:
                    seq_length = seq_length * self.model_tester.chunk_length
            else:
                seq_length = self.model_tester.seq_length
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )
            if config.is_encoder_decoder:
                hidden_states = outputs.decoder_hidden_states
                self.assertIsInstance(hidden_states, (list, tuple))
                self.assertEqual(len(hidden_states), expected_num_layers)
                seq_len = getattr(self.model_tester, "seq_length", None)
                decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)
                self.assertListEqual(
                    list(hidden_states[0].shape[-2:]),
                    [decoder_seq_length, self.model_tester.hidden_size],
                )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
# Based on tests.models.t5.test_modeling_t5.T5ModelTest.test_custom_4d_attention_mask
# Excluding the final token from input_ids
def test_custom_4d_attention_mask(self):
for model_class in self.all_generative_model_classes:
config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
model = model_class(config).to(device=torch_device, dtype=torch.float32)
(
input_ids,
_,
input_ids_shared_prefix,
mask_shared_prefix,
_,
) = self._get_custom_4d_mask_test_data()
logits = model.forward(
decoder_input_ids=input_ids,
input_ids=input_ids[:, :-1],
).logits
# logits.shape == torch.Size([3, 4, ...])
logits_shared_prefix = model(
input_ids=input_ids[:1, :-1],
decoder_input_ids=input_ids_shared_prefix,
decoder_attention_mask=mask_shared_prefix,
)[0]
# logits_shared_prefix.shape == torch.Size([1, 6, ...])
out_last_tokens = logits[:, -1, :] # last tokens in each batch line
out_shared_prefix_last_tokens = logits_shared_prefix[0, -3:, :] # last three tokens
# comparing softmax-normalized logits:
normalized_0 = F.softmax(out_last_tokens)
normalized_1 = F.softmax(out_shared_prefix_last_tokens)
torch.testing.assert_close(normalized_0, normalized_1, rtol=1e-3, atol=1e-4)
    # Based on tests.test_modeling_common.ModelTesterMixin.test_flex_attention_with_grads
    # Update hidden size for encoder and decoder
    @require_torch_accelerator
    def test_flex_attention_with_grads(self):
        """Forward pass under flex attention must run without raising."""
        for model_class in self.all_model_classes:
            # TODO: raushan, fix for composite models after making VLMs support new attn API
            if not model_class._supports_flex_attn or self._is_composite:
                self.skipTest(reason="This model does not support flex attention")
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config._attn_implementation = "flex_attention"
            # Flex Attention cannot use dropout
            config.encoder.attention_dropout = 0
            config.decoder.attention_dropout = 0
            # Flex attention relies on triton on compilation
            # However, triton cannot handle hidden dimensions of less than 16
            # --> forcing at least a hidden dim of 16
            config.encoder.hidden_size *= max(
                16
                // getattr(
                    config.encoder, "head_dim", config.encoder.hidden_size // config.encoder.num_attention_heads
                ),
                1,
            )
            config.decoder.hidden_size *= max(
                16
                // getattr(
                    config.decoder, "head_dim", config.decoder.hidden_size // config.decoder.num_attention_heads
                ),
                1,
            )
            # Keep cross-attention dimensions consistent after resizing the encoder.
            config.decoder.cross_attention_hidden_size = config.encoder.hidden_size
            config.decoder.head_dim = max(16, config.decoder.head_dim)
            config.encoder.head_dim = max(16, config.encoder.head_dim)
            model = model_class(config).to(device=torch_device)
            self.assertTrue(model.config._attn_implementation == "flex_attention")
            # Elaborate workaround for encoder-decoder models as some do not specify their main input
            dummy_inputs = {model.main_input_name: inputs_dict[model.main_input_name].to(torch_device)}
            if config.is_encoder_decoder:
                dummy_inputs["decoder_input_ids"] = inputs_dict["decoder_input_ids"].to(torch_device)
                dummy_inputs["decoder_attention_mask"] = inputs_dict["decoder_attention_mask"].to(torch_device)
            # If this does not raise an error, the test passes (see https://github.com/huggingface/transformers/pull/35605)
            _ = model(**dummy_inputs)
@require_flash_attn
@require_torch_accelerator
@mark.flash_attn_test
def test_generate_beyond_sliding_window_with_flash_attn(self):
config, input_ids, _, attention_mask, _, _ = self.model_tester.prepare_config_and_inputs()
config.decoder.sliding_window = 2 # arbitrary but less than seq_len
model = self.model_tester.causal_lm_class(config=config).to(dtype=torch.float16, device=torch_device).eval()
model.set_attn_implementation("flash_attention_2")
# Only generate beyond prefill, we don't care about the output as it only checks for crashes
_ = model.generate(input_ids, attention_mask=attention_mask, max_new_tokens=2, use_cache=True)
class T5GemmaEncoderOnlyModelTester:
    """Builds small encoder-only T5Gemma configs and dummy inputs for the common tests.

    NOTE(review): attribute names (``batch_size``, ``seq_length``, ``hidden_size``, ...)
    appear to be read by the shared ``ModelTesterMixin`` helpers — keep them stable.
    """

    config_class = T5GemmaConfig
    module_config_class = T5GemmaModuleConfig
    if is_torch_available():
        model_class = T5GemmaEncoderModel

    def __init__(
        self,
        parent,
        batch_size=13,
        is_training=True,
        use_attention_mask=True,
        use_labels=True,
        vocab_size=99,
        seq_length=7,
        # default to encoders
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        num_key_value_heads=2,
        intermediate_size=37,
        # common
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        # special ids
        eos_token_id=1,
        pad_token_id=0,
        bos_token_id=2,
    ):
        """Store the hyper-parameters later used by ``get_config`` and the input builders."""
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        # encoder
        self.seq_length = seq_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.intermediate_size = intermediate_size
        # common
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        # derived: per-head width used when building the encoder config below
        self.head_dim = self.hidden_size // self.num_attention_heads
        # special ids
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def get_encoder_config(self):
        """Build the encoder module config (``is_decoder=False``) from the stored values."""
        return self.module_config_class(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            num_key_value_heads=self.num_key_value_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            head_dim=self.head_dim,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
        )

    def get_config(self):
        """Build a top-level encoder-only ``T5GemmaConfig`` (no decoder)."""
        return self.config_class(
            encoder=self.get_encoder_config(),
            decoder=None,
            is_encoder_decoder=False,
            # Used for generation test.
            num_attention_heads=self.num_attention_heads,
            num_key_value_heads=self.num_key_value_heads,
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
        )

    def prepare_config_and_inputs(self):
        """Return ``(config, input_ids, attention_mask)`` with BOS ids filtered out."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        # Remove BOS symbols from inputs.
        input_ids = torch.where(input_ids == self.bos_token_id, 42, input_ids)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        config = self.get_config()
        return (
            config,
            input_ids,
            attention_mask,
        )

    def create_and_check_model(
        self,
        config,
        input_ids,
        attention_mask,
    ):
        """Forward the encoder model and check the hidden-state output shape."""
        model = self.model_class(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids=input_ids,
            attention_mask=attention_mask,
        )
        # Second call without the mask exercises the default-mask path.
        result = model(input_ids=input_ids)
        encoder_output = result.last_hidden_state
        self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_fp16_forward(
        self,
        config,
        input_ids,
        attention_mask,
    ):
        """Half-precision forward pass must not produce NaNs."""
        model = self.model_class(config=config).to(torch_device).half().eval()
        output = model(input_ids, attention_mask=attention_mask)["last_hidden_state"]
        self.parent.assertFalse(torch.isnan(output).any().item())

    def create_and_check_with_token_classification_head(
        self,
        config,
        input_ids,
        attention_mask,
    ):
        """Token-classification head: check logits shape and that the loss is a scalar."""
        labels = torch.tensor([1] * self.seq_length * self.batch_size, dtype=torch.long, device=torch_device)
        model = T5GemmaForTokenClassification(config=config, is_encoder_decoder=False).to(torch_device).eval()
        outputs = model(
            input_ids=input_ids,
            labels=labels,
            attention_mask=attention_mask,
        )
        self.parent.assertEqual(outputs["logits"].size(), (self.batch_size, self.seq_length, config.num_labels))
        self.parent.assertEqual(outputs["loss"].size(), ())

    def prepare_config_and_inputs_for_common(self):
        """Adapter for the common test suite: return ``(config, inputs_dict)``."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            attention_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict
@require_torch
class T5GemmaEncoderOnlyModelTest(ModelTesterMixin, unittest.TestCase):
    """Runs the shared ``ModelTesterMixin`` suite over the encoder-only T5Gemma models."""

    all_model_classes = (T5GemmaEncoderModel, T5GemmaForTokenClassification) if is_torch_available() else ()
    test_resize_embeddings = False
    _is_stateful = True
    is_encoder_decoder = False

    # won't fix
    def setUp(self):
        """Create the model tester and a ConfigTester wired to small, fast dimensions."""
        self.model_tester = T5GemmaEncoderOnlyModelTester(self)
        self.config_tester = ConfigTester(
            self,
            config_class=T5GemmaConfig,
            # For faking the testing.
            hidden_size=37,
            vocab_size=self.model_tester.vocab_size,
            num_attention_heads=self.model_tester.num_attention_heads,
            num_hidden_layers=self.model_tester.num_hidden_layers,
        )

    def test_config(self):
        """Run the common config tests, minus the composite save/load round-trip."""
        # Skip `create_and_test_config_from_and_save_pretrained_composite` because the config has twice the same subconfig
        self.config_tester.create_and_test_config_from_and_save_pretrained_composite = lambda: None
        self.config_tester.run_common_tests()

    @unittest.skip("This was not properly written, submodules need the attribute to be overwritten")
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @unittest.skipIf(torch_device == "cpu", "Can't do half precision")
    def test_model_fp16_forward(self):
        """fp16 forward must be NaN-free (accelerator only)."""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_fp16_forward(*config_and_inputs)

    def test_with_token_classification_head(self):
        """Token-classification head produces correctly shaped logits and a scalar loss."""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_with_token_classification_head(*config_and_inputs)

    @unittest.skip(reason="This module does not support standalone training")
    def test_training(self):
        pass

    @unittest.skip(reason="This module does not support standalone training")
    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(reason="This module does not support standalone training")
    def test_training_gradient_checkpointing_use_reentrant_false(self):
        pass

    @unittest.skip(reason="This module does not support standalone training")
    def test_training_gradient_checkpointing_use_reentrant_true(self):
        pass

    # Based on tests.test_modeling_common.ModelTesterMixin.test_flex_attention_with_grads
    # Update hidden size for encoder
    @require_torch_accelerator
    def test_flex_attention_with_grads(self):
        """Smoke-test a flex-attention forward pass; passing means no exception was raised."""
        for model_class in self.all_model_classes:
            # TODO: raushan, fix for composite models after making VLMs support new attn API
            if not model_class._supports_flex_attn or self._is_composite:
                self.skipTest(reason="This model does not support flex attention")
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config._attn_implementation = "flex_attention"
            # Flex Attention cannot use dropout
            config.encoder.attention_dropout = 0
            # Flex attention relies on triton on compilation
            # However, triton cannot handle hidden dimensions of less than 16
            # --> forcing at least a hidden dim of 16
            config.encoder.hidden_size *= max(
                16
                // getattr(
                    config.encoder, "head_dim", config.encoder.hidden_size // config.encoder.num_attention_heads
                ),
                1,
            )
            config.encoder.head_dim = max(16, config.encoder.head_dim)
            model = model_class(config).to(device=torch_device)
            self.assertTrue(model.config._attn_implementation == "flex_attention")
            # Elaborate workaround for encoder-decoder models as some do not specify their main input
            dummy_inputs = {model.main_input_name: inputs_dict[model.main_input_name].to(torch_device)}
            # If this does not raise an error, the test passes (see https://github.com/huggingface/transformers/pull/35605)
            _ = model(**dummy_inputs)
# Based on tests.models.t5.test_modeling_t5.TestAsymmetricT5
# Adapted for T5Gemma
@require_torch
class TestAsymmetricT5Gemma(unittest.TestCase):
    """Checks that encoder and decoder depths can be configured independently."""

    def build_model_and_check_forward_pass(self, **kwargs):
        """Run one labeled forward pass and return the inner ``model.model``."""
        tester = T5GemmaModelTester(self, **kwargs)
        config, *inputs = tester.prepare_config_and_inputs()
        input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels = inputs

        model = T5GemmaForConditionalGeneration(config=config).to(torch_device).eval()
        outputs = model(
            input_ids=input_ids,
            decoder_input_ids=decoder_input_ids,
            decoder_attention_mask=decoder_attention_mask,
            labels=lm_labels,
        )

        # Same expectations as the original T5 test this is based on.
        assert len(outputs) == 5
        assert outputs["logits"].size() == (tester.batch_size, tester.seq_length, tester.vocab_size)
        assert outputs["loss"].size() == ()
        return model.model

    def test_small_decoder(self):
        inner = self.build_model_and_check_forward_pass(num_hidden_layers=1, encoder_num_hidden_layers=2)
        assert len(inner.encoder.layers) == 2
        assert len(inner.decoder.layers) == 1

    def test_defaulting_to_symmetry(self):
        inner = self.build_model_and_check_forward_pass(num_hidden_layers=2, encoder_num_hidden_layers=2)
        assert len(inner.decoder.layers) == len(inner.encoder.layers) == 2
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/t5gemma/test_modeling_t5gemma.py",
"license": "Apache License 2.0",
"lines": 1364,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:src/transformers/models/glm4v/convert_glm4v_mgt_weights_to_hf.py | # Copyright 2025 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import os
import pickle
import re
from pathlib import Path
import torch
from safetensors.torch import save_file
# Avoid Using Megatron Lib
class UnpicklerWrapper(pickle.Unpickler):
    """Unpickler that replaces Megatron/GLM training-framework classes with inert stubs
    so checkpoints can be loaded without those libraries installed."""

    def find_class(self, mod_name, name):
        if any(mod_name.startswith(prefix) for prefix in ("megatron", "glm", "__main__")):
            # Stand-in that swallows any constructor arguments.
            class DummyClass:
                def __init__(self, *args, **kwargs):
                    pass

            return DummyClass
        return super().find_class(mod_name, name)


# Make torch.load (which uses pickle.Unpickler internally) pick up the stubbing wrapper.
pickle.Unpickler = UnpicklerWrapper
def dict_access_multi(a_dict, keys):
    """Walk ``a_dict`` through the successive ``keys`` and return the nested value.

    An empty key sequence returns ``a_dict`` itself.
    """
    node = a_dict
    for key in keys:
        node = node[key]
    return node
def _build_neox_to_llama_perm(rotary_dim: int) -> torch.Tensor:
half = rotary_dim // 2
perm = torch.empty(rotary_dim, dtype=torch.long)
perm[0::2] = torch.arange(0, half)
perm[1::2] = torch.arange(half, rotary_dim)
return perm
def _apply_rope_permute(q_or_k: torch.Tensor, blocks: int, head_dim: int, rotary_dim: int, neox_to_llama: bool = True):
    """Reorder the first ``rotary_dim`` channels of every head in a Q/K weight or bias.

    ``q_or_k`` is viewed as ``blocks`` heads of ``head_dim`` channels each; within each
    head, channels ``[0, rotary_dim)`` are permuted between the NeoX and LLaMA rotary
    layouts. With ``neox_to_llama=True`` the forward permutation is applied; otherwise
    its inverse (built explicitly below) is applied.

    NOTE(review): the permutation is written through a view, so ``q_or_k`` itself is
    mutated in place as well as returned — callers must not rely on the input being
    unchanged. Assumes ``q_or_k`` is contiguous with leading dim ``blocks * head_dim``
    (2-D weight) or exactly ``blocks * head_dim`` elements (1-D bias) — TODO confirm.
    """
    # Nothing to do when no channels are rotary.
    if rotary_dim == 0:
        return q_or_k
    if neox_to_llama:
        perm = _build_neox_to_llama_perm(rotary_dim).to(q_or_k.device)
    else:
        # Build the NeoX->LLaMA permutation locally, then invert it by scattering
        # positions: inv[perm[i]] = i.
        perm = torch.empty(rotary_dim, dtype=torch.long, device=q_or_k.device)
        half = rotary_dim // 2
        perm[0::2] = torch.arange(0, half, device=q_or_k.device)
        perm[1::2] = torch.arange(half, rotary_dim, device=q_or_k.device)
        inv = torch.empty_like(perm)
        inv[perm] = torch.arange(rotary_dim, device=q_or_k.device)
        perm = inv
    if q_or_k.dim() == 2:
        # 2-D weight: (blocks * head_dim, in_features) -> (blocks, head_dim, in_features).
        h = q_or_k.view(blocks, head_dim, -1)
        # Advanced indexing on the RHS copies first, so this in-place write is safe.
        h[:, :rotary_dim, ...] = h[:, perm, ...]
        return h.reshape(q_or_k.shape)
    else:
        # 1-D bias: (blocks * head_dim,) -> (blocks, head_dim).
        h = q_or_k.view(blocks, head_dim)
        h[:, :rotary_dim] = h[:, perm]
        return h.reshape(q_or_k.shape)
def merge_qkv(
    sd_list,
    original_tp,
    num_attention_heads,
    multi_query_group_num,
    attention_dim,
    interleaved_qkv,
    convert_neox_to_llama: bool = True,
):
    """Merge per-TP-rank fused QKV shards into full Q, K and V tensors.

    With ``interleaved_qkv`` each shard stores KV groups as contiguous
    [q-heads | k | v] blocks; otherwise the shard is a plain [Q | K | V]
    concatenation along dim 0. Optionally re-permutes the rotary channels from
    the NeoX layout to the LLaMA layout afterwards.
    """
    rotary_dim = attention_dim // 2
    heads_per_group = num_attention_heads // multi_query_group_num
    group_size = (heads_per_group + 2) * attention_dim

    q_chunks, k_chunks, v_chunks = [], [], []
    for shard in sd_list:
        if interleaved_qkv:
            tail = shard.shape[1:]
            grouped = shard.view((multi_query_group_num // original_tp, group_size) + tail)
            q_part, k_part, v_part = grouped.split(
                [heads_per_group * attention_dim, attention_dim, attention_dim],
                dim=1,
            )
            q_chunks.append(q_part.reshape((-1,) + tail).clone())
            k_chunks.append(k_part.reshape((-1,) + tail).clone())
            v_chunks.append(v_part.reshape((-1,) + tail).clone())
        else:
            q_part, k_part, v_part = shard.split(
                [
                    num_attention_heads * attention_dim // original_tp,
                    multi_query_group_num * attention_dim // original_tp,
                    multi_query_group_num * attention_dim // original_tp,
                ],
                dim=0,
            )
            q_chunks.append(q_part.clone())
            k_chunks.append(k_part.clone())
            v_chunks.append(v_part.clone())

    q = torch.cat(q_chunks, dim=0)
    k = torch.cat(k_chunks, dim=0)
    v = torch.cat(v_chunks, dim=0)

    if convert_neox_to_llama and rotary_dim > 0:
        q = _apply_rope_permute(q, num_attention_heads, attention_dim, rotary_dim, neox_to_llama=True)
        k = _apply_rope_permute(k, multi_query_group_num, attention_dim, rotary_dim, neox_to_llama=True)
    return q, k, v
def merge_qkv_vit(sd_list, original_tp, num_attention_heads, multi_query_group_num, attention_dim):
    """Merge per-TP-rank interleaved QKV shards of the vision tower into Q, K, V.

    Each shard holds ``multi_query_group_num // original_tp`` KV groups laid out as
    contiguous [q-heads | k | v] blocks along dim 0.
    """
    heads_per_group = num_attention_heads // multi_query_group_num
    group_size = (heads_per_group + 2) * attention_dim
    split_sizes = [heads_per_group * attention_dim, attention_dim, attention_dim]

    q_parts, k_parts, v_parts = [], [], []
    for shard in sd_list:
        tail = shard.shape[1:]
        grouped = shard.view((multi_query_group_num // original_tp, group_size) + tail)
        q_part, k_part, v_part = grouped.split(split_sizes, dim=1)
        q_parts.append(q_part.reshape((-1,) + tail).clone())
        k_parts.append(k_part.reshape((-1,) + tail).clone())
        v_parts.append(v_part.reshape((-1,) + tail).clone())

    q = torch.cat(q_parts, dim=0)
    k = torch.cat(k_parts, dim=0)
    v = torch.cat(v_parts, dim=0)
    return q, k, v
def merge_glu(sd_list):
    """Merge fused GLU shards: all gate halves first, then all up halves, along dim 0."""
    gate_halves = [shard.chunk(2, dim=0)[0].clone() for shard in sd_list]
    up_halves = [shard.chunk(2, dim=0)[1].clone() for shard in sd_list]
    return torch.cat(gate_halves + up_halves, dim=0)
def merge_glu_vit(sd_list, original_tp=None):
    """Split fused GLU shard(s) into separate (gate_proj, up_proj) tensors.

    Accepts either a single tensor or a list of per-rank shards; shards are
    concatenated along dim 0 per projection. ``original_tp`` is unused (kept for
    signature compatibility with the other merge helpers).
    """
    shards = sd_list if isinstance(sd_list, list) else [sd_list]
    halves = [shard.chunk(2, dim=0) for shard in shards]
    gate_proj = torch.cat([gate.clone() for gate, _ in halves], dim=0)
    up_proj = torch.cat([up.clone() for _, up in halves], dim=0)
    return gate_proj, up_proj
def split_glu(sd, cnt, idx):
    """Take TP slice ``idx`` of ``cnt`` from a fused GLU weight, keeping the gate/up
    halves paired in the output."""
    gate_half, up_half = sd.chunk(2, dim=0)
    gate_slice = gate_half.chunk(cnt, dim=0)[idx].clone()
    up_slice = up_half.chunk(cnt, dim=0)[idx].clone()
    return torch.cat((gate_slice, up_slice), dim=0)
def merge_tensors(
    tp_sd,
    keys,
    original_tp,
    target_tp,
    current_tp,
    slice_dim=None,
    merge_fn=None,
):
    """Collect the shards belonging to one target-TP rank and merge them.

    ``original_tp // target_tp`` consecutive source ranks map onto target rank
    ``current_tp``. Shards are merged either by concatenation along ``slice_dim``
    or by a caller-supplied ``merge_fn`` (exactly one must be given).
    """
    shards_per_target = original_tp // target_tp
    base = shards_per_target * current_tp
    shards = [dict_access_multi(tp_sd[base + i], keys) for i in range(shards_per_target)]
    if slice_dim is not None:
        return torch.cat(shards, dim=slice_dim)
    assert merge_fn is not None
    return merge_fn(shards)
def save_sharded_model(state_dict, output_path, max_shard_size_gb=5, num_layers=40, vision_num_layers=24):
    """Write ``state_dict`` as layer-grouped sharded safetensors files plus an index.

    Parameters are grouped per LLM layer, per vision block, and a final "others"
    bucket, so that one layer never straddles two shards. Shards are capped at
    ``max_shard_size_gb`` and a ``model.safetensors.index.json`` mapping every
    parameter to its shard is written alongside them.

    Returns the number of shards written.
    """
    os.makedirs(output_path, exist_ok=True)

    # --- bucket parameters by layer -------------------------------------------------
    layered_dict = {}
    for layer_idx in range(num_layers):
        layer_key = f"layer_{layer_idx}"
        layered_dict[layer_key] = {}
        for key, value in state_dict.items():
            if f"model.language_model.layers.{layer_idx}." in key:
                # Unwrap single-element lists left over from the TP/EP merge step.
                if isinstance(value, list):
                    assert len(value) == 1, f"{key} {value}"
                    value = value[0]
                layered_dict[layer_key][key] = value
    for layer_idx in range(vision_num_layers):
        layer_key = f"visual_layer_{layer_idx}"
        layered_dict[layer_key] = {}
        for key, value in state_dict.items():
            if f"model.visual.blocks.{layer_idx}." in key:
                # Fix: unwrap single-element lists here too, consistent with the
                # language-model branch above — a listed value would otherwise break
                # the numel()/element_size() accounting below.
                if isinstance(value, list):
                    assert len(value) == 1, f"{key} {value}"
                    value = value[0]
                layered_dict[layer_key][key] = value
    # Everything not belonging to a numbered LLM layer or vision block.
    layered_dict["others"] = {}
    for key, value in state_dict.items():
        if not any(f"model.language_model.layers.{i}." in key for i in range(num_layers)) and not any(
            f"model.visual.blocks.{i}." in key for i in range(vision_num_layers)
        ):
            layered_dict["others"][key] = value

    # --- fixed layer ordering: LLM layers, vision blocks, then the rest -------------
    layer_order = []
    for i in range(num_layers):
        layer_order.append(f"layer_{i}")
    for i in range(vision_num_layers):
        layer_order.append(f"visual_layer_{i}")
    layer_order.append("others")

    # --- greedily pack whole layers into shards up to the size cap ------------------
    param_sizes = {}
    shards = []
    current_shard = {}
    current_shard_size = 0
    max_shard_size_bytes = max_shard_size_gb * 1024 * 1024 * 1024
    for layer_key in layer_order:
        layer_weights = layered_dict[layer_key]
        layer_size = sum(param.numel() * param.element_size() for param in layer_weights.values())
        if current_shard_size + layer_size > max_shard_size_bytes and current_shard:
            shards.append(current_shard)
            current_shard = {}
            current_shard_size = 0
        for param_name, param in layer_weights.items():
            current_shard[param_name] = param
            current_shard_size += param.numel() * param.element_size()
            param_sizes[param_name] = param.numel() * param.element_size()
    if current_shard:
        shards.append(current_shard)

    # --- write shards and the weight-map index --------------------------------------
    index_dict = {"metadata": {"total_size": sum(param_sizes.values())}, "weight_map": {}}
    for i, shard in enumerate(shards):
        shard_filename = f"model-{i + 1:05d}-of-{len(shards):05d}.safetensors"
        shard_path = os.path.join(output_path, shard_filename)
        for param_name in shard:
            index_dict["weight_map"][param_name] = shard_filename
        save_file(shard, shard_path, metadata={"format": "pt"})
        print(f"Saved shard {i + 1}/{len(shards)}: {shard_filename}")
        print(f"  Shard size: {sum(p.numel() * p.element_size() for p in shard.values()) / (1024**3):.2f} GB")
        print(f"  Keys in shard: {len(shard)}")
    index_path = os.path.join(output_path, "model.safetensors.index.json")
    with open(index_path, "w") as f:
        json.dump(index_dict, f, indent=2)
    return len(shards)
def merge_tp_weights(model_path, output_path, vllm_config_path=None):
origin_tp, origin_ep, origin_pp = -1, -1, -1
check_ep_or_pp_later = False
for item in Path(model_path).iterdir():
if item.is_dir():
match = re.match(r"mp_rank_(\d{2})(?:_(\d{3}))?(?:_(\d{3}))?", item.name)
if match:
groups = match.groups()
tp = int(groups[0])
origin_tp = max(origin_tp, tp + 1)
# maybe TP-EP or TP-PP, need check later
if groups[1] is not None and groups[2] is None:
pp = int(groups[1])
origin_pp = max(origin_pp, pp + 1)
origin_ep = 1
check_ep_or_pp_later = True
elif groups[1] is not None and groups[2] is not None:
pp = int(groups[1])
ep = int(groups[2])
origin_pp = max(origin_pp, pp + 1)
origin_ep = max(origin_ep, ep + 1)
else:
origin_ep = 1
origin_pp = 1
tensor_names_by_file = {}
mgt_sd = {}
for item in Path(model_path).iterdir():
if item.is_dir():
match = re.match(r"mp_rank_(\d{2})(?:_(\d{3}))?(?:_(\d{3}))?$", item.name)
if match:
groups = match.groups()
tp = int(groups[0])
pp = int(groups[1]) if groups[1] is not None else 0
ep = int(groups[2]) if groups[2] is not None else 0
file_path = item / "model_optim_rng.pt"
assert file_path.exists(), f"model_optim_rng.pt not found in {item}"
file_sd = torch.load(file_path, map_location="cpu", weights_only=False)
for k in list(file_sd.keys()):
if "_extra_state" in k or "dummy_parameter" in k:
file_sd.pop(k)
mgt_sd[(tp, pp, ep)] = file_sd
tensor_names = set()
if "model" in file_sd:
for key in file_sd["model"].keys():
tensor_names.add(key)
tensor_names_by_file[(tp, pp, ep)] = tensor_names
change_pp_to_ep = False
if check_ep_or_pp_later:
prefix_distribution = {}
for (tp, pp, ep), prefixes in tensor_names_by_file.items():
for prefix in prefixes:
if prefix not in prefix_distribution:
prefix_distribution[prefix] = set()
prefix_distribution[prefix].add((tp, pp, ep))
for prefix, locations in prefix_distribution.items():
if len(locations) > 1:
pp_values = {loc[1] for loc in locations}
if len(pp_values) > 1:
print(f"find '{prefix}' in multi ranks {pp_values} the parallelism should be TP-EP")
origin_ep = origin_pp
origin_pp = 1
change_pp_to_ep = True
break
else:
print(f"find '{prefix}' only in one ep, parallelism should be TP-PP")
break
print(f"Detected tensor parallel degree TP={origin_tp} EP={origin_ep} PP={origin_pp}")
assert max(origin_tp, origin_ep) * origin_pp == len(tensor_names_by_file), "maybe some problem in origin weight"
organized_sd = {}
for (tp, pp, ep), file_sd in mgt_sd.items():
if change_pp_to_ep:
pp, ep = ep, pp
organized_sd.setdefault(pp, {})
organized_sd[pp][(ep, tp)] = file_sd
find_vpp = "model0" in file_sd
# support VPP, if each pp rank has n vpp blocks, we will treat the original model
# was parallel as pp n * origin_pp
if find_vpp:
organized_sd_vpp = {}
for i in range(origin_pp):
for (ep, tp), file_sd in organized_sd[i].items():
model_keys = sorted(
[key for key in file_sd.keys() if key.startswith("model") and key[5:].isdigit()],
key=lambda x: int(x[5:]),
)
vp_blocks = len(model_keys)
for idx, key in enumerate(model_keys):
assert key in file_sd, f"model {key} not found"
organized_sd_vpp.setdefault(idx * origin_pp + i, {})
organized_sd_vpp[idx * origin_pp + i][(ep, tp)] = {"model": file_sd[key]}
origin_pp = origin_pp * vp_blocks
organized_sd = organized_sd_vpp
ignore_list = ["_extra_state", "dummy_parameter"]
layer_share_list = [
"norm",
"conv3d",
"downsample",
"router",
"mlp.linear_fc2.bias",
"self_attention.linear_proj.bias",
"position_embeddings",
]
full_weights = {}
vit_layer_offset = 0
llm_layer_offset = 0
llm_layer_pattern = re.compile(r"^(decoder\.layers\.)(\d+)(\..*)$")
vit_layer_pattern = re.compile(r"^(vision_model\.transformer\.layers\.)(\d+)(\..*)$")
for pp in sorted(organized_sd.keys()):
pp_dict = organized_sd[pp]
next_llm_layer_offset = llm_layer_offset
next_vit_layer_offset = vit_layer_offset
ep_map = {}
tp_map = {}
tp_seen = set()
for (ep, tp), item in pp_dict.items():
if tp not in tp_seen:
tp_seen.add(tp)
tp_map[tp] = item
ep_map[ep] = item
for tp in sorted(tp_map.keys()):
sd = tp_map[tp]
for full_name, tensor in sd["model"].items():
if any(x in full_name for x in ignore_list):
continue
llm_name_match = llm_layer_pattern.match(full_name)
if llm_name_match:
# Use a closure to avoid global variable issues
def offset_layer(x, offset=llm_layer_offset):
nonlocal next_llm_layer_offset
_real_layer = int(x.group(2)) + offset
next_llm_layer_offset = max(next_llm_layer_offset, _real_layer + 1)
return f"{x.group(1)}{_real_layer}{x.group(3)}"
full_name = llm_layer_pattern.sub(offset_layer, full_name)
vit_name_match = vit_layer_pattern.match(full_name)
if vit_name_match:
# Use a closure to avoid global variable issues
def offset_layer(x, offset=vit_layer_offset):
nonlocal next_vit_layer_offset
_real_layer = int(x.group(2)) + offset
next_vit_layer_offset = max(next_vit_layer_offset, _real_layer + 1)
return f"{x.group(1)}{_real_layer}{x.group(3)}"
full_name = vit_layer_pattern.sub(offset_layer, full_name)
if layer_share_list and any(x in full_name for x in layer_share_list):
if full_name not in full_weights:
full_weights[full_name] = tensor
else:
assert torch.equal(tensor, full_weights[full_name]), (
f"detect diff param in tp named: {full_name}"
)
elif not re.search(r"\.experts\.", full_name):
full_weights.setdefault(full_name, [None for _ in range(origin_tp)])
full_weights[full_name][tp] = tensor
for ep in sorted(ep_map.keys()):
sd = ep_map[ep]
for full_name, tensor in sd["model"].items():
if any(x in full_name for x in ignore_list):
continue
name_match = llm_layer_pattern.match(full_name)
if name_match:
# Use a closure to avoid global variable issues
def offset_layer(x, offset=llm_layer_offset):
nonlocal next_llm_layer_offset
_real_layer = int(x.group(2)) + offset
next_llm_layer_offset = max(next_llm_layer_offset, _real_layer + 1)
return f"{x.group(1)}{_real_layer}{x.group(3)}"
full_name = llm_layer_pattern.sub(offset_layer, full_name)
if re.search(r"\.experts\.", full_name):
full_weights.setdefault(full_name, [None for _ in range(origin_ep)])
full_weights[full_name][ep] = tensor
llm_layer_offset = next_llm_layer_offset
vit_layer_offset = next_vit_layer_offset
for k in sorted(full_weights.keys()):
item = full_weights[k]
if isinstance(item, list):
print(f"{k} {len(item)} {item[0].shape} {item[0].dtype}", flush=True)
else:
print(f"{k} {item.shape} {item.dtype}", flush=True)
print(f"Loading vLLM configuration file: {vllm_config_path}")
with open(vllm_config_path, "r") as f:
model_config = json.load(f)
text_config = model_config.get("text_config", {})
vision_config = model_config.get("vision_config", {})
num_layers = text_config.get("num_hidden_layers", 40)
num_heads = text_config.get("num_attention_heads", 32)
num_kv_heads = text_config.get("num_key_value_heads", 2)
hidden_size = model_config.get("hidden_size", 4096)
head_dim = model_config.get("attention_dim", hidden_size // num_heads)
vision_num_layers = vision_config.get("depth", 24)
vit_n_head = vision_config.get("num_heads", 12)
print(
f"Model parameters: num_layers={num_layers}, vision_num_layers={vision_num_layers}, "
f"num_heads={num_heads}, multi_query_group_num={num_kv_heads}"
)
print("Merging tensor parallel weights...")
interleaved_qkv = True
num_attention_heads = num_heads
multi_query_group_num = num_kv_heads
attention_dim = head_dim
complete_state_dict = {}
# LLM
layer_i = 0
while f"decoder.layers.{layer_i}.self_attention.linear_qkv.layer_norm_weight" in full_weights:
if f"decoder.layers.{layer_i}.self_attention.linear_qkv.layer_norm_weight" in full_weights:
complete_state_dict[f"model.language_model.layers.{layer_i}.input_layernorm.weight"] = full_weights[
f"decoder.layers.{layer_i}.self_attention.linear_qkv.layer_norm_weight"
]
if f"decoder.layers.{layer_i}.pre_mlp_layernorm.weight" in full_weights:
complete_state_dict[f"model.language_model.layers.{layer_i}.post_attention_layernorm.weight"] = (
full_weights[f"decoder.layers.{layer_i}.pre_mlp_layernorm.weight"]
)
elif f"decoder.layers.{layer_i}.mlp.linear_fc1.layer_norm_weight" in full_weights:
complete_state_dict[f"model.language_model.layers.{layer_i}.post_attention_layernorm.weight"] = (
full_weights[f"decoder.layers.{layer_i}.mlp.linear_fc1.layer_norm_weight"]
)
# GLM-4.1V Only
if f"decoder.layers.{layer_i}.post_mlp_layernorm.weight" in full_weights:
complete_state_dict[f"model.language_model.layers.{layer_i}.post_mlp_layernorm.weight"] = full_weights[
f"decoder.layers.{layer_i}.post_mlp_layernorm.weight"
]
if f"decoder.layers.{layer_i}.post_self_attn_layernorm.weight" in full_weights:
complete_state_dict[f"model.language_model.layers.{layer_i}.post_self_attn_layernorm.weight"] = (
full_weights[f"decoder.layers.{layer_i}.post_self_attn_layernorm.weight"]
)
q, k, v = merge_qkv(
sd_list=full_weights[f"decoder.layers.{layer_i}.self_attention.linear_qkv.weight"],
original_tp=origin_tp,
num_attention_heads=num_attention_heads,
multi_query_group_num=multi_query_group_num,
attention_dim=attention_dim,
interleaved_qkv=interleaved_qkv,
)
complete_state_dict[f"model.language_model.layers.{layer_i}.self_attn.q_proj.weight"] = q.clone()
complete_state_dict[f"model.language_model.layers.{layer_i}.self_attn.k_proj.weight"] = k.clone()
complete_state_dict[f"model.language_model.layers.{layer_i}.self_attn.v_proj.weight"] = v.clone()
if f"decoder.layers.{layer_i}.self_attention.linear_qkv.bias" in full_weights:
q_bias, k_bias, v_bias = merge_qkv(
sd_list=full_weights[f"decoder.layers.{layer_i}.self_attention.linear_qkv.bias"],
original_tp=origin_tp,
num_attention_heads=num_attention_heads,
multi_query_group_num=multi_query_group_num,
attention_dim=attention_dim,
interleaved_qkv=interleaved_qkv,
)
complete_state_dict[f"model.language_model.layers.{layer_i}.self_attn.q_proj.bias"] = q_bias.clone()
complete_state_dict[f"model.language_model.layers.{layer_i}.self_attn.k_proj.bias"] = k_bias.clone()
complete_state_dict[f"model.language_model.layers.{layer_i}.self_attn.v_proj.bias"] = v_bias.clone()
o_proj = torch.cat(full_weights[f"decoder.layers.{layer_i}.self_attention.linear_proj.weight"], dim=1)
complete_state_dict[f"model.language_model.layers.{layer_i}.self_attn.o_proj.weight"] = o_proj.clone()
# MLP - Use gate_up_proj
gate_up_proj = torch.cat(full_weights[f"decoder.layers.{layer_i}.mlp.linear_fc1.weight"], dim=0)
complete_state_dict[f"model.language_model.layers.{layer_i}.mlp.gate_up_proj.weight"] = gate_up_proj.clone()
complete_state_dict[f"model.language_model.layers.{layer_i}.mlp.down_proj.weight"] = torch.cat(
full_weights[f"decoder.layers.{layer_i}.mlp.linear_fc2.weight"], dim=1
)
layer_i += 1
# Embedd Model, LM Head, and Norm
embed_tokens = torch.cat(full_weights["embedding.word_embeddings.weight"], dim=0)
complete_state_dict["model.language_model.embed_tokens.weight"] = embed_tokens.clone()
lm_head = torch.cat(full_weights["output_layer.weight"], dim=0)
complete_state_dict["lm_head.weight"] = lm_head.clone()
complete_state_dict["model.language_model.norm.weight"] = full_weights["decoder.final_layernorm.weight"].clone()
# VLM
for layer_i in range(vision_num_layers):
complete_state_dict[f"model.visual.blocks.{layer_i}.norm1.weight"] = full_weights[
f"vision_model.transformer.layers.{layer_i}.self_attention.linear_qkv.layer_norm_weight"
]
complete_state_dict[f"model.visual.blocks.{layer_i}.norm2.weight"] = full_weights[
f"vision_model.transformer.layers.{layer_i}.mlp.linear_fc1.layer_norm_weight"
]
q, k, v = merge_qkv_vit(
sd_list=full_weights[f"vision_model.transformer.layers.{layer_i}.self_attention.linear_qkv.weight"],
original_tp=origin_tp,
num_attention_heads=vit_n_head,
multi_query_group_num=vit_n_head,
attention_dim=attention_dim,
)
complete_state_dict[f"model.visual.blocks.{layer_i}.attn.qkv.weight"] = torch.cat((q, k, v), dim=0)
proj_weight = torch.cat(
full_weights[f"vision_model.transformer.layers.{layer_i}.self_attention.linear_proj.weight"], dim=1
)
complete_state_dict[f"model.visual.blocks.{layer_i}.attn.proj.weight"] = proj_weight.clone()
gate_proj_weight, up_proj_weight = merge_glu_vit(
full_weights[f"vision_model.transformer.layers.{layer_i}.mlp.linear_fc1.weight"]
)
complete_state_dict[f"model.visual.blocks.{layer_i}.mlp.gate_proj.weight"] = gate_proj_weight.clone()
complete_state_dict[f"model.visual.blocks.{layer_i}.mlp.up_proj.weight"] = up_proj_weight.clone()
down_proj_weight = torch.cat(
full_weights[f"vision_model.transformer.layers.{layer_i}.mlp.linear_fc2.weight"], dim=1
)
complete_state_dict[f"model.visual.blocks.{layer_i}.mlp.down_proj.weight"] = down_proj_weight.clone()
complete_state_dict["model.visual.downsample.weight"] = (
full_weights["vision_model.downsample.weight"].clone().contiguous()
)
complete_state_dict["model.visual.downsample.bias"] = (
full_weights["vision_model.downsample.bias"].clone().contiguous()
)
# Merger
gate_proj, up_proj = merge_glu_vit(full_weights["vision_projection.encoder.linear_fc1.weight"])
down_proj = torch.cat(full_weights["vision_projection.encoder.linear_fc2.weight"], dim=1)
proj = torch.cat(full_weights["vision_projection.linear_fc_extra.weight"], dim=0)
complete_state_dict["model.visual.merger.gate_proj.weight"] = gate_proj.clone().contiguous()
complete_state_dict["model.visual.merger.up_proj.weight"] = up_proj.clone().contiguous()
complete_state_dict["model.visual.merger.down_proj.weight"] = down_proj.clone().contiguous()
complete_state_dict["model.visual.merger.proj.weight"] = proj.clone().contiguous()
if "vision_projection.layer_norm.weight" in full_weights:
complete_state_dict["model.visual.merger.post_projection_norm.weight"] = full_weights[
"vision_projection.layer_norm.weight"
]
if "vision_projection.layer_norm.bias" in full_weights:
complete_state_dict["model.visual.merger.post_projection_norm.bias"] = full_weights[
"vision_projection.layer_norm.bias"
]
complete_state_dict["model.visual.embeddings.position_embedding.weight"] = (
full_weights["vision_model.position_embeddings.weight"].clone().contiguous()
)
complete_state_dict["model.visual.patch_embed.proj.weight"] = (
full_weights["vision_model.conv3d.weight"].clone().contiguous()
)
complete_state_dict["model.visual.patch_embed.proj.bias"] = (
full_weights["vision_model.conv3d.bias"].clone().contiguous()
)
# Check for additional vision model norm layers mentioned in the expected output
if "vision_model.post_conv_layernorm.weight" in full_weights:
complete_state_dict["model.visual.post_conv_layernorm.weight"] = (
full_weights["vision_model.post_conv_layernorm.weight"].clone().contiguous()
)
if "vision_model.post_layernorm.weight" in full_weights:
complete_state_dict["model.visual.post_layernorm.weight"] = (
full_weights["vision_model.post_layernorm.weight"].clone().contiguous()
)
print(f"Total keys in state dict: {len(complete_state_dict)}")
save_sharded_model(
complete_state_dict,
output_path=output_path,
max_shard_size_gb=5,
num_layers=num_layers,
vision_num_layers=vision_num_layers,
)
hf_config = {
"architectures": ["Glm4vForConditionalGeneration"],
"model_type": "glm4v",
"image_start_token_id": model_config.get("image_start_token_id", 151339),
"image_end_token_id": model_config.get("image_end_token_id", 151340),
"video_start_token_id": model_config.get("video_start_token_id", 151341),
"video_end_token_id": model_config.get("video_end_token_id", 151342),
"transformers_version": "4.57.1",
}
txt_config = {
"model_type": "glm4v_text",
"attention_bias": model_config.get("add_qkv_bias", True),
"attention_dropout": 0.0,
"pad_token_id": model_config.get("pad_token_id", 151329),
"eos_token_id": model_config.get("eos_token_id", [151329, 151336, 151338]),
"image_token_id": model_config.get("image_token_id", 151363),
"video_token_id": model_config.get("video_token_id", 151364),
"hidden_act": text_config.get("hidden_act", "silu"),
"hidden_size": text_config.get("hidden_size", 4096),
"initializer_range": 0.02,
"intermediate_size": text_config.get("intermediate_size", 13696),
"max_position_embeddings": text_config.get("seq_length", 131072),
"num_attention_heads": text_config.get("num_attention_heads", 32),
"num_hidden_layers": text_config.get("num_layers", 40),
"num_key_value_heads": text_config.get("num_key_value_heads", 2),
"rms_norm_eps": text_config.get("layernorm_epsilon", 1e-05),
"dtype": text_config.get("torch_dtype", "bfloat16"),
"use_cache": text_config.get("use_cache", True),
"vocab_size": text_config.get("vocab_size", 151552),
"tie_word_embeddings": False,
"rope_parameters": {
"rope_type": "default",
"rope_theta": 10000.0,
"mrope_section": [8, 12, 12],
"partial_rotary_factor": 0.5,
},
}
hf_config["text_config"] = txt_config
if "vision_config" in model_config:
vision_config = {
"model_type": "glm4v_vision",
"hidden_size": model_config["vision_config"].get("hidden_size", 1536),
"depth": model_config["vision_config"].get("num_layers", 24),
"num_heads": model_config["vision_config"].get("num_attention_heads", 12),
"attention_bias": model_config["vision_config"].get("attention_bias", False),
"intermediate_size": model_config.get("ffn_hidden_size", 13696),
"hidden_act": model_config["vision_config"].get("hidden_act", "silu"),
"hidden_dropout_prob": model_config["vision_config"].get("hidden_dropout_prob", 0.0),
"initializer_range": 0.02,
"image_size": model_config["vision_config"].get("image_size", 336),
"patch_size": model_config["vision_config"].get("patch_size", 14),
"out_hidden_size": model_config.get("hidden_size", 4096),
"rms_norm_eps": model_config["vision_config"].get("layernorm_epsilon", 1e-05),
"spatial_merge_size": model_config["vision_config"].get("downsample_ratio", 2),
"temporal_patch_size": model_config["vision_config"].get("t_patch", 2),
}
hf_config["vision_config"] = vision_config
config_path = os.path.join(output_path, "config.json")
with open(config_path, "w") as f:
json.dump(hf_config, f, indent=2)
print(f"Conversion complete! Model saved to {output_path}")
def parse_args():
    """Parse command-line options for the Megatron -> HuggingFace conversion script.

    Returns:
        `argparse.Namespace` with `model_path` (required), `output_path` (required)
        and `config_path` (optional, defaults to `None`).
    """
    arg_parser = argparse.ArgumentParser(description="Convert Megatron model to HuggingFace format")
    arg_parser.add_argument("--model_path", type=str, required=True, help="Path to Megatron model directory")
    arg_parser.add_argument(
        "--output_path", type=str, required=True, help="Output path for HuggingFace model directory"
    )
    arg_parser.add_argument(
        "--config_path", type=str, help="Path to vLLM configuration file for creating HuggingFace config"
    )
    return arg_parser.parse_args()
if __name__ == "__main__":
    # Script entry point: parse CLI options, then merge the Megatron TP/EP/PP
    # shards and write the converted HuggingFace checkpoint + config.
    args = parse_args()
    merge_tp_weights(args.model_path, args.output_path, args.config_path)
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/glm4v/convert_glm4v_mgt_weights_to_hf.py",
"license": "Apache License 2.0",
"lines": 646,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
# Copyright 2025 The ZhipuAI Inc. team and HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Image processor class for GLM-4.1V."""
import math
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import (
convert_to_rgb,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
infer_channel_dimension_format,
is_scaled_image,
make_flat_list_of_images,
to_numpy_array,
valid_images,
validate_preprocess_arguments,
)
from ...processing_utils import ImagesKwargs
from ...utils import TensorType, logging
from ...video_utils import VideoInput
# Module-level logger used for one-time preprocessing warnings below.
logger = logging.get_logger(__name__)
class Glm4vImageProcessorKwargs(ImagesKwargs, total=False):
    """
    Extra keyword arguments accepted by the GLM-4V image processor on top of the
    common `ImagesKwargs` (all optional, `total=False`).

    patch_size (`int`, *optional*, defaults to 14):
        The spatial patch size of the vision encoder.
    temporal_patch_size (`int`, *optional*, defaults to 2):
        The temporal patch size of the vision encoder.
    merge_size (`int`, *optional*, defaults to 2):
        The merge size of the vision encoder to llm encoder.
    """

    patch_size: int  # spatial patch edge length, in pixels
    temporal_patch_size: int  # number of frames folded into one temporal patch
    merge_size: int  # spatial downsample factor between vision encoder and LLM
def smart_resize(
    num_frames: int,
    height: int,
    width: int,
    temporal_factor: int = 2,
    factor: int = 28,
    min_pixels: int = 112 * 112,
    max_pixels: int = 14 * 14 * 2 * 2 * 2 * 6144,
):
    """Pick a target (height, width) for resizing, subject to three constraints.

    Both returned sides are multiples of ``factor``; the implied total voxel
    count (frames x height x width) is kept within ``[min_pixels, max_pixels]``;
    and the aspect ratio of the input is preserved as closely as possible.

    Raises:
        ValueError: if ``num_frames`` < ``temporal_factor`` or the (possibly
            upscaled) aspect ratio exceeds 200.
    """
    if num_frames < temporal_factor:
        raise ValueError(f"t:{num_frames} must be larger than temporal_factor:{temporal_factor}")
    # Upscale first so neither side falls below one grid cell of size `factor`.
    if min(height, width) < factor:
        upscale = max(factor / height, factor / width)
        height = int(height * upscale)
        width = int(width * upscale)
    if max(height, width) / min(height, width) > 200:
        raise ValueError(
            f"absolute aspect ratio must be smaller than 200, got {max(height, width) / min(height, width)}"
        )

    # Round each dimension to the nearest multiple of its quantization step.
    h_bar = round(height / factor) * factor
    w_bar = round(width / factor) * factor
    t_bar = round(num_frames / temporal_factor) * temporal_factor

    budget = t_bar * h_bar * w_bar
    if budget > max_pixels:
        # Too large: shrink both sides by the same ratio, flooring to `factor`.
        shrink = math.sqrt((num_frames * height * width) / max_pixels)
        h_bar = max(factor, math.floor(height / shrink / factor) * factor)
        w_bar = max(factor, math.floor(width / shrink / factor) * factor)
    elif budget < min_pixels:
        # Too small: grow both sides by the same ratio, ceiling to `factor`.
        grow = math.sqrt(min_pixels / (num_frames * height * width))
        h_bar = math.ceil(height * grow / factor) * factor
        w_bar = math.ceil(width * grow / factor) * factor

    return h_bar, w_bar
class Glm4vImageProcessor(BaseImageProcessor):
r"""
Constructs a GLM-4V image processor that dynamically resizes images based on the original images.
Args:
do_resize (`bool`, *optional*, defaults to `True`):
Whether to resize the image's (height, width) dimensions.
size (`Dict[str, int]` *optional*, defaults to `{"shortest_edge": 112 * 112, "longest_edge": 28 * 28 * 15000}`):
Size of the image's `(height, width)` dimensions after resizing. Can be overridden by the `size` parameter
in the `preprocess` method. Available options are:
- `{"height": int, "width": int}`: The image will be resized to the exact size `(height, width)`.
Do NOT keep the aspect ratio.
- `{"shortest_edge": int, "longest_edge": int}`: The image will be resized to a maximum size respecting
the aspect ratio and keeping the shortest edge less or equal to `shortest_edge` and the longest edge
less or equal to `longest_edge`.
- `{"max_height": int, "max_width": int}`: The image will be resized to the maximum size respecting the
aspect ratio and keeping the height less or equal to `max_height` and the width less or equal to
`max_width`.
resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`):
Resampling filter to use when resizing the image.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the image by the specified scale `rescale_factor`.
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
Scale factor to use if rescaling the image.
do_normalize (`bool`, *optional*, defaults to `True`):
Whether to normalize the image.
image_mean (`float` or `List[float]`, *optional*, defaults to `[0.48145466, 0.4578275, 0.40821073]`):
Mean to use if normalizing the image. This is a float or list of floats for each channel in the image.
image_std (`float` or `List[float]`, *optional*, defaults to `[0.26862954, 0.26130258, 0.27577711]`):
Standard deviation to use if normalizing the image. This is a float or list of floats for each channel in the image.
do_convert_rgb (`bool`, *optional*, defaults to `True`):
Whether to convert the image to RGB.
patch_size (`int`, *optional*, defaults to 14):
The spatial patch size of the vision encoder.
temporal_patch_size (`int`, *optional*, defaults to 2):
The temporal patch size of the vision encoder.
merge_size (`int`, *optional*, defaults to 2):
The merge size of the vision encoder to llm encoder.
"""
model_input_names = ["pixel_values", "image_grid_thw"]
valid_kwargs = Glm4vImageProcessorKwargs
def __init__(
self,
do_resize: bool = True,
size: dict[str, int] | None = None,
resample: PILImageResampling = PILImageResampling.BICUBIC,
do_rescale: bool = True,
rescale_factor: int | float = 1 / 255,
do_normalize: bool = True,
image_mean: float | list[float] | None = None,
image_std: float | list[float] | None = None,
do_convert_rgb: bool = True,
patch_size: int = 14,
temporal_patch_size: int = 2,
merge_size: int = 2,
**kwargs,
) -> None:
super().__init__(**kwargs)
if size is not None and ("shortest_edge" not in size or "longest_edge" not in size):
raise ValueError("size must contain 'shortest_edge' and 'longest_edge' keys.")
elif size is None:
size = {"shortest_edge": 112 * 112, "longest_edge": 28 * 28 * 15000}
self.size = size
self.do_resize = do_resize
self.resample = resample
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.do_normalize = do_normalize
self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
self.patch_size = patch_size
self.temporal_patch_size = temporal_patch_size
self.merge_size = merge_size
self.do_convert_rgb = do_convert_rgb
def _preprocess(
self,
images: ImageInput | VideoInput,
do_resize: bool | None = None,
size: dict[str, int] | None = None,
resample: PILImageResampling | None = None,
do_rescale: bool | None = None,
rescale_factor: float | None = None,
do_normalize: bool | None = None,
image_mean: float | list[float] | None = None,
image_std: float | list[float] | None = None,
patch_size: int | None = None,
temporal_patch_size: int | None = None,
merge_size: int | None = None,
do_convert_rgb: bool | None = None,
data_format: ChannelDimension | None = ChannelDimension.FIRST,
input_data_format: str | ChannelDimension | None = None,
):
"""
Preprocess an image or batch of images. Copy of the `preprocess` method from `CLIPImageProcessor`.
Args:
images (`ImageInput`):
Image or batch of images to preprocess. Expects pixel values ranging from 0 to 255. If pixel values range from 0 to 1, set `do_rescale=False`.
vision_info (`List[Dict]`, *optional*):
Optional list of dictionaries containing additional information about vision inputs.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the image.
size (`Dict[str, int]`, *optional*, defaults to `self.size`):
Size of the image after resizing. `shortest_edge` and `longest_edge` keys must be present.
resample (`PILImageResampling`, *optional*, defaults to `self.resample`):
Resampling filter to use if resizing the image. This can be one of the `PILImageResampling` enums.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image.
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Scale factor to use if rescaling the image.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
Whether to normalize the image.
image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
Mean to use if normalizing the image. Can be a float or a list of floats corresponding to the number of channels in the image.
image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
Standard deviation to use if normalizing the image. Can be a float or a list of floats corresponding to the number of channels in the image.
patch_size (`int`, *optional*, defaults to `self.patch_size`):
The spatial patch size of the vision encoder.
temporal_patch_size (`int`, *optional*, defaults to `self.temporal_patch_size`):
The temporal patch size of the vision encoder.
merge_size (`int`, *optional*, defaults to `self.merge_size`):
The merge size of the vision encoder to llm encoder.
do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
Whether to convert the image to RGB.
data_format (`ChannelDimension`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- Unset: Use the channel dimension format of the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
"""
images = make_flat_list_of_images(images)
if do_convert_rgb:
images = [convert_to_rgb(image) for image in images]
# All transformations expect numpy arrays.
images = [to_numpy_array(image) for image in images]
if do_rescale and is_scaled_image(images[0]):
logger.warning_once(
"It looks like you are trying to rescale already rescaled images. If the input"
" images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
)
if input_data_format is None:
# We assume that all images have the same channel dimension format.
input_data_format = infer_channel_dimension_format(images[0])
height, width = get_image_size(images[0], channel_dim=input_data_format)
resized_height, resized_width = height, width
processed_images = []
for image in images:
if do_resize:
resized_height, resized_width = smart_resize(
num_frames=temporal_patch_size,
height=height,
width=width,
temporal_factor=temporal_patch_size,
factor=patch_size * merge_size,
min_pixels=size["shortest_edge"],
max_pixels=size["longest_edge"],
)
image = resize(
image, size=(resized_height, resized_width), resample=resample, input_data_format=input_data_format
)
if do_rescale:
image = self.rescale(image, scale=rescale_factor, input_data_format=input_data_format)
if do_normalize:
image = self.normalize(
image=image, mean=image_mean, std=image_std, input_data_format=input_data_format
)
image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
processed_images.append(image)
patches = np.array(processed_images)
if data_format == ChannelDimension.LAST:
patches = patches.transpose(0, 3, 1, 2)
if patches.shape[0] % temporal_patch_size != 0:
repeats = np.repeat(
patches[-1][np.newaxis], temporal_patch_size - (patches.shape[0] % temporal_patch_size), axis=0
)
patches = np.concatenate([patches, repeats], axis=0)
channel = patches.shape[1]
grid_t = patches.shape[0] // temporal_patch_size
grid_h, grid_w = resized_height // patch_size, resized_width // patch_size
patches = patches.reshape(
grid_t,
temporal_patch_size,
channel,
grid_h // merge_size,
merge_size,
patch_size,
grid_w // merge_size,
merge_size,
patch_size,
)
patches = patches.transpose(0, 3, 6, 4, 7, 2, 1, 5, 8)
flatten_patches = patches.reshape(
grid_t * grid_h * grid_w, channel * temporal_patch_size * patch_size * patch_size
)
return flatten_patches, (grid_t, grid_h, grid_w)
def preprocess(
self,
images: ImageInput,
do_resize: bool | None = None,
size: dict[str, int] | None = None,
resample: PILImageResampling | None = None,
do_rescale: bool | None = None,
rescale_factor: float | None = None,
do_normalize: bool | None = None,
image_mean: float | list[float] | None = None,
image_std: float | list[float] | None = None,
patch_size: int | None = None,
temporal_patch_size: int | None = None,
merge_size: int | None = None,
do_convert_rgb: bool | None = None,
return_tensors: str | TensorType | None = None,
data_format: ChannelDimension | None = ChannelDimension.FIRST,
input_data_format: str | ChannelDimension | None = None,
):
"""
Args:
images (`ImageInput`):
Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
passing in images with pixel values between 0 and 1, set `do_rescale=False`.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the image.
size (`Dict[str, int]`, *optional*, defaults to `self.size`):
Size of the image after resizing. Shortest edge of the image is resized to size["shortest_edge"], with
the longest edge resized to keep the input aspect ratio.
resample (`int`, *optional*, defaults to `self.resample`):
Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
has an effect if `do_resize` is set to `True`.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image.
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
Whether to normalize the image.
image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`.
image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to
`True`.
patch_size (`int`, *optional*, defaults to `self.patch_size`):
The spatial patch size of the vision encoder.
temporal_patch_size (`int`, *optional*, defaults to `self.temporal_patch_size`):
The temporal patch size of the vision encoder.
merge_size (`int`, *optional*, defaults to `self.merge_size`):
The merge size of the vision encoder to llm encoder.
do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
Whether to convert the image to RGB.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- Unset: Use the channel dimension format of the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
"""
size = size if size is not None else self.size
if size is not None and ("shortest_edge" not in size or "longest_edge" not in size):
raise ValueError("size must contain 'shortest_edge' and 'longest_edge' keys.")
do_resize = do_resize if do_resize is not None else self.do_resize
resample = resample if resample is not None else self.resample
do_rescale = do_rescale if do_rescale is not None else self.do_rescale
rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
do_normalize = do_normalize if do_normalize is not None else self.do_normalize
image_mean = image_mean if image_mean is not None else self.image_mean
image_std = image_std if image_std is not None else self.image_std
patch_size = patch_size if patch_size is not None else self.patch_size
temporal_patch_size = temporal_patch_size if temporal_patch_size is not None else self.temporal_patch_size
merge_size = merge_size if merge_size is not None else self.merge_size
do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
if images is not None:
images = self.fetch_images(images)
images = make_flat_list_of_images(images)
if images is not None and not valid_images(images):
raise ValueError("Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor")
validate_preprocess_arguments(
rescale_factor=rescale_factor,
do_normalize=do_normalize,
image_mean=image_mean,
image_std=image_std,
do_resize=do_resize,
size=size,
resample=resample,
)
data = {}
if images is not None:
pixel_values, vision_grid_thws = [], []
for image in images:
patches, image_grid_thw = self._preprocess(
image,
do_resize=do_resize,
size=size,
resample=resample,
do_rescale=do_rescale,
rescale_factor=rescale_factor,
do_normalize=do_normalize,
image_mean=image_mean,
image_std=image_std,
patch_size=patch_size,
temporal_patch_size=temporal_patch_size,
merge_size=merge_size,
data_format=data_format,
do_convert_rgb=do_convert_rgb,
input_data_format=input_data_format,
)
pixel_values.extend(patches)
vision_grid_thws.append(image_grid_thw)
pixel_values = np.array(pixel_values)
vision_grid_thws = np.array(vision_grid_thws)
data.update({"pixel_values": pixel_values, "image_grid_thw": vision_grid_thws})
return BatchFeature(data=data, tensor_type=return_tensors)
def get_number_of_image_patches(self, height: int, width: int, images_kwargs=None):
    """
    A utility that returns number of image patches for a given image size.

    Args:
        height (`int`):
            Height of the input image.
        width (`int`):
            Width of the input image.
        images_kwargs (`dict`, *optional*):
            Any kwargs to override defaults of the image processor.
    Returns:
        `int`: Number of image patches per image.
    """
    # Bug fix: `images_kwargs` is documented as optional and defaults to None,
    # but was dereferenced unconditionally below. Fall back to an empty dict so
    # the processor defaults are used when no overrides are given.
    images_kwargs = images_kwargs if images_kwargs is not None else {}
    patch_size = images_kwargs.get("patch_size", self.patch_size)
    merge_size = images_kwargs.get("merge_size", self.merge_size)
    size = images_kwargs.get("size", self.size)

    # Images are resized so both sides are multiples of patch_size * merge_size,
    # within the configured pixel-area bounds.
    factor = patch_size * merge_size
    resized_height, resized_width = smart_resize(
        num_frames=self.temporal_patch_size,
        height=height,
        width=width,
        factor=factor,
        min_pixels=size["shortest_edge"],
        max_pixels=size["longest_edge"],
        temporal_factor=self.temporal_patch_size,
    )
    grid_h, grid_w = resized_height // patch_size, resized_width // patch_size
    return grid_h * grid_w
# Public API of this module, re-exported by `transformers.models.glm4v`.
__all__ = ["Glm4vImageProcessor"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/glm4v/image_processing_glm4v.py",
"license": "Apache License 2.0",
"lines": 433,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/glm4v/image_processing_glm4v_fast.py | # Copyright 2025 The ZhipuAI Inc. team and HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fast Image processor class for GLM-4.1V."""
from typing import Optional
import torch
import torchvision.transforms.v2.functional as tvF
from ...image_processing_utils import (
BatchFeature,
)
from ...image_processing_utils_fast import (
BaseImageProcessorFast,
group_images_by_shape,
reorder_images,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ImageInput,
PILImageResampling,
SizeDict,
)
from ...processing_utils import Unpack
from ...utils import (
TensorType,
auto_docstring,
logging,
)
from .image_processing_glm4v import Glm4vImageProcessor, Glm4vImageProcessorKwargs, smart_resize
# Module-level logger, following the transformers logging convention.
logger = logging.get_logger(__name__)
@auto_docstring
class Glm4vImageProcessorFast(BaseImageProcessorFast):
    # Default preprocessing configuration for GLM-4.1V.
    # NOTE: `size` bounds the total pixel *area* (shortest_edge = minimum area,
    # longest_edge = maximum area), not edge lengths — see the key checks below.
    do_resize = True
    resample = PILImageResampling.BICUBIC
    size = {"shortest_edge": 112 * 112, "longest_edge": 28 * 28 * 15000}
    do_rescale = True
    do_normalize = True
    image_mean = OPENAI_CLIP_MEAN
    image_std = OPENAI_CLIP_STD
    do_convert_rgb = True
    patch_size = 14
    temporal_patch_size = 2
    merge_size = 2
    valid_kwargs = Glm4vImageProcessorKwargs
    model_input_names = ["pixel_values", "image_grid_thw"]

    def __init__(self, **kwargs: Unpack[Glm4vImageProcessorKwargs]):
        """Initialize the fast processor and validate the `size` dict shape."""
        super().__init__(**kwargs)
        # Reject a user-supplied `size` that is missing either area bound.
        if self.size is not None and (
            self.size.get("shortest_edge", None) is None or self.size.get("longest_edge", None) is None
        ):
            raise ValueError("size must contain 'shortest_edge' and 'longest_edge' keys.")

    def _further_process_kwargs(
        self,
        size: SizeDict | None = None,
        **kwargs,
    ) -> dict:
        """
        Update kwargs that need further processing before being validated
        Can be overridden by subclasses to customize the processing of kwargs.
        """
        # Same invariant as __init__, applied to per-call `size` overrides.
        if size is not None and ("shortest_edge" not in size or "longest_edge" not in size):
            raise ValueError("size must contain 'shortest_edge' and 'longest_edge' keys.")
        return super()._further_process_kwargs(size=size, **kwargs)

    def _preprocess(
        self,
        images: list["torch.Tensor"],
        do_resize: bool,
        size: SizeDict,
        interpolation: Optional["tvF.InterpolationMode"],
        do_rescale: bool,
        rescale_factor: float,
        do_normalize: bool,
        image_mean: float | list[float] | None,
        image_std: float | list[float] | None,
        patch_size: int,
        temporal_patch_size: int,
        merge_size: int,
        disable_grouping: bool | None,
        return_tensors: str | TensorType | None,
        **kwargs,
    ) -> BatchFeature:
        """
        Preprocess an image or batch of images. Copy of the `preprocess` method from `CLIPImageProcessor`.
        """
        # Group same-shaped images so each group can be resized as one batched tensor.
        grouped_images, grouped_images_index = group_images_by_shape(images, disable_grouping=disable_grouping)
        resized_images_grouped = {}
        for shape, stacked_images in grouped_images.items():
            height, width = stacked_images.shape[-2:]
            if do_resize:
                # smart_resize keeps both sides divisible by patch_size * merge_size
                # while staying within the configured pixel-area bounds.
                resized_height, resized_width = smart_resize(
                    num_frames=temporal_patch_size,
                    height=height,
                    width=width,
                    temporal_factor=temporal_patch_size,
                    factor=patch_size * merge_size,
                    min_pixels=size.shortest_edge,
                    max_pixels=size.longest_edge,
                )
                stacked_images = self.resize(
                    stacked_images,
                    size=SizeDict(height=resized_height, width=resized_width),
                    interpolation=interpolation,
                )
            resized_images_grouped[shape] = stacked_images
        resized_images = reorder_images(resized_images_grouped, grouped_images_index)

        # Regroup after resizing (shapes may have changed), then patchify per group.
        grouped_images, grouped_images_index = group_images_by_shape(resized_images, disable_grouping=disable_grouping)
        processed_images_grouped = {}
        processed_grids = {}
        for shape, stacked_images in grouped_images.items():
            resized_height, resized_width = stacked_images.shape[-2:]
            patches = self.rescale_and_normalize(
                stacked_images, do_rescale, rescale_factor, do_normalize, image_mean, image_std
            )
            if patches.ndim == 4:  # (B, C, H, W)
                patches = patches.unsqueeze(1)  # (B, T=1, C, H, W)
            # Pad the temporal axis by repeating the last frame so that T is a
            # multiple of temporal_patch_size.
            if patches.shape[1] % temporal_patch_size != 0:
                repeats = patches[:, -1:].repeat(
                    1, temporal_patch_size - (patches.shape[1] % temporal_patch_size), 1, 1, 1
                )
                patches = torch.cat([patches, repeats], dim=1)
            batch_size, t_len, channel = patches.shape[:3]
            grid_t = t_len // temporal_patch_size
            grid_h, grid_w = resized_height // patch_size, resized_width // patch_size
            # Factor T into (grid_t, temporal_patch_size) and H/W into
            # (merge blocks, merge_size, patch_size) so the permute below can
            # order patches by merge window.
            patches = patches.view(
                batch_size,
                grid_t,
                temporal_patch_size,
                channel,
                grid_h // merge_size,
                merge_size,
                patch_size,
                grid_w // merge_size,
                merge_size,
                patch_size,
            )
            # (B, grid_t, gh, gw, mh, mw, C, tp, ph, pw)
            patches = patches.permute(0, 1, 4, 7, 5, 8, 3, 2, 6, 9)
            # One row per patch: channel * temporal * spatial patch pixels.
            flatten_patches = patches.reshape(
                batch_size,
                grid_t * grid_h * grid_w,
                channel * temporal_patch_size * patch_size * patch_size,
            )
            processed_images_grouped[shape] = flatten_patches
            processed_grids[shape] = [[grid_t, grid_h, grid_w]] * batch_size
        processed_images = reorder_images(processed_images_grouped, grouped_images_index)
        processed_grids = reorder_images(processed_grids, grouped_images_index)
        # All images' patch sequences are concatenated into one flat tensor;
        # `image_grid_thw` records each image's (t, h, w) grid for un-flattening.
        pixel_values = torch.cat(processed_images, dim=0)
        image_grid_thw = torch.tensor(processed_grids)
        return BatchFeature(
            data={"pixel_values": pixel_values, "image_grid_thw": image_grid_thw}, tensor_type=return_tensors
        )

    def get_number_of_image_patches(self, height: int, width: int, images_kwargs=None):
        # Delegate to the slow processor's implementation (called as a plain
        # function with `self`, so it reads this instance's attributes).
        return Glm4vImageProcessor.get_number_of_image_patches(self, height, width, images_kwargs)

    @auto_docstring
    def preprocess(
        self,
        images: ImageInput,
        **kwargs: Unpack[Glm4vImageProcessorKwargs],
    ) -> BatchFeature:
        return super().preprocess(images, **kwargs)
# Public API of this module, re-exported by `transformers.models.glm4v`.
__all__ = ["Glm4vImageProcessorFast"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/glm4v/image_processing_glm4v_fast.py",
"license": "Apache License 2.0",
"lines": 171,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/glm4v/modular_glm4v.py | # Copyright 2025 The ZhipuAI Inc. team and HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
from collections.abc import Callable
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import LayerNorm
from ... import initialization as init
from ...activations import ACT2FN
from ...cache_utils import Cache, DynamicCache
from ...configuration_utils import PreTrainedConfig
from ...feature_extraction_utils import BatchFeature
from ...image_utils import ImageInput
from ...masking_utils import create_causal_mask
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import BaseModelOutputWithPast, BaseModelOutputWithPooling
from ...modeling_rope_utils import RopeParameters
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...tokenization_utils_base import PreTokenizedInput, TextInput
from ...utils import (
TransformersKwargs,
auto_docstring,
can_return_tuple,
logging,
torch_compilable_check,
)
from ...utils.generic import maybe_autocast, merge_with_config_defaults
from ...utils.output_capturing import capture_outputs
from ...video_utils import VideoInput
from ..glm4.modeling_glm4 import Glm4MLP, Glm4RMSNorm, Glm4RotaryEmbedding, eager_attention_forward
from ..qwen2_5_vl.modeling_qwen2_5_vl import (
Qwen2_5_VisionPatchEmbed,
Qwen2_5_VisionRotaryEmbedding,
Qwen2_5_VLCausalLMOutputWithPast,
Qwen2_5_VLForConditionalGeneration,
Qwen2_5_VLMLP,
Qwen2_5_VLModelOutputWithPast,
Qwen2_5_VLPreTrainedModel,
Qwen2_5_VLTextModel,
Qwen2_5_VLVisionAttention,
Qwen2_5_VLVisionBlock,
)
from ..qwen2_vl.modeling_qwen2_vl import Qwen2VLModel
from ..qwen2_vl.processing_qwen2_vl import (
Qwen2VLProcessor,
Qwen2VLProcessorKwargs,
)
# Module-level logger, following the transformers logging convention.
logger = logging.get_logger(__name__)
class Glm4vVisionConfig(PreTrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`Glm4vVisionModel`]. It is used to instantiate an Glm4vVisionModel
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield
    a similar configuration to that of
    GLM-4.1V-9B-Thinking [THUDM/GLM-4.1V-9B-Thinking](https://huggingface.co/THUDM/GLM-4.1V-9B-Thinking).

    Args:
        depth (`int`, *optional*, defaults to 24):
            Number of layers (depth) in the model.
        hidden_size (`int`, *optional*, defaults to 1536):
            Dimensionality of the encoder layers and the pooler layer.
        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"` and `"gelu_new"` are supported.
        attention_bias (`bool`, *optional*, defaults to `False`):
            Whether to add a bias to the queries, keys and values.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            Dropout probability for attention weights.
        num_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the vision encoder.
        in_channels (`int`, *optional*, defaults to 3):
            The number of input channels of the images.
        image_size (`int` or `list[int]`, *optional*, defaults to 336):
            The size (resolution) of each image.
        patch_size (`int`, *optional*, defaults to 14):
            The size (resolution) of each patch.
        rms_norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon used by the rms normalization layers.
        spatial_merge_size (`int`, *optional*, defaults to 2):
            The size used for merging spatial dimensions.
        temporal_patch_size (`int`, *optional*, defaults to 2):
            The size used for patches along the temporal dimension.
        out_hidden_size (`int`, *optional*, defaults to 4096):
            The output hidden size of the vision model.
        intermediate_size (`int`, *optional*, defaults to 13696):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.

    Example:

    ```python
    >>> from transformers import Glm4vVisionConfig, Glm4vVisionModel

    >>> # Initializing a Glm4vVisionConfig GLM-4.1V-9B style configuration
    >>> configuration = Glm4vVisionConfig()

    >>> # Initializing a model (with random weights) from the GLM-4.1V-9B configuration
    >>> model = Glm4vVisionModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "glm4v_vision"
    base_config_key = "vision_config"

    def __init__(
        self,
        depth=24,
        hidden_size=1536,
        hidden_act="silu",
        attention_bias=False,
        attention_dropout=0.0,
        num_heads=12,
        in_channels=3,
        image_size=336,
        patch_size=14,
        rms_norm_eps=1e-05,
        spatial_merge_size=2,
        temporal_patch_size=2,
        out_hidden_size=4096,
        intermediate_size=13696,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.depth = depth
        self.hidden_size = hidden_size
        self.hidden_act = hidden_act
        self.num_heads = num_heads
        self.in_channels = in_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.spatial_merge_size = spatial_merge_size
        self.temporal_patch_size = temporal_patch_size
        self.out_hidden_size = out_hidden_size
        self.intermediate_size = intermediate_size
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.attention_bias = attention_bias
        self.attention_dropout = attention_dropout
class Glm4vTextConfig(PreTrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`Glm4vModel`]. It is used to instantiate a
    GLM-4.1V model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of
    GLM-4.1V-9B-Thinking [THUDM/GLM-4.1V-9B-Thinking](https://huggingface.co/THUDM/GLM-4.1V-9B-Thinking).

    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 151552):
            Vocabulary size of the Glm4v model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`Glm4vModel`]
        hidden_size (`int`, *optional*, defaults to 4096):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 13696):
            Dimension of the MLP representations.
        num_hidden_layers (`int`, *optional*, defaults to 40):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 32):
            Number of attention heads for each attention layer in the Transformer encoder.
        num_key_value_heads (`int`, *optional*, defaults to 2):
            This is the number of key_value heads that should be used to implement Grouped Query Attention. If
            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
            `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
            by meanpooling all the original heads within that group. For more details checkout [this
            paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to `32`.
        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the decoder.
        max_position_embeddings (`int`, *optional*, defaults to 32768):
            The maximum sequence length that this model might ever be used with.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        rms_norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon used by the rms normalization layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        rope_parameters (`RopeParameters`, *optional*):
            Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain
            a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE
            with longer `max_position_embeddings`.
        pad_token_id (`int`, *optional*):
            The id of the padding token.

    ```python
    >>> from transformers import Glm4vTextModel, Glm4vConfig

    >>> # Initializing a GLM-4.1V style configuration
    >>> configuration = Glm4vConfig()

    >>> # Initializing a model from the GLM-4.1V style configuration
    >>> model = Glm4vTextModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "glm4v_text"
    base_config_key = "text_config"
    keys_to_ignore_at_inference = ["past_key_values"]
    # Default tensor parallel plan for base model `Glm4v`
    base_model_tp_plan = {
        "layers.*.self_attn.q_proj": "colwise",
        "layers.*.self_attn.k_proj": "colwise",
        "layers.*.self_attn.v_proj": "colwise",
        "layers.*.self_attn.o_proj": "rowwise",
        "layers.*.mlp.gate_up_proj": "colwise_gather_output",  # we need to replicate here due to the `chunk` operation
        "layers.*.mlp.down_proj": "rowwise_split_input",  # input is replicated due to the `chunk` operation
    }
    base_model_pp_plan = {
        "embed_tokens": (["input_ids"], ["inputs_embeds"]),
        "layers": (["hidden_states", "attention_mask"], ["hidden_states"]),
        "norm": (["hidden_states"], ["hidden_states"]),
    }

    def __init__(
        self,
        vocab_size: int | None = 151552,
        hidden_size: int | None = 4096,
        intermediate_size: int | None = 13696,
        num_hidden_layers: int | None = 40,
        num_attention_heads: int | None = 32,
        num_key_value_heads: int | None = 2,
        hidden_act: str | None = "silu",
        max_position_embeddings: int | None = 32768,
        initializer_range: float | None = 0.02,
        # Annotation fix: the epsilon is a float (default 1e-05), not an int.
        rms_norm_eps: float | None = 1e-05,
        use_cache: bool | None = True,
        attention_dropout: float | None = 0.0,
        rope_parameters: RopeParameters | dict[str, RopeParameters] | None = None,
        pad_token_id: int | None = None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.rope_parameters = rope_parameters
        self.pad_token_id = pad_token_id

        super().__init__(ignore_keys_at_rope_validation={"mrope_section"}, **kwargs)
class Glm4vConfig(PreTrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`Glm4vModel`]. It is used to instantiate a
    GLM-4.1V model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of
    GLM-4.1V-9B-Thinking [THUDM/GLM-4.1V-9B-Thinking](https://huggingface.co/THUDM/GLM-4.1V-9B-Thinking).

    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.

    Args:
        text_config (`Union[PreTrainedConfig, dict]`, *optional*, defaults to `Glm4vTextConfig`):
            The config object or dictionary of the text backbone.
        vision_config (`Union[PreTrainedConfig, dict]`, *optional*, defaults to `Glm4vVisionConfig`):
            The config object or dictionary of the vision backbone.
        image_token_id (`int`, *optional*, defaults to 151343):
            The image token index to encode the image prompt.
        video_token_id (`int`, *optional*, defaults to 151344):
            The video token index to encode the image prompt.
        image_start_token_id (`int`, *optional*, defaults to 151339):
            The image start token index to encode the start of image.
        image_end_token_id (`int`, *optional*, defaults to 151340):
            The image end token index to encode the end of image.
        video_start_token_id (`int`, *optional*, defaults to 151341):
            The video start token index to encode the start of video.
        video_end_token_id (`int`, *optional*, defaults to 151342):
            The video end token index to encode the end of video.
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether the model's input and output word embeddings should be tied.

    ```python
    >>> from transformers import Glm4vForConditionalGeneration, Glm4vConfig

    >>> # Initializing a GLM-4.1V style configuration
    >>> configuration = Glm4vConfig()

    >>> # Initializing a model from the GLM-4.1V style configuration
    >>> model = Glm4vForConditionalGeneration(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "glm4v"
    sub_configs = {"vision_config": Glm4vVisionConfig, "text_config": Glm4vTextConfig}
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        image_token_id=151343,
        video_token_id=151344,
        image_start_token_id=151339,
        image_end_token_id=151340,
        video_start_token_id=151341,
        video_end_token_id=151342,
        tie_word_embeddings=False,
        **kwargs,
    ):
        # Accept a dict, None, or an already-constructed sub-config instance.
        # Robustness fix: previously an instance fell through both branches and
        # left `self.vision_config` / `self.text_config` unset.
        if isinstance(vision_config, dict):
            self.vision_config = self.sub_configs["vision_config"](**vision_config)
        elif vision_config is None:
            self.vision_config = self.sub_configs["vision_config"]()
        else:
            self.vision_config = vision_config

        if isinstance(text_config, dict):
            self.text_config = self.sub_configs["text_config"](**text_config)
        elif text_config is None:
            # Forward remaining kwargs so flat text-model kwargs keep working.
            self.text_config = self.sub_configs["text_config"](**kwargs)
        else:
            self.text_config = text_config

        self.image_token_id = image_token_id
        self.video_token_id = video_token_id
        self.video_start_token_id = video_start_token_id
        self.video_end_token_id = video_end_token_id
        self.image_start_token_id = image_start_token_id
        self.image_end_token_id = image_end_token_id
        self.tie_word_embeddings = tie_word_embeddings
        super().__init__(**kwargs)
# Will be used for both Text and Vision modalities
class Glm4vRMSNorm(Glm4RMSNorm):
    """RMS normalization shared by the text and vision towers; identical to `Glm4RMSNorm`."""

    pass
class Glm4VisionMlp(Qwen2_5_VLMLP):
    """Qwen2.5-VL MLP reused for the GLM-4V vision tower."""

    def __init__(self, config, bias: bool = False):
        super().__init__(config, bias)
        # NOTE(review): this overwrites the attribute *after* super().__init__()
        # has run — presumably only the recorded width changes, since the parent
        # builds its layers during __init__; confirm against Qwen2_5_VLMLP.
        self.intermediate_size = config.out_hidden_size
class Glm4vVisionPatchEmbed(Qwen2_5_VisionPatchEmbed):
    """Embeds video/image patches with a single 3D convolution."""

    def __init__(self, config: "Glm4vVisionConfig") -> None:
        # Bypass the parent __init__ and build the projection from this config.
        nn.Module.__init__(self)
        self.embed_dim = config.hidden_size
        self.in_channels = config.in_channels
        self.patch_size = config.patch_size
        self.temporal_patch_size = config.temporal_patch_size
        # Non-overlapping patches: stride equals the (t, h, w) kernel extents.
        patch_dims = [self.temporal_patch_size, self.patch_size, self.patch_size]
        self.proj = nn.Conv3d(self.in_channels, self.embed_dim, kernel_size=patch_dims, stride=patch_dims)
class Glm4vVisionRotaryEmbedding(Qwen2_5_VisionRotaryEmbedding):
    """Vision rotary position embedding; behavior inherited unchanged from Qwen2.5-VL."""

    pass
class Glm4vVisionPatchMerger(nn.Module):
    """Projects merged vision patches, then applies a gated MLP."""

    def __init__(self, dim: int, context_dim: int, hidden_act: str, bias: bool = False) -> None:
        super().__init__()
        # Input projection followed by LayerNorm + GELU.
        self.proj = nn.Linear(dim, dim, bias=bias)
        self.post_projection_norm = LayerNorm(dim)
        # Gated MLP: dim -> context_dim -> dim, gate activated by `hidden_act`.
        self.gate_proj = nn.Linear(dim, context_dim, bias=bias)
        self.up_proj = nn.Linear(dim, context_dim, bias=bias)
        self.down_proj = nn.Linear(context_dim, dim, bias=bias)
        self.act1 = nn.GELU()
        self.act_fn = ACT2FN[hidden_act]

    def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
        projected = self.act1(self.post_projection_norm(self.proj(hidden_state)))
        gated = self.act_fn(self.gate_proj(projected)) * self.up_proj(projected)
        return self.down_proj(gated)
class Glm4vVisionEmbeddings(nn.Module):
    """Learned 2D position embeddings, resampled to each image's actual patch grid."""

    def __init__(self, config: "Glm4vVisionConfig"):
        super().__init__()
        self.config = config
        self.embed_dim = config.hidden_size
        self.image_size = config.image_size
        self.patch_size = config.patch_size
        # One learned embedding per patch of the square reference grid.
        self.num_patches = (self.image_size // self.patch_size) ** 2
        self.num_positions = self.num_patches
        self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim)
        self.interpolated_method = "bicubic"

    def forward(self, embeddings, lengths, image_shapes, h_coords, w_coords) -> torch.Tensor:
        """
        Add position encodings adapted to each image's patch grid via 2D interpolation.

        Args:
            embeddings: Patch embeddings, shape `(total_seq, hidden_size)`.
            lengths (`torch.Tensor` or `list`): Number of patches per image.
            image_shapes (`torch.Tensor`): `(num_images, 3)` grids as `(t, h, w)`.
            h_coords (`torch.Tensor`): Per-patch row index, shape `(total_seq,)`.
            w_coords (`torch.Tensor`): Per-patch column index, shape `(total_seq,)`.

        Returns:
            `torch.Tensor`: `embeddings` with the resampled position encoding added.
        """
        weight = self.position_embedding.weight
        hidden_size = weight.shape[1]
        device = weight.device

        if isinstance(lengths, list):
            lengths = torch.tensor(lengths, device=device, dtype=torch.long)

        # Lay the flat embedding table out as a (1, C, S, S) float32 "image"
        # so grid_sample can interpolate it.
        side = int(weight.shape[0] ** 0.5)
        pos_grid = (
            weight.view(side, side, hidden_size)
            .permute(2, 0, 1)
            .unsqueeze(0)
            .to(device=device, dtype=torch.float32)
        )

        # Broadcast each image's grid height/width to all of its patches.
        target_h = torch.cat([image_shapes[i, 1].repeat(lengths[i]) for i in range(len(lengths))]).to(
            device=device, dtype=torch.float32
        )
        target_w = torch.cat([image_shapes[i, 2].repeat(lengths[i]) for i in range(len(lengths))]).to(
            device=device, dtype=torch.float32
        )

        # Map patch centers into grid_sample's [-1, 1] coordinate space.
        norm_w = ((w_coords + 0.5) / target_w) * 2 - 1
        norm_h = ((h_coords + 0.5) / target_h) * 2 - 1
        sample_points = torch.stack((norm_w, norm_h), dim=-1).unsqueeze(0).unsqueeze(2)

        sampled = F.grid_sample(
            pos_grid, sample_points, mode=self.interpolated_method, align_corners=False, padding_mode="border"
        )

        # (1, C, total_seq, 1) -> (total_seq, C); restore original dtype/device.
        adapted = sampled.squeeze(0).squeeze(-1).permute(1, 0)
        adapted = adapted.to(weight.dtype).to(embeddings.device)

        return embeddings + adapted
class Glm4vVisionAttention(Qwen2_5_VLVisionAttention):
    """Qwen2.5-VL vision attention with GLM-4V's fused QKV and bias-free output projection."""

    def __init__(self, config: "Glm4vVisionConfig") -> None:
        super().__init__(config)
        dim = config.hidden_size
        self.attention_dropout = config.attention_dropout
        # Fused query/key/value projection; bias is configurable.
        self.qkv = nn.Linear(dim, dim * 3, bias=config.attention_bias)
        self.proj = nn.Linear(dim, dim, bias=False)
class Glm4vVisionBlock(Qwen2_5_VLVisionBlock):
    """Vision transformer block using RMSNorm and GLM-4V attention/MLP."""

    def __init__(self, config) -> None:
        super().__init__(config)
        eps = config.rms_norm_eps
        self.norm1 = Glm4vRMSNorm(config.hidden_size, eps=eps)
        self.norm2 = Glm4vRMSNorm(config.hidden_size, eps=eps)
        self.attn = Glm4vVisionAttention(config)
        self.mlp = Glm4VisionMlp(config, bias=False)
class Glm4vTextRotaryEmbedding(Glm4RotaryEmbedding):
    """Rotary embedding with multimodal (temporal/height/width) frequency sections."""

    def __init__(self, config: Glm4vTextConfig, device=None):
        # Bug fix: forward `config`/`device` to the parent. The parent rotary
        # embedding requires the config to build `inv_freq` and
        # `attention_scaling`; calling `super().__init__()` with no arguments
        # raises a TypeError at instantiation.
        super().__init__(config, device)
        # Per-axis (t/h/w) frequency split; default matches GLM-4.1V.
        self.mrope_section = config.rope_parameters.get("mrope_section", [8, 12, 12])

    def forward(self, x, position_ids):
        # In contrast to other models, GLM-V has different position ids for the grids
        # So we expand the inv_freq to shape (3, ...)
        inv_freq_expanded = self.inv_freq[None, None, :, None].float().expand(3, position_ids.shape[1], -1, 1)
        position_ids_expanded = position_ids[:, :, None, :].float()  # shape (3, bs, 1, positions)

        device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
        with maybe_autocast(device_type=device_type, enabled=False):  # Force float32
            freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(2, 3)
            freqs = self.apply_mrope(freqs, self.mrope_section)
            emb = torch.cat((freqs, freqs), dim=-1)
            cos = emb.cos() * self.attention_scaling
            sin = emb.sin() * self.attention_scaling

        return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)

    def apply_mrope(self, freqs, mrope_section):
        # Split the frequency dim into per-axis sections and take each chunk
        # from its corresponding (t/h/w) position-id plane, cycling over axis 0.
        section = mrope_section
        chunks = freqs.split(section, dim=-1)
        result = torch.cat([chunk[i % 3] for i, chunk in enumerate(chunks)], dim=-1)
        return result
def rotate_half_llm(x):
    """Rotate interleaved channel pairs: (x0, x1) -> (-x1, x0)."""
    even, odd = x[..., 0::2], x[..., 1::2]
    return torch.stack((-odd, even), dim=-1).flatten(-2)


def apply_rotary_pos_emb(q, k, cos, sin, unsqueeze_dim=1):
    """Applies Rotary Position Embedding to the query and key tensors.

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            Dimension along which `cos`/`sin` are unsqueezed so they broadcast
            against `q` and `k`: use 1 for `[batch, heads, seq, dim]` layouts and
            2 for `[batch, seq, heads, dim]` layouts.

    Returns:
        `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
    """
    cos = cos.unsqueeze(unsqueeze_dim)
    sin = sin.unsqueeze(unsqueeze_dim)

    # GLM uses an interleaved layout: keep the first half of the frequencies and
    # duplicate each so cos/sin line up with (even, odd) channel pairs.
    cos = cos[..., : cos.shape[-1] // 2].repeat_interleave(2, dim=-1)
    sin = sin[..., : sin.shape[-1] // 2].repeat_interleave(2, dim=-1)

    # Only the first `rotary_dim` channels are rotated; the rest pass through.
    rotary_dim = cos.shape[-1]
    q_rot, q_pass = q[..., :rotary_dim], q[..., rotary_dim:]
    k_rot, k_pass = k[..., :rotary_dim], k[..., rotary_dim:]

    q_rot = (q_rot * cos) + (rotate_half_llm(q_rot) * sin)
    k_rot = (k_rot * cos) + (rotate_half_llm(k_rot) * sin)

    # Re-attach any pass-through channels.
    q_embed = torch.cat([q_rot, q_pass], dim=-1)
    k_embed = torch.cat([k_rot, k_pass], dim=-1)
    return q_embed, k_embed
class Glm4vTextAttention(nn.Module):
    """
    Multi-headed attention from the 'Attention Is All You Need' paper
    and "Generating Long Sequences with Sparse Transformers".
    """

    def __init__(self, config: Glm4vTextConfig, layer_idx: int | None = None):
        super().__init__()
        self.config = config
        # Layer index lets the KV cache route updates to this layer's slot.
        self.layer_idx = layer_idx
        self.hidden_size = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.head_dim = self.hidden_size // self.num_heads
        self.num_key_value_heads = config.num_key_value_heads
        # Grouped-query attention: each KV head serves this many query heads.
        self.num_key_value_groups = self.num_heads // self.num_key_value_heads
        self.is_causal = True
        self.attention_dropout = config.attention_dropout
        self.rope_parameters = config.rope_parameters
        self.scaling = self.head_dim**-0.5
        # Q/K/V projections carry a bias; the output projection does not.
        self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=True)
        self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=True)
        self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=True)
        self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: tuple[torch.Tensor, torch.Tensor] | None = None,
        attention_mask: torch.Tensor | None = None,
        past_key_values: Cache | None = None,
        cache_position: torch.LongTensor | None = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> tuple[torch.Tensor, torch.Tensor | None, tuple[torch.Tensor] | None]:
        """Compute self-attention with rotary embeddings and optional KV caching."""
        bsz, q_len, _ = hidden_states.size()

        query_states = self.q_proj(hidden_states)
        key_states = self.k_proj(hidden_states)
        value_states = self.v_proj(hidden_states)

        # (bsz, seq, heads*dim) -> (bsz, heads, seq, dim)
        query_states = query_states.view(bsz, q_len, -1, self.head_dim).transpose(1, 2)
        key_states = key_states.view(bsz, q_len, -1, self.head_dim).transpose(1, 2)
        value_states = value_states.view(bsz, q_len, -1, self.head_dim).transpose(1, 2)

        cos, sin = position_embeddings
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)

        if past_key_values is not None:
            # Rotated keys are appended before attention so cached entries
            # already carry their position information.
            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}  # Specific to RoPE models
            key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)

        # Dispatch to the configured attention backend (eager/SDPA/flash...).
        attention_interface: Callable = ALL_ATTENTION_FUNCTIONS.get_interface(
            self.config._attn_implementation, eager_attention_forward
        )

        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            # Dropout is applied only during training.
            dropout=0.0 if not self.training else self.attention_dropout,
            scaling=self.scaling,
            **kwargs,
        )

        attn_output = attn_output.reshape(bsz, q_len, -1).contiguous()
        attn_output = self.o_proj(attn_output)
        return attn_output, attn_weights
class Glm4vTextMLP(Glm4MLP):
    """Feed-forward block of the GLM-4V text decoder; inherits `Glm4MLP` unchanged."""

    pass
class Glm4vTextDecoderLayer(GradientCheckpointingLayer):
    """
    One GLM-4V text decoder block: a self-attention branch followed by an MLP
    branch, each wrapped in a "sandwich" normalization scheme (RMSNorm before
    the sub-module and again after it, inside the residual connection).
    """

    def __init__(self, config: Glm4vTextConfig, layer_idx: int):
        super().__init__()

        def _norm():
            # Helper: all four norms share the same size and epsilon.
            return Glm4vRMSNorm(config.hidden_size, eps=config.rms_norm_eps)

        self.hidden_size = config.hidden_size
        self.self_attn = Glm4vTextAttention(config, layer_idx)
        self.mlp = Glm4vTextMLP(config)
        self.input_layernorm = _norm()
        self.post_attention_layernorm = _norm()
        self.post_self_attn_layernorm = _norm()
        self.post_mlp_layernorm = _norm()

    @auto_docstring
    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: tuple[torch.Tensor, torch.Tensor] | None = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: Cache | None = None,
        use_cache: bool | None = False,
        cache_position: torch.LongTensor | None = None,
        **kwargs,
    ) -> tuple[torch.FloatTensor, tuple[torch.FloatTensor, torch.FloatTensor] | None]:
        # --- self-attention branch ---
        shortcut = hidden_states
        attn_out, _ = self.self_attn(
            hidden_states=self.input_layernorm(hidden_states),
            position_embeddings=position_embeddings,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            use_cache=use_cache,
            cache_position=cache_position,
            **kwargs,
        )
        hidden_states = shortcut + self.post_self_attn_layernorm(attn_out)

        # --- feed-forward branch ---
        shortcut = hidden_states
        mlp_out = self.mlp(self.post_attention_layernorm(hidden_states))
        hidden_states = shortcut + self.post_mlp_layernorm(mlp_out)

        return hidden_states
class Glm4vModelOutputWithPast(Qwen2_5_VLModelOutputWithPast):
    """Output container for `Glm4vModel`; same fields as the Qwen2.5-VL variant."""

    pass
class Glm4vPreTrainedModel(Qwen2_5_VLPreTrainedModel):
    """Base class for all GLM-4V models: weight init and shared model-parallel hints."""

    # Modules that must never be split across devices by `accelerate`.
    _no_split_modules = ["Glm4vTextDecoderLayer", "Glm4vVisionBlock"]
    _can_record_outputs = {
        "hidden_states": Glm4vTextDecoderLayer,
        "attentions": Glm4vTextAttention,
    }

    def _init_weights(self, module):
        # Defer standard weight init to the generic PreTrainedModel logic, then
        # (re)materialize the rotary inverse-frequency buffer, which is computed
        # deterministically from `theta`/`dim` rather than randomly initialized.
        PreTrainedModel._init_weights(self, module)
        if isinstance(module, Glm4vVisionRotaryEmbedding):
            inv_freq = 1.0 / (module.theta ** (torch.arange(0, module.dim, 2, dtype=torch.float) / module.dim))
            init.copy_(module.inv_freq, inv_freq)
class Glm4vVisionModel(Glm4vPreTrainedModel):
    """
    GLM-4V vision tower: patch embedding, transformer blocks with 2D rotary
    position embeddings, and a spatial-merge downsampling + merger head that
    produces embeddings consumable by the language model.
    """

    config: Glm4vVisionConfig
    input_modalities = ("image", "video")
    _no_split_modules = ["Glm4vVisionBlock"]
    _can_record_outputs = {
        "hidden_states": Glm4vVisionBlock,
        "attentions": Glm4vVisionAttention,
    }

    def __init__(self, config) -> None:
        super().__init__(config)
        self.spatial_merge_size = config.spatial_merge_size
        self.patch_size = config.patch_size

        self.embeddings = Glm4vVisionEmbeddings(config)
        self.patch_embed = Glm4vVisionPatchEmbed(config)

        # Rotary dim is half the per-head dim; cos/sin are later duplicated in forward().
        head_dim = config.hidden_size // config.num_heads
        self.rotary_pos_emb = Glm4vVisionRotaryEmbedding(head_dim // 2)

        self.blocks = nn.ModuleList([Glm4vVisionBlock(config) for _ in range(config.depth)])
        self.merger = Glm4vVisionPatchMerger(
            dim=config.out_hidden_size, context_dim=config.intermediate_size, hidden_act=config.hidden_act
        )

        self.post_conv_layernorm = Glm4vRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        # Spatial downsampling: merges spatial_merge_size x spatial_merge_size
        # patch neighborhoods into one output feature.
        self.downsample = nn.Conv2d(
            in_channels=config.hidden_size,
            out_channels=config.out_hidden_size,
            kernel_size=config.spatial_merge_size,
            stride=config.spatial_merge_size,
        )
        self.post_layernorm = Glm4vRMSNorm(config.hidden_size, eps=config.rms_norm_eps)

        self.gradient_checkpointing = False
        self.post_init()

    def rot_pos_emb(self, grid_thw):
        """
        Build per-patch (height, width) position ids for every image/video grid and
        gather the matching rotary embeddings.

        Returns a tuple `(rotary_pos_emb, pos_ids)` where `pos_ids` has shape
        `(total_patches, 2)` holding (h, w) indices per patch.
        """
        pos_ids = []
        for t, h, w in grid_thw:
            # Reorder raw (h, w) indices so that patches belonging to the same
            # spatial-merge window become contiguous in the flattened sequence.
            hpos_ids = torch.arange(h).unsqueeze(1).expand(-1, w)
            hpos_ids = hpos_ids.reshape(
                h // self.spatial_merge_size,
                self.spatial_merge_size,
                w // self.spatial_merge_size,
                self.spatial_merge_size,
            )
            hpos_ids = hpos_ids.permute(0, 2, 1, 3)
            hpos_ids = hpos_ids.flatten()

            wpos_ids = torch.arange(w).unsqueeze(0).expand(h, -1)
            wpos_ids = wpos_ids.reshape(
                h // self.spatial_merge_size,
                self.spatial_merge_size,
                w // self.spatial_merge_size,
                self.spatial_merge_size,
            )
            wpos_ids = wpos_ids.permute(0, 2, 1, 3)
            wpos_ids = wpos_ids.flatten()
            # Every temporal frame reuses the same spatial ids.
            pos_ids.append(torch.stack([hpos_ids, wpos_ids], dim=-1).repeat(t, 1))
        pos_ids = torch.cat(pos_ids, dim=0)
        max_grid_size = grid_thw[:, 1:].max()
        rotary_pos_emb_full = self.rotary_pos_emb(max_grid_size)
        rotary_pos_emb = rotary_pos_emb_full[pos_ids].flatten(1)
        return rotary_pos_emb, pos_ids

    @merge_with_config_defaults
    @capture_outputs
    @auto_docstring
    def forward(
        self, hidden_states: torch.Tensor, grid_thw: torch.Tensor, **kwargs: Unpack[TransformersKwargs]
    ) -> tuple | BaseModelOutputWithPooling:
        r"""
        hidden_states (`torch.Tensor` of shape `(seq_len, hidden_size)`):
            Flattened image patch values to be embedded (despite the name, this is
            the raw pixel-patch input of the vision tower, not a model output).
        grid_thw (`torch.Tensor` of shape `(num_images_or_videos, 3)`):
            The temporal, height and width of feature shape of each image in LLM.

        Returns:
            [`BaseModelOutputWithPooling`]: `last_hidden_state` holds the per-patch
            features after the final layernorm/downsample; `pooler_output` holds the
            merged embeddings projected to the language-model hidden size.
        """
        hidden_states = self.patch_embed(hidden_states)
        hidden_states = self.post_conv_layernorm(hidden_states)

        rotary_pos_emb, image_type_ids = self.rot_pos_emb(grid_thw)
        # Duplicate so cos/sin cover the full rotary dimension.
        emb = torch.cat((rotary_pos_emb, rotary_pos_emb), dim=-1)
        position_embeddings = (emb.cos(), emb.sin())

        # Cumulative sequence lengths delimiting each image/frame in the packed batch.
        cu_seqlens = torch.repeat_interleave(grid_thw[:, 1] * grid_thw[:, 2], grid_thw[:, 0]).cumsum(
            dim=0,
            # Select dtype based on the following factors:
            #  - FA2 requires that cu_seqlens_q must have dtype int32
            #  - torch.onnx.export requires that cu_seqlens_q must have same dtype as grid_thw
            # See https://github.com/huggingface/transformers/pull/34852 for more information
            dtype=grid_thw.dtype if torch.jit.is_tracing() else torch.int32,
        )
        cu_seqlens = F.pad(cu_seqlens, (1, 0), value=0)
        seqlens = (cu_seqlens[1:] - cu_seqlens[:-1]).tolist()

        # `image_type_ids[:, 0]` / `[:, 1]` are the per-patch h/w indices from rot_pos_emb.
        hidden_states = self.embeddings(
            hidden_states,
            seqlens,
            grid_thw,
            image_type_ids[:, 0].to(hidden_states.device),
            image_type_ids[:, 1].to(hidden_states.device),
        )

        for blk in self.blocks:
            hidden_states = blk(
                hidden_states,
                cu_seqlens=cu_seqlens,
                position_embeddings=position_embeddings,
                **kwargs,
            )

        # Downsample merge windows via the strided conv, then project with the merger.
        hidden_states = self.post_layernorm(hidden_states)
        hidden_states = hidden_states.view(
            -1, self.spatial_merge_size, self.spatial_merge_size, hidden_states.shape[-1]
        )
        hidden_states = hidden_states.permute(0, 3, 1, 2)
        hidden_states = self.downsample(hidden_states).view(-1, self.config.out_hidden_size)

        merged_hidden_states = self.merger(hidden_states)
        return BaseModelOutputWithPooling(
            last_hidden_state=hidden_states,
            pooler_output=merged_hidden_states,
        )
class Glm4vTextModel(Qwen2_5_VLTextModel):
    """GLM-4V language decoder: a stack of `Glm4vTextDecoderLayer`s with 3D (mRoPE) positions."""

    def __init__(self, config: Glm4vTextConfig):
        super().__init__(config)
        self.layers = nn.ModuleList(
            [Glm4vTextDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        self.norm = Glm4vRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.rotary_emb = Glm4vTextRotaryEmbedding(config=config)
        # Remove parent attributes that do not apply to this architecture.
        del self._attn_implementation
        del self.has_sliding_layers

    @auto_docstring
    @merge_with_config_defaults
    @capture_outputs
    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: Cache | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        use_cache: bool | None = None,
        cache_position: torch.LongTensor | None = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> tuple | BaseModelOutputWithPast:
        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

        # torch.jit.trace() doesn't support cache objects in the output
        if use_cache and past_key_values is None and not torch.jit.is_tracing():
            past_key_values = DynamicCache(config=self.config)

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)

        if cache_position is None:
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position = torch.arange(
                past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
            )

        # the hard coded `3` is for temporal, height and width.
        if position_ids is None:
            position_ids = cache_position.view(1, 1, -1).expand(3, inputs_embeds.shape[0], -1)
        elif position_ids.ndim == 2:
            position_ids = position_ids[None, ...].expand(3, position_ids.shape[0], -1)

        # NOTE: we need to pass text position ids for packing. Qwen2-VL uses 3D positions
        # where each dim indicates visual spatial positions for temporal/height/width grids.
        # There are two scenarios when FA2-like packed masking might be activated.
        # 1. User specifically passed packed `position_ids` and no attention mask.
        #    In this case we expect the user to create correct position ids for all 3 grids
        #    and prepend text-only position ids to it. The final tensor will be [4, bs, seq-len]
        # 2. User runs forward with no attention mask and no position ids. In this case, position ids
        #    are prepared by the model (`get_rope_index`) as `[4, bs, seq-len]` tensor. Text-only positions are
        #    prepended by us when creating positions so that the mask is constructed correctly. NOTE: failing to pass
        #    text-only positions will cause incorrect mask construction, do not change `prepare_input_for_generation`
        if position_ids.ndim == 3 and position_ids.shape[0] == 4:
            text_position_ids = position_ids[0]
            position_ids = position_ids[1:]
        else:
            # If inputs are not packed (usual 3D positions), do not prepare mask from position_ids
            text_position_ids = None

        mask_kwargs = {
            "config": self.config,
            "inputs_embeds": inputs_embeds,
            "attention_mask": attention_mask,
            "cache_position": cache_position,
            "past_key_values": past_key_values,
            "position_ids": text_position_ids,
        }
        # Create the masks
        causal_mask = create_causal_mask(**mask_kwargs)

        hidden_states = inputs_embeds
        # Rotary cos/sin are computed once and shared by all decoder layers.
        position_embeddings = self.rotary_emb(hidden_states, position_ids=position_ids)

        for decoder_layer in self.layers:
            layer_outputs = decoder_layer(
                hidden_states,
                attention_mask=causal_mask,
                position_ids=text_position_ids,
                past_key_values=past_key_values,
                cache_position=cache_position,
                position_embeddings=position_embeddings,
                **kwargs,
            )
            hidden_states = layer_outputs

        hidden_states = self.norm(hidden_states)
        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values,
        )
class Glm4vModel(Qwen2VLModel):
    """
    GLM-4V multimodal backbone: a `Glm4vVisionModel` tower whose pooled outputs are
    scattered into the language-model embedding sequence at image/video placeholder
    positions before running the text decoder.
    """

    _checkpoint_conversion_mapping = {}
    _no_split_modules = ["Glm4vTextDecoderLayer", "Glm4vVisionBlock"]

    def __init__(self, config):
        super().__init__(config)
        self.visual = Glm4vVisionModel._from_config(config.vision_config)

    @can_return_tuple
    @auto_docstring
    def get_video_features(
        self,
        pixel_values_videos: torch.FloatTensor,
        video_grid_thw: torch.LongTensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple | BaseModelOutputWithPooling:
        r"""
        pixel_values_videos (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):
            The tensors corresponding to the input videos.
        video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*):
            The temporal, height and width of feature shape of each video in LLM.
        """
        pixel_values_videos = pixel_values_videos.type(self.visual.dtype)
        # reshape video_grid_thw -> [b, 3] -> [1, h, w] * frames
        # (each frame is fed to the vision tower as an independent t=1 grid)
        temp_frames_hw = []
        video_grid_thw_list = video_grid_thw.tolist()
        for t, h, w in video_grid_thw_list:
            repeated_row = torch.tensor([1, h, w]).unsqueeze(0).repeat(t, 1)
            temp_frames_hw.append(repeated_row)
        flattened_video_grid_thw = torch.cat(temp_frames_hw, dim=0)
        vision_outputs = self.visual(
            pixel_values_videos, grid_thw=flattened_video_grid_thw, return_dict=True, **kwargs
        )
        # Split the flat pooled embeddings back into one tensor per video.
        split_sizes = (video_grid_thw.prod(-1) // self.visual.spatial_merge_size**2).tolist()
        video_embeds = torch.split(vision_outputs.pooler_output, split_sizes)
        vision_outputs.pooler_output = video_embeds
        return vision_outputs

    def get_placeholder_mask(
        self,
        input_ids: torch.LongTensor,
        inputs_embeds: torch.FloatTensor,
        image_features: torch.FloatTensor | None = None,
        video_features: torch.FloatTensor | None = None,
    ):
        """
        Obtains multimodal placeholder mask from `input_ids` or `inputs_embeds`, and checks that the placeholder token count is
        equal to the length of multimodal features. If the lengths are different, an error is raised.
        """
        if input_ids is None:
            # No token ids available: detect placeholders by comparing embeddings
            # against the embedding of the special image/video token.
            special_image_mask = inputs_embeds == self.get_input_embeddings()(
                torch.tensor(self.config.image_token_id, dtype=torch.long, device=inputs_embeds.device)
            )
            special_image_mask = special_image_mask.all(-1)
            special_video_mask = inputs_embeds == self.get_input_embeddings()(
                torch.tensor(self.config.video_token_id, dtype=torch.long, device=inputs_embeds.device)
            )
            special_video_mask = special_video_mask.all(-1)
        else:
            # GLM-4.1V and GLM-4.5V special_video_mask is special_image_mask
            # (videos reuse the image placeholder token in the token stream).
            special_image_mask = input_ids == self.config.image_token_id
            special_video_mask = input_ids == self.config.image_token_id

        n_image_tokens = special_image_mask.sum()
        special_image_mask = special_image_mask.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device)
        if image_features is not None:
            torch_compilable_check(
                inputs_embeds[special_image_mask].numel() == image_features.numel(),
                f"Image features and image tokens do not match, tokens: {n_image_tokens}, features: {image_features.shape[0]}",
            )

        n_video_tokens = special_video_mask.sum()
        special_video_mask = special_video_mask.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device)
        if video_features is not None:
            torch_compilable_check(
                inputs_embeds[special_video_mask].numel() == video_features.numel(),
                f"Video features and video tokens do not match, tokens: {n_video_tokens}, features: {video_features.shape[0]}",
            )
        return special_image_mask, special_video_mask

    def get_rope_index(
        self,
        input_ids: torch.LongTensor,
        mm_token_type_ids: torch.IntTensor,
        image_grid_thw: torch.LongTensor | None = None,
        video_grid_thw: torch.LongTensor | None = None,
        attention_mask: torch.Tensor | None = None,
        **kwargs,
    ) -> tuple[torch.Tensor, torch.Tensor]:
        """
        Calculate the 3D rope index based on image and video's sizes. The utility expects a `vision + text`
        sequence and will error out otherwise. For pure text sequence, please rely on model's auto-inferred
        position ids. In a mixed vision + text sequence, vision tokens use 3D RoPE (temporal, height, width)
        while text tokens use standard 1D RoPE.

        Example:
            Temporal patches: 3; Height patches: 2; Width patches: 2
            Each vision input results in (temporal x height × width) positions. Here: 3 x 2 × 2 = 12 positions total.
            Temporal position IDs are spaced by:
                `interval = tokens_per_second * temporal_patch_size / fps`
            If fps = 1; tokens_per_second = 25; temporal_patch_size = 2, temporal IDs increase by 50 for each temporal patch:
                `[0, 0, 0, 0, 50, 50, 50, 50, 100, 100, 100, 100]`
            Height IDs repeat per row: `[0, 0, 1, 1, ...]`
            Width IDs alternate per column: `[0, 1, 0, 1, ...]`
            Text tokens follow standard 1D RoPE and the position IDs grow consequently with a step of `1`

        Args:
            input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
                Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
                it.
            mm_token_type_ids (`torch.IntTensor` of shape `(batch_size, sequence_length)`):
                Token type ids matching each modality to a different value in the input sequence, i.e. text (0), image (1), video (2).
            image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*):
                The temporal, height and width of feature shape of each image in LLM.
            video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*):
                The temporal, height and width of feature shape of each video in LLM.
            attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

        Returns:
            position_ids (`torch.LongTensor` of shape `(3, batch_size, sequence_length)`)
            mrope_position_deltas (`torch.Tensor` of shape `(batch_size)`)
        """
        spatial_merge_size = self.config.vision_config.spatial_merge_size

        mrope_position_deltas = []
        position_ids = torch.zeros(
            3,
            input_ids.shape[0],
            input_ids.shape[1],
            dtype=input_ids.dtype,
            device=input_ids.device,
        )
        # One iterator per modality key (image == 1, video == 2); grids are
        # consumed in order of appearance in the sequence.
        grid_iters = {
            1: iter(image_grid_thw) if image_grid_thw is not None else None,
            2: iter(video_grid_thw) if video_grid_thw is not None else None,
        }
        for batch_idx, current_input_ids in enumerate(input_ids):
            input_token_type = mm_token_type_ids[batch_idx]
            if attention_mask is not None:
                # Drop padded positions before grouping.
                current_input_ids = current_input_ids[attention_mask[batch_idx].bool()]
                input_token_type = input_token_type[attention_mask[batch_idx].bool()]

            # Collapse the per-token type vector into contiguous (type, start, end) runs.
            input_type_group = []
            for key, group in itertools.groupby(enumerate(input_token_type.tolist()), lambda x: x[1]):
                group = list(group)
                start_index = group[0][0]
                end_index = group[-1][0] + 1
                input_type_group.append((key, start_index, end_index))

            current_pos = 0
            video_group_index = 0
            llm_pos_ids_list = []
            for modality_type, start_idx, end_idx in input_type_group:
                # text == 0
                if modality_type == 0:
                    text_len = end_idx - start_idx
                    llm_pos_ids_list.append(
                        torch.arange(text_len, device=input_ids.device).view(1, -1).expand(3, -1) + current_pos
                    )
                    current_pos += text_len
                # image == 1, video == 2
                else:
                    # GLM4V splits video into segments per frame but there's only one `grid_thw`
                    # per whole video. We can't exhaust the iterator and have to re-use the grid
                    # while processing the same video!
                    if modality_type == 2:
                        if video_group_index == 0:
                            grid_thw = next(grid_iters[modality_type])
                        video_group_index += 1
                        video_group_index = 0 if video_group_index >= grid_thw[0] else video_group_index
                    else:
                        grid_thw = next(grid_iters[modality_type])

                    # Videos are processed per frame separately, each temporal grid is always `1`
                    temp_merge_size = grid_thw[0]
                    vision_position_ids = self.get_vision_position_ids(
                        current_pos, grid_thw, temp_merge_size, spatial_merge_size, device=input_ids.device
                    )
                    llm_pos_ids_list.append(vision_position_ids)
                    current_pos += max(grid_thw[1], grid_thw[2]) // spatial_merge_size

            llm_positions = torch.cat(llm_pos_ids_list, dim=1).reshape(3, -1)
            if attention_mask is not None:
                # Write positions only into the non-padded slots.
                position_ids[:, batch_idx, attention_mask[batch_idx].bool()] = llm_positions.to(position_ids.device)
            else:
                position_ids[:, batch_idx] = llm_positions.to(position_ids.device)
            # Delta between the largest assigned position and the raw sequence length;
            # used to continue positions correctly during generation.
            mrope_position_deltas.append(llm_positions.max() + 1 - len(current_input_ids))
        mrope_position_deltas = torch.tensor(mrope_position_deltas, device=input_ids.device).unsqueeze(1)
        return position_ids, mrope_position_deltas

    @auto_docstring
    @can_return_tuple
    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: Cache | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        pixel_values: torch.Tensor | None = None,
        pixel_values_videos: torch.FloatTensor | None = None,
        image_grid_thw: torch.LongTensor | None = None,
        video_grid_thw: torch.LongTensor | None = None,
        rope_deltas: torch.LongTensor | None = None,
        mm_token_type_ids: torch.IntTensor | None = None,
        cache_position: torch.LongTensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple | Glm4vModelOutputWithPast:
        r"""
        image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*):
            The temporal, height and width of feature shape of each image in LLM.
        video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*):
            The temporal, height and width of feature shape of each video in LLM.
        rope_deltas (`torch.LongTensor` of shape `(batch_size, )`, *optional*):
            The rope index difference between sequence length and multimodal rope.
        """
        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

        if inputs_embeds is None:
            inputs_embeds = self.get_input_embeddings()(input_ids)

        if pixel_values is not None:
            # Encode images and scatter their embeddings over the image placeholder tokens.
            image_embeds = self.get_image_features(pixel_values, image_grid_thw, return_dict=True).pooler_output
            image_embeds = torch.cat(image_embeds, dim=0).to(inputs_embeds.device, inputs_embeds.dtype)
            image_mask, _ = self.get_placeholder_mask(input_ids, inputs_embeds, image_features=image_embeds)
            inputs_embeds = inputs_embeds.masked_scatter(image_mask, image_embeds)

        if pixel_values_videos is not None:
            # Same as above for videos.
            video_embeds = self.get_video_features(pixel_values_videos, video_grid_thw, return_dict=True).pooler_output
            video_embeds = torch.cat(video_embeds, dim=0).to(inputs_embeds.device, inputs_embeds.dtype)
            _, video_mask = self.get_placeholder_mask(input_ids, inputs_embeds, video_features=video_embeds)
            inputs_embeds = inputs_embeds.masked_scatter(video_mask, video_embeds)

        if position_ids is None:
            position_ids = self.compute_3d_position_ids(
                input_ids=input_ids,
                image_grid_thw=image_grid_thw,
                video_grid_thw=video_grid_thw,
                inputs_embeds=inputs_embeds,
                attention_mask=attention_mask,
                past_key_values=past_key_values,
                mm_token_type_ids=mm_token_type_ids,
            )

        outputs = self.language_model(
            input_ids=None,
            position_ids=position_ids,
            attention_mask=attention_mask,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            cache_position=cache_position,
            **kwargs,
        )

        return Glm4vModelOutputWithPast(
            **outputs,
            rope_deltas=self.rope_deltas,
        )
class Glm4vCausalLMOutputWithPast(Qwen2_5_VLCausalLMOutputWithPast):
    """Output container for `Glm4vForConditionalGeneration`; same fields as the Qwen2.5-VL variant."""

    pass
class Glm4vForConditionalGeneration(Qwen2_5_VLForConditionalGeneration):
    """GLM-4V conditional-generation head: `Glm4vModel` backbone plus an LM head."""

    _checkpoint_conversion_mapping = {}

    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: Cache | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        labels: torch.LongTensor | None = None,
        pixel_values: torch.Tensor | None = None,
        pixel_values_videos: torch.FloatTensor | None = None,
        image_grid_thw: torch.LongTensor | None = None,
        video_grid_thw: torch.LongTensor | None = None,
        mm_token_type_ids: torch.IntTensor | None = None,
        cache_position: torch.LongTensor | None = None,
        logits_to_keep: int | torch.Tensor = 0,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple | Glm4vCausalLMOutputWithPast:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
        image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*):
            The temporal, height and width of feature shape of each image in LLM.
        video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*):
            The temporal, height and width of feature shape of each video in LLM.

        Example:

        ```python
        >>> from PIL import Image
        >>> import httpx
        >>> from io import BytesIO
        >>> from transformers import AutoProcessor, Glm4vForConditionalGeneration

        >>> model = Glm4vForConditionalGeneration.from_pretrained("zai-org/GLM-4.1V-9B-Thinking")
        >>> processor = AutoProcessor.from_pretrained("zai-org/GLM-4.1V-9B-Thinking")

        >>> messages = [
            {
                "role": "user",
                "content": [
                    {"type": "image", "url": "https://www.ilankelman.org/stopsigns/australia.jpg"},
                    {"type": "text", "text": "What is shown in this image?"},
                ],
            },
        ]
        >>> url = "https://www.ilankelman.org/stopsigns/australia.jpg"
        >>> with httpx.stream("GET", url) as response:
        ...     image = Image.open(BytesIO(response.read()))

        >>> text = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
        >>> inputs = processor(text=[text], images=[image], return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "The image shows a street scene with a red stop sign in the foreground. In the background, there is a large red gate with Chinese characters ..."
        ```"""
        outputs = self.model(
            input_ids=input_ids,
            pixel_values=pixel_values,
            pixel_values_videos=pixel_values_videos,
            image_grid_thw=image_grid_thw,
            video_grid_thw=video_grid_thw,
            mm_token_type_ids=mm_token_type_ids,
            position_ids=position_ids,
            attention_mask=attention_mask,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            cache_position=cache_position,
            **kwargs,
        )

        hidden_states = outputs[0]

        # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
        slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
        logits = self.lm_head(hidden_states[:, slice_indices, :])

        loss = None
        if labels is not None:
            loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.text_config.vocab_size)

        return Glm4vCausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            rope_deltas=outputs.rope_deltas,
        )

    def prepare_inputs_for_generation(
        self,
        input_ids,
        past_key_values=None,
        attention_mask=None,
        inputs_embeds=None,
        cache_position=None,
        position_ids=None,
        use_cache=True,
        pixel_values=None,
        pixel_values_videos=None,
        image_grid_thw=None,
        video_grid_thw=None,
        is_first_iteration=False,
        **kwargs,
    ):
        # Overwritten -- in specific circumstances we don't want to forward image inputs to the model
        model_inputs = super().prepare_inputs_for_generation(
            input_ids,
            past_key_values=past_key_values,
            attention_mask=attention_mask,
            inputs_embeds=inputs_embeds,
            cache_position=cache_position,
            position_ids=position_ids,
            pixel_values=pixel_values,
            pixel_values_videos=pixel_values_videos,
            image_grid_thw=image_grid_thw,
            video_grid_thw=video_grid_thw,
            use_cache=use_cache,
            is_first_iteration=is_first_iteration,
            **kwargs,
        )

        # After the prefill step the vision embeddings are already in the KV cache,
        # so pixel inputs must not be forwarded again.
        if not is_first_iteration and use_cache:
            model_inputs["pixel_values"] = None
            model_inputs["pixel_values_videos"] = None

        return model_inputs

    def _get_image_nums_and_video_nums(
        self,
        input_ids: torch.LongTensor | None,
        inputs_embeds: torch.Tensor | None = None,
    ) -> tuple[torch.Tensor, torch.Tensor]:
        """
        Get the number of images and videos for each sample to calculate the separation length of the sample tensor.
        These parameters are not passed through the processor to avoid unpredictable impacts from interface modifications.

        Args:
            input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
                Indices of input sequence tokens in the vocabulary.

        Returns:
            image_nums (`torch.LongTensor` of shape `(batch_size, num_images_sample)`)
            video_nums (`torch.LongTensor` of shape `(batch_size, num_videos_sample)`)
        """
        if inputs_embeds is not None:
            # Detect special tokens by comparing embeddings; `[..., 0]` reduces the
            # hidden dimension after the elementwise equality check.
            is_image = (
                inputs_embeds
                == self.get_input_embeddings()(
                    torch.tensor(self.config.image_start_token_id, dtype=torch.long, device=inputs_embeds.device)
                )
            )[..., 0]
            is_video_start = (
                inputs_embeds
                == self.get_input_embeddings()(
                    torch.tensor(self.config.video_start_token_id, dtype=torch.long, device=inputs_embeds.device)
                )
            )[..., 0]
            is_video_end = (
                inputs_embeds
                == self.get_input_embeddings()(
                    torch.tensor(self.config.video_end_token_id, dtype=torch.long, device=inputs_embeds.device)
                )
            )[..., 0]
        else:
            is_image = input_ids == self.config.image_start_token_id
            is_video_start = input_ids == self.config.video_start_token_id
            is_video_end = input_ids == self.config.video_end_token_id

        # Cumulative sum to track if we're inside a video span
        # We'll assume well-formed video tags (i.e. matching starts and ends)
        video_level = torch.cumsum(is_video_start.int() - is_video_end.int(), dim=1)
        inside_video = video_level > 0  # shape (batch_size, seq_length)

        # Mask out image tokens that are inside video spans
        standalone_images = is_image & (~inside_video)

        # Count per batch
        image_counts = standalone_images.sum(dim=1)
        video_counts = is_video_start.sum(dim=1)

        return image_counts, video_counts
class Glm4vProcessorKwargs(Qwen2VLProcessorKwargs):
    """Processor kwargs for GLM-4V; overrides Qwen2-VL defaults for text and video handling."""

    _defaults = {
        "text_kwargs": {
            "padding": False,
            "return_token_type_ids": False,
            # GLM-4V needs multimodal token-type ids to compute 3D RoPE positions.
            "return_mm_token_type_ids": True,
        },
        # Video metadata (fps, timestamps) is required to build video placeholders.
        "videos_kwargs": {"return_metadata": True},
    }
class Glm4vProcessor(Qwen2VLProcessor):
    """
    Processor for GLM-4V. Wraps an image processor, a video processor and a tokenizer,
    and expands each `<|image|>` / `<|video|>` placeholder in the text into the correct
    number of multimodal tokens for the model.
    """

    def __init__(self, image_processor=None, tokenizer=None, video_processor=None, chat_template=None, **kwargs):
        super().__init__(image_processor, tokenizer, video_processor, chat_template=chat_template)
        # Fall back to the default special tokens when the tokenizer does not define them.
        self.image_token = "<|image|>" if not hasattr(tokenizer, "image_token") else tokenizer.image_token
        self.video_token = "<|video|>" if not hasattr(tokenizer, "video_token") else tokenizer.video_token
        self.video_start_id = tokenizer.convert_tokens_to_ids("<|begin_of_video|>")
        self.video_end_id = tokenizer.convert_tokens_to_ids("<|end_of_video|>")

    def __call__(
        self,
        images: ImageInput | None = None,
        text: TextInput | PreTokenizedInput | list[TextInput] | list[PreTokenizedInput] = None,
        videos: VideoInput | None = None,
        **kwargs: Unpack[Glm4vProcessorKwargs],
    ) -> BatchFeature:
        r"""
        Returns:
            [`BatchFeature`]: A [`BatchFeature`] with the following fields:

            - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
            - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
              `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
              `None`).
            - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
            - **pixel_values_videos** -- Pixel values of videos to be fed to a model. Returned when `videos` is not `None`.
            - **image_grid_thw** -- List of image 3D grid in LLM. Returned when `images` is not `None`.
            - **video_grid_thw** -- List of video 3D grid in LLM. Returned when `videos` is not `None`.
        """
        output_kwargs = self._merge_kwargs(
            Glm4vProcessorKwargs,
            tokenizer_init_kwargs=self.tokenizer.init_kwargs,
            **kwargs,
        )
        if images is not None:
            image_inputs = self.image_processor(images=images, **output_kwargs["images_kwargs"])
            image_grid_thw = image_inputs["image_grid_thw"]
        else:
            image_inputs = {}
            image_grid_thw = None

        if videos is not None:
            videos_inputs = self.video_processor(videos=videos, **output_kwargs["videos_kwargs"])
            # Metadata is always requested internally (see `Glm4vProcessorKwargs._defaults`);
            # only surface it to the caller if they explicitly asked for it.
            if not kwargs.get("return_metadata"):
                video_metadata = videos_inputs.pop("video_metadata")
            else:
                video_metadata = videos_inputs["video_metadata"]
            video_grid_thw = videos_inputs["video_grid_thw"]
        else:
            videos_inputs = {}
            video_grid_thw = None

        if not isinstance(text, list):
            text = [text]

        text = text.copy()  # below lines change text in-place

        if image_grid_thw is not None:
            merge_length = self.image_processor.merge_size**2
            index = 0
            for i in range(len(text)):
                # Expand each image placeholder into grid_t*grid_h*grid_w // merge_size^2 tokens.
                # A temporary "<|placeholder|>" marker avoids re-matching already-expanded tokens.
                while self.image_token in text[i]:
                    num_image_tokens = image_grid_thw[index].prod() // merge_length
                    text[i] = text[i].replace(self.image_token, "<|placeholder|>" * num_image_tokens, 1)
                    index += 1
                text[i] = text[i].replace("<|placeholder|>", self.image_token)

        if video_grid_thw is not None:
            merge_length = self.video_processor.merge_size**2
            video_index = 0
            for i in range(len(text)):
                while self.video_token in text[i]:
                    num_frames = video_grid_thw[video_index][0]
                    video_structure = ""

                    metadata = video_metadata[video_index]
                    if metadata.fps is None:
                        # BUGFIX: the warning previously named "SmolVLM" — copy-paste
                        # from another processor; this is the GLM-4V processor.
                        logger.warning_once(
                            "GLM-4V requires frame timestamps to construct prompts, but the `fps` of the input video could not be inferred. "
                            "Probably `video_metadata` was missing from inputs and you passed pre-sampled frames. "
                            "Defaulting to `fps=24`. Please provide `video_metadata` for more accurate results."
                        )
                    metadata.fps = 24 if metadata.fps is None else metadata.fps

                    # One timestamp per temporal patch (every second sampled frame, mrope).
                    timestamps = list(metadata.timestamps[::2])
                    selected_timestamps = timestamps[:num_frames]
                    # Pad with the last known timestamp so every frame gets one.
                    while len(selected_timestamps) < num_frames:
                        selected_timestamps.append(selected_timestamps[-1] if selected_timestamps else 0)

                    # Build "<begin_of_image><|image|><end_of_image><ts>" per frame.
                    for frame_idx in range(num_frames):
                        timestamp_sec = selected_timestamps[frame_idx]
                        frame_structure = self.replace_frame_token_id(timestamp_sec)
                        video_structure += frame_structure
                    text[i] = text[i].replace(self.video_token, video_structure, 1)
                    # Tokens per frame = total video tokens / number of temporal patches.
                    num_image_tokens = (
                        video_grid_thw[video_index].prod() // merge_length // video_grid_thw[video_index][0]
                    )
                    for frame_idx in range(num_frames):
                        if self.image_token in text[i]:
                            text[i] = text[i].replace(self.image_token, "<|placeholder|>" * num_image_tokens, 1)

                    video_index += 1
                text[i] = text[i].replace("<|placeholder|>", self.image_token)

        return_tensors = output_kwargs["text_kwargs"].pop("return_tensors", None)
        return_mm_token_type_ids = output_kwargs["text_kwargs"].pop("return_mm_token_type_ids", False)
        text_inputs = self.tokenizer(text, **output_kwargs["text_kwargs"])
        self._check_special_mm_tokens(text, text_inputs, modalities=["image", "video"])

        if return_mm_token_type_ids:
            array_ids = np.array(text_inputs["input_ids"])
            mm_token_type_ids = np.zeros_like(text_inputs["input_ids"])
            # Replace 0 -> 2 only inside video segments because GLM4v
            # uses the same special token to denote images and video
            # Otherwise replace 0 -> 1 for image modality
            starts = np.cumsum(array_ids == self.video_start_id, axis=1)
            ends = np.cumsum(array_ids == self.video_end_id, axis=1)
            is_video_modality = starts > ends
            mm_token_type_ids[(array_ids == self.image_token_id) & is_video_modality] = 2
            mm_token_type_ids[(array_ids == self.image_token_id) & (~is_video_modality)] = 1
            text_inputs["mm_token_type_ids"] = mm_token_type_ids.tolist()

        return BatchFeature(data={**text_inputs, **image_inputs, **videos_inputs}, tensor_type=return_tensors)

    def replace_frame_token_id(self, timestamp_sec):
        """Wrap one frame's image token with begin/end markers and its integer timestamp (seconds)."""
        return f"<|begin_of_image|>{self.image_token}<|end_of_image|>{int(timestamp_sec)}"
# Public API of the GLM-4V model package, consumed by transformers' lazy-import machinery.
__all__ = [
    "Glm4vConfig",
    "Glm4vTextConfig",
    "Glm4vVisionConfig",
    "Glm4vForConditionalGeneration",
    "Glm4vModel",
    "Glm4vPreTrainedModel",
    "Glm4vProcessor",
    "Glm4vTextModel",
    "Glm4vVisionModel",
]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/glm4v/modular_glm4v.py",
"license": "Apache License 2.0",
"lines": 1347,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/glm4v/video_processing_glm4v.py | # Copyright 2025 The ZhipuAI Inc. team and HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""video processor class for GLM-4.1V."""
import math
import numpy as np
import torch
from ...image_processing_utils import BatchFeature
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
PILImageResampling,
SizeDict,
get_image_size,
)
from ...processing_utils import Unpack, VideosKwargs
from ...utils import TensorType, add_start_docstrings
from ...video_processing_utils import BASE_VIDEO_PROCESSOR_DOCSTRING, BaseVideoProcessor
from ...video_utils import VideoMetadata, group_videos_by_shape, reorder_videos
from .image_processing_glm4v import smart_resize
class Glm4vVideoProcessorInitKwargs(VideosKwargs, total=False):
    # Upper bound on resized video area, e.g. {"longest_edge": <pixels>}.
    max_image_size: dict[str, int]
    # Spatial side length (pixels) of one vision patch.
    patch_size: int
    # Number of consecutive frames folded into one temporal patch.
    temporal_patch_size: int
    # Patch-merge factor between the vision encoder and the LLM.
    merge_size: int
    # Maximum video duration (seconds) sampled at `fps`; longer videos are sampled uniformly.
    max_duration: int
@add_start_docstrings(
    "Constructs a fast GLM-4V image processor that dynamically resizes videos based on the original videos.",
    BASE_VIDEO_PROCESSOR_DOCSTRING,
    """
    patch_size (`int`, *optional*, defaults to 14):
        The spatial patch size of the vision encoder.
    temporal_patch_size (`int`, *optional*, defaults to 2):
        The temporal patch size of the vision encoder.
    merge_size (`int`, *optional*, defaults to 2):
        The merge size of the vision encoder to llm encoder.
    """,
)
class Glm4vVideoProcessor(BaseVideoProcessor):
    # NOTE: the only code change vs. the previous revision is the user-facing docstring
    # typo fix above ("spacial" -> "spatial"); all executable logic is unchanged.

    # Class-level defaults; each can be overridden per-instance via init kwargs.
    resample = PILImageResampling.BICUBIC
    size = {"shortest_edge": 112 * 112, "longest_edge": 28 * 28 * 2 * 30000}  # pixel-area bounds
    max_image_size = {"longest_edge": 28 * 28 * 2 * 30000}
    image_mean = OPENAI_CLIP_MEAN
    image_std = OPENAI_CLIP_STD
    do_resize = True
    do_rescale = True
    do_normalize = True
    do_convert_rgb = True
    do_sample_frames = True
    patch_size = 14
    temporal_patch_size = 2
    max_duration = 300  # seconds; beyond this, frames are sampled uniformly instead of at `fps`
    merge_size = 2
    valid_kwargs = Glm4vVideoProcessorInitKwargs
    num_frames = 16
    fps = 2
    model_input_names = ["pixel_values_videos", "video_grid_thw"]

    def __init__(self, **kwargs: Unpack[Glm4vVideoProcessorInitKwargs]):
        super().__init__(**kwargs)
        if self.size is not None and (
            self.size.get("shortest_edge", None) is None or self.size.get("longest_edge", None) is None
        ):
            raise ValueError("size must contain 'shortest_edge' and 'longest_edge' keys.")

    def _further_process_kwargs(
        self,
        size: SizeDict | None = None,
        **kwargs,
    ) -> dict:
        """
        Update kwargs that need further processing before being validated
        Can be overridden by subclasses to customize the processing of kwargs.
        """
        # Validate a per-call `size` override the same way __init__ validates the default.
        if size is not None and ("shortest_edge" not in size or "longest_edge" not in size):
            raise ValueError("size must contain 'shortest_edge' and 'longest_edge' keys.")
        return super()._further_process_kwargs(size=size, **kwargs)

    def sample_frames(
        self,
        metadata: VideoMetadata,
        fps: int | float | None = None,
        **kwargs,
    ):
        """
        Args:
            metadata (`VideoMetadata`):
                Metadata of the video containing information about total duration, fps and total number of frames.
            fps (`int` or `float`, *optional*):
                Target frames to sample per second. Defaults to `self.fps`.

        Returns:
            np.ndarray:
                Indices to sample video frames.
        """
        if metadata is None or getattr(metadata, "fps", None) is None:
            raise ValueError(
                "Asked to sample frames per second but no video metadata was provided which is required when sampling in GLM4V. "
                "Please pass in `VideoMetadata` object or set `do_sample_frames=False`"
            )

        total_frames = metadata.total_num_frames
        requested_fps = fps if fps is not None else self.fps
        max_frame_idx = total_frames - 1
        # Fall back to duration derived from frame count when metadata lacks it.
        duration = metadata.duration or round(max_frame_idx / metadata.fps) + 1

        if duration <= self.max_duration:
            # Short video: sample at the requested fps.
            n = int(math.floor(duration * requested_fps))
            frame_indices = [min(max_frame_idx, int(math.ceil(i * metadata.fps / requested_fps))) for i in range(n)]
        else:
            # Long video: cap the sample count and spread it uniformly over the whole duration.
            num_samples = int(self.max_duration * requested_fps)
            if num_samples >= total_frames:
                frame_indices = list(range(total_frames))
            else:
                target_seconds = np.linspace(0, duration, num_samples, endpoint=True)
                frame_indices = [min(max_frame_idx, int(math.ceil(t * metadata.fps))) for t in target_seconds]

        # Deduplicate while preserving order (ceil/min above can collide on indices).
        seen, uniq = set(), []
        for idx in frame_indices:
            if idx not in seen:
                seen.add(idx)
                uniq.append(idx)

        # Keep an even frame count so frames pair up into temporal patches of 2.
        if len(uniq) & 1:
            uniq.append(uniq[-1])

        return np.array(uniq)

    def _preprocess(
        self,
        videos: list[torch.Tensor],
        do_convert_rgb: bool = True,
        do_resize: bool = True,
        size: SizeDict | None = None,
        interpolation: PILImageResampling = PILImageResampling.BICUBIC,
        do_rescale: bool = True,
        rescale_factor: float = 1 / 255.0,
        do_normalize: bool = True,
        image_mean: float | list[float] | None = None,
        image_std: float | list[float] | None = None,
        patch_size: int | None = None,
        temporal_patch_size: int | None = None,
        merge_size: int | None = None,
        return_tensors: str | TensorType | None = None,
        **kwargs,
    ):
        # Batch same-shaped videos together so each group can be resized as one tensor.
        grouped_videos, grouped_videos_index = group_videos_by_shape(videos)
        resized_videos_grouped = {}
        for shape, stacked_videos in grouped_videos.items():
            B, T, C, H, W = stacked_videos.shape
            num_frames, height, width = T, H, W
            if do_resize:
                resized_height, resized_width = smart_resize(
                    num_frames=num_frames,
                    height=height,
                    width=width,
                    temporal_factor=temporal_patch_size,
                    factor=patch_size * merge_size,
                    min_pixels=size.shortest_edge,
                    max_pixels=size.longest_edge,
                )
                # Resize operates on 4D (N, C, H, W); flatten batch and time first.
                stacked_videos = stacked_videos.view(B * T, C, H, W)
                stacked_videos = self.resize(
                    stacked_videos,
                    size=SizeDict(height=resized_height, width=resized_width),
                    interpolation=interpolation,
                )
                stacked_videos = stacked_videos.view(B, T, C, resized_height, resized_width)
            resized_videos_grouped[shape] = stacked_videos
        resized_videos = reorder_videos(resized_videos_grouped, grouped_videos_index)

        # Group videos by size for further processing
        # Needed in case do_resize is False, or resize returns videos with different sizes
        grouped_videos, grouped_videos_index = group_videos_by_shape(resized_videos)
        processed_videos_grouped = {}
        processed_grids = {}
        for shape, stacked_videos in grouped_videos.items():
            resized_height, resized_width = get_image_size(stacked_videos[0], channel_dim=ChannelDimension.FIRST)

            # Fused rescale and normalize
            stacked_videos = self.rescale_and_normalize(
                stacked_videos, do_rescale, rescale_factor, do_normalize, image_mean, image_std
            )
            patches = stacked_videos

            # Check that videos have `num_frames` divisible by `temporal_patch_size`
            # NOTE(review): padding appends `temporal_patch_size - 1` copies of the last frame,
            # which only guarantees divisibility for temporal_patch_size == 2 (the default) —
            # confirm if larger temporal patch sizes are ever configured.
            if patches.shape[1] % temporal_patch_size != 0:
                repeats = patches[:, -1:].repeat(1, temporal_patch_size - 1, 1, 1, 1)
                patches = torch.cat([patches, repeats], dim=1)
            batch_size, grid_t, channel = patches.shape[:3]
            grid_t = grid_t // temporal_patch_size
            grid_h, grid_w = resized_height // patch_size, resized_width // patch_size

            # Carve (T, C, H, W) into (t, h, w) patches of size
            # temporal_patch_size x patch_size x patch_size, grouped by merge window.
            patches = patches.view(
                batch_size,
                grid_t,
                temporal_patch_size,
                channel,
                grid_h // merge_size,
                merge_size,
                patch_size,
                grid_w // merge_size,
                merge_size,
                patch_size,
            )
            patches = patches.permute(0, 1, 4, 7, 5, 8, 3, 2, 6, 9)
            flatten_patches = patches.reshape(
                batch_size,
                grid_t * grid_h * grid_w,
                channel * temporal_patch_size * patch_size * patch_size,
            )
            processed_videos_grouped[shape] = flatten_patches
            processed_grids[shape] = [[grid_t, grid_h, grid_w]] * batch_size

        processed_videos = reorder_videos(processed_videos_grouped, grouped_videos_index)
        processed_grids = reorder_videos(processed_grids, grouped_videos_index)
        pixel_values_videos = torch.cat(processed_videos, dim=0)
        video_grid_thw = torch.tensor(processed_grids)

        data = {
            "pixel_values_videos": pixel_values_videos,
            "video_grid_thw": video_grid_thw,
        }

        return BatchFeature(data=data, tensor_type=return_tensors)
# Sole public symbol exported by this module.
__all__ = ["Glm4vVideoProcessor"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/glm4v/video_processing_glm4v.py",
"license": "Apache License 2.0",
"lines": 220,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:tests/models/glm4v/test_modeling_glm4v.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch GLM-4.1V model."""
import copy
import unittest
from transformers import (
AutoProcessor,
Glm4vConfig,
Glm4vForConditionalGeneration,
Glm4vModel,
is_torch_available,
)
from transformers.testing_utils import (
Expectations,
cleanup,
require_deterministic_for_xpu,
require_flash_attn,
require_torch,
require_torch_accelerator,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import (
ModelTesterMixin,
floats_tensor,
ids_tensor,
)
if is_torch_available():
import torch
class Glm4vVisionText2TextModelTester:
    """Builds a tiny GLM-4V config and dummy multimodal inputs for the common model tests."""

    def __init__(
        self,
        parent,
        batch_size=3,
        seq_length=7,
        num_channels=3,
        ignore_index=-100,
        image_size=112,
        video_start_token_id=3,
        video_end_token_id=4,
        image_start_token_id=5,
        image_end_token_id=6,
        image_token_id=7,
        video_token_id=8,
        is_training=True,
        # NOTE(review): mutable default dicts are shared across instances; harmless here
        # because the tester never mutates them, but worth keeping in mind.
        text_config={
            "vocab_size": 99,
            "hidden_size": 16,
            "intermediate_size": 22,
            "num_hidden_layers": 2,
            "num_attention_heads": 2,
            "num_key_value_heads": 1,
            "output_channels": 64,
            "hidden_act": "silu",
            "max_position_embeddings": 512,
            "rope_parameters": {"type": "default", "mrope_section": [2, 1, 1]},
            "rope_theta": 10000,
            "tie_word_embeddings": True,
            "bos_token_id": 0,
            "eos_token_id": 0,
            "pad_token_id": 0,
        },
        vision_config={
            "depth": 2,
            "hidden_act": "silu",
            "hidden_size": 48,
            "out_hidden_size": 16,
            "intermediate_size": 22,
            "patch_size": 14,
            "spatial_merge_size": 1,
            "temporal_patch_size": 2,
        },
    ):
        self.parent = parent
        self.ignore_index = ignore_index
        self.bos_token_id = text_config["bos_token_id"]
        self.eos_token_id = text_config["eos_token_id"]
        self.pad_token_id = text_config["pad_token_id"]
        self.video_start_token_id = video_start_token_id
        self.video_end_token_id = video_end_token_id
        self.image_start_token_id = image_start_token_id
        self.image_end_token_id = image_end_token_id
        self.image_token_id = image_token_id
        self.video_token_id = video_token_id
        self.text_config = text_config
        self.vision_config = vision_config
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.is_training = is_training

        self.hidden_size = text_config["hidden_size"]
        self.num_hidden_layers = text_config["num_hidden_layers"]
        self.num_attention_heads = text_config["num_attention_heads"]
        self.vocab_size = text_config["vocab_size"]

        # Total sequence length = plain text tokens + image placeholder tokens.
        self.num_image_tokens = 64
        self.seq_length = seq_length + self.num_image_tokens

    def get_config(self):
        """Assemble a full `Glm4vConfig` from the tiny text/vision sub-configs."""
        return Glm4vConfig(
            text_config=self.text_config,
            vision_config=self.vision_config,
            image_token_id=self.image_token_id,
            video_token_id=self.video_token_id,
            video_start_token_id=self.video_start_token_id,
            video_end_token_id=self.video_end_token_id,
            image_start_token_id=self.image_start_token_id,
            image_end_token_id=self.image_end_token_id,
        )

    def prepare_config_and_inputs(self):
        config = self.get_config()
        patch_size = config.vision_config.patch_size
        temporal_patch_size = config.vision_config.temporal_patch_size
        # Pixel values are pre-patchified: (num_patches, channels * patch_area * temporal_patch).
        pixel_values = floats_tensor(
            [
                self.batch_size * (self.image_size**2) // (patch_size**2),
                self.num_channels * (patch_size**2) * temporal_patch_size,
            ]
        )

        return config, pixel_values

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)

        # Scrub any randomly-generated special tokens so the image span below is the only one.
        input_ids[input_ids == self.video_token_id] = self.pad_token_id
        input_ids[input_ids == self.image_token_id] = self.pad_token_id
        input_ids[input_ids == self.video_start_token_id] = self.pad_token_id
        input_ids[input_ids == self.image_start_token_id] = self.pad_token_id
        input_ids[input_ids == self.video_end_token_id] = self.pad_token_id
        input_ids[input_ids == self.image_end_token_id] = self.pad_token_id

        # Lay out <image_start> <image>*num_image_tokens <image_end> at the sequence head.
        input_ids[:, 0] = self.image_start_token_id
        input_ids[:, 1 : 1 + self.num_image_tokens] = self.image_token_id
        input_ids[:, 1 + self.num_image_tokens] = self.image_end_token_id
        patch_size = config.vision_config.patch_size
        patches_per_side = self.image_size // patch_size

        # Token-type ids: 1 marks image placeholder positions, 0 marks text.
        mm_token_type_ids = torch.zeros_like(input_ids)
        mm_token_type_ids[:, 1 : 1 + self.num_image_tokens] = 1

        inputs_dict = {
            "pixel_values": pixel_values,
            "image_grid_thw": torch.tensor(
                [[1, patches_per_side, patches_per_side]] * self.batch_size, device=torch_device
            ),
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "mm_token_type_ids": mm_token_type_ids,
        }
        return config, inputs_dict
@require_torch
class Glm4vModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
    """Runs the shared model/generation test suites against tiny GLM-4V models."""

    all_model_classes = (Glm4vModel, Glm4vForConditionalGeneration) if is_torch_available() else ()
    model_split_percents = [0.7, 0.9]  # model too big to split at 0.5
    _is_composite = True

    def setUp(self):
        self.model_tester = Glm4vVisionText2TextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=Glm4vConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    # GLM4V has images shaped as (bs*patch_len, dim) so we can't slice to batches in generate
    def prepare_config_and_inputs_for_generate(self, batch_size=2):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        # We don't want a few model inputs in our model input dictionary for generation tests
        input_keys_to_ignore = [
            # we don't want to mask attention heads
            # we don't want encoder-decoder models to start from filled decoder ids
            "decoder_input_ids",
            "decoder_attention_mask",
            # we'll set cache use in each test differently
            "use_cache",
            # Ignore labels if it is in the input dict
            "labels",
            # model-specific exceptions should overload/overwrite this function
        ]

        # The diff from the general `prepare_config_and_inputs_for_generate` lies here:
        # pixel_values are flattened over the batch, so slice by patch count, not batch index.
        patch_size = config.vision_config.patch_size
        filtered_image_length = batch_size * (self.model_tester.image_size**2) // (patch_size**2)
        filtered_inputs_dict = {
            k: v[:batch_size, ...] if isinstance(v, torch.Tensor) else v
            for k, v in inputs_dict.items()
            if k not in input_keys_to_ignore
        }
        filtered_inputs_dict["pixel_values"] = inputs_dict["pixel_values"][:filtered_image_length]

        # It is important set `eos_token_id` to `None` to avoid early stopping (would break for length-based checks)
        text_gen_config = config.get_text_config(decoder=True)
        if text_gen_config.eos_token_id is not None and text_gen_config.pad_token_id is None:
            text_gen_config.pad_token_id = (
                text_gen_config.eos_token_id
                if isinstance(text_gen_config.eos_token_id, int)
                else text_gen_config.eos_token_id[0]
            )
        text_gen_config.eos_token_id = None
        text_gen_config.forced_eos_token_id = None

        return config, filtered_inputs_dict

    @unittest.skip(reason="No available kernels - not supported")
    def test_sdpa_can_dispatch_on_flash(self):
        pass

    @unittest.skip(reason="Size mismatch")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Error with compilation")
    def test_generate_from_inputs_embeds_with_static_cache(self):
        pass

    def test_inputs_embeds(self):
        # Forward with inputs_embeds only: visual inputs must be dropped since the
        # placeholders they'd fill were already embedded.
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))

            input_ids = inputs["input_ids"]
            del inputs["input_ids"]
            del inputs["pixel_values"]
            del inputs["image_grid_thw"]

            wte = model.get_input_embeddings()
            inputs["inputs_embeds"] = wte(input_ids)

            with torch.no_grad():
                model(**inputs)[0]

    def test_inputs_embeds_matches_input_ids(self):
        # Passing input_ids vs. their embeddings must produce identical outputs.
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            inputs = self._prepare_for_class(inputs_dict, model_class)
            input_ids = inputs["input_ids"]
            del inputs["input_ids"]
            del inputs["pixel_values"]
            del inputs["image_grid_thw"]

            inputs_embeds = model.get_input_embeddings()(input_ids)

            with torch.no_grad():
                out_ids = model(input_ids=input_ids, **inputs)[0]
                out_embeds = model(inputs_embeds=inputs_embeds, **inputs)[0]
            torch.testing.assert_close(out_embeds, out_ids)
@require_torch
class Glm4vIntegrationTest(unittest.TestCase):
    """Slow integration tests running the real THUDM/GLM-4.1V-9B-Thinking checkpoint
    end-to-end (processor + model.generate) and comparing against pinned decodings."""

    def setUp(self):
        cleanup(torch_device, gc_collect=True)

        self.processor = AutoProcessor.from_pretrained("THUDM/GLM-4.1V-9B-Thinking")
        self.message = [
            {
                "role": "user",
                "content": [
                    {
                        "type": "image",
                        "url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg",
                    },
                    {"type": "text", "text": "What kind of dog is this?"},
                ],
            }
        ]
        self.message2 = [
            {
                "role": "user",
                "content": [
                    {
                        "type": "image",
                        "url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/coco_sample.png",
                    },
                    {"type": "text", "text": "What kind of dog is this?"},
                ],
            }
        ]

    def tearDown(self):
        cleanup(torch_device, gc_collect=True)

    @slow
    def test_small_model_integration_test(self):
        model = Glm4vForConditionalGeneration.from_pretrained(
            "THUDM/GLM-4.1V-9B-Thinking", dtype="auto", device_map="auto"
        )

        inputs = self.processor.apply_chat_template(
            self.message, tokenize=True, add_generation_prompt=True, return_dict=True, return_tensors="pt"
        )

        expected_input_ids = [151331, 151333, 151336, 198, 151339, 151343, 151343, 151343, 151343, 151343, 151343, 151343, 151343, 151343, 151343, 151343, 151343]  # fmt: skip
        assert expected_input_ids == inputs.input_ids[0].tolist()[:17]

        expected_pixel_slice = torch.tensor(
            [
                [-0.0988, -0.0842, -0.0842],
                [-0.5660, -0.5514, -0.4200],
                [-0.0259, -0.0259, -0.0259],
                [-0.1280, -0.0988, -0.2010],
                [-0.4638, -0.5806, -0.6974],
                [-1.2083, -1.2229, -1.2083],
            ],
            dtype=torch.float32,
            device="cpu",
        )
        assert torch.allclose(expected_pixel_slice, inputs.pixel_values[:6, :3], atol=3e-3)

        # verify generation
        inputs = inputs.to(torch_device)
        # This model on the hub has `do_sample=True`.
        torch.manual_seed(42)

        output = model.generate(**inputs, max_new_tokens=30)
        EXPECTED_DECODED_TEXT = "\nWhat kind of dog is this?\n<think>Got it, let's look at the image. The animal in the picture doesn't look like a dog; it's actually a cat. Specifically"
        self.assertEqual(
            self.processor.decode(output[0], skip_special_tokens=True),
            EXPECTED_DECODED_TEXT,
        )

    @slow
    def test_small_model_integration_test_batch(self):
        model = Glm4vForConditionalGeneration.from_pretrained(
            "THUDM/GLM-4.1V-9B-Thinking", dtype="auto", device_map="auto"
        )
        batch_messages = [self.message] * 2
        inputs = self.processor.apply_chat_template(
            batch_messages, tokenize=True, add_generation_prompt=True, return_dict=True, return_tensors="pt"
        ).to(torch_device)
        # This model on the hub has `do_sample=True`.
        torch.manual_seed(42)

        # it should not matter whether two images are the same size or not
        output = model.generate(**inputs, max_new_tokens=30)

        EXPECTED_DECODED_TEXT = [
            "\nWhat kind of dog is this?\n<think>Got it, let's look at the image. The animal in the picture doesn't look like a dog; it's actually a cat. Specifically",
            "\nWhat kind of dog is this?\n<think>Got it, let's look at the image. The animal in the picture has a stocky body, thick fur, and a face that's"
        ]  # fmt: skip
        self.assertEqual(
            self.processor.batch_decode(output, skip_special_tokens=True),
            EXPECTED_DECODED_TEXT,
        )

    @slow
    def test_small_model_integration_test_with_video(self):
        processor = AutoProcessor.from_pretrained("THUDM/GLM-4.1V-9B-Thinking", max_image_size={"longest_edge": 50176})
        model = Glm4vForConditionalGeneration.from_pretrained(
            "THUDM/GLM-4.1V-9B-Thinking", dtype=torch.float16, device_map="auto"
        )
        questions = ["Describe this video."]
        video_urls = ["https://huggingface.co/datasets/hf-internal-testing/fixtures_videos/resolve/main/tennis.mp4"]
        messages = [
            [
                {
                    "role": "user",
                    "content": [
                        {
                            "type": "video",
                            "video": video_url,
                        },
                        {"type": "text", "text": question},
                    ],
                }
            ]
            for question, video_url in zip(questions, video_urls)
        ]
        inputs = processor.apply_chat_template(
            messages, tokenize=True, add_generation_prompt=True, return_dict=True, return_tensors="pt", padding=True
        ).to(torch_device)
        # This model on the hub has `do_sample=True`.
        torch.manual_seed(42)

        output = model.generate(**inputs, max_new_tokens=30)
        EXPECTED_DECODED_TEXT = ["\n012345Describe this video.\n<think>Got it, let's analyze the video. First, the scene is an indoor tennis court. There are two players: one in a white shirt"]  # fmt: skip
        self.assertEqual(
            processor.batch_decode(output, skip_special_tokens=True),
            EXPECTED_DECODED_TEXT,
        )

    @slow
    @require_deterministic_for_xpu
    def test_small_model_integration_test_expand(self):
        model = Glm4vForConditionalGeneration.from_pretrained(
            "THUDM/GLM-4.1V-9B-Thinking", dtype="auto", device_map="auto"
        )
        inputs = self.processor.apply_chat_template(
            self.message, tokenize=True, add_generation_prompt=True, return_dict=True, return_tensors="pt"
        ).to(torch_device)
        # This model on the hub has `do_sample=True`.
        torch.manual_seed(42)

        output = model.generate(**inputs, max_new_tokens=30, do_sample=False, num_beams=2, num_return_sequences=2)

        # fmt: off
        EXPECTED_DECODED_TEXTS = Expectations(
            {
                (None, None): ["\nWhat kind of dog is this?\n<think>Got it, let's look at the image. The animal in the picture doesn't look like a dog; it's actually a cat. Specifically",
                               "\nWhat kind of dog is this?\n<think>Got it, let's look at the image. The animal in the picture doesn't look like a dog; it's actually a cat, specifically"
                ],
                ("xpu", None): ["\nWhat kind of dog is this?\n<think>Got it, let's look at the image. The animal in the picture is not a dog; it's a cat. Specifically, it looks",
                                "\nWhat kind of dog is this?\n<think>Got it, let's look at the image. The animal in the picture is not a dog; it's a cat, specifically a Pallas"
                ],
            }
        )
        # fmt: on
        EXPECTED_DECODED_TEXT = EXPECTED_DECODED_TEXTS.get_expectation()

        decoded_text = self.processor.batch_decode(output, skip_special_tokens=True)
        self.assertEqual(decoded_text, EXPECTED_DECODED_TEXT)

    @slow
    def test_small_model_integration_test_batch_wo_image(self):
        model = Glm4vForConditionalGeneration.from_pretrained(
            "THUDM/GLM-4.1V-9B-Thinking", dtype="auto", device_map="auto"
        )
        message_wo_image = [
            {"role": "user", "content": [{"type": "text", "text": "Who are you?"}]},
        ]
        batched_messages = [self.message, message_wo_image]
        inputs = self.processor.apply_chat_template(
            batched_messages,
            tokenize=True,
            add_generation_prompt=True,
            return_dict=True,
            return_tensors="pt",
            padding=True,
        ).to(torch_device)
        # This model on the hub has `do_sample=True`.
        torch.manual_seed(42)

        # it should not matter whether two images are the same size or not
        output = model.generate(**inputs, max_new_tokens=30)

        EXPECTED_DECODED_TEXT = [
            "\nWhat kind of dog is this?\n<think>Got it, let's look at the image. The animal in the picture doesn't look like a dog; it's actually a cat. Specifically",
            "\nWho are you?\n<think>Got it, let's look at the user's question: \"Who are you?\" This is a common question when someone is just starting a conversation"
        ]  # fmt: skip
        self.assertEqual(
            self.processor.batch_decode(output, skip_special_tokens=True),
            EXPECTED_DECODED_TEXT,
        )

    @slow
    def test_small_model_integration_test_batch_different_resolutions(self):
        model = Glm4vForConditionalGeneration.from_pretrained(
            "THUDM/GLM-4.1V-9B-Thinking", dtype="auto", device_map="auto"
        )
        batched_messages = [self.message, self.message2]
        inputs = self.processor.apply_chat_template(
            batched_messages,
            tokenize=True,
            add_generation_prompt=True,
            return_dict=True,
            return_tensors="pt",
            padding=True,
        ).to(torch_device)
        # This model on the hub has `do_sample=True`.
        torch.manual_seed(42)

        # it should not matter whether two images are the same size or not
        output = model.generate(**inputs, max_new_tokens=30)

        EXPECTED_DECODED_TEXT = [
            "\nWhat kind of dog is this?\n<think>Got it, let's look at the image. The animal in the picture doesn't look like a dog; it's actually a cat. Specifically",
            "\nWhat kind of dog is this?\n<think>Got it, let's look at the image. Wait, the animals here are cats, not dogs. The question is about a dog, but",
        ]  # fmt: skip
        self.assertEqual(
            self.processor.batch_decode(output, skip_special_tokens=True),
            EXPECTED_DECODED_TEXT,
        )

    @slow
    @require_flash_attn
    @require_torch_accelerator
    def test_small_model_integration_test_batch_flashatt2(self):
        model = Glm4vForConditionalGeneration.from_pretrained(
            "THUDM/GLM-4.1V-9B-Thinking",
            dtype=torch.bfloat16,
            attn_implementation="flash_attention_2",
            device_map="auto",
        )
        batched_messages = [self.message, self.message2]
        inputs = self.processor.apply_chat_template(
            batched_messages,
            tokenize=True,
            add_generation_prompt=True,
            return_dict=True,
            return_tensors="pt",
            padding=True,
        ).to(torch_device)
        # This model on the hub has `do_sample=True`.
        torch.manual_seed(42)

        # it should not matter whether two images are the same size or not
        output = model.generate(**inputs, max_new_tokens=30)

        EXPECTED_DECODED_TEXT = [
            "\nWhat kind of dog is this?\n<think>Got it, let's look at the image. The animal in the picture doesn't look like a dog. Wait, it's a cat,",
            "\nWhat kind of dog is this?\n<think>Got it, let's look at the image. Wait, the animals here are cats, not dogs. The question is about a dog, but"
        ]  # fmt: skip
        self.assertEqual(
            self.processor.batch_decode(output, skip_special_tokens=True),
            EXPECTED_DECODED_TEXT,
        )

    @slow
    @require_flash_attn
    @require_torch_accelerator
    def test_small_model_integration_test_batch_wo_image_flashatt2(self):
        model = Glm4vForConditionalGeneration.from_pretrained(
            "THUDM/GLM-4.1V-9B-Thinking",
            dtype=torch.bfloat16,
            attn_implementation="flash_attention_2",
            device_map="auto",
        )
        message_wo_image = [
            {"role": "user", "content": [{"type": "text", "text": "Who are you?"}]},
        ]
        batched_messages = [self.message, message_wo_image]
        inputs = self.processor.apply_chat_template(
            batched_messages,
            tokenize=True,
            add_generation_prompt=True,
            return_dict=True,
            return_tensors="pt",
            padding=True,
        ).to(torch_device)
        # This model on the hub has `do_sample=True`.
        torch.manual_seed(42)

        # it should not matter whether two images are the same size or not
        output = model.generate(**inputs, max_new_tokens=30)

        EXPECTED_DECODED_TEXT = [
            "\nWhat kind of dog is this?\n<think>Got it, let's look at the image. The animal in the picture doesn't look like a dog; it's actually a cat. Specifically",
            "\nWho are you?\n<think>Got it, let's look at the user's question: \"Who are you?\" This is a common question when someone is just starting a conversation"
        ]  # fmt: skip
        self.assertEqual(
            self.processor.batch_decode(output, skip_special_tokens=True),
            EXPECTED_DECODED_TEXT,
        )
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/glm4v/test_modeling_glm4v.py",
"license": "Apache License 2.0",
"lines": 510,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:tests/models/glm4v/test_video_processing_glm4v.py | # Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from transformers.image_utils import IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_torchvision_available, is_vision_available
from ...test_video_processing_common import VideoProcessingTestMixin, prepare_video_inputs
if is_torch_available():
from PIL import Image
if is_vision_available():
if is_torchvision_available():
from transformers import Glm4vVideoProcessor
from transformers.models.glm4v.video_processing_glm4v import smart_resize
class Glm4vVideoProcessingTester:
    """Builds processor kwargs, synthetic video inputs, per-video metadata, and the
    expected flattened-patch output shape for the GLM-4.1V video-processor tests."""
    def __init__(
        self,
        parent,
        batch_size=5,
        num_frames=8,
        num_channels=3,
        min_resolution=30,
        max_resolution=80,
        temporal_patch_size=2,
        patch_size=14,
        merge_size=2,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=IMAGENET_STANDARD_MEAN,
        image_std=IMAGENET_STANDARD_STD,
        do_convert_rgb=True,
    ):
        # `size` doubles as the pixel budget handed to `smart_resize` below
        # (shortest_edge -> min_pixels, longest_edge -> max_pixels).
        size = size if size is not None else {"longest_edge": 20, "shortest_edge": 10}
        self.parent = parent
        self.batch_size = batch_size
        self.num_frames = num_frames
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb
        self.temporal_patch_size = temporal_patch_size
        self.patch_size = patch_size
        self.merge_size = merge_size
    def prepare_video_processor_dict(self):
        """Return the kwargs used to instantiate the video processor under test."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_convert_rgb": self.do_convert_rgb,
            "do_sample_frames": True,
        }
    def prepare_video_metadata(self, videos):
        """Build one metadata dict (fps / duration / total frame count) per input video."""
        video_metadata = []
        for video in videos:
            # Frame count depends on how the video is represented: list of frames,
            # stacked array, single frame, or unknown (fall back to the tester default).
            if isinstance(video, list):
                num_frames = len(video)
            elif hasattr(video, "shape"):
                if len(video.shape) == 4:  # (T, H, W, C)
                    num_frames = video.shape[0]
                else:
                    num_frames = 1
            else:
                num_frames = self.num_frames
            metadata = {
                "fps": 2,
                "duration": num_frames / 2,
                "total_num_frames": num_frames,
            }
            video_metadata.append(metadata)
        return video_metadata
    def expected_output_video_shape(self, videos):
        """Compute the [seq_len, hidden_dim] shape the processor should emit for `videos`.

        Mirrors the processor's patchification: each video contributes
        grid_t * grid_h * grid_w flattened patches of size
        channels * temporal_patch_size * patch_size**2.
        """
        grid_t = self.num_frames // self.temporal_patch_size
        hidden_dim = self.num_channels * self.temporal_patch_size * self.patch_size * self.patch_size
        seq_len = 0
        for video in videos:
            # Normalize the input to something exposing `.shape` so H/W can be read.
            if isinstance(video, list) and isinstance(video[0], Image.Image):
                video = np.stack([np.array(frame) for frame in video])
            elif hasattr(video, "shape"):
                pass
            else:
                video = np.array(video)
            if hasattr(video, "shape") and len(video.shape) >= 3:
                if len(video.shape) == 4:
                    t, height, width = video.shape[:3]
                elif len(video.shape) == 3:
                    height, width = video.shape[:2]
                    t = 1
                else:
                    t, height, width = self.num_frames, self.min_resolution, self.min_resolution
            else:
                # Shape could not be determined; assume the tester defaults.
                t, height, width = self.num_frames, self.min_resolution, self.min_resolution
            resized_height, resized_width = smart_resize(
                t,
                height,
                width,
                factor=self.patch_size * self.merge_size,
                min_pixels=self.size["shortest_edge"],
                max_pixels=self.size["longest_edge"],
            )
            grid_h, grid_w = resized_height // self.patch_size, resized_width // self.patch_size
            seq_len += grid_t * grid_h * grid_w
        return [seq_len, hidden_dim]
    def prepare_video_inputs(self, equal_resolution=False, return_tensors="pil"):
        """Create `batch_size` random videos via the shared test helper."""
        videos = prepare_video_inputs(
            batch_size=self.batch_size,
            num_frames=self.num_frames,
            num_channels=self.num_channels,
            min_resolution=self.min_resolution,
            max_resolution=self.max_resolution,
            equal_resolution=equal_resolution,
            return_tensors=return_tensors,
        )
        return videos
@require_torch
@require_vision
class Glm4vVideoProcessingTest(VideoProcessingTestMixin, unittest.TestCase):
    """Video-processing tests for GLM-4.1V; shape expectations come from the tester above."""
    fast_video_processing_class = Glm4vVideoProcessor if is_torchvision_available() else None
    # Key under which the processor returns the flattened video patches.
    input_name = "pixel_values_videos"
    def setUp(self):
        super().setUp()
        self.video_processor_tester = Glm4vVideoProcessingTester(self)
    @property
    def video_processor_dict(self):
        # Kwargs used to instantiate the processor in each test.
        return self.video_processor_tester.prepare_video_processor_dict()
    def test_video_processor_from_dict_with_kwargs(self):
        """`from_dict` honors the serialized `size` and allows overriding it via kwargs."""
        video_processor = self.fast_video_processing_class.from_dict(self.video_processor_dict)
        self.assertEqual(video_processor.size, {"longest_edge": 20, "shortest_edge": 10})
        video_processor = self.fast_video_processing_class.from_dict(
            self.video_processor_dict, size={"longest_edge": 42, "shortest_edge": 42}
        )
        self.assertEqual(video_processor.size, {"longest_edge": 42, "shortest_edge": 42})
    def test_call_pil(self):
        """PIL-frame inputs: single video and batched calls yield the expected patch shapes."""
        for video_processing_class in self.video_processor_list:
            video_processing = video_processing_class(**self.video_processor_dict)
            video_inputs = self.video_processor_tester.prepare_video_inputs(
                equal_resolution=False, return_tensors="pil"
            )
            for video in video_inputs:
                self.assertIsInstance(video[0], Image.Image)
            video_metadata = self.video_processor_tester.prepare_video_metadata(video_inputs)
            # Not batched: single video, single metadata entry.
            encoded_videos = video_processing(
                video_inputs[0], video_metadata=[video_metadata[0]], return_tensors="pt"
            )[self.input_name]
            expected_output_video_shape = self.video_processor_tester.expected_output_video_shape([video_inputs[0]])
            self.assertEqual(list(encoded_videos.shape), expected_output_video_shape)
            # Batched: all videos at once.
            encoded_videos = video_processing(video_inputs, video_metadata=video_metadata, return_tensors="pt")[
                self.input_name
            ]
            expected_output_video_shape = self.video_processor_tester.expected_output_video_shape(video_inputs)
            self.assertEqual(list(encoded_videos.shape), expected_output_video_shape)
    def test_call_numpy(self):
        """NumPy-array inputs: single video and batched calls yield the expected patch shapes."""
        for video_processing_class in self.video_processor_list:
            video_processing = video_processing_class(**self.video_processor_dict)
            video_inputs = self.video_processor_tester.prepare_video_inputs(
                equal_resolution=False, return_tensors="np"
            )
            video_metadata = self.video_processor_tester.prepare_video_metadata(video_inputs)
            encoded_videos = video_processing(
                video_inputs[0], video_metadata=[video_metadata[0]], return_tensors="pt"
            )[self.input_name]
            expected_output_video_shape = self.video_processor_tester.expected_output_video_shape([video_inputs[0]])
            self.assertEqual(list(encoded_videos.shape), expected_output_video_shape)
            encoded_videos = video_processing(video_inputs, video_metadata=video_metadata, return_tensors="pt")[
                self.input_name
            ]
            expected_output_video_shape = self.video_processor_tester.expected_output_video_shape(video_inputs)
            self.assertEqual(list(encoded_videos.shape), expected_output_video_shape)
    def test_call_pytorch(self):
        """Torch-tensor inputs: single video and batched calls yield the expected patch shapes."""
        for video_processing_class in self.video_processor_list:
            video_processing = video_processing_class(**self.video_processor_dict)
            video_inputs = self.video_processor_tester.prepare_video_inputs(
                equal_resolution=False, return_tensors="pt"
            )
            video_metadata = self.video_processor_tester.prepare_video_metadata(video_inputs)
            encoded_videos = video_processing(
                video_inputs[0], video_metadata=[video_metadata[0]], return_tensors="pt"
            )[self.input_name]
            expected_output_video_shape = self.video_processor_tester.expected_output_video_shape([video_inputs[0]])
            self.assertEqual(list(encoded_videos.shape), expected_output_video_shape)
            encoded_videos = video_processing(video_inputs, video_metadata=video_metadata, return_tensors="pt")[
                self.input_name
            ]
            expected_output_video_shape = self.video_processor_tester.expected_output_video_shape(video_inputs)
            self.assertEqual(list(encoded_videos.shape), expected_output_video_shape)
    @unittest.skip("Skip for now, the test needs adjustment for GLM-4.1V")
    def test_call_numpy_4_channels(self):
        """4-channel inputs (currently skipped pending GLM-4.1V support)."""
        for video_processing_class in self.video_processor_list:
            # Test that can process videos which have an arbitrary number of channels
            # Initialize video_processing
            video_processor = video_processing_class(**self.video_processor_dict)
            # create random numpy tensors
            self.video_processor_tester.num_channels = 4
            video_inputs = self.video_processor_tester.prepare_video_inputs(
                equal_resolution=False, return_tensors="np"
            )
            # Test not batched input
            encoded_videos = video_processor(
                video_inputs[0],
                return_tensors="pt",
                input_data_format="channels_last",
                image_mean=(0.0, 0.0, 0.0, 0.0),
                image_std=(1.0, 1.0, 1.0, 1.0),
            )[self.input_name]
            expected_output_video_shape = self.video_processor_tester.expected_output_video_shape([video_inputs[0]])
            self.assertEqual(list(encoded_videos.shape), expected_output_video_shape)
            # Test batched
            encoded_videos = video_processor(
                video_inputs,
                return_tensors="pt",
                input_data_format="channels_last",
                image_mean=(0.0, 0.0, 0.0, 0.0),
                image_std=(1.0, 1.0, 1.0, 1.0),
            )[self.input_name]
            expected_output_video_shape = self.video_processor_tester.expected_output_video_shape(video_inputs)
            self.assertEqual(list(encoded_videos.shape), expected_output_video_shape)
    def test_nested_input(self):
        """Tests that the processor can work with nested list where each video is a list of arrays"""
        for video_processing_class in self.video_processor_list:
            video_processing = video_processing_class(**self.video_processor_dict)
            video_inputs = self.video_processor_tester.prepare_video_inputs(
                equal_resolution=False, return_tensors="np"
            )
            video_inputs_nested = [list(video) for video in video_inputs]
            video_metadata = self.video_processor_tester.prepare_video_metadata(video_inputs)
            # Test not batched input
            encoded_videos = video_processing(
                video_inputs_nested[0], video_metadata=[video_metadata[0]], return_tensors="pt"
            )[self.input_name]
            expected_output_video_shape = self.video_processor_tester.expected_output_video_shape([video_inputs[0]])
            self.assertEqual(list(encoded_videos.shape), expected_output_video_shape)
            # Test batched
            encoded_videos = video_processing(video_inputs_nested, video_metadata=video_metadata, return_tensors="pt")[
                self.input_name
            ]
            expected_output_video_shape = self.video_processor_tester.expected_output_video_shape(video_inputs)
            self.assertEqual(list(encoded_videos.shape), expected_output_video_shape)
    def test_call_sample_frames(self):
        """Frame sampling requires metadata; processing without it must raise ValueError."""
        for video_processing_class in self.video_processor_list:
            video_processor_dict = self.video_processor_dict.copy()
            video_processing = video_processing_class(**video_processor_dict)
            # Temporarily enlarge the tester's resolutions/frames; restored at the end.
            prev_num_frames = self.video_processor_tester.num_frames
            self.video_processor_tester.num_frames = 8
            prev_min_resolution = getattr(self.video_processor_tester, "min_resolution", None)
            prev_max_resolution = getattr(self.video_processor_tester, "max_resolution", None)
            self.video_processor_tester.min_resolution = 56
            self.video_processor_tester.max_resolution = 112
            video_inputs = self.video_processor_tester.prepare_video_inputs(
                equal_resolution=False,
                return_tensors="torch",
            )
            metadata = [[{"total_num_frames": 8, "fps": 4}]]
            batched_metadata = metadata * len(video_inputs)
            encoded_videos = video_processing(video_inputs[0], return_tensors="pt", video_metadata=metadata)[
                self.input_name
            ]
            encoded_videos_batched = video_processing(
                video_inputs, return_tensors="pt", video_metadata=batched_metadata
            )[self.input_name]
            self.assertIsNotNone(encoded_videos)
            self.assertIsNotNone(encoded_videos_batched)
            # Output is flattened patches: (seq_len, hidden_dim).
            self.assertEqual(len(encoded_videos.shape), 2)
            self.assertEqual(len(encoded_videos_batched.shape), 2)
            # Sampling without metadata is not allowed for GLM-4.1V.
            with self.assertRaises(ValueError):
                video_processing(video_inputs[0], return_tensors="pt")[self.input_name]
            self.video_processor_tester.num_frames = prev_num_frames
            if prev_min_resolution is not None:
                self.video_processor_tester.min_resolution = prev_min_resolution
            if prev_max_resolution is not None:
                self.video_processor_tester.max_resolution = prev_max_resolution
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/glm4v/test_video_processing_glm4v.py",
"license": "Apache License 2.0",
"lines": 288,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:tests/models/kyutai_speech_to_text/test_modeling_kyutai_speech_to_text.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch Moshi ASR model."""
import gc
import tempfile
import unittest
import datasets
import pytest
from parameterized import parameterized
from transformers import (
KyutaiSpeechToTextConfig,
KyutaiSpeechToTextForConditionalGeneration,
KyutaiSpeechToTextProcessor,
is_torch_available,
)
from transformers.testing_utils import (
cleanup,
require_accelerate,
require_torch,
require_torch_accelerator,
slow,
torch_device,
)
from transformers.utils.generic import is_flash_attention_requested
from ...generation.test_utils import GenerationTesterMixin, assert_similar_generate_outputs
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import (
TEST_EAGER_MATCHES_SDPA_INFERENCE_PARAMETERIZATION,
ModelTesterMixin,
floats_tensor,
ids_tensor,
)
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
KyutaiSpeechToTextForConditionalGeneration,
KyutaiSpeechToTextModel,
)
class KyutaiSpeechToTextModelTester:
    """Builds tiny `KyutaiSpeechToTextConfig`s and matching model/generation inputs
    for the Kyutai speech-to-text test suite below."""
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        text_seq_length=1,
        input_values_length=192,  # gives 3 audio tokens, corresponding to the default in GenerationTesterMixin
        is_training=False,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        codebook_vocab_size=2049,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        num_key_value_heads=None,
        max_position_embeddings=512,
        rope_theta=10000.0,
        hidden_act="silu",
        head_dim=None,
        initializer_range=0.02,
        use_cache=True,
        sliding_window=512,
        attention_dropout=0.1,
        ffn_dim=38,
        rms_norm_eps=1e-6,
        num_codebooks=8,
        frame_size=64,
        delay_in_tokens=5,
        audio_bos_token_id=2048,
        audio_pad_token_id=2048,
        tie_word_embeddings=False,
        pad_token_id=0,
        bos_token_id=1,
        codec_config=None,
        scope=None,
    ):
        if codec_config is None:
            # Default tiny Mimi codec config. Built fresh per instance (instead of a
            # dict default argument) so instances never share — and cannot mutate —
            # a single module-level dict; note the nested `upsampling_ratios` list
            # would be aliased even by a shallow copy.
            codec_config = {
                "model_type": "mimi",
                "num_quantizers": 8,
                "audio_channels": 1,
                "chunk_in_sec": None,
                "hidden_size": 16,
                "num_filters": 8,
                "num_residual_layers": 1,
                "upsampling_ratios": [8, 4],
                "codebook_size": 16,
                "vector_quantization_hidden_dimension": 16,
                "upsample_groups": 16,
                "num_hidden_layers": 2,
                "num_attention_heads": 2,
                "num_key_value_heads": 2,
                "sliding_window": 4,
                "codebook_dim": 16,
                "use_cache": False,
            }
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.text_seq_length = text_seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.codebook_vocab_size = codebook_vocab_size
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.max_position_embeddings = max_position_embeddings
        self.rope_theta = rope_theta
        self.hidden_act = hidden_act
        self.head_dim = head_dim
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.sliding_window = sliding_window
        self.attention_dropout = attention_dropout
        self.ffn_dim = ffn_dim
        self.rms_norm_eps = rms_norm_eps
        self.num_codebooks = num_codebooks
        self.frame_size = frame_size
        self.delay_in_tokens = delay_in_tokens
        self.audio_bos_token_id = audio_bos_token_id
        self.audio_pad_token_id = audio_pad_token_id
        self.tie_word_embeddings = tie_word_embeddings
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.codec_config = codec_config
        self.scope = scope
        self.input_values_length = input_values_length
    def get_config(self):
        """Return a tiny `KyutaiSpeechToTextConfig` mirroring the tester attributes."""
        return KyutaiSpeechToTextConfig(
            codebook_vocab_size=self.codebook_vocab_size,
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            num_key_value_heads=self.num_key_value_heads,
            max_position_embeddings=self.max_position_embeddings,
            rope_theta=self.rope_theta,
            hidden_act=self.hidden_act,
            head_dim=self.head_dim,
            initializer_range=self.initializer_range,
            use_cache=self.use_cache,
            sliding_window=self.sliding_window,
            attention_dropout=self.attention_dropout,
            ffn_dim=self.ffn_dim,
            rms_norm_eps=self.rms_norm_eps,
            num_codebooks=self.num_codebooks,
            frame_size=self.frame_size,
            delay_in_tokens=self.delay_in_tokens,
            audio_bos_token_id=self.audio_bos_token_id,
            audio_pad_token_id=self.audio_pad_token_id,
            tie_word_embeddings=self.tie_word_embeddings,
            pad_token_id=self.pad_token_id,
            bos_token_id=self.bos_token_id,
            codec_config=self.codec_config,
        )
    def create_and_check_model(self, config, input_ids, input_mask):
        """Run a forward pass (with and without the mask) and check the hidden-state shape."""
        model = KyutaiSpeechToTextModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def prepare_config_and_inputs(self):
        """Build (config, input_ids, attention_mask) for forward-pass tests.

        `input_ids` stacks the text channel plus `num_codebooks` audio channels
        along the last dimension: (batch, seq, 1 + num_codebooks).
        """
        config = self.get_config()
        # `+ 1` keeps sampled ids strictly positive (0 is the pad token).
        text_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size - 1) + 1
        codebook_input_ids = (
            ids_tensor([self.batch_size, self.seq_length, self.num_codebooks], self.codebook_vocab_size - 1) + 1
        )
        input_ids = torch.cat([text_input_ids.unsqueeze(2), codebook_input_ids], dim=2)
        attention_mask = text_input_ids.ne(1).to(torch_device)
        return config, input_ids, attention_mask
    def prepare_config_and_inputs_generate(self):
        """Build (config, input_ids, input_values, padding_mask) for generation tests."""
        config = self.get_config()
        input_ids = torch.ones([self.batch_size, 1], dtype=torch.long, device=torch_device)
        input_values = floats_tensor([self.batch_size, 1, self.input_values_length])
        padding_mask = torch.ones_like(input_values, dtype=torch.int32, device=torch_device)
        return config, input_ids, input_values, padding_mask
    def prepare_config_and_inputs_for_common(self):
        """Adapt `prepare_config_and_inputs` to the (config, inputs_dict) mixin convention."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            attention_mask,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
    def prepare_config_and_inputs_for_common_generate(self):
        """Adapt `prepare_config_and_inputs_generate` to the (config, inputs_dict) convention."""
        config_and_inputs = self.prepare_config_and_inputs_generate()
        (
            config,
            input_ids,
            input_values,
            padding_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "input_values": input_values,
            "padding_mask": padding_mask,
        }
        return config, inputs_dict
@require_torch
class KyutaiSpeechToTextModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model/generation/pipeline tests for Kyutai speech-to-text, with overrides
    for its non-standard inputs (text + codebook channels, raw `input_values`)."""
    all_model_classes = (
        (
            KyutaiSpeechToTextModel,
            KyutaiSpeechToTextForConditionalGeneration,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": KyutaiSpeechToTextModel,
            "automatic-speech-recognition": KyutaiSpeechToTextForConditionalGeneration,
            "any-to-any": KyutaiSpeechToTextForConditionalGeneration,
        }
        if is_torch_available()
        else {}
    )
    # Need to use `0.8` instead of `0.9` for `test_cpu_offload`
    # This is because we are hitting edge cases with the causal_mask buffer
    model_split_percents = [0.5, 0.7, 0.8]
    def setUp(self):
        self.model_tester = KyutaiSpeechToTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=KyutaiSpeechToTextConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        # No model-specific tweaks needed beyond the mixin defaults.
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels)
        return inputs_dict
    def prepare_config_and_inputs_for_generate(self, batch_size=2):
        # monkey patch prepare_config_and_inputs_for_common
        # (generation needs `input_values` instead of the forward-pass inputs; the
        # patch is undone before returning so other tests see the original method)
        prepare_config_and_inputs_for_common = self.model_tester.prepare_config_and_inputs_for_common
        original_batch_size = self.model_tester.batch_size
        self.model_tester.prepare_config_and_inputs_for_common = (
            self.model_tester.prepare_config_and_inputs_for_common_generate
        )
        self.model_tester.batch_size = batch_size
        config, filtered_inputs_dict = super().prepare_config_and_inputs_for_generate()
        self.model_tester.prepare_config_and_inputs_for_common = prepare_config_and_inputs_for_common
        self.model_tester.batch_size = original_batch_size
        return config, filtered_inputs_dict
    @pytest.mark.skip(reason="Moshi ASR has custom embedding approach (text and audio embeddings).")
    def test_model_get_set_embeddings(self):
        pass
    @pytest.mark.skip(reason="Moshi ASR has custom embedding approach (text and audio embeddings).")
    def test_resize_embeddings_untied(self):
        pass
    @pytest.mark.skip(reason="Moshi ASR has custom embedding approach (text and audio embeddings).")
    def test_resize_tokens_embeddings(self):
        pass
    @pytest.mark.skip(reason="Moshi ASR has custom embedding approach (text and audio embeddings).")
    def test_tied_weights_keys(self):
        pass
    @pytest.mark.skip(reason="Does not apply to Moshi ASR that requires input_values.")
    def test_generate_without_input_ids(self):
        pass
    @parameterized.expand(TEST_EAGER_MATCHES_SDPA_INFERENCE_PARAMETERIZATION)
    def test_eager_matches_sdpa_inference(
        self, name, dtype, padding_side, use_attention_mask, output_attentions, enable_kernels
    ):
        # Skip the known-failing parameterizations, delegate the rest to the mixin.
        if use_attention_mask or (not use_attention_mask and dtype == "fp32" and not output_attentions):
            self.skipTest("Test is failing, fix me :) ")
        parent_parameterized_test = getattr(ModelTesterMixin, self._testMethodName)
        parent_parameterized_test(self)
    @unittest.skip(reason="Some undefined behavior encountered with test versions of this model. Skip for now.")
    def test_cpu_offload(self):
        pass
    @unittest.skip(reason="Some undefined behavior encountered with test versions of this model. Skip for now.")
    def test_disk_offload_bin(self):
        pass
    @unittest.skip(reason="Some undefined behavior encountered with test versions of this model. Skip for now.")
    def test_disk_offload_safetensors(self):
        pass
    @pytest.mark.generate
    def test_left_padding_compatibility(self):
        # TODO: this tester has non-standard input monkey-patching in `prepare_config_and_inputs_for_generate`,
        # and the test fails with the monkey-patched test inputs (bad shapes for the test) ☠️ The base inputs work
        # fine, though.
        unpadded_custom_inputs = self.model_tester.prepare_config_and_inputs_for_common()[1]
        super().test_left_padding_compatibility(unpadded_custom_inputs=unpadded_custom_inputs)
    def test_generate_continue_from_past_key_values(self):
        # Tests that we can continue generating from past key values, returned from a previous `generate` call
        # (override of the mixin version: inputs must come from `prepare_config_and_inputs_for_generate`
        # because this model generates from `input_values`, not plain `input_ids`)
        for model_class in self.all_generative_model_classes:
            if any(model_name in model_class.__name__.lower() for model_name in ["imagegpt", "mllama"]):
                self.skipTest(reason="Won't fix: old model with unique inputs/caches/other")
            if any(model_name in model_class.__name__.lower() for model_name in ["umt5"]):
                self.skipTest(reason="TODO: needs modeling or test input preparation fixes for compatibility")
            config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
            if not hasattr(config.get_text_config(), "use_cache"):
                self.skipTest(reason=f"{model_class.__name__} doesn't support caching")
            # Let's make it always:
            # 1. use cache (for obvious reasons)
            # 2. generate to max length (which can be achieved by setting the eos token to an invalid value), which
            #    would make the test flaky (e.g. EOS is generated on iteration 1 on both generations, but the
            #    continuation would force it to generate beyond an EOS token)
            # 3. ignore `token_type_ids` for simplicity
            # 4. ignore `forced_eos_token_id`, which requires further manipulation of the continuation inputs and is
            #    active by default on some models
            # 5. ignore `encoder_no_repeat_ngram_size`, which is set by default in some encoder-decoder models. When
            #    we use their decoder as a stand-alone model, `encoder_no_repeat_ngram_size` actually prevents
            #    repetition exclusively from the prompt. This test relies on comparing one call vs 2 calls
            #    with cache, what is considered a prompt is different in the two cases.
            if "token_type_ids" in inputs:
                del inputs["token_type_ids"]
            model = model_class(config).to(torch_device)
            model.eval()
            # If "past_key_values" is not returned, skip the test (e.g. RWKV uses a different cache name and format)
            outputs = model(**inputs)
            if "past_key_values" not in outputs:
                self.skipTest(reason="This model doesn't return `past_key_values`")
            generate_kwargs = {
                "pad_token_id": -1,
                "eos_token_id": -1,
                "forced_eos_token_id": None,
                "encoder_no_repeat_ngram_size": 0,
                "use_cache": True,
                "do_sample": False,
                "return_dict_in_generate": True,
                "output_scores": True,
            }
            # Traditional way of generating text, with `return_dict_in_generate` to return the past key values
            _, inputs = self.prepare_config_and_inputs_for_generate()
            outputs = model.generate(**inputs, **generate_kwargs, max_new_tokens=3)
            # Let's generate again, but passing the past key values in between (2 + 1 = 3 tokens). Note that the
            # inputs may need to be tweaked across `generate` calls (like the attention mask).
            outputs_cached = model.generate(**inputs, **generate_kwargs, max_new_tokens=2)
            # Continue from the tokens generated above, preparing the inputs accordingly
            inputs["past_key_values"] = outputs_cached.past_key_values
            new_attention_len = outputs_cached.sequences.shape[-1]
            if config.is_encoder_decoder:
                inputs["decoder_input_ids"] = outputs_cached.sequences
                if "decoder_attention_mask" in inputs:
                    inputs["decoder_attention_mask"] = torch.nn.functional.pad(
                        inputs["decoder_attention_mask"],
                        (0, new_attention_len - inputs["decoder_attention_mask"].shape[1]),
                        mode="constant",
                        value=1,
                    )
            else:
                inputs["input_ids"] = outputs_cached.sequences
                if "attention_mask" in inputs:
                    inputs["attention_mask"] = torch.nn.functional.pad(
                        inputs["attention_mask"],
                        (0, new_attention_len - inputs["attention_mask"].shape[1]),
                        mode="constant",
                        value=1,
                    )
            first_caches_scores = outputs_cached.scores
            outputs_cached = model.generate(**inputs, **generate_kwargs, max_new_tokens=1)
            full_cached_scores = first_caches_scores + outputs_cached.scores
            outputs_cached.scores = full_cached_scores
            # The two sets of generated text and past kv should be equal to each other
            assert_similar_generate_outputs(outputs, outputs_cached)
            self._check_caches_are_equal(outputs.past_key_values, outputs_cached.past_key_values)
    # skipping for anything FA related that is not FA2 (no attn interface implemented)
    def flash_attn_inference_equivalence(
        self, attn_implementation: str, padding_side: str, atol: float = 4e-2, rtol: float = 4e-2
    ):
        if (
            is_flash_attention_requested(requested_attention_implementation=attn_implementation)
            and attn_implementation != "flash_attention_2"
        ):
            self.skipTest(reason="Model fails for every other FA implementation than FA2 (no attention interface).")
        super().flash_attn_inference_equivalence(attn_implementation, padding_side, atol, rtol)
    # needs to be overridden to avoid to avoid casting of input_values to float16
    # indeed, the codec model is kept in fp32, so we need to avoid casting input_values to float16
    def _test_attention_implementation(self, attn_implementation):
        """
        Compares the output of generate with the eager attention implementation against other implementations.
        NOTE: despite the test logic being the same, different implementations actually need different decorators, hence
        this separate function.
        """
        max_new_tokens = 30
        support_flag = {
            "sdpa": "_supports_sdpa",
            "flash_attention_2": "_supports_flash_attn",
        }
        if (
            is_flash_attention_requested(requested_attention_implementation=attn_implementation)
            and attn_implementation != "flash_attention_2"
        ):
            self.skipTest(reason="Model fails for every other FA implementation than FA2 (no attention interface).")
        for model_class in self.all_generative_model_classes:
            if attn_implementation != "eager" and not getattr(model_class, support_flag[attn_implementation]):
                self.skipTest(f"{model_class.__name__} does not support `attn_implementation={attn_implementation}`")
            config, original_inputs_dict = self.prepare_config_and_inputs_for_generate()
            inputs_dict = {}
            for input_name, input_data in original_inputs_dict.items():
                # Cast float inputs to fp16 EXCEPT `input_values`: the codec model stays in fp32.
                if (
                    isinstance(input_data, torch.Tensor)
                    and input_data.dtype in [torch.float32, torch.bfloat16]
                    and input_name != "input_values"
                ):
                    inputs_dict[input_name] = input_data.to(torch.float16)
                else:
                    inputs_dict[input_name] = input_data
            main_input = inputs_dict[model_class.main_input_name]
            # FA doesn't accept masking in the middle of the sequence for now. We usually generate right-padded
            # attention masks at test time and, with generate, the mask will be appended with 1s on the right,
            # resulting in a mask with holes (not supported properly by FA).
            if is_flash_attention_requested(requested_attention_implementation=attn_implementation):
                for input_name in ("attention_mask", "decoder_attention_mask", "encoder_attention_mask"):
                    if input_name in inputs_dict:
                        inputs_dict[input_name] = torch.ones_like(inputs_dict[input_name])
            # make sure that all models have enough positions for generation
            if hasattr(config, "max_position_embeddings"):
                config.max_position_embeddings = max_new_tokens + main_input.shape[1] + 1
            model = model_class(config)
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                del model
                gc.collect()
                generate_kwargs = {
                    "max_new_tokens": max_new_tokens,
                    "do_sample": False,
                    "return_dict_in_generate": True,
                    "output_scores": True,
                    "use_cache": True,
                }
                model_eager = model_class.from_pretrained(
                    tmpdirname,
                    dtype=torch.float16,
                    attn_implementation="eager",
                ).to(torch_device)
                res_eager = model_eager.generate(**inputs_dict, **generate_kwargs)
                del model_eager
                gc.collect()
                model_attn = model_class.from_pretrained(
                    tmpdirname,
                    dtype=torch.float16,
                    attn_implementation=attn_implementation,
                ).to(torch_device)
                res_attn = model_attn.generate(**inputs_dict, **generate_kwargs)
                del model_attn
                gc.collect()
                assert_similar_generate_outputs(res_eager, res_attn, atol=1e-3, rtol=1e-3)
@require_torch
@require_accelerate
@slow
class KyutaiSpeechToTextBf16Test(unittest.TestCase):
    def test_bf16_fp32_conversion(self):
        r"""
        A test to check whether the argument `keep_in_fp32_modules` correctly does its job:
        whatever dtype is requested at load time (fp16 or bf16, with or without a
        `device_map`), `model.codec_model` must stay in float32 while the language model
        (`model.model`) and `lm_head` follow the requested dtype.
        """
        model_checkpoint = "kyutai/stt-2.6b-en-trfs"
        orig_import = __import__
        accelerate_mock = unittest.mock.Mock()

        # mock import of accelerate: when `accelerate_available` (a late-bound closure
        # variable, read at import time) is False, raise ImportError so `from_pretrained`
        # takes the non-accelerate loading path; otherwise hand back a Mock.
        # NOTE(review): `accelerate_available` is only ever set to False below, so every
        # load in this test exercises the ImportError branch — including the ones whose
        # comments say "using accelerate" — TODO confirm this is intended.
        def import_accelerate_mock(name, *args, **kwargs):
            if name == "accelerate":
                if accelerate_available:
                    return accelerate_mock
                else:
                    raise ImportError
            return orig_import(name, *args, **kwargs)

        # Load without using `accelerate`, in fp16
        with unittest.mock.patch("builtins.__import__", side_effect=import_accelerate_mock):
            accelerate_available = False
            model = KyutaiSpeechToTextForConditionalGeneration.from_pretrained(model_checkpoint, dtype=torch.float16)
            self.assertTrue(model.codec_model.dtype == torch.float32)  # pinned by keep_in_fp32_modules
            self.assertTrue(model.model.dtype == torch.float16)
            self.assertTrue(model.lm_head.weight.data.dtype == torch.float16)

            # Load in bf16, still without `accelerate`
            model = KyutaiSpeechToTextForConditionalGeneration.from_pretrained(model_checkpoint, dtype=torch.bfloat16)
            self.assertTrue(model.codec_model.dtype == torch.float32)
            self.assertTrue(model.model.dtype == torch.bfloat16)
            self.assertTrue(model.lm_head.weight.data.dtype == torch.bfloat16)

            # Load using `accelerate` in bf16 (`device_map="auto"` normally triggers the accelerate path,
            # but see the NOTE above: the mocked import still raises ImportError here)
            model = KyutaiSpeechToTextForConditionalGeneration.from_pretrained(
                model_checkpoint, dtype=torch.bfloat16, device_map="auto"
            )
            self.assertTrue(model.codec_model.dtype == torch.float32)
            self.assertTrue(model.model.dtype == torch.bfloat16)
            self.assertTrue(model.lm_head.weight.data.dtype == torch.bfloat16)

            # Load in bf16 again, without a device_map
            model = KyutaiSpeechToTextForConditionalGeneration.from_pretrained(
                model_checkpoint,
                dtype=torch.bfloat16,
            )
            self.assertTrue(model.codec_model.dtype == torch.float32)
            self.assertTrue(model.model.dtype == torch.bfloat16)
            self.assertTrue(model.lm_head.weight.data.dtype == torch.bfloat16)

            # Load without using `accelerate`, back in fp16
            model = KyutaiSpeechToTextForConditionalGeneration.from_pretrained(
                model_checkpoint,
                dtype=torch.float16,
            )
            self.assertTrue(model.codec_model.dtype == torch.float32)
            self.assertTrue(model.model.dtype == torch.float16)
            self.assertTrue(model.lm_head.weight.data.dtype == torch.float16)

            # Load using `accelerate` (device_map) in fp16
            model = KyutaiSpeechToTextForConditionalGeneration.from_pretrained(
                model_checkpoint, dtype=torch.float16, device_map="auto"
            )
            self.assertTrue(model.codec_model.dtype == torch.float32)
            self.assertTrue(model.model.dtype == torch.float16)
            self.assertTrue(model.lm_head.weight.data.dtype == torch.float16)
class KyutaiSpeechToTextForConditionalGenerationIntegrationTests(unittest.TestCase):
    """Slow integration tests running real generation against the released Kyutai STT checkpoint."""

    # Class-level dataset cache, shared by all tests; populated lazily by `_load_dataset`.
    _dataset = None

    def setUp(self):
        # Released checkpoint used by every test in this class.
        self.model_checkpoint = "kyutai/stt-2.6b-en-trfs"

    def tearDown(self):
        # Free accelerator memory and force GC between tests (large model loads).
        cleanup(torch_device, gc_collect=True)

    @classmethod
    def _load_dataset(cls):
        # Lazy loading of the dataset. Because it is a class method, it will only be loaded once per pytest process.
        if cls._dataset is None:
            cls._dataset = datasets.load_dataset(
                "hf-internal-testing/librispeech_asr_dummy", "clean", split="validation"
            )
            # using 24000 here for simplicity, should rather be processor.feature_extractor.sampling_rate
            cls._dataset = cls._dataset.cast_column("audio", datasets.Audio(sampling_rate=24000))

    def _load_datasamples(self, num_samples):
        """Return the raw audio arrays of the first `num_samples` samples, sorted by id for determinism."""
        self._load_dataset()
        ds = self._dataset
        speech_samples = ds.sort("id")[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]

    @slow
    @require_torch_accelerator
    def test_generation(self):
        """
        reproduce test expected outputs using original codebase: https://gist.github.com/eustlb/7a9aa6139d11e0103c6b65bac103da52
        DISCLAIMER: we are testing for pretty short inputs. Indeed, reproducing correct expected outputs for longer is not possible
        as implementation choices (qkv matrix in one linear for original code vs three for hf) create growing divergence with context length,
        ultimately giving different outputs.
        """
        processor = KyutaiSpeechToTextProcessor.from_pretrained(self.model_checkpoint)
        model = KyutaiSpeechToTextForConditionalGeneration.from_pretrained(
            self.model_checkpoint, device_map=torch_device
        )

        samples = self._load_datasamples(1)
        inputs = processor(
            samples,
        ).to(torch_device)

        out = model.generate(**inputs)

        # Expected token ids produced by greedy generation on this exact sample (see gist above).
        # fmt: off
        EXPECTED_TOKENS = torch.tensor([
            [48000, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 1519, 263, 3, 3, 0, 3635, 428, 641, 0, 277, 3, 0, 265, 0, 267, 1162, 261, 274, 410, 0, 272, 3, 0, 265, 0, 260, 1621, 0, 1174, 371, 262, 3, 3, 3, 0, 269, 0, 281, 0, 304, 0, 2433, 3, 0, 266, 3, 0, 281, 1661, 3, 0, 376, 3, 3, 0, 350, 261, 401, 516, 263, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3]],
        )
        # fmt: on

        torch.testing.assert_close(out.cpu(), EXPECTED_TOKENS)

    @slow
    @require_torch_accelerator
    def test_generation_batched(self):
        """
        reproduce test expected outputs using original codebase: https://gist.github.com/eustlb/b58c217c75124d405ec1c13877c7ece8
        DISCLAIMER: we are testing for pretty short inputs. Indeed, reproducing correct expected outputs for longer is not possible
        as implementation choices (qkv matrix in one linear for original code vs three for hf) create growing divergence with context length,
        ultimately giving different outputs.
        """
        processor = KyutaiSpeechToTextProcessor.from_pretrained(self.model_checkpoint)
        model = KyutaiSpeechToTextForConditionalGeneration.from_pretrained(
            self.model_checkpoint, device_map=torch_device
        )

        samples = self._load_datasamples(4)
        inputs = processor(
            samples,
        ).to(torch_device)

        out = model.generate(**inputs)

        # Expected token ids for the 4-sample batch (see gist above), one row per sample.
        # fmt: off
        EXPECTED_TOKENS = torch.tensor([
            [48000, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 1519, 263, 3, 3, 0, 3635, 428, 641, 0, 277, 3, 0, 265, 0, 267, 1162, 261, 274, 410, 0, 272, 3, 0, 265, 0, 260, 1621, 0, 1174, 371, 262, 3, 3, 3, 0, 269, 0, 281, 0, 304, 0, 2433, 3, 0, 266, 3, 0, 281, 1661, 3, 0, 376, 3, 3, 0, 350, 261, 401, 516, 263, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3],
            [48000, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 500, 334, 0, 277, 3, 0, 1519, 263, 3, 3, 0, 3635, 428, 641, 264, 261, 0, 511, 1109, 3, 0, 1138, 3, 3, 3, 0, 508, 827, 3, 3, 3, 3, 0, 468, 3, 3, 0, 376, 3, 3, 3, 0, 260, 978, 263, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3],
            [48000, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 414, 0, 527, 261, 3, 0, 409, 3, 3, 3, 0, 271, 3, 0, 309, 3, 0, 285, 3, 0, 521, 371, 609, 3, 3, 0, 260, 959, 3, 3, 3, 0, 272, 3, 0, 265, 0, 546, 262, 3, 3, 3, 3, 3, 3, 0, 291, 3, 0, 975, 2203, 3, 3, 3, 3, 0, 269, 3, 0, 260, 489, 651, 274, 279, 1870, 3, 0, 1084, 873, 273, 3, 0, 260, 531, 3, 3, 0, 409, 262, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 1502, 1005, 836, 3, 3, 0, 1666, 306, 3, 0, 340, 3, 0, 260, 3232, 3, 0, 269, 3, 3, 0, 275, 261, 0, 260, 1379, 261, 0, 3324, 3, 3, 3, 3, 0, 549, 3, 3, 0, 693, 405, 323, 3, 0, 266, 3, 3, 0, 265, 0, 699, 263, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3],
            [48000, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 414, 0, 392, 3, 3, 0, 1269, 314, 0, 2607, 261, 3, 3, 3, 0, 1098, 295, 3, 3, 3, 0, 446, 625, 3, 0, 496, 280, 1205, 485, 1071, 1627, 449, 264, 261, 3, 0, 400, 0, 277, 3, 3, 3, 0, 260, 342, 3, 0, 618, 280, 1866, 3, 3, 0, 554, 3, 3, 3, 3, 0, 317, 262, 3, 3, 3, 3, 3, 3, 3, 3, 0, 269, 0, 303, 3, 0, 573, 2615, 3, 3, 0, 276, 3, 0, 275, 0, 305, 3, 0, 260, 415, 3, 3, 0, 272, 3, 3, 3, 3, 0, 1631, 327, 3, 3, 0, 333, 739, 841, 263, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3],
        ])
        # fmt: on

        # A known harmless divergence in sample 2 (three tokens) depending on hardware/kernels.
        # See https://github.com/huggingface/transformers/pull/39416
        EXPECTED_TOKENS_2 = torch.clone(EXPECTED_TOKENS)
        EXPECTED_TOKENS_2[2, 159:162] = torch.tensor([3, 0, 269])

        # Accept either variant: exact match against the primary expectation, else the alternate.
        try:
            torch.testing.assert_close(out.cpu(), EXPECTED_TOKENS)
        except AssertionError:
            torch.testing.assert_close(out.cpu(), EXPECTED_TOKENS_2)
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/kyutai_speech_to_text/test_modeling_kyutai_speech_to_text.py",
"license": "Apache License 2.0",
"lines": 593,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.