sample_id stringlengths 21 196 | text stringlengths 105 936k | metadata dict | category stringclasses 6
values |
|---|---|---|---|
huggingface/transformers:src/transformers/integrations/metal_quantization.py | # Copyright 2026 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Metal affine quantization integration for transformers.
This module provides:
- ``MetalLinear``: a drop-in replacement for ``nn.Linear`` that stores weights
as affine-quantized uint32 packed tensors and uses the ``quantization-mlx``
Metal kernels for the forward pass.
- ``replace_with_metal_linear``: walks a model and swaps every eligible
``nn.Linear`` with ``MetalLinear``.
- ``MetalQuantize`` / ``MetalDequantize``: weight conversion operations that
participate in the new ``WeightConverter`` pipeline.
Weight layout (transposed, matching ``affine_qmm_t``):
- ``weight``: ``[N, K_packed]`` (``uint32``) -- K is the packed dimension.
- ``scales``: ``[N, K // group_size]`` (``float16 / bfloat16``)
- ``qbiases``: ``[N, K // group_size]`` (same dtype as scales)
The kernel call is ``affine_qmm_t(x, weight, scales, qbiases, group_size, bits)``
which computes ``y = x @ dequant(weight).T``, identical to ``nn.Linear``.
"""
from ..core_model_loading import ConversionOps
from ..quantizers.quantizers_utils import should_convert_module
from ..utils import is_torch_available, logging
if is_torch_available():
import torch
import torch.nn as nn
logger = logging.get_logger(__name__)
_metal_kernel = None
def _get_metal_kernel():
    """Return the cached quantization-mlx kernel, fetching it from the Hub on first use."""
    global _metal_kernel
    if _metal_kernel is not None:
        return _metal_kernel
    try:
        from .hub_kernels import get_kernel

        _metal_kernel = get_kernel("kernels-community/mlx-quantization-metal-kernels")
    except Exception as e:
        # Surface a single actionable error whether the failure was a missing
        # `kernels` package, a download problem, or an unsupported platform.
        raise ImportError(
            f"Failed to load the quantization-mlx kernel from the Hub: {e}. "
            "Make sure you have `kernels` installed (`pip install kernels`) "
            "and are running on an Apple Silicon machine."
        ) from e
    return _metal_kernel
# ---------------------------------------------------------------------------
# MetalLinear -- the quantized nn.Linear replacement
# ---------------------------------------------------------------------------
class MetalLinear(nn.Linear):
"""
A quantized linear layer that stores weights in affine uint32 packed format
and uses the ``quantization-mlx`` Metal kernels for the forward pass.
Parameters match ``nn.Linear`` with additional quantization metadata.
"""
def __init__(
self,
in_features: int,
out_features: int,
bias: bool = False,
dtype=torch.uint32,
bits: int = 4,
group_size: int = 128,
):
nn.Module.__init__(self)
self.in_features = in_features
self.out_features = out_features
self.bits = bits
self.group_size = group_size
elems_per_int = 32 // bits
k_packed = in_features // elems_per_int
n_groups = in_features // group_size
if dtype == torch.uint32:
self.weight = nn.Parameter(torch.zeros(out_features, k_packed, dtype=torch.uint32), requires_grad=False)
else:
self.weight = nn.Parameter(torch.zeros(out_features, in_features, dtype=dtype), requires_grad=False)
scales_dtype = torch.float32 if dtype == torch.uint32 else None
self.scales = nn.Parameter(torch.zeros(out_features, n_groups, dtype=scales_dtype), requires_grad=False)
self.qbiases = nn.Parameter(torch.zeros(out_features, n_groups, dtype=scales_dtype), requires_grad=False)
if bias:
self.bias = nn.Parameter(torch.zeros(out_features))
else:
self.register_parameter("bias", None)
def forward(self, input: torch.Tensor) -> torch.Tensor:
if self.weight.dtype != torch.uint32:
return nn.functional.linear(input, self.weight, self.bias)
kernel = _get_metal_kernel()
output = kernel.affine_qmm_t(
input,
self.weight,
self.scales.to(input.dtype),
self.qbiases.to(input.dtype),
self.group_size,
self.bits,
)
if self.bias is not None:
output = output + self.bias
return output
def replace_with_metal_linear(
    model,
    modules_to_not_convert: list[str] | None = None,
    quantization_config=None,
    pre_quantized: bool = False,
):
    """
    Swap every eligible ``nn.Linear`` in ``model`` for a ``MetalLinear``.

    Args:
        model: the ``PreTrainedModel`` (on the meta device at this point).
        modules_to_not_convert: module names to leave untouched.
        quantization_config: the ``MetalConfig`` instance.
        pre_quantized: ``True`` when loading from a quantized checkpoint.
    """
    # dequantize=True keeps the model as plain nn.Linear layers: nothing to do.
    if quantization_config.dequantize:
        return model

    replaced_any = False
    for name, submodule in model.named_modules():
        if not should_convert_module(name, modules_to_not_convert):
            continue
        if not isinstance(submodule, nn.Linear):
            continue
        # Pre-quantized checkpoints get packed uint32 storage right away;
        # quantize-on-the-fly keeps a float weight (dtype=None) until conversion.
        extra_kwargs = {} if pre_quantized else {"dtype": None}
        replacement = MetalLinear(
            in_features=submodule.in_features,
            out_features=submodule.out_features,
            bias=submodule.bias is not None,
            bits=quantization_config.bits,
            group_size=quantization_config.group_size,
            **extra_kwargs,
        )
        model.set_submodule(name, replacement)
        replaced_any = True

    if not replaced_any:
        logger.warning(
            "You are loading a model with Metal quantization but no nn.Linear modules were found. "
            "Please double check your model architecture."
        )
    return model
def _affine_quantize_tensor(weight: torch.Tensor, group_size: int, bits: int):
"""
Quantize a 2-D float weight ``[N, K]`` into packed uint32 + scales + biases.
Returns ``(w_packed, scales, biases)`` with:
- ``w_packed``: ``[N, K // (32 // bits)]`` uint32
- ``scales``: ``[N, K // group_size]`` float32/float16/bfloat16
- ``biases``: ``[N, K // group_size]`` float32/float16/bfloat16
"""
N, K = weight.shape
elems_per_int = 32 // bits
max_val = (1 << bits) - 1
n_groups = K // group_size
w_grouped = weight.float().reshape(N, n_groups, group_size)
w_min = w_grouped.min(dim=-1).values # [N, n_groups]
w_max = w_grouped.max(dim=-1).values
scales = ((w_max - w_min) / max_val).clamp(min=1e-8)
biases = w_min
w_int = (w_grouped - biases.unsqueeze(-1)) / scales.unsqueeze(-1)
w_int = w_int.round().clamp(0, max_val).to(torch.int32).reshape(N, K)
# Pack into uint32
k_packed = K // elems_per_int
w_packed = torch.zeros(N, k_packed, dtype=torch.int32, device=weight.device)
for i in range(elems_per_int):
w_packed |= w_int[:, i::elems_per_int] << (bits * i)
return w_packed.to(torch.uint32), scales, biases
def _affine_dequantize_tensor(
w_packed: torch.Tensor, scales: torch.Tensor, biases: torch.Tensor, group_size: int, bits: int
):
"""
Dequantize a packed uint32 weight ``[N, K_packed]`` back to float.
Returns a ``[N, K]`` float32 tensor.
"""
N = w_packed.shape[0]
elems_per_int = 32 // bits
max_val = (1 << bits) - 1
K = w_packed.shape[1] * elems_per_int
w_packed_i = w_packed.to(torch.int32)
w_flat = torch.zeros(N, K, dtype=torch.float32, device=w_packed.device)
for i in range(elems_per_int):
w_flat[:, i::elems_per_int] = ((w_packed_i >> (bits * i)) & max_val).float()
w_grouped = w_flat.reshape(N, -1, group_size)
w_deq = w_grouped * scales.float().unsqueeze(-1) + biases.float().unsqueeze(-1)
return w_deq.reshape(N, K)
class MetalQuantize(ConversionOps):
    """
    Quantize a full-precision weight tensor into (weight, scales, qbiases).

    Used during quantize-on-the-fly: the float ``weight`` entry is replaced by
    the packed uint32 tensor, with scales/qbiases emitted alongside it.
    """

    def __init__(self, hf_quantizer):
        self.hf_quantizer = hf_quantizer

    def convert(self, input_dict: dict, **kwargs) -> dict:
        target_key, value = next(iter(input_dict.items()))
        if isinstance(value, list):
            value = value[0]

        cfg = self.hf_quantizer.quantization_config
        w_packed, scales, biases = _affine_quantize_tensor(value, cfg.group_size, cfg.bits)

        # Emit scales/qbiases as siblings of the weight (same module prefix).
        prefix, _, _ = target_key.rpartition(".")
        scale_key = f"{prefix}.scales" if prefix else "scales"
        bias_key = f"{prefix}.qbiases" if prefix else "qbiases"

        # Store quantization metadata in the checkpoint's original dtype.
        return {
            target_key: w_packed,
            scale_key: scales.to(value.dtype),
            bias_key: biases.to(value.dtype),
        }
class MetalDequantize(ConversionOps):
    """
    Dequantize (weight, scales, qbiases) back to a full-precision tensor.
    Used when ``dequantize=True`` is set in the config to fall back to a normal
    ``nn.Linear`` on devices without MPS.
    """

    def __init__(self, hf_quantizer):
        self.hf_quantizer = hf_quantizer

    def convert(self, input_dict: dict, full_layer_name: str | None = None, **kwargs) -> dict:
        bits = self.hf_quantizer.quantization_config.bits
        group_size = self.hf_quantizer.quantization_config.group_size
        # Fewer than 2 entries means no scales/qbiases arrived for this key, so
        # the weight is treated as already unquantized and passed through.
        # NOTE(review): this passthrough returns input_dict["weight$"] without
        # unwrapping [0], unlike the quantized path below — confirm the
        # WeightConverter pipeline accepts the raw (presumably list) value here.
        if len(input_dict) < 2:
            return {full_layer_name: input_dict["weight$"]}
        quantized = input_dict["weight$"][0]
        scales = input_dict["scales"][0]
        qbiases = input_dict["qbiases"][0]
        w_deq = _affine_dequantize_tensor(quantized, scales, qbiases, group_size, bits)
        # _affine_dequantize_tensor returns float32; cast back to the
        # checkpoint dtype carried by the scales tensor.
        return {full_layer_name: w_deq.to(scales.dtype)}
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/integrations/metal_quantization.py",
"license": "Apache License 2.0",
"lines": 228,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/quantizers/quantizer_metal.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING, Any
from ..utils import is_kernels_available, is_torch_available, logging
from .base import HfQuantizer
from .quantizers_utils import get_module_from_name
if is_torch_available():
import torch
if TYPE_CHECKING:
from ..modeling_utils import PreTrainedModel
logger = logging.get_logger(__name__)
class MetalHfQuantizer(HfQuantizer):
    """
    Quantizer for Metal affine quantization on Apple Silicon (MPS) devices.
    Uses the ``quantization-mlx`` Metal kernels from the Hub to pack weights into
    low-bit (2/4/8) uint32 tensors with per-group scales and biases, and performs
    fused dequant + matmul in the forward pass.
    """

    # Weight-only quantization: no calibration data is required.
    requires_calibration = False

    def __init__(self, quantization_config, **kwargs):
        super().__init__(quantization_config, **kwargs)

    def validate_environment(self, *args, **kwargs):
        # dequantize=True loads plain float weights, so neither MPS nor the
        # kernels package is needed.
        if self.quantization_config.dequantize:
            return
        if not torch.backends.mps.is_available():
            if self.pre_quantized:
                # A pre-quantized checkpoint on a non-MPS machine degrades
                # gracefully: mutate the config to dequantize instead of failing.
                logger.warning_once(
                    "Metal quantization requires an Apple Silicon GPU (MPS), but none is available. "
                    "We will default to dequantizing the model to the original dtype."
                )
                self.quantization_config.dequantize = True
                return
            else:
                raise RuntimeError("Metal quantization requires an Apple Silicon GPU (MPS). No MPS device found.")
        if not is_kernels_available():
            raise ImportError("Metal quantization requires kernels: `pip install kernels`")
        device_map = kwargs.get("device_map")
        if device_map is None:
            logger.warning_once(
                "You have loaded a Metal quantized model on CPU and have an MPS device available. "
                "Set device_map='mps' to use the Metal kernels."
            )
        elif isinstance(device_map, dict):
            # Quantize-on-the-fly runs the Metal kernels at load time, so no
            # part of the model may be offloaded to CPU or disk.
            if not self.pre_quantized and ("cpu" in device_map.values() or "disk" in device_map.values()):
                raise ValueError(
                    "Metal quantization on the fly does not support CPU or disk in the device_map. "
                    "Please use a pre-quantized checkpoint or remove CPU/disk from device_map."
                )

    def update_device_map(self, device_map: dict[str, Any] | None) -> dict[str, Any] | None:
        # With no explicit map, place the whole model on the MPS device.
        if device_map is None:
            device_map = {"": "mps"}
        return device_map

    def param_needs_quantization(self, model: "PreTrainedModel", param_name: str, **kwargs) -> bool:
        from ..integrations.metal_quantization import MetalLinear

        module, tensor_name = get_module_from_name(model, param_name)
        if isinstance(module, MetalLinear):
            # Only float `weight` tensors of MetalLinear layers are quantized
            # on the fly; pre-quantized checkpoints already ship packed weights,
            # and scales/qbiases/bias are loaded as-is.
            if self.pre_quantized or tensor_name != "weight":
                return False
            return True
        return False

    def _process_model_before_weight_loading(self, model: "PreTrainedModel", **kwargs):
        from ..integrations.metal_quantization import replace_with_metal_linear

        self.modules_to_not_convert = self.get_modules_to_not_convert(
            model, self.quantization_config.modules_to_not_convert, model._keep_in_fp32_modules
        )
        # Swaps eligible nn.Linear modules in place (model is on the meta device).
        model = replace_with_metal_linear(
            model,
            modules_to_not_convert=self.modules_to_not_convert,
            quantization_config=self.quantization_config,
            pre_quantized=self.pre_quantized,
        )

    def is_serializable(self):
        return True

    @property
    def is_trainable(self) -> bool:
        # No backward pass is implemented for the fused Metal kernel.
        return False

    def get_quantize_ops(self):
        from ..integrations.metal_quantization import MetalQuantize

        return MetalQuantize(self)

    def get_weight_conversions(self):
        from ..core_model_loading import WeightConverter
        from ..integrations.metal_quantization import MetalDequantize

        # Only a pre-quantized checkpoint being loaded with dequantize=True
        # needs a converter folding (weight, scales, qbiases) back into one
        # float weight; every other path loads tensors unchanged.
        if self.pre_quantized and self.quantization_config.dequantize:
            return [
                WeightConverter(
                    source_patterns=["weight$", "scales", "qbiases"],
                    target_patterns="weight",
                    operations=[MetalDequantize(self)],
                )
            ]
        return []
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/quantizers/quantizer_metal.py",
"license": "Apache License 2.0",
"lines": 102,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:tests/quantization/metal/test_metal.py | # Copyright 2026 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import unittest
from contextlib import ExitStack, contextmanager
from unittest.mock import patch
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, MetalConfig, OPTForCausalLM
from transformers.quantizers.quantizer_metal import MetalHfQuantizer
from transformers.testing_utils import (
require_torch,
slow,
)
from transformers.utils import is_torch_available
if is_torch_available():
import torch
import torch.nn as nn
@contextmanager
def _patch_mps_available(available: bool = True):
"""Patch ``torch.backends.mps.is_available`` to simulate MPS presence/absence."""
with patch("torch.backends.mps.is_available", return_value=available):
yield
@contextmanager
def _patch_no_mps():
    """Convenience: simulate a machine with no MPS device."""
    ctx = _patch_mps_available(available=False)
    with ctx:
        yield
@contextmanager
def _patch_has_mps():
    """Convenience: simulate a machine with an MPS device."""
    # Pretend both that an MPS device exists and that `kernels` is installed.
    with (
        _patch_mps_available(True),
        patch("transformers.quantizers.quantizer_metal.is_kernels_available", return_value=True),
    ):
        yield
@require_torch
class MetalConfigTest(unittest.TestCase):
    """Unit tests for ``MetalConfig`` (no device / model needed)."""

    def test_default_values(self):
        config = MetalConfig()
        self.assertEqual(config.bits, 4)
        self.assertEqual(config.group_size, 64)
        self.assertIsNone(config.modules_to_not_convert)
        self.assertFalse(config.dequantize)
        self.assertEqual(config.quant_method, "metal")

    def test_custom_values(self):
        config = MetalConfig(bits=8, group_size=32, modules_to_not_convert=["lm_head"], dequantize=True)
        self.assertEqual(config.bits, 8)
        self.assertEqual(config.group_size, 32)
        self.assertEqual(config.modules_to_not_convert, ["lm_head"])
        self.assertTrue(config.dequantize)

    def test_invalid_bits_raises(self):
        # Anything outside the supported {2, 4, 8} set must be rejected.
        for bad_bits in (0, 1, 3, 5, 6, 7, 16):
            with self.assertRaises(ValueError, msg=f"bits={bad_bits} should raise"):
                MetalConfig(bits=bad_bits)

    def test_valid_bits(self):
        for bits in (2, 4, 8):
            config = MetalConfig(bits=bits)
            self.assertEqual(config.bits, bits)

    def test_invalid_group_size_raises(self):
        # group_size must be strictly positive.
        with self.assertRaises(ValueError):
            MetalConfig(group_size=0)
        with self.assertRaises(ValueError):
            MetalConfig(group_size=-1)

    def test_to_dict(self):
        config = MetalConfig(bits=4, group_size=64, modules_to_not_convert=["lm_head"])
        d = config.to_dict()
        self.assertEqual(d["quant_method"], "metal")
        self.assertEqual(d["bits"], 4)
        self.assertEqual(d["group_size"], 64)
        self.assertEqual(d["modules_to_not_convert"], ["lm_head"])

    def test_from_dict(self):
        d = {"quant_method": "metal", "bits": 8, "group_size": 32, "modules_to_not_convert": None}
        config = MetalConfig.from_dict(d)
        self.assertEqual(config.bits, 8)
        self.assertEqual(config.group_size, 32)

    def test_to_dict_from_dict(self):
        # Serialization round-trip must preserve every user-set field.
        original = MetalConfig(bits=2, group_size=128, modules_to_not_convert=["lm_head"])
        d = original.to_dict()
        restored = MetalConfig.from_dict(d)
        self.assertEqual(original.bits, restored.bits)
        self.assertEqual(original.group_size, restored.group_size)
        self.assertEqual(original.modules_to_not_convert, restored.modules_to_not_convert)

    def test_get_loading_attributes(self):
        config = MetalConfig(dequantize=True)
        attrs = config.get_loading_attributes()
        self.assertIn("dequantize", attrs)
        self.assertTrue(attrs["dequantize"])
@require_torch
class MetalQuantizerEnvironmentTest(unittest.TestCase):
    """Validate ``MetalHfQuantizer.validate_environment`` under various conditions."""

    def test_no_mps_prequantized_triggers_dequantize(self):
        """Pre-quantized model on non-MPS machine should auto-enable dequantize."""
        with _patch_no_mps():
            config = MetalConfig()
            quantizer = MetalHfQuantizer(config)
            quantizer.pre_quantized = True
            quantizer.validate_environment()
            # The quantizer mutates its own config to fall back to dequantization.
            self.assertTrue(quantizer.quantization_config.dequantize)

    def test_no_mps_not_prequantized_raises(self):
        """Quantize-on-the-fly on non-MPS machine should raise."""
        with _patch_no_mps():
            config = MetalConfig()
            quantizer = MetalHfQuantizer(config)
            quantizer.pre_quantized = False
            with self.assertRaises(RuntimeError):
                quantizer.validate_environment()

    def test_dequantize_flag_skips_mps_check(self):
        """When dequantize=True, no MPS check should occur."""
        with _patch_no_mps():
            config = MetalConfig(dequantize=True)
            quantizer = MetalHfQuantizer(config)
            quantizer.pre_quantized = True
            # Must not raise even though no MPS device is present.
            quantizer.validate_environment()

    def test_missing_kernels_raises(self):
        """Missing ``kernels`` package should raise ImportError."""
        with ExitStack() as stack:
            # MPS present but the `kernels` package reported as missing.
            stack.enter_context(_patch_mps_available(True))
            stack.enter_context(
                patch("transformers.quantizers.quantizer_metal.is_kernels_available", return_value=False)
            )
            config = MetalConfig()
            quantizer = MetalHfQuantizer(config)
            quantizer.pre_quantized = False
            with self.assertRaises(ImportError):
                quantizer.validate_environment()

    def test_cpu_in_device_map_not_prequantized_raises(self):
        """Quantize-on-the-fly with CPU in device_map should raise."""
        with _patch_has_mps():
            config = MetalConfig()
            quantizer = MetalHfQuantizer(config)
            quantizer.pre_quantized = False
            with self.assertRaises(ValueError):
                quantizer.validate_environment(device_map={"model": "cpu"})

    def test_disk_in_device_map_not_prequantized_raises(self):
        """Quantize-on-the-fly with disk in device_map should raise."""
        with _patch_has_mps():
            config = MetalConfig()
            quantizer = MetalHfQuantizer(config)
            quantizer.pre_quantized = False
            with self.assertRaises(ValueError):
                quantizer.validate_environment(device_map={"model": "disk"})

    def test_update_device_map_defaults_to_mps(self):
        # A missing device_map defaults the whole model onto MPS.
        config = MetalConfig()
        quantizer = MetalHfQuantizer(config)
        result = quantizer.update_device_map(None)
        self.assertEqual(result, {"": "mps"})

    def test_is_serializable(self):
        config = MetalConfig()
        quantizer = MetalHfQuantizer(config)
        self.assertTrue(quantizer.is_serializable())

    def test_is_not_trainable(self):
        config = MetalConfig()
        quantizer = MetalHfQuantizer(config)
        self.assertFalse(quantizer.is_trainable)
@require_torch
class AffineQuantizeDequantizeTest(unittest.TestCase):
    """Test the low-level ``_affine_quantize_tensor`` / ``_affine_dequantize_tensor`` functions."""

    def _roundtrip(self, bits, group_size, N=64, K=256, dtype=torch.float32):
        # Shared helper: quantize a random weight, verify all output shapes and
        # dtypes, dequantize, and return both tensors for an error-bound check.
        from transformers.integrations.metal_quantization import _affine_dequantize_tensor, _affine_quantize_tensor

        weight = torch.randn(N, K, dtype=dtype)
        w_packed, scales, biases = _affine_quantize_tensor(weight, group_size, bits)
        self.assertEqual(w_packed.dtype, torch.uint32)
        self.assertEqual(w_packed.shape, (N, K // (32 // bits)))
        self.assertEqual(scales.shape, (N, K // group_size))
        self.assertEqual(biases.shape, (N, K // group_size))
        w_deq = _affine_dequantize_tensor(w_packed, scales, biases, group_size, bits)
        self.assertEqual(w_deq.shape, (N, K))
        return weight.float(), w_deq.float()

    # The tolerances below loosen as the quantization step grows (fewer bits
    # or larger groups mean coarser representable values).
    def test_roundtrip_4bit_gs64(self):
        orig, deq = self._roundtrip(bits=4, group_size=64)
        max_err = (orig - deq).abs().max().item()
        self.assertLess(max_err, 0.25, "4-bit gs=64 round-trip error too large")

    def test_roundtrip_4bit_gs128(self):
        orig, deq = self._roundtrip(bits=4, group_size=128)
        max_err = (orig - deq).abs().max().item()
        self.assertLess(max_err, 0.5, "4-bit gs=128 round-trip error too large")

    def test_roundtrip_8bit_gs64(self):
        orig, deq = self._roundtrip(bits=8, group_size=64)
        max_err = (orig - deq).abs().max().item()
        self.assertLess(max_err, 0.02, "8-bit gs=64 round-trip error too large")

    def test_roundtrip_2bit_gs64(self):
        orig, deq = self._roundtrip(bits=2, group_size=64)
        max_err = (orig - deq).abs().max().item()
        self.assertLess(max_err, 1.25, "2-bit gs=64 round-trip error too large")

    def test_quantize_shapes_2bit(self):
        from transformers.integrations.metal_quantization import _affine_quantize_tensor

        N, K = 32, 128
        weight = torch.randn(N, K)
        w_packed, scales, biases = _affine_quantize_tensor(weight, group_size=64, bits=2)
        # 2-bit packing fits 16 values per uint32 word.
        elems_per_int = 32 // 2
        self.assertEqual(w_packed.shape, (N, K // elems_per_int))
        self.assertEqual(scales.shape, (N, K // 64))

    def test_quantize_preserves_device(self):
        from transformers.integrations.metal_quantization import _affine_quantize_tensor

        weight = torch.randn(32, 128, device="cpu")
        w_packed, scales, biases = _affine_quantize_tensor(weight, group_size=64, bits=4)
        self.assertEqual(w_packed.device.type, "cpu")
        self.assertEqual(scales.device.type, "cpu")
        self.assertEqual(biases.device.type, "cpu")

    def test_dequantize_returns_correct_dtype(self):
        """Regression: dequantize should always return float32 (caller casts to target dtype)."""
        from transformers.integrations.metal_quantization import _affine_dequantize_tensor, _affine_quantize_tensor

        weight = torch.randn(32, 128, dtype=torch.bfloat16)
        w_packed, scales, biases = _affine_quantize_tensor(weight, group_size=64, bits=4)
        w_deq = _affine_dequantize_tensor(w_packed, scales, biases, group_size=64, bits=4)
        self.assertEqual(w_deq.dtype, torch.float32)
@require_torch
class MetalLinearTest(unittest.TestCase):
    """Test the ``MetalLinear`` nn.Module directly (CPU, no kernel calls)."""

    def test_prequantized_weight_shape(self):
        """Pre-quantized mode: weight should be uint32 with packed K dimension."""
        from transformers.integrations.metal_quantization import MetalLinear

        layer = MetalLinear(in_features=256, out_features=128, bits=4, group_size=64)
        # 4-bit packing fits 8 values per uint32 word.
        elems_per_int = 32 // 4
        self.assertEqual(layer.weight.shape, (128, 256 // elems_per_int))
        self.assertEqual(layer.weight.dtype, torch.uint32)
        self.assertEqual(layer.scales.shape, (128, 256 // 64))
        self.assertEqual(layer.qbiases.shape, (128, 256 // 64))

    def test_quantize_on_the_fly_weight_shape(self):
        """Quantize-on-the-fly mode (dtype=None): weight should be full-shape float."""
        from transformers.integrations.metal_quantization import MetalLinear

        layer = MetalLinear(in_features=256, out_features=128, bits=4, group_size=64, dtype=None)
        self.assertEqual(layer.weight.shape, (128, 256))
        self.assertNotEqual(layer.weight.dtype, torch.uint32)

    def test_no_bias_by_default(self):
        from transformers.integrations.metal_quantization import MetalLinear

        layer = MetalLinear(in_features=128, out_features=64, bits=4, group_size=64)
        self.assertIsNone(layer.bias)

    def test_with_bias(self):
        from transformers.integrations.metal_quantization import MetalLinear

        layer = MetalLinear(in_features=128, out_features=64, bias=True, bits=4, group_size=64)
        self.assertIsNotNone(layer.bias)
        self.assertEqual(layer.bias.shape, (64,))

    def test_forward_fallback_when_not_uint32(self):
        """When weight is not uint32, forward should use standard nn.functional.linear (no kernel needed)."""
        from transformers.integrations.metal_quantization import MetalLinear

        layer = MetalLinear(in_features=128, out_features=64, bits=4, group_size=64, dtype=None)
        layer.weight = nn.Parameter(torch.randn(64, 128))
        x = torch.randn(2, 5, 128)
        out = layer(x)
        self.assertEqual(out.shape, (2, 5, 64))

    def test_forward_fallback_with_bias(self):
        from transformers.integrations.metal_quantization import MetalLinear

        layer = MetalLinear(in_features=128, out_features=64, bias=True, bits=4, group_size=64, dtype=None)
        layer.weight = nn.Parameter(torch.randn(64, 128))
        layer.bias = nn.Parameter(torch.randn(64))
        x = torch.randn(1, 10, 128)
        out = layer(x)
        self.assertEqual(out.shape, (1, 10, 64))

    def test_prequantized_shapes_8bit(self):
        from transformers.integrations.metal_quantization import MetalLinear

        layer = MetalLinear(in_features=256, out_features=128, bits=8, group_size=64)
        elems_per_int = 32 // 8  # 4
        self.assertEqual(layer.weight.shape, (128, 256 // elems_per_int))

    def test_prequantized_shapes_2bit(self):
        from transformers.integrations.metal_quantization import MetalLinear

        layer = MetalLinear(in_features=256, out_features=128, bits=2, group_size=64)
        elems_per_int = 32 // 2  # 16
        self.assertEqual(layer.weight.shape, (128, 256 // elems_per_int))
@require_torch
class ReplaceWithMetalLinearTest(unittest.TestCase):
    """Test module replacement logic."""

    def _make_small_model(self):
        # Build a tiny OPT model on the meta device: no weights are
        # materialized, so this is cheap and needs no accelerator.
        config = AutoConfig.from_pretrained("hf-internal-testing/tiny-random-OPTForCausalLM")
        with torch.device("meta"):
            model = OPTForCausalLM(config)
        return model

    def test_all_linears_replaced(self):
        from transformers.integrations.metal_quantization import MetalLinear, replace_with_metal_linear

        model = self._make_small_model()
        nb_linears = sum(1 for m in model.modules() if isinstance(m, nn.Linear))
        self.assertGreater(nb_linears, 0)
        config = MetalConfig(bits=4, group_size=64)
        replace_with_metal_linear(model, quantization_config=config, pre_quantized=True)
        nb_metal = sum(1 for m in model.modules() if isinstance(m, MetalLinear))
        self.assertEqual(nb_linears, nb_metal)

    def test_modules_to_not_convert(self):
        from transformers.integrations.metal_quantization import MetalLinear, replace_with_metal_linear

        model = self._make_small_model()
        config = MetalConfig(bits=4, group_size=64)
        replace_with_metal_linear(
            model, modules_to_not_convert=["lm_head"], quantization_config=config, pre_quantized=True
        )
        self.assertNotIsInstance(model.lm_head, MetalLinear)
        # MetalLinear subclasses nn.Linear, so the excluded lm_head accounts
        # for exactly one of the nn.Linear instances.
        nb_metal = sum(1 for m in model.modules() if isinstance(m, MetalLinear))
        nb_linears = sum(1 for m in model.modules() if isinstance(m, nn.Linear))
        self.assertEqual(nb_metal, nb_linears - 1)

    def test_dequantize_skips_replacement(self):
        from transformers.integrations.metal_quantization import MetalLinear, replace_with_metal_linear

        model = self._make_small_model()
        config = MetalConfig(bits=4, group_size=64, dequantize=True)
        replace_with_metal_linear(model, quantization_config=config, pre_quantized=True)
        nb_metal = sum(1 for m in model.modules() if isinstance(m, MetalLinear))
        self.assertEqual(nb_metal, 0, "No modules should be replaced when dequantize=True")

    def test_prequantized_dtype_is_uint32(self):
        from transformers.integrations.metal_quantization import MetalLinear, replace_with_metal_linear

        model = self._make_small_model()
        config = MetalConfig(bits=4, group_size=64)
        replace_with_metal_linear(model, quantization_config=config, pre_quantized=True)
        # Checking the first converted module is sufficient: all are built alike.
        for m in model.modules():
            if isinstance(m, MetalLinear):
                self.assertEqual(m.weight.dtype, torch.uint32)
                break

    def test_quantize_on_the_fly_dtype_is_not_uint32(self):
        from transformers.integrations.metal_quantization import MetalLinear, replace_with_metal_linear

        model = self._make_small_model()
        config = MetalConfig(bits=4, group_size=64)
        replace_with_metal_linear(model, quantization_config=config, pre_quantized=False)
        for m in model.modules():
            if isinstance(m, MetalLinear):
                self.assertNotEqual(m.weight.dtype, torch.uint32)
                break
@require_torch
class MetalConversionOpsTest(unittest.TestCase):
    """Test the ``MetalQuantize`` and ``MetalDequantize`` weight conversion operations."""

    def _make_quantizer(self, bits=4, group_size=64):
        config = MetalConfig(bits=bits, group_size=group_size)
        quantizer = MetalHfQuantizer(config)
        quantizer.pre_quantized = False
        return quantizer

    def test_metal_quantize_produces_correct_keys(self):
        from transformers.integrations.metal_quantization import MetalQuantize

        quantizer = self._make_quantizer()
        op = MetalQuantize(quantizer)
        weight = torch.randn(64, 256)
        result = op.convert({"model.layer.weight": weight})
        # The op replaces the weight in place and adds sibling scales/qbiases
        # keys under the same module prefix.
        self.assertIn("model.layer.weight", result)
        self.assertIn("model.layer.scales", result)
        self.assertIn("model.layer.qbiases", result)
        self.assertEqual(result["model.layer.weight"].dtype, torch.uint32)

    def test_metal_quantize_preserves_original_dtype(self):
        from transformers.integrations.metal_quantization import MetalQuantize

        quantizer = self._make_quantizer()
        op = MetalQuantize(quantizer)
        for dtype in (torch.float32, torch.float16, torch.bfloat16):
            weight = torch.randn(64, 256, dtype=dtype)
            result = op.convert({"layer.weight": weight})
            # scales/qbiases are cast back to the checkpoint's dtype.
            self.assertEqual(result["layer.scales"].dtype, dtype, f"scales dtype mismatch for input {dtype}")
            self.assertEqual(result["layer.qbiases"].dtype, dtype, f"qbiases dtype mismatch for input {dtype}")

    def test_metal_dequantize_returns_target_dtype(self):
        """MetalDequantize should return a tensor in the same dtype as the scales."""
        from transformers.integrations.metal_quantization import MetalDequantize, MetalQuantize

        quantizer = self._make_quantizer()
        for dtype in (torch.float16, torch.bfloat16):
            weight = torch.randn(64, 256, dtype=dtype)
            q_op = MetalQuantize(quantizer)
            q_result = q_op.convert({"layer.weight": weight})
            dq_quantizer = self._make_quantizer()
            dq_quantizer.pre_quantized = True
            dq_quantizer.quantization_config.dequantize = True
            dq_op = MetalDequantize(dq_quantizer)
            # The pipeline hands MetalDequantize lists of tensors keyed by
            # source pattern; mimic that shape here.
            dq_result = dq_op.convert(
                {
                    "weight$": [q_result["layer.weight"]],
                    "scales": [q_result["layer.scales"]],
                    "qbiases": [q_result["layer.qbiases"]],
                },
                full_layer_name="layer.weight",
            )
            self.assertEqual(
                dq_result["layer.weight"].dtype, dtype, f"dequantized dtype should match scales ({dtype})"
            )

    def test_quantize_then_dequantize_roundtrip(self):
        from transformers.integrations.metal_quantization import MetalDequantize, MetalQuantize

        quantizer = self._make_quantizer(bits=4, group_size=64)
        q_op = MetalQuantize(quantizer)
        weight = torch.randn(64, 256)
        q_result = q_op.convert({"layer.weight": weight})
        dq_quantizer = self._make_quantizer(bits=4, group_size=64)
        dq_op = MetalDequantize(dq_quantizer)
        dq_result = dq_op.convert(
            {
                "weight$": [q_result["layer.weight"]],
                "scales": [q_result["layer.scales"]],
                "qbiases": [q_result["layer.qbiases"]],
            },
            full_layer_name="layer.weight",
        )
        w_deq = dq_result["layer.weight"].float()
        # Bound matches the 4-bit gs=64 tolerance used in AffineQuantizeDequantizeTest.
        max_err = (weight - w_deq).abs().max().item()
        self.assertLess(max_err, 0.5, "Quantize -> Dequantize round-trip error too large")
@require_torch
class MetalWeightConversionsTest(unittest.TestCase):
    """``get_weight_conversions`` yields a converter only for pre-quantized + dequantize."""

    def _build_quantizer(self, dequantize: bool, pre_quantized: bool):
        # Small factory shared by all three cases below.
        quantizer = MetalHfQuantizer(MetalConfig(dequantize=dequantize))
        quantizer.pre_quantized = pre_quantized
        return quantizer

    def test_get_weight_conversions_empty_when_not_dequantize(self):
        quantizer = self._build_quantizer(dequantize=False, pre_quantized=True)
        self.assertEqual(quantizer.get_weight_conversions(), [])

    def test_get_weight_conversions_has_entry_when_dequantize(self):
        quantizer = self._build_quantizer(dequantize=True, pre_quantized=True)
        self.assertEqual(len(quantizer.get_weight_conversions()), 1)

    def test_get_weight_conversions_empty_when_not_prequantized(self):
        quantizer = self._build_quantizer(dequantize=True, pre_quantized=False)
        self.assertEqual(quantizer.get_weight_conversions(), [])
@require_torch
class MetalModelConversionTest(unittest.TestCase):
    """Test that a model is correctly converted on the meta device.

    All tests instantiate a tiny OPT model on the ``meta`` device (no real
    weights are allocated) and check module replacement / quantization gating.
    """

    def setUp(self):
        gc.collect()

    def tearDown(self):
        gc.collect()

    @staticmethod
    def _build_meta_model():
        """Instantiate the tiny OPT test model on the meta device (no weight materialization)."""
        config = AutoConfig.from_pretrained("hf-internal-testing/tiny-random-OPTForCausalLM")
        with torch.device("meta"):
            model = OPTForCausalLM(config)
        return model

    def test_quantized_model_conversion(self):
        """Every nn.Linear should be replaced by MetalLinear when nothing is excluded."""
        from transformers.integrations.metal_quantization import MetalLinear, replace_with_metal_linear

        quantization_config = MetalConfig(bits=4, group_size=64)
        model = self._build_meta_model()
        nb_linears = sum(1 for m in model.modules() if isinstance(m, nn.Linear))

        model = replace_with_metal_linear(model, quantization_config=quantization_config, pre_quantized=True)

        nb_metal = sum(1 for m in model.modules() if isinstance(m, MetalLinear))
        self.assertEqual(nb_linears, nb_metal)

    def test_quantized_model_conversion_with_exclusion(self):
        """Modules listed in `modules_to_not_convert` must remain plain nn.Linear."""
        from transformers.integrations.metal_quantization import MetalLinear, replace_with_metal_linear

        quantization_config = MetalConfig(bits=4, group_size=64)
        model = self._build_meta_model()
        nb_linears = sum(1 for m in model.modules() if isinstance(m, nn.Linear))

        model = replace_with_metal_linear(
            model, modules_to_not_convert=["out_proj"], quantization_config=quantization_config, pre_quantized=True
        )

        nb_metal = sum(1 for m in model.modules() if isinstance(m, MetalLinear))
        nb_excluded = sum(1 for name, m in model.named_modules() if "out_proj" in name and isinstance(m, nn.Linear))
        # Converted + excluded modules should account for every original Linear.
        self.assertEqual(nb_metal + nb_excluded, nb_linears)

    def test_param_needs_quantization(self):
        """Only the `weight` of a MetalLinear needs quantization; scales/qbiases do not."""
        from transformers.integrations.metal_quantization import MetalLinear, replace_with_metal_linear

        quantization_config = MetalConfig(bits=4, group_size=64)
        model = self._build_meta_model()
        replace_with_metal_linear(model, quantization_config=quantization_config, pre_quantized=False)

        quantizer = MetalHfQuantizer(quantization_config)
        quantizer.pre_quantized = False
        for name, module in model.named_modules():
            if isinstance(module, MetalLinear):
                self.assertTrue(quantizer.param_needs_quantization(model, f"{name}.weight"))
                self.assertFalse(quantizer.param_needs_quantization(model, f"{name}.scales"))
                self.assertFalse(quantizer.param_needs_quantization(model, f"{name}.qbiases"))

    def test_param_needs_quantization_prequantized_is_false(self):
        """A pre-quantized checkpoint must never be re-quantized on load."""
        from transformers.integrations.metal_quantization import MetalLinear, replace_with_metal_linear

        quantization_config = MetalConfig(bits=4, group_size=64)
        model = self._build_meta_model()
        replace_with_metal_linear(model, quantization_config=quantization_config, pre_quantized=True)

        quantizer = MetalHfQuantizer(quantization_config)
        quantizer.pre_quantized = True
        for name, module in model.named_modules():
            if isinstance(module, MetalLinear):
                self.assertFalse(
                    quantizer.param_needs_quantization(model, f"{name}.weight"),
                    "Pre-quantized weights should not be re-quantized",
                )
@slow
@require_torch
class MetalSlowIntegrationTest(unittest.TestCase):
    """Slow tests that actually load a model with Metal quantization.

    These run on CPU with ``dequantize=True`` so they don't require MPS.
    """

    model_id = "medmekk/Llama-3.2-1B-Instruct-metal"

    def setUp(self):
        gc.collect()

    def tearDown(self):
        gc.collect()

    def test_load_prequantized_dequantize_on_cpu(self):
        """Load a quantized checkpoint with dequantize=True on CPU and run a forward pass."""
        with _patch_no_mps():
            config = MetalConfig(dequantize=True)
            model = AutoModelForCausalLM.from_pretrained(self.model_id, quantization_config=config, device_map="cpu")
        self.assertIsNotNone(model)
        for param in model.parameters():
            # After dequantization no packed uint32 weights should remain.
            self.assertNotEqual(param.dtype, torch.uint32, "All weights should be dequantized")

    def test_quantized_model(self):
        """Greedy generation on a quantized model should produce the known reference text."""
        with _patch_no_mps():
            config = MetalConfig(bits=4, group_size=64)
            model = AutoModelForCausalLM.from_pretrained(self.model_id, quantization_config=config, device_map="mps")
            tokenizer = AutoTokenizer.from_pretrained(self.model_id)
        self.assertIsNotNone(model)

        # Renamed from `input` to avoid shadowing the builtin.
        prompt = "Hello, how are you?"
        EXPECTED_OUTPUT = "Hello, how are you? I'm doing well, thanks for asking. I"

        input_ids = tokenizer.encode(prompt, return_tensors="pt").to("mps")
        # Deterministic decoding so the output can be compared verbatim.
        output = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        self.assertEqual(tokenizer.decode(output[0], skip_special_tokens=True), EXPECTED_OUTPUT)
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/quantization/metal/test_metal.py",
"license": "Apache License 2.0",
"lines": 502,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:src/transformers/models/timesfm2_5/convert_timesfm2_5_original_to_hf.py | # Copyright 2026 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import re
import shutil
import numpy as np
import timesfm
import torch
from huggingface_hub import snapshot_download
from transformers import TimesFm2_5Config, TimesFm2_5ModelForPrediction
"""
Sample usage:
```
python src/transformers/models/timesfm2_5/convert_timesfm2_5_original_to_hf.py \
--output_dir /output/path
```
"""
def get_nested_attr(obj, key):
"""Recursively retrieves an attribute from an object, handling list/tuple indexing if present."""
parts = key.split(".")
for part in parts:
match = re.match(r"(.*)\[(\d+)\]", part) # Handle list indexing like `layers[0]`
if match:
attr_name, index = match.groups()
obj = getattr(obj, attr_name)[int(index)] # Access list/tuple element
else:
obj = getattr(obj, part) # Regular attribute access
return obj
def download_checkpoint_from_hub(huggingface_repo_id: str) -> str:
"""Download the official checkpoint and return the local safetensors path."""
checkpoint_dir = snapshot_download(repo_id=huggingface_repo_id, allow_patterns=["model.safetensors"])
checkpoint_path = os.path.join(checkpoint_dir, "model.safetensors")
if not os.path.exists(checkpoint_path):
raise FileNotFoundError(
f"`model.safetensors` was not found in {huggingface_repo_id}. "
"Please verify the repo contains a non-sharded safetensors checkpoint."
)
return checkpoint_path
def write_model(model_path, huggingface_repo_id="google/timesfm-2.5-200m-pytorch", safe_serialization=True):
os.makedirs(model_path, exist_ok=True)
tmp_model_path = os.path.join(model_path, "tmp")
os.makedirs(tmp_model_path, exist_ok=True)
checkpoint_path = download_checkpoint_from_hub(huggingface_repo_id)
# Create model instance and load checkpoint
tfm = timesfm.TimesFM_2p5_200M_torch()
tfm.model.load_checkpoint(checkpoint_path)
# Compile with forecasting configuration
tfm.compile(
timesfm.ForecastConfig(
max_context=1024,
max_horizon=256,
normalize_inputs=True,
use_continuous_quantile_head=True,
)
)
original_model = tfm.model
# Get actual dimensions from original model
quantile_output_dims = original_model.output_projection_quantiles.output_layer.weight.shape[0]
# Original TimesFM 2.5 has 9 quantiles + 1 extra (median/point prediction) = 10 total
actual_quantile_len = quantile_output_dims // 10 # 9 quantiles + 1 = 10 total
timesfm_config = TimesFm2_5Config(
patch_length=32,
context_length=16384,
horizon_length=128,
num_hidden_layers=20,
hidden_size=1280,
intermediate_size=1280,
head_dim=80,
num_attention_heads=16,
output_quantile_len=actual_quantile_len,
decode_index=5,
use_bias=False,
activation="swish",
quantiles=[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9],
max_position_embeddings=16384,
)
timesfm_config.save_pretrained(tmp_model_path)
timesfm_model = TimesFm2_5ModelForPrediction(timesfm_config)
# Mapping of the layers from the original TimesFM 2.5 model to the Transformers model
MODEL_LAYER_MAPPING = {
# Input projection (tokenizer) - ResidualBlock: 64 -> 1280 -> 1280
"tokenizer.hidden_layer.weight": "model.input_ff_layer.input_layer.weight",
"tokenizer.hidden_layer.bias": "model.input_ff_layer.input_layer.bias",
"tokenizer.output_layer.weight": "model.input_ff_layer.output_layer.weight",
"tokenizer.output_layer.bias": "model.input_ff_layer.output_layer.bias",
"tokenizer.residual_layer.weight": "model.input_ff_layer.residual_layer.weight",
"tokenizer.residual_layer.bias": "model.input_ff_layer.residual_layer.bias",
# Separate output projections for TimesFM 2.5 - these are at model level, not inside model
# Point projection: 1280 -> 1280 -> 1280
"output_projection_point.hidden_layer.weight": "output_projection_point.input_layer.weight",
"output_projection_point.output_layer.weight": "output_projection_point.output_layer.weight",
"output_projection_point.residual_layer.weight": "output_projection_point.residual_layer.weight",
# Quantile projection: 1280 -> 1280 -> output_dims
"output_projection_quantiles.hidden_layer.weight": "output_projection_quantiles.input_layer.weight",
"output_projection_quantiles.output_layer.weight": "output_projection_quantiles.output_layer.weight",
"output_projection_quantiles.residual_layer.weight": "output_projection_quantiles.residual_layer.weight",
}
TRANSFORMER_LAYER_MAPPING = {
# Attention layers - MultiHeadAttention with separate q, k, v projections
"stacked_xf[{i}].attn.query.weight": "model.layers[{i}].self_attn.q_proj.weight",
"stacked_xf[{i}].attn.key.weight": "model.layers[{i}].self_attn.k_proj.weight",
"stacked_xf[{i}].attn.value.weight": "model.layers[{i}].self_attn.v_proj.weight",
"stacked_xf[{i}].attn.out.weight": "model.layers[{i}].self_attn.o_proj.weight",
# QK normalization layers (RMS norm) - uses 'scale' instead of 'weight'
"stacked_xf[{i}].attn.query_ln.scale": "model.layers[{i}].self_attn.q_norm.weight",
"stacked_xf[{i}].attn.key_ln.scale": "model.layers[{i}].self_attn.k_norm.weight",
# Per-dimension scaling parameter
"stacked_xf[{i}].attn.per_dim_scale.per_dim_scale": "model.layers[{i}].self_attn.scaling",
# MLP layers (feed forward)
"stacked_xf[{i}].ff0.weight": "model.layers[{i}].mlp.ff0.weight",
"stacked_xf[{i}].ff1.weight": "model.layers[{i}].mlp.ff1.weight",
# Layer normalization (RMS norm) - uses 'scale' instead of 'weight'
"stacked_xf[{i}].pre_attn_ln.scale": "model.layers[{i}].input_layernorm.weight",
"stacked_xf[{i}].post_attn_ln.scale": "model.layers[{i}].post_attention_layernorm.weight",
"stacked_xf[{i}].pre_ff_ln.scale": "model.layers[{i}].pre_feedforward_layernorm.weight",
"stacked_xf[{i}].post_ff_ln.scale": "model.layers[{i}].post_feedforward_layernorm.weight",
}
# Debug: Print both model structures
print(f"Original model attributes: {dir(original_model)}")
print(f"\\nTransformers model attributes: {dir(timesfm_model)}")
print(f"\\nTransformers model (inner) attributes: {dir(timesfm_model.model)}")
print(f"\\nTransformers input_ff_layer attributes: {dir(timesfm_model.model.input_ff_layer)}")
# Copy model-level weights
for old_key, new_key in MODEL_LAYER_MAPPING.items():
try:
old_attr = get_nested_attr(original_model, old_key) # Get tensor from original model
new_attr = get_nested_attr(timesfm_model, new_key) # Get corresponding attribute in new model
print(f"Shape comparison {old_key}: {old_attr.shape} vs {new_key}: {new_attr.shape}")
if old_attr.shape == new_attr.shape:
new_attr.data.copy_(old_attr.data) # Copy data
print(f"✅ Converted {old_key} -> {new_key}")
else:
print(f"⚠️ Shape mismatch {old_key}: {old_attr.shape} vs {new_attr.shape}")
except AttributeError as e:
print(f"Skipping {old_key}: {e}")
# Copy transformer layer weights
num_layers = len(timesfm_model.model.layers)
for i in range(num_layers):
# Special handling for fused QKV weights
try:
# Check if original model has fused QKV projection
qkv_fused = get_nested_attr(original_model, f"stacked_xf[{i}].attn.qkv_proj.weight")
# Split fused QKV into separate Q, K, V projections
# QKV fused shape: [3 * hidden_size, hidden_size] = [3840, 1280]
# Split into Q: [1280, 1280], K: [1280, 1280], V: [1280, 1280]
q_weight, k_weight, v_weight = qkv_fused.chunk(3, dim=0)
# Copy to separate projections
q_proj = get_nested_attr(timesfm_model, f"model.layers[{i}].self_attn.q_proj.weight")
k_proj = get_nested_attr(timesfm_model, f"model.layers[{i}].self_attn.k_proj.weight")
v_proj = get_nested_attr(timesfm_model, f"model.layers[{i}].self_attn.v_proj.weight")
q_proj.data.copy_(q_weight.data)
k_proj.data.copy_(k_weight.data)
v_proj.data.copy_(v_weight.data)
if i == 0:
print(
f"✅ Converted layer {i}: stacked_xf[{i}].attn.qkv_proj.weight (fused) -> separate Q/K/V projections"
)
print(f" Q: {q_weight.shape}, K: {k_weight.shape}, V: {v_weight.shape}")
except AttributeError:
# No fused QKV, try separate weights
if i == 0:
print(f"⚠️ Layer {i}: No fused QKV found, trying separate Q/K/V weights...")
# Copy all other transformer layer weights
for old_template, new_template in TRANSFORMER_LAYER_MAPPING.items():
old_key = old_template.format(i=i)
new_key = new_template.format(i=i)
# Skip Q/K/V weights if we already handled fused QKV
if any(x in old_key for x in [".query.weight", ".key.weight", ".value.weight"]):
continue
try:
# Get tensor from original model
old_attr = get_nested_attr(original_model, old_key)
# Get corresponding attribute in new model
new_attr = get_nested_attr(timesfm_model, new_key)
new_attr.data.copy_(old_attr.data) # Copy data
if i == 0: # Only print first layer details
print(f"✅ Converted layer {i}: {old_key} -> {new_key}")
except AttributeError:
if i == 0: # Only print first layer errors
print(f"Skipping layer {i}: {old_key} (not found in original model).")
timesfm_model.save_pretrained(model_path, safe_serialization=safe_serialization)
shutil.rmtree(tmp_model_path)
print(f"✅ Model saved to {model_path}")
def check_outputs(model_path, huggingface_repo_id="google/timesfm-2.5-200m-pytorch"):
"""Compares outputs between original and converted models."""
print("\nChecking model outputs...")
# Load original TimesFM 2.5 model using the same Hub download path as conversion.
checkpoint_path = download_checkpoint_from_hub(huggingface_repo_id)
# Create model instance and load checkpoint
tfm = timesfm.TimesFM_2p5_200M_torch()
tfm.model.load_checkpoint(checkpoint_path)
# Compile with forecasting configuration (following README example)
tfm.compile(
timesfm.ForecastConfig(
max_context=1024,
max_horizon=256,
normalize_inputs=True,
use_continuous_quantile_head=True,
)
)
# Load converted model
converted_model = TimesFm2_5ModelForPrediction.from_pretrained(
model_path,
dtype=torch.float32,
attn_implementation="sdpa",
)
if torch.cuda.is_available():
converted_model = converted_model.to("cuda")
converted_model.eval() # Set to evaluation mode
# Create deterministic test inputs
forecast_input = [
np.linspace(0, 1, 100),
np.sin(np.linspace(0, 20, 67)),
np.sin(np.linspace(0, 10, 150)),
]
# Get predictions from original model
point_forecast_orig, quantile_forecast_orig = tfm.forecast(
horizon=128,
inputs=forecast_input,
)
# Convert inputs to sequence of tensors
forecast_input_tensor = [torch.tensor(ts, dtype=torch.float32) for ts in forecast_input]
if torch.cuda.is_available():
forecast_input_tensor = [ts.to("cuda") for ts in forecast_input_tensor]
# Get predictions from converted model
with torch.no_grad():
# Use forecast_context_len=1024 to match original's max_context=1024
outputs = converted_model(past_values=forecast_input_tensor, forecast_context_len=1024, return_dict=True)
point_forecast_conv = outputs.mean_predictions.float().cpu().numpy()
quantile_forecast_conv = outputs.full_predictions.float().cpu().numpy()
# Compare outputs
point_forecast_diff = np.abs(point_forecast_orig - point_forecast_conv)
quantile_forecast_diff = np.abs(quantile_forecast_orig - quantile_forecast_conv)
max_point_diff = point_forecast_diff.max()
mean_point_diff = point_forecast_diff.mean()
max_quantile_diff = quantile_forecast_diff.max()
mean_quantile_diff = quantile_forecast_diff.mean()
print("\nOutput comparison:")
print(f"Point forecast - Max difference: {max_point_diff:.6f}")
print(f"Point forecast - Mean difference: {mean_point_diff:.6f}")
print(f"Quantile forecast - Max difference: {max_quantile_diff:.6f}")
print(f"Quantile forecast - Mean difference: {mean_quantile_diff:.6f}")
# Define acceptable thresholds
POINT_THRESHOLD = 1e-5
QUANTILE_THRESHOLD = 1e-5
if max_point_diff > POINT_THRESHOLD or max_quantile_diff > QUANTILE_THRESHOLD:
raise ValueError(
f"Output mismatch detected!\n"
f"Point forecast max diff: {max_point_diff} (threshold: {POINT_THRESHOLD})\n"
f"Quantile forecast max diff: {max_quantile_diff} (threshold: {QUANTILE_THRESHOLD})"
)
print("\n✅ All outputs match within acceptable tolerance!")
# Print shapes for verification
print("\nOutput shapes:")
print(f"Original point forecast: {point_forecast_orig.shape}")
print(f"Converted point forecast: {point_forecast_conv.shape}")
print(f"Original quantile forecast: {quantile_forecast_orig.shape}")
print(f"Converted quantile forecast: {quantile_forecast_conv.shape}")
return max_point_diff, max_quantile_diff
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--output_dir",
required=True,
help="Location to write HF model and tokenizer",
)
parser.add_argument(
"--safe_serialization", type=bool, default=True, help="Whether or not to save using `safetensors`."
)
parser.add_argument(
"--huggingface_repo_id",
type=str,
default="google/timesfm-2.5-200m-pytorch",
help="The Hugging Face repository ID to use for the model.",
)
args = parser.parse_args()
# if the saved model file exists, skip the conversion
if os.path.exists(
os.path.join(args.output_dir, "model.safetensors" if args.safe_serialization else "pytorch_model.bin")
):
print(f"Model already exists in {args.output_dir}, skipping conversion.")
else:
write_model(
model_path=args.output_dir,
huggingface_repo_id=args.huggingface_repo_id,
safe_serialization=args.safe_serialization,
)
# Always check outputs
max_point_diff, max_quantile_diff = check_outputs(args.output_dir, args.huggingface_repo_id)
print("\n🎉 TimesFM 2.5 conversion completed!")
print(f" Point forecast precision: {max_point_diff:.6f}")
print(f" Quantile forecast precision: {max_quantile_diff:.6f}")
if __name__ == "__main__":
main()
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/timesfm2_5/convert_timesfm2_5_original_to_hf.py",
"license": "Apache License 2.0",
"lines": 306,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/timesfm2_5/modular_timesfm2_5.py | # Copyright 2026 the HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from collections.abc import Callable, Sequence
from dataclasses import dataclass
import torch
import torch.nn as nn
import torch.nn.functional as F
from ...activations import ACT2FN
from ...masking_utils import create_causal_mask
from ...modeling_rope_utils import RopeParameters
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS
from ...processing_utils import Unpack
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, logging
from ...utils.generic import merge_with_config_defaults
from ...utils.output_capturing import capture_outputs
from ..apertus.modeling_apertus import ApertusAttention
from ..clip.modeling_clip import CLIPMLP
from ..llama.modeling_llama import (
LlamaDecoderLayer,
LlamaRMSNorm,
LlamaRotaryEmbedding,
apply_rotary_pos_emb,
eager_attention_forward,
)
from ..timesfm.configuration_timesfm import TimesFmConfig
from ..timesfm.modeling_timesfm import (
TimesFmModelForPrediction,
TimesFmOutput,
TimesFmOutputForPrediction,
TimesFmPreTrainedModel,
TimesFmResidualBlock,
)
logger = logging.get_logger(__name__)
class TimesFm2_5Config(TimesFmConfig):
r"""
This is the configuration class to store the configuration of a [`TimesFm2_5ModelForPrediction`]. It is used to
instantiate a TimesFM 2.5 model according to the specified arguments, defining the model architecture. Instantiating
a configuration with the defaults will yield a similar configuration to that of the TimesFM 2.5
[google/timesfm-2.5-200m-transformers](https://huggingface.co/google/timesfm-2.5-200m-transformers) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
patch_length (`int`, *optional*, defaults to 32):
The length of one patch in the input sequence.
context_length (`int`, *optional*, defaults to 16384):
The length of the input context.
horizon_length (`int`, *optional*, defaults to 128):
The length of the prediction horizon.
quantiles (`list[float]`, *optional*, defaults to `[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]`):
The quantiles to predict.
hidden_size (`int`, *optional*, defaults to 1280):
Size of the hidden layers.
intermediate_size (`int`, *optional*, defaults to 1280):
Dimension of the MLP representations.
head_dim (`int`, *optional*, defaults to 80):
Size of the key, query, value projections per attention head.
num_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer.
num_key_value_heads (`int`, *optional*, defaults to 16):
Number of key-value heads. Set equal to `num_attention_heads` for full (non-grouped) attention.
num_hidden_layers (`int`, *optional*, defaults to 20):
Number of Transformer layers.
rms_norm_eps (`float`, *optional*, defaults to 1e-06):
The epsilon used by the RMS normalization layers.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout probability for the attention scores.
attention_bias (`bool`, *optional*, defaults to `False`):
Whether to use bias in the attention linear projections.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
output_quantile_len (`int`, *optional*, defaults to 1024):
Length of the quantile output projection dimension.
decode_index (`int`, *optional*, defaults to 5):
Index into the quantile dimension used to extract the point (median) forecast.
use_bias (`bool`, *optional*, defaults to `False`):
Whether to use bias in MLP and transformer linear layers.
activation (`str`, *optional*, defaults to `"swish"`):
Activation function used in MLP and residual block layers (any key from `ACT2FN`).
use_continuous_quantile_head (`bool`, *optional*, defaults to `True`):
Whether to use the continuous quantile head for non-median quantile predictions.
force_flip_invariance (`bool`, *optional*, defaults to `True`):
Whether to apply flip-invariance averaging during forecasting.
infer_is_positive (`bool`, *optional*, defaults to `True`):
Whether to clamp forecasts to non-negative values when the input minimum is non-negative.
max_position_embeddings (`int`, *optional*, defaults to 16384):
Maximum sequence length supported by the rotary position encoding.
rope_parameters (`RopeParameters` or `dict[str, RopeParameters]`, *optional*):
Dictionary containing the RoPE configuration. Uses default rope type with theta=10000.0 when not set.
Example:
```python
>>> from transformers import TimesFm2_5Config, TimesFm2_5ModelForPrediction
>>> configuration = TimesFm2_5Config()
>>> model = TimesFm2_5ModelForPrediction(configuration)
>>> configuration = model.config
```
"""
model_type = "timesfm2_5"
def __init__(
self,
patch_length: int = 32,
context_length: int = 16384,
horizon_length: int = 128,
quantiles: list = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9],
hidden_size: int = 1280,
intermediate_size: int = 1280,
head_dim: int = 80,
num_attention_heads: int = 16,
num_key_value_heads: int = 16,
num_hidden_layers: int = 20,
rms_norm_eps: float = 1e-6,
attention_dropout: float = 0.0,
attention_bias: bool = False,
initializer_range: float = 0.02,
output_quantile_len: int = 1024,
decode_index: int = 5,
use_bias: bool = False,
activation: str = "swish",
use_continuous_quantile_head: bool = True,
force_flip_invariance: bool = True,
infer_is_positive: bool = True,
max_position_embeddings: int = 16384,
rope_parameters: RopeParameters | dict[str, RopeParameters] | None = None,
**kwargs,
):
self.num_key_value_heads = num_key_value_heads
self.attention_bias = attention_bias
self.output_quantile_len = output_quantile_len
self.decode_index = decode_index
self.use_bias = use_bias
self.activation = activation
self.use_continuous_quantile_head = use_continuous_quantile_head
self.force_flip_invariance = force_flip_invariance
self.infer_is_positive = infer_is_positive
self.max_position_embeddings = max_position_embeddings
self.rope_parameters = rope_parameters
super().__init__(
patch_length=patch_length,
context_length=context_length,
horizon_length=horizon_length,
quantiles=quantiles,
hidden_size=hidden_size,
intermediate_size=intermediate_size,
head_dim=head_dim,
num_attention_heads=num_attention_heads,
num_key_value_heads=num_key_value_heads,
rms_norm_eps=rms_norm_eps,
attention_dropout=attention_dropout,
attention_bias=attention_bias,
initializer_range=initializer_range,
num_hidden_layers=num_hidden_layers,
use_positional_embedding=False,
**kwargs,
)
# Delete inherited attributes that TimesFM 2.5 does not use
del self.freq_size
del self.pad_val
del self.tolerance
del self.normalize_inputs
del self.use_positional_embedding
del self.use_rotary_embeddings
del self.min_timescale
del self.max_timescale
@dataclass
@auto_docstring
class TimesFm2_5Output(TimesFmOutput):
r"""
context_mu (`torch.Tensor` of shape `(batch_size, num_patches)`):
Running means computed per input patch during normalization.
context_sigma (`torch.Tensor` of shape `(batch_size, num_patches)`):
Running standard deviations computed per input patch during normalization.
"""
context_mu: torch.Tensor | None = None
context_sigma: torch.Tensor | None = None
@dataclass
@auto_docstring
class TimesFm2_5OutputForPrediction(TimesFmOutputForPrediction):
r"""
mean_predictions (`torch.Tensor` of shape `(batch_size, horizon_length)`):
Deterministic forecasts after denormalization.
full_predictions (`torch.Tensor` of shape `(batch_size, horizon_length, quantiles)`):
Quantile forecasts including the median after denormalization.
loss (`torch.Tensor` of shape `(1,)`, *optional*, returned when `future_values` is provided):
Training loss combining MSE and quantile losses when targets are supplied.
"""
pass
class TimesFm2_5MLP(CLIPMLP):
def __init__(self, config: TimesFm2_5Config):
super().__init__()
self.activation_fn = ACT2FN[config.activation]
self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size, bias=config.use_bias)
self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size, bias=config.use_bias)
class TimesFm2_5ResidualBlock(TimesFmResidualBlock):
"""[`TimesFmResidualBlock`] variant with configurable `use_bias` and `activation`."""
def __init__(self, config, input_dims: int, hidden_dims: int, output_dims: int, use_bias: bool | None = None):
super().__init__(input_dims, hidden_dims, output_dims)
use_bias = use_bias if use_bias is not None else config.use_bias
self.input_layer = nn.Linear(input_dims, hidden_dims, bias=use_bias)
self.output_layer = nn.Linear(hidden_dims, output_dims, bias=use_bias)
self.residual_layer = nn.Linear(input_dims, output_dims, bias=use_bias)
self.activation = ACT2FN[config.activation]
class TimesFm2_5RMSNorm(LlamaRMSNorm):
pass
class TimesFm2_5RotaryEmbedding(LlamaRotaryEmbedding):
pass
class TimesFm2_5Attention(ApertusAttention):
"""TimesFM 2.5 attention with learnable per-dimension query scaling."""
def __init__(self, config: TimesFm2_5Config, layer_idx: int):
super().__init__(config, layer_idx)
self.scaling = nn.Parameter(torch.empty((self.head_dim,)))
def forward(
self,
hidden_states: torch.Tensor,
position_embeddings: tuple[torch.Tensor, torch.Tensor],
attention_mask: torch.Tensor | None,
past_key_values=None,
cache_position: torch.LongTensor | None = None,
**kwargs: Unpack[TransformersKwargs],
):
input_shape = hidden_states.shape[:-1]
hidden_shape = (*input_shape, -1, self.head_dim)
query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)
cos, sin = position_embeddings
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
query_states = self.q_norm(query_states)
key_states = self.k_norm(key_states)
scale = F.softplus(self.scaling).mul(1.442695041 / math.sqrt(self.head_dim))
query_states = query_states * scale[None, None, None, :]
if past_key_values is not None:
cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
attention_interface: Callable = ALL_ATTENTION_FUNCTIONS.get_interface(
self.config._attn_implementation, eager_attention_forward
)
attn_output, attn_weights = attention_interface(
self,
query_states,
key_states,
value_states,
attention_mask,
dropout=self.attention_dropout if self.training else 0.0,
# scaling=1.0 because per-dimension learnable scaling is already applied to query_states above
scaling=1.0,
**kwargs,
)
attn_output = attn_output.reshape(*input_shape, -1).contiguous()
attn_output = self.o_proj(attn_output)
return attn_output, attn_weights
class TimesFm2_5DecoderLayer(LlamaDecoderLayer):
    """TimesFM 2.5 Transformer decoder layer with pre/post RMS normalization and no KV cache."""

    def __init__(self, config: TimesFm2_5Config, layer_idx: int):
        super().__init__(config, layer_idx)
        # Extra "sandwich" norms around the MLP, in addition to the norms inherited from Llama.
        self.pre_feedforward_layernorm = TimesFm2_5RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.post_feedforward_layernorm = TimesFm2_5RMSNorm(config.hidden_size, eps=config.rms_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: tuple[torch.Tensor, torch.Tensor] | None = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.Tensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> torch.Tensor:
        # Attention sub-block: norm -> self-attention -> norm -> residual add.
        attn_residual = hidden_states
        attn_out, _ = self.self_attn(
            hidden_states=self.input_layernorm(hidden_states),
            position_embeddings=position_embeddings,
            attention_mask=attention_mask,
            position_ids=position_ids,
            **kwargs,
        )
        hidden_states = attn_residual + self.post_attention_layernorm(attn_out)
        # Feed-forward sub-block: norm -> MLP -> norm -> residual add.
        mlp_residual = hidden_states
        mlp_out = self.mlp(self.pre_feedforward_layernorm(hidden_states))
        return mlp_residual + self.post_feedforward_layernorm(mlp_out)
@auto_docstring
class TimesFm2_5PreTrainedModel(TimesFmPreTrainedModel):
    """Declaration-only base class wiring TimesFM 2.5 into the library's weight-loading machinery."""

    config_class = TimesFm2_5Config
    base_model_prefix = "model"
    # Keep each decoder layer on a single device when sharding with `device_map`.
    _no_split_modules = ["TimesFm2_5DecoderLayer"]
    _supports_flash_attn = True
    _supports_flex_attn = True
    # Module classes whose outputs are recorded into `hidden_states` / `attentions`
    # by the output-capture utilities (see the `capture_outputs` decorator on the model forward).
    _can_record_outputs = {
        "hidden_states": TimesFm2_5DecoderLayer,
        "attentions": TimesFm2_5Attention,
    }
class TimesFm2_5Model(TimesFm2_5PreTrainedModel):
    """TimesFM 2.5 backbone: patches the input series, normalizes each patch with causal
    running statistics (RevIN + Welford), and runs the decoder stack over patch embeddings.
    """

    def __init__(self, config: TimesFm2_5Config):
        super().__init__(config)
        self.config = config
        # Scales below this threshold are treated as degenerate (see `_revin` and `forward`).
        self.tolerance = 1e-6
        # Tokenizer: maps [patch values ; patch mask] (2 * patch_length features) to hidden_size.
        self.input_ff_layer = TimesFm2_5ResidualBlock(
            config,
            input_dims=2 * config.patch_length,
            hidden_dims=config.hidden_size,
            output_dims=config.hidden_size,
            use_bias=True,
        )
        self.layers = nn.ModuleList(
            [TimesFm2_5DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        self.rotary_emb = TimesFm2_5RotaryEmbedding(config)
        self.gradient_checkpointing = False
        self.post_init()

    def _revin(
        self,
        hidden_states: torch.Tensor,
        loc: torch.Tensor,
        scale: torch.Tensor,
        reverse: bool = False,
        mask: torch.Tensor | None = None,
    ) -> torch.Tensor:
        """Reversible instance normalization (RevIN).
        Normalizes or denormalizes `hidden_states` using the provided location and scale statistics.
        When `mask` is provided during normalization (reverse=False), masked positions are zeroed out.
        """
        # Broadcast the statistics over the trailing dimension(s) they are missing.
        if len(loc.shape) == len(hidden_states.shape) - 1:
            loc = loc[..., None]
            scale = scale[..., None]
        elif len(loc.shape) == len(hidden_states.shape) - 2:
            loc = loc[..., None, None]
            scale = scale[..., None, None]
        loc = loc.to(hidden_states.device)
        scale = scale.to(hidden_states.device)
        # Guard against (near-)zero scales so normalization never divides by ~0.
        safe_scale = torch.where(scale < self.tolerance, torch.ones_like(scale), scale)
        if reverse:
            # Denormalization uses the raw scale (callers clamp it where needed).
            return hidden_states * scale + loc
        normed = (hidden_states - loc) / safe_scale
        if mask is not None:
            # mask == True marks padded/invalid positions; zero them after normalization.
            normed = torch.where(mask, torch.zeros_like(normed), normed)
        return normed

    @staticmethod
    def _update_running_stats(
        count: torch.Tensor,
        mean: torch.Tensor,
        std: torch.Tensor,
        new_values: torch.Tensor,
        mask: torch.Tensor,
    ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """Update running mean/std using Welford's online algorithm.
        Combines existing statistics (`count`, `mean`, `std`) with a new batch of values,
        respecting the boolean `mask` (True = masked/invalid).
        """
        is_valid = (~mask).to(new_values.dtype)
        # Statistics of the incoming batch, computed over valid positions only.
        # The `*_safe` counts replace 0 with 1 purely to avoid division by zero; the
        # subsequent `torch.where` forces the corresponding statistic back to 0.
        inc_count = is_valid.sum(dim=-1)
        inc_count_safe = torch.where(inc_count == 0, torch.ones_like(inc_count), inc_count)
        inc_mean = (new_values * is_valid).sum(dim=-1) / inc_count_safe
        inc_mean = torch.where(inc_count == 0, torch.zeros_like(inc_mean), inc_mean)
        centered = new_values - inc_mean.unsqueeze(-1)
        inc_var = ((centered * is_valid) ** 2).sum(dim=-1) / inc_count_safe
        inc_var = torch.where(inc_count == 0, torch.zeros_like(inc_var), inc_var)
        inc_std = torch.sqrt(torch.clamp(inc_var, min=0.0))
        # Merge old and incoming statistics (parallel-variance combination):
        # total variance = weighted within-group variances + weighted squared mean shifts.
        new_count = count + inc_count
        new_count_safe = torch.where(new_count == 0, torch.ones_like(new_count), new_count)
        new_mean = (count * mean + inc_mean * inc_count) / new_count_safe
        new_mean = torch.where(new_count == 0, torch.zeros_like(new_mean), new_mean)
        term1 = count * std.pow(2)
        term2 = inc_count * inc_std.pow(2)
        term3 = count * (mean - new_mean).pow(2)
        term4 = inc_count * (inc_mean - new_mean).pow(2)
        new_var = (term1 + term2 + term3 + term4) / new_count_safe
        new_var = torch.where(new_count == 0, torch.zeros_like(new_var), new_var)
        # Clamp protects against tiny negative variances from float round-off.
        new_std = torch.sqrt(torch.clamp(new_var, min=0.0))
        return new_count, new_mean, new_std

    @merge_with_config_defaults
    @capture_outputs
    @auto_docstring
    def forward(
        self,
        past_values: torch.Tensor,
        past_values_padding: torch.LongTensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> TimesFm2_5Output:
        r"""
        past_values (`torch.Tensor` of shape `(batch_size, sequence_length)`):
            Past values of the time series used as input to the model.
        past_values_padding (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Padding mask for the input. `1` indicates padded (masked) time steps, `0` indicates valid values.
        """
        batch_size, seq_len = past_values.shape
        patch_len = self.config.patch_length
        if past_values_padding is None:
            past_values_padding = torch.zeros_like(past_values, dtype=torch.long)
        # Split the series into non-overlapping patches; `view` requires seq_len to be a
        # multiple of patch_length (callers are expected to pad beforehand).
        patched_inputs = past_values.view(batch_size, -1, patch_len)
        patched_masks = past_values_padding[:, :seq_len].view(batch_size, -1, patch_len)
        patched_masks_bool = patched_masks >= 0.5
        # Causal per-series statistics, updated patch by patch, so each patch is normalized
        # using only information available up to (and including) that patch.
        count = past_values.new_zeros(batch_size)
        mean = past_values.new_zeros(batch_size)
        std = past_values.new_zeros(batch_size)
        mean_history: list[torch.Tensor] = []
        std_history: list[torch.Tensor] = []
        for i in range(patched_inputs.shape[1]):
            count, mean, std = self._update_running_stats(
                count, mean, std, patched_inputs[:, i, :], patched_masks_bool[:, i, :]
            )
            mean_history.append(mean)
            std_history.append(std)
        if mean_history:
            context_mu = torch.stack(mean_history, dim=1)
            context_sigma = torch.stack(std_history, dim=1)
        else:
            # Degenerate zero-patch input: fall back to the (all-zero) initial statistics.
            context_mu = mean.unsqueeze(1)
            context_sigma = std.unsqueeze(1)
        normed_inputs = self._revin(patched_inputs, context_mu, context_sigma, reverse=False, mask=patched_masks_bool)
        # Concatenate the padding mask as extra features so the tokenizer can tell valid
        # steps from padded ones.
        tokenizer_inputs = torch.cat(
            [normed_inputs, patched_masks_bool.to(dtype=normed_inputs.dtype)],
            dim=-1,
        )
        input_embeddings = self.input_ff_layer(tokenizer_inputs)
        # A patch counts as padded iff its last time step is masked.
        patch_padding = patched_masks_bool[..., -1]
        sequence_length = input_embeddings.shape[1]
        # Offset positions by the number of padded patches — assumes padding sits at the
        # left of the sequence so real patches start at position 0 (NOTE(review): confirm).
        num_masked = patch_padding.to(torch.int32).sum(dim=-1, keepdim=True)
        position_ids = torch.arange(sequence_length, device=input_embeddings.device).unsqueeze(0) - num_masked
        padding_mask = (~patch_padding).to(torch.int64)
        cache_position = torch.arange(sequence_length, device=input_embeddings.device)
        attention_mask = create_causal_mask(
            self.config, input_embeddings, padding_mask, cache_position, past_key_values=None
        )
        position_embeddings = self.rotary_emb(input_embeddings, position_ids)
        hidden_states = input_embeddings
        for layer in self.layers:
            hidden_states = layer(
                hidden_states,
                position_embeddings=position_embeddings,
                attention_mask=attention_mask,
                position_ids=position_ids,
                **kwargs,
            )
        # Final (whole-context) statistics, returned so callers can denormalize predictions.
        loc = context_mu[:, -1]
        scale = torch.clamp(context_sigma[:, -1], min=self.tolerance)
        return TimesFm2_5Output(
            last_hidden_state=hidden_states,
            loc=loc,
            scale=scale,
            context_mu=context_mu,
            context_sigma=context_sigma,
        )
class TimesFm2_5ModelForPrediction(TimesFmModelForPrediction):
    """TimesFM 2.5 forecasting head: wraps `TimesFm2_5Model` with point and quantile projections."""

    def __init__(self, config: TimesFm2_5Config):
        super().__init__(config)
        self.config = config
        self.context_len = config.context_length
        self.horizon_len = config.horizon_length
        # Remove inherited attributes from parent TimesFmModelForPrediction
        del self.decoder
        del self.horizon_ff_layer
        self.model = TimesFm2_5Model(config)
        # One output channel per configured quantile plus one for the point forecast.
        num_quantiles = len(config.quantiles) + 1
        self.output_projection_point = TimesFm2_5ResidualBlock(
            config,
            input_dims=config.hidden_size,
            hidden_dims=config.hidden_size,
            output_dims=config.horizon_length * num_quantiles,
        )
        self.output_projection_quantiles = TimesFm2_5ResidualBlock(
            config,
            input_dims=config.hidden_size,
            hidden_dims=config.hidden_size,
            output_dims=config.output_quantile_len * num_quantiles,
        )
        self.post_init()

    def _decode_and_project(
        self,
        normalized_ts: torch.Tensor,
        input_padding: torch.Tensor,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple[torch.Tensor, torch.Tensor, TimesFm2_5Output]:
        """Run the decoder and project to point/quantile outputs.
        Returns:
            Tuple of `(point_forecast, quantile_spreads, model_outputs)`: the first two have
            shape `(batch, length, num_quantiles)`, and `model_outputs` is the backbone's
            [`TimesFm2_5Output`] (needed by callers for hidden states / attentions).
        """
        model_outputs = self.model(
            past_values=normalized_ts,
            past_values_padding=input_padding,
            **kwargs,
        )
        hidden_states = model_outputs.last_hidden_state
        context_mu = model_outputs.context_mu
        context_sigma = model_outputs.context_sigma
        # Denormalize both heads with the backbone's per-patch RevIN statistics.
        point_output = self.model._revin(
            self.output_projection_point(hidden_states), context_mu, context_sigma, reverse=True
        )
        quantile_output = self.model._revin(
            self.output_projection_quantiles(hidden_states), context_mu, context_sigma, reverse=True
        )
        batch_size, num_patches = point_output.shape[:2]
        num_quantiles = len(self.config.quantiles) + 1
        # Keep only the forecast emitted from the final patch position.
        point_forecast = point_output.view(batch_size, num_patches, self.config.horizon_length, num_quantiles)[
            :, -1, :, :
        ]
        quantile_spreads = quantile_output.view(
            batch_size, num_patches, self.config.output_quantile_len, num_quantiles
        )[:, -1, :, :]
        # Ensure both outputs are on the same device for model parallelism
        quantile_spreads = quantile_spreads.to(point_forecast.device)
        return point_forecast, quantile_spreads, model_outputs

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        past_values: Sequence[torch.Tensor],
        window_size: int | None = None,
        future_values: torch.Tensor | None = None,
        forecast_context_len: int | None = None,
        truncate_negative: bool | None = None,
        force_flip_invariance: bool | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> TimesFm2_5OutputForPrediction:
        r"""
        past_values (`Sequence[torch.Tensor]`):
            Past values of the time series that serves as input to the model. Each tensor is a 1D time series.
        window_size (`int`, *optional*):
            Window size of trend + residual decomposition. If `None`, decomposition is not applied.
        future_values (`torch.Tensor`, *optional*):
            Optional future values used to compute the loss.
        forecast_context_len (`int`, *optional*):
            Optional context length override used during forecasting.
        truncate_negative (`bool`, *optional*):
            Whether to clamp outputs to non-negative values. If `None`, defaults to `config.infer_is_positive`.
        force_flip_invariance (`bool`, *optional*):
            Whether to apply the flip-invariance combination. If `None`, defaults to
            `config.force_flip_invariance`.
        """
        forecast_context_len = forecast_context_len or self.context_len
        device = past_values[0].device
        # Keep only the most recent `forecast_context_len` steps of every series.
        inputs = [ts[-forecast_context_len:] for ts in past_values]
        input_min = torch.min(torch.stack([torch.min(ts) for ts in inputs]))
        if window_size is not None:
            # Trend/residual decomposition: each series expands into multiple series
            # (recombined pairwise after prediction below).
            new_inputs: list[torch.Tensor] = []
            for ts in inputs:
                new_inputs.extend(self._timesfm_moving_average(ts, window_size))
            inputs = new_inputs
        if truncate_negative is None:
            truncate_negative = self.config.infer_is_positive
        if force_flip_invariance is None:
            force_flip_invariance = self.config.force_flip_invariance
        input_ts, input_padding = self._preprocess(inputs, context_len=forecast_context_len)
        input_ts = input_ts.to(device)
        input_padding = input_padding.to(device)
        # Global per-series normalization, applied on top of the backbone's causal RevIN.
        mu_global = input_ts.mean(dim=1, keepdim=True)
        sigma_global = input_ts.std(dim=1, keepdim=True)
        normalized_ts = self.model._revin(input_ts, mu_global, sigma_global, reverse=False)
        pf_outputs, quantile_spreads, model_outputs = self._decode_and_project(normalized_ts, input_padding, **kwargs)
        if force_flip_invariance:
            # Average with (sign-corrected) predictions on the negated input so that
            # negating the series negates the forecast.
            flipped_pf, flipped_qs, _ = self._decode_and_project(-normalized_ts, input_padding, **kwargs)

            def _flip_quantiles(x: torch.Tensor) -> torch.Tensor:
                # Keep channel 0 (point forecast); reverse the quantile channel order.
                return torch.cat([x[..., :1], torch.flip(x[..., 1:], dims=(-1,))], dim=-1)

            pf_outputs = (pf_outputs - _flip_quantiles(flipped_pf)) / 2
            quantile_spreads = (quantile_spreads - _flip_quantiles(flipped_qs)) / 2
        horizon = min(self.horizon_len, pf_outputs.shape[1])
        full_forecast = pf_outputs[:, :horizon, :].clone()
        median_index = min(self.config.decode_index, full_forecast.shape[-1] - 1)
        if self.config.use_continuous_quantile_head:
            # Re-anchor the quantile head's spreads onto the point head's median channel.
            max_quantile_horizon = min(horizon, quantile_spreads.shape[1])
            # Quantile channels start at 1 (channel 0 is the point forecast).
            for idx, _ in enumerate(self.config.quantiles, start=1):
                if idx == median_index or idx >= full_forecast.shape[-1]:
                    continue
                full_forecast[:, :max_quantile_horizon, idx] = (
                    quantile_spreads[:, :max_quantile_horizon, idx]
                    - quantile_spreads[:, :max_quantile_horizon, median_index]
                    + full_forecast[:, :max_quantile_horizon, median_index]
                )
        # Undo the global normalization applied above.
        full_predictions = self.model._revin(full_forecast, mu_global, sigma_global, reverse=True)
        decode_index = min(self.config.decode_index, full_predictions.shape[-1] - 1)
        mean_predictions = full_predictions[:, :, decode_index]
        if window_size is not None:
            # Recombine the decomposed series: even rows + odd rows
            # (presumably trend + residual pairs — see `_timesfm_moving_average`).
            mean_predictions = mean_predictions[0::2, ...] + mean_predictions[1::2, ...]
            full_predictions = full_predictions[0::2, ...] + full_predictions[1::2, ...]
        if truncate_negative:
            # Clamp to zero only when every input value was non-negative.
            zero = torch.zeros(1, device=mean_predictions.device, dtype=mean_predictions.dtype)
            clamped_mean = torch.maximum(mean_predictions, zero)
            clamped_full = torch.maximum(full_predictions, zero)
            should_clamp = (input_min >= 0).to(mean_predictions.device)
            mean_predictions = torch.where(should_clamp, clamped_mean, mean_predictions)
            full_predictions = torch.where(should_clamp, clamped_full, full_predictions)
        loss = None
        if future_values is not None:
            # Point (MSE) loss plus the quantile loss over the non-point channels.
            mse_loss = F.mse_loss(mean_predictions, future_values)
            quantile_indices = [i for i in range(full_predictions.shape[-1]) if i != decode_index]
            if quantile_indices:
                index_tensor = torch.tensor(quantile_indices, device=full_predictions.device, dtype=torch.long)
                quantile_tensor = torch.index_select(full_predictions, dim=-1, index=index_tensor)
                quantile_loss = self._quantile_loss(quantile_tensor, future_values)
                loss = mse_loss + quantile_loss
            else:
                loss = mse_loss
        return TimesFm2_5OutputForPrediction(
            last_hidden_state=model_outputs.last_hidden_state,
            hidden_states=model_outputs.hidden_states,
            attentions=model_outputs.attentions,
            mean_predictions=mean_predictions,
            full_predictions=full_predictions,
            loss=loss,
        )
# Public symbols exported by this modular file.
__all__ = [
    "TimesFm2_5Config",
    "TimesFm2_5ModelForPrediction",
    "TimesFm2_5PreTrainedModel",
    "TimesFm2_5Model",
]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/timesfm2_5/modular_timesfm2_5.py",
"license": "Apache License 2.0",
"lines": 615,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:tests/models/timesfm2_5/test_modeling_timesfm2_5.py | # Copyright 2026 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import torch
from parameterized import parameterized
from transformers import TimesFm2_5Config, is_torch_available
from transformers.testing_utils import require_flash_attn, require_torch, require_torch_accelerator, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import TEST_EAGER_MATCHES_SDPA_INFERENCE_PARAMETERIZATION, ModelTesterMixin
if is_torch_available():
from transformers import TimesFm2_5ModelForPrediction
class TimesFm2_5ModelTester:
def __init__(
self,
parent,
patch_length: int = 32,
context_length: int = 128,
horizon_length: int = 8,
num_hidden_layers: int = 1,
hidden_size: int = 32, # 2 heads * 16 head_dim
intermediate_size: int = 64,
head_dim: int = 16,
num_heads: int = 2,
rms_norm_eps: float = 1e-6,
quantiles: list[float] = [0.1, 0.5, 0.9],
output_quantile_len: int = 16,
is_training: bool = False,
batch_size: int = 2,
):
self.parent = parent
self.patch_length = patch_length
self.context_length = context_length
self.horizon_length = horizon_length
self.quantiles = quantiles
self.output_quantile_len = output_quantile_len
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.head_dim = head_dim
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_heads
self.rms_norm_eps = rms_norm_eps
self.is_training = is_training
self.batch_size = batch_size
# The size of test input
self.seq_length = context_length // patch_length
def get_config(self):
return TimesFm2_5Config(
patch_length=self.patch_length,
context_length=self.context_length,
horizon_length=self.horizon_length,
quantiles=self.quantiles,
output_quantile_len=self.output_quantile_len,
hidden_size=self.hidden_size,
intermediate_size=self.intermediate_size,
head_dim=self.head_dim,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
num_key_value_heads=self.num_attention_heads,
rms_norm_eps=self.rms_norm_eps,
)
def get_pipeline_config(self):
return self.get_config()
def prepare_config_and_inputs(self):
forecast_input = torch.stack(
[
torch.tensor(np.sin(np.linspace(0, 20, 100)), dtype=torch.float32, device=torch_device),
torch.tensor(np.cos(np.linspace(0, 20, 100)), dtype=torch.float32, device=torch_device),
]
)
return self.get_config(), forecast_input
def prepare_config_and_inputs_for_common(self):
config, forecast_input = self.prepare_config_and_inputs()
inputs_dict = {"past_values": forecast_input}
return config, inputs_dict
@require_torch
class TimesFm2_5ModelTest(ModelTesterMixin, unittest.TestCase):
    """Common-suite unit tests for TimesFM 2.5, with overrides where the generic harness
    conflicts with the model's internally computed attention masks.
    """

    all_model_classes = (TimesFm2_5ModelForPrediction,) if is_torch_available() else ()
    test_resize_embeddings = False
    is_encoder_decoder = False
    test_inputs_embeds = False

    def setUp(self):
        self.model_tester = TimesFm2_5ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TimesFm2_5Config)

    def test_create_and_run_model(self):
        # Smoke test: a forward pass on the tiny config must produce mean predictions.
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        model = TimesFm2_5ModelForPrediction(config)
        model.to(torch_device)
        model.eval()
        results = model(**inputs_dict)
        assert results.mean_predictions is not None

    @unittest.skip(reason="FA backend not yet supported because of forced masks")
    def test_sdpa_can_dispatch_on_flash(self):
        pass

    @unittest.skip(reason="Model does not have input embeddings")
    def test_model_get_set_embeddings(self):
        pass

    @parameterized.expand(TEST_EAGER_MATCHES_SDPA_INFERENCE_PARAMETERIZATION)
    def test_eager_matches_sdpa_inference(
        self, name, dtype, padding_side, use_attention_mask, output_attentions, enable_kernels
    ):
        """
        TimesFM 2.5 computes its own causal attention mask internally from the input padding,
        so the generic test harness (which injects external attention masks and sets RMSNorm eps=1.0
        on QK-norm layers) is not compatible. This override directly verifies eager vs SDPA equivalence.
        """
        if not self.all_model_classes[0]._supports_sdpa:
            self.skipTest("Model does not support SDPA")
        # Map the parameterization's dtype string to an actual torch dtype.
        if dtype == "fp16":
            dtype = torch.float16
        elif dtype == "bf16":
            dtype = torch.bfloat16
        elif dtype == "fp32":
            dtype = torch.float32
        # Looser tolerances for half-precision dtypes.
        tolerance = {torch.float32: 1e-5, torch.bfloat16: 1e-3, torch.float16: 1e-3}[dtype]
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        model_eager = TimesFm2_5ModelForPrediction._from_config(config, attn_implementation="eager")
        model_eager.to(dtype=dtype, device=torch_device)
        model_eager.eval()
        # SDPA model copies the eager model's weights so the outputs are directly comparable.
        model_sdpa = TimesFm2_5ModelForPrediction._from_config(config, attn_implementation="sdpa")
        model_sdpa.load_state_dict(model_eager.state_dict())
        model_sdpa.to(dtype=dtype, device=torch_device)
        model_sdpa.eval()
        past_values = inputs_dict["past_values"].to(dtype=dtype, device=torch_device)
        with torch.no_grad():
            out_eager = model_eager(past_values=past_values)
            out_sdpa = model_sdpa(past_values=past_values)
        # Compare mean predictions
        self.assertTrue(
            torch.allclose(out_eager.mean_predictions, out_sdpa.mean_predictions, atol=tolerance),
            f"mean_predictions max diff: {(out_eager.mean_predictions - out_sdpa.mean_predictions).abs().max().item():.2e}",
        )
        # Compare full predictions
        self.assertTrue(
            torch.allclose(out_eager.full_predictions, out_sdpa.full_predictions, atol=tolerance),
            f"full_predictions max diff: {(out_eager.full_predictions - out_sdpa.full_predictions).abs().max().item():.2e}",
        )
        # Compare last hidden state
        hs_eager = out_eager.hidden_states[-1]
        hs_sdpa = out_sdpa.hidden_states[-1]
        self.assertTrue(
            torch.allclose(hs_eager, hs_sdpa, atol=tolerance),
            f"hidden_states max diff: {(hs_eager - hs_sdpa).abs().max().item():.2e}",
        )

    def _test_flash_or_flex_attn_inference_equivalence(self, attn_implementation):
        """
        TimesFM 2.5 computes its own attention mask internally, so the generic
        flash/flex equivalence test (which injects external attention masks) does not apply.
        This override directly verifies eager vs flash/flex equivalence.
        """
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        # bf16 with a loose tolerance: flash/flex kernels only support half precision.
        dtype = torch.bfloat16
        tolerance = 1e-2
        model_eager = TimesFm2_5ModelForPrediction._from_config(config, attn_implementation="eager")
        model_eager.to(dtype=dtype, device=torch_device)
        model_eager.eval()
        model_fa = TimesFm2_5ModelForPrediction._from_config(config, attn_implementation=attn_implementation)
        model_fa.load_state_dict(model_eager.state_dict())
        model_fa.to(dtype=dtype, device=torch_device)
        model_fa.eval()
        past_values = inputs_dict["past_values"].to(dtype=dtype, device=torch_device)
        with torch.no_grad():
            out_eager = model_eager(past_values=past_values)
            out_fa = model_fa(past_values=past_values)
        self.assertTrue(
            torch.allclose(out_eager.mean_predictions, out_fa.mean_predictions, atol=tolerance),
            f"mean_predictions max diff: {(out_eager.mean_predictions - out_fa.mean_predictions).abs().max().item():.2e}",
        )
        hs_eager = out_eager.hidden_states[-1]
        hs_fa = out_fa.hidden_states[-1]
        self.assertTrue(
            torch.allclose(hs_eager, hs_fa, atol=tolerance),
            f"hidden_states max diff: {(hs_eager - hs_fa).abs().max().item():.2e}",
        )

    @require_flash_attn
    @require_torch_accelerator
    def test_flash_attn_2_inference_equivalence(self):
        self._test_flash_or_flex_attn_inference_equivalence("flash_attention_2")

    @require_flash_attn
    @require_torch_accelerator
    def test_flash_attn_2_inference_equivalence_right_padding(self):
        # The model builds its padding mask internally, so left/right padding share one check.
        self._test_flash_or_flex_attn_inference_equivalence("flash_attention_2")

    def test_retain_grad_hidden_states_attentions(self):
        """
        TimesFM 2.5 specific test for retain_grad since the model returns mean_predictions
        as the first tensor, not last_hidden_state like standard models.
        """
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions
        # force eager attention to support output attentions
        if self.has_attentions:
            config._attn_implementation = "eager"
        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class._from_config(config, attn_implementation="eager")
        model.to(torch_device)
        inputs = self._prepare_for_class(inputs_dict, model_class)
        outputs = model(**inputs)
        # TimesFM 2.5 returns mean_predictions as first output, not last_hidden_state
        output_tensor = outputs.mean_predictions
        # Encoder-/Decoder-only models
        if outputs.hidden_states is not None:
            hidden_states = outputs.hidden_states[0]
            hidden_states.retain_grad()
        if self.has_attentions and outputs.attentions is not None:
            attentions = outputs.attentions[0]
            attentions.retain_grad()
        output_tensor.flatten()[0].backward(retain_graph=True)
        if outputs.hidden_states is not None:
            self.assertIsNotNone(hidden_states.grad)
        if self.has_attentions and outputs.attentions is not None:
            self.assertIsNotNone(attentions.grad)
@require_torch
@slow
class TimesFm2_5ModelIntegrationTests(unittest.TestCase):
    """Slow end-to-end checks against the released TimesFM 2.5 checkpoint."""

    def test_inference(self):
        model = TimesFm2_5ModelForPrediction.from_pretrained(
            "google/timesfm-2.5-200m-transformers", revision="refs/pr/3"
        ).to(torch_device)
        # Three sine waves of increasing length exercise the variable-length input path.
        forecast_input_tensor = [
            torch.tensor(np.sin(np.linspace(0, 20, length)), dtype=torch.float32, device=torch_device)
            for length in (100, 200, 400)
        ]
        with torch.no_grad():
            mean_predictions = model(past_values=forecast_input_tensor).mean_predictions
        self.assertEqual(mean_predictions.shape, torch.Size([3, model.config.horizon_length]))
        # fmt: off
        expected_slice = torch.tensor(
            [ 0.9745,  1.0047,  0.9707,  0.9161,  0.8041,  0.6829,  0.5378,  0.3563,
              0.1698, -0.0396, -0.2508, -0.4358, -0.6150, -0.7491, -0.8659, -0.9535,
             -1.0024, -0.9977, -0.9557, -0.8840, -0.7716, -0.6092, -0.4526, -0.2582,
             -0.0554,  0.1263,  0.3258,  0.5207,  0.6667,  0.7989,  0.9002,  0.9782,
              0.9848,  0.9877,  0.9339,  0.8473,  0.7109,  0.5525,  0.3799,  0.1756,
             -0.0285, -0.2325, -0.4137, -0.5926, -0.7425, -0.8532, -0.9444, -0.9878,
             -0.9985, -0.9828, -0.8972, -0.7833, -0.6414, -0.4881, -0.2838, -0.0878,
              0.1169,  0.3137,  0.4918,  0.6508,  0.7762,  0.8961,  0.9666,  0.9910
            ],
            device=torch_device)
        # fmt: on
        self.assertTrue(torch.allclose(mean_predictions[0, :64], expected_slice, atol=1e-4))
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/timesfm2_5/test_modeling_timesfm2_5.py",
"license": "Apache License 2.0",
"lines": 257,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:src/transformers/models/pp_doclayout_v2/modular_pp_doclayout_v2.py | # Copyright 2026 The PaddlePaddle Team and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ... import initialization as init
from ...backbone_utils import consolidate_backbone_kwargs_to_config
from ...configuration_utils import PreTrainedConfig
from ...masking_utils import create_bidirectional_mask
from ...processing_utils import Unpack
from ...utils import (
ModelOutput,
TransformersKwargs,
auto_docstring,
can_return_tuple,
logging,
)
from ...utils.generic import TensorType
from ..auto import AutoConfig
from ..layoutlmv3.modeling_layoutlmv3 import (
LayoutLMv3Attention,
LayoutLMv3Encoder,
LayoutLMv3Intermediate,
LayoutLMv3Layer,
LayoutLMv3Output,
LayoutLMv3SelfAttention,
LayoutLMv3SelfOutput,
LayoutLMv3TextEmbeddings,
)
from ..pp_doclayout_v3.image_processing_pp_doclayout_v3_fast import PPDocLayoutV3ImageProcessorFast
from ..pp_doclayout_v3.modeling_pp_doclayout_v3 import PPDocLayoutV3GlobalPointer
from ..rt_detr.modeling_rt_detr import (
RTDetrForObjectDetection,
RTDetrMLPPredictionHead,
RTDetrModel,
RTDetrModelOutput,
RTDetrPreTrainedModel,
)
logger = logging.get_logger(__name__)
class PPDocLayoutV2ReadingOrderConfig(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`PPDocLayoutV2ReadingOrder`].
It is used to instantiate the reading order sub-module of the PP-DocLayoutV2 model. This configuration defines the architecture and hyperparameters specific to the reading order detection task within the larger PP-DocLayoutV2 framework.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
hidden_size (`int`, *optional*, defaults to 512):
Dimension of the encoder layers and the pooled layer.
num_attention_heads (`int`, *optional*, defaults to 8):
Number of attention heads for each attention layer.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout ratio for the attention probabilities.
        has_relative_attention_bias (`bool`, *optional*, defaults to `False`):
Whether or not to use a relative attention bias in the self-attention mechanism.
has_spatial_attention_bias (`bool`, *optional*, defaults to `True`):
Whether or not to use a spatial attention bias in the self-attention mechanism.
layer_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the layer normalization layers.
hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
intermediate_size (`int`, *optional*, defaults to 2048):
Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"silu"` and `"gelu_new"` are supported.
num_hidden_layers (`int`, *optional*, defaults to 6):
Number of the hidden layers.
rel_pos_bins (`int`, *optional*, defaults to 32):
The number of relative position bins to be used in the self-attention mechanism.
max_rel_pos (`int`, *optional*, defaults to 128):
The maximum number of relative positions to be used in the self-attention mechanism.
rel_2d_pos_bins (`int`, *optional*, defaults to 64):
The number of 2D relative position bins in the self-attention mechanism.
max_rel_2d_pos (`int`, *optional*, defaults to 256):
The maximum number of relative 2D positions in the self-attention mechanism.
max_position_embeddings (`int`, *optional*, defaults to 514):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
max_2d_position_embeddings (`int`, *optional*, defaults to 1024):
The maximum value that the 2D position embedding might ever be used with. Typically set this to something
large just in case (e.g., 1024).
type_vocab_size (`int`, *optional*, defaults to 1):
The vocabulary size of the `token_type_ids`.
vocab_size (`int`, *optional*, defaults to 4):
Vocabulary size of the model. Defines the number of different tokens that can be represented by the `inputs_ids`.
initializer_range (`float`, *optional*, defaults to 0.01):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
start_token_id (`int`, *optional*, defaults to 0):
Token id representing the start of a sequence.
pad_token_id (`int`, *optional*, defaults to 1):
Token id used for padding the input sequences.
end_token_id (`int`, *optional*, defaults to 2):
Token id representing the end of a sequence.
pred_token_id (`int`, *optional*, defaults to 3):
Token id representing valid prediction positions (placeholders) in the sequence.
coordinate_size (`int`, *optional*, defaults to 171):
Dimension of the coordinate embeddings.
shape_size (`int`, *optional*, defaults to 170):
Dimension of the width and height embeddings.
num_classes (`int`, *optional*, defaults to 20):
Number of labels or classes for the layout elements.
relation_bias_embed_dim (`int`, *optional*, defaults to 16):
Embedding dimension for the relation bias.
relation_bias_theta (`float`, *optional*, defaults to 10000):
Temperature parameter used for relation bias scaling.
relation_bias_scale (`float`, *optional*, defaults to 100):
Scale parameter for the relation bias.
global_pointer_head_size (`int`, *optional*, defaults to 64):
The size of the global pointer head.
gp_dropout_value (`float`, *optional*, defaults to 0.0):
The dropout probability in the global pointer head.
"""
def __init__(
    self,
    hidden_size=512,
    num_attention_heads=8,
    attention_probs_dropout_prob=0.1,
    has_relative_attention_bias=False,
    has_spatial_attention_bias=True,
    layer_norm_eps=1e-5,
    hidden_dropout_prob=0.1,
    intermediate_size=2048,
    hidden_act="gelu",
    num_hidden_layers=6,
    rel_pos_bins=32,
    max_rel_pos=128,
    rel_2d_pos_bins=64,
    max_rel_2d_pos=256,
    max_position_embeddings=514,
    max_2d_position_embeddings=1024,
    type_vocab_size=1,
    vocab_size=4,
    initializer_range=0.01,
    start_token_id=0,
    pad_token_id=1,
    end_token_id=2,
    pred_token_id=3,
    coordinate_size=171,
    shape_size=170,
    num_classes=20,
    relation_bias_embed_dim=16,
    relation_bias_theta=10000,
    relation_bias_scale=100,
    global_pointer_head_size=64,
    gp_dropout_value=0.0,
    **kwargs,
):
    """Store the reading-order sub-model hyper-parameters; see the class docstring for argument docs."""
    # ---- LayoutLMv3-style transformer encoder ----
    self.hidden_size = hidden_size
    self.num_attention_heads = num_attention_heads
    self.attention_probs_dropout_prob = attention_probs_dropout_prob
    self.has_relative_attention_bias = has_relative_attention_bias
    self.has_spatial_attention_bias = has_spatial_attention_bias
    self.layer_norm_eps = layer_norm_eps
    self.hidden_dropout_prob = hidden_dropout_prob
    self.intermediate_size = intermediate_size
    self.hidden_act = hidden_act
    self.num_hidden_layers = num_hidden_layers
    # ---- relative position bias buckets ----
    self.rel_pos_bins = rel_pos_bins
    self.max_rel_pos = max_rel_pos
    self.rel_2d_pos_bins = rel_2d_pos_bins
    self.max_rel_2d_pos = max_rel_2d_pos
    self.max_position_embeddings = max_position_embeddings
    self.max_2d_position_embeddings = max_2d_position_embeddings
    self.type_vocab_size = type_vocab_size
    # vocab only holds the 4 special tokens below (start/pad/end/pred)
    self.vocab_size = vocab_size
    self.initializer_range = initializer_range
    # ---- special token ids used to lay out the reading-order sequence ----
    self.start_token_id = start_token_id
    self.pad_token_id = pad_token_id
    self.end_token_id = end_token_id
    self.pred_token_id = pred_token_id
    # ---- spatial (bounding-box) embeddings ----
    self.coordinate_size = coordinate_size
    self.shape_size = shape_size
    self.num_classes = num_classes
    # ---- pairwise relation bias + global pointer head ----
    self.relation_bias_embed_dim = relation_bias_embed_dim
    self.relation_bias_theta = relation_bias_theta
    self.relation_bias_scale = relation_bias_scale
    self.global_pointer_head_size = global_pointer_head_size
    self.gp_dropout_value = gp_dropout_value
    # NOTE(review): token ids are stored directly instead of being forwarded to the
    # base class (e.g. `super().__init__(pad_token_id=pad_token_id, ...)`) — confirm
    # the PreTrainedConfig base does not reset them from kwargs after this call.
    super().__init__(**kwargs)
class PPDocLayoutV2Config(PreTrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`PP-DocLayoutV2`]. It is used to instantiate a
    PP-DocLayoutV2 model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of the PP-DocLayoutV2
    [PaddlePaddle/PP-DocLayoutV2_safetensors](https://huggingface.co/PaddlePaddle/PP-DocLayoutV2_safetensors) architecture.

    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.

    Args:
        initializer_range (`float`, *optional*, defaults to 0.01):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        initializer_bias_prior_prob (`float`, *optional*):
            The prior probability used by the bias initializer to initialize biases for `enc_score_head` and `class_embed`.
            If `None`, `prior_prob` computed as `prior_prob = 1 / (num_labels + 1)` while initializing model weights.
        layer_norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon used by the layer normalization layers.
        batch_norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon used by the batch normalization layers.
        backbone_config (`Union[dict, "PreTrainedConfig"]`, *optional*, defaults to `RTDetrResNetConfig()`):
            The configuration of the backbone model.
        freeze_backbone_batch_norms (`bool`, *optional*, defaults to `True`):
            Whether to freeze the batch normalization layers in the backbone.
        encoder_hidden_dim (`int`, *optional*, defaults to 256):
            Dimension of the layers in hybrid encoder.
        encoder_in_channels (`list`, *optional*, defaults to `[512, 1024, 2048]`):
            Multi level features input for encoder.
        feat_strides (`list[int]`, *optional*, defaults to `[8, 16, 32]`):
            Strides used in each feature map.
        encoder_layers (`int`, *optional*, defaults to 1):
            Total of layers to be used by the encoder.
        encoder_ffn_dim (`int`, *optional*, defaults to 1024):
            Dimension of the "intermediate" (often named feed-forward) layer in decoder.
        encoder_attention_heads (`int`, *optional*, defaults to 8):
            Number of attention heads for each attention layer in the Transformer encoder.
        dropout (`float`, *optional*, defaults to 0.0):
            The ratio for all dropout layers.
        activation_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for activations inside the fully connected layer.
        encode_proj_layers (`list[int]`, *optional*, defaults to `[2]`):
            Indexes of the projected layers to be used in the encoder.
        positional_encoding_temperature (`int`, *optional*, defaults to 10000):
            The temperature parameter used to create the positional encodings.
        encoder_activation_function (`str`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"silu"` and `"gelu_new"` are supported.
        activation_function (`str`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the general layer. If string, `"gelu"`,
            `"relu"`, `"silu"` and `"gelu_new"` are supported.
        eval_size (`tuple[int, int]`, *optional*):
            Height and width used to compute the effective height and width of the position embeddings after taking
            into account the stride.
        normalize_before (`bool`, *optional*, defaults to `False`):
            Determine whether to apply layer normalization in the transformer encoder layer before self-attention and
            feed-forward modules.
        hidden_expansion (`float`, *optional*, defaults to 1.0):
            Expansion ratio to enlarge the dimension size of RepVGGBlock and CSPRepLayer.
        d_model (`int`, *optional*, defaults to 256):
            Dimension of the layers exclude hybrid encoder.
        num_queries (`int`, *optional*, defaults to 300):
            Number of object queries.
        decoder_in_channels (`list`, *optional*, defaults to `[256, 256, 256]`):
            Multi level features dimension for decoder
        decoder_ffn_dim (`int`, *optional*, defaults to 1024):
            Dimension of the "intermediate" (often named feed-forward) layer in decoder.
        num_feature_levels (`int`, *optional*, defaults to 3):
            The number of input feature levels.
        decoder_n_points (`int`, *optional*, defaults to 4):
            The number of sampled keys in each feature level for each attention head in the decoder.
        decoder_layers (`int`, *optional*, defaults to 6):
            Number of decoder layers.
        decoder_attention_heads (`int`, *optional*, defaults to 8):
            Number of attention heads for each attention layer in the Transformer decoder.
        decoder_activation_function (`str`, *optional*, defaults to `"relu"`):
            The non-linear activation function (function or string) in the decoder. If string, `"gelu"`,
            `"relu"`, `"silu"` and `"gelu_new"` are supported.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        num_denoising (`int`, *optional*, defaults to 100):
            The total number of denoising tasks or queries to be used for contrastive denoising.
        label_noise_ratio (`float`, *optional*, defaults to 0.5):
            The fraction of denoising labels to which random noise should be added.
        box_noise_scale (`float`, *optional*, defaults to 1.0):
            Scale or magnitude of noise to be added to the bounding boxes.
        learn_initial_query (`bool`, *optional*, defaults to `False`):
            Indicates whether the initial query embeddings for the decoder should be learned during training
        anchor_image_size (`tuple[int, int]`, *optional*):
            Height and width of the input image used during evaluation to generate the bounding box anchors. If None, automatic generate anchor is applied.
        disable_custom_kernels (`bool`, *optional*, defaults to `True`):
            Whether to disable custom kernels.
        is_encoder_decoder (`bool`, *optional*, defaults to `True`):
            Whether the architecture has an encoder decoder structure.
        class_thresholds (`list[float]`, *optional*):
            The thresholds for each label.
        class_order (`list[int]`, *optional*):
            The priority for each label.
        reading_order_config (`dict`, *optional*):
            The configuration of a `PPDocLayoutV2ReadingOrder`.

    Examples:

    ```python
    >>> from transformers import PPDocLayoutV2Config, PPDocLayoutV2ForObjectDetection

    >>> # Initializing a PP-DocLayoutV2 configuration
    >>> configuration = PPDocLayoutV2Config()

    >>> # Initializing a model (with random weights) from the configuration
    >>> model = PPDocLayoutV2ForObjectDetection(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "pp_doclayout_v2"
    sub_configs = {"backbone_config": AutoConfig, "reading_order_config": PPDocLayoutV2ReadingOrderConfig}
    layer_types = ("basic", "bottleneck")
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        initializer_range=0.01,
        initializer_bias_prior_prob=None,
        layer_norm_eps=1e-5,
        batch_norm_eps=1e-5,
        # backbone
        backbone_config=None,
        freeze_backbone_batch_norms=True,
        # encoder HybridEncoder
        encoder_hidden_dim=256,
        encoder_in_channels=[512, 1024, 2048],
        feat_strides=[8, 16, 32],
        encoder_layers=1,
        encoder_ffn_dim=1024,
        encoder_attention_heads=8,
        dropout=0.0,
        activation_dropout=0.0,
        encode_proj_layers=[2],
        positional_encoding_temperature=10000,
        encoder_activation_function="gelu",
        activation_function="silu",
        eval_size=None,
        normalize_before=False,
        hidden_expansion=1.0,
        # decoder PPDocLayoutV2Transformer
        d_model=256,
        num_queries=300,
        decoder_in_channels=[256, 256, 256],
        decoder_ffn_dim=1024,
        num_feature_levels=3,
        decoder_n_points=4,
        decoder_layers=6,
        decoder_attention_heads=8,
        decoder_activation_function="relu",
        attention_dropout=0.0,
        num_denoising=100,
        label_noise_ratio=0.5,
        box_noise_scale=1.0,
        learn_initial_query=False,
        anchor_image_size=None,
        disable_custom_kernels=True,
        is_encoder_decoder=True,
        # label
        class_thresholds=None,
        class_order=None,
        reading_order_config=None,
        **kwargs,
    ):
        self.initializer_range = initializer_range
        self.initializer_bias_prior_prob = initializer_bias_prior_prob
        self.layer_norm_eps = layer_norm_eps
        self.batch_norm_eps = batch_norm_eps
        # Normalize the reading-order sub-config: a dict is expanded into the
        # sub-config class, None gets defaults, and an already-instantiated
        # config (e.g. when round-tripping a composite config) is kept as-is.
        # The pass-through branch is required — without it the attribute would
        # never be set for config instances and later reads would raise.
        if isinstance(reading_order_config, dict):
            reading_order_config = self.sub_configs["reading_order_config"](**reading_order_config)
        elif reading_order_config is None:
            reading_order_config = self.sub_configs["reading_order_config"]()
        self.reading_order_config = reading_order_config
        backbone_config, kwargs = consolidate_backbone_kwargs_to_config(
            backbone_config=backbone_config,
            default_config_type="hgnet_v2",
            default_config_kwargs={
                "arch": "L",
                "return_idx": [1, 2, 3],
                "freeze_stem_only": True,
                "freeze_at": 0,
                "freeze_norm": True,
                "lr_mult_list": [0, 0.05, 0.05, 0.05, 0.05],
                "out_features": ["stage2", "stage3", "stage4"],
            },
            **kwargs,
        )
        self.backbone_config = backbone_config
        self.freeze_backbone_batch_norms = freeze_backbone_batch_norms
        # ---- encoder ----
        self.encoder_hidden_dim = encoder_hidden_dim
        # list(...) copies guard the mutable defaults against shared mutation
        self.encoder_in_channels = list(encoder_in_channels)
        self.feat_strides = list(feat_strides)
        self.encoder_layers = encoder_layers
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_attention_heads = encoder_attention_heads
        self.dropout = dropout
        self.activation_dropout = activation_dropout
        self.encode_proj_layers = list(encode_proj_layers)
        self.positional_encoding_temperature = positional_encoding_temperature
        self.encoder_activation_function = encoder_activation_function
        self.activation_function = activation_function
        self.eval_size = list(eval_size) if eval_size is not None else None
        self.normalize_before = normalize_before
        self.hidden_expansion = hidden_expansion
        # ---- decoder ----
        self.d_model = d_model
        self.num_queries = num_queries
        self.decoder_in_channels = list(decoder_in_channels)
        self.decoder_ffn_dim = decoder_ffn_dim
        self.num_feature_levels = num_feature_levels
        self.decoder_n_points = decoder_n_points
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_activation_function = decoder_activation_function
        self.attention_dropout = attention_dropout
        self.num_denoising = num_denoising
        self.label_noise_ratio = label_noise_ratio
        self.box_noise_scale = box_noise_scale
        self.learn_initial_query = learn_initial_query
        self.anchor_image_size = list(anchor_image_size) if anchor_image_size is not None else None
        self.disable_custom_kernels = disable_custom_kernels
        self.class_thresholds = class_thresholds
        self.class_order = class_order
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
class PPDocLayoutV2ImageProcessorFast(PPDocLayoutV3ImageProcessorFast):
    """Fast image processor for PP-DocLayoutV2: same as PPDocLayoutV3 minus polygon extraction."""

    def extract_custom_vertices(self):
        raise AttributeError("Not needed for PPDocLayoutV2")

    def _mask2polygon(self):
        raise AttributeError("Not needed for PPDocLayoutV2")

    def _extract_polygon_points_by_masks(self):
        raise AttributeError("Not needed for PPDocLayoutV2")

    def post_process_object_detection(
        self,
        outputs,
        threshold: float = 0.5,
        target_sizes: TensorType | list[tuple] | None = None,
    ):
        """
        Convert raw model outputs into per-image detections in (top_left_x, top_left_y,
        bottom_right_x, bottom_right_y) format, ordered by predicted reading order.
        Only supports PyTorch. Unlike PPDocLayoutV3, no `polygon_points` are returned.

        Args:
            outputs:
                Raw outputs of the model (must expose `logits`, `pred_boxes` and `order_logits`).
            threshold (`float`, *optional*, defaults to 0.5):
                Minimum sigmoid score for a prediction to be kept.
            target_sizes (`TensorType` or `list[tuple]`, *optional*):
                One `(height, width)` per image; when given, boxes are rescaled to absolute pixels.

        Returns:
            `list[Dict]`: one dict per image with `scores`, `labels`, `boxes` and `order_seq`,
            all sorted by reading order.
        """
        logits = outputs.logits
        order_seqs = self._get_order_seqs(outputs.order_logits)

        # (cx, cy, w, h) -> (x0, y0, x1, y1)
        centers, dims = torch.split(outputs.pred_boxes, 2, dim=-1)
        corner_boxes = torch.cat([centers - 0.5 * dims, centers + 0.5 * dims], dim=-1)

        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )
            sizes = torch.as_tensor(target_sizes) if isinstance(target_sizes, list) else target_sizes
            img_height, img_width = sizes.unbind(1)
            scale = torch.stack([img_width, img_height, img_width, img_height], dim=1).to(corner_boxes.device)
            corner_boxes = corner_boxes * scale[:, None, :]

        num_top_queries = logits.shape[1]
        num_classes = logits.shape[2]
        # Flattened top-k over (query, class) pairs; decode back to query index and label.
        probs = torch.nn.functional.sigmoid(logits)
        scores, flat_index = torch.topk(probs.flatten(1), num_top_queries, dim=-1)
        labels = flat_index % num_classes
        query_index = flat_index // num_classes
        corner_boxes = corner_boxes.gather(
            dim=1, index=query_index.unsqueeze(-1).repeat(1, 1, corner_boxes.shape[-1])
        )
        order_seqs = order_seqs.gather(dim=1, index=query_index)

        results = []
        for score, label, box, order_seq in zip(scores, labels, corner_boxes, order_seqs):
            keep = score >= threshold
            # Sort the surviving detections by their predicted reading order.
            kept_order, permutation = torch.sort(order_seq[keep])
            results.append(
                {
                    "scores": score[keep][permutation],
                    "labels": label[keep][permutation],
                    "boxes": box[keep][permutation],
                    "order_seq": kept_order,
                }
            )
        return results
class PPDocLayoutV2GlobalPointer(PPDocLayoutV3GlobalPointer):
    # Same pointer head as PPDocLayoutV3, but the paired start/end projections come
    # from one dense layer of width 2 * head_size instead of the parent's layout.
    def __init__(self, config):
        super().__init__()
        # NOTE(review): relies on the parent __init__ defining `self.head_size`
        # (arg-less super().__init__() is the modular-transformers convention,
        # expanded by the converter) — confirm against PPDocLayoutV3GlobalPointer.
        self.dense = nn.Linear(config.hidden_size, self.head_size * 2)
class PPDocLayoutV2PositionRelationEmbedding(nn.Module):
    """Turns pairwise relative box geometry into per-attention-head bias maps.

    For every (source, target) box pair, a 4-dim log-space relative encoding is
    computed, expanded with RoPE-style sinusoids to 4 * embed_dim channels, and
    projected by a 1x1 convolution to one scalar bias per attention head.
    """

    # Non-persistent buffer holding the sinusoid inverse frequencies.
    inv_freq: torch.Tensor

    def __init__(self, config, device=None):
        super().__init__()
        self.config = config
        self.embed_dim = config.relation_bias_embed_dim
        self.scale = config.relation_bias_scale
        # 1x1 conv mixing the 4 * embed_dim relative-geometry channels into one
        # bias value per attention head.
        self.pos_proj = nn.Conv2d(
            in_channels=self.embed_dim * 4, out_channels=config.num_attention_heads, kernel_size=1
        )
        inv_freq, self.attention_scaling = self.compute_default_rope_parameters(config, device)
        self.register_buffer("inv_freq", inv_freq, persistent=False)

    @staticmethod
    def compute_default_rope_parameters(
        config: PPDocLayoutV2Config | None = None,
        device: Optional["torch.device"] = None,
        seq_len: int | None = None,
    ) -> tuple["torch.Tensor", float]:
        """
        Computes the inverse frequencies according to the original RoPE implementation
        Args:
            config ([`~transformers.PreTrainedConfig`]):
                The model configuration.
            device (`torch.device`):
                The device to use for initialization of the inverse frequencies.
            seq_len (`int`, *optional*):
                The current sequence length. Unused for this type of RoPE.
        Returns:
            Tuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the
            post-processing scaling factor applied to the computed cos/sin (unused in this type of RoPE).
        """
        base = config.relation_bias_theta
        dim = config.relation_bias_embed_dim
        half_dim = dim // 2
        attention_factor = 1.0  # Unused in this type of RoPE
        # Compute the inverse frequencies
        inv_freq = 1.0 / (
            base ** (torch.arange(0, dim, 2, dtype=torch.int64).to(device=device, dtype=torch.float) / half_dim)
        )
        return inv_freq, attention_factor

    def box_relative_encoding(
        self, source_boxes: torch.Tensor, target_boxes: torch.Tensor | None = None, epsilon: float = 1e-5
    ):
        """Return a [..., src, tgt, 4] log-space encoding of relative position and size.

        Boxes are expected in (cx, cy, w, h) layout: the first two channels are treated
        as coordinates and the last two as dimensions. `epsilon` guards the divisions
        and logs against zero-sized boxes.
        """
        # Broadcast to all (source, target) pairs.
        source_boxes, target_boxes = source_boxes.unsqueeze(-2), target_boxes.unsqueeze(-3)
        source_coordinates, source_dim = source_boxes[..., :2], source_boxes[..., 2:]
        target_coordinates, target_dim = target_boxes[..., :2], target_boxes[..., 2:]
        coordinate_difference = torch.abs(source_coordinates - target_coordinates)
        # log1p-style compression of center distance, normalized by source size.
        relative_coordinates = torch.log(coordinate_difference / (source_dim + epsilon) + 1.0)
        # Log ratio of box sizes.
        relative_dim = torch.log((source_dim + epsilon) / (target_dim + epsilon))
        relative_encoding = torch.cat([relative_coordinates, relative_dim], dim=-1)
        return relative_encoding

    def get_position_embedding(self, x: torch.Tensor, scale: float = 100.0):
        """Expand each scalar feature into interleaved sin/cos sinusoids (last dim * embed_dim)."""
        embedding = (x * scale).unsqueeze(-1) * self.inv_freq
        embedding = torch.cat((embedding.sin(), embedding.cos()), dim=-1).flatten(start_dim=-2).to(x.dtype)
        return embedding

    def forward(self, source_boxes: torch.Tensor, target_boxes: torch.Tensor | None = None):
        """Return per-head bias maps of shape [batch, heads, num_src, num_tgt]."""
        if target_boxes is None:
            # Self-relations: every box against every other box of the same set.
            target_boxes = source_boxes
        # The geometric encoding carries no learnable parameters; only pos_proj trains.
        with torch.no_grad():
            relative_encoding = self.box_relative_encoding(source_boxes, target_boxes)
            position_embedding = self.get_position_embedding(relative_encoding, self.scale)
            # [batch, src, tgt, channels] -> [batch, channels, src, tgt] for the conv.
            position_embedding = position_embedding.permute(0, 3, 1, 2)
        out = self.pos_proj(position_embedding)
        return out
class PPDocLayoutV2ReadingOrderSelfAttention(LayoutLMv3SelfAttention):
    """LayoutLMv3-style self-attention whose 2D relative bias is added *unscaled*."""

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        output_attentions=False,
        rel_pos=None,
        rel_2d_pos=None,
    ):
        batch_size, seq_length, _ = hidden_states.shape
        # Project to multi-head layout: (batch, heads, seq, head_dim).
        query_layer = (
            self.query(hidden_states)
            .view(batch_size, -1, self.num_attention_heads, self.attention_head_size)
            .transpose(1, 2)
        )
        key_layer = (
            self.key(hidden_states)
            .view(batch_size, -1, self.num_attention_heads, self.attention_head_size)
            .transpose(1, 2)
        )
        value_layer = (
            self.value(hidden_states)
            .view(batch_size, -1, self.num_attention_heads, self.attention_head_size)
            .transpose(1, 2)
        )
        # Take the dot product between "query" and "key" to get the raw attention scores.
        # The attention scores QT K/√d could be significantly larger than input elements, and result in overflow.
        # Changing the computational order into QT(K/√d) alleviates the problem. (https://huggingface.co/papers/2105.13290)
        attention_scores = torch.matmul(query_layer / math.sqrt(self.attention_head_size), key_layer.transpose(-1, -2))
        # NOTE:
        # PPDocLayoutV2ReadingOrder applies unscaled rel_2d_pos
        # unlike LayoutLMv3 which uses (rel_pos + rel_2d_pos) / math.sqrt(self.attention_head_size)
        if rel_2d_pos is not None:
            attention_scores += rel_2d_pos
        elif self.has_relative_attention_bias:
            # Fallback path: 1D relative bias is still scaled, as in LayoutLMv3.
            attention_scores += rel_pos / math.sqrt(self.attention_head_size)
        if attention_mask is not None:
            # Apply the attention mask is (precomputed for all layers in RobertaModel forward() function)
            attention_scores = attention_scores + attention_mask
        # Normalize the attention scores to probabilities.
        # Use the trick of the CogView paper to stabilize training
        attention_probs = self.cogview_attention(attention_scores)
        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)
        context_layer = torch.matmul(attention_probs, value_layer)
        # Merge heads back: (batch, seq, all_head_size).
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(*new_context_layer_shape)
        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
        return outputs
class PPDocLayoutV2ReadingOrderSelfOutput(LayoutLMv3SelfOutput):
    """Post-attention projection; the LayerNorm is stored under `norm` to match checkpoints."""

    def __init__(self, config):
        super().__init__()
        # Replace the parent's `LayerNorm` attribute with the checkpoint's `norm` name.
        del self.LayerNorm
        self.norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        # Project, drop out, then add the residual and normalize.
        projected = self.dropout(self.dense(hidden_states))
        return self.norm(projected + input_tensor)
class PPDocLayoutV2ReadingOrderIntermediate(LayoutLMv3Intermediate):
    # Identical to LayoutLMv3; re-declared so the modular converter emits a
    # model-local class under the PPDocLayoutV2 prefix.
    pass
class PPDocLayoutV2ReadingOrderOutput(LayoutLMv3Output):
    """FFN output block; the LayerNorm is stored under `norm` to match checkpoints."""

    def __init__(self, config):
        super().__init__()
        # Swap the parent's `LayerNorm` attribute for the checkpoint's `norm` name.
        del self.LayerNorm
        self.norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        # Project, drop out, then add the residual and normalize.
        transformed = self.dropout(self.dense(hidden_states))
        return self.norm(transformed + input_tensor)
class PPDocLayoutV2ReadingOrderAttention(LayoutLMv3Attention):
    # Same wiring as LayoutLMv3Attention, but with the PPDocLayoutV2 sub-modules.
    def __init__(self, config):
        super().__init__()
        self.self = PPDocLayoutV2ReadingOrderSelfAttention(config)
        self.output = PPDocLayoutV2ReadingOrderSelfOutput(config)
class PPDocLayoutV2ReadingOrderLayer(LayoutLMv3Layer):
    # Same wiring as LayoutLMv3Layer, but with the PPDocLayoutV2 sub-modules.
    def __init__(self, config):
        super().__init__()
        self.attention = PPDocLayoutV2ReadingOrderAttention(config)
        self.intermediate = PPDocLayoutV2ReadingOrderIntermediate(config)
        self.output = PPDocLayoutV2ReadingOrderOutput(config)
class PPDocLayoutV2ReadingOrderEncoder(LayoutLMv3Encoder):
    """LayoutLMv3 encoder whose 2D positional bias comes from pairwise box relations."""

    def __init__(self, config):
        super().__init__(config)
        self.layer = nn.ModuleList(
            PPDocLayoutV2ReadingOrderLayer(config) for _ in range(config.num_hidden_layers)
        )
        self.rel_bias_module = PPDocLayoutV2PositionRelationEmbedding(config)

    def _cal_2d_pos_emb(self, bbox):
        """Return per-head relation biases computed from (cx, cy, w, h) boxes."""
        x_min, y_min, x_max, y_max = bbox.unbind(-1)
        # Clamp degenerate boxes so widths/heights stay strictly positive.
        widths = (x_max - x_min).clamp(min=1e-3)
        heights = (y_max - y_min).clamp(min=1e-3)
        centers_x = (x_min + x_max) * 0.5
        centers_y = (y_min + y_max) * 0.5
        cxcywh_boxes = torch.stack([centers_x, centers_y, widths, heights], dim=-1)
        return self.rel_bias_module(cxcywh_boxes)
class PPDocLayoutV2TextEmbeddings(LayoutLMv3TextEmbeddings):
    """LayoutLMv3 text embeddings with a projected spatial (bounding-box) term.

    The LayerNorm lives under `norm` (checkpoint naming); normalization and dropout
    are applied by the caller, not inside `forward`.
    """

    def __init__(self, config):
        super().__init__(config)
        # Rename the parent's LayerNorm to match the checkpoint's `norm` key.
        del self.LayerNorm
        self.norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        # Project the concatenated coordinate/shape embeddings into the hidden size.
        self.spatial_proj = nn.Linear(
            4 * config.coordinate_size + 2 * config.shape_size, config.hidden_size
        )

    def forward(
        self,
        input_ids=None,
        bbox=None,
        token_type_ids=None,
        position_ids=None,
        inputs_embeds=None,
    ):
        if position_ids is None:
            if input_ids is not None:
                # Derive position ids from the token ids; padding stays padded.
                position_ids = self.create_position_ids_from_input_ids(input_ids, self.padding_idx).to(
                    input_ids.device
                )
            else:
                position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds)

        input_shape = input_ids.size() if input_ids is not None else inputs_embeds.size()[:-1]
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)

        # Word + segment + 1D-position + projected spatial embeddings.
        embeddings = (
            inputs_embeds
            + self.token_type_embeddings(token_type_ids)
            + self.position_embeddings(position_ids)
            + self.spatial_proj(self.calculate_spatial_position_embeddings(bbox))
        )
        return embeddings
@auto_docstring
class PPDocLayoutV2PreTrainedModel(RTDetrPreTrainedModel):
    @torch.no_grad()
    def _init_weights(self, module):
        """Initialize the weights"""
        super()._init_weights(module)
        if isinstance(module, PPDocLayoutV2TextEmbeddings):
            # Refill the position_ids buffer with 0..max_len-1.
            init.copy_(module.position_ids, torch.arange(module.position_ids.shape[-1]).expand((1, -1)))
        if isinstance(module, nn.Embedding):
            init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                # Keep the padding embedding at zero so padded positions contribute nothing.
                init.zeros_(module.weight.data[module.padding_idx])
        if isinstance(module, PPDocLayoutV2PositionRelationEmbedding):
            # inv_freq is a non-persistent buffer (never loaded from checkpoints),
            # so it is recomputed here whenever weights are (re-)initialized.
            inv_freq, _ = module.compute_default_rope_parameters(module.config, module.inv_freq.device)
            module.register_buffer("inv_freq", inv_freq, persistent=False)
@auto_docstring(
    custom_intro="""
    PP-DocLayoutV2 ReadingOrder Model. This model consists of an encoder and a GlobalPointer head.
    It takes layout features as input and outputs logits representing the relative ordering relationships
    between elements, which are used to determine the final reading sequence.
    """
)
class PPDocLayoutV2ReadingOrder(PPDocLayoutV2PreTrainedModel):
    # Attention is based on LayoutLMv3 (no interface)
    _supports_sdpa = False
    _supports_flash_attn = False
    _supports_attention_backend = False
    _supports_flex_attn = False

    def __init__(self, config):
        super().__init__(config)
        self.embeddings = PPDocLayoutV2TextEmbeddings(config)
        self.label_embeddings = nn.Embedding(config.num_classes, config.hidden_size)
        self.label_features_projection = nn.Linear(config.hidden_size, config.hidden_size)
        self.encoder = PPDocLayoutV2ReadingOrderEncoder(config)
        self.relative_head = PPDocLayoutV2GlobalPointer(config)
        self.config = config
        self.post_init()

    @auto_docstring
    def forward(self, boxes, labels=None, mask=None, **kwargs: Unpack[TransformersKwargs]):
        r"""
        boxes (`torch.Tensor` of shape `(batch_size, sequence_length, 4)`):
            Bounding box coordinates of the detected layout elements **in [0, 1000] scale**.
            Format is `[x_min, y_min, x_max, y_max]`.
            The tensor usually contains sorted valid boxes followed by zero-padding.
        labels (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            The **remapped** class indices for each layout element.
            These are not necessarily the raw detection class IDs, but indices mapped via
            `config.class_order` (e.g., mapping text/title/figure to specific reading-order category IDs).
        mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Boolean or Binary mask indicating valid detected elements after threshold filtering.
            - True: Valid layout element.
            - False: Padding/Empty element.
            Used to determine the sequence length (`num_pred`) for the pointer mechanism.
            If `None`, every element in `boxes` is treated as valid.
        """
        # `mask` is documented as optional: default to "every box is valid" instead
        # of crashing on `mask.device` when it is omitted.
        if mask is None:
            mask = torch.ones(boxes.shape[:2], dtype=torch.bool, device=boxes.device)
        device = mask.device
        batch_size, seq_len = mask.shape
        num_pred = mask.sum(dim=1)

        # Sequence layout per sample: [start] [pred x num_pred] [end] [pad ...],
        # hence seq_len + 2 positions overall.
        input_ids = torch.full(
            (batch_size, seq_len + 2), self.config.pad_token_id, dtype=torch.long, device=boxes.device
        )
        input_ids[:, 0] = self.config.start_token_id
        pred_col_idx = torch.arange(seq_len + 2, device=device).unsqueeze(0)
        # Columns 1..num_pred hold the prediction placeholders.
        pred_mask = (pred_col_idx >= 1) & (pred_col_idx <= num_pred.unsqueeze(1))
        input_ids[pred_mask] = self.config.pred_token_id
        end_col_indices = num_pred + 1
        input_ids[torch.arange(batch_size, device=device), end_col_indices] = self.config.end_token_id

        # Zero boxes act as placeholders for the start/end special tokens.
        pad_box = torch.zeros(size=[boxes.shape[0], 1, boxes.shape[-1]], dtype=boxes.dtype, device=boxes.device)
        pad_boxes = torch.cat([pad_box, boxes, pad_box], dim=1)
        bbox_embedding = self.embeddings(input_ids=input_ids, bbox=pad_boxes.long())

        if labels is not None:
            # Class-label features, padded to align with the special tokens.
            label_embs = self.label_embeddings(labels)
            label_proj = self.label_features_projection(label_embs)
            pad = torch.zeros(
                size=[label_proj.shape[0], 1, label_proj.shape[-1]], dtype=label_proj.dtype, device=labels.device
            )
            label_proj = torch.cat([pad, label_proj, pad], dim=1)
        else:
            label_proj = torch.zeros_like(bbox_embedding)

        # Norm and dropout are applied here (the embeddings module skips them).
        final_embeddings = bbox_embedding + label_proj
        final_embeddings = self.embeddings.norm(final_embeddings)
        final_embeddings = self.embeddings.dropout(final_embeddings)

        # Attend over [start] + predictions + [end]; ignore trailing padding.
        attention_mask = pred_col_idx < (num_pred + 2).unsqueeze(1)
        attention_mask = create_bidirectional_mask(
            config=self.config,
            inputs_embeds=final_embeddings,
            attention_mask=attention_mask,
        )
        encoder_output = self.encoder(hidden_states=final_embeddings, bbox=pad_boxes, attention_mask=attention_mask)
        encoder_output = encoder_output.last_hidden_state
        # Drop the start/end positions before scoring pairwise order relations.
        token = encoder_output[:, 1 : 1 + seq_len, :]
        read_order_logits = self.relative_head(token)
        return read_order_logits
@dataclass
@auto_docstring
class PPDocLayoutV2ForObjectDetectionOutput(ModelOutput):
    r"""
    logits (`torch.FloatTensor` of shape `(batch_size, num_queries, num_classes + 1)`):
        Classification logits (including no-object) for all queries.
    order_logits (`tuple` of `torch.FloatTensor` of shape `(batch_size, num_queries, num_queries)`):
        Order logits for all queries. The first dimension of each tensor is the batch size. The second dimension is the number of queries.
    pred_boxes (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`):
        Normalized boxes coordinates for all queries, represented as (center_x, center_y, width, height). These
        values are normalized in [0, 1], relative to the size of each individual image in the batch (disregarding
        possible padding). You can use [`~RTDetrImageProcessor.post_process_object_detection`] to retrieve the
        unnormalized (absolute) bounding boxes.
    last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`):
        Sequence of hidden-states at the output of the last layer of the decoder of the model.
    intermediate_hidden_states (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, hidden_size)`):
        Stacked intermediate hidden states (output of each layer of the decoder).
    intermediate_logits (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, config.num_labels)`):
        Stacked intermediate logits (logits of each layer of the decoder).
    intermediate_reference_points (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, 4)`):
        Stacked intermediate reference points (reference points of each layer of the decoder).
    intermediate_predicted_corners (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, 4)`):
        Stacked intermediate predicted corners (predicted corners of each layer of the decoder).
    initial_reference_points (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, 4)`):
        Stacked initial reference points (initial reference points of each layer of the decoder).
    init_reference_points (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`):
        Initial reference points sent through the Transformer decoder.
    enc_topk_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.num_labels)`, *optional*, returned when `config.with_box_refine=True` and `config.two_stage=True`):
        Logits of predicted bounding boxes coordinates in the encoder.
    enc_topk_bboxes (`torch.FloatTensor` of shape `(batch_size, sequence_length, 4)`, *optional*, returned when `config.with_box_refine=True` and `config.two_stage=True`):
        Logits of predicted bounding boxes coordinates in the encoder.
    enc_outputs_class (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.num_labels)`, *optional*, returned when `config.with_box_refine=True` and `config.two_stage=True`):
        Predicted bounding boxes scores where the top `config.two_stage_num_proposals` scoring bounding boxes are
        picked as region proposals in the first stage. Output of bounding box binary classification (i.e.
        foreground and background).
    enc_outputs_coord_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, 4)`, *optional*, returned when `config.with_box_refine=True` and `config.two_stage=True`):
        Logits of predicted bounding boxes coordinates in the first stage.
    denoising_meta_values (`dict`):
        Extra dictionary for the denoising related values
    """

    logits: torch.FloatTensor | None = None
    pred_boxes: torch.FloatTensor | None = None
    order_logits: tuple[torch.FloatTensor] | None = None
    last_hidden_state: torch.FloatTensor | None = None
    intermediate_hidden_states: torch.FloatTensor | None = None
    intermediate_logits: torch.FloatTensor | None = None
    intermediate_reference_points: torch.FloatTensor | None = None
    intermediate_predicted_corners: torch.FloatTensor | None = None
    initial_reference_points: torch.FloatTensor | None = None
    # The attention/hidden-state fields below follow the standard encoder-decoder
    # ModelOutput conventions and are documented by auto_docstring.
    decoder_hidden_states: tuple[torch.FloatTensor] | None = None
    decoder_attentions: tuple[torch.FloatTensor] | None = None
    cross_attentions: tuple[torch.FloatTensor] | None = None
    encoder_last_hidden_state: torch.FloatTensor | None = None
    encoder_hidden_states: tuple[torch.FloatTensor] | None = None
    encoder_attentions: tuple[torch.FloatTensor] | None = None
    init_reference_points: tuple[torch.FloatTensor] | None = None
    enc_topk_logits: torch.FloatTensor | None = None
    enc_topk_bboxes: torch.FloatTensor | None = None
    enc_outputs_class: torch.FloatTensor | None = None
    enc_outputs_coord_logits: torch.FloatTensor | None = None
    denoising_meta_values: dict | None = None
# Thin alias of `RTDetrModelOutput`: PP-DocLayoutV2 reuses the RT-DETR model
# output structure unchanged; the subclass only gives it a model-specific name
# and docstring intro.
@dataclass
@auto_docstring(
    custom_intro="""
    Base class for outputs of the PP-DocLayoutV2 encoder-decoder model.
    """
)
class PPDocLayoutV2ModelOutput(RTDetrModelOutput):
    pass
# Identical to `RTDetrMLPPredictionHead`; subclassed only to expose a
# model-specific class name in the generated modeling file.
class PPDocLayoutV2MLPPredictionHead(RTDetrMLPPredictionHead):
    pass
@auto_docstring(
    custom_intro="""
    PP-DocLayoutV2 Model (consisting of a backbone and encoder-decoder) outputting raw hidden states without any head on top.
    """
)
class PPDocLayoutV2Model(RTDetrModel):
    def __init__(self, config: PPDocLayoutV2Config):
        # Fix: forward `config` to the RT-DETR base initializer — calling
        # `super().__init__()` without it does not match `RTDetrModel.__init__`
        # (the sibling `PPDocLayoutV2ForObjectDetection.__init__` below also
        # passes `config`).
        super().__init__(config)
        # Override the parent's denoising class embedding to size it with
        # exactly `num_labels` entries — presumably dropping RT-DETR's extra
        # padding/background slot; confirm against `RTDetrModel`.
        self.denoising_class_embed = nn.Embedding(config.num_labels, config.d_model)
@auto_docstring(
    custom_intro="""
    PP-DocLayoutV2 Model (consisting of a backbone and encoder-decoder) outputting bounding boxes, logits and order_logits to be further
    decoded into scores, classes and their reading order.
    """
)
class PPDocLayoutV2ForObjectDetection(RTDetrForObjectDetection):
    _keys_to_ignore_on_load_missing = ["num_batches_tracked", "rel_pos_y_bias", "rel_pos_x_bias"]
    def __init__(self, config: PPDocLayoutV2Config):
        super().__init__(config)
        # Auxiliary head that predicts the reading order of detected layout elements.
        self.reading_order = PPDocLayoutV2ReadingOrder(config.reading_order_config)
        self.num_queries = config.num_queries
        self.config = config
        self.post_init()
    @auto_docstring
    @can_return_tuple
    def forward(
        self,
        pixel_values: torch.FloatTensor,
        pixel_mask: torch.LongTensor | None = None,
        encoder_outputs: torch.FloatTensor | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        decoder_inputs_embeds: torch.FloatTensor | None = None,
        labels: list[dict] | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple[torch.FloatTensor] | PPDocLayoutV2ForObjectDetectionOutput:
        r"""
        inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing the flattened feature map (output of the backbone + projection layer), you
            can choose to directly pass a flattened representation of an image.
        decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`, *optional*):
            Optionally, instead of initializing the queries with a tensor of zeros, you can choose to directly pass an
            embedded representation.
        labels (`list[Dict]` of len `(batch_size,)`, *optional*):
            Labels for computing the bipartite matching loss. List of dicts, each dictionary containing at least the
            following 2 keys: 'class_labels' and 'boxes' (the class labels and bounding boxes of an image in the batch
            respectively). The class labels themselves should be a `torch.LongTensor` of len `(number of bounding boxes
            in the image,)` and the boxes a `torch.FloatTensor` of shape `(number of bounding boxes in the image, 4)`.
        Examples:
        ```python
        >>> from transformers import AutoModelForObjectDetection, AutoImageProcessor
        >>> from PIL import Image
        >>> import requests
        >>> import torch
        >>> url = "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/layout_demo.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)
        >>> model_path = "PaddlePaddle/PP-DocLayoutV2_safetensors"
        >>> image_processor = AutoImageProcessor.from_pretrained(model_path)
        >>> model = AutoModelForObjectDetection.from_pretrained(model_path)
        >>> # prepare image for the model
        >>> inputs = image_processor(images=[image], return_tensors="pt")
        >>> # forward pass
        >>> outputs = model(**inputs)
        >>> # convert outputs (bounding boxes and class logits) to Pascal VOC format (xmin, ymin, xmax, ymax)
        >>> results = image_processor.post_process_object_detection(outputs, target_sizes=torch.tensor([image.size[::-1]]))
        >>> # print outputs
        >>> for result in results:
        ...     for idx, (score, label_id, box) in enumerate(zip(result["scores"], result["labels"], result["boxes"])):
        ...         score, label = score.item(), label_id.item()
        ...         box = [round(i, 2) for i in box.tolist()]
        ...         print(f"Order {idx + 1}: {model.config.id2label[label]}: {score:.2f} {box}")
        Order 1: text: 0.99 [335.39, 184.26, 896.49, 654.48]
        Order 2: paragraph_title: 0.97 [337.14, 683.49, 869.42, 798.27]
        Order 3: text: 0.99 [335.71, 843.04, 891.17, 1454.15]
        Order 4: text: 0.99 [920.42, 185.53, 1476.39, 464.25]
        Order 5: text: 0.98 [920.62, 483.75, 1480.52, 765.34]
        Order 6: text: 0.98 [920.58, 846.75, 1481.94, 1220.53]
        Order 7: text: 0.97 [921.12, 1239.27, 1468.87, 1377.33]
        Order 8: footnote: 0.82 [334.58, 1614.67, 1483.84, 1731.61]
        Order 9: text: 0.51 [334.58, 1614.67, 1483.84, 1731.61]
        Order 10: footnote: 0.83 [334.7, 1757.26, 1471.07, 1845.33]
        Order 11: text: 0.87 [336.65, 1910.28, 661.33, 1939.92]
        Order 12: footnote: 0.95 [336.16, 2114.52, 1450.28, 2171.74]
        Order 13: number: 0.87 [106.04, 2257.37, 136.05, 2281.98]
        Order 14: footer: 0.93 [338.6, 2255.94, 985.67, 2283.57]
        ```"""
        outputs = self.model(
            pixel_values,
            pixel_mask=pixel_mask,
            encoder_outputs=encoder_outputs,
            inputs_embeds=inputs_embeds,
            decoder_inputs_embeds=decoder_inputs_embeds,
            labels=labels,
            **kwargs,
        )
        # Use the predictions from the last decoder layer only.
        intermediate_reference_points = outputs.intermediate_reference_points
        intermediate_logits = outputs.intermediate_logits
        raw_bboxes = intermediate_reference_points[:, -1]
        logits = intermediate_logits[:, -1]
        # Convert normalized (cx, cy, w, h) boxes to (x1, y1, x2, y2) on a fixed
        # 1000x1000 canvas for the reading-order head.
        box_centers, box_sizes = raw_bboxes.split(2, dim=-1)
        bboxes = torch.cat([box_centers - 0.5 * box_sizes, box_centers + 0.5 * box_sizes], dim=-1) * 1000
        bboxes = bboxes.clamp_(0.0, 1000.0)
        # Per-class confidence filtering: keep a query when its best class
        # probability reaches that class's configured threshold.
        max_logits, class_ids = logits.max(dim=-1)
        max_probs = max_logits.sigmoid()
        class_thresholds = torch.tensor(self.config.class_thresholds, dtype=torch.float32, device=logits.device)
        thresholds = class_thresholds[class_ids]
        mask = max_probs >= thresholds
        # Sort the keep-mask descending so kept queries are moved to the front.
        # NOTE(review): `torch.argsort` is not guaranteed stable by default, so
        # the relative order among kept (or dropped) queries is unspecified —
        # confirm this is acceptable, or pass `stable=True`.
        indices = torch.argsort(mask.to(torch.int8), dim=1, descending=True)
        sorted_class_ids = torch.take_along_dim(class_ids, indices, dim=1)
        sorted_boxes = torch.take_along_dim(bboxes, indices[..., None].expand(-1, -1, 4), dim=1)
        pred_boxes = torch.take_along_dim(raw_bboxes, indices[..., None].expand(-1, -1, 4), dim=1)
        logits = torch.take_along_dim(logits, indices[..., None].expand(-1, -1, logits.size(-1)), dim=1)
        sorted_mask = torch.take_along_dim(mask, indices, dim=1)
        # Zero out boxes/labels of filtered queries so those slots act as padding.
        pad_boxes = torch.where(sorted_mask[..., None], sorted_boxes, torch.zeros_like(sorted_boxes))
        pad_class_ids = torch.where(sorted_mask, sorted_class_ids, torch.zeros_like(sorted_class_ids))
        # Remap detector class ids into the reading-order head's label space.
        class_order = torch.tensor(self.config.class_order, dtype=torch.int32, device=logits.device)
        pad_class_ids = class_order[pad_class_ids]
        order_logits = self.reading_order(
            boxes=pad_boxes,
            labels=pad_class_ids,
            # NOTE(review): `boxes`/`labels` were reordered with `indices` while
            # `mask` is the original unsorted keep-mask — confirm the head
            # expects the unsorted mask here.
            mask=mask,
        )
        # Restrict order logits to the query slots.
        order_logits = order_logits[:, :, : self.num_queries]
        if labels is not None:
            raise ValueError("PPDocLayoutV2ForObjectDetection does not support training")
        return PPDocLayoutV2ForObjectDetectionOutput(
            logits=logits,
            pred_boxes=pred_boxes,
            order_logits=order_logits,
            last_hidden_state=outputs.last_hidden_state,
            intermediate_hidden_states=outputs.intermediate_hidden_states,
            intermediate_logits=outputs.intermediate_logits,
            intermediate_reference_points=outputs.intermediate_reference_points,
            intermediate_predicted_corners=outputs.intermediate_predicted_corners,
            initial_reference_points=outputs.initial_reference_points,
            decoder_hidden_states=outputs.decoder_hidden_states,
            decoder_attentions=outputs.decoder_attentions,
            cross_attentions=outputs.cross_attentions,
            encoder_last_hidden_state=outputs.encoder_last_hidden_state,
            encoder_hidden_states=outputs.encoder_hidden_states,
            encoder_attentions=outputs.encoder_attentions,
            init_reference_points=outputs.init_reference_points,
            enc_topk_logits=outputs.enc_topk_logits,
            enc_topk_bboxes=outputs.enc_topk_bboxes,
            enc_outputs_class=outputs.enc_outputs_class,
            enc_outputs_coord_logits=outputs.enc_outputs_coord_logits,
            denoising_meta_values=outputs.denoising_meta_values,
        )
# Public symbols re-exported from this modular file.
__all__ = [
    "PPDocLayoutV2ForObjectDetection",
    "PPDocLayoutV2ImageProcessorFast",
    "PPDocLayoutV2Config",
    "PPDocLayoutV2Model",
    "PPDocLayoutV2PreTrainedModel",
    "PPDocLayoutV2ReadingOrder",
]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/pp_doclayout_v2/modular_pp_doclayout_v2.py",
"license": "Apache License 2.0",
"lines": 1008,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:tests/models/pp_doclayout_v2/test_modeling_pp_doclayout_v2.py | # coding = utf-8
# Copyright 2026 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PP-DocLayoutV2 model."""
import inspect
import math
import tempfile
import unittest
import requests
from parameterized import parameterized
from transformers import (
PPDocLayoutV2Config,
PPDocLayoutV2ForObjectDetection,
PPDocLayoutV2ImageProcessorFast,
is_torch_available,
is_vision_available,
)
from transformers.testing_utils import (
cleanup,
require_torch,
require_torch_accelerator,
require_vision,
slow,
torch_device,
)
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
class PPDocLayoutV2ModelTester:
    """Builds a tiny PPDocLayoutV2 configuration and random pixel inputs for the common model tests."""
    def __init__(
        self,
        parent,
        batch_size=3,
        is_training=False,
        n_targets=3,
        num_labels=25,
        initializer_range=0.01,
        layer_norm_eps=1e-5,
        batch_norm_eps=1e-5,
        # backbone
        backbone_config=None,
        # encoder HybridEncoder
        encoder_hidden_dim=32,
        encoder_in_channels=[32, 32, 32],
        feat_strides=[8, 16, 32],
        encoder_layers=1,
        encoder_ffn_dim=8,
        encoder_attention_heads=2,
        dropout=0.0,
        activation_dropout=0.0,
        encode_proj_layers=[2],
        positional_encoding_temperature=10000,
        encoder_activation_function="gelu",
        activation_function="silu",
        eval_size=None,
        normalize_before=False,
        # decoder PPDocLayoutV2Transformer
        d_model=32,
        num_queries=30,
        decoder_in_channels=[32, 32, 32],
        decoder_ffn_dim=8,
        num_feature_levels=3,
        decoder_n_points=4,
        decoder_layers=2,
        decoder_attention_heads=2,
        decoder_activation_function="relu",
        attention_dropout=0.0,
        num_denoising=0,
        label_noise_ratio=0.5,
        box_noise_scale=1.0,
        learn_initial_query=False,
        anchor_image_size=None,
        image_size=128,
        disable_custom_kernels=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = 3
        self.is_training = is_training
        self.n_targets = n_targets
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.batch_norm_eps = batch_norm_eps
        self.backbone_config = backbone_config
        self.encoder_hidden_dim = encoder_hidden_dim
        self.encoder_in_channels = encoder_in_channels
        self.feat_strides = feat_strides
        self.num_labels = num_labels
        self.encoder_layers = encoder_layers
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_attention_heads = encoder_attention_heads
        self.dropout = dropout
        self.activation_dropout = activation_dropout
        self.encode_proj_layers = encode_proj_layers
        self.positional_encoding_temperature = positional_encoding_temperature
        self.encoder_activation_function = encoder_activation_function
        self.activation_function = activation_function
        self.eval_size = eval_size
        self.normalize_before = normalize_before
        self.d_model = d_model
        self.num_queries = num_queries
        self.decoder_in_channels = decoder_in_channels
        self.decoder_ffn_dim = decoder_ffn_dim
        self.num_feature_levels = num_feature_levels
        self.decoder_n_points = decoder_n_points
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_activation_function = decoder_activation_function
        self.attention_dropout = attention_dropout
        self.num_denoising = num_denoising
        self.label_noise_ratio = label_noise_ratio
        self.box_noise_scale = box_noise_scale
        self.learn_initial_query = learn_initial_query
        self.anchor_image_size = anchor_image_size
        self.image_size = image_size
        self.disable_custom_kernels = disable_custom_kernels
        # Number of flattened spatial tokens of the stride-32 feature map.
        self.encoder_seq_length = math.ceil(self.image_size / 32) * math.ceil(self.image_size / 32)
    def prepare_config_and_inputs(self):
        """Return a small config plus a random pixel_values batch."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values
    def get_config(self):
        """Build a minimal PPDocLayoutV2Config (tiny HGNet-V2 backbone + tiny reading-order head)."""
        hidden_sizes = [10, 20, 30, 40]
        backbone_config = {
            "model_type": "hgnet_v2",
            "arch": "L",
            "return_idx": [1, 2, 3],
            "freeze_stem_only": True,
            "freeze_at": 0,
            "freeze_norm": True,
            "hidden_sizes": [32, 32, 32, 32],
            "stem_channels": [3, 32, 32],
            "stage_in_channels": [32, 32, 32, 32],
            "stage_mid_channels": [32, 32, 32, 32],
            "stage_out_channels": [32, 32, 32, 32],
            "lr_mult_list": [0, 0.05, 0.05, 0.05, 0.05],
            "out_features": ["stage2", "stage3", "stage4"],
        }
        reading_order_config = {
            "hidden_size": 16,
            "num_attention_heads": 8,
            "attention_probs_dropout_prob": 0.1,
            "has_relative_attention_bias": False,
            "has_spatial_attention_bias": True,
            "layer_norm_eps": 1e-5,
            "hidden_dropout_prob": 0.1,
            "intermediate_size": 16,
            "hidden_act": "gelu",
            "num_hidden_layers": 6,
            "rel_pos_bins": 32,
            "max_rel_pos": 128,
            "rel_2d_pos_bins": 64,
            "max_rel_2d_pos": 256,
            "num_labels": 510,
            "max_position_embeddings": 514,
            "max_2d_position_embeddings": 512,
            "type_vocab_size": 1,
            "vocab_size": 4,
            "start_token_id": 0,
            "pad_token_id": 1,
            "end_token_id": 2,
            "pred_token_id": 3,
            "coordinate_size": 171,
            "shape_size": 170,
            "num_classes": 20,
            "relation_bias_embed_dim": 16,
            "relation_bias_temperature": 10000,
            "relation_bias_scale": 100,
            "relative_head_num": 1,
            "relative_head_size": 64,
        }
        # Duplicate names (e.g. two "footer"/"header"/"text" ids) mirror the
        # released checkpoint's label map.
        id2label = {
            0: "abstract",
            1: "algorithm",
            2: "aside_text",
            3: "chart",
            4: "content",
            5: "formula",
            6: "doc_title",
            7: "figure_title",
            8: "footer",
            9: "footer",
            10: "footnote",
            11: "formula_number",
            12: "header",
            13: "header",
            14: "image",
            15: "formula",
            16: "number",
            17: "paragraph_title",
            18: "reference",
            19: "reference_content",
            20: "seal",
            21: "table",
            22: "text",
            23: "text",
            24: "vision_footnote",
        }
        num_labels = 25
        class_thresholds = [
            0.5,
            0.5,
            0.5,
            0.5,
            0.5,
            0.4,
            0.4,
            0.5,
            0.5,
            0.5,
            0.5,
            0.5,
            0.5,
            0.5,
            0.5,
            0.4,
            0.5,
            0.4,
            0.5,
            0.5,
            0.45,
            0.5,
            0.4,
            0.4,
            0.5,
        ]
        class_order = [4, 2, 14, 1, 5, 7, 8, 6, 11, 11, 9, 13, 10, 10, 1, 2, 3, 0, 2, 2, 12, 1, 2, 15, 6]
        # NOTE: `encoder_in_channels` deliberately uses `hidden_sizes[1:]`
        # rather than `self.encoder_in_channels`.
        return PPDocLayoutV2Config(
            backbone_config=backbone_config,
            reading_order_config=reading_order_config,
            encoder_hidden_dim=self.encoder_hidden_dim,
            encoder_in_channels=hidden_sizes[1:],
            feat_strides=self.feat_strides,
            encoder_layers=self.encoder_layers,
            encoder_ffn_dim=self.encoder_ffn_dim,
            encoder_attention_heads=self.encoder_attention_heads,
            dropout=self.dropout,
            activation_dropout=self.activation_dropout,
            encode_proj_layers=self.encode_proj_layers,
            positional_encoding_temperature=self.positional_encoding_temperature,
            encoder_activation_function=self.encoder_activation_function,
            activation_function=self.activation_function,
            eval_size=self.eval_size,
            normalize_before=self.normalize_before,
            d_model=self.d_model,
            num_queries=self.num_queries,
            decoder_in_channels=self.decoder_in_channels,
            decoder_ffn_dim=self.decoder_ffn_dim,
            num_feature_levels=self.num_feature_levels,
            decoder_n_points=self.decoder_n_points,
            decoder_layers=self.decoder_layers,
            decoder_attention_heads=self.decoder_attention_heads,
            decoder_activation_function=self.decoder_activation_function,
            attention_dropout=self.attention_dropout,
            num_denoising=self.num_denoising,
            label_noise_ratio=self.label_noise_ratio,
            box_noise_scale=self.box_noise_scale,
            learn_initial_query=self.learn_initial_query,
            anchor_image_size=self.anchor_image_size,
            image_size=self.image_size,
            disable_custom_kernels=self.disable_custom_kernels,
            id2label=id2label,
            num_labels=num_labels,
            class_thresholds=class_thresholds,
            class_order=class_order,
        )
    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the shape ModelTesterMixin expects."""
        config, pixel_values = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
    def create_and_check_pp_doclayout_v2_object_detection_head_model(self, config, pixel_values):
        """Run a forward pass and check logits / pred_boxes output shapes."""
        model = PPDocLayoutV2ForObjectDetection(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_queries, self.num_labels))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_queries, 4))
@require_torch
class PPDocLayoutV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-suite tests for PPDocLayoutV2, with model-specific overrides for hidden states and attentions."""
    all_model_classes = (PPDocLayoutV2ForObjectDetection,) if is_torch_available() else ()
    pipeline_model_mapping = {"object-detection": PPDocLayoutV2ForObjectDetection} if is_torch_available() else {}
    is_encoder_decoder = True
    test_missing_keys = False
    test_inputs_embeds = False
    test_resize_embeddings = False
    def setUp(self):
        self.model_tester = PPDocLayoutV2ModelTester(self)
        self.config_tester = ConfigTester(
            self,
            config_class=PPDocLayoutV2Config,
            has_text_modality=False,
        )
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_pp_doclayout_v2_object_detection_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_pp_doclayout_v2_object_detection_head_model(*config_and_inputs)
    @unittest.skip(reason="PPDocLayoutV2 does not support input and output embeddings")
    def test_model_get_set_embeddings(self):
        pass
    @unittest.skip(reason="PPDocLayoutV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass
    @unittest.skip(reason="Feed forward chunking is not implemented")
    def test_feed_forward_chunking(self):
        pass
    @unittest.skip(reason="PPDocLayoutV2 does not support training")
    def test_retain_grad_hidden_states_attentions(self):
        pass
    def test_forward_signature(self):
        # The first forward argument must be `pixel_values`.
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    @parameterized.expand(["float32", "float16", "bfloat16"])
    @require_torch_accelerator
    @slow
    def test_inference_with_different_dtypes(self, dtype_str):
        # Smoke test: the forward pass must run under each dtype.
        dtype = {
            "float32": torch.float32,
            "float16": torch.float16,
            "bfloat16": torch.bfloat16,
        }[dtype_str]
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device).to(dtype)
            model.eval()
            for key, tensor in inputs_dict.items():
                inputs_dict[key] = tensor.to(dtype)
            with torch.no_grad():
                _ = model(**self._prepare_for_class(inputs_dict, model_class))
    @parameterized.expand(["float32", "float16", "bfloat16"])
    @require_torch_accelerator
    @slow
    def test_inference_equivalence_for_static_and_dynamic_anchors(self, dtype_str):
        # Anchors precomputed at a fixed size vs. generated at run time must
        # produce matching logits.
        dtype = {
            "float32": torch.float32,
            "float16": torch.float16,
            "bfloat16": torch.bfloat16,
        }[dtype_str]
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        h, w = inputs_dict["pixel_values"].shape[-2:]
        # convert inputs to the desired dtype
        for key, tensor in inputs_dict.items():
            inputs_dict[key] = tensor.to(dtype)
        for model_class in self.all_model_classes:
            with tempfile.TemporaryDirectory() as tmpdirname:
                model_class(config).save_pretrained(tmpdirname)
                model_static = model_class.from_pretrained(
                    tmpdirname, anchor_image_size=[h, w], device_map=torch_device, dtype=dtype
                ).eval()
                model_dynamic = model_class.from_pretrained(
                    tmpdirname, anchor_image_size=None, device_map=torch_device, dtype=dtype
                ).eval()
                self.assertIsNotNone(model_static.config.anchor_image_size)
                self.assertIsNone(model_dynamic.config.anchor_image_size)
                with torch.no_grad():
                    outputs_static = model_static(**self._prepare_for_class(inputs_dict, model_class))
                    outputs_dynamic = model_dynamic(**self._prepare_for_class(inputs_dict, model_class))
                self.assertTrue(
                    torch.allclose(outputs_static.logits, outputs_dynamic.logits, rtol=1e-4, atol=1e-4),
                    f"Max diff: {(outputs_static.logits - outputs_dynamic.logits).abs().max()}",
                )
    def test_hidden_states_output(self):
        # Overridden: hidden-state counts/shapes come from the hybrid encoder's
        # channel list and the decoder layer count.
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", len(self.model_tester.encoder_in_channels) - 1
            )
            self.assertEqual(len(hidden_states), expected_num_layers)
            self.assertListEqual(
                list(hidden_states[1].shape[-2:]),
                [
                    self.model_tester.image_size // self.model_tester.feat_strides[-1],
                    self.model_tester.image_size // self.model_tester.feat_strides[-1],
                ],
            )
            if config.is_encoder_decoder:
                hidden_states = outputs.decoder_hidden_states
                expected_num_layers = getattr(
                    self.model_tester, "expected_num_hidden_layers", self.model_tester.decoder_layers + 1
                )
                self.assertIsInstance(hidden_states, (list, tuple))
                self.assertEqual(len(hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(hidden_states[0].shape[-2:]),
                    [self.model_tester.num_queries, self.model_tester.d_model],
                )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_attention_outputs(self):
        # Overridden: checks encoder self-attentions, decoder self-attentions
        # and deformable cross-attentions, plus the total output-tuple length.
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class._from_config(config, attn_implementation="eager")
            config = model.config
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions
            self.assertEqual(len(attentions), self.model_tester.encoder_layers)
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions
            self.assertEqual(len(attentions), self.model_tester.encoder_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [
                    self.model_tester.encoder_attention_heads,
                    self.model_tester.encoder_seq_length,
                    self.model_tester.encoder_seq_length,
                ],
            )
            out_len = len(outputs)
            correct_outlen = 13
            # loss is at first position
            if "labels" in inputs_dict:
                correct_outlen += 1  # loss is added to beginning
            # Object Detection model returns pred_logits and pred_boxes
            if model_class.__name__ == "PPDocLayoutV2ForObjectDetection":
                correct_outlen += 3
            self.assertEqual(out_len, correct_outlen)
            # decoder attentions
            decoder_attentions = outputs.decoder_attentions
            self.assertIsInstance(decoder_attentions, (list, tuple))
            self.assertEqual(len(decoder_attentions), self.model_tester.decoder_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [
                    self.model_tester.decoder_attention_heads,
                    self.model_tester.num_queries,
                    self.model_tester.num_queries,
                ],
            )
            # cross attentions
            cross_attentions = outputs.cross_attentions
            self.assertIsInstance(cross_attentions, (list, tuple))
            self.assertEqual(len(cross_attentions), self.model_tester.decoder_layers)
            self.assertListEqual(
                list(cross_attentions[0].shape[-3:]),
                [
                    self.model_tester.decoder_attention_heads,
                    self.model_tester.num_feature_levels,
                    self.model_tester.decoder_n_points,
                ],
            )
            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            if hasattr(self.model_tester, "num_hidden_states_types"):
                added_hidden_states = self.model_tester.num_hidden_states_types
            else:
                # RTDetr should maintain encoder_hidden_states output
                added_hidden_states = 2
            self.assertEqual(out_len + added_hidden_states, len(outputs))
            self_attentions = outputs.encoder_attentions
            self.assertEqual(len(self_attentions), self.model_tester.encoder_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [
                    self.model_tester.encoder_attention_heads,
                    self.model_tester.encoder_seq_length,
                    self.model_tester.encoder_seq_length,
                ],
            )
@require_torch
@require_vision
@slow
class PPDocLayoutV2ModelIntegrationTest(unittest.TestCase):
    """End-to-end checks against the released checkpoint on a real document image."""
    def setUp(self):
        model_path = "PaddlePaddle/PP-DocLayoutV2_safetensors"
        self.model = PPDocLayoutV2ForObjectDetection.from_pretrained(model_path).to(torch_device)
        self.image_processor = PPDocLayoutV2ImageProcessorFast.from_pretrained(model_path)
        url = "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/layout_demo.jpg"
        self.image = Image.open(requests.get(url, stream=True).raw)
    def tearDown(self):
        cleanup(torch_device, gc_collect=True)
    def test_inference_object_detection_head(self):
        inputs = self.image_processor(images=self.image, return_tensors="pt").to(torch_device)
        with torch.no_grad():
            outputs = self.model(**inputs)
        # Raw logits: reference slices recorded from a known-good run.
        expected_shape_logits = torch.Size((1, 300, self.model.config.num_labels))
        expected_logits = torch.tensor(
            [[-3.6572, -4.4185, -4.3930], [-3.7213, -4.5011, -4.6771], [-3.8721, -4.4524, -4.4162]]
        ).to(torch_device)
        self.assertEqual(outputs.logits.shape, expected_shape_logits)
        torch.testing.assert_close(outputs.logits[0, :3, :3], expected_logits, rtol=2e-2, atol=2e-2)
        # Raw (normalized) predicted boxes.
        expected_shape_boxes = torch.Size((1, 300, 4))
        expected_boxes = torch.tensor(
            [[0.3709, 0.4911, 0.3358], [0.7263, 0.4419, 0.3394], [0.3724, 0.1793, 0.3392]]
        ).to(torch_device)
        self.assertEqual(outputs.pred_boxes.shape, expected_shape_boxes)
        torch.testing.assert_close(outputs.pred_boxes[0, :3, :3], expected_boxes, rtol=2e-2, atol=2e-2)
        # Reading-order logits (large negative values mask invalid transitions).
        expected_shape_order_logits = torch.Size((1, 300, 300))
        expected_order_logits = torch.tensor(
            [
                [-10000.0000, 43.8388, -32.8785],
                [-10000.0000, -10000.0000, -63.9118],
                [-10000.0000, -10000.0000, -10000.0000],
            ]
        ).to(torch_device)
        self.assertEqual(outputs.order_logits.shape, expected_shape_order_logits)
        torch.testing.assert_close(outputs.order_logits[0, :3, :3], expected_order_logits, rtol=2e-2, atol=2e-2)
        # verify postprocessing
        results = self.image_processor.post_process_object_detection(
            outputs, threshold=0.5, target_sizes=[self.image.size[::-1]]
        )[0]
        expected_scores = torch.tensor(
            [
                0.9878,
                0.9675,
                0.9882,
                0.9852,
                0.9828,
                0.9843,
                0.9700,
                0.8182,
                0.5148,
                0.8273,
                0.8718,
                0.9494,
                0.8733,
                0.9266,
            ]
        ).to(torch_device)
        torch.testing.assert_close(results["scores"], expected_scores, rtol=2e-2, atol=2e-2)
        expected_labels = [22, 17, 22, 22, 22, 22, 22, 10, 22, 10, 22, 10, 16, 8]
        self.assertSequenceEqual(results["labels"].tolist(), expected_labels)
        # Boxes in absolute pixel coordinates after postprocessing.
        expected_slice_boxes = torch.tensor(
            [
                [335.3923, 184.2622, 896.4918, 654.4847],
                [337.1364, 683.4911, 869.4224, 798.2716],
                [335.7133, 843.0425, 891.1711, 1454.1525],
                [920.4213, 185.5302, 1476.3922, 464.2497],
            ]
        ).to(torch_device)
        torch.testing.assert_close(results["boxes"][:4], expected_slice_boxes, rtol=2e-2, atol=2e-2)
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/pp_doclayout_v2/test_modeling_pp_doclayout_v2.py",
"license": "Apache License 2.0",
"lines": 582,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:src/transformers/models/olmo_hybrid/convert_olmo_hybrid_weights_to_hf.py | # Copyright 2026 EleutherAI and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Convert OLMo Hybrid model checkpoints (with FLA layers) to HuggingFace format.
This script handles OLMo Hybrid models that mix standard attention layers with
linear attention (GatedDeltaNet) layers.
UPDATED: Now aligned with the OLMo-core conversion script, including support for:
- Configurable dtype (defaults to bfloat16)
- Configurable max_sequence_length via CLI
- Device selection
Sample usage:
```bash
TRUST_REMOTE_CODE=True python src/transformers/models/olmo_hybrid/convert_olmo_hybrid_weights_to_hf.py \
--input_dir /path/to/downloaded/olmo_hybrid/weights \
--output_dir /output/path
```
Thereafter, models can be loaded via:
```python
from transformers import OlmoHybridForCausalLM, AutoTokenizer
model = OlmoHybridForCausalLM.from_pretrained("/output/path")
tokenizer = AutoTokenizer.from_pretrained("/output/path")
```
Important note: you need to be able to host the whole model in RAM to execute this script.
"""
from __future__ import annotations
import argparse
import gc
import io
import json
import os
import pickle
import traceback
import uuid
from collections.abc import Sequence
from concurrent.futures import ThreadPoolExecutor, as_completed
from dataclasses import dataclass
from pathlib import Path
from typing import Any, cast
import torch
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.metadata import Metadata, MetadataIndex, StorageMeta
from torch.distributed.checkpoint.planner import LoadItemType, ReadItem
from torch.futures import Future
from transformers import AutoTokenizer, OlmoHybridConfig
# Mapping from string dtype names (as accepted on the CLI) to torch dtypes.
DTYPE_MAP = {name: getattr(torch, name) for name in ("bfloat16", "float16", "float32")}
def strtobool(val):
    """
    Convert a string representation of truth to True or False.
    True values are 'y', 'yes', 't', 'true', 'on', and '1'.
    False values are 'n', 'no', 'f', 'false', 'off', and '0'.
    Raises ValueError if 'val' is anything else.
    """
    # Booleans pass straight through.
    if isinstance(val, bool):
        return val
    normalized = str(val).lower()
    if normalized in {"y", "yes", "t", "true", "on", "1"}:
        return True
    if normalized in {"n", "no", "f", "false", "off", "0"}:
        return False
    raise ValueError(f"Invalid truth value {val!r}")
def read_json(path):
    """Load and return the JSON document stored at *path*."""
    with open(path, "r") as handle:
        return json.load(handle)
def write_json(text, path):
    """Serialize *text* as JSON and write it to *path*."""
    with open(path, "w") as handle:
        json.dump(text, handle)
def normalize_path(path: Path | str) -> str:
    """Return *path* as a string, dropping trailing slashes and any ``file://`` scheme prefix."""
    text = str(path)
    text = text.rstrip("/")
    return text.replace("file://", "")
def generate_uuid() -> str:
    """Return a freshly generated random UUID (version 4) as a string."""
    fresh = uuid.uuid4()
    return str(fresh)
def get_bytes_range(path: Path | str, bytes_start: int, num_bytes: int) -> bytes:
    """Read and return ``num_bytes`` bytes from *path*, starting at offset ``bytes_start``."""
    with open(path, "rb") as handle:
        handle.seek(bytes_start)
        return handle.read(num_bytes)
def _narrow_tensor_by_index(tensor: torch.Tensor, offsets: Sequence[int], sizes: Sequence[int]) -> torch.Tensor:
"""
Narrow the tensor according to ``offsets`` and ``sizes``.
"""
narrowed_tensor = tensor
for idx, (offset, size) in enumerate(zip(offsets, sizes)):
if size < tensor.size(idx):
narrowed_tensor = narrowed_tensor.narrow(idx, offset, size)
return narrowed_tensor
@dataclass
class _StorageInfo:
    """This is the per entry storage info."""
    # Shard file path, relative to the checkpoint root.
    relative_path: str
    # Byte offset of this entry within the shard file.
    offset: int
    # Number of bytes belonging to this entry.
    length: int
@dataclass
class _StoragePrefix:
    # Prefix string for storage entries — presumably shard file names; confirm
    # against the writer-side usage.
    prefix: str
class RemoteFileSystemReader(dist_cp.StorageReader):
    """
    A :class:`~torch.distributed.checkpoint.StorageReader` based on :class:`~torch.distributed.checkpoint.FileSystemReader`
    that can read data directly from cloud storage as well as a local directory.
    """
    def __init__(
        self,
        path: Path | str,
        *,
        thread_count: int | None = None,
        pre_download: bool = False,
        work_dir: Path | str | None = None,
    ):
        super().__init__()
        if thread_count is not None and thread_count <= 0:
            raise ValueError("thread count must be at least 1")
        self.path = normalize_path(path)
        # Number of worker threads used to fetch entries concurrently in ``read_data``.
        self.thread_count = thread_count or 1
        # NOTE(review): ``pre_download`` and ``work_dir`` are stored but never read by
        # any method of this class — presumably kept for interface compatibility; confirm.
        self.pre_download = pre_download
        self.work_dir = normalize_path(work_dir) if work_dir is not None else None
        # Maps each metadata index to its on-disk location; filled in by
        # ``set_up_storage_reader`` from the checkpoint metadata.
        self.storage_data: dict[MetadataIndex, _StorageInfo] = {}
        self.load_id = generate_uuid()
        # Lazily populated by ``read_metadata``.
        self._metadata: Metadata | None = None
    def _get_bytes(self, relative_path: str, offset: int, length: int) -> bytes:
        # Resolve the shard path against the checkpoint root and read the byte span.
        full_path = f"{self.path}/{relative_path}"
        return get_bytes_range(full_path, offset, length)
    def _get_content_for_read(self, read_item: ReadItem) -> tuple[ReadItem, bytes]:
        # Look up where this item lives and fetch its raw bytes.
        sinfo = self.storage_data[read_item.storage_index]
        content = self._get_bytes(sinfo.relative_path, sinfo.offset, sinfo.length)
        return (read_item, content)
    def reset(self, checkpoint_id: Path | str | None = None) -> None:
        # Drop cached storage info, optionally retarget to a new checkpoint,
        # and start a fresh load session id.
        self.storage_data = {}
        if checkpoint_id:
            self.path = normalize_path(checkpoint_id)
        self.load_id = generate_uuid()
    def read_data(self, plan: dist_cp.LoadPlan, planner: dist_cp.LoadPlanner) -> Future[None]:
        # Stage 1: fetch the raw bytes of every planned read item in parallel.
        with ThreadPoolExecutor(max_workers=self.thread_count) as executor:
            read_item_content_futures = []
            for read_item in plan.items:
                read_item_content_futures.append(executor.submit(self._get_content_for_read, read_item))
            read_item_content_results = []
            for f in as_completed(read_item_content_futures):
                try:
                    read_item_content_results.append(f.result())
                except BaseException:
                    # Surface the worker's traceback in the re-raised error.
                    raise RuntimeError(f"Original error:\n{traceback.format_exc()}")
            # Stage 2: hand each fetched payload to the planner.
            for read_item, content in read_item_content_results:
                bytes_io = io.BytesIO(content)
                bytes_io.seek(0)
                if read_item.type == LoadItemType.BYTE_IO:
                    planner.load_bytes(read_item, bytes_io)
                else:
                    # Tensor payload: deserialize, narrow to the requested slice,
                    # then copy into the planner's destination tensor.
                    tensor = cast(torch.Tensor, torch.load(bytes_io, map_location="cpu", weights_only=False))
                    tensor = _narrow_tensor_by_index(tensor, read_item.storage_offsets, read_item.lengths)
                    target_tensor = planner.resolve_tensor(read_item).detach()
                    assert target_tensor.size() == tensor.size(), (
                        f"req {read_item.storage_index} mismatch sizes {target_tensor.size()} vs {tensor.size()}"
                    )
                    target_tensor.copy_(tensor)
                    planner.commit_tensor(read_item, target_tensor)
        # The StorageReader API is async-shaped; return an already-completed future.
        fut: Future = Future()
        fut.set_result(None)
        return fut
    def read_metadata(self) -> Metadata:
        if self._metadata is None:
            try:
                # The ``.metadata`` file is a pickle: refuse to load it unless the
                # user explicitly opted in via the TRUST_REMOTE_CODE env var.
                # (This ValueError is not caught below — only FileNotFoundError is.)
                if not strtobool(os.environ.get("TRUST_REMOTE_CODE", "False")):
                    raise ValueError(
                        "This part uses `pickle.load` which is insecure and will execute arbitrary code that is potentially "
                        "malicious. It's recommended to never unpickle data that could have come from an untrusted source, or "
                        "that could have been tampered with. If you already verified the pickle data and decided to use it, "
                        "you can set the environment variable `TRUST_REMOTE_CODE` to `True` to allow it."
                    )
                with (Path(self.path) / ".metadata").open("rb") as metadata_file:
                    metadata = restricted_load(metadata_file)
            except FileNotFoundError as exc:
                # Suggest the conventional subdirectory if it holds a checkpoint.
                msg = f"'{self.path}' is not a distributed checkpoint folder."
                suggested_dir = os.path.join(self.path, "model_and_optim")
                if Path(os.path.join(suggested_dir, ".metadata")).exists():
                    msg += f" Did you mean to use '{suggested_dir}'?"
                raise FileNotFoundError(msg) from exc
            if getattr(metadata, "storage_meta", None) is None:
                metadata.storage_meta = StorageMeta()
            metadata.storage_meta.load_id = self.load_id
            self._metadata = metadata
        return self._metadata
    def set_up_storage_reader(self, metadata: Metadata, is_coordinator: bool) -> None:
        # Every rank reads its own data, so the coordinator flag is irrelevant here.
        del is_coordinator
        self.storage_data = metadata.storage_data
        assert self.storage_data is not None
    def prepare_local_plan(self, plan: dist_cp.LoadPlan) -> dist_cp.LoadPlan:
        # No plan transformation needed for this reader.
        return plan
    def prepare_global_plan(self, global_plan: list[dist_cp.LoadPlan]) -> list[dist_cp.LoadPlan]:
        # No global coordination needed for this reader.
        return global_plan
    @property
    def checkpoint_id(self) -> str:
        return self.path
    @classmethod
    def validate_checkpoint_id(cls, checkpoint_id: Path | str) -> bool:
        # Accept any id; validity is discovered when metadata is actually read.
        del checkpoint_id
        return True
class _RestrictedUnpickler(pickle.Unpickler):
"""
Custom unpickler that handles missing olmo_core module references.
This allows loading checkpoints saved with olmo_core without having it installed.
"""
def find_class(self, module, name):
if module.startswith("torch"):
return super().find_class(module, name)
if module in ("collections", "builtins", "_collections_abc"):
return super().find_class(module, name)
if module.startswith("olmo_core"):
return super().find_class("builtins", "dict") if name == "dict" else type(name, (), {})
return super().find_class(module, name)
def restricted_loads(data):
    """Load pickle data with restricted unpickler."""
    buffer = io.BytesIO(data)
    return _RestrictedUnpickler(buffer).load()
def restricted_load(file):
    """Load pickle file with restricted unpickler."""
    unpickler = _RestrictedUnpickler(file)
    return unpickler.load()
def load_model(model_path: str):
    """Load model state dict from distributed checkpoint."""
    # Private torch.distributed.checkpoint helpers; imported lazily so the module
    # can be imported without a full distributed setup.
    from torch.distributed.checkpoint.default_planner import _EmptyStateDictLoadPlanner
    from torch.distributed.checkpoint.state_dict_loader import _load_state_dict
    def _load_unsharded_keys(
        dir: Path | str,
        keys: list[str],
        *,
        pre_download: bool = False,
        work_dir: Path | str | None = None,
    ) -> dict[str, Any]:
        # Load only ``keys`` from the sharded checkpoint into a plain state dict;
        # ``no_dist=True`` loads without requiring an initialized process group.
        state_dict: dict[str, Any] = {}
        _load_state_dict(
            state_dict,
            storage_reader=RemoteFileSystemReader(dir, pre_download=pre_download, work_dir=work_dir),
            planner=_EmptyStateDictLoadPlanner(keys=keys),
            no_dist=True,
        )
        return state_dict
    # The ``.metadata`` file is a pickle: require explicit opt-in before loading.
    if not strtobool(os.environ.get("TRUST_REMOTE_CODE", "False")):
        raise ValueError(
            "This part uses `pickle.load` which is insecure and will execute arbitrary code that is potentially "
            "malicious. It's recommended to never unpickle data that could have come from an untrusted source, or "
            "that could have been tampered with. If you already verified the pickle data and decided to use it, "
            "you can set the environment variable `TRUST_REMOTE_CODE` to `True` to allow it."
        )
    with (Path(model_path) / ".metadata").open("rb") as metadata_file:
        metadata = restricted_load(metadata_file)
    # Only fetch model weights; other entries (e.g. optimizer state) are skipped.
    keys = [key for key in metadata.state_dict_metadata.keys() if key.startswith("model.")]
    return _load_unsharded_keys(model_path, keys)
def get_layer_types_from_config(olmo_config: dict) -> list[str]:
    """
    Determine the layer types (full_attention, linear_attention)
    from the OLMo config.
    """
    model_config = olmo_config["model"]
    n_layers = model_config["n_layers"]
    # Layers listed under fla_hybrid_attention_indices use full attention;
    # every other layer uses linear attention.
    hybrid_indices = set(model_config["block"].get("fla_hybrid_attention_indices", []))
    return [
        "full_attention" if layer_idx in hybrid_indices else "linear_attention"
        for layer_idx in range(n_layers)
    ]
def convert_attention_layer_weights(
    loaded: dict[str, torch.Tensor],
    layer_i: int,
) -> dict[str, torch.Tensor]:
    """Convert weights for an attention (full or sliding) layer."""
    # HF parameter-name suffix -> OLMo-core checkpoint-name suffix.
    suffix_map = {
        "self_attn.q_proj.weight": "attention.w_q.weight",
        "self_attn.k_proj.weight": "attention.w_k.weight",
        "self_attn.v_proj.weight": "attention.w_v.weight",
        "self_attn.o_proj.weight": "attention.w_out.weight",
        "self_attn.q_norm.weight": "attention.q_norm.weight",
        "self_attn.k_norm.weight": "attention.k_norm.weight",
        "mlp.gate_proj.weight": "feed_forward.w1.weight",
        "mlp.down_proj.weight": "feed_forward.w2.weight",
        "mlp.up_proj.weight": "feed_forward.w3.weight",
        "post_attention_layernorm.weight": "attention_norm.weight",
        "post_feedforward_layernorm.weight": "feed_forward_norm.weight",
    }
    return {
        f"model.layers.{layer_i}.{hf_suffix}": loaded[f"blocks.{layer_i}.{olmo_suffix}"]
        for hf_suffix, olmo_suffix in suffix_map.items()
    }
def convert_fla_layer_weights(
    loaded: dict[str, torch.Tensor],
    layer_i: int,
) -> dict[str, torch.Tensor]:
    """Convert weights for a FLA (GatedDeltaNet / linear attention) layer."""
    # HF parameter-name suffix -> OLMo-core checkpoint-name suffix.
    suffix_map = {
        "linear_attn.q_proj.weight": "fla.inner.q_proj.weight",
        "linear_attn.k_proj.weight": "fla.inner.k_proj.weight",
        "linear_attn.v_proj.weight": "fla.inner.v_proj.weight",
        "linear_attn.g_proj.weight": "fla.inner.g_proj.weight",
        "linear_attn.a_proj.weight": "fla.inner.a_proj.weight",
        "linear_attn.b_proj.weight": "fla.inner.b_proj.weight",
        "linear_attn.o_proj.weight": "fla.inner.o_proj.weight",
        "linear_attn.q_conv1d.weight": "fla.inner.q_conv1d.weight",
        "linear_attn.k_conv1d.weight": "fla.inner.k_conv1d.weight",
        "linear_attn.v_conv1d.weight": "fla.inner.v_conv1d.weight",
        "linear_attn.o_norm.weight": "fla.inner.o_norm.weight",
        "linear_attn.A_log": "fla.inner.A_log",
        "linear_attn.dt_bias": "fla.inner.dt_bias",
        "mlp.gate_proj.weight": "feed_forward.w1.weight",
        "mlp.down_proj.weight": "feed_forward.w2.weight",
        "mlp.up_proj.weight": "feed_forward.w3.weight",
        "attention_layer_norm.weight": "fla_norm.weight",
        "feedforward_layer_norm.weight": "feed_forward_norm.weight",
    }
    return {
        f"model.layers.{layer_i}.{hf_suffix}": loaded[f"blocks.{layer_i}.{olmo_suffix}"]
        for hf_suffix, olmo_suffix in suffix_map.items()
    }
def write_model(
    model_path: str,
    input_base_path: str,
    include_tokenizer: bool = True,
    tokenizer_id: str | None = None,
    max_sequence_length: int | None = None,
    dtype: torch.dtype = torch.bfloat16,
    device: str | None = None,
):
    """
    Convert OLMo Hybrid checkpoint to HuggingFace format.
    Args:
        model_path: Output directory for the HuggingFace model.
        input_base_path: Path to the OLMo checkpoint directory containing config.json and model_and_optim/.
        include_tokenizer: Whether to save the tokenizer alongside the model.
        tokenizer_id: HuggingFace tokenizer identifier. Defaults to the one in the config.
        max_sequence_length: Override for max sequence length. If None, read from config.
        dtype: Torch dtype for the output model weights.
        device: Device to use for loading/conversion (e.g., "cpu", "cuda"). Defaults to CPU.
    """
    # NOTE(review): ``device`` is accepted but never used below — conversion always
    # happens wherever torch.load placed the tensors (CPU). Confirm intent.
    os.makedirs(model_path, exist_ok=True)
    config_path = Path(input_base_path) / "config.json"
    olmo_config = json.loads(config_path.read_text())
    model_config = olmo_config["model"]
    block_config = model_config["block"]
    attention_config = block_config.get("attention", {})
    fla_config = block_config.get("fla", {})
    tokenizer_config = olmo_config["dataset"]["tokenizer"]
    n_layers = model_config["n_layers"]
    n_heads = attention_config.get("n_heads", model_config.get("n_heads", 32))
    n_kv_heads = attention_config.get("n_kv_heads", n_heads)
    dim = model_config["d_model"]
    rope_config = attention_config.get("rope")
    if rope_config is not None:
        rope_theta = rope_config.get("theta", 500000.0)
        # Build unified rope_parameters dict
        rope_parameters = {"rope_theta": rope_theta}
        rope_scaling_config = rope_config.get("scaling")
        if rope_scaling_config:
            # Scaling may be a plain dict or an object exposing to_hf_config().
            if hasattr(rope_scaling_config, "to_hf_config"):
                rope_parameters.update(rope_scaling_config.to_hf_config())
            else:
                rope_parameters.update(rope_scaling_config)
        else:
            rope_parameters["rope_type"] = "default"
    else:
        rope_parameters = None
    # Resolve max_position_embeddings with priority:
    # CLI arg > train_module.max_sequence_length > dataset.sequence_length > fallback
    if max_sequence_length is None:
        max_sequence_length = olmo_config.get("train_module", {}).get("max_sequence_length")
    if max_sequence_length is None:
        max_sequence_length = olmo_config.get("dataset", {}).get("sequence_length")
    if max_sequence_length is None:
        max_sequence_length = 65536
        print(f"Warning: max_sequence_length not found in config or CLI, using default: {max_sequence_length}")
    max_position_embeddings = max_sequence_length
    layer_types = get_layer_types_from_config(olmo_config)
    # Linear-attention hyperparameters, with fallbacks mirroring OLMo-core defaults.
    fla_layer_kwargs = fla_config.get("fla_layer_kwargs", {})
    linear_key_head_dim = fla_layer_kwargs.get("head_dim", 96)
    linear_value_head_dim = fla_layer_kwargs.get("head_v_dim", linear_key_head_dim * 2)
    linear_num_heads = fla_layer_kwargs.get("num_heads", n_heads)
    linear_conv_kernel_dim = fla_layer_kwargs.get("conv_kernel_dim", 4)
    linear_allow_neg_eigval = fla_layer_kwargs.get("allow_neg_eigval", True)
    print(f"Fetching all parameters from the checkpoint at {input_base_path}.")
    loaded = load_model(os.path.join(input_base_path, "model_and_optim"))["model"]
    print(f"Loaded {len(loaded)} keys from checkpoint")
    param_count = 0
    full_state_dict: dict[str, torch.Tensor] = {}
    # Convert each transformer block according to its layer type.
    for layer_i in range(n_layers):
        layer_type = layer_types[layer_i]
        if layer_type == "linear_attention":
            layer_state = convert_fla_layer_weights(loaded, layer_i)
        else:
            layer_state = convert_attention_layer_weights(loaded, layer_i)
        full_state_dict.update(layer_state)
        param_count += sum(v.numel() for v in layer_state.values())
        print(f"Converted layer {layer_i} ({layer_type})")
    # Add embeddings and lm_head
    full_state_dict["model.embed_tokens.weight"] = loaded["embeddings.weight"]
    full_state_dict["model.norm.weight"] = loaded["lm_head.norm.weight"]
    full_state_dict["lm_head.weight"] = loaded["lm_head.w_out.weight"]
    param_count += sum(
        v.numel() for v in [loaded["embeddings.weight"], loaded["lm_head.norm.weight"], loaded["lm_head.w_out.weight"]]
    )
    # Cast all tensors to target dtype (matches OLMo-core behavior which casts everything,
    # including buffers like A_log and dt_bias)
    full_state_dict = {k: v.to(dtype) if torch.is_tensor(v) else v for k, v in full_state_dict.items()}
    print(f"Total parameters: {param_count}")
    config = OlmoHybridConfig(
        vocab_size=model_config["vocab_size"],
        hidden_size=dim,
        intermediate_size=block_config["feed_forward"]["hidden_size"],
        num_hidden_layers=n_layers,
        num_attention_heads=n_heads,
        num_key_value_heads=n_kv_heads,
        max_position_embeddings=max_position_embeddings,
        pad_token_id=tokenizer_config.get("pad_token_id"),
        bos_token_id=tokenizer_config.get("bos_token_id"),
        eos_token_id=tokenizer_config.get("eos_token_id"),
        tie_word_embeddings=False,
        rms_norm_eps=block_config.get("layer_norm", {}).get("eps", 1e-6),
        rope_parameters=rope_parameters,
        layer_types=layer_types,
        linear_num_key_heads=linear_num_heads,
        linear_num_value_heads=linear_num_heads,
        linear_key_head_dim=linear_key_head_dim,
        linear_value_head_dim=linear_value_head_dim,
        linear_conv_kernel_dim=linear_conv_kernel_dim,
        linear_allow_neg_eigval=linear_allow_neg_eigval,
    )
    # The config constructor may fill rope defaults; force them back to None when
    # the source checkpoint had no rope section.
    if rope_parameters is None:
        config.rope_parameters = None
        config.rope_theta = None
    # Explicitly set architectures (normally set by model.save_pretrained, but we
    # save directly without the model roundtrip)
    config.architectures = ["OlmoHybridForCausalLM"]
    # Save config and weights directly (no from_pretrained roundtrip, which can
    # corrupt embeddings and fail to cast buffers like A_log)
    config.save_pretrained(model_path)
    from safetensors.torch import save_file
    safetensors_path = os.path.join(model_path, "model.safetensors")
    save_file(full_state_dict, safetensors_path)
    print(f"Saved weights to {safetensors_path}")
    # Free the (potentially very large) tensors before tokenizer work.
    del full_state_dict
    del loaded
    gc.collect()
    if include_tokenizer:
        tokenizer_id = tokenizer_id or tokenizer_config.get("identifier")
        if tokenizer_id:
            _write_tokenizer(model_path, tokenizer_id, max_sequence_length, tokenizer_config)
    # Update config with tokenizer info
    hf_config_path = Path(model_path) / "config.json"
    with open(hf_config_path, "r") as f:
        config_dict = json.load(f)
    config_dict["max_position_embeddings"] = max_position_embeddings
    config_dict["pad_token_id"] = tokenizer_config.get("pad_token_id")
    config_dict["bos_token_id"] = tokenizer_config.get("bos_token_id")
    config_dict["eos_token_id"] = tokenizer_config.get("eos_token_id")
    with open(hf_config_path, "w") as f:
        json.dump(config_dict, f, indent=2)
    print("Updated config.json with tokenizer settings")
def _write_tokenizer(
    output_path: Path | str,
    tokenizer_id: str,
    max_sequence_length: int | None = None,
    tokenizer_config: dict | None = None,
) -> None:
    """Save tokenizer with proper configuration matching OLMo-core behavior."""
    print(f"Saving tokenizer {tokenizer_id} to {output_path}.")
    tokenizer = AutoTokenizer.from_pretrained(tokenizer_id)
    if max_sequence_length is not None:
        tokenizer.model_max_length = max_sequence_length
    if tokenizer_config is not None:
        # Propagate the special-token ids recorded in the OLMo config.
        for attr in ("pad_token_id", "bos_token_id", "eos_token_id"):
            setattr(tokenizer, attr, tokenizer_config.get(attr))
    tokenizer.save_pretrained(output_path)
def main():
    """Parse the command-line options and run the OLMo Hybrid -> HF conversion."""
    cli = argparse.ArgumentParser(description="Convert OLMo Hybrid weights to HuggingFace format.")
    # (flags, keyword arguments) for every CLI option, in display order.
    option_specs = [
        (
            ("--input_dir",),
            {
                "required": True,
                "help": "Location of OLMo Hybrid weights, which contains config.json and model_and_optim/.",
            },
        ),
        (
            ("--no_tokenizer",),
            {
                "action": "store_false",
                "dest": "include_tokenizer",
                "help": "If set, do not convert OLMo tokenizer to HF tokenizer.",
            },
        ),
        (
            ("--tokenizer",),
            {
                "type": str,
                "default": None,
                "help": "HuggingFace tokenizer identifier. Defaults to what is set in the config file.",
            },
        ),
        (
            ("--output_dir",),
            {
                "required": True,
                "help": "Location to write HF model and tokenizer.",
            },
        ),
        (
            ("--max_sequence_length",),
            {
                "type": int,
                "default": None,
                "help": "Max sequence length. If not set, reads from train_module.max_sequence_length or dataset.sequence_length in the config.",
            },
        ),
        (
            ("--dtype",),
            {
                "type": str,
                "default": "bfloat16",
                "choices": list(DTYPE_MAP.keys()),
                "help": "Output dtype for model weights. Defaults to bfloat16.",
            },
        ),
        (
            ("--device",),
            {
                "type": str,
                "default": None,
                "help": "Device for conversion (e.g., 'cpu', 'cuda'). Defaults to CPU.",
            },
        ),
    ]
    for flags, kwargs in option_specs:
        cli.add_argument(*flags, **kwargs)
    opts = cli.parse_args()
    write_model(
        model_path=opts.output_dir,
        input_base_path=opts.input_dir,
        include_tokenizer=opts.include_tokenizer,
        tokenizer_id=opts.tokenizer,
        max_sequence_length=opts.max_sequence_length,
        dtype=DTYPE_MAP[opts.dtype],
        device=opts.device,
    )
# Standard script entry point: run the CLI only when executed directly,
# not when this module is imported.
if __name__ == "__main__":
    main()
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/olmo_hybrid/convert_olmo_hybrid_weights_to_hf.py",
"license": "Apache License 2.0",
"lines": 535,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/olmo_hybrid/modular_olmo_hybrid.py | # Copyright 2026 the HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import math
from collections.abc import Callable
import torch
import torch.nn as nn
import torch.nn.functional as F
from ... import initialization as init
from ...activations import ACT2FN
from ...cache_utils import Cache
from ...configuration_utils import layer_type_validation
from ...masking_utils import create_causal_mask
from ...modeling_outputs import BaseModelOutputWithPast
from ...modeling_rope_utils import dynamic_rope_update
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...utils import TransformersKwargs, logging
from ...utils.generic import maybe_autocast, merge_with_config_defaults
from ...utils.import_utils import is_flash_linear_attention_available
from ...utils.output_capturing import capture_outputs
from ..llama.configuration_llama import LlamaConfig
from ..llama.modeling_llama import LlamaDecoderLayer
from ..olmo3.modeling_olmo3 import (
Olmo3Attention,
Olmo3DecoderLayer,
Olmo3ForCausalLM,
Olmo3MLP,
Olmo3RMSNorm,
Olmo3RotaryEmbedding,
apply_rotary_pos_emb,
eager_attention_forward,
)
from ..qwen3_next.modeling_qwen3_next import (
Qwen3NextDynamicCache,
Qwen3NextModel,
Qwen3NextPreTrainedModel,
Qwen3NextRMSNormGated,
apply_mask_to_padding_states,
torch_chunk_gated_delta_rule,
torch_recurrent_gated_delta_rule,
)
# ``fla`` (flash-linear-attention) is an optional dependency: import its fused
# kernels when available, otherwise bind the names to ``None`` placeholders.
if is_flash_linear_attention_available():
    from fla.modules import FusedRMSNormGated, ShortConvolution
    from fla.ops.gated_delta_rule import chunk_gated_delta_rule, fused_recurrent_gated_delta_rule
else:
    chunk_gated_delta_rule, fused_recurrent_gated_delta_rule = None, None
    FusedRMSNormGated = None
    ShortConvolution = None
# The fused fast path is usable only if every fla kernel imported successfully.
is_fast_path_available = all(
    (ShortConvolution, chunk_gated_delta_rule, fused_recurrent_gated_delta_rule, FusedRMSNormGated)
)
logger = logging.get_logger(__name__)
class OlmoHybridConfig(LlamaConfig):
    r"""
    This is the configuration class to store the configuration of a [`OlmoHybridModel`]. It is used to instantiate
    an OLMo Hybrid model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the
    [allenai/Olmo-Hybrid-7B](https://huggingface.co/allenai/Olmo-Hybrid-7B) model.
    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.
    Args:
        vocab_size (`int`, *optional*, defaults to 100352):
            Vocabulary size of the OlmoHybrid model. Defines the number of different tokens that can be represented
            by the `inputs_ids` passed when calling [`OlmoHybridModel`].
        hidden_size (`int`, *optional*, defaults to 3840):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 11008):
            Dimension of the MLP representations.
        num_hidden_layers (`int`, *optional*, defaults to 32):
            Number of hidden layers in the Transformer decoder.
        num_attention_heads (`int`, *optional*, defaults to 30):
            Number of attention heads for each attention layer in the Transformer decoder.
        num_key_value_heads (`int`, *optional*):
            This is the number of key_value heads that should be used to implement Grouped Query Attention. If
            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
            `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
            by meanpooling all the original heads within that group. For more details, check out [this
            paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to
            `num_attention_heads`.
        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the decoder.
        max_position_embeddings (`int`, *optional*, defaults to 65536):
            The maximum sequence length that this model might ever be used with.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        pad_token_id (`int`, *optional*, defaults to 100277):
            Padding token id.
        bos_token_id (`int`, *optional*):
            Beginning of stream token id.
        eos_token_id (`int`, *optional*, defaults to 100257):
            End of stream token id.
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether to tie weight embeddings.
        rope_parameters (`RopeParameters`, *optional*):
            Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain
            a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE
            with longer `max_position_embeddings`. Can be `None` to disable RoPE (e.g., during long context extension).
        attention_bias (`bool`, *optional*, defaults to `False`):
            Whether to use a bias in the query, key, value and output projection layers during self-attention.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        rms_norm_eps (`float`, *optional*, defaults to 1e-06):
            The epsilon used by the rms normalization layers.
        layer_types (`list`, *optional*):
            Attention pattern for each layer. Can contain `"full_attention"` or `"linear_attention"`.
            Defaults to linear attention for most layers with full attention for every 4th layer.
        linear_num_key_heads (`int`, *optional*):
            Number of key heads for the linear attention layers. Defaults to `num_attention_heads`.
        linear_num_value_heads (`int`, *optional*):
            Number of value heads for the linear attention layers. Defaults to `num_attention_heads`.
        linear_key_head_dim (`int`, *optional*):
            Dimension of each key head in linear attention layers. Defaults to `0.75 * hidden_size / linear_num_key_heads`.
        linear_value_head_dim (`int`, *optional*):
            Dimension of each value head in linear attention layers. Defaults to `2 * linear_key_head_dim`.
        linear_a_log_min (`float`, *optional*, defaults to 0.0):
            Minimum value for uniform initialization of A_log in GatedDeltaNet layers.
        linear_a_log_max (`float`, *optional*, defaults to 16.0):
            Maximum value for uniform initialization of A_log in GatedDeltaNet layers.
        linear_dt_min (`float`, *optional*, defaults to 0.001):
            Minimum value for dt initialization in GatedDeltaNet layers.
        linear_dt_max (`float`, *optional*, defaults to 0.1):
            Maximum value for dt initialization in GatedDeltaNet layers.
        linear_dt_init_floor (`float`, *optional*, defaults to 0.0001):
            Floor value for clamping dt during initialization in GatedDeltaNet layers.
        linear_conv_kernel_dim (`int`, *optional*, defaults to 4):
            Kernel size for the short convolution applied to queries, keys, and values in linear attention layers.
        linear_allow_neg_eigval (`bool`, *optional*, defaults to `True`):
            Whether to allow negative eigenvalues in the GatedDeltaNet recurrence. When `True`, the beta
            parameter is scaled by 2.0 to allow values in range [0, 2] instead of [0, 1].
    ```python
    >>> from transformers import OlmoHybridModel, OlmoHybridConfig
    >>> # Initializing an OlmoHybrid style configuration
    >>> configuration = OlmoHybridConfig()
    >>> # Initializing a model from the OlmoHybrid style configuration
    >>> model = OlmoHybridModel(configuration)
    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```
    """
    model_type = "olmo_hybrid"
    base_model_tp_plan = {
        "layers.*.self_attn.q_proj": "colwise_gather_output",  # we need to replicate here due to the added norm on q and k
        "layers.*.self_attn.k_proj": "colwise_gather_output",  # we need to replicate here due to the added norm on q and k
        "layers.*.self_attn.v_proj": "colwise_gather_output",  # we need to replicate here due to the added norm on q and k
        "layers.*.self_attn.o_proj": "rowwise_split_input",  # input is replicated due to the added norm on q and k
        "layers.*.mlp.gate_proj": "colwise",
        "layers.*.mlp.up_proj": "colwise",
        "layers.*.mlp.down_proj": "rowwise",
    }
    def __init__(
        self,
        vocab_size: int | None = 100352,
        hidden_size: int | None = 3840,
        intermediate_size: int | None = 11008,
        num_hidden_layers: int | None = 32,
        num_attention_heads: int | None = 30,
        num_key_value_heads: int | None = None,
        hidden_act: str | None = "silu",
        max_position_embeddings: int | None = 65536,
        initializer_range: float | None = 0.02,
        use_cache: bool | None = True,
        pad_token_id: int | None = 100277,
        bos_token_id: int | None = None,
        eos_token_id: int | None = 100257,
        tie_word_embeddings: bool | None = False,
        rope_parameters=None,
        attention_bias: bool | None = False,
        attention_dropout: float | None = 0.0,
        rms_norm_eps: float | None = 1e-06,
        layer_types: list[str] | None = None,
        linear_num_key_heads: int | None = None,
        linear_num_value_heads: int | None = None,
        linear_key_head_dim: int | None = None,
        linear_value_head_dim: int | None = None,
        linear_a_log_min: float = 0.0,
        linear_a_log_max: float = 16.0,
        linear_dt_min: float = 0.001,
        linear_dt_max: float = 0.1,
        linear_dt_init_floor: float = 1e-4,
        linear_conv_kernel_dim: int = 4,
        linear_allow_neg_eigval: bool = True,
        **kwargs,
    ):
        if layer_types is None:
            # Default: linear attention for most layers, full attention every 4th layer
            layer_types = ["linear_attention"] * int(num_hidden_layers)
            for i in range(int(num_hidden_layers)):
                if i % 4 == 3:
                    layer_types[i] = "full_attention"
            # Ensure at least one full attention layer for small num_hidden_layers
            if "full_attention" not in layer_types:
                layer_types[-1] = "full_attention"
        # Validate the pattern and require a mix of linear and full attention layers.
        layer_type_validation(layer_types, num_hidden_layers)
        if "linear_attention" not in layer_types:
            raise ValueError("OLMoHybrid expects at least one 'linear_attention' layer.")
        if all(t == "linear_attention" for t in layer_types):
            raise ValueError("OLMoHybrid expects at least one attention layer.")
        self.layer_types = layer_types
        # Linear-attention hyperparameters default relative to the attention config
        # (see the class docstring for the exact formulas).
        if linear_num_key_heads is None:
            linear_num_key_heads = num_attention_heads
        if linear_num_value_heads is None:
            linear_num_value_heads = num_attention_heads
        if linear_key_head_dim is None:
            linear_key_head_dim = int(0.75 * hidden_size / linear_num_key_heads)
        if linear_value_head_dim is None:
            linear_value_head_dim = 2 * linear_key_head_dim
        self.linear_num_key_heads = linear_num_key_heads
        self.linear_num_value_heads = linear_num_value_heads
        self.linear_key_head_dim = linear_key_head_dim
        self.linear_value_head_dim = linear_value_head_dim
        self.linear_a_log_min = linear_a_log_min
        self.linear_a_log_max = linear_a_log_max
        self.linear_dt_min = linear_dt_min
        self.linear_dt_max = linear_dt_max
        self.linear_dt_init_floor = linear_dt_init_floor
        self.linear_conv_kernel_dim = linear_conv_kernel_dim
        self.linear_allow_neg_eigval = linear_allow_neg_eigval
        super().__init__(
            vocab_size=vocab_size,
            hidden_size=hidden_size,
            intermediate_size=intermediate_size,
            num_hidden_layers=num_hidden_layers,
            num_attention_heads=num_attention_heads,
            num_key_value_heads=num_key_value_heads,
            hidden_act=hidden_act,
            max_position_embeddings=max_position_embeddings,
            initializer_range=initializer_range,
            use_cache=use_cache,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            attention_bias=attention_bias,
            attention_dropout=attention_dropout,
            rms_norm_eps=rms_norm_eps,
            rope_parameters=rope_parameters,
            **kwargs,
        )
        # Remove LlamaConfig attributes that do not apply to OlmoHybrid
        # (presumably unused by the OlmoHybrid modeling code — see modular file).
        del self.pretraining_tp
        del self.mlp_bias
        del self.head_dim
class OlmoHybridDynamicCache(Qwen3NextDynamicCache):
    """
    Cache for hybrid model supporting both attention KV cache and linear attention state.
    Inherits from Qwen3NextDynamicCache. The main difference is that this cache
    stores separate conv states for q, k, v (instead of a single conv_states list).
    """
    def __init__(self, config: OlmoHybridConfig):
        super().__init__(config)
        # The parent allocates a single per-layer conv_states list; drop it and
        # track q, k, v convolution states independently (one slot per layer).
        del self.conv_states
        # Replace single conv_states with separate q, k, v conv states
        self.conv_states_q = [None for _ in range(config.num_hidden_layers)]
        self.conv_states_k = [None for _ in range(config.num_hidden_layers)]
        self.conv_states_v = [None for _ in range(config.num_hidden_layers)]
    def reorder_cache(self, beam_idx: torch.LongTensor):
        """Reorders the cache for beam search, given the selected beam indices."""
        batch_size = beam_idx.shape[0]
        for layer_idx in range(len(self.key_cache)):
            # Full-attention layers: reorder the KV cache along the batch dim.
            if self.key_cache[layer_idx] is not None:
                # If the beam batch is larger than the cached batch (e.g. first
                # reorder after expanding a single prompt), tile the cache first.
                if self.key_cache[layer_idx].shape[0] < batch_size:
                    expand_ratio = batch_size // self.key_cache[layer_idx].shape[0]
                    self.key_cache[layer_idx] = self.key_cache[layer_idx].repeat_interleave(expand_ratio, dim=0)
                    self.value_cache[layer_idx] = self.value_cache[layer_idx].repeat_interleave(expand_ratio, dim=0)
                device = self.key_cache[layer_idx].device
                self.key_cache[layer_idx] = self.key_cache[layer_idx].index_select(0, beam_idx.to(device))
                self.value_cache[layer_idx] = self.value_cache[layer_idx].index_select(0, beam_idx.to(device))
            # Linear-attention layers: reorder conv states and the recurrent state together.
            if self.conv_states_q[layer_idx] is not None:
                if self.conv_states_q[layer_idx].shape[0] < batch_size:
                    expand_ratio = batch_size // self.conv_states_q[layer_idx].shape[0]
                    self.conv_states_q[layer_idx] = self.conv_states_q[layer_idx].repeat_interleave(
                        expand_ratio, dim=0
                    )
                    self.conv_states_k[layer_idx] = self.conv_states_k[layer_idx].repeat_interleave(
                        expand_ratio, dim=0
                    )
                    self.conv_states_v[layer_idx] = self.conv_states_v[layer_idx].repeat_interleave(
                        expand_ratio, dim=0
                    )
                    self.recurrent_states[layer_idx] = self.recurrent_states[layer_idx].repeat_interleave(
                        expand_ratio, dim=0
                    )
                device = self.conv_states_q[layer_idx].device
                self.conv_states_q[layer_idx] = self.conv_states_q[layer_idx].index_select(0, beam_idx.to(device))
                self.conv_states_k[layer_idx] = self.conv_states_k[layer_idx].index_select(0, beam_idx.to(device))
                self.conv_states_v[layer_idx] = self.conv_states_v[layer_idx].index_select(0, beam_idx.to(device))
                self.recurrent_states[layer_idx] = self.recurrent_states[layer_idx].index_select(
                    0, beam_idx.to(device)
                )
    @property
    def has_previous_state(self):
        # True once conv state has been cached for the last linear-attention layer;
        # ``last_linear_layer`` presumably comes from Qwen3NextDynamicCache — confirm.
        return self.conv_states_q[self.last_linear_layer] is not None
class OlmoHybridRMSNormGated(Qwen3NextRMSNormGated):
    """Gated RMSNorm, inherited unchanged; used as the non-fused fallback output norm of the gated delta-net."""

    pass
class OlmoHybridRMSNorm(Olmo3RMSNorm):
    """Plain RMSNorm, identical to Olmo3RMSNorm."""

    pass
class OlmoHybridShortConvolution(nn.Conv1d):
def __init__(
self,
hidden_size: int,
kernel_size: int,
bias: bool = False,
activation: str | None = "silu",
):
super().__init__(
in_channels=hidden_size,
out_channels=hidden_size,
kernel_size=kernel_size,
groups=hidden_size,
padding=kernel_size - 1,
bias=bias,
)
self.hidden_size = hidden_size
self.conv_kernel_size = kernel_size
self.act_fn = ACT2FN[activation]
def forward(
self,
hidden_states: torch.Tensor,
cache: torch.Tensor | None = None,
use_precomputed: bool = False,
**kwargs,
) -> tuple[torch.Tensor, torch.Tensor]:
seq_len, dim = hidden_states.shape[-2:]
hidden_states = hidden_states.transpose(1, 2)
if use_precomputed:
# Single token update (decoding mode)
x_with_state = torch.cat([cache, hidden_states], dim=-1)
out = F.conv1d(
x_with_state,
self.weight,
self.bias,
padding=0,
groups=dim,
)
conv_state = x_with_state[:, :, 1:]
else:
# Multi-token forward (prefill mode)
out = F.conv1d(hidden_states, self.weight, self.bias, padding=self.conv_kernel_size - 1, groups=dim)
out = out[:, :, :seq_len]
conv_state = F.pad(hidden_states, (self.conv_kernel_size - 1 - hidden_states.shape[-1], 0))
out = self.act_fn(out)
return out.transpose(1, 2), conv_state
class OlmoHybridAttention(Olmo3Attention):
    """
    Multi-headed attention for OLMo Hybrid that supports optional RoPE (NoPE mode).

    Inherits from Olmo3Attention. The only behavioral difference is that when
    position_embeddings is None, rotary position embeddings are skipped entirely,
    enabling NoPE mode for long context extension.
    """

    def __init__(self, config: OlmoHybridConfig, layer_idx: int):
        super().__init__(config, layer_idx)
        # Hybrid model doesn't use sliding window attention
        del self.sliding_window
        del self.attention_type

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: tuple[torch.Tensor, torch.Tensor] | None,
        attention_mask: torch.Tensor | None,
        past_key_values: Cache | None = None,
        cache_position: torch.LongTensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple[torch.Tensor, torch.Tensor | None]:
        """
        Compute self-attention over `hidden_states`.

        Args:
            hidden_states: Input of shape (batch, seq_len, hidden_size).
            position_embeddings: Optional (cos, sin) tensors; `None` skips RoPE entirely (NoPE).
            attention_mask: Optional mask forwarded to the attention backend.
            past_key_values: Optional cache, updated in place with the new key/value states.
            cache_position: Positions of the current tokens inside the cache.

        Returns:
            Tuple of (attention output, attention weights or None depending on the backend).
        """
        input_shape = hidden_states.shape[:-1]
        hidden_shape = (*input_shape, -1, self.head_dim)

        # Q and K are normalized after projection; V is projected as-is.
        query_states = self.q_norm(self.q_proj(hidden_states))
        key_states = self.k_norm(self.k_proj(hidden_states))
        value_states = self.v_proj(hidden_states)

        query_states = query_states.view(hidden_shape).transpose(1, 2)
        key_states = key_states.view(hidden_shape).transpose(1, 2)
        value_states = value_states.view(hidden_shape).transpose(1, 2)

        # NoPE mode: skip RoPE when position_embeddings is None
        cos, sin = None, None
        if position_embeddings is not None:
            cos, sin = position_embeddings
            query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)

        if past_key_values is not None:
            # cos/sin may be None here (NoPE); they are forwarded to the cache as-is.
            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
            key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)

        # Dispatch to the configured attention backend, falling back to eager.
        attention_interface: Callable = ALL_ATTENTION_FUNCTIONS.get_interface(
            self.config._attn_implementation, eager_attention_forward
        )

        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=0.0 if not self.training else self.attention_dropout,
            scaling=self.scaling,
            **kwargs,
        )

        attn_output = attn_output.reshape(*input_shape, -1).contiguous()
        attn_output = self.o_proj(attn_output)
        return attn_output, attn_weights
class OlmoHybridRotaryEmbedding(Olmo3RotaryEmbedding):
    """
    RoPE for OLMo Hybrid that returns float32 cos/sin to match OLMo-core.
    """

    @torch.no_grad()
    @dynamic_rope_update
    def forward(self, x, position_ids):
        # inv_freq: (dim/2,) -> (batch, dim/2, 1); position_ids: (batch, seq) -> (batch, 1, seq)
        inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
        position_ids_expanded = position_ids[:, None, :].float()

        # Compute angles with autocast disabled so cos/sin stay in float32.
        # NOTE(review): device.type is always a str, so the isinstance check is
        # effectively just the "mps" exclusion.
        device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
        with maybe_autocast(device_type=device_type, enabled=False):
            freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
            emb = torch.cat((freqs, freqs), dim=-1)
            cos = emb.cos() * self.attention_scaling
            sin = emb.sin() * self.attention_scaling
        # KEY difference from parent: return float32, don't cast to x.dtype
        return cos, sin
class OlmoHybridGatedDeltaNet(nn.Module):
    """
    GatedDeltaNet linear attention for OLMo Hybrid.

    Key differences from Qwen3NextGatedDeltaNet:
    - Fully separate q/k/v/a/b projections (vs. fused qkvz + partially split ba)
    - Per-projection conv1d for q, k, v (vs. single conv1d over concatenated qkv)
    - Dedicated g_proj gate (vs. z derived from the fused qkvz projection)
    - Supports allow_neg_eigval: scales beta by 2.0 to allow range [0, 2]
    """

    def __init__(self, config: OlmoHybridConfig, layer_idx: int):
        super().__init__()
        self.hidden_size = config.hidden_size
        self.num_v_heads = config.linear_num_value_heads
        self.num_k_heads = config.linear_num_key_heads
        self.head_k_dim = config.linear_key_head_dim
        self.head_v_dim = config.linear_value_head_dim
        # Total projection widths across all heads.
        self.key_dim = self.head_k_dim * self.num_k_heads
        self.value_dim = self.head_v_dim * self.num_v_heads
        self.layer_idx = layer_idx
        self.conv_kernel_size = config.linear_conv_kernel_dim
        self.allow_neg_eigval = config.linear_allow_neg_eigval
        self.eps = config.rms_norm_eps

        # Fully separate projections (no fused qkvz as in Qwen3Next).
        self.q_proj = nn.Linear(self.hidden_size, self.key_dim, bias=False)
        self.k_proj = nn.Linear(self.hidden_size, self.key_dim, bias=False)
        self.v_proj = nn.Linear(self.hidden_size, self.value_dim, bias=False)
        self.a_proj = nn.Linear(self.hidden_size, self.num_v_heads, bias=False)
        self.b_proj = nn.Linear(self.hidden_size, self.num_v_heads, bias=False)
        self.g_proj = nn.Linear(self.hidden_size, self.value_dim, bias=False)
        self.o_proj = nn.Linear(self.value_dim, self.hidden_size, bias=False)

        # Prefer FLA's fused ShortConvolution kernel; fall back to the torch version.
        Conv1dClass = ShortConvolution if ShortConvolution is not None else OlmoHybridShortConvolution
        self.q_conv1d = Conv1dClass(
            hidden_size=self.key_dim,
            kernel_size=self.conv_kernel_size,
            bias=False,
            activation="silu",
        )
        self.k_conv1d = Conv1dClass(
            hidden_size=self.key_dim,
            kernel_size=self.conv_kernel_size,
            bias=False,
            activation="silu",
        )
        self.v_conv1d = Conv1dClass(
            hidden_size=self.value_dim,
            kernel_size=self.conv_kernel_size,
            bias=False,
            activation="silu",
        )

        # Decay parameter stored in log space, sampled uniformly per value head.
        A = torch.empty(self.num_v_heads, dtype=torch.float32).uniform_(
            config.linear_a_log_min, config.linear_a_log_max
        )
        self.A_log = nn.Parameter(torch.log(A))
        # dt_bias: inverse-softplus of a log-uniform dt sample clamped to a floor
        # (Mamba-style time-step initialization).
        dt = torch.exp(
            torch.rand(self.num_v_heads) * (math.log(config.linear_dt_max) - math.log(config.linear_dt_min))
            + math.log(config.linear_dt_min)
        )
        dt = torch.clamp(dt, min=config.linear_dt_init_floor)
        inv_dt = dt + torch.log(-torch.expm1(-dt))
        self.dt_bias = nn.Parameter(inv_dt)

        # Output norm - NOTE: FLA's FusedRMSNormGated uses eps=1e-5 by default
        self.o_norm = (
            OlmoHybridRMSNormGated(self.head_v_dim, eps=1e-5)
            if FusedRMSNormGated is None
            else FusedRMSNormGated(
                self.head_v_dim,
                eps=1e-5,
                device=torch.cuda.current_device(),
                dtype=config.dtype if config.dtype is not None else torch.get_default_dtype(),
            )
        )

        # Kernel entry points: fused FLA implementations when available, torch fallbacks otherwise.
        self.chunk_gated_delta_rule = chunk_gated_delta_rule or torch_chunk_gated_delta_rule
        self.recurrent_gated_delta_rule = fused_recurrent_gated_delta_rule or torch_recurrent_gated_delta_rule
        if not is_fast_path_available:
            logger.warning_once(
                "The fast path is not available because one of the required libraries is not installed. "
                "Falling back to torch implementation. To install, follow: "
                "https://github.com/fla-org/flash-linear-attention#installation"
            )

    def forward(
        self,
        hidden_states: torch.Tensor,
        cache_params: OlmoHybridDynamicCache | None = None,
        cache_position: torch.LongTensor | None = None,
        attention_mask: torch.Tensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> torch.Tensor:
        """
        Run gated delta-rule linear attention over `hidden_states`.

        Uses the chunked kernel for multi-token (prefill) inputs and the recurrent
        kernel for single-token decoding; conv and recurrent states are read from
        and written back to `cache_params` when caching is enabled.
        """
        # Requires LEFT padding to work correctly
        hidden_states = apply_mask_to_padding_states(hidden_states, attention_mask)
        batch_size, seq_len, _ = hidden_states.shape

        use_cache = cache_params is not None
        # Decoding fast path: exactly one new token with previous state available.
        use_precomputed = use_cache and getattr(cache_params, "has_previous_state", False) and seq_len == 1

        conv_state_q = cache_params.conv_states_q[self.layer_idx] if cache_params else None
        conv_state_k = cache_params.conv_states_k[self.layer_idx] if cache_params else None
        conv_state_v = cache_params.conv_states_v[self.layer_idx] if cache_params else None
        recurrent_state = cache_params.recurrent_states[self.layer_idx] if cache_params else None

        q = self.q_proj(hidden_states)
        k = self.k_proj(hidden_states)
        v = self.v_proj(hidden_states)

        # Per-projection causal short convolutions; each returns its updated conv state.
        q, new_conv_state_q = self.q_conv1d(
            q, cache=conv_state_q, use_precomputed=use_precomputed, output_final_state=use_cache
        )
        k, new_conv_state_k = self.k_conv1d(
            k, cache=conv_state_k, use_precomputed=use_precomputed, output_final_state=use_cache
        )
        v, new_conv_state_v = self.v_conv1d(
            v, cache=conv_state_v, use_precomputed=use_precomputed, output_final_state=use_cache
        )
        if cache_params is not None:
            cache_params.conv_states_q[self.layer_idx] = new_conv_state_q
            cache_params.conv_states_k[self.layer_idx] = new_conv_state_k
            cache_params.conv_states_v[self.layer_idx] = new_conv_state_v

        # Split into heads: (batch, seq, heads, head_dim).
        q = q.view(batch_size, seq_len, -1, self.head_k_dim)
        k = k.view(batch_size, seq_len, -1, self.head_k_dim)
        v = v.view(batch_size, seq_len, -1, self.head_v_dim)

        # GQA-style expansion so q/k head counts match the value heads.
        if self.num_v_heads > self.num_k_heads:
            expand_ratio = self.num_v_heads // self.num_k_heads
            q = q.repeat_interleave(expand_ratio, dim=2)
            k = k.repeat_interleave(expand_ratio, dim=2)

        beta = self.b_proj(hidden_states).sigmoid()
        if self.allow_neg_eigval:
            # Stretch beta from [0, 1] to [0, 2], which permits negative state eigenvalues.
            beta = beta * 2.0

        # Per-head log decay g <= 0, computed in float32 for numerical stability.
        g = -self.A_log.float().exp() * F.softplus(self.a_proj(hidden_states).float() + self.dt_bias)

        if use_precomputed:
            output, new_recurrent_state = self.recurrent_gated_delta_rule(
                q,
                k,
                v,
                g=g,
                beta=beta,
                initial_state=recurrent_state,
                output_final_state=use_cache,
                use_qk_l2norm_in_kernel=True,
            )
        else:
            output, new_recurrent_state = self.chunk_gated_delta_rule(
                q,
                k,
                v,
                g=g,
                beta=beta,
                initial_state=recurrent_state,
                output_final_state=use_cache,
                use_qk_l2norm_in_kernel=True,
            )
        if cache_params is not None:
            cache_params.recurrent_states[self.layer_idx] = new_recurrent_state

        # Gated RMS norm per head dim, then merge heads and project back out.
        gate = self.g_proj(hidden_states)
        output = output.reshape(-1, self.head_v_dim)
        gate = gate.reshape(-1, self.head_v_dim)
        output = self.o_norm(output, gate)
        output = output.reshape(batch_size, seq_len, -1)
        output = self.o_proj(output)
        return output
class OlmoHybridMLP(Olmo3MLP):
    """Feed-forward block, identical to Olmo3MLP."""

    pass
class OlmoHybridAttentionDecoderLayer(Olmo3DecoderLayer):
    """Decoder layer whose token mixer is full softmax self-attention."""

    def __init__(self, config: OlmoHybridConfig, layer_idx: int):
        super().__init__(config, layer_idx)
        # Tag read by OlmoHybridModel.forward to route masks / position embeddings.
        self.layer_type = "full_attention"
        self.self_attn = OlmoHybridAttention(config=config, layer_idx=layer_idx)
class OlmoHybridLinearAttentionDecoderLayer(LlamaDecoderLayer):
    """Decoder layer whose token mixer is the gated delta-net linear attention."""

    def __init__(self, config: OlmoHybridConfig, layer_idx: int):
        super().__init__(config, layer_idx)
        # Tag read by OlmoHybridModel.forward to route masks / position embeddings.
        self.layer_type = "linear_attention"
        # Replace the softmax attention module with the gated delta-net mixer.
        del self.self_attn
        self.linear_attn = OlmoHybridGatedDeltaNet(config, layer_idx=layer_idx)
        self.input_layernorm = OlmoHybridRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.post_attention_layernorm = OlmoHybridRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.mlp = OlmoHybridMLP(config)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: Cache | None = None,
        use_cache: bool | None = False,
        cache_position: torch.LongTensor | None = None,
        position_embeddings: tuple[torch.Tensor, torch.Tensor] | None = None,
        output_attentions: bool | None = False,
        **kwargs: Unpack[TransformersKwargs],
    ) -> torch.Tensor:
        # Token-mixing sub-block (pre-norm residual). Main difference to llama -
        # the mixer takes `cache_params` instead of the attention cache kwargs.
        mixer_out = self.linear_attn(
            hidden_states=self.input_layernorm(hidden_states),
            cache_params=past_key_values,
            cache_position=cache_position,
            attention_mask=attention_mask,
        )
        hidden_states = hidden_states + mixer_out

        # Feed-forward sub-block (pre-norm residual).
        mlp_out = self.mlp(self.post_attention_layernorm(hidden_states))
        return hidden_states + mlp_out
class OlmoHybridPreTrainedModel(Qwen3NextPreTrainedModel):
    """Base class wiring weight init and loading behavior for OLMo Hybrid models."""

    # The model carries recurrent/conv state across steps (see OlmoHybridDynamicCache).
    _is_stateful = True
    _no_split_modules = ["OlmoHybridAttentionDecoderLayer", "OlmoHybridLinearAttentionDecoderLayer"]
    _can_record_outputs = {
        "hidden_states": (OlmoHybridAttentionDecoderLayer, OlmoHybridLinearAttentionDecoderLayer),
        "attentions": OlmoHybridAttention,
    }

    @torch.no_grad()
    def _init_weights(self, module):
        """Default init plus re-sampling of the GatedDeltaNet A_log / dt_bias parameters."""
        PreTrainedModel._init_weights(self, module)
        if isinstance(module, OlmoHybridGatedDeltaNet):
            cfg = self.config
            # Mirrors the initialization performed in OlmoHybridGatedDeltaNet.__init__:
            # A_log = log(Uniform[a_log_min, a_log_max]).
            init.copy_(
                module.A_log,
                torch.empty_like(module.A_log).uniform_(cfg.linear_a_log_min, cfg.linear_a_log_max).log_(),
            )
            # dt sampled log-uniformly in [dt_min, dt_max], clamped to a floor,
            # then mapped through the inverse softplus.
            dt = torch.exp(
                torch.rand_like(module.dt_bias) * (math.log(cfg.linear_dt_max) - math.log(cfg.linear_dt_min))
                + math.log(cfg.linear_dt_min)
            )
            dt = torch.clamp(dt, min=cfg.linear_dt_init_floor)
            inv_dt = dt + torch.log(-torch.expm1(-dt))
            init.copy_(module.dt_bias, inv_dt)
class OlmoHybridModel(Qwen3NextModel):
    """
    OLMo Hybrid backbone: interleaves full-attention decoder layers with gated
    delta-net linear-attention layers according to `config.layer_types`.
    """

    def __init__(self, config: OlmoHybridConfig):
        super().__init__(config)
        self.layers = nn.ModuleList(
            [
                OlmoHybridLinearAttentionDecoderLayer(config, layer_idx)
                if config.layer_types[layer_idx] == "linear_attention"
                else OlmoHybridAttentionDecoderLayer(config, layer_idx)
                for layer_idx in range(config.num_hidden_layers)
            ]
        )
        # RoPE is optional: without a configured rope_theta the model runs in NoPE mode.
        self.rotary_emb = (
            OlmoHybridRotaryEmbedding(config=config)
            if getattr(config, "rope_parameters", None) is not None
            and config.rope_parameters.get("rope_theta") is not None
            else None
        )
        self.post_init()

    @merge_with_config_defaults
    @capture_outputs
    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: Cache | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        use_cache: bool | None = None,
        cache_position: torch.LongTensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> BaseModelOutputWithPast:
        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)

        # The hybrid cache tracks attention KV plus conv/recurrent linear-attention state.
        if use_cache and past_key_values is None:
            past_key_values = OlmoHybridDynamicCache(config=self.config)

        if cache_position is None:
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position = torch.arange(
                past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
            )
        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)

        causal_mask = create_causal_mask(
            config=self.config,
            input_embeds=inputs_embeds,
            attention_mask=attention_mask,
            cache_position=cache_position,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )
        # Linear-attention layers get a mask derived from the padding mask
        # (see the parent's _update_linear_attn_mask) instead of the causal mask.
        linear_attn_mask = self._update_linear_attn_mask(attention_mask, cache_position)

        hidden_states = inputs_embeds
        # RoPE or NoPE
        position_embeddings = self.rotary_emb(hidden_states, position_ids) if self.rotary_emb is not None else None

        for decoder_layer in self.layers:
            # Route the appropriate mask / position embeddings per layer kind.
            layer_mask = linear_attn_mask if decoder_layer.layer_type == "linear_attention" else causal_mask
            layer_position_embeddings = position_embeddings if decoder_layer.layer_type == "full_attention" else None

            hidden_states = decoder_layer(
                hidden_states,
                position_embeddings=layer_position_embeddings,
                attention_mask=layer_mask,
                position_ids=position_ids,
                past_key_values=past_key_values,
                use_cache=use_cache,
                cache_position=cache_position,
                **kwargs,
            )

        hidden_states = self.norm(hidden_states)
        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values,
        )
class OlmoHybridForCausalLM(Olmo3ForCausalLM):
    """Causal language modeling head, inherited unchanged from Olmo3ForCausalLM."""

    pass
# Public symbols exported by this module.
__all__ = [
    "OlmoHybridConfig",
    "OlmoHybridForCausalLM",
    "OlmoHybridModel",
    "OlmoHybridPreTrainedModel",
]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/olmo_hybrid/modular_olmo_hybrid.py",
"license": "Apache License 2.0",
"lines": 728,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:tests/models/olmo_hybrid/test_modeling_olmo_hybrid.py | # Copyright 2026 the HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch OlmoHybrid model."""
import unittest
from transformers import OlmoHybridConfig, is_torch_available
from transformers.models.auto.tokenization_auto import AutoTokenizer
from transformers.testing_utils import (
Expectations,
cleanup,
require_torch,
slow,
torch_device,
)
from ...causal_lm_tester import CausalLMModelTest, CausalLMModelTester
if is_torch_available():
import torch
from transformers import (
Cache,
OlmoHybridForCausalLM,
OlmoHybridModel,
)
from transformers.models.olmo_hybrid.modeling_olmo_hybrid import (
OlmoHybridDynamicCache,
OlmoHybridRotaryEmbedding,
)
class OlmoHybridModelTester(CausalLMModelTester):
    """Model tester wiring the OlmoHybrid classes into the shared causal-LM test harness."""

    if is_torch_available():
        config_class = OlmoHybridConfig
        base_model_class = OlmoHybridModel
        causal_lm_class = OlmoHybridForCausalLM

    def __init__(self, parent):
        super().__init__(parent=parent)
        # Tiny hybrid stack: a linear-attention layer followed by a full-attention layer.
        self.layer_types = ["linear_attention", "full_attention"]
        self.linear_num_key_heads = 4
        self.linear_num_value_heads = 4
        self.linear_key_head_dim = 8
        self.linear_value_head_dim = 8
        self.linear_conv_kernel_dim = 4
        self.linear_allow_neg_eigval = False
@require_torch
class OlmoHybridModelTest(CausalLMModelTest, unittest.TestCase):
    """Common-test suite for OlmoHybrid, with cache checks adapted to the hybrid layout."""

    model_tester_class = OlmoHybridModelTester
    rotary_embedding_layer = OlmoHybridRotaryEmbedding if is_torch_available() else None

    # === Cache helper methods (same pattern as Qwen3Next) ===
    def _check_past_key_values_for_generate(self, batch_size, past_key_values, seq_length, config):
        """OlmoHybrid has a special Cache as it alternates with gated deltanet layers"""
        self.assertIsInstance(past_key_values, OlmoHybridDynamicCache)

        num_heads = getattr(config, "num_key_value_heads", config.num_attention_heads)
        head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
        expected_shape = (batch_size, num_heads, seq_length, head_dim)

        # Only full-attention layers hold KV tensors; linear layers keep conv/recurrent state.
        attention_layer_indices = past_key_values.transformer_layers
        self.assertListEqual(
            [past_key_values.key_cache[idx].shape for idx in attention_layer_indices],
            [expected_shape] * len(attention_layer_indices),
        )
        self.assertListEqual(
            [past_key_values.value_cache[idx].shape for idx in attention_layer_indices],
            [expected_shape] * len(attention_layer_indices),
        )

    def _check_caches_are_equal(self, cache1: Cache, cache2: Cache):
        """OlmoHybrid has a special Cache as it alternates with gated deltanet layers"""
        # Idiomatic comparison (was: `if not len(cache1) == len(cache2)`).
        if len(cache1) != len(cache2):
            raise ValueError("Both caches do not have the same number of layers.")

        num_layers = len(cache1)
        for idx in range(num_layers):
            # Linear-attention layers have no KV entries (None), so compare only filled slots.
            if cache1.key_cache[idx] is not None:
                torch.testing.assert_close(cache1.key_cache[idx], cache2.key_cache[idx])
                torch.testing.assert_close(cache1.value_cache[idx], cache2.value_cache[idx])

    # === Override test_attention_outputs (same pattern as Qwen3Next) ===
    def test_attention_outputs(self):
        """Needs to be overwritten as OlmoHybrid alternates between attention layers and gated deltanet layers."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        config._attn_implementation = "eager"

        seq_len = getattr(self.model_tester, "seq_length", None)

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class._from_config(config, attn_implementation="eager")
            config = model.config
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            # Attentions are only produced by the full-attention layers.
            self.assertEqual(len(attentions), sum(layer == "full_attention" for layer in config.layer_types))

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), sum(layer == "full_attention" for layer in config.layer_types))

            self.assertListEqual(list(attentions[0].shape[-3:]), [config.num_attention_heads, seq_len, seq_len])
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            self_attentions = outputs.attentions

            self.assertEqual(out_len + 1, len(outputs))
            self.assertEqual(len(self_attentions), sum(layer == "full_attention" for layer in config.layer_types))
            self.assertListEqual(list(self_attentions[0].shape[-3:]), [config.num_attention_heads, seq_len, seq_len])

    @unittest.skip("The specific cache format cannot be instantiated from dp/ddp data.")
    def test_multi_gpu_data_parallel_forward(self):
        pass
@require_torch
class OlmoHybridIntegrationTest(unittest.TestCase):
    """Slow integration tests comparing against pre-recorded logits / generations."""

    def setUp(self):
        cleanup(torch_device, gc_collect=True)

    def tearDown(self):
        cleanup(torch_device, gc_collect=True)

    @slow
    def test_model_logits(self):
        """Check per-token logit means and a logit slice against recorded values."""
        input_ids = [[1, 306, 4658, 278, 6593, 310, 2834, 338]]
        model = OlmoHybridForCausalLM.from_pretrained("hf-internal-testing/olmo-hybrid").to(
            torch_device, dtype=torch.bfloat16
        )
        out = model(torch.tensor(input_ids, device=torch_device)).logits.float()
        # Loose tolerances: bf16 inference accumulates noticeable numerical noise.
        rtol = 3e-2
        atol = 5e-2
        expectations = Expectations(
            {
                ("cuda", 8): [
                    [
                        -3.819033145904541,
                        -3.795485734939575,
                        -2.975806951522827,
                        -2.7940011024475098,
                        -3.548236131668091,
                        -4.012556552886963,
                        -4.722480773925781,
                        -4.015453338623047,
                    ]
                ]
            }
        )
        EXPECTED_MEAN = torch.tensor(expectations.get_expectation(), device=torch_device)
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, rtol=rtol, atol=atol)
        expectations = Expectations(
            {
                ("cuda", 8): [
                    3.828125,
                    -0.546875,
                    -1.7578125,
                    -2.203125,
                    -2.25,
                    -2.890625,
                    -0.87109375,
                    -1.21875,
                    -1.65625,
                    -2.78125,
                    -1.2890625,
                    0.8359375,
                    -2.578125,
                    0.8125,
                    -2.1875,
                    2.921875,
                    3.671875,
                    3.5625,
                    3.109375,
                    2.78125,
                    2.703125,
                    1.7578125,
                    1.890625,
                    2.21875,
                    1.8984375,
                    -2.5,
                    -2.03125,
                    -4.03125,
                    1.2421875,
                    -1.1328125,
                ]
            }
        )
        EXPECTED_SLICE = torch.tensor(expectations.get_expectation(), device=torch_device)
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, rtol=rtol, atol=atol)

    @slow
    def test_model_greedy_generation(self):
        """Greedy generation must reproduce the recorded completion exactly."""
        expectations = Expectations(
            {
                (
                    "cuda",
                    8,
                ): "Simply put, the theory of relativity states that \xa0the laws of physics are the same for all non-accelerating observers. This means that the laws of physics are the same for all observers, regardless of their relative motion or the strength of the gravitational field they are in. This theory was first proposed by Albert Einstein in 1905 and has since been confirmed",
            }
        )
        EXPECTED_TEXT_COMPLETION = expectations.get_expectation()
        prompt = "Simply put, the theory of relativity states that "
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/olmo-hybrid")
        # `dtype` replaces the deprecated `torch_dtype` kwarg (consistent with test_model_logits).
        model = OlmoHybridForCausalLM.from_pretrained(
            "hf-internal-testing/olmo-hybrid", device_map="auto", dtype=torch.bfloat16
        )
        input_ids = tokenizer.encode(prompt, return_tensors="pt").to(model.device)
        generated_ids = model.generate(input_ids, max_new_tokens=64, top_p=None, temperature=1, do_sample=False)
        text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
        self.assertEqual(EXPECTED_TEXT_COMPLETION, text)
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/olmo_hybrid/test_modeling_olmo_hybrid.py",
"license": "Apache License 2.0",
"lines": 216,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:src/transformers/integrations/fouroversix.py | import torch
from ..quantizers.quantizers_utils import get_module_from_name
from ..utils import is_fouroversix_available
if is_fouroversix_available():
from fouroversix import ModelQuantizationConfig
from transformers.utils.quantization_config import FourOverSixConfig
from ..core_model_loading import ConversionOps
class FourOverSixQuantize(ConversionOps):
    """
    Weight-conversion op that quantizes a high-precision checkpoint parameter into
    its fouroversix FP4 representation during model loading.
    """

    def __init__(self, hf_quantizer):
        # Keep a handle on the quantizer: one op is created per tensor, so any state
        # that must survive across tensors lives on the quantizer, not on the op.
        self.hf_quantizer = hf_quantizer

    def convert(
        self,
        input_dict: dict[str, torch.Tensor],
        model: torch.nn.Module | None = None,
        full_layer_name: str | None = None,
        # Annotated as a set: the body relies on the set-only `.discard` method.
        missing_keys: set[str] | None = None,
        **kwargs,
    ) -> dict[str, torch.Tensor]:
        """
        We need to store some parameters to create the quantized weight. For example, fouroversix
        requires 4 values that are stored in the checkpoint to recover the quantized weight. So we
        store them in a dict that is stored in hf_quantizer for now as we can't save it in the op
        since we create an op per tensor.
        """
        # When master weights are kept, load the checkpoint tensors untouched.
        if self.hf_quantizer.quantization_config.keep_master_weights:
            return input_dict

        module, _ = get_module_from_name(model, full_layer_name)
        module_name = full_layer_name.rsplit(".", 1)[0]
        # input_dict carries a single entry for this op; take its key without building a list.
        full_parameter_name = next(iter(input_dict))
        parameter_name = full_parameter_name.replace(f"{module_name}.", "", 1)
        # NOTE(review): values appear to be indexable collections of tensors; [0] takes
        # the (single) shard -- confirm against the weight-loading pipeline.
        parameter = input_dict[full_parameter_name][0]
        quantized_parameters = module.get_quantized_parameters(parameter_name, parameter)
        # Delete the high-precision parameters from the module after we used them to create
        # the quantized parameters. (Distinct loop variable: do not shadow `parameter_name`,
        # which still names the parameter being converted.)
        for stale_parameter_name in module.parameters_to_quantize:
            delattr(module, stale_parameter_name)
        # Remove these keys from the missing_keys set since we've deleted them from the model.
        for key in input_dict:
            missing_keys.discard(key)
        return {
            f"{module_name}.{quantized_key}": quantized_parameters[quantized_key]
            for quantized_key in quantized_parameters
        }
def adapt_fouroversix_config(config: FourOverSixConfig):
    """
    Translate a transformers `FourOverSixConfig` into the `ModelQuantizationConfig`
    expected by the `fouroversix` library. Every field is forwarded one-to-one.
    """
    forwarded_fields = (
        "activation_scale_rule",
        "dtype",
        "gradient_scale_rule",
        "keep_master_weights",
        "matmul_backend",
        "output_dtype",
        "quantize_backend",
        "scale_rule",
        "weight_scale_2d",
        "weight_scale_rule",
        "modules_to_not_convert",
        "module_config_overrides",
    )
    return ModelQuantizationConfig(**{name: getattr(config, name) for name in forwarded_fields})
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/integrations/fouroversix.py",
"license": "Apache License 2.0",
"lines": 58,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
huggingface/transformers:src/transformers/quantizers/quantizer_fouroversix.py | from typing import TYPE_CHECKING
from ..utils.import_utils import is_fouroversix_available
from .base import HfQuantizer
from .quantizers_utils import get_module_from_name
if TYPE_CHECKING:
from ..modeling_utils import PreTrainedModel
from ..utils import (
is_torch_available,
)
if is_torch_available():
import torch
class FourOverSixHfQuantizer(HfQuantizer):
    """
    FP4 quantization with fouroversix.
    """

    # Quantization happens at load time from regular checkpoints; no calibration pass.
    requires_calibration = False

    def __init__(self, quantization_config, **kwargs):
        super().__init__(quantization_config, **kwargs)

    def validate_environment(self, *args, **kwargs):
        """Raise an ImportError early if the `fouroversix` package is not installed."""
        if not is_fouroversix_available():
            raise ImportError(
                "Using `fouroversix` requires fouroversix: `pip install fouroversix --no-build-isolation`"
            )

    def param_element_size(
        self,
        model: "PreTrainedModel",
        param_name: str,
        param: "torch.Tensor",
    ) -> float:
        """Per-element byte size used for memory estimation (0.5 for 4-bit params)."""
        if self.param_needs_quantization(model, param_name):
            # 4-bit quantization
            return 0.5
        return super().param_element_size(model, param_name, param)

    def param_needs_quantization(
        self,
        model: "PreTrainedModel",
        param_name: str,
        **kwargs,
    ) -> bool:
        """A parameter is quantized iff its module is a fouroversix QuantizedModule listing it."""
        from fouroversix import QuantizedModule

        module, tensor_name = get_module_from_name(model, param_name)
        return (
            QuantizedModule.is_quantized_module_type(type(module))
            and hasattr(module, "parameters_to_quantize")
            and tensor_name in module.parameters_to_quantize
        )

    def adjust_max_memory(self, max_memory: dict[str, int | str]) -> dict[str, int | str]:
        # need more space for buffers that are created during quantization
        # NOTE(review): this assumes numeric values; a str entry such as "10GiB"
        # (allowed by the annotation) would raise on `* 0.9` -- confirm callers
        # always pass ints here.
        max_memory = {key: val * 0.9 for key, val in max_memory.items()}
        return max_memory

    def _process_model_before_weight_loading(
        self,
        model: "PreTrainedModel",
        device_map,
        **kwargs,
    ):
        """Swap eligible modules for fouroversix quantized modules before weights load."""
        from fouroversix import QuantizedModule, quantize_model

        from ..integrations.fouroversix import adapt_fouroversix_config

        quantize_model(
            model,
            adapt_fouroversix_config(self.quantization_config),
        )

        # If the model has already been quantized, we need to delete the weight tensor here so that
        # it's not expected when parameters are loaded from the checkpoint.
        if self.pre_quantized and not self.quantization_config.keep_master_weights:
            for _, module in model.named_modules():
                if QuantizedModule.is_quantized_module_type(type(module)):
                    for parameter_name in module.parameters_to_quantize:
                        delattr(module, parameter_name)

    def _process_model_after_weight_loading(self, model: "PreTrainedModel", **kwargs):
        # No post-processing needed; conversion happens during loading.
        return model

    def is_serializable(self):
        return True

    @property
    def is_trainable(self) -> bool:
        # Training requires the high-precision master weights to be retained.
        return self.quantization_config.keep_master_weights

    def get_quantize_ops(self):
        """Return the per-tensor conversion op used by the weight-loading pipeline."""
        from ..integrations.fouroversix import FourOverSixQuantize

        return FourOverSixQuantize(self)
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/quantizers/quantizer_fouroversix.py",
"license": "Apache License 2.0",
"lines": 79,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
huggingface/transformers:tests/quantization/fouroversix_integration/test_fouroversix.py | # Copyright 2026 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import tempfile
import unittest
from transformers import AutoModelForCausalLM, AutoTokenizer, FourOverSixConfig
from transformers.testing_utils import (
backend_empty_cache,
require_accelerate,
require_fouroversix,
require_torch_accelerator,
require_torch_multi_accelerator,
slow,
torch_device,
)
@require_torch_accelerator
class FourOverSixConfigTest(unittest.TestCase):
    """Round-trip tests for `FourOverSixConfig` <-> dict conversion."""

    def test_to_dict(self):
        """
        Simple test that checks if one uses a config and converts it to a dict, the dict is the same as the config object
        """
        quantization_config = FourOverSixConfig()
        config_to_dict = quantization_config.to_dict()

        for key in config_to_dict:
            self.assertEqual(getattr(quantization_config, key), config_to_dict[key])

    def test_from_dict(self):
        """
        Simple test that checks if one uses a dict and converts it to a config object, the config object is the same as the dict
        """
        # Named `config_dict` rather than `dict` so the builtin is not shadowed.
        config_dict = {
            "scale_rule": "mse",
            "quant_method": "fouroversix",
        }
        quantization_config = FourOverSixConfig.from_dict(config_dict)

        self.assertEqual(config_dict["scale_rule"], quantization_config.scale_rule)
        self.assertEqual(config_dict["quant_method"], quantization_config.quant_method)
@slow
@require_torch_accelerator
@require_fouroversix
@require_accelerate
class FourOverSixBaseTest(unittest.TestCase):
    """Shared end-to-end generation tests for FourOverSix quantization.

    Subclasses supply the concrete quantization config via `getQuantizationConfig`.
    """

    model_name = "unsloth/Llama-3.2-3B"
    input_text = "1 2 3 4"
    max_new_tokens = 4
    EXPECTED_OUTPUT = "1 2 3 4 5 6"
    device_map = torch_device

    @classmethod
    def getQuantizationConfig(cls):
        # Bug fix: the original body called `unittest.skip(...)`, which merely
        # *creates* a decorator and discards it, so the method fell through and
        # returned `None` — the base class would then run its tests with
        # `quantization_config=None`. Raising SkipTest from setUpClass properly
        # skips the abstract base class.
        raise unittest.SkipTest("Subclass must implement this method")

    # Called only once for all tests in this class
    @classmethod
    def setUpClass(cls):
        """
        Setup quantized model
        """
        cls.quantization_config = cls.getQuantizationConfig()
        cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name)
        cls.quantized_model = AutoModelForCausalLM.from_pretrained(
            cls.model_name,
            device_map=cls.device_map,
            quantization_config=cls.quantization_config,
        )

    def tearDown(self):
        # Free accelerator memory between tests so later loads do not OOM.
        gc.collect()
        backend_empty_cache(torch_device)
        gc.collect()

    def test_quantized_model(self):
        """
        Simple test that checks if the quantized model is working properly
        """
        input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device)
        output = self.quantized_model.generate(**input_ids, max_new_tokens=self.max_new_tokens)
        self.assertEqual(
            self.tokenizer.decode(output[0], skip_special_tokens=True),
            self.EXPECTED_OUTPUT,
        )

    def test_save_pretrained(self):
        """
        Simple test that checks if the quantized model is working properly after being saved and loaded
        """
        with tempfile.TemporaryDirectory() as tmpdirname:
            self.quantized_model.save_pretrained(tmpdirname)
            model = AutoModelForCausalLM.from_pretrained(tmpdirname, device_map=self.device_map)
            input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device)
            output = model.generate(**input_ids, max_new_tokens=self.max_new_tokens)
            self.assertEqual(
                self.tokenizer.decode(output[0], skip_special_tokens=True),
                self.EXPECTED_OUTPUT,
            )

    @require_torch_multi_accelerator
    def test_quantized_model_multi_accelerator(self):
        """
        Simple test that checks if the quantized model is working properly with multiple accelerators.
        Set CUDA_VISIBLE_DEVICES=0,1 if you have more than 2 CUDA GPUs.
        """
        input_ids = self.tokenizer(self.input_text, return_tensors="pt").to("cuda:0")
        quantized_model = AutoModelForCausalLM.from_pretrained(
            self.model_name,
            device_map="auto",
            quantization_config=self.quantization_config,
            # Cap device 0 so the weights are forced to spread over both devices.
            max_memory={0: "1GB", 1: "10GB"},
        )
        self.assertTrue(set(quantized_model.hf_device_map.values()) == {0, 1})
        output = quantized_model.generate(**input_ids, max_new_tokens=self.max_new_tokens)
        self.assertEqual(
            self.tokenizer.decode(output[0], skip_special_tokens=True),
            self.EXPECTED_OUTPUT,
        )

    @require_torch_multi_accelerator
    def test_save_pretrained_multi_accelerator(self):
        """
        Simple test that checks if the quantized model is working properly after being saved and loaded
        """
        with tempfile.TemporaryDirectory() as tmpdirname:
            self.quantized_model.save_pretrained(tmpdirname)
            model = AutoModelForCausalLM.from_pretrained(
                tmpdirname,
                device_map="sequential",
                max_memory={0: "1GB", 1: "10GB"},
            )
            self.assertTrue(set(model.hf_device_map.values()) == {0, 1})
            input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device)
            output = model.generate(**input_ids, max_new_tokens=self.max_new_tokens)
            self.assertEqual(
                self.tokenizer.decode(output[0], skip_special_tokens=True),
                self.EXPECTED_OUTPUT,
            )
class FourOverSixMSETest(FourOverSixBaseTest):
    """Run the shared FourOverSix tests with the default (MSE) scale rule."""

    @classmethod
    def getQuantizationConfig(cls):
        return FourOverSixConfig()
class FourOverSixStatic6Test(FourOverSixBaseTest):
    """Run the shared FourOverSix tests with the `static_6` scale rule."""

    @classmethod
    def getQuantizationConfig(cls):
        return FourOverSixConfig(scale_rule="static_6")
class FourOverSixKeepMasterWeightsTest(FourOverSixBaseTest):
    """Run the shared FourOverSix tests while retaining the master weights."""

    @classmethod
    def getQuantizationConfig(cls):
        return FourOverSixConfig(keep_master_weights=True)
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/quantization/fouroversix_integration/test_fouroversix.py",
"license": "Apache License 2.0",
"lines": 150,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:tests/models/phi4_multimodal/test_processing_phi4_multimodal.py | # Copyright 2026 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import unittest
import numpy as np
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
from ...test_processing_common import ProcessorTesterMixin
if is_vision_available():
from PIL import Image
from transformers import Phi4MultimodalProcessor
@require_vision
class Phi4MultimodalProcessorTest(ProcessorTesterMixin, unittest.TestCase):
    """Processor tests for Phi-4-multimodal; most cases come from ProcessorTesterMixin."""
    processor_class = Phi4MultimodalProcessor
    # Checkpoint is pinned to a PR revision of the official repo.
    checkpoint_path = "microsoft/Phi-4-multimodal-instruct"
    revision = "refs/pr/70"
    text_input_name = "input_ids"
    images_input_name = "image_pixel_values"
    audio_input_name = "audio_input_features"
    # Max-length values used in image-text kwargs tests. Override as phi4 needs lots of tokens for images.
    image_text_kwargs_max_length = 400
    image_text_kwargs_override_max_length = 396
    image_unstructured_max_length = 407
    # Max-length values used in audio-text kwargs tests. Override as phi4 needs lots of tokens for audio.
    audio_text_kwargs_max_length = 300
    audio_processor_tester_max_length = 117
    audio_unstructured_max_length = 76
    # Max-length values used in video-text kwargs tests. Override in subclasses if needed.
    video_text_kwargs_max_length = 167
    video_text_kwargs_override_max_length = 162
    video_unstructured_max_length = 176
    @classmethod
    def _setup_tokenizer(cls):
        # Load the tokenizer from the same pinned checkpoint revision as the processor.
        tokenizer_class = cls._get_component_class_from_processor("tokenizer")
        return tokenizer_class.from_pretrained(cls.checkpoint_path, revision=cls.revision)
    @classmethod
    def _setup_test_attributes(cls, processor):
        # Cache the special-token strings/ids used by the mixin's shared tests.
        cls.image_token = processor.image_token
        cls.image_token_id = processor.image_token_id
        cls.audio_token = processor.audio_token
        cls.audio_token_id = processor.audio_token_id
    # override: audio_attention_mask is returned conditionally, and not expected in the input names in this case
    def test_model_input_names(self):
        processor = self.get_processor()
        text = self.prepare_text_inputs(modalities=["image", "video", "audio"])
        image_input = self.prepare_image_inputs()
        video_inputs = self.prepare_video_inputs()
        audio_inputs = self.prepare_audio_inputs()
        inputs_dict = {"text": text, "images": image_input, "videos": video_inputs, "audio": audio_inputs}
        # Only pass the modalities that the processor's __call__ actually accepts.
        call_signature = inspect.signature(processor.__call__)
        input_args = [param.name for param in call_signature.parameters.values()]
        inputs_dict = {k: v for k, v in inputs_dict.items() if k in input_args}
        inputs = processor(**inputs_dict, return_tensors="pt")
        # audio_attention_mask is returned conditionally, and not expected in the input names in this case
        input_names_expected = set(processor.model_input_names) - {"audio_attention_mask"}
        self.assertSetEqual(set(inputs.keys()), input_names_expected)
    def test_dynamic_hd_kwarg_passed_to_image_processor(self):
        processor = self.get_processor()
        # 1000x1000 image: with size=448, w_crop_num=3, h_crop_num=3 -> 9 HD crops (1 global + 9 = 10 total)
        # With dynamic_hd=4: limits to 2x2 grid -> 4 HD crops (1 global + 4 = 5 total)
        arr = np.random.randint(255, size=(3, 1000, 1000), dtype=np.uint8)
        image_input = Image.fromarray(np.moveaxis(arr, 0, -1))
        input_str = self.prepare_text_inputs(modalities="image")
        inputs_default = processor(text=input_str, images=image_input, return_tensors="pt")
        inputs_limited = processor(
            text=input_str,
            images=image_input,
            dynamic_hd=4,
            return_tensors="pt",
        )
        # Crop counts match the comment above: 5 when limited, 10 by default.
        self.assertEqual(inputs_limited[self.images_input_name].shape[1], 5)
        self.assertEqual(inputs_default[self.images_input_name].shape[1], 10)
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/phi4_multimodal/test_processing_phi4_multimodal.py",
"license": "Apache License 2.0",
"lines": 83,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:src/transformers/models/colmodernvbert/modular_colmodernvbert.py | # Copyright 2026 Illuin Technology and contributors, and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from copy import deepcopy
from dataclasses import dataclass
from typing import Any, Optional, Union
import torch
from ...configuration_utils import PreTrainedConfig
from ...feature_extraction_utils import BatchFeature
from ...image_utils import ImageInput, is_valid_image
from ...processing_utils import Unpack
from ...tokenization_utils_base import TextInput
from ...utils import ModelOutput, TransformersKwargs, auto_docstring, logging
from ...utils.generic import can_return_tuple
from ..auto import CONFIG_MAPPING
from ..auto.modeling_auto import AutoModel
from ..colpali.modeling_colpali import ColPaliForRetrieval, ColPaliPreTrainedModel
from ..colqwen2.configuration_colqwen2 import ColQwen2Config
from ..idefics3.processing_idefics3 import Idefics3Processor, Idefics3ProcessorKwargs
logger = logging.get_logger(__name__)
class ColModernVBertConfig(ColQwen2Config):
    r"""
    Configuration class to store the configuration of a [`ColModernVBertForRetrieval`]. It is used to instantiate an instance
    of `ColModernVBertForRetrieval` according to the specified arguments, defining the model architecture following the methodology
    from the "ColPali: Efficient Document Retrieval with Vision Language Models" paper.
    Instantiating a configuration with the defaults will yield a similar configuration to the vision encoder used by the pre-trained
    ColModernVBert model, e.g. [ModernVBERT/colmodernvbert-merged](https://huggingface.co/ModernVBERT/colmodernvbert-merged).
    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.
    Args:
        vlm_config (`PreTrainedConfig`, *optional*):
            Configuration of the VLM backbone model.
        embedding_dim (`int`, *optional*, defaults to 128):
            Dimension of the multi-vector embeddings produced by the model.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
    Example:
    ```python
    from transformers import ColModernVBertConfig, ColModernVBertForRetrieval
    config = ColModernVBertConfig()
    model = ColModernVBertForRetrieval(config)
    ```
    """

    model_type = "colmodernvbert"
    sub_configs: dict[str, Any] = {"vlm_config": PreTrainedConfig}

    def __init__(
        self,
        vlm_config=None,
        embedding_dim: int = 128,
        initializer_range: float = 0.02,
        **kwargs,
    ):
        # Accept a ready config, a plain dict (must carry "model_type"), or None (defaults).
        if vlm_config is None:
            vlm_config = CONFIG_MAPPING["modernvbert"]()
            logger.info(
                "`vlm_config` is `None`. Initializing `vlm_config` with the `ModernVBertConfig` with default values."
            )
        elif isinstance(vlm_config, dict):
            vlm_config = deepcopy(vlm_config)
            if "model_type" not in vlm_config:
                raise KeyError(
                    "The `model_type` key is missing in the `vlm_config` dictionary. Please provide the model type."
                )
            vlm_config = CONFIG_MAPPING[vlm_config["model_type"]](**vlm_config)
        elif not isinstance(vlm_config, PreTrainedConfig):
            raise TypeError(
                f"Invalid type for `vlm_config`. Expected `PreTrainedConfig`, `dict`, or `None`, but got {type(vlm_config)}."
            )
        # Mirror the text config's vocab size at the top level when absent.
        if not hasattr(vlm_config, "vocab_size"):
            vlm_config.vocab_size = vlm_config.get_text_config().vocab_size

        self.vlm_config = vlm_config
        self.embedding_dim = embedding_dim
        self.initializer_range = initializer_range

        # Bug fix: the unbound call was missing `self`
        # (`PreTrainedConfig.__init__(**kwargs)`), which raises a TypeError the
        # moment this config is instantiated. The grandparent __init__ is called
        # directly on purpose, to bypass ColQwen2Config's own initialization.
        PreTrainedConfig.__init__(self, **kwargs)
class ColModernVBertProcessorKwargs(Idefics3ProcessorKwargs, total=False):
    # Default call kwargs for the processor: pad text to the longest item in the
    # batch, request row/col info with channels-first RGB images, and return
    # PyTorch tensors unless overridden by the caller.
    _defaults = {
        "text_kwargs": {
            "padding": "longest",
        },
        "images_kwargs": {
            "return_row_col_info": True,
            "data_format": "channels_first",
            "do_convert_rgb": True,
        },
        "common_kwargs": {"return_tensors": "pt"},
    }
class ColModernVBertProcessor(Idefics3Processor):
    r"""
    Constructs a ColModernVBert processor which wraps a ModernVBertProcessor and special methods to process images and queries, as
    well as to compute the late-interaction retrieval score.
    [`ColModernVBertProcessor`] offers all the functionalities of [`ModernVBertProcessor`]. See the [`~ModernVBertProcessor.__call__`]
    for more information.
    Args:
        image_processor ([`Idefics3ImageProcessor`]): An instance of [`Idefics3ImageProcessor`]. The image processor is a required input.
        tokenizer (`PreTrainedTokenizerFast`, *optional*): An instance of [`PreTrainedTokenizerFast`]. This should correspond with the model's text model. The tokenizer is a required input.
        image_seq_len (`int`, *optional*, defaults to 64): The length of the image sequence i.e. the number of <image> tokens per image in the input.
        visual_prompt_prefix (`Optional`, *optional*): A prefix to be prepended to visual prompts.
        query_prefix (`Optional`, *optional*): A prefix to be prepended to query prompts.
    """
    def __init__(
        self,
        image_processor,
        tokenizer=None,
        chat_template=None,
        image_seq_len: int = 64,
        visual_prompt_prefix: str | None = None,
        query_prefix: str | None = None,
        **kwargs,
    ):
        r"""
        image_seq_len (`int`, *optional*, defaults to 64):
            The length of the image sequence i.e. the number of <image> tokens per image in the input.
        visual_prompt_prefix (`str`, *optional*):
            A string that gets tokenized and prepended to the image tokens.
        query_prefix (`str`, *optional*):
            A prefix to be used for the query.
        """
        # Deliberately discard any caller-provided template.
        chat_template = None  # ColModernVBert does not use chat templates
        super().__init__(
            image_processor,
            tokenizer,
            chat_template=chat_template,
            image_seq_len=image_seq_len,
            **kwargs,
        )
        self.visual_prompt_prefix = visual_prompt_prefix or (
            f"<|begin_of_text|>User:{self.image_token}Describe the image.<end_of_utterance>\nAssistant:"
        )
        self.query_prefix = query_prefix or ""
        # Token appended to queries to pad their embeddings (see process_queries).
        self.query_augmentation_token = self.end_of_utterance_token
    def process_images(
        self,
        images: ImageInput | None = None,
        **kwargs: Unpack[ColModernVBertProcessorKwargs],
    ) -> BatchFeature:
        """
        Prepare for the model one or several image(s). Handles input validation, RGB conversion,
        and prepends the `visual_prompt_prefix` to each image. Optionally computes labels from
        `token_type_ids` when a `suffix` is provided in `text_kwargs`.
        Args:
            images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `list[PIL.Image.Image]`, `list[np.ndarray]`, `list[torch.Tensor]`):
                The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
                tensor. In case of a NumPy array/PyTorch tensor, each image should be of shape (C, H, W), where C is a
                number of channels, H and W are image height and width.
            return_tensors (`str` or [`~utils.TensorType`], *optional*):
                If set, will return tensors of a particular framework. Acceptable values are:
                - `'pt'`: Return PyTorch `torch.Tensor` objects.
                - `'np'`: Return NumPy `np.ndarray` objects.
        Returns:
            [`BatchFeature`]: A [`BatchFeature`] with the following fields:
            - **input_ids** -- List of token ids to be fed to a model.
            - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
              `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
              `None`).
            - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
        """
        output_kwargs = self._merge_kwargs(
            ColModernVBertProcessorKwargs,
            tokenizer_init_kwargs=self.tokenizer.init_kwargs,
            **kwargs,
        )
        suffix = output_kwargs["text_kwargs"].pop("suffix", None)
        return_token_type_ids = suffix is not None
        # Normalize input to a flat list of images
        if is_valid_image(images):
            images = [images]
        elif isinstance(images, list) and is_valid_image(images[0]):
            pass
        elif not (isinstance(images, list) and isinstance(images[0], list) and is_valid_image(images[0][0])):
            raise ValueError("images must be an image, list of images or list of list of images")
        # Ensure all images are in RGB format
        images = [image.convert("RGB") for image in images]
        # Pair each image with the visual prompt prefix for the VLM backbone
        batch_doc = self.__call__(
            text=[self.visual_prompt_prefix] * len(images),
            images=images,
            images_kwargs=output_kwargs["images_kwargs"],
            text_kwargs=output_kwargs["text_kwargs"],
        )
        # When suffix is provided, generate labels by masking non-suffix tokens
        # NOTE(review): this assumes the tokenizer output includes `token_type_ids`
        # marking suffix positions; confirm for the ModernVBERT tokenizer. The
        # popped `suffix` string itself is not appended to the text here.
        if return_token_type_ids:
            labels = batch_doc["input_ids"].masked_fill(batch_doc["token_type_ids"] == 0, -100)
            batch_doc.update({"labels": labels})
        return batch_doc
    def process_queries(
        self,
        text: TextInput | list[TextInput],
        **kwargs: Unpack[ColModernVBertProcessorKwargs],
    ) -> BatchFeature:
        """
        Prepare for the model one or several text queries. Handles input validation, prepends the
        `query_prefix`, and appends query augmentation tokens (used to pad query embeddings for
        better late-interaction retrieval performance).
        Args:
            text (`str`, `list[str]`, `list[list[str]]`):
                The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
                (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
                `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
            return_tensors (`str` or [`~utils.TensorType`], *optional*):
                If set, will return tensors of a particular framework. Acceptable values are:
                - `'pt'`: Return PyTorch `torch.Tensor` objects.
                - `'np'`: Return NumPy `np.ndarray` objects.
        Returns:
            [`BatchFeature`]: A [`BatchFeature`] with the following fields:
            - **input_ids** -- List of token ids to be fed to a model.
            - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
              `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
              `None`).
        """
        output_kwargs = self._merge_kwargs(
            ColModernVBertProcessorKwargs,
            tokenizer_init_kwargs=self.tokenizer.init_kwargs,
            **kwargs,
        )
        suffix = output_kwargs["text_kwargs"].pop("suffix", None)
        if isinstance(text, str):
            text = [text]
        elif not (isinstance(text, list) and isinstance(text[0], str)):
            raise ValueError("Text must be a string or a list of strings")
        # Default suffix: repeat the augmentation token to pad query embeddings
        if suffix is None:
            suffix = self.query_augmentation_token * 10
        # Build final queries: prefix + original query + augmentation suffix
        texts_query: list[str] = [self.query_prefix + query + suffix for query in text]
        batch_query = self.__call__(
            text=texts_query,
            return_token_type_ids=False,
            text_kwargs=output_kwargs["text_kwargs"],
        )
        return batch_query
    def score_retrieval(
        self,
        query_embeddings: Union["torch.Tensor", list["torch.Tensor"]],
        passage_embeddings: Union["torch.Tensor", list["torch.Tensor"]],
        batch_size: int = 128,
        output_dtype: Optional["torch.dtype"] = None,
        output_device: Union["torch.device", str] = "cpu",
    ) -> "torch.Tensor":
        """
        Compute the late-interaction/MaxSim score (ColBERT-like) for the given multi-vector
        query embeddings (`qs`) and passage embeddings (`ps`). For ColModernVBert, a passage is the
        image of a document page.
        Because the embedding tensors are multi-vector and can thus have different shapes, they
        should be fed as:
        (1) a list of tensors, where the i-th tensor is of shape (sequence_length_i, embedding_dim)
        (2) a single tensor of shape (n_passages, max_sequence_length, embedding_dim) -> usually
            obtained by padding the list of tensors.
        Args:
            query_embeddings (`Union[torch.Tensor, list[torch.Tensor]]`): Query embeddings.
            passage_embeddings (`Union[torch.Tensor, list[torch.Tensor]]`): Passage embeddings.
            batch_size (`int`, *optional*, defaults to 128): Batch size for computing scores.
            output_dtype (`torch.dtype`, *optional*): The dtype of the output tensor.
                If `None`, the dtype of the input embeddings is used.
            output_device (`torch.device` or `str`, *optional*, defaults to "cpu"): The device of the output tensor.
        Returns:
            `torch.Tensor`: A tensor of shape `(n_queries, n_passages)` containing the scores. The score
            tensor is saved on the "cpu" device.
        """
        if len(query_embeddings) == 0:
            raise ValueError("No queries provided")
        if len(passage_embeddings) == 0:
            raise ValueError("No passages provided")
        if query_embeddings[0].device != passage_embeddings[0].device:
            raise ValueError("Queries and passages must be on the same device")
        if query_embeddings[0].dtype != passage_embeddings[0].dtype:
            raise ValueError("Queries and passages must have the same dtype")
        if output_dtype is None:
            output_dtype = query_embeddings[0].dtype
        # Double batching keeps peak memory bounded: pad each query chunk and each
        # passage chunk to a rectangle, then score every (chunk, chunk) pair.
        scores: list[torch.Tensor] = []
        for i in range(0, len(query_embeddings), batch_size):
            batch_scores: list[torch.Tensor] = []
            batch_queries = torch.nn.utils.rnn.pad_sequence(
                query_embeddings[i : i + batch_size], batch_first=True, padding_value=0
            )
            for j in range(0, len(passage_embeddings), batch_size):
                batch_passages = torch.nn.utils.rnn.pad_sequence(
                    passage_embeddings[j : j + batch_size], batch_first=True, padding_value=0
                )
                # MaxSim: for each query token take the best-matching passage
                # token (max over s), then sum over query tokens (sum over n).
                batch_scores.append(
                    torch.einsum("bnd,csd->bcns", batch_queries, batch_passages).max(dim=3)[0].sum(dim=2)
                )
            scores.append(torch.cat(batch_scores, dim=1).to(output_dtype).to(output_device))
        return torch.cat(scores, dim=0)
@auto_docstring
class ColModernVBertPreTrainedModel(ColPaliPreTrainedModel):
    # All weight-init/loading behavior is inherited from ColPaliPreTrainedModel;
    # only the config class is specialized here.
    config: ColModernVBertConfig
@dataclass
@auto_docstring(
    custom_intro="""
    Base class for ColModernVBert embeddings output.
    """
)
class ColModernVBertForRetrievalOutput(ModelOutput):
    r"""
    loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
        Language modeling loss (for next-token prediction).
    embeddings (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
        The embeddings of the model.
    image_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True` and `pixel_values` are provided):
        Tuple of `torch.FloatTensor` (one for the output of the image modality projection + one for the output of each layer) of shape
        `(batch_size, num_channels, image_size, image_size)`.
        Hidden-states of the image encoder at the output of each layer plus the initial modality projection outputs.
    """
    loss: torch.FloatTensor | None = None
    embeddings: torch.Tensor | None = None
    # Per-layer hidden states of the VLM backbone, when requested.
    hidden_states: tuple[torch.FloatTensor] | None = None
    image_hidden_states: tuple[torch.FloatTensor] | None = None
    # Per-layer attention weights of the VLM backbone, when requested.
    attentions: tuple[torch.FloatTensor] | None = None
@auto_docstring(
    custom_intro="""
    Following the ColPali approach, ColModernVBert leverages VLMs to construct efficient multi-vector embeddings directly
    from document images (“screenshots”) for document retrieval. The model is trained to maximize the similarity
    between these document embeddings and the corresponding query embeddings, using the late interaction method
    introduced in ColBERT.
    Using ColModernVBert removes the need for potentially complex and brittle layout recognition and OCR pipelines with
    a single model that can take into account both the textual and visual content (layout, charts, ...) of a document.
    ColModernVBert is trained on top of ModernVBert, and was introduced in the following paper:
    [*ModernVBERT: Towards Smaller Visual Document Retrievers*](https://arxiv.org/abs/2510.01149).
    ColModernVBert is part of the ColVision model family, which was introduced with ColPali in the following paper:
    [*ColPali: Efficient Document Retrieval with Vision Language Models*](https://huggingface.co/papers/2407.01449).
    """
)
class ColModernVBertForRetrieval(ColPaliForRetrieval):
    _checkpoint_conversion_mapping = {}
    def __init__(self, config: ColModernVBertConfig):
        super().__init__(config)
        # Replace the parent-built backbone with one built from `vlm_config`.
        self.vlm = AutoModel.from_config(config.vlm_config)
        self.post_init()
    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        pixel_values: torch.FloatTensor | None = None,
        attention_mask: torch.Tensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> ColModernVBertForRetrievalOutput:
        vlm_output = self.vlm(
            input_ids=input_ids,
            attention_mask=attention_mask,
            pixel_values=pixel_values,
            **kwargs,
        )
        last_hidden_state = vlm_output[0]  # (batch_size, sequence_length, hidden_size)
        # Cast to the projection's dtype before projecting down to embedding_dim.
        proj_dtype = self.embedding_proj_layer.weight.dtype
        embeddings = self.embedding_proj_layer(last_hidden_state.to(proj_dtype))  # (batch_size, sequence_length, dim)
        # L2 normalization
        embeddings = embeddings / embeddings.norm(dim=-1, keepdim=True)  # (batch_size, sequence_length, dim)
        if attention_mask is not None:
            # Zero out embeddings at padded positions so they do not contribute to MaxSim scoring.
            attention_mask = attention_mask.to(dtype=embeddings.dtype, device=embeddings.device)
            embeddings = embeddings * attention_mask.unsqueeze(-1)  # (batch_size, sequence_length, dim)
        return ColModernVBertForRetrievalOutput(
            embeddings=embeddings,
            hidden_states=vlm_output.hidden_states,
            attentions=vlm_output.attentions,
            image_hidden_states=vlm_output.image_hidden_states,
        )
# Public symbols exported by this modular file.
__all__ = [
    "ColModernVBertConfig",
    "ColModernVBertForRetrieval",
    "ColModernVBertPreTrainedModel",
    "ColModernVBertProcessor",
]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/colmodernvbert/modular_colmodernvbert.py",
"license": "Apache License 2.0",
"lines": 373,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/modernvbert/modular_modernvbert.py | # Copyright 2026 Illuin Technology and contributors, and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from dataclasses import dataclass
from typing import Any, Literal
import torch
import torch.nn as nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ... import initialization as init
from ...configuration_utils import PretrainedConfig
from ...modeling_outputs import (
BaseModelOutput,
MaskedLMOutput,
SequenceClassifierOutput,
TokenClassifierOutput,
)
from ...modeling_utils import PreTrainedModel
from ...processing_utils import Unpack
from ...utils import TransformersKwargs, auto_docstring, logging
from ...utils.generic import can_return_tuple
from ..auto import CONFIG_MAPPING, AutoConfig, AutoModel
from ..modernbert.modeling_modernbert import ModernBertPredictionHead
from ..smolvlm.modeling_smolvlm import SmolVLMModel, SmolVLMPreTrainedModel
logger = logging.get_logger(__name__)
class ModernVBertConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`ModernVBert`] model. It is used to
    instantiate a ModernVBert model according to the specified arguments and defines the model architecture.
    e.g. [ModernVBERT/modernvbert](https://huggingface.co/ModernVBERT/modernvbert).
    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs.
    See the documentation for [`PretrainedConfig`] for more details.
    Args:
        text_config (`AutoConfig`, *optional*): Configuration for the text encoder.
        vision_config (`ModernVBertVisionConfig`, *optional*): Configuration for the vision encoder.
        image_token_id (`int | None`, *optional*, defaults to 50407): The token id reserved for image tokens inserted into the text stream.
        pixel_shuffle_factor (`int | None`, *optional*, defaults to 4): Scale factor used by any pixel-shuffle / upsampling operations in the vision head.
        initializer_range (`float | None`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        initializer_cutoff_factor (`float | None`, *optional*, defaults to 2.0): The cutoff factor for the truncated_normal_initializer for initializing all weight matrices.
        classifier_pooling (`Literal["cls", "mean"]`, *optional*, defaults to `"cls"`): The pooling strategy to use for classification tasks.
        classifier_dropout (`float | None`, *optional*, defaults to 0.0): The dropout probability for the classification head.
        classifier_bias (`bool | None`, *optional*, defaults to `False`): Whether to add a bias term to the classification head.
    Example:
    ```python
    >>> from transformers import ModernVBertConfig
    >>> # Initializing configuration
    >>> configuration = ModernVBertConfig()
    >>> # Initializing a model from the configuration (model class is implemented in
    >>> # `modernvbert.modeling_modernvbert`)
    >>> from transformers import ModernVBertModel
    >>> model = ModernVBertModel(configuration)
    >>> # Accessing the model configuration
    >>> cfg = model.config
    ```"""
    model_type = "modernvbert"
    sub_configs: dict[str, Any] = {"text_config": AutoConfig, "vision_config": AutoConfig}
    def __init__(
        self,
        text_config=None,
        vision_config=None,
        image_token_id: int | None = 50407,
        pixel_shuffle_factor: int | None = 4,
        initializer_range: float | None = 0.02,
        initializer_cutoff_factor: float | None = 2.0,
        classifier_pooling: Literal["cls", "mean"] = "cls",
        classifier_dropout: float | None = 0.0,
        classifier_bias: bool | None = False,
        **kwargs,
    ):
        # Validate before building sub-configs so bad input fails early.
        if classifier_pooling not in ["cls", "mean"]:
            raise ValueError(
                f'Invalid value for `classifier_pooling`, should be either "cls" or "mean", but is {classifier_pooling}.'
            )
        # NOTE(review): a dict `text_config` is always interpreted as a ModernBERT
        # config (any "model_type" key in the dict is ignored), unlike the
        # ColModernVBert config which dispatches on "model_type" — confirm intended.
        if text_config is None:
            text_config = CONFIG_MAPPING["modernbert"]()
        elif isinstance(text_config, dict):
            text_config = CONFIG_MAPPING["modernbert"](**text_config)
        self.text_config = text_config
        # Same fixed-mapping behavior for the vision side (always SigLIP vision).
        if vision_config is None:
            vision_config = CONFIG_MAPPING["siglip_vision_model"]()
        elif isinstance(vision_config, dict):
            vision_config = CONFIG_MAPPING["siglip_vision_model"](**vision_config)
        self.vision_config = vision_config
        self.pixel_shuffle_factor = pixel_shuffle_factor
        self.initializer_range = initializer_range
        self.initializer_cutoff_factor = initializer_cutoff_factor
        self.classifier_pooling = classifier_pooling
        self.classifier_dropout = classifier_dropout
        self.classifier_bias = classifier_bias
        # `image_token_id` is stored by the parent initializer via kwargs.
        super().__init__(image_token_id=image_token_id, **kwargs)
@dataclass
class ModernVBertBaseModelOutput(BaseModelOutput):
    """
    Base class for ModernVBERT model's outputs.
    Args:
        last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the model.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
            one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
            Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
        image_hidden_states (`tuple(torch.FloatTensor)`, *optional*):
            Tuple of `torch.FloatTensor` (one for the output of the image embeddings, `(batch_size, num_images,
            sequence_length, hidden_size)`.
            image_hidden_states of the model produced by the vision encoder
    """

    # All fields default to None; annotations widened with `| None` accordingly.
    last_hidden_state: torch.FloatTensor | None = None
    hidden_states: tuple[torch.FloatTensor, ...] | None = None
    attentions: tuple[torch.FloatTensor, ...] | None = None
    image_hidden_states: tuple[torch.FloatTensor, ...] | None = None
@dataclass
class ModernVBertMaskedLMOutput(MaskedLMOutput):
    """
    Base class for ModernVBERT model's outputs with masked language modeling loss.
    Args:
        loss (`torch.FloatTensor`, *optional*, returned when `labels` is provided):
            Masked language modeling (MLM) loss.
        logits (`torch.FloatTensor`):
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
            one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
            Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
        image_hidden_states (`tuple(torch.FloatTensor)`, *optional*):
            Tuple of `torch.FloatTensor` (one for the output of the image embeddings, `(batch_size, num_images,
            sequence_length, hidden_size)`.
            image_hidden_states of the model produced by the vision encoder
    """

    loss: torch.FloatTensor | None = None
    # `| None` added: logits are only populated after a forward pass.
    logits: torch.FloatTensor | None = None
    hidden_states: tuple[torch.FloatTensor, ...] | None = None
    attentions: tuple[torch.FloatTensor, ...] | None = None
    image_hidden_states: torch.FloatTensor | None = None
class ModernVBertConnector(nn.Module):
    """
    Connector module for ModernVBERT. It performs a pixel shuffle operation followed by a linear projection to match the text model's hidden size.
    Based on https://pytorch.org/docs/stable/generated/torch.nn.PixelShuffle.html
    """

    def __init__(self, config):
        super().__init__()
        self.pixel_shuffle_factor = config.pixel_shuffle_factor
        # Pixel shuffle folds a (factor x factor) patch into the channel axis,
        # hence the widened input dimension of the projection.
        self.modality_projection = nn.Linear(
            config.vision_config.hidden_size * (config.pixel_shuffle_factor**2),
            config.text_config.hidden_size,
            bias=False,
        )

    def pixel_shuffle(self, image_hidden_states, pixel_shuffle_factor):
        """Trade spatial resolution for channel depth: (B, S, D) -> (B, S / r**2, D * r**2)."""
        bsz, seq_len, dim = image_hidden_states.size()
        # The sequence is assumed to be a square grid of patches — TODO confirm callers.
        side = int(seq_len**0.5)
        reduced = side // pixel_shuffle_factor
        grid = image_hidden_states.view(bsz, side, side, dim)
        # Fold `factor` adjacent columns into the channel dimension.
        grid = grid.view(bsz, side, reduced, dim * pixel_shuffle_factor)
        grid = grid.permute(0, 2, 1, 3)
        # Fold `factor` adjacent rows as well, then restore (row, col) order.
        grid = grid.reshape(bsz, reduced, reduced, dim * pixel_shuffle_factor**2)
        grid = grid.permute(0, 2, 1, 3)
        return grid.reshape(bsz, seq_len // pixel_shuffle_factor**2, dim * pixel_shuffle_factor**2)

    def forward(self, image_hidden_states):
        shuffled = self.pixel_shuffle(image_hidden_states, self.pixel_shuffle_factor)
        return self.modality_projection(shuffled)
@auto_docstring
class ModernVBertPreTrainedModel(SmolVLMPreTrainedModel):
    config_class = ModernVBertConfig
    _no_split_modules = []

    @torch.no_grad()
    def _init_weights(self, module):
        """Default init from PreTrainedModel, plus truncated-normal re-init of the ModernVBert heads."""
        PreTrainedModel._init_weights(self, module)

        def _trunc_init(target: nn.Module, std: float):
            # Truncated normal, clipped at +/- cutoff * std (cutoff defaults to 2.0).
            cutoff = getattr(self.config, "initializer_cutoff_factor", 2.0)
            init.trunc_normal_(
                target.weight,
                mean=0.0,
                std=std,
                a=-cutoff * std,
                b=cutoff * std,
            )

        if isinstance(module, (nn.Linear, nn.Conv2d)) and module.bias is not None:
            init.zeros_(module.bias)

        cfg = self.config
        if isinstance(module, ModernVBertConnector):
            # Scale down with text depth, as for residual-branch outputs.
            _trunc_init(module.modality_projection, cfg.initializer_range / math.sqrt(2.0 * cfg.text_config.num_hidden_layers))
        elif isinstance(module, ModernVBertForMaskedLM):
            _trunc_init(module.lm_head, cfg.initializer_range / math.sqrt(2.0 * cfg.text_config.num_hidden_layers))
        elif isinstance(module, (ModernVBertForSequenceClassification, ModernVBertForTokenClassification)):
            # Final classifiers scale with hidden size instead of depth.
            _trunc_init(module.classifier, cfg.initializer_range / math.sqrt(cfg.text_config.hidden_size))
@auto_docstring(
    custom_intro="""
    ModernVBertModel is a model that combines a vision encoder (SigLIP) and a text encoder (ModernBert).
    ModernVBert is the base model of the visual retriver ColModernVBert, and was introduced in the following paper:
    [*ModernVBERT: Towards Smaller Visual Document Retrievers*](https://arxiv.org/abs/2510.01149).
    """
)
class ModernVBertModel(SmolVLMModel):
    def __init__(self, config: ModernVBertConfig):
        super().__init__(config)
        # init components
        self.connector = ModernVBertConnector(config)
        self.text_model = AutoModel.from_config(config.text_config)
        self.vision_model = AutoModel.from_config(config.vision_config)
        # Text positions each image occupies after pixel shuffle:
        # (patches per side)^2 divided by pixel_shuffle_factor^2.
        self.image_seq_len = int(
            ((config.vision_config.image_size // config.vision_config.patch_size) ** 2)
            / (config.pixel_shuffle_factor**2)
        )
        # initialize weights and apply final processing
        self.post_init()

    @can_return_tuple
    @auto_docstring(
        custom_intro="""
        Inputs fed to the model can have an arbitrary number of images. To account for this, pixel_values fed to
        the model have image padding -> (batch_size, max_num_images, 3, max_heights, max_widths) where
        max_num_images is the maximum number of images among the batch_size samples in the batch.
        Padding images are not needed beyond padding the pixel_values at the entrance of the model.
        For efficiency, we only pass through the vision_model's forward the real images by
        discarding the padding images i.e. pixel_values of size (image_batch_size, 3, height, width) where
        image_batch_size would be 7 when num_images_per_sample=[1, 3, 1, 2] and max_num_images would be 3.
        """,
        checkpoint="ModernVBERT/modernvbert",
    )
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        pixel_values: torch.FloatTensor | None = None,
        pixel_attention_mask: torch.BoolTensor | None = None,
        image_hidden_states: torch.FloatTensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple | ModernVBertBaseModelOutput:
        r"""
        pixel_attention_mask (`torch.Tensor` of shape `(batch_size, image_size, image_size)`, *optional*):
            Mask to avoid performing attention on padding pixel indices.
        image_hidden_states (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):
            The hidden states of the image encoder after modality projection.
        """
        # NOTE(review): assumes input_ids is provided whenever inputs_embeds is None;
        # the `.to(input_ids.device)` looks redundant since the embedding output
        # already lives on the input device — confirm before removing.
        if inputs_embeds is None:
            inputs_embeds = self.text_model.get_input_embeddings()(input_ids).to(input_ids.device)
        # Images processing
        if pixel_values is not None:
            # Encode raw pixels; get_image_features/inputs_merger are inherited from SmolVLMModel.
            image_hidden_states = self.get_image_features(
                pixel_values=pixel_values, pixel_attention_mask=pixel_attention_mask
            ).pooler_output
        # Merge image and text embeddings
        if image_hidden_states is not None:
            image_hidden_states = image_hidden_states.to(dtype=inputs_embeds.dtype, device=inputs_embeds.device)
            # Scatter image features into the positions marked by image tokens.
            inputs_embeds = self.inputs_merger(
                input_ids=input_ids, inputs_embeds=inputs_embeds, image_hidden_states=image_hidden_states
            )
        # Language model pass
        outputs = self.text_model(
            inputs_embeds=inputs_embeds,
            attention_mask=attention_mask,
            position_ids=position_ids,
            **kwargs,
        )
        return ModernVBertBaseModelOutput(
            last_hidden_state=outputs.last_hidden_state,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            image_hidden_states=image_hidden_states,
        )
class ModernVBertPredictionHead(ModernBertPredictionHead):
    # Unchanged from ModernBERT — presumably re-declared so the modular
    # converter emits a ModernVBert-named head; verify against generated code.
    pass
@auto_docstring
class ModernVBertForMaskedLM(ModernVBertPreTrainedModel):
    # The MLM decoder weight is tied to the text token embedding matrix.
    _tied_weights_keys = {"lm_head.weight": "model.text_model.embeddings.tok_embeddings.weight"}

    def __init__(self, config):
        super().__init__(config)
        self.vocab_size = config.text_config.vocab_size
        self.model = ModernVBertModel(config)
        self.projection_head = ModernVBertPredictionHead(config.text_config)
        self.lm_head = nn.Linear(config.text_config.hidden_size, self.vocab_size, bias=config.text_config.decoder_bias)
        # Initialize weights and apply final processing
        self.post_init()

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    @can_return_tuple
    @auto_docstring(
        custom_intro="""
        Inputs fed to the model can have an arbitrary number of images. To account for this, pixel_values fed to
        the model have image padding -> (batch_size, max_num_images, 3, max_heights, max_widths) where
        max_num_images is the maximum number of images among the batch_size samples in the batch.
        Padding images are not needed beyond padding the pixel_values at the entrance of the model.
        For efficiency, we only pass through the vision_model's forward the real images by
        discarding the padding images i.e. pixel_values of size (image_batch_size, 3, height, width) where
        image_batch_size would be 7 when num_images_per_sample=[1, 3, 1, 2] and max_num_images would be 3.
        """,
        checkpoint="ModernVBERT/modernvbert",
    )
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        pixel_values: torch.FloatTensor | None = None,
        pixel_attention_mask: torch.BoolTensor | None = None,
        image_hidden_states: torch.FloatTensor | None = None,
        labels: torch.LongTensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple | ModernVBertMaskedLMOutput:
        r"""
        pixel_attention_mask (`torch.Tensor` of shape `(batch_size, image_size, image_size)`, *optional*):
            Mask to avoid performing attention on padding pixel indices.
        image_hidden_states (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):
            The hidden states of the image encoder after modality projection.
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
            text_config.vocab_size]`. Tokens with indices set to `-100` are ignored (masked); the loss is only
            computed for the tokens with labels in `[0, ..., text_config.vocab_size]`.
        """
        outputs = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            inputs_embeds=inputs_embeds,
            pixel_values=pixel_values,
            pixel_attention_mask=pixel_attention_mask,
            image_hidden_states=image_hidden_states,
            **kwargs,
        )
        hidden_states = outputs[0]
        # Project through the MLM head, then score against the full vocabulary.
        logits = self.lm_head(self.projection_head(hidden_states))
        loss = None
        if labels is not None:
            # CrossEntropyLoss ignores positions labeled -100 by default.
            criterion = CrossEntropyLoss()
            loss = criterion(logits.view(-1, self.vocab_size), labels.view(-1))
        return ModernVBertMaskedLMOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            image_hidden_states=outputs.image_hidden_states,
        )
@auto_docstring(
    custom_intro="""
    The ModernVBert Model with a sequence classification head on top that performs pooling.
    """
)
class ModernVBertForSequenceClassification(ModernVBertPreTrainedModel):
    def __init__(self, config: ModernVBertConfig):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.config = config
        self.model = ModernVBertModel(config)
        self.head = ModernVBertPredictionHead(config.text_config)
        self.drop = nn.Dropout(config.classifier_dropout)
        self.classifier = nn.Linear(config.text_config.hidden_size, config.num_labels)
        # Initialize weights and apply final processing
        self.post_init()

    @can_return_tuple
    @auto_docstring(
        custom_intro="""
        Inputs fed to the model can have an arbitrary number of images. To account for this, pixel_values fed to
        the model have image padding -> (batch_size, max_num_images, 3, max_heights, max_widths) where
        max_num_images is the maximum number of images among the batch_size samples in the batch.
        Padding images are not needed beyond padding the pixel_values at the entrance of the model.
        For efficiency, we only pass through the vision_model's forward the real images by
        discarding the padding images i.e. pixel_values of size (image_batch_size, 3, height, width) where
        image_batch_size would be 7 when num_images_per_sample=[1, 3, 1, 2] and max_num_images would be 3.
        """,
        checkpoint="ModernVBERT/modernvbert",
    )
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        pixel_values: torch.FloatTensor | None = None,
        pixel_attention_mask: torch.BoolTensor | None = None,
        image_hidden_states: torch.FloatTensor | None = None,
        labels: torch.LongTensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple | SequenceClassifierOutput:
        r"""
        pixel_attention_mask (`torch.Tensor` of shape `(batch_size, image_size, image_size)`, *optional*):
            Mask to avoid performing attention on padding pixel indices.
        image_hidden_states (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):
            The hidden states of the image encoder after modality projection.
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in
            `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed
            (Mean-Square loss), if `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        outputs = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            inputs_embeds=inputs_embeds,
            pixel_values=pixel_values,
            pixel_attention_mask=pixel_attention_mask,
            image_hidden_states=image_hidden_states,
            **kwargs,
        )
        last_hidden_state = outputs[0]
        # Pool to a single vector per sequence, per the configured strategy.
        if self.config.classifier_pooling == "cls":
            last_hidden_state = last_hidden_state[:, 0]
        elif self.config.classifier_pooling == "mean":
            if inputs_embeds is not None:
                batch_size, seq_len = inputs_embeds.shape[:2]
            else:
                batch_size, seq_len = input_ids.shape[:2]
            device = input_ids.device if input_ids is not None else inputs_embeds.device
            if attention_mask is None:
                attention_mask = torch.ones((batch_size, seq_len), device=device, dtype=torch.bool)
            # Mask-weighted mean over non-padding positions.
            last_hidden_state = (last_hidden_state * attention_mask.unsqueeze(-1)).sum(dim=1) / attention_mask.sum(
                dim=1, keepdim=True
            )
        pooled_output = self.head(last_hidden_state)
        pooled_output = self.drop(pooled_output)
        logits = self.classifier(pooled_output)
        loss = None
        if labels is not None:
            # Infer the problem type once from num_labels and label dtype, then cache it on config.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        return SequenceClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
@auto_docstring(
    custom_intro="""
    The ModernVBert Model with a token classification head on top, e.g. for Named Entity Recognition (NER) tasks.
    """
)
class ModernVBertForTokenClassification(ModernVBertPreTrainedModel):
    def __init__(self, config: ModernVBertConfig):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.model = ModernVBertModel(config)
        self.head = ModernVBertPredictionHead(config.text_config)
        self.drop = nn.Dropout(config.classifier_dropout)
        self.classifier = nn.Linear(config.text_config.hidden_size, config.num_labels)
        # Initialize weights and apply final processing
        self.post_init()

    @can_return_tuple
    @auto_docstring(
        custom_intro="""
        Inputs fed to the model can have an arbitrary number of images. To account for this, pixel_values fed to
        the model have image padding -> (batch_size, max_num_images, 3, max_heights, max_widths) where
        max_num_images is the maximum number of images among the batch_size samples in the batch.
        Padding images are not needed beyond padding the pixel_values at the entrance of the model.
        For efficiency, we only pass through the vision_model's forward the real images by
        discarding the padding images i.e. pixel_values of size (image_batch_size, 3, height, width) where
        image_batch_size would be 7 when num_images_per_sample=[1, 3, 1, 2] and max_num_images would be 3.
        """,
        checkpoint="ModernVBERT/modernvbert",
    )
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        pixel_values: torch.FloatTensor | None = None,
        pixel_attention_mask: torch.BoolTensor | None = None,
        image_hidden_states: torch.FloatTensor | None = None,
        labels: torch.LongTensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple | TokenClassifierOutput:
        r"""
        pixel_attention_mask (`torch.Tensor` of shape `(batch_size, image_size, image_size)`, *optional*):
            Mask to avoid performing attention on padding pixel indices.
        image_hidden_states (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):
            The hidden states of the image encoder after modality projection.
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the token classification loss. Indices should be in
            `[0, ..., config.num_labels - 1]`.
        """
        outputs = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            inputs_embeds=inputs_embeds,
            pixel_values=pixel_values,
            pixel_attention_mask=pixel_attention_mask,
            image_hidden_states=image_hidden_states,
            **kwargs,
        )
        # Per-token classification: head + dropout + linear over every position.
        last_hidden_state = outputs[0]
        last_hidden_state = self.head(last_hidden_state)
        last_hidden_state = self.drop(last_hidden_state)
        logits = self.classifier(last_hidden_state)
        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
        return TokenClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
# Public API of the modular file; the converter exports exactly these symbols.
__all__ = [
    "ModernVBertConfig",
    "ModernVBertPreTrainedModel",
    "ModernVBertModel",
    "ModernVBertForMaskedLM",
    "ModernVBertForSequenceClassification",
    "ModernVBertForTokenClassification",
]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/modernvbert/modular_modernvbert.py",
"license": "Apache License 2.0",
"lines": 561,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:tests/models/colmodernvbert/test_modeling_colmodernvbert.py | # Copyright 2026 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch ColModernVBert model."""
import unittest
from typing import ClassVar
from huggingface_hub import hf_hub_download
from PIL import Image
from tests.test_configuration_common import ConfigTester
from tests.test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from transformers import (
is_torch_available,
)
from transformers.models.colmodernvbert.configuration_colmodernvbert import ColModernVBertConfig
from transformers.models.colmodernvbert.modeling_colmodernvbert import (
ColModernVBertForRetrieval,
ColModernVBertForRetrievalOutput,
)
from transformers.models.colmodernvbert.processing_colmodernvbert import ColModernVBertProcessor
from transformers.testing_utils import (
cleanup,
require_torch,
require_vision,
slow,
torch_device,
)
if is_torch_available():
import torch
class ColModernVBertForRetrievalModelTester:
    """Builds a tiny ColModernVBert config and dummy batch inputs for the model tests."""

    def __init__(
        self,
        parent,
        batch_size=2,
        num_images=2,
        ignore_index=-100,
        text_config=None,
        is_training=False,
        vision_config=None,
        pixel_shuffle_factor=2,
        embedding_dim=64,
    ):
        # Tiny default sub-configs keep the test model cheap to build and run.
        if text_config is None:
            text_config = {
                "vocab_size": 99,
                "pad_token_id": 0,
                "hidden_size": 32,
                "num_hidden_layers": 2,
                "num_attention_heads": 4,
                "intermediate_size": 64,
                "hidden_activation": "gelu",
                "mlp_dropout": 0.1,
                "attention_dropout": 0.1,
                "embedding_dropout": 0.1,
                "classifier_dropout": 0.1,
                "max_position_embeddings": 512,
                "type_vocab_size": 2,
                "is_decoder": False,
                "initializer_range": 0.02,
                "reference_compile": False,
            }
        if vision_config is None:
            vision_config = {
                "image_size": 16,
                "patch_size": 4,
                "hidden_size": 64,
                "num_hidden_layers": 2,
                "num_attention_heads": 4,
                "intermediate_size": 32,
                "dropout": 0.1,
                "attention_dropout": 0.1,
                "initializer_range": 0.02,
                "vision_use_head": False,
            }
        self.is_training = is_training
        self.parent = parent
        self.batch_size = batch_size
        self.text_config = text_config
        self.vision_config = vision_config
        self.num_images = num_images
        self.image_size = vision_config["image_size"]
        self.pixel_shuffle_factor = pixel_shuffle_factor
        # The last vocab id is reserved as the image placeholder token.
        self.image_token_id = self.text_config["vocab_size"] - 1
        self.pad_token_id = text_config["pad_token_id"]
        # Tokens contributed per image after pixel shuffle, times the image count.
        self.seq_length = (
            int(((vision_config["image_size"] // vision_config["patch_size"]) ** 2) / (pixel_shuffle_factor**2))
            * self.num_images
        )
        self.hidden_size = text_config["hidden_size"]
        self.num_hidden_layers = text_config["num_hidden_layers"]
        self.num_attention_heads = text_config["num_attention_heads"]
        self.ignore_index = ignore_index
        self.embedding_dim = embedding_dim
        self.vlm_config = {
            "model_type": "modernvbert",
            "text_config": self.text_config,
            "vision_config": self.vision_config,
            "image_token_id": self.image_token_id,
            "pixel_shuffle_factor": self.pixel_shuffle_factor,
        }

    def get_config(self):
        """Return a ColModernVBertConfig assembled from the tester's settings."""
        config = ColModernVBertConfig(
            vlm_config=self.vlm_config,
            embedding_dim=self.embedding_dim,
        )
        return config

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values) with random image pixels."""
        pixel_values = floats_tensor([self.batch_size, self.num_images, 3, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) with ids, mask, and pixel values for the common mixin tests."""
        config, pixel_values = self.prepare_config_and_inputs()
        input_ids = ids_tensor([self.batch_size, self.seq_length], config.vlm_config.text_config.vocab_size)
        # For simplicity just set the last n tokens to the image token
        n_image_tokens_per_batch = self.seq_length
        input_ids[:, -n_image_tokens_per_batch:] = self.image_token_id
        # Fix: removed a dead `torch.ones(...)` attention mask that was built
        # before the image tokens were written and immediately overwritten here.
        attention_mask = input_ids.ne(1).to(torch_device)
        inputs_dict = {
            "pixel_values": pixel_values,
            "input_ids": input_ids,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict
@require_torch
class ColModernVBertForRetrievalModelTest(ModelTesterMixin, unittest.TestCase):
    """
    Model tester for `ColModernVBertForRetrieval`.
    """

    all_model_classes = (ColModernVBertForRetrieval,) if is_torch_available() else ()
    test_resize_embeddings = True
    model_split_percents = [0.5, 0.8, 0.9]

    def setUp(self):
        # model_tester builds tiny configs/inputs; config_tester exercises the config API.
        self.model_tester = ColModernVBertForRetrievalModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ColModernVBertConfig, has_text_modality=False)

    @require_vision
    def test_colmodernvbert_forward_inputs(self):
        # Smoke test: one forward pass on tiny inputs must return the retrieval output type.
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            inputs = self._prepare_for_class(inputs_dict, model_class)
            with torch.no_grad():
                outputs = model(**inputs, return_dict=True)
            self.assertIsInstance(outputs, ColModernVBertForRetrievalOutput)

    @unittest.skip(reason="Error related to ModernBERT model parallelism: self.dtype is broken.")
    def test_multi_gpu_data_parallel_forward(self):
        pass
@require_torch
class ColModernVBertModelIntegrationTest(unittest.TestCase):
    """Slow end-to-end retrieval check against the released checkpoint."""

    model_name: ClassVar[str] = "paultltc/colmodernvbert_hf"

    def setUp(self):
        self.model_dtype = torch.float32
        self.processor = ColModernVBertProcessor.from_pretrained(self.model_name)
        self.model = (
            ColModernVBertForRetrieval.from_pretrained(
                self.model_name,
                dtype=self.model_dtype,
            )
            .to(torch_device)
            .eval()
        )

    def tearDown(self):
        cleanup(torch_device, gc_collect=True)

    @slow
    def test_model_integration_test(self):
        """
        Test if the model is able to retrieve the correct pages for a small and easy dataset.
        """
        # Load the test dataset
        queries = [
            "A paint on the wall",
            "ColModernVBERT matches the performance of models nearly 10x larger on visual document benchmarks.",
        ]
        images = [
            Image.open(hf_hub_download("HuggingFaceTB/SmolVLM", "example_images/rococo.jpg", repo_type="space")),
            Image.open(hf_hub_download("ModernVBERT/colmodernvbert", "table.png", repo_type="model")),
        ]
        # Preprocess the examples
        batch_queries = self.processor.process_queries(text=queries).to(torch_device)
        batch_images = self.processor.process_images(images=images).to(torch_device)
        # Run inference
        with torch.inference_mode():
            image_embeddings = self.model(**batch_images).embeddings
            query_embeddings = self.model(**batch_queries).embeddings
        # Compute retrieval scores
        scores = self.processor.score_retrieval(
            query_embeddings=query_embeddings,
            passage_embeddings=image_embeddings,
        )  # (num_queries, num_passages)
        scores = torch.softmax(scores, dim=-1)
        self.assertTrue(scores.ndim == 2, f"Expected 2D tensor, got {scores.ndim}")
        # Fix: the failure messages below were previously placed in a tuple next
        # to `assertTrue(...)` instead of being passed as `msg`, so they never
        # reached the assertion.
        self.assertEqual(
            scores.shape,
            (len(images), len(images)),
            f"Expected shape {(len(images), len(images))}, got {scores.shape}",
        )
        # Check if the maximum scores per row are in the diagonal of the matrix score
        self.assertTrue((scores.argmax(axis=1) == torch.arange(len(images), device=scores.device)).all())
        # Further validation: fine-grained check, with a hardcoded score from the original implementation
        expected_scores = torch.tensor(
            [[0.95181, 0.048189], [0.00057251, 0.99943]],
            dtype=scores.dtype,
        )
        self.assertTrue(
            torch.allclose(scores, expected_scores, atol=1e-2),
            f"Expected scores {expected_scores}, got {scores}",
        )
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/colmodernvbert/test_modeling_colmodernvbert.py",
"license": "Apache License 2.0",
"lines": 218,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:tests/models/colmodernvbert/test_processing_colmodernvbert.py | # Copyright 2026 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the ColModernVBert processor."""
import shutil
import tempfile
import unittest
import torch
from parameterized import parameterized
from transformers.models.colmodernvbert.processing_colmodernvbert import ColModernVBertProcessor
from transformers.testing_utils import get_tests_dir, require_torch, require_vision
from transformers.utils import is_vision_available
from ...test_processing_common import ProcessorTesterMixin
if is_vision_available():
from transformers import (
ColModernVBertProcessor,
)
# Path to a small fixture vocabulary shipped with the test suite.
SAMPLE_VOCAB = get_tests_dir("fixtures/vocab.txt")
@require_vision
class ColModernVBertProcessorTest(ProcessorTesterMixin, unittest.TestCase):
processor_class = ColModernVBertProcessor
    @classmethod
    def setUpClass(cls):
        # Save a local copy of the hub processor so every test loads from disk
        # instead of hitting the network repeatedly.
        cls.tmpdirname = tempfile.mkdtemp()
        processor = ColModernVBertProcessor.from_pretrained("ModernVBERT/colmodernvbert")
        processor.save_pretrained(cls.tmpdirname)
    @classmethod
    def tearDownClass(cls):
        # Best-effort cleanup of the shared temp dir created in setUpClass.
        shutil.rmtree(cls.tmpdirname, ignore_errors=True)
@require_torch
@require_vision
def test_process_images(self):
# Processor configuration
image_input = self.prepare_image_inputs()
image_processor = self.get_component("image_processor")
tokenizer = self.get_component("tokenizer", max_length=112, padding="max_length")
# Get the processor
processor = self.processor_class(
tokenizer=tokenizer,
image_processor=image_processor,
)
# Process the image
batch_feature = processor.process_images(images=image_input, return_tensors="pt")
# Assertions
self.assertIn("pixel_values", batch_feature)
# ModernVBert/Idefics3 usually resizes to something specific or keeps aspect ratio.
# Let's check if pixel_values are present and have correct type.
self.assertIsInstance(batch_feature["pixel_values"], torch.Tensor)
# Shape depends on image processor config, so we might not want to hardcode it unless we know defaults.
# Idefics3 default size is often dynamic or specific.
@require_torch
@require_vision
def test_process_queries(self):
# Inputs
queries = [
"Is attention really all you need?",
"Are Benjamin, Antoine, Merve, and Jo best friends?",
]
# Processor configuration
image_processor = self.get_component("image_processor")
tokenizer = self.get_component("tokenizer", max_length=112, padding="max_length")
# Get the processor
processor = self.processor_class(
tokenizer=tokenizer,
image_processor=image_processor,
)
# Process the queries
batch_feature = processor.process_queries(text=queries, return_tensors="pt")
# Assertions
self.assertIn("input_ids", batch_feature)
self.assertIsInstance(batch_feature["input_ids"], torch.Tensor)
self.assertEqual(batch_feature["input_ids"].shape[0], len(queries))
# The following tests override the parent tests because ColModernVBertProcessor can only take one of images or text as input at a time.
def test_tokenizer_defaults_preserved_by_kwargs(self):
if "image_processor" not in self.processor_class.get_attributes():
self.skipTest(f"image_processor attribute not present in {self.processor_class}")
processor_components = self.prepare_components()
processor_components["tokenizer"] = self.get_component("tokenizer", max_length=117, padding="max_length")
processor = self.processor_class(**processor_components)
self.skip_processor_without_typed_kwargs(processor)
input_str = self.prepare_text_inputs()
inputs = processor(text=input_str, return_tensors="pt")
self.assertEqual(inputs[self.text_input_name].shape[-1], 117)
def test_image_processor_defaults_preserved_by_image_kwargs(self):
"""
We use do_rescale=True, rescale_factor=-1.0 to ensure that image_processor kwargs are preserved in the processor.
We then check that the mean of the pixel_values is less than or equal to 0 after processing.
Since the original pixel_values are in [0, 255], this is a good indicator that the rescale_factor is indeed applied.
"""
if "image_processor" not in self.processor_class.get_attributes():
self.skipTest(f"image_processor attribute not present in {self.processor_class}")
processor_components = self.prepare_components()
processor_components["image_processor"] = self.get_component(
"image_processor", do_rescale=True, rescale_factor=-1.0
)
processor_components["tokenizer"] = self.get_component("tokenizer", max_length=117, padding="max_length")
processor = self.processor_class(**processor_components)
self.skip_processor_without_typed_kwargs(processor)
image_input = self.prepare_image_inputs()
inputs = processor(images=image_input, return_tensors="pt")
self.assertLessEqual(inputs[self.images_input_name][0][0].mean(), 0)
def test_kwargs_overrides_default_tokenizer_kwargs(self):
if "image_processor" not in self.processor_class.get_attributes():
self.skipTest(f"image_processor attribute not present in {self.processor_class}")
processor_components = self.prepare_components()
processor_components["tokenizer"] = self.get_component("tokenizer", padding="longest")
processor = self.processor_class(**processor_components)
self.skip_processor_without_typed_kwargs(processor)
input_str = self.prepare_text_inputs()
inputs = processor(text=input_str, return_tensors="pt", max_length=112, padding="max_length")
self.assertEqual(inputs[self.text_input_name].shape[-1], 112)
def test_kwargs_overrides_default_image_processor_kwargs(self):
if "image_processor" not in self.processor_class.get_attributes():
self.skipTest(f"image_processor attribute not present in {self.processor_class}")
processor_components = self.prepare_components()
processor_components["image_processor"] = self.get_component(
"image_processor", do_rescale=True, rescale_factor=1
)
processor_components["tokenizer"] = self.get_component("tokenizer", max_length=117, padding="max_length")
processor = self.processor_class(**processor_components)
self.skip_processor_without_typed_kwargs(processor)
image_input = self.prepare_image_inputs()
inputs = processor(images=image_input, do_rescale=True, rescale_factor=-1.0, return_tensors="pt")
self.assertLessEqual(inputs[self.images_input_name][0][0].mean(), 0)
def test_unstructured_kwargs(self):
if "image_processor" not in self.processor_class.get_attributes():
self.skipTest(f"image_processor attribute not present in {self.processor_class}")
processor_components = self.prepare_components()
processor = self.processor_class(**processor_components)
self.skip_processor_without_typed_kwargs(processor)
input_str = self.prepare_text_inputs()
inputs = processor(
text=input_str,
return_tensors="pt",
do_rescale=True,
rescale_factor=-1.0,
padding="max_length",
max_length=76,
)
self.assertEqual(inputs[self.text_input_name].shape[-1], 76)
def test_unstructured_kwargs_batched(self):
if "image_processor" not in self.processor_class.get_attributes():
self.skipTest(f"image_processor attribute not present in {self.processor_class}")
processor_components = self.prepare_components()
processor = self.processor_class(**processor_components)
self.skip_processor_without_typed_kwargs(processor)
image_input = self.prepare_image_inputs(batch_size=2)
inputs = processor(
images=image_input,
return_tensors="pt",
do_rescale=True,
rescale_factor=-1.0,
padding="longest",
max_length=76,
)
self.assertLessEqual(inputs[self.images_input_name][0][0].mean(), 0)
def test_doubly_passed_kwargs(self):
if "image_processor" not in self.processor_class.get_attributes():
self.skipTest(f"image_processor attribute not present in {self.processor_class}")
processor_components = self.prepare_components()
processor = self.processor_class(**processor_components)
self.skip_processor_without_typed_kwargs(processor)
image_input = self.prepare_image_inputs()
with self.assertRaises(ValueError):
_ = processor(
images=image_input,
images_kwargs={"do_rescale": True, "rescale_factor": -1.0},
do_rescale=True,
return_tensors="pt",
)
def test_structured_kwargs_nested(self):
if "image_processor" not in self.processor_class.get_attributes():
self.skipTest(f"image_processor attribute not present in {self.processor_class}")
processor_components = self.prepare_components()
processor = self.processor_class(**processor_components)
self.skip_processor_without_typed_kwargs(processor)
input_str = self.prepare_text_inputs()
# Define the kwargs for each modality
all_kwargs = {
"common_kwargs": {"return_tensors": "pt"},
"images_kwargs": {"do_rescale": True, "rescale_factor": -1.0},
"text_kwargs": {"padding": "max_length", "max_length": 15, "truncation": True},
}
inputs = processor(text=input_str, **all_kwargs)
self.skip_processor_without_typed_kwargs(processor)
self.assertEqual(inputs[self.text_input_name].shape[-1], 15)
def test_structured_kwargs_nested_from_dict(self):
if "image_processor" not in self.processor_class.get_attributes():
self.skipTest(f"image_processor attribute not present in {self.processor_class}")
processor_components = self.prepare_components()
processor = self.processor_class(**processor_components)
self.skip_processor_without_typed_kwargs(processor)
image_input = self.prepare_image_inputs()
# Define the kwargs for each modality
all_kwargs = {
"common_kwargs": {"return_tensors": "pt"},
"images_kwargs": {"do_rescale": True, "rescale_factor": -1.0},
}
inputs = processor(images=image_input, **all_kwargs)
self.assertLessEqual(inputs[self.images_input_name][0][0].mean(), 0)
# Can process only text or images at a time
def test_model_input_names(self):
processor = self.get_processor()
image_input = self.prepare_image_inputs()
inputs = processor(images=image_input)
# When only images are provided, pixel_values must be present
self.assertIn("pixel_values", inputs)
@unittest.skip(reason="ColModernVBert is meant to be used through `process_queries` or `process_images`.")
def test_tokenizer_defaults(self):
pass
@unittest.skip("ColModernVBert can't process text+image inputs at the same time")
def test_processor_text_has_no_visual(self):
pass
@unittest.skip("ColModernVBert can't process text+image inputs at the same time")
def test_processor_with_multiple_inputs(self):
pass
@unittest.skip("ColModernVBert can't process text+image inputs at the same time")
def test_get_num_multimodal_tokens_matches_processor_call(self):
pass
@unittest.skip("ColModernVBert does not have a chat template")
def test_chat_template_save_loading(self):
pass
@unittest.skip("ColModernVBert does not have a chat template")
def test_apply_chat_template_audio(self):
pass
@unittest.skip("ColModernVBert does not have a chat template")
def test_apply_chat_template_decoded_video(self):
pass
@unittest.skip("ColModernVBert does not have a chat template")
def test_apply_chat_template_video(self):
pass
@parameterized.expand([(1, "pt"), (2, "pt")])
@unittest.skip("ColModernVBert does not have a chat template")
def test_apply_chat_template_image(self, batch_size, return_tensors):
pass
@unittest.skip("ColModernVBert does not have a chat template")
def test_apply_chat_template_video_frame_sampling(self):
pass
@unittest.skip("ColModernVBert does not have a chat template")
def test_chat_template_audio_from_video(self):
pass
@unittest.skip("ColModernVBert does not have a chat template")
def test_chat_template_jinja_kwargs(self):
pass
@unittest.skip("ColModernVBert does not have a chat template")
def test_apply_chat_template_assistant_mask(self):
pass
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/colmodernvbert/test_processing_colmodernvbert.py",
"license": "Apache License 2.0",
"lines": 257,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:tests/models/modernvbert/test_modeling_modernvbert.py | # Copyright 2026 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch ModernVBERT model."""
import copy
import unittest
from typing import ClassVar
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
AutoProcessor,
AutoTokenizer,
ModernVBertConfig,
is_torch_available,
is_vision_available,
)
from transformers.testing_utils import (
cleanup,
require_torch,
slow,
torch_device,
)
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import (
ModelTesterMixin,
floats_tensor,
ids_tensor,
)
if is_torch_available():
import torch
from transformers import (
ModernBertConfig,
ModernVBertForMaskedLM,
ModernVBertForSequenceClassification,
ModernVBertForTokenClassification,
ModernVBertModel,
)
if is_vision_available():
from PIL import Image
class ModernVBertModelTester:
def __init__(
self,
parent,
batch_size=2,
num_images=2,
text_config=None,
is_training=True,
vision_config=None,
image_token_id: int = 98,
pixel_shuffle_factor=2,
num_labels=3,
use_labels=True,
type_sequence_label_size=2,
):
if text_config is None:
text_config = {
"vocab_size": 99,
"pad_token_id": 0,
"hidden_size": 32,
"num_hidden_layers": 2,
"num_attention_heads": 4,
"intermediate_size": 64,
"hidden_activation": "gelu",
"mlp_dropout": 0.1,
"attention_dropout": 0.1,
"embedding_dropout": 0.1,
"classifier_dropout": 0.1,
"max_position_embeddings": 512,
"type_vocab_size": 2,
"is_decoder": False,
"initializer_range": 0.02,
"reference_compile": False,
}
if vision_config is None:
vision_config = {
"image_size": 16,
"patch_size": 4,
"hidden_size": 64,
"num_hidden_layers": 2,
"num_attention_heads": 4,
"intermediate_size": 32,
"dropout": 0.1,
"attention_dropout": 0.1,
"initializer_range": 0.02,
}
self.parent = parent
self.batch_size = batch_size
self.text_config = ModernBertConfig(**text_config)
self.vision_config = vision_config
self.num_images = num_images
self.image_token_id = image_token_id
self.image_size = vision_config["image_size"]
self.pixel_shuffle_factor = pixel_shuffle_factor
self.seq_length = (
int(((vision_config["image_size"] // vision_config["patch_size"]) ** 2) / (pixel_shuffle_factor**2))
* self.num_images
)
self.vocab_size = text_config["vocab_size"]
self.num_hidden_layers = text_config["num_hidden_layers"]
self.hidden_size = text_config["hidden_size"]
self.num_attention_heads = text_config["num_attention_heads"]
self.is_training = is_training
self.num_labels = num_labels
self.use_labels = use_labels
self.type_sequence_label_size = type_sequence_label_size
def get_config(self):
config = ModernVBertConfig(
text_config=self.text_config,
vision_config=self.vision_config,
image_token_id=self.image_token_id,
pixel_shuffle_factor=self.pixel_shuffle_factor,
vocab_size=self.vocab_size,
attn_implementation={"text_config": "sdpa"},
)
return config
def prepare_config_and_inputs(self):
pixel_values = floats_tensor([self.batch_size, self.num_images, 3, self.image_size, self.image_size])
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
attention_mask = torch.ones(input_ids.shape, dtype=torch.long).to(torch_device)
# For simplicity just set the last n tokens to the image token
n_image_tokens_per_batch = self.seq_length
input_ids[:, -n_image_tokens_per_batch:] = self.image_token_id
attention_mask = input_ids.ne(1).to(torch_device)
sequence_labels = None
token_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
config = self.get_config()
# tie text-level args to top-level args for test purposes
config.pad_token_id = config.text_config.pad_token_id
config.bos_token_id = config.text_config.bos_token_id
config.eos_token_id = config.text_config.eos_token_id
config.tie_word_embeddings = config.text_config.tie_word_embeddings
return config, input_ids, attention_mask, pixel_values, sequence_labels, token_labels
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, input_ids, attention_mask, pixel_values, sequence_labels, token_labels = config_and_inputs
inputs_dict = {
"pixel_values": pixel_values,
"input_ids": input_ids,
"attention_mask": attention_mask,
}
return config, inputs_dict
def create_and_check_model(self, config, input_ids, input_mask, pixel_values, sequence_labels, token_labels):
model = ModernVBertModel(config=config)
model.to(torch_device)
model.eval()
inputs_dict = {
"pixel_values": pixel_values,
"input_ids": input_ids,
"attention_mask": input_mask,
}
result = model(**inputs_dict)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def create_and_check_for_masked_lm(
self, config, input_ids, input_mask, pixel_values, sequence_labels, token_labels
):
model = ModernVBertForMaskedLM(config=config)
model.to(torch_device)
model.eval()
inputs_dict = {
"pixel_values": pixel_values,
"input_ids": input_ids,
"attention_mask": input_mask,
"labels": token_labels,
}
result = model(**inputs_dict)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_for_sequence_classification(
self, config, input_ids, input_mask, pixel_values, sequence_labels, token_labels
):
config.num_labels = self.num_labels
model = ModernVBertForSequenceClassification(config)
model.to(torch_device)
model.eval()
inputs_dict = {
"pixel_values": pixel_values,
"input_ids": input_ids,
"attention_mask": input_mask,
"labels": sequence_labels,
}
result = model(**inputs_dict)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def create_and_check_for_token_classification(
self, config, input_ids, input_mask, pixel_values, sequence_labels, token_labels
):
config.num_labels = self.num_labels
model = ModernVBertForTokenClassification(config=config)
model.to(torch_device)
model.eval()
inputs_dict = {
"pixel_values": pixel_values,
"input_ids": input_ids,
"attention_mask": input_mask,
"labels": token_labels,
}
result = model(**inputs_dict)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
@require_torch
class ModernVBertModelTest(ModelTesterMixin, unittest.TestCase):
"""
Model tester for `ModernVBertForMaskedLM`.
"""
all_model_classes = (
(
ModernVBertModel,
ModernVBertForMaskedLM,
ModernVBertForSequenceClassification,
ModernVBertForTokenClassification,
)
if is_torch_available()
else ()
)
pipeline_model_mapping = (
{
"feature-extraction": ModernVBertModel,
"fill-mask": ModernVBertForMaskedLM,
"text-classification": ModernVBertForSequenceClassification,
"image-classification": ModernVBertForSequenceClassification,
"token-classification": ModernVBertForTokenClassification,
"zero-shot": ModernVBertForSequenceClassification,
}
if is_torch_available()
else {}
)
_is_composite = True
test_mismatched_shapes = False
skip_test_image_features_output_shape = True # ModernVBert merges batch_size with num_images in index 0
model_split_percents = [0.5, 0.8, 0.9]
def setUp(self):
self.model_tester = ModernVBertModelTester(self)
self.config_tester = ConfigTester(
self,
config_class=ModernVBertConfig,
has_text_modality=False, # Avoid the check for vocab_size, which is now in text_config
common_properties=None, # Common properties are now in text_config
)
def test_config(self):
self.config_tester.run_common_tests()
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_for_masked_lm(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
def test_for_sequence_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)
def test_for_token_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
# We need to override as we need to prepare such that the image token is the last token
def test_resize_tokens_embeddings(self):
(original_config, inputs_dict) = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
config = copy.deepcopy(original_config)
model = model_class(config)
model.to(torch_device)
if self.model_tester.is_training is False:
model.eval()
model_vocab_size = config.text_config.vocab_size
# Retrieve the embeddings and clone theme
model_embed = model.resize_token_embeddings(model_vocab_size)
cloned_embeddings = model_embed.weight.clone()
# Check that resizing the token embeddings with a larger vocab size increases the model's vocab size
model_embed = model.resize_token_embeddings(model_vocab_size + 10)
self.assertEqual(model.config.text_config.vocab_size, model_vocab_size + 10)
# Check that it actually resizes the embeddings matrix
self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] + 10)
# Check that the model can still do a forward pass successfully (every parameter should be resized)
model(**self._prepare_for_class(inputs_dict, model_class))
# Check that resizing the token embeddings with a smaller vocab size decreases the model's vocab size
model_embed = model.resize_token_embeddings(model_vocab_size - 15)
self.assertEqual(model.config.text_config.vocab_size, model_vocab_size - 15)
# Check that it actually resizes the embeddings matrix
self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] - 15)
# Ignore copy
# Check that the model can still do a forward pass successfully (every parameter should be resized)
# Input ids should be clamped to the maximum size of the vocabulary - 1 and the image token should be the last token
inputs_dict["input_ids"].clamp_(max=model_vocab_size - 15 - 2)
n_images = self.model_tester.num_images * self.model_tester.seq_length
model.image_token_id = model_vocab_size - 15 - 1
inputs_dict["input_ids"][:, -n_images:] = model.image_token_id
# make sure that decoder_input_ids are resized as well
if "decoder_input_ids" in inputs_dict:
inputs_dict["decoder_input_ids"].clamp_(max=model_vocab_size - 15 - 1)
model(**self._prepare_for_class(inputs_dict, model_class))
# Check that adding and removing tokens has not modified the first part of the embedding matrix.
models_equal = True
for p1, p2 in zip(cloned_embeddings, model_embed.weight):
if p1.data.ne(p2.data).sum() > 0:
models_equal = False
self.assertTrue(models_equal)
config = copy.deepcopy(original_config)
model = model_class(config)
model.to(torch_device)
model_vocab_size = config.text_config.vocab_size
model.resize_token_embeddings(model_vocab_size + 10, pad_to_multiple_of=1)
self.assertTrue(model.config.text_config.vocab_size + 10, model_vocab_size)
model_embed = model.resize_token_embeddings(model_vocab_size, pad_to_multiple_of=64)
self.assertTrue(model_embed.weight.shape[0] // 64, 0)
self.assertTrue(model_embed.weight.shape[0], model.config.text_config.vocab_size)
self.assertTrue(model.config.text_config.vocab_size, model.vocab_size)
model_embed = model.resize_token_embeddings(model_vocab_size + 13, pad_to_multiple_of=64)
self.assertTrue(model_embed.weight.shape[0] // 64, 0)
# Check that resizing a model to a multiple of pad_to_multiple leads to a model of exactly that size
target_dimension = 128
model_embed = model.resize_token_embeddings(target_dimension, pad_to_multiple_of=64)
self.assertTrue(model_embed.weight.shape[0], target_dimension)
with self.assertRaisesRegex(
ValueError,
"Asking to pad the embedding matrix to a multiple of `1.3`, which is not and integer. Please make sure to pass an integer",
):
model.resize_token_embeddings(model_vocab_size, pad_to_multiple_of=1.3)
# We need to override as we need to prepare such that the image token is the last token
def test_resize_embeddings_untied(self):
(original_config, inputs_dict) = self.model_tester.prepare_config_and_inputs_for_common()
original_config.tie_word_embeddings = False
for model_class in self.all_model_classes:
config = copy.deepcopy(original_config)
model = model_class(config).to(torch_device)
model.eval()
# if no output embeddings -> leave test
if model.get_output_embeddings() is None:
continue
# Check that resizing the token embeddings with a larger vocab size increases the model's vocab size
model_vocab_size = config.text_config.vocab_size
model.resize_token_embeddings(model_vocab_size + 10)
self.assertEqual(model.config.text_config.vocab_size, model_vocab_size + 10)
output_embeds = model.get_output_embeddings()
self.assertEqual(output_embeds.weight.shape[0], model_vocab_size + 10)
# Check bias if present
if output_embeds.bias is not None:
self.assertEqual(output_embeds.bias.shape[0], model_vocab_size + 10)
# Check that the model can still do a forward pass successfully (every parameter should be resized)
model(**self._prepare_for_class(inputs_dict, model_class))
# Check that resizing the token embeddings with a smaller vocab size decreases the model's vocab size
model.resize_token_embeddings(model_vocab_size - 15)
self.assertEqual(model.config.text_config.vocab_size, model_vocab_size - 15)
# Check that it actually resizes the embeddings matrix
output_embeds = model.get_output_embeddings()
self.assertEqual(output_embeds.weight.shape[0], model_vocab_size - 15)
# Check bias if present
if output_embeds.bias is not None:
self.assertEqual(output_embeds.bias.shape[0], model_vocab_size - 15)
# Check that the model can still do a forward pass successfully (every parameter should be resized)
# Input ids should be clamped to the maximum size of the vocabulary - 1 and the image token should be the last token
inputs_dict["input_ids"].clamp_(max=model_vocab_size - 15 - 2)
n_images = self.model_tester.num_images * self.model_tester.seq_length
model.image_token_id = model_vocab_size - 15 - 1
inputs_dict["input_ids"][:, -n_images:] = model.image_token_id
# Check that the model can still do a forward pass successfully (every parameter should be resized)
model(**self._prepare_for_class(inputs_dict, model_class))
@unittest.skip(reason="ModernVBERT model parallelism causes error: self.dtype is broken.")
def test_multi_gpu_data_parallel_forward(self):
pass
@unittest.skip(reason="Vision head's probe has no gradient.")
def test_training_gradient_checkpointing(self):
pass
@unittest.skip(reason="Vision head's probe has no gradient.")
def test_training_gradient_checkpointing_use_reentrant_false(self):
pass
@unittest.skip(reason="Vision head's probe has no gradient.")
def test_training_gradient_checkpointing_use_reentrant_true(self):
pass
@require_torch
class ModernVBertForMaskedLMIntegrationTest(unittest.TestCase):
model_name: ClassVar[str] = "paultltc/modernvbert_hf"
def setUp(self):
self.torch_dtype = torch.float32
self.processor = AutoProcessor.from_pretrained(self.model_name)
self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
self.model = (
ModernVBertForMaskedLM.from_pretrained(self.model_name, torch_dtype=self.torch_dtype)
.to(torch_device)
.eval()
)
def tearDown(self):
cleanup(torch_device, gc_collect=True)
@slow
def test_masked_lm_inference(self):
image = Image.open(hf_hub_download("HuggingFaceTB/SmolVLM", "example_images/rococo.jpg", repo_type="space"))
text = "This [MASK] is on the wall."
messages = [
{
"role": "user",
"content": [
{"type": "image"},
{"type": "text", "text": text},
],
},
]
prompt = self.processor.apply_chat_template(messages, add_generation_prompt=False)
inputs = self.processor(text=prompt, images=[image], return_tensors="pt").to(torch_device)
with torch.no_grad():
outputs = self.model(**inputs)
masked_index = inputs["input_ids"][0].tolist().index(self.tokenizer.mask_token_id)
masked_token_logits = outputs.logits[0, masked_index, :]
masked_token_probs = torch.softmax(masked_token_logits, dim=-1)
top_5_probs, top_5_indices = torch.topk(masked_token_probs, k=5, dim=-1)
EXPECTED_TOP_5_INDICES = torch.tensor([13497, 5406, 2460, 22946, 3665], device=torch_device)
EXPECTED_TOP_5_VALUES = torch.tensor([0.4986, 0.3550, 0.0415, 0.0235, 0.0199], device=torch_device)
self.assertTrue(torch.allclose(top_5_indices, EXPECTED_TOP_5_INDICES))
self.assertTrue(torch.allclose(top_5_probs, EXPECTED_TOP_5_VALUES, atol=1e-4, rtol=1e-4))
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/modernvbert/test_modeling_modernvbert.py",
"license": "Apache License 2.0",
"lines": 415,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:src/transformers/generation/continuous_batching/input_outputs.py | # Copyright 2026 The HuggingFace Inc. team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from functools import partial
from itertools import count
from typing import Any
import torch
from transformers.configuration_utils import PretrainedConfig
from ...utils.metrics import traced
from .cache import PagedAttentionCache
from .requests import TMP_TOKEN_ID, FutureRequestState
from .utils import CudaGraphBuffer, aligned_divide, attn_mask_is_needed, build_attention_mask
@dataclass
class PagedAttentionArgs:
"""Dataclass containing the keyword arguments for a forward pass using paged attention.
Attributes:
input_ids: Input token IDs tensor of shape `(1, total_query_tokens)`.
attention_mask: Attention mask tensor or dictionary mapping layer types to masks. Can be `None` if the
attention implementation doesn't require explicit masks.
position_ids: Position IDs tensor of shape `(1, total_query_tokens)`.
cu_seq_lens_q: Cumulative sequence lengths for queries, used for variable-length batching.
cu_seq_lens_k: Cumulative sequence lengths for keys/values. Can be a tensor or dictionary mapping layer
types (e.g., "full_attention", "sliding_attention") to tensors for hybrid models.
max_seqlen_q: Maximum query sequence length in the batch.
max_seqlen_k: Maximum key/value sequence length. Can be an int or dictionary for hybrid models.
write_index: List of tensors indicating where to write new KV states in the cache, one per attention group.
read_index: List of tensors indicating which cache positions to read from, one per attention group.
logits_indices: Tensor indicating which positions in the output should be used for next-token prediction.
cache: The [`PagedAttentionCache`] instance managing the KV cache.
use_cache: Whether to use caching (always `False` in continuous batching as the cache is managed externally).
"""
input_ids: torch.Tensor
attention_mask: torch.Tensor | dict[str, torch.Tensor] | None
position_ids: torch.Tensor
cu_seq_lens_q: torch.Tensor
cu_seq_lens_k: torch.Tensor | dict[str, torch.Tensor]
max_seqlen_q: int
max_seqlen_k: int | dict[str, int]
write_index: list[torch.Tensor]
read_index: list[torch.Tensor]
logits_indices: torch.Tensor
cache: PagedAttentionCache
use_cache: bool = False
def asdict(self) -> dict[str, Any]:
return {
"input_ids": self.input_ids,
"attention_mask": self.attention_mask,
"position_ids": self.position_ids,
"cu_seq_lens_q": self.cu_seq_lens_q,
"cu_seq_lens_k": self.cu_seq_lens_k,
"max_seqlen_q": self.max_seqlen_q,
"max_seqlen_k": self.max_seqlen_k,
"write_index": self.write_index,
"read_index": self.read_index,
"logits_indices": self.logits_indices,
"cache": self.cache,
"use_cache": self.use_cache,
}
class ContinuousBatchingIOs:
    """A class to hold inputs and outputs for a continuous batching forward pass, using static tensors as storage. The
    class is meant to be self-contained, so once a set of inputs has been created, the class can be used to update the
    batch alone. Static storage avoids per-step allocations and keeps tensor addresses stable for CUDA graphs.
    """
    def __init__(
        self,
        cache: PagedAttentionCache,
        config: PretrainedConfig,
        device: torch.device,
        model_dtype: torch.dtype,
        max_graphs: int = 32,
    ) -> None:
        """Initialize the continuous batching I/O manager. Args:
        - cache: The [`PagedAttentionCache`] instance managing the KV cache. Meant to be unique.
        - config: The model's pretrained configuration.
        - device: The device to allocate tensors on. If the device is CPU, then the memory is pinned.
        - model_dtype: The data type for model computations.
        - max_graphs: Maximum number of CUDA graphs to cache. Uses LRU eviction when full.
        """
        # Memoize attributes
        self.cache = cache
        self.device = device
        self.config = config
        self.model_dtype = model_dtype
        # A sliding window of 1 stands for "no sliding window" in the mask-building logic below
        self.sliding_window = 1 if getattr(config, "sliding_window", None) is None else config.sliding_window
        # Setup input-related accumulators: they track how much of the static storage the current batch actually uses
        self.actual_query_length = 0
        self.actual_key_length = 0
        self.actual_batch_size = 0
        self.actual_read_sizes = [0 for _ in range(cache.num_groups)]
        self.actual_write_sizes = [0 for _ in range(cache.num_groups)]
        # Setup other accumulators
        self.requests_in_batch: list[FutureRequestState] = []
        self.req_id_to_new_token_position: dict[str, int] = {} # only used for async API
        self.graphs: CudaGraphBuffer = CudaGraphBuffer(max_graphs)
        # Setup static tensors and compute stream (no dedicated compute stream on non-CUDA devices)
        self._setup_static_tensors()
        self._reset_static_tensors(full_reset=True)
        self.compute_stream = torch.cuda.Stream(device=self.device) if device.type == "cuda" else None
    @traced(standalone=True)
    def _setup_static_tensors(self) -> None:
        """Allocates static tensors for generation inputs and outputs. This is called only once at init time, to avoid
        repeated allocations and enable CUDA graphs. All tensors are allocated with maximum possible sizes.
        The allocated tensors are:
        - `_bulk_input_tensor`: Storage for all the small inputs: `input_ids`, `position_ids`, `cumulative_seqlens_q`,
        `logits_indices`, `cumulative_seqlens_k`, `carry_over_ids`.
        - `attention_mask`: Optional attention masks (only for eager/SDPA implementations)
        - `write_index` and `read_index` storage: Cache indexing tensors for each attention group
        - `output_ids`: Storage for generated token IDs
        """
        num_groups = self.cache.num_groups
        max_batch_tokens = self.cache.max_batch_tokens
        num_pages = self.cache.num_blocks * self.cache.block_size
        # Pinned memory is only relevant for host-side tensors (it enables fast async H2D/D2H copies)
        pin_memory = self.device.type == "cpu"
        # Small inputs are allocated as slices in a larger tensor aligned to 128 bytes (32 * 4b). This reduces
        # fragmentation, so it lowers the number of D2H transfers and speeds up transfers.
        bulk_size = aligned_divide(max_batch_tokens + 1, 1, 32)
        self._bulk_input_tensor = torch.empty(
            (7, bulk_size), dtype=torch.int32, device=self.device, pin_memory=pin_memory
        )
        # Each named input below is a row view into the bulk tensor, so one bulk copy transfers them all at once
        self.input_ids = self._bulk_input_tensor[0, :max_batch_tokens]
        self.position_ids = self._bulk_input_tensor[1, :max_batch_tokens]
        self.cumulative_seqlens_q = self._bulk_input_tensor[2, : max_batch_tokens + 1]
        self.logits_indices = self._bulk_input_tensor[3, :max_batch_tokens]
        full_attention_cumulative_seqlens_k = self._bulk_input_tensor[4, : max_batch_tokens + 1]
        sliding_attention_cumulative_seqlens_k = self._bulk_input_tensor[5, : max_batch_tokens + 1]
        self.carry_over_ids = self._bulk_input_tensor[6, :max_batch_tokens] # only used for async API
        # For sequence length of KV, the entries in the dict depend on the model
        self.cumulative_seqlens_k: dict[str, torch.Tensor] = {}
        if self.cache.num_full_attention_groups:
            self.cumulative_seqlens_k["full_attention"] = full_attention_cumulative_seqlens_k
        if self.cache.num_sliding_attention_groups:
            self.cumulative_seqlens_k["sliding_attention"] = sliding_attention_cumulative_seqlens_k
        # Output tensor and scalars
        self.output_ids = torch.empty(
            (max_batch_tokens + 1,), dtype=torch.int32, device=self.device, pin_memory=pin_memory
        )
        # Last output token is never changed and set to 0 for async carry on purpose
        self.output_ids.zero_()
        self.total_seqlen_q = 0
        self.max_seqlen_q = 0
        self.max_seqlen_k = dict.fromkeys(self.cumulative_seqlens_k.keys(), 0)
        # If the attention mask is needed, it is allocated separately (one mask per layer type)
        if attn_mask_is_needed(self.config):
            self.attention_mask = {}
            for layer_type in self.cumulative_seqlens_k.keys():
                self.attention_mask[layer_type] = torch.empty(
                    size=(1, 1, max_batch_tokens, num_pages + max_batch_tokens),
                    dtype=self.model_dtype,
                    device=self.device,
                    pin_memory=pin_memory,
                )
        else:
            self.attention_mask = None
        # For other kwargs, we need a list of tensors with as many tensors as there are groups
        self.write_index_storage = torch.empty(
            (num_groups, max_batch_tokens), dtype=torch.int32, device=self.device, pin_memory=pin_memory
        )
        self.read_index_storage = torch.empty(
            (num_groups, num_pages + max_batch_tokens), dtype=torch.int32, device=self.device, pin_memory=pin_memory
        )
        # For the read index, the extra `max_batch_tokens` slots exist because there are -1 entries for seqlen_q when
        # the model uses a sliding window
    def _transfer_inputs(
        self, other: "ContinuousBatchingIOs", stream: torch.cuda.Stream, non_blocking: bool = False
    ) -> None:
        """Copy every input (Python accumulators, scalar attributes and static tensors) from `self` into `other`.
        Tensor copies are enqueued on `stream`; `non_blocking=True` makes them asynchronous."""
        # Transfer accumulators (lists are shallow-copied so the two objects never share mutable state)
        other.actual_query_length = self.actual_query_length
        other.actual_key_length = self.actual_key_length
        other.actual_batch_size = self.actual_batch_size
        other.actual_read_sizes = self.actual_read_sizes[:]
        other.actual_write_sizes = self.actual_write_sizes[:]
        # Transfer scalar attributes
        other.total_seqlen_q = self.total_seqlen_q
        other.max_seqlen_q = self.max_seqlen_q
        other.max_seqlen_k = dict(self.max_seqlen_k.items())
        # Transfer static tensors
        with torch.cuda.stream(stream):
            other._bulk_input_tensor.copy_(self._bulk_input_tensor, non_blocking=non_blocking) # fast bulk transfer
            other.write_index_storage.copy_(self.write_index_storage, non_blocking=non_blocking)
            other.read_index_storage.copy_(self.read_index_storage, non_blocking=non_blocking)
            if self.attention_mask is not None and other.attention_mask is not None:
                for layer_type in self.attention_mask.keys():
                    other.attention_mask[layer_type].copy_(self.attention_mask[layer_type], non_blocking=non_blocking)
    @traced
    @torch.no_grad()
    def _reset_static_tensors(self, full_reset: bool = False) -> None:
        """Reset static tensors for the next batch. For efficiency, this only resets the portions of tensors that were
        actually used in the previous batch, using the attributes actual_query_length, actual_key_length, and
        actual_batch_size. If a full reset is requested, the entire tensor storage is reset.
        """
        # Compute the slice to reset
        q_len = self.write_index_storage.size(-1) if full_reset else self.actual_query_length
        k_len = self.read_index_storage.size(-1) if full_reset else self.actual_key_length
        # Reset the attributes part of the bulk input tensor in one kernel
        self._bulk_input_tensor[:, : q_len + 1].zero_()
        self.max_seqlen_q = 0
        # Reset the logits indices and output ids
        self.logits_indices[:q_len].zero_()
        self.output_ids[:q_len].zero_()
        # Reset the attributes that are either tensors or dict of tensors
        for layer_type in self.cumulative_seqlens_k:
            self.max_seqlen_k[layer_type] = 0
            if self.attention_mask is not None:
                # Masks are reset to the dtype's most negative value, i.e. "fully masked"
                self.attention_mask[layer_type][:, :, :q_len, :k_len].fill_(torch.finfo(self.model_dtype).min)
        # Reset the attributes that are lists of tensors
        self.write_index_storage[:, :q_len].fill_(-2) # -1 is used to tell the cache where new states go
        self.read_index_storage[:, : q_len + k_len].fill_(-2) # same
    # These getter functions help create a common interface for the sync and async IOs
    def get_cumulative_seqlens(self) -> tuple[torch.Tensor, dict[str, torch.Tensor]]:
        """Get the cumulative sequence lengths for the current batch."""
        return self.cumulative_seqlens_q, self.cumulative_seqlens_k
    def get_actual_lengths(self) -> tuple[int, int, int, list[int], list[int]]:
        """Get the query/key/batch sizes and per-group read/write sizes actually used by the current batch."""
        return (
            self.actual_query_length,
            self.actual_key_length,
            self.actual_batch_size,
            self.actual_read_sizes,
            self.actual_write_sizes,
        )
    def carry_over_tokens(self, input_ids: torch.Tensor) -> None:
        # No-op in the synchronous case; part of the common sync/async interface (see getter comment above)
        pass
    def retrieve_device_outputs(self) -> None:
        # Make sure compute has finished before the outputs are read on the host
        if self.compute_stream is not None:
            self.compute_stream.synchronize()
    def prepare_batch_update(self) -> tuple[list[FutureRequestState], list[int]]:
        """Return the requests of the current batch and the new token predicted for each (one per request)."""
        requests_in_batch = self.requests_in_batch
        new_tokens = self.output_ids[: len(self.requests_in_batch)].tolist()
        return requests_in_batch, new_tokens
    @traced
    def prepare_batch_tensors(self, requests_in_batch: list[FutureRequestState]) -> None:
        """Prepare tensors and metadata for the next model forward pass, using the given requests as data. This method:
        1. Resets the static tensors from the previous batch
        2. Iterates through requests to accumulate input_ids, position_ids, and sequence lengths
        3. Extends read/write indices for cache management
        4. Builds attention masks if needed (for eager/SDPA implementations)
        5. Converts accumulated lists to tensors and copies them to static storage
        This method also modifies the `position_offset` attribute of each request to track progress and adds a
        temporary token at the end of the requests for which there will be a new token.
        """
        # Keep track of these requests in the batch, which will be useful to update the batch later
        if not requests_in_batch:
            raise ValueError("No requests in batch")
        # Reset the static tensors used for storage
        self._reset_static_tensors() # FIXME: why does this make the generation faster?
        # Reset accumulators
        self.actual_query_length = 0
        self.actual_key_length = 0
        self.actual_batch_size = 0
        self.actual_read_sizes = [0 for _ in range(self.cache.num_groups)]
        self.actual_write_sizes = [0 for _ in range(self.cache.num_groups)]
        self.requests_in_batch = []
        self.req_id_to_new_token_position = {}
        # Prepare accumulators (built as Python lists first, converted to tensors only once at the end)
        input_ids = []
        position_ids = []
        cumulative_seqlens_q = [0]
        logits_indices = []
        cumulative_seqlens_k = {layer_type: [0] for layer_type in self.cumulative_seqlens_k.keys()}
        read_index = [[] for _ in range(self.cache.num_groups)]
        write_index = [[] for _ in range(self.cache.num_groups)]
        # Go through all the requests in the batch
        for future_state in requests_in_batch:
            # First we retrieve the lengths related to the request
            state = future_state.state
            past_length = state.position_offset
            query_length = len(state.tokens_to_process)
            seqlens_k = self.cache.get_seqlens_k(past_length, query_length)
            # Then we update the total lengths that are used for slicing
            self.actual_query_length += query_length
            # total_key_length is used to slice the keys so we need to take the max of all the key lengths
            self.actual_key_length += max(seqlens_k.values())
            self.actual_batch_size += 1
            # And the attribute tracking the position in the request object
            state.position_offset += query_length
            # Then we accumulate for the object used in the kwargs
            input_ids.extend(state.tokens_to_process)
            position_ids.extend(range(past_length, past_length + query_length))
            cumulative_seqlens_q.append(cumulative_seqlens_q[-1] + query_length)
            self.max_seqlen_q = max(self.max_seqlen_q, query_length)
            # Accumulate the key sequence lengths for the current request
            for layer_type, layer_type_seqlen_k in seqlens_k.items():
                cumulative_seqlens_k[layer_type].append(cumulative_seqlens_k[layer_type][-1] + layer_type_seqlen_k)
                self.max_seqlen_k[layer_type] = max(self.max_seqlen_k[layer_type], layer_type_seqlen_k)
            # We extend the read and write indices for the cache
            self.cache.extend_read_and_write_indices(
                state.request_id, past_length, query_length, read_index, write_index
            )
            # If the request has no remaining prefill tokens, it means the next token prediction is relevant
            if future_state.has_new_token:
                logits_indices.append(cumulative_seqlens_q[-1] - 1)
                state.tokens_to_process = [TMP_TOKEN_ID]
                self.req_id_to_new_token_position[state.request_id] = logits_indices[-1]
            self.requests_in_batch.append(future_state)
        # When looping over request is done, we can build the actual tensors. This is faster than modifying the static
        # tensors inside the loop.
        to_tensor = partial(torch.tensor, dtype=torch.int32, device=self.device)
        # Those kwargs always have the same type regardless of the model
        self.input_ids[: len(input_ids)] = to_tensor(input_ids)
        self.position_ids[: len(position_ids)] = to_tensor(position_ids)
        self.cumulative_seqlens_q[: len(cumulative_seqlens_q)] = to_tensor(cumulative_seqlens_q)
        self.logits_indices[: len(logits_indices)] = to_tensor(logits_indices)
        self.total_seqlen_q = cumulative_seqlens_q[-1]
        # Those kwargs are either dict of tensors or tensors, so we need to handle both cases
        for layer_type, layer_type_seqlens_k in cumulative_seqlens_k.items():
            self.cumulative_seqlens_k[layer_type][: len(layer_type_seqlens_k)] = to_tensor(layer_type_seqlens_k)
            if self.attention_mask is not None:
                build_attention_mask(
                    attention_mask=self.attention_mask[layer_type],
                    cumulative_seqlens_q=cumulative_seqlens_q,
                    cumulative_seqlens_k=layer_type_seqlens_k,
                    sliding_window=self.sliding_window if layer_type == "sliding_attention" else 1,
                )
        # The index only contain references to the storage tensors, so we update the storage and their references
        self.read_index = []
        self.write_index = []
        for i, group_read_indices, group_write_indices in zip(count(), read_index, write_index):
            self.read_index_storage[i, : len(group_read_indices)] = to_tensor(group_read_indices)
            self.write_index_storage[i, : len(group_write_indices)] = to_tensor(group_write_indices)
            self.actual_read_sizes[i] = len(group_read_indices)
            self.actual_write_sizes[i] = len(group_write_indices)
    def get_model_kwargs(self, padded_q_size: int = 0, padded_kv_cache_size: int = 0) -> dict[str, Any]:
        """Get model keyword arguments for the current batch, eventually padding the query dimension to (padded_q_size)
        and the keys/values dimension to (padded_kv_cache_size). The padding is only useful if we want static shapes,
        like when using cuda graphs AND only activated if both Q and KV are padded."""
        # Compute the slice to return, with the given padding if we are using cuda graphs
        use_padding = padded_q_size > 0 and padded_kv_cache_size > 0
        q_len = padded_q_size if use_padding else self.actual_query_length
        b_size = padded_q_size if use_padding else self.actual_batch_size
        # If there is padding, the size of the KV is the nb of padded Q tokens + the padded size of the KV cache
        padded_kv_size = padded_q_size + padded_kv_cache_size
        # Prepare the kwargs, the attributes that are either tensors or dict of tensors are initialized to empty dicts
        kwargs = PagedAttentionArgs(
            input_ids=self.input_ids[:q_len].unsqueeze(0),
            position_ids=self.position_ids[:q_len].unsqueeze(0),
            cu_seq_lens_q=self.cumulative_seqlens_q[: b_size + 1],
            max_seqlen_q=self.max_seqlen_q,
            logits_indices=self.logits_indices[:q_len],
            cu_seq_lens_k={},
            max_seqlen_k={},
            attention_mask={},
            read_index=[],
            write_index=[],
            cache=self.cache,
            use_cache=False,
        )
        # If we use constant-sized slicing, there are some "padding" queries tokens which FA has some issues with. In
        # some models like Qwen3-4B-Instruct-2507, if we don't include these tokens in cumulative_seqlens_q, there are
        # some NaNs in the output logits even for non-padded tokens.
        if use_padding:
            self.max_seqlen_q = max(self.max_seqlen_q, q_len - self.total_seqlen_q)
            kwargs.max_seqlen_q = self.max_seqlen_q
            self.cumulative_seqlens_q[self.actual_batch_size + 1 :] = q_len
            # FIXME: is there another way to avoid this? It has a very slight impact on performance (~5 tok/s)
        # For the attributes that are lists of tensors, we construct list of tensor references
        for i in range(self.cache.num_groups):
            read_index_size = padded_kv_size if use_padding else self.actual_read_sizes[i]
            write_index_size = padded_q_size if use_padding else self.actual_write_sizes[i]
            kwargs.read_index.append(self.read_index_storage[i, :read_index_size])
            kwargs.write_index.append(self.write_index_storage[i, :write_index_size])
        # For the attributes that are dict of tensors, we replace the dict with a tensor if there is only one entry
        layer_types = list(self.cumulative_seqlens_k.keys())
        if len(layer_types) > 1:
            kwargs.max_seqlen_k: dict[str, int] = {}
            kwargs.cu_seq_lens_k: dict[str, torch.Tensor] = {}
            kwargs.attention_mask: dict[str, torch.Tensor] = {}
            for layer_type, seqlens_k in self.cumulative_seqlens_k.items():
                kwargs.cu_seq_lens_k[layer_type] = seqlens_k[: b_size + 1]
                kwargs.max_seqlen_k[layer_type] = self.max_seqlen_k[layer_type]
                if self.attention_mask is not None:
                    k_len = padded_kv_size if use_padding else seqlens_k[b_size]
                    kwargs.attention_mask[layer_type] = self.attention_mask[layer_type][..., :q_len, :k_len]
        else:
            layer_type = layer_types[0]
            kwargs.cu_seq_lens_k = self.cumulative_seqlens_k[layer_type][: b_size + 1]
            kwargs.max_seqlen_k = self.max_seqlen_k[layer_type]
            if self.attention_mask is not None:
                k_len = padded_kv_size if use_padding else self.cumulative_seqlens_k[layer_type][b_size]
                kwargs.attention_mask = self.attention_mask[layer_type][..., :q_len, :k_len]
        if self.attention_mask is None:
            kwargs.attention_mask = None
        return kwargs.asdict() # TODO: this is imperfect, check if there is no better way to juggle dict / dataclass
class HostDeviceIOPair:
    """Bundles a host-side and a device-side `ContinuousBatchingIOs`, together with the CUDA events that order the
    three phases of an async step: host-to-device transfer, compute, and device-to-host transfer."""

    def __init__(
        self,
        cache: PagedAttentionCache,
        config: PretrainedConfig,
        device: torch.device,
        model_dtype: torch.dtype,
        max_graphs: int = 32,
    ) -> None:
        # Building the host IO on the CPU gives it pinned memory automatically
        cpu_device = torch.device("cpu")
        self.host_io = ContinuousBatchingIOs(cache, config, cpu_device, model_dtype, max_graphs)
        self.device_io = ContinuousBatchingIOs(cache, config, device, model_dtype, max_graphs)
        # One event per phase: H2D copy done, forward pass done, D2H copy done
        self.h2d_over, self.compute_over, self.d2h_over = (torch.cuda.Event() for _ in range(3))

    def transfer_inputs_h2d(self, stream: torch.cuda.Stream) -> None:
        """Asynchronously push the host-side inputs to the device on `stream`."""
        self.host_io._transfer_inputs(self.device_io, stream=stream, non_blocking=True)

    def transfer_outputs_d2h(self, stream: torch.cuda.Stream) -> None:
        """Asynchronously pull the generated token ids back to the host on `stream`."""
        with torch.cuda.stream(stream):
            self.host_io.output_ids.copy_(self.device_io.output_ids, non_blocking=True)
class ContinuousBatchingAsyncIOs:
    """A class to handle the inputs and outputs for the asynchronous API. It uses two IO pairs to avoid race conditions
    between the two batches, which means twice as much VRAM is used for static input tensors and CUDA graphs. If your
    GPU is large enough or you want to generate long sequences, this is a good trade-off to make.
    Asynchronous batching works by creating two pairs of host - device inputs and outputs:
                                 inputs
                    ┌──────────┐ ────────► ┌────────────┐
    IO pair object: │ Host IOs │           │ Device IOs │  (for a CUDA system, Host = CPU and Device = GPU)
                    └──────────┘ ◄──────── └────────────┘
                                 outputs
    Each pair is separate from the other. This means that each pair has its own CUDA graphs set, because CUDA graphs
    need to have static addresses for input tensors. To have a unique set of CUDA graphs, we would need to copy the
    input tensors to a third device-side buffer. This could limit the memory cost of CUDA graphs but would slow down
    the forward pass.
    But the CUDA streams orchestrating the transfer from host to device (H2D) and device to host (D2H) are the same for
    both pairs. Same for the compute stream.
    The order of steps in async batching looks like this (for 3 batches of compute):
          │ ┌────┬────┐     ┌────┬────┐     ┌────┬────┐     ┌────┐ ┌────┐
      CPU │ │PR 0│PR 1│     │UP 0│PR 2│     │UP 1│PR 3│     │UP 2│ │UP 3│
          │ └────┼───┬┴──┐  └────┴────┼───┐ └────┴────┼───┐ └────┘ └────┘
      H2D │      │0->│1->│       ¦    │2->│      ¦    │3->│    ¦             ¦
          │      └───┼───┴───────────┬─────────────┴─┬─┼───────────┴───┼───────────────┐    ¦
      GPU │          │   COMPUTE 0   │   COMPUTE 1   │█│   COMPUTE 2   │   COMPUTE 3   │    ¦
          │          └───────────────┼───┬───────────┼─┴─┬─────────────┼───┬───────────┼───┤
      D2H │                          │0<-│           │1<-│             │2<-│           │3<-│
          │                          └───┘           └───┘             └───┘           └───┘
    with: - CPU: actions happening on the CPU (host-side)
          - GPU: actions happening on the GPU (device-side)
          - H2D: host to device transfer
          - D2H: device to host transfer
    and:
          - PR N: preparation of batch N
          - ->N: host to device transfer of batch N
          - COMPUTE N: compute step for batch N
          - <-N: device to host transfer of batch N
          - UP N: update of batch N
    You can see that the GPU is almost always busy, except where the █ is.
    Proper ordering of steps is ensured through the use of CUDA events and streams.
    """
    def __init__(
        self,
        cache: PagedAttentionCache,
        config: PretrainedConfig,
        device: torch.device,
        model_dtype: torch.dtype,
        max_graphs: int = 32,
    ) -> None:
        # IO pairs used to avoid race conditions
        self.current_pair = 0
        self.io_pairs = [HostDeviceIOPair(cache, config, device, model_dtype, max_graphs) for _ in range(2)]
        # CUDA streams, shared between the two pairs (one per phase: transfer in, compute, transfer out)
        self.h2d_stream = torch.cuda.Stream(device=device)
        self.d2h_stream = torch.cuda.Stream(device=device)
        self.compute_stream = torch.cuda.Stream(device=device)
        # Set all unused compute streams to None (ordering is handled by this class, not by the IOs themselves)
        self.io_pairs[0].host_io.compute_stream = None
        self.io_pairs[0].device_io.compute_stream = None
        self.io_pairs[1].host_io.compute_stream = None
        self.io_pairs[1].device_io.compute_stream = None
        # Used in carry over ids computation
        self.max_batch_tokens = cache.max_batch_tokens
    # These methods are simple wrappers dispatching to the current IO pair
    def get_cumulative_seqlens(self) -> tuple[torch.Tensor, dict[str, torch.Tensor]]:
        return self.io_pairs[self.current_pair].host_io.get_cumulative_seqlens()
    def get_actual_lengths(self) -> tuple[int, int, int, list[int], list[int]]:
        return self.io_pairs[self.current_pair].host_io.get_actual_lengths()
    # The prepare_batch_tensors method also has to prepare the carry over ids
    def prepare_batch_tensors(self, requests_in_batch: list[FutureRequestState]) -> None:
        io_pair = self.io_pairs[self.current_pair]
        io_pair.host_io.prepare_batch_tensors(requests_in_batch)
        io_pair.host_io.carry_over_ids.copy_(self.infer_carry_over_ids())
    def infer_carry_over_ids(self) -> torch.Tensor:
        """Infers the ids of the tokens to carry over from batch N to batch N+1. In asynchronous batching mode, we can
        schedule a request for batch N+1 without knowing the token predicted for that request in batch N. For that
        reason, we might need to carry over tokens just predicted in batch N before launching the forward pass of batch
        N+1. This method computes the ids of the tokens to carry over."""
        next_req_id_to_new_token_position = self.io_pairs[self.current_pair].host_io.req_id_to_new_token_position
        prev_req_id_to_new_token_position = self.io_pairs[1 - self.current_pair].host_io.req_id_to_new_token_position
        carry_over_ids = [-1 for _ in range(self.max_batch_tokens)]
        # Carry over happens after the raw predictions have been indexed with logits_indices. So output_ids contains
        # a sequence of contiguous new tokens in the order the requests were added to the batch. Eg:
        # output_ids = [new_tok_req3, new_tok_req1, new_tok_req2]
        # Since it's also the order of req_id_to_new_token_position, we just iterate over the old positions and look for
        # a request_id match: if there is one, we carry the predicted token over to its new position.
        for i, req_id in enumerate(prev_req_id_to_new_token_position.keys()):
            new_token_position = next_req_id_to_new_token_position.get(req_id)
            if new_token_position is not None:
                carry_over_ids[new_token_position] = i
        return torch.tensor(carry_over_ids, dtype=torch.int32)
    # The get_model_kwargs method is where the H2D transfer happens
    def get_model_kwargs(self, padded_q_size: int = 0, padded_kv_cache_size: int = 0) -> dict[str, Any]:
        io_pair = self.io_pairs[self.current_pair]
        io_pair.transfer_inputs_h2d(self.h2d_stream)
        self.h2d_stream.record_event(io_pair.h2d_over)
        # Compute must not start before the inputs have landed on the device
        self.compute_stream.wait_event(io_pair.h2d_over)
        return io_pair.device_io.get_model_kwargs(padded_q_size, padded_kv_cache_size)
    def carry_over_tokens(self, input_ids: torch.Tensor) -> None:
        """As explained in the infer_carry_over_ids method, we might need to carry over tokens just predicted in batch
        N before launching the forward pass of batch N+1. This method performs the carry over, and is recorded in CUDA
        graphs if they are enabled."""
        # Retrieve previous batch output ids
        prev_output_ids = self.io_pairs[1 - self.current_pair].device_io.output_ids
        # Retrieve the carry over ids and mask
        carry_over_ids = self.io_pairs[self.current_pair].device_io.carry_over_ids
        # Compute tokens to carry over and the corresponding mask (-1 entries mean "nothing to carry over")
        carried_over_ids = prev_output_ids[carry_over_ids]
        carried_over_mask = (carry_over_ids != -1).int()
        # Truncate everything to the right size
        carried_over_ids = carried_over_ids[: input_ids.size(1)]
        carried_over_mask = carried_over_mask[: input_ids.size(1)]
        # Perform the carry over
        input_ids[0] = carried_over_ids * carried_over_mask + input_ids[0] * (1 - carried_over_mask)
    # This is called during compute, so we always pick the device IO in the IO pair
    @property
    def output_ids(self) -> torch.Tensor:
        # The output ids are used to copy_ the inferred tokens: they need to be on the device
        return self.io_pairs[self.current_pair].device_io.output_ids
    @property
    def graphs(self) -> CudaGraphBuffer:
        # Graphs are per-pair: CUDA graphs require static input tensor addresses (see class docstring)
        return self.io_pairs[self.current_pair].device_io.graphs
    # The retrieve_device_outputs method is where the D2H transfer happens AND where we switch IO pair
    def retrieve_device_outputs(self) -> None:
        io_pair = self.io_pairs[self.current_pair]
        # Wait for compute to finish before starting D2H transfer
        self.compute_stream.record_event(io_pair.compute_over)
        self.d2h_stream.wait_event(io_pair.compute_over)
        # Transfer the outputs to the host
        io_pair.transfer_outputs_d2h(self.d2h_stream)
        self.d2h_stream.record_event(io_pair.d2h_over)
        # Switch IO pair
        self.current_pair = 1 - self.current_pair
    # This method is called after the switch and not during the first batch
    def prepare_batch_update(self) -> tuple[list[FutureRequestState], list[int]]:
        io_pair = self.io_pairs[self.current_pair]
        # Block until this pair's D2H copy has completed before reading the outputs on the host
        io_pair.d2h_over.synchronize()
        return io_pair.host_io.prepare_batch_update()
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/generation/continuous_batching/input_outputs.py",
"license": "Apache License 2.0",
"lines": 547,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/utils/_typing.py | # Copyright 2025 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import logging
from collections.abc import Mapping, MutableMapping
from typing import Any, Protocol, TypeAlias
# A few helpful type aliases
Level: TypeAlias = int # a logging level, e.g. `logging.INFO`
# Accepted forms of the `exc_info` argument to the logging calls below
ExcInfo: TypeAlias = (
    None
    | bool
    | BaseException
    | tuple[type[BaseException], BaseException, object] # traceback is `types.TracebackType`, but keep generic here
)
class TransformersLogger(Protocol):
    """Structural (`Protocol`) type for the loggers used in transformers: the standard `logging.Logger`
    attribute and method surface, extended with the transformers-specific helpers declared at the bottom
    (`warning_advice`, `warning_once`, `info_once`)."""
    # ---- Core Logger identity / configuration ----
    name: str
    level: int
    parent: logging.Logger | None
    propagate: bool
    disabled: bool
    handlers: list[logging.Handler]
    # Exists on Logger; default is True. (Not heavily used, but is part of API.)
    raiseExceptions: bool  # type: ignore[assignment]
    # ---- Standard methods ----
    def setLevel(self, level: Level) -> None: ...
    def isEnabledFor(self, level: Level) -> bool: ...
    def getEffectiveLevel(self) -> int: ...
    def getChild(self, suffix: str) -> logging.Logger: ...
    def addHandler(self, hdlr: logging.Handler) -> None: ...
    def removeHandler(self, hdlr: logging.Handler) -> None: ...
    def hasHandlers(self) -> bool: ...
    # ---- Logging calls ----
    def debug(self, msg: object, *args: object, **kwargs: object) -> None: ...
    def info(self, msg: object, *args: object, **kwargs: object) -> None: ...
    def warning(self, msg: object, *args: object, **kwargs: object) -> None: ...
    def warn(self, msg: object, *args: object, **kwargs: object) -> None: ...
    def error(self, msg: object, *args: object, **kwargs: object) -> None: ...
    def exception(self, msg: object, *args: object, exc_info: ExcInfo = True, **kwargs: object) -> None: ...
    def critical(self, msg: object, *args: object, **kwargs: object) -> None: ...
    def fatal(self, msg: object, *args: object, **kwargs: object) -> None: ...
    # The lowest-level primitive
    def log(self, level: Level, msg: object, *args: object, **kwargs: object) -> None: ...
    # ---- Record-level / formatting ----
    def makeRecord(
        self,
        name: str,
        level: Level,
        fn: str,
        lno: int,
        msg: object,
        args: tuple[object, ...] | Mapping[str, object],
        exc_info: ExcInfo,
        func: str | None = None,
        extra: Mapping[str, object] | None = None,
        sinfo: str | None = None,
    ) -> logging.LogRecord: ...
    def handle(self, record: logging.LogRecord) -> None: ...
    def findCaller(
        self,
        stack_info: bool = False,
        stacklevel: int = 1,
    ) -> tuple[str, int, str, str | None]: ...
    def callHandlers(self, record: logging.LogRecord) -> None: ...
    def getMessage(self) -> str: ...  # NOTE: actually on LogRecord; included rarely; safe to omit if you want
    def _log(
        self,
        level: Level,
        msg: object,
        args: tuple[object, ...] | Mapping[str, object],
        exc_info: ExcInfo = None,
        extra: Mapping[str, object] | None = None,
        stack_info: bool = False,
        stacklevel: int = 1,
    ) -> None: ...
    # ---- Filters ----
    def addFilter(self, filt: logging.Filter) -> None: ...
    def removeFilter(self, filt: logging.Filter) -> None: ...
    @property
    def filters(self) -> list[logging.Filter]: ...
    def filter(self, record: logging.LogRecord) -> bool: ...
    # ---- Convenience helpers ----
    def setFormatter(self, fmt: logging.Formatter) -> None: ...  # mostly on handlers; present on adapters sometimes
    def debugStack(self, msg: object, *args: object, **kwargs: object) -> None: ...  # not std; safe no-op if absent
    # ---- stdlib dictConfig-friendly / extra storage ----
    # Logger has `manager` and can have arbitrary attributes; Protocol can't express arbitrary attrs,
    # but we can at least include `__dict__` to make "extra attributes" less painful.
    __dict__: MutableMapping[str, Any]
    # ---- Transformers logger specific methods ----
    def warning_advice(self, msg: object, *args: object, **kwargs: object) -> None: ...
    def warning_once(self, msg: object, *args: object, **kwargs: object) -> None: ...
    def info_once(self, msg: object, *args: object, **kwargs: object) -> None: ...
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/utils/_typing.py",
"license": "Apache License 2.0",
"lines": 103,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/higgs_audio_v2/generation_higgs_audio_v2.py | # Copyright 2025, The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import Any, Optional
import torch
import torch.nn as nn
from ...generation import (
GenerateDecoderOnlyOutput,
GenerationConfig,
GenerationMixin,
GenerationMode,
LogitsProcessorList,
StoppingCriteriaList,
)
from ...generation.logits_process import LogitsProcessor
from ...generation.streamers import BaseStreamer
from ...generation.utils import GenerateNonBeamOutput
from ...utils import add_start_docstrings, logging
logger = logging.get_logger(__name__)
LOGITS_PROCESSOR_INPUTS_DOCSTRING = r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. [What are input IDs?](../glossary#input-ids)
scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam
search or log softmax for each vocabulary token when using beam search
Return:
`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.
"""
class HiggsAudioV2DelayPatternLogitsProcessor(LogitsProcessor):
    r"""
    [`LogitsProcessor`] for Higgs Audio V2 text-to-speech model to handle codebook delay pattern.
    <Tip warning={true}>
    This logits processor is exclusively compatible with
    [Higgs Audio V2](https://huggingface.co/docs/transformers/main/en/model_doc/higgs_audio_v2)
    </Tip>
    Args:
        delay_pattern (list[int]):
            The delay pattern for the audio bos and eos tokens.
        audio_bos_token_id (int):
            The id of the audio bos token.
        audio_eos_token_id (int):
            The id of the audio eos token.
        audio_stream_bos_id (int):
            The id of the audio stream bos token.
        audio_stream_eos_id (int):
            The id of the audio stream eos token.
        num_codebooks (int):
            The number of codebooks in the audio stream.
        codebook_size (int):
            The size of each codebook in the audio stream.
    """

    def __init__(
        self,
        delay_pattern: list[int],
        audio_bos_token_id: int,
        audio_eos_token_id: int,
        audio_stream_bos_id: int,
        audio_stream_eos_id: int,
        num_codebooks: int,
        codebook_size: int,
    ):
        self.delay_pattern = torch.tensor(delay_pattern)
        self.audio_bos_token_id = audio_bos_token_id
        self.audio_eos_token_id = audio_eos_token_id
        self.audio_stream_bos_id = audio_stream_bos_id
        self.audio_stream_eos_id = audio_stream_eos_id
        self.num_codebooks = num_codebooks
        self.codebook_size = codebook_size
        # Per-(batch, codebook) countdown tensors; lazily initialized on the first __call__
        # so the batch size does not have to be known up front.
        self.bos_delay_pattern = None
        self.eos_delay_pattern = None
        # True everywhere EXCEPT at the stream bos/eos id; used below to force a codebook
        # to emit exactly the stream bos/eos token while its delay is active.
        self.vocab_mask_bos = torch.arange(codebook_size) != audio_stream_bos_id
        self.vocab_mask_eos = torch.arange(codebook_size) != audio_stream_eos_id

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
        # Work per-codebook: (batch, num_codebooks, codebook_size).
        scores = scores.reshape(-1, self.num_codebooks, self.codebook_size)
        batch_size = scores.shape[0]
        # we only look at the n-th last tokens to initialize the bos and eos delay patterns, where n is the delay pattern size
        delay_pattern_size = len(self.delay_pattern)
        input_ids = input_ids[:, -delay_pattern_size:]
        # Initialize bos delay pattern
        if self.bos_delay_pattern is None:
            self.bos_delay_pattern = self.delay_pattern.repeat(batch_size, 1)
            audio_bos_idxs = (input_ids == self.audio_bos_token_id).nonzero()
            if len(audio_bos_idxs) > 0:
                batch_idxs = audio_bos_idxs[:, 0]
                # Keep only the FIRST bos occurrence per batch row (nonzero() is row-major sorted).
                is_first = torch.cat([batch_idxs.new_ones(1, dtype=torch.bool), batch_idxs[1:] != batch_idxs[:-1]])
                min_bos_idxs = audio_bos_idxs[is_first]
                # Steps already elapsed since each row's audio bos token inside the window.
                current_after_bos = (delay_pattern_size - min_bos_idxs[:, 1]).unsqueeze(-1)
                unique_batch_idxs = batch_idxs.unique().to(self.bos_delay_pattern.device)
                self.bos_delay_pattern[unique_batch_idxs] = self.bos_delay_pattern[
                    unique_batch_idxs
                ] - current_after_bos.to(self.bos_delay_pattern.device)
            else:
                # there is no audio bos token,
                self.bos_delay_pattern = torch.zeros_like(self.bos_delay_pattern)
        # Initialize eos delay pattern
        if self.eos_delay_pattern is None:
            self.eos_delay_pattern = self.delay_pattern.repeat(batch_size, 1)
            audio_eos_idxs = (input_ids == self.audio_eos_token_id).nonzero()
            if len(audio_eos_idxs) > 0:
                batch_idxs = audio_eos_idxs[:, 0]
                is_first = torch.cat([batch_idxs.new_ones(1, dtype=torch.bool), batch_idxs[1:] != batch_idxs[:-1]])
                min_eos_idxs = audio_eos_idxs[is_first]
                current_after_eos = (delay_pattern_size - min_eos_idxs[:, 1]).unsqueeze(-1)
                unique_batch_idxs = batch_idxs.unique().to(self.eos_delay_pattern.device)
                self.eos_delay_pattern[unique_batch_idxs] = self.eos_delay_pattern[
                    unique_batch_idxs
                ] - current_after_eos.to(self.eos_delay_pattern.device)
        # at each generation step, we decrement the bos delay pattern
        # While a codebook's bos delay is still >= 0, force it to emit the stream bos token.
        row_mask = self.bos_delay_pattern >= 0
        scores[(row_mask[..., None] & self.vocab_mask_bos).to(scores.device)] = -float("inf")
        self.bos_delay_pattern[row_mask] -= 1
        # when the audio eos token is generated, we decrement the eos delay pattern
        self.eos_delay_pattern[input_ids[:, -1].to(self.eos_delay_pattern.device) == self.audio_eos_token_id] -= 1
        # Once a codebook's eos delay has run out (<= 0), force it to emit the stream eos token.
        row_mask = self.eos_delay_pattern <= 0
        scores[(row_mask[..., None] & self.vocab_mask_eos).to(scores.device)] = -float("inf")
        return scores.reshape(-1, self.codebook_size)
@dataclass
class HiggsAudioV2GenerationOutput(GenerateDecoderOnlyOutput):
    """
    Output type of HiggsAudioV2 generation models when decoding with non-beam methods.

    Extends [`GenerateDecoderOnlyOutput`] with the discrete audio codes produced during
    generation.

    Args:
        sequences (`torch.LongTensor` of shape `(batch_size, audio_sequence_length, num_codebooks)`):
            The generated text sequences. The second dimension (sequence_length) equals `max_length`,
            or is shorter when every batch entry stopped early because of the `eos_token_id`.
        scores (`tuple(torch.FloatTensor)` *optional*, returned when `output_scores=True`):
            Processed language-modeling-head scores (per-vocabulary-token scores before SoftMax) at
            each generation step — a tuple with at most `max_new_tokens` entries, one per generated
            token. Text-token steps have shape `(batch_size, config.vocab_size)`; audio-token steps
            have shape `(config.num_codebooks, self.model.codebook_size)`.
        logits (`tuple(torch.FloatTensor)` *optional*, returned when `output_logits=True`):
            Unprocessed scores from the language-modeling head or the audio head (per-vocabulary-token
            scores before SoftMax) at each generation step — a tuple with at most `max_new_tokens`
            entries, one per generated token. Text-token steps have shape
            `(batch_size, config.vocab_size)`; audio-token steps have shape
            `(config.num_codebooks, self.model.codebook_size)`.
        attentions (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_attentions=True`):
            One tuple per generated token, each containing one tensor per decoder layer of shape
            `(batch_size, num_heads, generated_length, sequence_length)`.
        hidden_states (`tuple(tuple(torch.FloatTensor))`, *optional*):
            One tuple per generated token, each containing one tensor per decoder layer of shape
            `(batch_size, generated_length, hidden_size)`.
        past_key_values (`tuple(tuple(torch.FloatTensor)))`, *optional*, returned when `use_cache=True`):
            The model cache, used to speed up decoding. The exact format is model-specific — see the
            model's documentation. Usually a [`~cache_utils.Cache`] instance.
        audio_sequences (`tuple(torch.LongTensor)` *optional*):
            The generated discrete audio codes.
    """

    audio_sequences: list[torch.LongTensor] | None = None
class HiggsAudioV2GenerationMixin(GenerationMixin):
    """Generation mixin that samples multi-codebook audio tokens alongside a synthetic text stream."""

    def _get_logits_processor(self, *args, **kwargs):
        # Always stack the delay-pattern processor on top of any user-supplied processors.
        if kwargs.get("logits_processor") is None:
            logits_processor = LogitsProcessorList()
        else:
            logits_processor = kwargs.get("logits_processor")
        logits_processor.append(
            HiggsAudioV2DelayPatternLogitsProcessor(
                # Codebook i is delayed by i + 1 generation steps.
                delay_pattern=[el + 1 for el in range(self.config.num_codebooks)],
                audio_bos_token_id=self.config.audio_bos_token_id,
                # NOTE(review): the processor's "eos" trigger is intentionally the *delay* token id —
                # _sample() below writes audio_delay_token_id into the text stream once a stream eos
                # appears, which is what starts the eos delay countdown.
                audio_eos_token_id=self.config.audio_delay_token_id,
                audio_stream_bos_id=self.config.audio_stream_bos_id,
                audio_stream_eos_id=self.config.audio_stream_eos_id,
                num_codebooks=self.config.num_codebooks,
                codebook_size=self.config.codebook_size,
            )
        )
        return logits_processor

    def _prepare_generation_config(
        self, generation_config: GenerationConfig | None, **kwargs: Any
    ) -> tuple[GenerationConfig, dict]:
        """Prepare the generation config, restricting generation to greedy search or sampling."""
        generation_config, model_kwargs = super()._prepare_generation_config(generation_config, **kwargs)
        original_get_generation_mode = generation_config.get_generation_mode

        # Patch the config instance so any later mode query rejects unsupported strategies
        # (beam search etc. are incompatible with the custom audio sampling loop below).
        def patched_get_generation_mode(assistant_model=None):
            generation_mode = original_get_generation_mode(assistant_model)
            if generation_mode not in [GenerationMode.GREEDY_SEARCH, GenerationMode.SAMPLE]:
                raise ValueError(
                    f"Generation mode {generation_mode} is not supported for HiggsAudioV2 model. Please set generation parameters to use greedy or sampling generation."
                )
            return generation_mode

        generation_config.get_generation_mode = patched_get_generation_mode
        return generation_config, model_kwargs

    def _sample(
        self,
        input_ids: torch.LongTensor,
        logits_processor: LogitsProcessorList,
        stopping_criteria: StoppingCriteriaList,
        generation_config: GenerationConfig,
        synced_gpus: bool = False,
        streamer: Optional["BaseStreamer"] = None,
        **model_kwargs,
    ) -> GenerateNonBeamOutput | torch.LongTensor:
        """Greedy/sampling loop adapted from `GenerationMixin._sample` for multi-codebook audio.

        Differences from the base implementation are delimited by the BELOW/ABOVE markers:
        per-codebook token selection, repetition-aware resampling (RAS), audio bos/eos stream
        bookkeeping, and maintaining `audio_input_ids` / `audio_input_ids_mask` in `model_kwargs`.
        Returns a `HiggsAudioV2GenerationOutput` when `return_dict_in_generate` is set, otherwise
        the generated audio codes.
        """
        output_attentions = generation_config.output_attentions
        output_hidden_states = generation_config.output_hidden_states
        output_scores = generation_config.output_scores
        output_logits = generation_config.output_logits
        return_dict_in_generate = generation_config.return_dict_in_generate
        has_eos_stopping_criteria = any(hasattr(criteria, "eos_token_id") for criteria in stopping_criteria)
        do_sample = generation_config.do_sample
        # init attention / hidden states / scores tuples
        scores = () if (return_dict_in_generate and output_scores) else None
        raw_logits = () if (return_dict_in_generate and output_logits) else None
        decoder_attentions = () if (return_dict_in_generate and output_attentions) else None
        decoder_hidden_states = () if (return_dict_in_generate and output_hidden_states) else None
        # keep track of which sequences are already finished
        batch_size, cur_len = input_ids.shape[:2]
        this_peer_finished = False
        unfinished_sequences = torch.ones(batch_size, dtype=torch.long, device=input_ids.device)
        model_forward = (
            self.get_compiled_call(generation_config.compile_config)
            if self._valid_auto_compile_criteria(model_kwargs, generation_config)
            else self.__call__
        )
        # The prefill forward pass is done eagerly; its outputs serve the first loop iteration.
        prefill_consumed = False
        outputs = self._prefill(
            input_ids,
            generation_config,
            model_kwargs,
            is_first_iteration=not generation_config.is_assistant,
        )
        while self._has_unfinished_sequences(this_peer_finished, synced_gpus, device=input_ids.device):
            if prefill_consumed:
                next_sequence_length = 1 if model_kwargs["use_cache"] else None
                model_inputs = self.prepare_inputs_for_generation(
                    input_ids, next_sequence_length=next_sequence_length, **model_kwargs
                )
                with self._optimize_model_for_decode():
                    outputs = model_forward(**model_inputs, return_dict=True)
            prefill_consumed = True
            model_kwargs = self._update_model_kwargs_for_generation(
                outputs,
                model_kwargs,
                is_encoder_decoder=self.config.is_encoder_decoder,
            )
            if synced_gpus and this_peer_finished:
                continue
            # Copy is needed to avoid keeping a hanging ref to outputs.logits which may be very large for first iteration
            # (the clone itself is always small)
            next_token_logits = outputs.logits[:, -1, :].to(copy=True, dtype=torch.float32, device=input_ids.device)
            # pre-process distribution
            next_token_scores = logits_processor(input_ids, next_token_logits)
            # ===========================
            # BELOW DIFFERENCES WITH GenerationMixin._sample()
            # Store scores, attentions and hidden_states when required
            if return_dict_in_generate:
                if output_scores:
                    scores += (
                        next_token_scores.reshape(batch_size, self.config.num_codebooks, self.config.codebook_size),
                    )
                if output_logits:
                    raw_logits += (next_token_logits,)
                if output_attentions:
                    decoder_attentions += (outputs.attentions,)
                if output_hidden_states:
                    decoder_hidden_states += (outputs.hidden_states,)
            # token selection
            if do_sample:
                probs = nn.functional.softmax(next_token_scores, dim=-1)
                # TODO (joao): this OP throws "skipping cudagraphs due to ['incompatible ops']", find solution
                next_tokens = torch.multinomial(probs, num_samples=1).squeeze(1)
            else:
                next_tokens = torch.argmax(next_token_scores, dim=-1)
            # Reinterpret the flat selection as one token per codebook.
            next_token_logits = next_token_logits.reshape(-1, self.config.num_codebooks, self.config.codebook_size)
            next_tokens = next_tokens.reshape(batch_size, self.config.num_codebooks)
            ras_win_len = generation_config.ras_win_len if hasattr(generation_config, "ras_win_len") else None
            ras_win_max_num_repeat = (
                generation_config.ras_win_max_num_repeat
                if hasattr(generation_config, "ras_win_max_num_repeat")
                else None
            )
            audio_input_ids = model_kwargs.get("audio_input_ids")
            if ras_win_len is not None and ras_win_max_num_repeat is not None and audio_input_ids is not None:
                # check if there are repetitions over a window of tokens.
                audio_inputs_ids_window = audio_input_ids[:, -ras_win_len:, :]
                repetition_mask = audio_inputs_ids_window == next_tokens.unsqueeze(1)
                # avoid counting the repetition of the audio stream EOS and BOS tokens
                not_excluded_mask = (audio_inputs_ids_window != self.config.audio_stream_bos_id) & (
                    audio_inputs_ids_window != self.config.audio_stream_eos_id
                )
                repetition_mask = repetition_mask & not_excluded_mask
                rep_num = repetition_mask.sum(dim=1)
                # if we saw repeated tokens in the most recent window of tokens, resample without temperature.
                replacement_mask = rep_num >= ras_win_max_num_repeat
                replacement_tokens = (
                    next_token_logits[replacement_mask].softmax(dim=-1).multinomial(1, replacement=True).view(-1)
                )
                next_tokens[replacement_mask] = replacement_tokens
            # finished sentences should have their next token be a padding token
            if has_eos_stopping_criteria:
                next_tokens = next_tokens * unfinished_sequences[:, None] + self.config.audio_stream_eos_id * (
                    1 - unfinished_sequences[:, None]
                )
            # "any" starts the delayed shutdown of the stream; "all" means the row is fully done.
            has_audio_stream_eos = (next_tokens == self.config.audio_stream_eos_id).any(dim=-1)
            has_all_audio_stream_eos = (next_tokens == self.config.audio_stream_eos_id).all(dim=-1)
            next_tokens = next_tokens[:, None, :]
            if audio_input_ids is not None:
                model_kwargs["audio_input_ids"] = torch.cat([audio_input_ids, next_tokens], dim=1)
            else:
                model_kwargs["audio_input_ids"] = next_tokens
            next_audio_input_ids_mask = torch.ones((batch_size, 1), dtype=torch.bool, device=next_tokens.device)
            next_audio_input_ids_mask[has_all_audio_stream_eos] = 0
            audio_input_ids_mask = model_kwargs.get("audio_input_ids_mask")
            if audio_input_ids_mask is not None:
                model_kwargs["audio_input_ids_mask"] = torch.cat(
                    [audio_input_ids_mask, next_audio_input_ids_mask], dim=1
                )
            else:
                model_kwargs["audio_input_ids_mask"] = next_audio_input_ids_mask
            # generation of a stream eos audio token will start delay pattern masking in the logits processor
            # for that, we need to set next text token to audio_eos_start_delay_token_id
            next_tokens_flat = input_ids.new_ones(batch_size) * self.config.audio_token_id
            next_tokens_flat[has_audio_stream_eos | (input_ids[:, -1] == self.config.audio_delay_token_id)] = (
                self.config.audio_delay_token_id
            )
            if self.config.eos_token_id is not None:
                next_tokens_flat[has_all_audio_stream_eos] = self.config.eos_token_id
            next_tokens = next_tokens_flat
            # ============================
            # update generated ids, model inputs, and length for next step
            input_ids = torch.cat([input_ids, next_tokens[:, None]], dim=-1)
            if streamer is not None:
                streamer.put(next_tokens.cpu())
            unfinished_sequences = unfinished_sequences & ~stopping_criteria(input_ids, scores)
            this_peer_finished = unfinished_sequences.max() == 0
            cur_len += 1
            # This is needed to properly delete outputs.logits which may be very large for first iteration
            # Otherwise a reference to outputs is kept which keeps the logits alive in the next iteration
            del outputs
        if streamer is not None:
            streamer.end()
        if return_dict_in_generate:
            return HiggsAudioV2GenerationOutput(
                sequences=input_ids,
                scores=scores,
                logits=raw_logits,
                attentions=decoder_attentions,
                hidden_states=decoder_hidden_states,
                past_key_values=model_kwargs.get("past_key_values"),
                audio_sequences=model_kwargs.get("audio_input_ids"),
            )
        else:
            return model_kwargs.get("audio_input_ids")
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/higgs_audio_v2/generation_higgs_audio_v2.py",
"license": "Apache License 2.0",
"lines": 354,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/higgs_audio_v2/modular_higgs_audio_v2.py | # Copyright 2025 the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
from ... import initialization as init
from ...cache_utils import Cache, DynamicCache
from ...masking_utils import create_causal_mask
from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from ...modeling_utils import PreTrainedModel
from ...processing_utils import Unpack
from ...utils import (
TransformersKwargs,
auto_docstring,
can_return_tuple,
logging,
)
from ...utils.output_capturing import capture_outputs
from ..csm.modeling_csm import CsmBackboneModelEmbeddings
from ..llama.configuration_llama import LlamaConfig
from ..llama.modeling_llama import LlamaDecoderLayer, LlamaMLP, LlamaModel, LlamaPreTrainedModel, LlamaRMSNorm
from .generation_higgs_audio_v2 import HiggsAudioV2GenerationMixin
# Module-level logger, named after this module per the transformers convention.
logger = logging.get_logger(__name__)
class HiggsAudioV2Config(LlamaConfig):
    r"""
    This is the configuration class to store the configuration of a [`HiggsAudioV2Model`]. It is used to instantiate an HiggsAudioV2
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
    defaults will yield a similar configuration to that of the 3B model.
    e.g. [bosonai/higgs-audio-v2-generation-3B-base](https://huggingface.co/bosonai/higgs-audio-v2-generation-3B-base)
    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.
    Args:
        vocab_size (`int`, *optional*, defaults to 128256):
            Vocabulary size of the HiggsAudioV2 model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`HiggsAudioV2Model`]
        hidden_size (`int`, *optional*, defaults to 3072):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 8192):
            Dimension of the MLP representations.
        num_hidden_layers (`int`, *optional*, defaults to 28):
            Number of hidden layers in the Transformer decoder.
        num_attention_heads (`int`, *optional*, defaults to 24):
            Number of attention heads for each attention layer in the Transformer decoder.
        num_key_value_heads (`int`, *optional*, defaults to 8):
            This is the number of key_value heads that should be used to implement Grouped Query Attention. If
            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
            `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
            by meanpooling all the original heads within that group. For more details, check out [this
            paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to
            `num_attention_heads`.
        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the decoder.
        max_position_embeddings (`int`, *optional*, defaults to 2048):
            The maximum sequence length that this model might ever be used with.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        rms_norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon used by the rms normalization layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        pad_token_id (`int`, *optional*, defaults to 128001):
            Padding token id.
        bos_token_id (`int`, *optional*, defaults to 1):
            Beginning of stream token id.
        eos_token_id (`int`, *optional*, defaults to 128009):
            End of stream token id.
        pretraining_tp (`int`, *optional*, defaults to 1):
            Experimental feature. Tensor parallelism rank used during pretraining. Please refer to [this
            document](https://huggingface.co/docs/transformers/main/perf_train_gpu_many#tensor-parallelism) to
            understand more about it. This value is necessary to ensure exact reproducibility of the pretraining
            results. Please refer to [this issue](https://github.com/pytorch/pytorch/issues/76232).
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether to tie weight embeddings
        rope_parameters (`RopeParameters`, *optional*):
            Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain
            a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE
            with longer `max_position_embeddings`.
        attention_bias (`bool`, *optional*, defaults to `False`):
            Whether to use a bias in the query, key, value and output projection layers during self-attention.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        mlp_bias (`bool`, *optional*, defaults to `False`):
            Whether to use a bias in up_proj, down_proj and gate_proj layers in the MLP layers.
        head_dim (`int`, *optional*, defaults to 128):
            The attention head dimension. If None, it will default to hidden_size // num_attention_heads
        num_codebooks (`int`, *optional*, defaults to 8):
            Number of codebooks used in the underlying codec model responsible for tokenizing the audio.
        codebook_size (`int`, *optional*, defaults to 1024):
            Size of the codebook used in the underlying codec model for audio tokenization.
        audio_token_id (`int`, *optional*, defaults to 128016):
            The token ID used to represent audio output in the text sequence.
        audio_bos_token_id (`int`, *optional*, defaults to 128013):
            The token ID for the beginning-of-sequence token for audio output.
        audio_delay_token_id (`int`, *optional*, defaults to 128014):
            The token ID used for audio delay pattern in multi-codebook generation.
        audio_stream_bos_id (`int`, *optional*, defaults to 1024):
            The ID for the beginning-of-stream token in audio sequences.
        audio_stream_eos_id (`int`, *optional*, defaults to 1025):
            The ID for the end-of-stream token in audio sequences.
    Example:
    ```python
    >>> from transformers import HiggsAudioV2Model, HiggsAudioV2Config
    >>> # Initializing a HiggsAudioV2 style configuration
    >>> configuration = HiggsAudioV2Config()
    >>> # Initializing a model from the configuration
    >>> model = HiggsAudioV2Model(configuration)
    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    def __init__(
        self,
        vocab_size=128256,
        hidden_size=3072,
        intermediate_size=8192,
        num_hidden_layers=28,
        num_attention_heads=24,
        num_key_value_heads=8,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-05,
        use_cache=True,
        pad_token_id=128001,
        bos_token_id=1,
        eos_token_id=128009,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_parameters=None,
        attention_bias=False,
        attention_dropout=0.0,
        mlp_bias=False,
        head_dim=128,
        num_codebooks=8,
        codebook_size=1024,
        audio_token_id=128016,
        audio_bos_token_id=128013,
        audio_delay_token_id=128014,
        audio_stream_bos_id=1024,
        audio_stream_eos_id=1025,
        **kwargs,
    ):
        # Bug fix: the default used to be a mutable dict literal in the signature, which is
        # shared across every instantiation — any downstream in-place edit of the rope dict
        # would leak into unrelated configs. A None sentinel builds a fresh dict per call.
        if rope_parameters is None:
            rope_parameters = {
                "factor": 32.0,
                "rope_theta": 500000.0,
                "high_freq_factor": 0.5,
                "low_freq_factor": 0.125,
                "original_max_position_embeddings": 1024,
                "rope_type": "llama3",
            }
        super().__init__(
            vocab_size=vocab_size,
            hidden_size=hidden_size,
            intermediate_size=intermediate_size,
            num_hidden_layers=num_hidden_layers,
            num_attention_heads=num_attention_heads,
            num_key_value_heads=num_key_value_heads,
            hidden_act=hidden_act,
            max_position_embeddings=max_position_embeddings,
            initializer_range=initializer_range,
            rms_norm_eps=rms_norm_eps,
            use_cache=use_cache,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            pretraining_tp=pretraining_tp,
            tie_word_embeddings=tie_word_embeddings,
            rope_parameters=rope_parameters,
            attention_bias=attention_bias,
            attention_dropout=attention_dropout,
            mlp_bias=mlp_bias,
            head_dim=head_dim,
            **kwargs,
        )
        # Audio-specific fields on top of the Llama configuration.
        self.num_codebooks = num_codebooks
        self.codebook_size = codebook_size
        self.audio_token_id = audio_token_id
        self.audio_bos_token_id = audio_bos_token_id
        self.audio_delay_token_id = audio_delay_token_id
        self.audio_stream_bos_id = audio_stream_bos_id
        self.audio_stream_eos_id = audio_stream_eos_id
class HiggsAudioV2MLP(LlamaMLP):
    """Feed-forward block identical to [`LlamaMLP`]; subclassed only to give it a model-specific name."""

    pass
class HiggsAudioV2RMSNorm(LlamaRMSNorm):
    """RMS normalization identical to [`LlamaRMSNorm`]; subclassed only to give it a model-specific name."""

    pass
class HiggsAudioV2DecoderLayer(LlamaDecoderLayer):
    """Llama decoder layer with a "dual-FFN": audio positions use dedicated layernorms and MLP."""

    def __init__(self, config: HiggsAudioV2Config, layer_idx: int):
        super().__init__(config, layer_idx)
        # Audio-specific counterparts of the text MLP / norms inherited from LlamaDecoderLayer.
        self.audio_mlp = HiggsAudioV2MLP(config)
        self.audio_input_layernorm = HiggsAudioV2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.audio_post_attention_layernorm = HiggsAudioV2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: tuple[torch.Tensor, torch.Tensor] | None,
        attention_mask: torch.Tensor | None = None,
        audio_token_mask: torch.BoolTensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: Cache | None = None,
        use_cache: bool | None = False,
        cache_position: torch.LongTensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> torch.Tensor:
        residual = hidden_states
        # A None mask means every position is treated as audio (audio-only decoding); otherwise
        # each position is routed through the audio or text input layernorm according to the mask.
        if audio_token_mask is None:
            hidden_states = self.audio_input_layernorm(hidden_states)
        else:
            audio_token_mask = audio_token_mask.to(hidden_states.device)
            hidden_states = hidden_states.masked_scatter(
                audio_token_mask.unsqueeze(-1),
                self.audio_input_layernorm(hidden_states[audio_token_mask]).to(hidden_states.device),
            )
            hidden_states = hidden_states.masked_scatter(
                ~audio_token_mask.unsqueeze(-1),
                self.input_layernorm(hidden_states[~audio_token_mask]).to(hidden_states.device),
            )
        # Self Attention
        hidden_states, _ = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            use_cache=use_cache,
            cache_position=cache_position,
            position_embeddings=position_embeddings,
            **kwargs,
        )
        hidden_states = residual + hidden_states
        # Post-attention FFN is likewise routed per position: text MLP vs audio MLP.
        if audio_token_mask is None:
            audio_hidden_states = self.audio_post_attention_layernorm(hidden_states)
            audio_hidden_states = self.audio_mlp(audio_hidden_states)
            hidden_states = hidden_states + audio_hidden_states.to(hidden_states.device)
        else:
            text_hidden_states = self.post_attention_layernorm(hidden_states[~audio_token_mask])
            audio_hidden_states = self.audio_post_attention_layernorm(hidden_states[audio_token_mask])
            text_hidden_states = self.mlp(text_hidden_states)
            # In-place masked residual adds keep the two sub-populations in one tensor.
            hidden_states[~audio_token_mask] += text_hidden_states.to(hidden_states.device)
            audio_hidden_states = self.audio_mlp(audio_hidden_states)
            hidden_states[audio_token_mask] += audio_hidden_states.to(hidden_states.device)
        return hidden_states
class HiggsAudioV2Embeddings(CsmBackboneModelEmbeddings):
    def forward(self, input_ids):
        """Embed per-codebook audio ids and merge them into one embedding per frame.

        Each codebook addresses its own slice of the shared embedding table via
        `audio_tokens_offsets`; the per-codebook embeddings are summed over the
        codebook axis (dim=-2).
        """
        shifted_ids = input_ids + self.audio_tokens_offsets
        per_codebook_embeds = self.embed_audio_tokens(shifted_ids)
        return per_codebook_embeds.sum(dim=-2)
class HiggsAudioV2PreTrainedModel(LlamaPreTrainedModel, PreTrainedModel):
    @torch.no_grad()
    def _init_weights(self, module):
        """Initialize `module`'s weights.

        Delegates generic initialization to `PreTrainedModel._init_weights` (deliberately
        bypassing the Llama-specific override in the MRO), then fills the codebook offset
        buffer of `HiggsAudioV2Embeddings` with `arange(num_codebooks) * codebook_size`.
        """
        # Bug fix: the unbound call must pass `self` explicitly. The previous
        # `PreTrainedModel._init_weights(module)` bound `module` to the `self` parameter
        # and left the real `module` argument missing.
        PreTrainedModel._init_weights(self, module)
        if isinstance(module, HiggsAudioV2Embeddings):
            init.copy_(
                module.audio_tokens_offsets, torch.arange(self.config.num_codebooks) * self.config.codebook_size
            )
class HiggsAudioV2Model(LlamaModel):
    """Llama backbone extended with summed multi-codebook audio embeddings and dual-FFN layers."""

    def __init__(self, config: HiggsAudioV2Config):
        super().__init__(config)
        # Embedding module that sums one embedding per codebook into a single frame embedding.
        self.embed_audio_tokens = HiggsAudioV2Embeddings(config)

    def get_placeholder_mask(
        self, input_ids: torch.LongTensor, inputs_embeds: torch.FloatTensor, audio_input_ids_mask: torch.LongTensor
    ):
        """
        Obtains multimodal placeholder mask from `input_ids` or `inputs_embeds`, and checks that the placeholder token count is
        equal to the length of audio_input_ids. If the lengths are different, an error is raised.
        If input_ids and inputs_embeds are None, we return None.
        Indeed this means we cannot determine the placeholder mask, the model is to be used in a audio-only mode, hence we return None.
        """
        # NOTE(review): `audio_input_ids_mask` is unused in this body and no length check is
        # actually performed — confirm whether the check described above was meant to exist.
        if input_ids is None and inputs_embeds is None:
            return None
        elif input_ids is None:
            # No token ids available: detect placeholders by comparing each embedding row
            # against the audio placeholder token's embedding vector.
            special_audio_mask = inputs_embeds == self.embed_tokens(
                torch.tensor(self.config.audio_token_id, dtype=torch.long, device=inputs_embeds.device)
            )
            special_audio_mask = special_audio_mask.all(-1)
        else:
            # Both the audio placeholder and the delay token count as audio positions.
            special_audio_mask = (input_ids == self.config.audio_token_id) | (
                input_ids == self.config.audio_delay_token_id
            )
        return special_audio_mask

    @capture_outputs
    @auto_docstring
    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        audio_input_ids: torch.LongTensor | None = None,
        attention_mask: torch.LongTensor | None = None,
        audio_input_ids_mask: torch.BoolTensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: Cache | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        cache_position: torch.LongTensor | None = None,
        use_cache: bool | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> BaseModelOutputWithPast:
        r"""
        audio_input_ids (`torch.LongTensor` of shape `(batch_size, num_audio_frames, num_codebooks)`, *optional*):
            Indices of audio codebook tokens.
            Indices can be obtained using [`HiggsAudioV2TokenizerModel.encode`].
        audio_input_ids_mask (`torch.BoolTensor` of shape `(batch_size, num_audio_frames)`, *optional*):
            Indicates which audio frames in `audio_input_ids` are valid.
        Returns:
            [`~models.modeling_outputs.BaseModelOutputWithPast`]:
                Usual decoder outputs with the placeholder positions already substituted by their corresponding
                audio embeddings.
        Example:
        ```python
        >>> from transformers import AutoProcessor, HiggsAudioV2Model
        >>> import torch
        >>> device = "cuda" if torch.cuda.is_available() else "cpu"
        >>> processor = AutoProcessor.from_pretrained("eustlb/higgs-audio-v2-generation-3B-base", device_map=device)
        >>> model = HiggsAudioV2Model.from_pretrained("eustlb/higgs-audio-v2-generation-3B-base", device_map=device)
        >>> conversation = [
        ...     {
        ...         "role": "system",
        ...         "content": [
        ...             {
        ...                 "type": "text",
        ...                 "text": "Generate audio following instruction."
        ...             }
        ...         ]
        ...     },
        ...     {
        ...         "role": "scene",
        ...         "content": [
        ...             {
        ...                 "type": "text",
        ...                 "text": "Audio is recorded from a quiet room."
        ...             }
        ...         ]
        ...     },
        ...     {
        ...         "role": "user",
        ...         "content": [
        ...             {
        ...                 "type": "text",
        ...                 "text": "It was the night before my birthday. Hooray! It's almost here! It may not be a holiday, but it's the best day of the year."
        ...             }
        ...         ]
        ...     },
        ...     {
        ...         "role": "assistant",
        ...         "content": [
        ...             {
        ...                 "type": "audio",
        ...                 "url": "https://huggingface.co/datasets/eustlb/dummy-audio-samples-higgs/resolve/main/belinda.wav"
        ...             }
        ...         ]
        ...     },
        ...     {
        ...         "role": "user",
        ...         "content": [
        ...             {
        ...                 "type": "text",
        ...                 "text": "The sun rises in the east and sets in the west. This simple fact has been observed by humans for thousands of years."
        ...             }
        ...         ]
        ...     }
        ... ]
        >>> inputs = processor.apply_chat_template(conversation, return_dict=True, tokenize=True, sampling_rate=24000, return_tensors="pt")
        >>> inputs = inputs.to(model.device)
        >>> outputs = model(**inputs)
        ```
        """
        if (input_ids is None) and (inputs_embeds is None) and (audio_input_ids is None):
            raise ValueError("You must specify at least one of input_ids, inputs_embeds, or audio_input_ids")
        if (input_ids is not None) and (inputs_embeds is not None):
            raise ValueError("Only one of input_ids or inputs_embeds can be provided")
        audio_token_mask = self.get_placeholder_mask(input_ids, inputs_embeds, audio_input_ids_mask)
        if input_ids is not None:
            inputs_embeds = self.embed_tokens(input_ids)
        if audio_input_ids is not None:
            audio_embeds = self.embed_audio_tokens(audio_input_ids)
        if inputs_embeds is not None and audio_input_ids is not None:
            # Keep only the still-valid audio frames, then scatter them into the text embedding
            # sequence at the placeholder positions.
            audio_embeds = (
                audio_embeds[audio_input_ids_mask.to(audio_embeds.device)]
                if audio_input_ids_mask is not None
                else audio_embeds
            )
            inputs_embeds = inputs_embeds.masked_scatter(
                audio_token_mask[..., None].expand_as(inputs_embeds), audio_embeds.to(inputs_embeds.device)
            )
        elif audio_input_ids is not None:
            # Audio-only mode: the audio embeddings are the whole input sequence.
            inputs_embeds = audio_embeds
        if use_cache and past_key_values is None:
            past_key_values = DynamicCache(config=self.config)
        if cache_position is None:
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position: torch.Tensor = torch.arange(
                past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
            )
        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)
        causal_mask = create_causal_mask(
            config=self.config,
            inputs_embeds=inputs_embeds,
            attention_mask=attention_mask,
            cache_position=cache_position,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )
        hidden_states = inputs_embeds
        position_embeddings = self.rotary_emb(hidden_states, position_ids)
        for decoder_layer in self.layers[: self.config.num_hidden_layers]:
            hidden_states = decoder_layer(
                hidden_states,
                attention_mask=causal_mask,
                audio_token_mask=audio_token_mask,
                position_ids=position_ids,
                past_key_values=past_key_values,
                cache_position=cache_position,
                position_embeddings=position_embeddings,
                **kwargs,
            )
        hidden_states = self.norm(hidden_states)
        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values,
        )
@auto_docstring(
    custom_intro="""
    The Higgs Audio model, a llama-like auto-regressive transformer model with dual-FFN.
    """
)
class HiggsAudioV2ForConditionalGeneration(HiggsAudioV2PreTrainedModel, HiggsAudioV2GenerationMixin):
    base_model_prefix = "model"
    # The text head is optional (see `use_text_head`), so checkpoints that ship one must still
    # load cleanly into a model created without it.
    _keys_to_ignore_on_load_unexpected = ["text_lm_head.weight"]
    def __init__(self, config: HiggsAudioV2Config, use_text_head: bool = False):
        r"""
        use_text_head (`bool`, *optional*, defaults to False):
            Whether to use a text language model head. Such head is not required for generation,
            but can be used to compute the text loss when training.
        """
        super().__init__(config)
        self.model = HiggsAudioV2Model(config)
        # One flat projection covering every codebook; logits are later reshaped to
        # (..., num_codebooks, codebook_size) when computing the loss.
        self.audio_lm_head = nn.Linear(config.hidden_size, config.num_codebooks * config.codebook_size, bias=False)
        self.text_lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) if use_text_head else None
        self.post_init()
    def prepare_inputs_for_generation(
        self,
        input_ids: torch.LongTensor,
        audio_input_ids: torch.LongTensor | None = None,
        audio_input_ids_mask: torch.LongTensor | None = None,
        **kwargs,
    ):
        """Prepare generation inputs, trimming audio ids that are already in the KV cache."""
        model_inputs = super().prepare_inputs_for_generation(input_ids, **kwargs)
        if audio_input_ids is not None and model_inputs.get("past_key_values") is not None:
            current_cache_length = model_inputs["cache_position"][0]
            audio_token_mask = (input_ids == self.config.audio_token_id) | (
                input_ids == self.config.audio_delay_token_id
            )
            # Number of audio placeholder tokens already consumed by the cache, per batch element.
            in_cache_num_audio_input_ids = audio_token_mask[:, :current_cache_length].sum(dim=-1)
            # Already-cached audio_input_ids should be masked out.
            # NOTE: this assumes audio_input_ids are right padded!
            valid_audio_input_ids = audio_input_ids_mask.cumsum(dim=-1) > in_cache_num_audio_input_ids[:, None]
            audio_input_ids_mask = audio_input_ids_mask & valid_audio_input_ids
            if audio_input_ids_mask is not None and (~audio_input_ids_mask[:, :-1]).all():
                # In decoding mode (single new frame), we only pass audio_input_ids.
                audio_input_ids = audio_input_ids[:, -1:, :].clone(memory_format=torch.contiguous_format)
                model_inputs.pop("input_ids", None)
                audio_input_ids_mask = None
        model_inputs["audio_input_ids"] = audio_input_ids
        model_inputs["audio_input_ids_mask"] = audio_input_ids_mask
        return model_inputs
    @auto_docstring
    @can_return_tuple
    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        attention_mask: torch.BoolTensor | None = None,
        audio_input_ids: torch.LongTensor | None = None,
        audio_input_ids_mask: torch.LongTensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: Cache | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        labels: torch.LongTensor | None = None,
        audio_labels: torch.LongTensor | None = None,
        use_cache: bool | None = None,
        cache_position: torch.LongTensor | None = None,
        logits_to_keep: int | torch.Tensor = 0,
        **kwargs: Unpack[TransformersKwargs],
    ):
        r"""
        audio_input_ids (`torch.LongTensor` of shape `(batch_size, num_audio_frames, num_codebooks)`, *optional*):
            Indices of audio codebook tokens.
            Indices can be obtained using [`HiggsAudioV2TokenizerModel.encode`].
        audio_input_ids_mask (`torch.BoolTensor` of shape `(batch_size, num_audio_frames)`, *optional*):
            Indicates which audio frames in `audio_input_ids` are valid.
        audio_labels (`torch.LongTensor` of shape `(batch_size, num_audio_frames, num_codebooks)`, *optional*):
            Labels for the audio codebook tokens for computing the masked language modeling loss. Indices should
            either be in `[0, ..., config.codebook_size]` or -100. Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.codebook_size]`.
            Can be obtained using `output_labels=True` when calling [`HiggsAudioV2Processor`].
        Returns:
            [`~models.modeling_outputs.CausalLMOutputWithPast`]:
            A [`~models.modeling_outputs.CausalLMOutputWithPast`] containing the logits, loss (if labels are provided),
            and other outputs from the model.
        Example:
        ```python
        >>> from transformers import AutoProcessor, HiggsAudioV2ForConditionalGeneration
        >>> model_id = "eustlb/higgs-audio-v2-generation-3B-base"
        >>> processor = AutoProcessor.from_pretrained(model_id, device_map="auto")
        >>> model = HiggsAudioV2ForConditionalGeneration.from_pretrained(model_id, device_map="auto")
        >>> conversation = [
        ...     {
        ...         "role": "system",
        ...         "content": [
        ...             {
        ...                 "type": "text",
        ...                 "text": "Generate audio following instruction."
        ...             }
        ...         ]
        ...     },
        ...     {
        ...         "role": "scene",
        ...         "content": [
        ...             {
        ...                 "type": "text",
        ...                 "text": "Audio is recorded from a quiet room."
        ...             }
        ...         ]
        ...     },
        ...     {
        ...         "role": "user",
        ...         "content": [
        ...             {
        ...                 "type": "text",
        ...                 "text": "It was the night before my birthday. Hooray! It's almost here! It may not be a holiday, but it's the best day of the year."
        ...             }
        ...         ]
        ...     },
        ...     {
        ...         "role": "assistant",
        ...         "content": [
        ...             {
        ...                 "type": "audio",
        ...                 "url": "https://huggingface.co/datasets/eustlb/dummy-audio-samples-higgs/resolve/main/belinda.wav"
        ...             }
        ...         ]
        ...     },
        ...     {
        ...         "role": "user",
        ...         "content": [
        ...             {
        ...                 "type": "text",
        ...                 "text": "The sun rises in the east and sets in the west. This simple fact has been observed by humans for thousands of years."
        ...             }
        ...         ]
        ...     }
        ... ]
        >>> inputs = processor.apply_chat_template(conversation, return_dict=True, tokenize=True, sampling_rate=24000, return_tensors="pt")
        >>> inputs = inputs.to(model.device)
        >>> outputs = model(**inputs)
        ```
        """
        outputs = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            audio_input_ids=audio_input_ids,
            audio_input_ids_mask=audio_input_ids_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            cache_position=cache_position,
            **kwargs,
        )
        hidden_states = outputs.last_hidden_state
        # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
        slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
        logits = self.audio_lm_head(hidden_states[:, slice_indices, :])
        loss = None
        if audio_labels is not None:
            # Split the flat head output into per-codebook logits.
            audio_logits = logits.reshape(*logits.shape[:2], self.config.num_codebooks, self.config.codebook_size)
            # Scatter audio labels onto the text positions holding audio tokens; every other
            # position is ignored by the loss (-100). The last dim is num_codebooks (was a
            # hard-coded 8, which broke any config with a different codebook count).
            audio_labels_expanded = input_ids.new_full((*input_ids.shape[:2], self.config.num_codebooks), -100)
            audio_token_mask = self.model.get_placeholder_mask(input_ids, inputs_embeds, audio_input_ids_mask)
            audio_labels_expanded[audio_token_mask] = audio_labels[audio_input_ids_mask]
            codebook_losses = []
            for codebook_idx in range(self.config.num_codebooks):
                codebook_logits = audio_logits[:, :, codebook_idx, :]
                codebook_labels = audio_labels_expanded[:, :, codebook_idx]
                codebook_losses.append(
                    self.loss_function(codebook_logits, codebook_labels, self.config.codebook_size, **kwargs)
                )
            loss = sum(codebook_losses)
        if labels is not None:
            if self.text_lm_head is not None:
                text_logits = self.text_lm_head(hidden_states[:, slice_indices, :])
                text_loss = self.loss_function(text_logits, labels, self.config.vocab_size, **kwargs)
                loss = text_loss if loss is None else loss + text_loss
            else:
                logger.warning_once(
                    f"`labels` provided to {self.__class__.__name__} but `text_lm_head` is disabled. "
                    f"Text labels ignored. Set `use_text_head=True` in model init to enable text loss."
                )
        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
# Public symbols exported from this modular file.
__all__ = [
    "HiggsAudioV2ForConditionalGeneration",
    "HiggsAudioV2PreTrainedModel",
    "HiggsAudioV2Model",
    "HiggsAudioV2Config",
]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/higgs_audio_v2/modular_higgs_audio_v2.py",
"license": "Apache License 2.0",
"lines": 615,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/higgs_audio_v2/processing_higgs_audio_v2.py | # Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from itertools import islice
from pathlib import Path
from ...audio_utils import AudioInput, make_list_of_audio
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessingKwargs, ProcessorMixin, Unpack
from ...tokenization_utils_base import PreTokenizedInput, TextInput
from ...utils import is_soundfile_available, is_torch_available, logging
if is_torch_available():
import torch
import torch.nn.functional as F
if is_soundfile_available():
import soundfile as sf
logger = logging.get_logger(__name__)
class HiggsAudioV2ProcessorKwargs(ProcessingKwargs, total=False):
    """Keyword-argument schema and defaults for `HiggsAudioV2Processor.__call__`."""
    _defaults = {
        "text_kwargs": {
            # Left padding so every batch element ends at the same position for generation.
            "padding": True,
            "padding_side": "left",
        },
        "audio_kwargs": {
            "padding": False,
            # Native sampling rate of the Higgs Audio v2 tokenizer.
            "sampling_rate": 24000,
        },
    }
class HiggsAudioV2Processor(ProcessorMixin):
    r"""
    Constructs a Higgs Audio processor which wraps a [`DacFeatureExtractor`], a [`AutoTokenizer`],
    and a [`HiggsAudioV2TokenizerModel`] into a single processor. It inherits the audio feature extraction,
    tokenizer, and audio encode/decode functionalities.
    See [`~HiggsAudioV2Processor.__call__`] and [`~HiggsAudioV2Processor.decode`] for more information.
    Args:
        feature_extractor (`DacFeatureExtractor`):
            An instance of [`DacFeatureExtractor`]. The feature extractor is a required input.
        tokenizer (`AutoTokenizer`):
            An instance of [`AutoTokenizer`]. The tokenizer is a required input.
        audio_tokenizer (`HiggsAudioV2TokenizerModel`):
            An instance of [`HiggsAudioV2TokenizerModel`]. The audio tokenizer is a required input.
        chat_template (`str`, *optional*):
            A template string for chat formatting when combining text and audio interactions.
        audio_token (`str`, *optional*, defaults to `"<|AUDIO_OUT|>"`):
            The token used to represent audio output in the text sequence.
        audio_bos_token (`str`, *optional*, defaults to `"<|audio_out_bos|>"`):
            The beginning-of-sequence token for audio output.
        audio_eos_token (`str`, *optional*, defaults to `"<|audio_eos|>"`):
            The end-of-sequence token for audio output.
        audio_delay_token (`str`, *optional*, defaults to `"<|reserved_special_token_6|>"`):
            The token used for audio delay pattern in multi-codebook generation.
        audio_stream_bos_id (`int`, *optional*, defaults to 1024):
            The ID for the beginning-of-stream token in audio sequences.
        audio_stream_eos_id (`int`, *optional*, defaults to 1025):
            The ID for the end-of-stream token in audio sequences.
    """
    feature_extractor_class = "DacFeatureExtractor"
    tokenizer_class = "AutoTokenizer"
    audio_tokenizer_class = "HiggsAudioV2TokenizerModel"
    def __init__(
        self,
        feature_extractor,
        tokenizer,
        audio_tokenizer,
        chat_template=None,
        audio_token="<|AUDIO_OUT|>",
        audio_bos_token="<|audio_out_bos|>",
        audio_eos_token="<|audio_eos|>",
        audio_delay_token="<|reserved_special_token_6|>",
        audio_stream_bos_id=1024,
        audio_stream_eos_id=1025,
    ):
        # Prefer special tokens declared on the tokenizer itself; fall back to the defaults above.
        self.audio_token = tokenizer.audio_token if hasattr(tokenizer, "audio_token") else audio_token
        self.audio_bos_token = tokenizer.audio_bos_token if hasattr(tokenizer, "audio_bos_token") else audio_bos_token
        self.audio_eos_token = tokenizer.audio_eos_token if hasattr(tokenizer, "audio_eos_token") else audio_eos_token
        self.audio_delay_token = (
            tokenizer.audio_delay_token if hasattr(tokenizer, "audio_delay_token") else audio_delay_token
        )
        # Cache the integer ids of the special tokens for fast masking later on.
        self.audio_token_id = tokenizer.convert_tokens_to_ids(self.audio_token)
        self.audio_bos_token_id = tokenizer.convert_tokens_to_ids(self.audio_bos_token)
        self.audio_eos_token_id = tokenizer.convert_tokens_to_ids(self.audio_eos_token)
        self.audio_delay_token_id = tokenizer.convert_tokens_to_ids(self.audio_delay_token)
        # Codebook-level (not text-vocabulary) ids marking the start/end of an audio stream.
        self.audio_stream_bos_id = audio_stream_bos_id
        self.audio_stream_eos_id = audio_stream_eos_id
        super().__init__(
            feature_extractor,
            tokenizer,
            audio_tokenizer=audio_tokenizer,
            chat_template=chat_template,
        )
    def get_audio_tokens(self, num_audio_tokens):
        """
        Returns the audio tokens for a given number of audio tokens.
        The last `num_codebooks - 1` positions use the delay token, matching the extra frames
        introduced by the delay pattern (see `build_delay_pattern`).
        """
        num_codebooks = self.audio_tokenizer.config.num_quantizers
        return self.audio_token * (num_audio_tokens - (num_codebooks - 1)) + self.audio_delay_token * (
            num_codebooks - 1
        )
    def __call__(
        self,
        text: TextInput | PreTokenizedInput | list[TextInput] | list[PreTokenizedInput] | None = None,
        audio: AudioInput | None = None,
        output_labels: bool | None = False,
        **kwargs: Unpack[HiggsAudioV2ProcessorKwargs],
    ):
        """
        Tokenize `text` and encode `audio` into delay-patterned codebook ids.
        Returns a `BatchFeature` with `input_ids` / `attention_mask` and, when audio is given,
        `audio_input_ids` / `audio_input_ids_mask`; `output_labels=True` additionally produces
        `labels` (and `audio_labels` when audio is present), with special positions set to -100.
        Only `return_tensors="pt"` is supported.
        """
        output_kwargs = self._merge_kwargs(
            HiggsAudioV2ProcessorKwargs,
            tokenizer_init_kwargs=self.tokenizer.init_kwargs,
            **kwargs,
        )
        text_kwargs = output_kwargs["text_kwargs"]
        audio_kwargs = output_kwargs["audio_kwargs"]
        return_tensors = text_kwargs.get("return_tensors", None)
        if return_tensors != "pt":
            raise ValueError(f"{self.__class__.__name__} only supports `return_tensors='pt'`.")
        if isinstance(text, str):
            text = [text]
        elif not (isinstance(text, (list, tuple)) and all(isinstance(t, str) for t in text)):
            raise ValueError("Invalid input text. Please provide a string, or a list of strings")
        # Each audio placeholder token in the text must be matched by exactly one provided audio.
        n_audio_in_text = [t.count(self.audio_token) for t in text]
        n_audio = 0
        if audio is not None:
            audio = make_list_of_audio(audio)
            n_audio = len(audio)
        if sum(n_audio_in_text) > 0 and n_audio != sum(n_audio_in_text):
            if audio is None:
                raise ValueError("No audio were provided, but there are audio tokens in the prompt")
            else:
                raise ValueError(
                    f"The number of audio tokens in each text ({n_audio_in_text}) should be the same as the "
                    f"number of provided audios ({n_audio})."
                )
        elif sum(n_audio_in_text) == 0 and n_audio > 0:
            raise ValueError("Audio were provided, but there are no audio tokens in the prompt")
        if audio is not None:
            # tokenize audio
            audio_input_ids_list = []
            for audio_el in audio:
                # TODO: @eustlb, this should be batched !!!
                audio_inputs = self.feature_extractor(audio_el, **audio_kwargs)
                # TODO: @eustlb, padding_mask should be supported...
                audio_inputs.pop("padding_mask", None)
                audio_inputs.to(self.audio_tokenizer.device)
                audio_input_ids = self.audio_tokenizer.encode(**audio_inputs).audio_codes
                # add audio eos and bos
                bos_codes = audio_input_ids.new_full((*audio_input_ids.shape[:2], 1), self.audio_stream_bos_id)
                eos_codes = audio_input_ids.new_full((*audio_input_ids.shape[:2], 1), self.audio_stream_eos_id)
                audio_input_ids = torch.cat([bos_codes, audio_input_ids, eos_codes], dim=2)
                audio_input_ids = self.build_delay_pattern(audio_input_ids)
                # Store frames-first: (num_frames, num_codebooks).
                audio_input_ids_list.append(audio_input_ids[0].transpose(0, 1))
            # expand audio tokens in text
            # Each placeholder occurrence consumes the next entry of the iterator, so multiple
            # audios per text are expanded in order.
            num_audio_tokens_iter = iter(len(audio_input_ids) for audio_input_ids in audio_input_ids_list)
            for i in range(len(text)):
                expanded = re.sub(
                    re.escape(self.audio_token), lambda _: self.get_audio_tokens(next(num_audio_tokens_iter)), text[i]
                )
                text[i] = expanded
            # convert to nested list according to n_audio_in_text
            # [audio_1, audio_2, ...] -> [[audio_1_1, audio_1_2, ...], [audio_2_1, audio_2_2, ...], ...]
            audio_input_ids_iter = iter(audio_input_ids_list)
            audio_input_ids_list = [list(islice(audio_input_ids_iter, l)) for l in n_audio_in_text]
            audio_input_ids_list = [torch.cat(batch_el, dim=0) for batch_el in audio_input_ids_list]
            # pad and stack
            # NOTE(review): local is misspelled ("lenghts"); rename in a code-touching pass.
            lenghts = [ids.shape[0] for ids in audio_input_ids_list]
            max_length = max(lenghts)
            audio_input_ids_list = [
                F.pad(ids, (0, 0, 0, max_length - ids.shape[0]), value=self.audio_stream_eos_id)
                for ids in audio_input_ids_list
            ]
            audio_input_ids = torch.stack(audio_input_ids_list, dim=0)
            # True for real frames, False for right-padding.
            audio_input_ids_mask = torch.arange(max_length)[None, :] < torch.tensor(lenghts)[:, None]
        # tokenize text
        data = self.tokenizer(text, **text_kwargs)
        if audio is not None:
            data.update(
                {
                    "audio_input_ids": audio_input_ids,
                    "audio_input_ids_mask": audio_input_ids_mask,
                }
            )
        if output_labels:
            # Mask out audio placeholders, padding and audio BOS from the text loss.
            labels = data["input_ids"].clone()
            labels[labels == self.audio_token_id] = -100
            labels[labels == self.tokenizer.pad_token_id] = -100
            labels[labels == self.audio_bos_token_id] = -100
            data["labels"] = labels
            if audio is not None:
                # Mask out stream BOS/EOS markers from the audio loss.
                audio_labels = audio_input_ids.clone()
                audio_labels[audio_labels == self.audio_stream_bos_id] = -100
                audio_labels[audio_labels == self.audio_stream_eos_id] = -100
                data.update({"audio_labels": audio_labels})
        return BatchFeature(data=data, tensor_type="pt")
    def batch_decode(self, audio_input_ids):
        """
        Decode a batch of audio token sequences into audio waveforms.
        This method processes audio token sequences generated by the model, extracting the actual audio tokens
        between the beginning-of-stream (BOS) and end-of-stream (EOS) markers, reverting the delay pattern
        used during generation, and decoding them into audio waveforms using the audio tokenizer.
        Args:
            audio_input_ids (`torch.LongTensor`):
                Shape `(batch_size, sequence_length, num_codebooks)`
                The audio token sequences to decode. These should contain audio tokens with BOS and EOS markers
                in a delay pattern format as generated by the model.
        Returns:
            `list[torch.Tensor]`: A list of decoded audio waveforms, one for each batch element. Each waveform
            is a 1D tensor containing the audio samples.
        """
        # start idx should be the last sequence index of the audio bos tokens
        audio_bos_token_idxs = (audio_input_ids == self.audio_stream_bos_id).all(-1).nonzero()
        start_of_generation_idx = audio_bos_token_idxs[-1, -1].item()
        audio_input_ids = audio_input_ids[:, start_of_generation_idx:]
        # end idx for each batch idx should be the first sequence index of the audio eos tokens
        audio_eos_token_idxs = (audio_input_ids == self.audio_stream_eos_id).all(-1).nonzero()
        end_of_generation_idxs = [
            audio_eos_token_idxs[audio_eos_token_idxs[:, 0] == batch_idx, 1].min().item()
            if len(audio_eos_token_idxs[audio_eos_token_idxs[:, 0] == batch_idx]) > 0
            else audio_input_ids.shape[1]
            for batch_idx in range(audio_input_ids.shape[0])
        ]
        audios = []
        with torch.no_grad():
            # TODO: @eustlb, this should be batched !!!
            for batch_idx in range(audio_input_ids.shape[0]):
                # Skip the BOS frame, drop everything at/after the first EOS frame.
                audio_token_ids = audio_input_ids[batch_idx, 1 : end_of_generation_idxs[batch_idx]]
                # Clip to valid codebook range in case stray BOS/EOS ids survive the undelay.
                audio_token_ids = self.revert_delay_pattern(audio_token_ids).clip(0, self.audio_stream_bos_id - 1)
                audio_i = (
                    self.audio_tokenizer.decode(audio_token_ids.transpose(0, 1).unsqueeze(0))
                    .audio_values.cpu()
                    .squeeze()
                )
                audios.append(audio_i)
        return audios
    def decode(self, audio_input_ids):
        """Decode a single (batch size 1) audio token sequence into a waveform tensor."""
        if audio_input_ids.shape[0] != 1:
            raise ValueError(
                f"Expecting a single output to be decoded but received {audio_input_ids.shape[0]} samples instead."
            )
        return self.batch_decode(audio_input_ids)[0]
    def build_delay_pattern(self, input_ids):
        """
        Apply the delay pattern: codebook k is shifted right by k frames, with stream-BOS ids
        filling the lower-left triangle and stream-EOS ids the upper-right, growing the
        sequence by `num_codebooks - 1` frames.
        """
        bsz, num_codebooks, seq_len = input_ids.shape
        new_seq_len = seq_len + num_codebooks - 1
        # Create output tensor with delay pattern
        output = torch.ones((bsz, num_codebooks, new_seq_len), dtype=torch.long, device=input_ids.device)
        # Create masks for different regions
        bos_mask = torch.tril(output, -1) > 0
        eos_mask = torch.triu(output, seq_len) > 0
        data_mask = ~(bos_mask | eos_mask)
        # Fill the tensor
        output[bos_mask] = self.audio_stream_bos_id
        output[data_mask] = input_ids.reshape(-1)
        output[eos_mask] = self.audio_stream_eos_id
        return output
    def revert_delay_pattern(self, input_ids):
        """Inverse of `build_delay_pattern` for a single (seq_len, num_codebooks) sequence."""
        seq_len, num_codebooks = input_ids.shape
        # Extract diagonal slices from the delay pattern
        slices = []
        for i in range(num_codebooks):
            end_idx = seq_len - num_codebooks + 1 + i
            slices.append(input_ids[i:end_idx, i : i + 1])
        return torch.cat(slices, dim=1)
    # Copied from transformers.models.csm.processing_csm.CsmProcessor.save_audio with Csm->HiggsAudioV2
    def save_audio(
        self,
        audio: AudioInput,
        saving_path: str | Path | list[str | Path],
        **kwargs: Unpack[HiggsAudioV2ProcessorKwargs],
    ):
        """Write one or more waveforms to disk with `soundfile`, at the processor's sampling rate."""
        # TODO: @eustlb, this should be in AudioProcessor
        if not is_soundfile_available():
            raise ImportError("Please install `soundfile` to save audio files.")
        # ensure correct audio input
        audio = make_list_of_audio(audio)
        # ensure correct saving path
        if isinstance(saving_path, (str, Path)):
            saving_path = [saving_path]
        elif not (isinstance(saving_path, (list, tuple)) and all(isinstance(p, (str, Path)) for p in saving_path)):
            raise ValueError("Invalid input path. Please provide a string, or a list of strings")
        if len(audio) != len(saving_path):
            raise ValueError("The number of audio and saving paths must be the same")
        output_kwargs = self._merge_kwargs(
            HiggsAudioV2ProcessorKwargs,
            **kwargs,
        )
        audio_kwargs = output_kwargs["audio_kwargs"]
        sampling_rate = audio_kwargs["sampling_rate"]
        for audio_value, p in zip(audio, saving_path):
            if isinstance(audio_value, torch.Tensor):
                audio_value = audio_value.cpu().float().numpy()
            sf.write(p, audio_value, sampling_rate)
    @property
    def model_input_names(self):
        """Names of the model inputs this processor can produce."""
        tokenizer_input_names = self.tokenizer.model_input_names
        # TODO: @eustlb, to be standardized!!
        audio_tokenizer_input_names = ["audio_input_ids", "audio_input_ids_mask"]
        return tokenizer_input_names + audio_tokenizer_input_names
# Public API of this module.
__all__ = ["HiggsAudioV2Processor"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/higgs_audio_v2/processing_higgs_audio_v2.py",
"license": "Apache License 2.0",
"lines": 306,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/higgs_audio_v2_tokenizer/convert_higgs_audio_v2_tokenizer_to_hf.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import gc
import re
import torch
from transformers import (
DacConfig,
DacFeatureExtractor,
HiggsAudioV2TokenizerConfig,
HiggsAudioV2TokenizerModel,
)
from transformers.utils.hub import cached_file
# Order of the four sub-layers inside each original res_unit block; the positional index in
# the original `block.<k>` name maps onto these converted attribute names.
INNER_LAYER_NAMES = ["snake1", "conv1", "snake2", "conv2"]
# fmt: off
# Regex -> replacement rules applied in order by `convert_key`. Values are either plain
# replacement strings or callables receiving the `re.Match` (used to re-index nested blocks).
ORIGINAL_TO_CONVERTED_KEY_MAPPING = {
    # Encoder: initial conv, final snake + conv
    r"^encoder\.block\.0": "acoustic_encoder.conv1",
    r"^encoder\.block\.6": "acoustic_encoder.snake1",
    r"^encoder\.block\.7": "acoustic_encoder.conv2",
    # Encoder: res_unit inner layers (block.M res_unit index 0-2 → 1-3, block.K layer index 0-3 → snake1/conv1/snake2/conv2)
    r"^encoder\.block\.(\d+)\.block\.([012])\.block\.([0123])": lambda m: f"acoustic_encoder.block.{int(m[1])-1}.res_unit{int(m[2])+1}.{INNER_LAYER_NAMES[int(m[3])]}",
    # Encoder: block-level snake + downsampling conv
    r"^encoder\.block\.(\d+)\.block\.3": lambda m: f"acoustic_encoder.block.{int(m[1])-1}.snake1",
    r"^encoder\.block\.(\d+)\.block\.4": lambda m: f"acoustic_encoder.block.{int(m[1])-1}.conv1",
    # Decoder: initial conv, final snake + conv
    r"^decoder_2\.model\.0": "acoustic_decoder.conv1",
    r"^decoder_2\.model\.6": "acoustic_decoder.snake1",
    r"^decoder_2\.model\.7": "acoustic_decoder.conv2",
    # Decoder: block-level snake + upsample conv_t
    r"^decoder_2\.model\.(\d+)\.block\.0": lambda m: f"acoustic_decoder.block.{int(m[1])-1}.snake1",
    r"^decoder_2\.model\.(\d+)\.block\.1": lambda m: f"acoustic_decoder.block.{int(m[1])-1}.conv_t1",
    # Decoder: res_unit inner layers (block.M res_unit index 2-4 → 1-3, block.K layer index 0-3 → snake1/conv1/snake2/conv2)
    r"^decoder_2\.model\.(\d+)\.block\.([234])\.block\.([0123])": lambda m: f"acoustic_decoder.block.{int(m[1])-1}.res_unit{int(m[2])-1}.{INNER_LAYER_NAMES[int(m[3])]}",
    # Quantizer
    r"^quantizer\.vq\.layers": "quantizer.quantizers",
    r"\._codebook\.": ".codebook.",
    # FC layers
    r"^fc_prior\.": "fc.",
    r"^fc_post1\.": "fc1.",
    r"^fc_post2\.": "fc2.",
    # Semantic encoder/decoder: unwrap nested conv modules
    r"\.conv\.conv\.": ".conv.",
    r"\.conv1\.conv\.": ".conv1.",
    r"\.conv2\.conv\.": ".conv2.",
}
# fmt: on
def convert_key(key, mapping):
    """Rewrite a state-dict *key* by applying every (pattern, replacement) rule in *mapping* in order."""
    converted = key
    for pattern, target in mapping.items():
        converted = re.sub(pattern, target, converted)
    return converted
def compute_weight_from_weight_norm(weight_v, weight_g):
    """Fold a weight-norm parametrization back into a single plain weight.

    Weight normalization stores a direction tensor ``weight_v`` and a magnitude tensor
    ``weight_g``; the merged weight is ``g * v / ||v||``, where the 2-norm is taken over
    every dimension except the first (output-channel) one.
    """
    reduce_dims = list(range(1, weight_v.dim()))
    magnitude = weight_v.norm(dim=reduce_dims, keepdim=True)
    return weight_g * weight_v / magnitude
def convert_model(input_path_or_repo, revision=None):
    """Download an original Boson AI checkpoint and convert it into a `HiggsAudioV2TokenizerModel`.

    Merges weight-norm parameter pairs, renames keys via `ORIGINAL_TO_CONVERTED_KEY_MAPPING`,
    and loads the result into a freshly-built HF model (on the meta device, with `assign=True`).
    """
    print("Converting the model.")
    config = HiggsAudioV2TokenizerConfig(
        acoustic_model_config=DacConfig(
            encoder_hidden_size=64,
            downsampling_ratios=[8, 5, 4, 2, 3],
            decoder_hidden_size=1024,
            upsampling_ratios=[8, 5, 4, 2, 3],
            hidden_size=256,
        ),
    )
    model_path = cached_file(input_path_or_repo, "model.pth", revision=revision)
    print(f"Fetching all parameters from the checkpoint at {model_path}...")
    # weights_only=False: the original checkpoint is a trusted full pickle, not a plain state dict.
    loaded = torch.load(model_path, map_location="cpu", weights_only=False)
    print("Converting model...")
    # -----------------------------------------
    # Preprocess: merge weight_norm into weight
    # -----------------------------------------
    preprocessed = {}
    for key, value in loaded.items():
        if key.endswith(".weight_g"):
            base = key.removesuffix(".weight_g")
            weight = compute_weight_from_weight_norm(loaded[base + ".weight_v"], value)
            preprocessed[base + ".weight"] = weight
        elif key.endswith(".weight_v"):
            continue  # already handled with weight_g
        else:
            preprocessed[key] = value
    del loaded
    gc.collect()
    # -----------------------
    # Convert parameter names
    # -----------------------
    state_dict = {}
    for key, value in preprocessed.items():
        # fc1 is not used in the forward pass
        if key.startswith("fc1."):
            continue
        # masked_spec_embed is not used in inference
        if key == "semantic_model.masked_spec_embed":
            continue
        new_key = convert_key(key, ORIGINAL_TO_CONVERTED_KEY_MAPPING)
        state_dict[new_key] = value
    del preprocessed
    gc.collect()
    # -------------------------
    # Load the weights
    # -------------------------
    print("Loading the checkpoint in a HiggsAudioV2TokenizerModel.")
    # Build on the meta device (no allocation), then attach real tensors with assign=True.
    with torch.device("meta"):
        model = HiggsAudioV2TokenizerModel(config)
    model.load_state_dict(state_dict, strict=True, assign=True)
    print("Model converted successfully.")
    # Drop the provenance path so it is not serialized into the converted config.
    del model.config._name_or_path
    return model
def create_feature_extractor():
    """Build the DAC feature extractor matching the Higgs Audio v2 tokenizer front-end (24 kHz, hop 960)."""
    return DacFeatureExtractor(
        feature_size=1,
        hop_length=960,
        padding_side="right",
        padding_value=0.0,
        return_attention_mask=True,
        sampling_rate=24000,
    )
def main():
    """CLI entry point: convert the checkpoint, then save locally and/or push to the Hub."""
    parser = argparse.ArgumentParser(description="Convert HiggsAudioV2Tokenizer weights to HuggingFace format")
    parser.add_argument("--input_path_or_repo", type=str, default="bosonai/higgs-audio-v2-tokenizer")
    parser.add_argument("--input_revision", type=str, default=None)
    parser.add_argument("--output_dir", type=str, default=None)
    parser.add_argument("--push_to_hub_path", type=str, default=None)
    opts = parser.parse_args()
    # At least one destination is required, otherwise the conversion result would be discarded.
    if opts.output_dir is None and opts.push_to_hub_path is None:
        raise ValueError("Either --output_dir or --push_to_hub_path must be provided.")
    converted_model = convert_model(opts.input_path_or_repo, revision=opts.input_revision)
    extractor = create_feature_extractor()
    if opts.output_dir is not None:
        converted_model.save_pretrained(opts.output_dir)
        extractor.save_pretrained(opts.output_dir)
        print(f"Model and feature extractor saved to {opts.output_dir}")
    if opts.push_to_hub_path is not None:
        converted_model.push_to_hub(opts.push_to_hub_path)
        extractor.push_to_hub(opts.push_to_hub_path)
        print(f"Model and feature extractor pushed to {opts.push_to_hub_path}")
# Allow running this converter as a standalone script.
if __name__ == "__main__":
    main()
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/higgs_audio_v2_tokenizer/convert_higgs_audio_v2_tokenizer_to_hf.py",
"license": "Apache License 2.0",
"lines": 153,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/higgs_audio_v2_tokenizer/modular_higgs_audio_v2_tokenizer.py | # Copyright 2025 Boson AI and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchaudio
from ..xcodec.configuration_xcodec import XcodecConfig
from ..xcodec.modeling_xcodec import XcodecEuclideanCodebook, XcodecModel, XcodecPreTrainedModel
class HiggsAudioV2TokenizerConfig(XcodecConfig):
r"""
This is the configuration class to store the configuration of an [`HiggsAudioV2TokenizerModel`]. It is used to instantiate a
HiggsAudioV2Tokenizer model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the [`Higgs Audio v2 Tokenizer`](https://huggingface.co/bosonai/higgs-audio-v2-tokenizer).
e.g. [bosonai/higgs-audio-v2-tokenizer](https://huggingface.co/bosonai/higgs-audio-v2-tokenizer)
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
target_bandwidths (`List[float]`, *optional*, defaults to `[0.5, 1, 1.5, 2]`):
The range of different bandwidths (in kbps) the model can encode audio with.
sample_rate (`int`, *optional*, defaults to 24000):
The sampling rate at which the audio waveform should be digitalized, in hertz (Hz).
kernel_size (`int`, *optional*, defaults to 3):
Kernel size for the initial semantic convolution.
channel_ratios (`List[float]`, *optional*, defaults to `[1, 1]`):
Expansion factors for the number of output channels in each semantic block.
strides (`List[int]`, *optional*, defaults to `[1, 1]`):
Strides for each semantic encoder block.
block_dilations (`List[int]`, *optional*, defaults to `[1, 1]`):
Dilation factors for the residual units in semantic blocks.
unit_kernel_size (`int`, *optional*, defaults to 3):
Kernel size inside each ResidualUnit in semantic blocks.
codebook_size (`int`, *optional*, defaults to 1024):
Number of entries in each residual quantizer's codebook.
codebook_dim (`int`, *optional*, defaults to 64):
Dimensionality of each codebook vector.
initializer_range (`float`, *optional*, defaults to 0.02):
Standard deviation of the truncated normal initializer for all weight matrices.
acoustic_model_config (`Union[Dict, AutoConfig]`, *optional*):
An instance of the configuration for the acoustic (DAC) model.
semantic_model_config (`Union[Dict, AutoConfig]`, *optional*):
An instance of the configuration object for the semantic (HuBERT) model.
semantic_sample_rate (`int`, *optional*, defaults to 16000):
The sampling rate at which the semantic model expects audio input, in hertz (Hz).
downsample_factor (`int`, *optional*, defaults to 320):
Downsampling factor for the semantic features.
Example:
```python
>>> from transformers import HiggsAudioV2TokenizerModel, HiggsAudioV2TokenizerConfig
>>> # Initializing configuration
>>> configuration = HiggsAudioV2TokenizerConfig()
>>> # Initializing a model (with random weights) from the configuration
>>> model = HiggsAudioV2TokenizerModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
_default_semantic_model_config_kwargs = {
"mask_time_prob": 0.0,
}
    def __init__(
        self,
        target_bandwidths=[0.5, 1, 1.5, 2],
        sample_rate=24000,
        kernel_size=3,
        channel_ratios=[1, 1],
        strides=[1, 1],
        block_dilations=[1, 1],
        unit_kernel_size=3,
        codebook_size=1024,
        codebook_dim=64,
        initializer_range=0.02,
        acoustic_model_config=None,
        semantic_model_config=None,
        semantic_sample_rate=16000,
        downsample_factor=320,
        **kwargs,
    ):
        """Initialize the tokenizer configuration.

        All arguments except `semantic_sample_rate` and `downsample_factor` are
        forwarded verbatim to the parent config's `__init__`; see the class
        docstring above for each argument's meaning.
        """
        # Forward shared fields to the parent (Xcodec-style) config. Keep the
        # explicit keyword form: if `kwargs` duplicated one of these names,
        # Python raises a TypeError here, which is the intended failure mode.
        super().__init__(
            target_bandwidths=target_bandwidths,
            sample_rate=sample_rate,
            kernel_size=kernel_size,
            channel_ratios=channel_ratios,
            strides=strides,
            block_dilations=block_dilations,
            unit_kernel_size=unit_kernel_size,
            codebook_size=codebook_size,
            codebook_dim=codebook_dim,
            initializer_range=initializer_range,
            acoustic_model_config=acoustic_model_config,
            semantic_model_config=semantic_model_config,
            **kwargs,
        )
        # Fields specific to the semantic (HuBERT) feature path.
        self.semantic_sample_rate = semantic_sample_rate
        self.downsample_factor = downsample_factor
@property
def semantic_downsample_factor(self):
return int(self.hop_length / (self.sample_rate / self.semantic_sample_rate) / self.downsample_factor)
class HiggsAudioV2TokenizerPreTrainedModel(XcodecPreTrainedModel):
    # Modules that must not be split across devices when the model is sharded.
    _no_split_modules = ["HiggsAudioV2TokenizerResidualVectorQuantization", "DacResidualUnit"]
    # Checkpoint keys that are silently dropped (no "unexpected key" warning)
    # when loading pretrained weights.
    _keys_to_ignore_on_load_unexpected = ["semantic_model.masked_spec_embed"]
# Behaviorally identical to the Xcodec Euclidean codebook; re-declared so the
# modular converter emits it under the HiggsAudioV2Tokenizer name.
class HiggsAudioV2TokenizerEuclideanCodebook(XcodecEuclideanCodebook): ...
class HiggsAudioV2TokenizerVectorQuantization(nn.Module):
    """One vector-quantization stage: project features into the codebook space,
    look up nearest codewords, and project the codewords back out."""

    def __init__(self, config: HiggsAudioV2TokenizerConfig):
        super().__init__()
        # NOTE: submodule creation order is deliberate — parameter initialization
        # consumes the global RNG in this exact sequence.
        self.codebook = HiggsAudioV2TokenizerEuclideanCodebook(config)
        self.project_in = nn.Linear(config.hidden_size, config.codebook_dim)
        self.project_out = nn.Linear(config.codebook_dim, config.hidden_size)

    def encode(self, hidden_states):
        """Return codebook indices for `hidden_states` (3-D, channels-first)."""
        # Swap the last two dims so the linear projection applies per time step.
        channels_last = hidden_states.permute(0, 2, 1)
        return self.codebook.encode(self.project_in(channels_last))

    def decode(self, embed_ind):
        """Turn codebook indices back into channels-first feature tensors."""
        dequantized = self.project_out(self.codebook.decode(embed_ind))
        return dequantized.permute(0, 2, 1)
class HiggsAudioV2TokenizerModel(XcodecModel):
    def _extract_semantic_features(self, input_values: torch.FloatTensor) -> torch.FloatTensor:
        """Extract semantic features for `input_values` by running the HuBERT
        semantic model (no grad) and averaging all of its hidden states."""
        cfg = self.config
        # Bring the waveform to the semantic model's expected sample rate first.
        if cfg.sample_rate != cfg.semantic_sample_rate:
            input_values = torchaudio.functional.resample(input_values, cfg.sample_rate, cfg.semantic_sample_rate)
        # Keep only the first channel.
        input_values = input_values[:, 0, :]
        # TODO: there is a diff here with original codebase https://github.com/boson-ai/higgs-audio/blob/f644b62b855ba2b938896436221e01efadcc76ca/boson_multimodal/audio_processing/higgs_audio_v2_tokenizer.py#L173-L174
        # input_values = F.pad(input_values, (self.pad, self.pad))
        input_values = F.pad(input_values, (160, 160))
        with torch.no_grad():
            outputs = self.semantic_model(input_values, output_hidden_states=True)
        # Average across every layer's hidden states (stacked along dim 1).
        layer_stack = torch.stack([layer.to(input_values.device) for layer in outputs.hidden_states], dim=1)
        features = layer_stack.mean(dim=1)
        # Subsample along time so semantic and acoustic frame rates line up.
        if cfg.semantic_downsample_factor > 1:
            features = features[:, :: cfg.semantic_downsample_factor, :]
        return features
# Explicit public API of this module.
__all__ = ["HiggsAudioV2TokenizerConfig", "HiggsAudioV2TokenizerPreTrainedModel", "HiggsAudioV2TokenizerModel"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/higgs_audio_v2_tokenizer/modular_higgs_audio_v2_tokenizer.py",
"license": "Apache License 2.0",
"lines": 146,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:tests/models/higgs_audio_v2/test_modeling_higgs_audio_v2.py | # Copyright 2024, The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch ConversationalSpeechModel model."""
import unittest
import pytest
from parameterized import parameterized
from transformers import (
AutoProcessor,
HiggsAudioV2Config,
HiggsAudioV2ForConditionalGeneration,
HiggsAudioV2Model,
is_torch_available,
)
from transformers.testing_utils import (
cleanup,
require_torch,
require_torch_accelerator,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin, has_similar_generate_outputs
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, global_rng, ids_tensor, random_attention_mask
if is_torch_available():
import torch
class HiggsAudioV2ModelTester:
    """Builds a tiny HiggsAudioV2 configuration and matching random text/audio
    inputs for the common model test suite."""

    base_model_class = HiggsAudioV2Model
    causal_lm_class = HiggsAudioV2ForConditionalGeneration

    def __init__(
        self,
        parent,
        batch_size=3,
        seq_length=14,
        audio_seq_length=10,
        is_training=True,
        main_input_name_for_generation="audio_input_ids",
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=2,
        num_key_value_heads=2,
        intermediate_size=37,
        hidden_act="silu",
        max_position_embeddings=512,
        initializer_range=0.02,
        rms_norm_eps=1e-05,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_theta=500000.0,
        rope_scaling={
            "factor": 32.0,
            "high_freq_factor": 4.0,
            "low_freq_factor": 1.0,
            "original_max_position_embeddings": 8192,
            "rope_type": "llama3",
        },
        attention_bias=False,
        attention_dropout=0.0,
        mlp_bias=False,
        head_dim=16,
        num_codebooks=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.audio_seq_length = audio_seq_length
        self.is_training = is_training
        self.main_input_name_for_generation = main_input_name_for_generation
        # Collect every config field in one dict so `get_config` can forward
        # them to HiggsAudioV2Config verbatim.
        self.config_kwargs = {
            "vocab_size": vocab_size,
            "hidden_size": hidden_size,
            "intermediate_size": intermediate_size,
            "num_hidden_layers": num_hidden_layers,
            "num_attention_heads": num_attention_heads,
            "num_key_value_heads": num_key_value_heads,
            "hidden_act": hidden_act,
            "max_position_embeddings": max_position_embeddings,
            "initializer_range": initializer_range,
            "rms_norm_eps": rms_norm_eps,
            "use_cache": use_cache,
            "pad_token_id": pad_token_id,
            "bos_token_id": bos_token_id,
            "eos_token_id": eos_token_id,
            "pretraining_tp": pretraining_tp,
            "tie_word_embeddings": tie_word_embeddings,
            "rope_theta": rope_theta,
            "rope_scaling": rope_scaling,
            "attention_bias": attention_bias,
            "attention_dropout": attention_dropout,
            "mlp_bias": mlp_bias,
            "head_dim": head_dim,
            "num_codebooks": num_codebooks,
            # Special audio token ids are derived from the vocab/codebook sizes
            # so they never collide with sampled ids (see prepare_config_and_inputs,
            # which samples text ids below vocab_size - 3).
            "codebook_size": hidden_size // num_codebooks,
            "audio_token_id": vocab_size - 1,
            "audio_bos_token_id": vocab_size - 2,
            "audio_delay_token_id": vocab_size - 3,
            "audio_stream_bos_id": hidden_size // num_codebooks - 1,
            "audio_stream_eos_id": hidden_size // num_codebooks - 2,
        }
        # also set them as attributes
        for key, value in self.config_kwargs.items():
            setattr(self, key, value)

    def prepare_config_and_inputs(self):
        """Return a config plus random text/audio inputs; at least one batch row
        carries the full `audio_seq_length` of audio tokens."""
        # let's make sure we don't sample audio_token_id, audio_bos_token_id, audio_delay_token_id
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size - 3)
        attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        # for audio token positions, we use a second random attention mask to set idxs
        audio_token_mask = random_attention_mask([self.batch_size, self.audio_seq_length])
        # let's ensure at least one batch_idx has audio_seq_length audio tokens
        batch_idx = global_rng.randint(0, self.batch_size - 1)
        audio_token_mask[batch_idx, :] = 1
        # audio_token_mask should have the shape of input_ids
        audio_token_mask = torch.cat(
            [audio_token_mask.new_zeros([self.batch_size, self.seq_length - self.audio_seq_length]), audio_token_mask],
            dim=1,
        )
        input_ids[audio_token_mask.bool()] = self.audio_token_id
        audio_seq_lengths = audio_token_mask.sum(-1)
        max_audio_seq_length = audio_seq_lengths.max()
        audio_input_ids = ids_tensor([self.batch_size, max_audio_seq_length, self.num_codebooks], self.codebook_size)
        # True where a real (non-padding) audio frame exists for the batch row.
        audio_input_ids_mask = (
            torch.arange(max_audio_seq_length, device=torch_device)[None, :] < audio_seq_lengths[:, None]
        )
        # TODO: @eustlb, should il really be bool?
        audio_input_ids_mask = audio_input_ids_mask.bool()
        config = self.get_config()
        return config, input_ids, attention_mask, audio_input_ids, audio_input_ids_mask

    def get_config(self):
        """Instantiate the tiny test config."""
        return HiggsAudioV2Config(**self.config_kwargs)

    def create_and_check_model(self, config, input_ids):
        """Run the base model on `input_ids` and check the output hidden-state shape."""
        # Fix: instantiate the model from the config — previously `model = config`
        # assigned the config object itself, which is not callable as a model.
        model = self.base_model_class(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        """Shape the prepared inputs into the kwargs dict ModelTesterMixin expects."""
        config, input_ids, attention_mask, audio_input_ids, audio_input_ids_mask = self.prepare_config_and_inputs()
        inputs_dict = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "audio_input_ids": audio_input_ids,
            "audio_input_ids_mask": audio_input_ids_mask,
        }
        return config, inputs_dict
@require_torch
class HiggsAudioV2ModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
    """Common-suite tests for HiggsAudioV2, with the generation checks adapted to
    its per-codebook audio outputs (scores/logits span num_codebooks * codebook_size)."""

    all_model_classes = (HiggsAudioV2ForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = {"text-to-speech": HiggsAudioV2ForConditionalGeneration} if is_torch_available() else {}
    test_pruning = False

    def setUp(self):
        self.model_tester = HiggsAudioV2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=HiggsAudioV2Config)

    def test_config(self):
        self.config_tester.run_common_tests()

    # --- Common generation tests skipped: assisted decoding is unsupported. ---
    @parameterized.expand([("random",), ("same",)])
    @pytest.mark.generate
    @pytest.mark.skip(reason="HiggsAudioV2 does not support assisted decoding.")
    def test_assisted_decoding_matches_greedy_search(self, assistant_type):
        pass

    @pytest.mark.generate
    @pytest.mark.skip(reason="HiggsAudioV2 does not support assisted decoding.")
    def test_assisted_decoding_sample(self):
        pass

    # --- Common generation tests skipped: beam search is unsupported. ---
    @pytest.mark.generate
    @pytest.mark.skip(reason="HiggsAudioV2 does not support beam search.")
    def test_beam_sample_generate(self):
        pass

    @pytest.mark.generate
    @pytest.mark.skip(reason="HiggsAudioV2 does not support beam search.")
    def test_beam_search_generate(self):
        pass

    @pytest.mark.generate
    @pytest.mark.skip(reason="HiggsAudioV2 does not support beam search.")
    def test_beam_search_generate_dict_output(self):
        pass

    @pytest.mark.generate
    @pytest.mark.skip(reason="HiggsAudioV2 does not support beam search.")
    def test_beam_search_generate_dict_outputs_use_cache(self):
        pass

    @pytest.mark.generate
    @pytest.mark.skip(reason="HiggsAudioV2 does not support beam search.")
    def test_beam_sample_generate_dict_output(self):
        pass

    @pytest.mark.generate
    @pytest.mark.skip(reason="HiggsAudioV2 does not support beam search.")
    def test_generate_from_inputs_embeds_1_beam_search(self, _, num_beams):
        pass

    @pytest.mark.generate
    @pytest.mark.skip(reason="HiggsAudioV2 does not support beam search.")
    def test_model_parallel_beam_search(self):
        pass

    # --- Common generation tests skipped: prompt lookup decoding is unsupported. ---
    @pytest.mark.generate
    @pytest.mark.skip(reason="HiggsAudioV2 does not support prompt lookup decoding.")
    def test_prompt_lookup_decoding_matches_greedy_search(self):
        pass

    @pytest.mark.generate
    @pytest.mark.skip(reason="HiggsAudioV2 does not support prompt lookup decoding.")
    def test_prompt_lookup_decoding_stops_at_eos(self):
        pass

    # --- Embedding-related common tests skipped: the model mixes text and audio embeddings. ---
    @pytest.mark.skip(reason="HiggsAudioV2 has custom embedding approach (text and audio embeddings).")
    def test_model_get_set_embeddings(self):
        pass

    @pytest.mark.skip(reason="HiggsAudioV2 has custom embedding approach (text and audio embeddings).")
    def test_tie_model_weights(self):
        pass

    @pytest.mark.skip(reason="HiggsAudioV2 has custom embedding approach (text and audio embeddings).")
    def test_inputs_embeds_matches_input_ids(self):
        pass

    @pytest.mark.skip(reason="HiggsAudioV2 has custom embedding approach (text and audio embeddings).")
    def test_resize_tokens_embeddings(self):
        pass

    @pytest.mark.skip(reason="HiggsAudioV2 has special embeddings that can never be tied")
    def test_tied_weights_keys(self):
        pass

    def _check_scores(self, batch_size, scores, generated_length, config):
        """Override: each generation step emits scores of shape
        (batch, num_codebooks, codebook_size) instead of (batch, vocab_size)."""
        expected_shape = (batch_size, config.num_codebooks, config.codebook_size)
        self.assertIsInstance(scores, tuple)
        self.assertEqual(len(scores), generated_length)
        self.assertListEqual([iter_scores.shape for iter_scores in scores], [expected_shape] * len(scores))

    def _check_logits(self, batch_size, logits, config):
        """Override: logits are flattened across codebooks on the last dim."""
        self.assertIsInstance(logits, tuple)
        self.assertListEqual([iter_logits.shape[0] for iter_logits in logits], [batch_size] * len(logits))
        # Check that the shape matches expected codebook dimensions
        expected_last_dim = config.num_codebooks * config.codebook_size
        self.assertListEqual([iter_logits.shape[-1] for iter_logits in logits], [expected_last_dim] * len(logits))

    @pytest.mark.generate
    def test_greedy_generate(self):
        # Override: generated length is measured on the audio stream (`audio_input_ids`),
        # not on the text `input_ids`.
        for model_class in self.all_generative_model_classes:
            config, inputs_dict = self.prepare_config_and_inputs_for_generate()
            model = model_class(config).to(torch_device).eval()
            output_generate = self._greedy_generate(model=model, inputs_dict=inputs_dict)
            self.assertTrue(output_generate.shape[1] == self.max_new_tokens + inputs_dict["audio_input_ids"].shape[1])

    @pytest.mark.generate
    def test_sample_generate(self):
        # Override: same audio-length check as test_greedy_generate, but sampling.
        for model_class in self.all_generative_model_classes:
            config, inputs_dict = self.prepare_config_and_inputs_for_generate()
            model = model_class(config).to(torch_device).eval()
            output_generate = self._sample_generate(model=model, inputs_dict=inputs_dict, num_return_sequences=1)
            self.assertTrue(output_generate.shape[1] == self.max_new_tokens + inputs_dict["audio_input_ids"].shape[1])

    def test_forward_with_logits_to_keep(self):
        # Override: the expected last logits dim is num_codebooks * codebook_size.
        for model_class in self.all_generative_model_classes:
            config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
            batch_size, sequence_length = inputs["input_ids"].shape[:2]
            model = model_class(config).to(device=torch_device).eval()
            # some models have labels but `logits_to_keep` should not be used in train mode
            _ = inputs.pop("labels", None)
            # logits_to_keep=0 is a special case meaning "keep all logits"
            all_logits = model(**inputs, logits_to_keep=0).logits
            last_token_logits = model(**inputs, logits_to_keep=1).logits
            # Assert all shapes are correct
            self.assertEqual(
                tuple(all_logits.shape), (batch_size, sequence_length, config.num_codebooks * config.codebook_size)
            )
            self.assertEqual(
                tuple(last_token_logits.shape), (batch_size, 1, config.num_codebooks * config.codebook_size)
            )
            # Assert the last tokens are actually the same (except for the natural fluctuation due to order of FP ops)
            torch.testing.assert_close(all_logits[:, -1:, :], last_token_logits, rtol=1e-5, atol=1e-5)

    @pytest.mark.generate
    def test_generate_continue_from_past_key_values(self):
        # Tests that we can continue generating from past key values, returned from a previous `generate` call
        # Override of the common test: HiggsAudioV2 also tracks generated audio codes
        # (`audio_sequences` / `audio_input_ids_mask`), which must be carried over
        # between the two `generate` calls alongside the text inputs.
        for model_class in self.all_generative_model_classes:
            if any(model_name in model_class.__name__.lower() for model_name in ["imagegpt", "mllama"]):
                self.skipTest(reason="Won't fix: old model with unique inputs/caches/other")
            if any(model_name in model_class.__name__.lower() for model_name in ["umt5"]):
                self.skipTest(reason="TODO: needs modeling or test input preparation fixes for compatibility")
            config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
            if not hasattr(config.get_text_config(), "use_cache"):
                self.skipTest(reason=f"{model_class.__name__} doesn't support caching")
            # Let's make it always:
            # 1. use cache (for obvious reasons)
            # 2. generate to max length (which can be achieved by setting the eos token to an invalid value), which
            #    would make the test flaky (e.g. EOS is generated on iteration 1 on both generations, but the
            #    continuation would force it to generate beyond an EOS token)
            # 3. ignore `token_type_ids` for simplicity
            # 4. ignore `forced_eos_token_id`, which requires further manipulation of the continuation inputs and is
            #    active by default on some models
            # 5. ignore `encoder_no_repeat_ngram_size`, which is set by default in some encoder-decoder models. When
            #    we use their decoder as a stand-alone model, `encoder_no_repeat_ngram_size` actually prevents
            #    repetition exclusively from the prompt. This test relies on comparing one call vs 2 calls
            #    with cache, what is considered a prompt is different in the two cases.
            if "token_type_ids" in inputs:
                del inputs["token_type_ids"]
            model = model_class(config).to(torch_device)
            model.eval()
            # If "past_key_values" is not returned, skip the test (e.g. RWKV uses a different cache name and format)
            outputs = model(**inputs)
            if "past_key_values" not in outputs:
                self.skipTest(reason="This model doesn't return `past_key_values`")
            generate_kwargs = {
                "pad_token_id": -1,
                "eos_token_id": -1,
                "forced_eos_token_id": None,
                "encoder_no_repeat_ngram_size": 0,
                "use_cache": True,
                "do_sample": False,
                "return_dict_in_generate": True,
                "output_scores": True,
            }
            # Traditional way of generating text, with `return_dict_in_generate` to return the past key values
            outputs = model.generate(**inputs, **generate_kwargs, max_new_tokens=4)
            # Let's generate again, but passing the past key values in between (3 + 1 = 4 tokens). Note that the
            # inputs may need to be tweaked across `generate` calls (like the attention mask).
            outputs_cached = model.generate(**inputs, **generate_kwargs, max_new_tokens=3)
            # Continue from the tokens generated above, preparing the inputs accordingly
            inputs["past_key_values"] = outputs_cached.past_key_values
            new_attention_len = outputs_cached.sequences.shape[1]
            new_audio_input_ids_len = outputs_cached.audio_sequences.shape[1]
            inputs["input_ids"] = outputs_cached.sequences
            inputs["audio_input_ids"] = outputs_cached.audio_sequences
            if "attention_mask" in inputs:
                # Extend the text attention mask to cover the 3 freshly generated positions.
                inputs["attention_mask"] = torch.nn.functional.pad(
                    inputs["attention_mask"],
                    (0, new_attention_len - inputs["attention_mask"].shape[1]),
                    mode="constant",
                    value=1,
                )
            if "audio_input_ids_mask" in inputs:
                # Extend the audio mask the same way, then clear positions where the
                # text stream emitted EOS (those carry no real audio frame).
                num_gen_audio_inputs_ids = new_audio_input_ids_len - inputs["audio_input_ids_mask"].shape[1]
                inputs["audio_input_ids_mask"] = torch.nn.functional.pad(
                    inputs["audio_input_ids_mask"],
                    (0, num_gen_audio_inputs_ids),
                    mode="constant",
                    value=1,
                )
                mask = inputs["input_ids"][:, -num_gen_audio_inputs_ids:] == config.eos_token_id
                inputs["audio_input_ids_mask"][:, -num_gen_audio_inputs_ids:][mask] = False
            # Concatenate the scores of both cached calls so they compare against the
            # single 4-token call above.
            first_caches_scores = outputs_cached.scores
            outputs_cached = model.generate(**inputs, **generate_kwargs, max_new_tokens=1)
            full_cached_scores = first_caches_scores + outputs_cached.scores
            outputs_cached.scores = full_cached_scores
            # The two sets of generated text and past kv should be equal to each other
            self.assertTrue(has_similar_generate_outputs(outputs, outputs_cached))
            self._check_caches_are_equal(outputs.past_key_values, outputs_cached.past_key_values)
class HiggsAudioV2ForConditionalGenerationIntegrationTest(unittest.TestCase):
def setUp(self):
self.checkpoint_name = "eustlb/higgs-audio-v2-generation-3B-base"
self.processor = AutoProcessor.from_pretrained(self.checkpoint_name, device_map=torch_device)
def tearDown(self):
cleanup(torch_device, gc_collect=True)
@slow
@require_torch_accelerator
def test_single_speaker_smart_voice(self):
torch.manual_seed(0)
conversation = [
{
"role": "system",
"content": [{"type": "text", "text": "Generate audio following instruction."}],
},
{"role": "scene", "content": [{"type": "text", "text": "Audio is recorded from a quiet room."}]},
{"role": "user", "content": [{"type": "text", "text": "The sun rises in the east and sets in the west."}]},
]
model = HiggsAudioV2ForConditionalGeneration.from_pretrained(self.checkpoint_name, device_map=torch_device)
inputs = self.processor.apply_chat_template(
conversation,
add_generation_prompt=True,
tokenize=True,
return_dict=True,
sampling_rate=24000,
return_tensors="pt",
).to(model.device)
outputs = model.generate(**inputs, max_new_tokens=50, do_sample=False)
# fmt: off
EXPECTED_OUTPUT_TOKENS = torch.tensor(
[
[
[1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024],
[244, 1024, 1024, 1024, 1024, 1024, 1024, 1024],
[498, 537, 1024, 1024, 1024, 1024, 1024, 1024],
[430, 851, 977, 1024, 1024, 1024, 1024, 1024],
[950, 986, 39, 130, 1024, 1024, 1024, 1024],
[196, 212, 784, 392, 283, 1024, 1024, 1024],
[196, 367, 242, 1022, 686, 325, 1024, 1024],
[196, 562, 971, 196, 932, 645, 53, 1024],
[239, 432, 971, 75, 709, 157, 326, 669],
[668, 935, 243, 339, 406, 434, 245, 655],
[273, 974, 466, 400, 297, 809, 417, 794],
[431, 219, 568, 999, 126, 83, 677, 104],
[852, 797, 915, 809, 270, 720, 201, 962],
[988, 280, 179, 370, 647, 500, 862, 790],
[988, 673, 75, 651, 879, 931, 670, 446],
[945, 112, 102, 338, 354, 276, 770, 880],
[287, 18, 555, 6, 53, 357, 716, 794],
[300, 474, 801, 55, 377, 595, 1022, 820],
[169, 276, 762, 173, 743, 987, 422, 625],
[363, 974, 104, 886, 581, 25, 99, 249],
[1006, 89, 630, 197, 668, 101, 627, 197],
[363, 955, 961, 290, 275, 529, 242, 127],
[444, 192, 721, 711, 689, 778, 352, 901],
[300, 853, 363, 402, 217, 51, 75, 464],
[343, 304, 961, 833, 289, 374, 890, 682],
[343, 962, 784, 911, 289, 583, 463, 974],
[473, 227, 450, 926, 586, 957, 920, 550],
[300, 212, 965, 969, 659, 699, 846, 837],
[409, 440, 307, 995, 144, 435, 34, 510],
[343, 559, 812, 850, 621, 684, 72, 726],
[965, 227, 612, 19, 396, 627, 711, 448],
[740, 1019, 450, 869, 207, 751, 6, 862],
[740, 542, 784, 68, 400, 239, 62, 886],
[40, 18, 889, 414, 532, 620, 698, 43],
[486, 170, 152, 714, 538, 865, 1, 300],
[473, 153, 784, 1016, 755, 727, 700, 73],
[95, 305, 595, 226, 849, 333, 985, 245],
[221, 261, 50, 117, 42, 697, 808, 326],
[300, 261, 290, 966, 538, 567, 929, 518],
[473, 69, 458, 993, 97, 956, 99, 276],
[885, 560, 409, 649, 686, 377, 5, 857],
[676, 662, 784, 975, 674, 473, 487, 242],
[780, 867, 782, 926, 931, 895, 428, 86],
[815, 1010, 398, 637, 512, 62, 47, 49],
[338, 829, 784, 443, 512, 157, 596, 360],
[338, 590, 242, 73, 533, 298, 15, 564],
[338, 962, 186, 371, 462, 298, 835, 894],
[95, 829, 579, 984, 333, 504, 963, 451],
[815, 367, 300, 608, 333, 243, 78, 338],
[360, 393, 816, 317, 271, 488, 233, 60]
]
]
)
# fmt: on
torch.testing.assert_close(outputs.cpu(), EXPECTED_OUTPUT_TOKENS)
@slow
@require_torch_accelerator
def test_multi_speaker_smart_voice(self):
torch.manual_seed(0)
system_message = """You are an AI assistant designed to convert text into speech.
If the user's message includes a [SPEAKER*] tag, do not read out the tag and generate speech for the following text, using the specified voice.
If no speaker tag is present, select a suitable voice on your own."""
user_message = """[SPEAKER0] I can't believe you did that without even asking me first!
[SPEAKER1] Oh, come on! It wasn't a big deal, and I knew you would overreact like this.
[SPEAKER0] Overreact? You made a decision that affects both of us without even considering my opinion!
[SPEAKER1] Because I didn't have time to sit around waiting for you to make up your mind! Someone had to act."""
conversation = [
{"role": "system", "content": [{"type": "text", "text": system_message}]},
{
"role": "scene",
"content": [
{"type": "text", "text": "Audio is recorded from a quiet room."},
{"type": "text", "text": "SPEAKER0: feminine"},
{"type": "text", "text": "SPEAKER1: masculine"},
],
},
{"role": "user", "content": [{"type": "text", "text": user_message}]},
]
model = HiggsAudioV2ForConditionalGeneration.from_pretrained(self.checkpoint_name, device_map=torch_device)
inputs = self.processor.apply_chat_template(
conversation,
add_generation_prompt=True,
tokenize=True,
return_dict=True,
sampling_rate=24000,
return_tensors="pt",
).to(model.device)
outputs = model.generate(**inputs, do_sample=False, max_new_tokens=50)
# fmt: off
EXPECTED_OUTPUT_TOKENS = torch.tensor(
[
[
[1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024],
[127, 1024, 1024, 1024, 1024, 1024, 1024, 1024],
[296, 713, 1024, 1024, 1024, 1024, 1024, 1024],
[252, 477, 872, 1024, 1024, 1024, 1024, 1024],
[569, 477, 142, 954, 1024, 1024, 1024, 1024],
[252, 644, 57, 623, 283, 1024, 1024, 1024],
[743, 746, 869, 313, 786, 809, 1024, 1024],
[470, 644, 142, 805, 185, 608, 442, 1024],
[662, 334, 297, 890, 982, 100, 248, 727],
[492, 582, 697, 658, 856, 169, 367, 201],
[867, 51, 322, 586, 929, 897, 959, 96],
[470, 566, 934, 188, 594, 308, 881, 385],
[874, 63, 189, 859, 443, 980, 48, 94],
[860, 51, 5, 290, 719, 484, 537, 136],
[646, 347, 446, 978, 793, 152, 909, 112],
[646, 253, 104, 277, 663, 792, 626, 568],
[646, 309, 148, 448, 973, 482, 604, 664],
[173, 114, 702, 701, 50, 446, 976, 30],
[874, 855, 677, 273, 227, 351, 859, 652],
[821, 363, 834, 901, 19, 805, 135, 328],
[874, 452, 517, 805, 47, 588, 452, 646],
[412, 809, 702, 998, 595, 503, 423, 582],
[569, 648, 208, 0, 353, 724, 141, 507],
[95, 473, 548, 483, 903, 280, 888, 528],
[259, 477, 733, 26, 889, 748, 452, 569],
[549, 382, 845, 421, 417, 305, 101, 663],
[253, 334, 524, 333, 662, 644, 207, 742],
[569, 107, 446, 160, 109, 12, 348, 1012],
[736, 160, 5, 879, 25, 819, 781, 636],
[849, 895, 840, 898, 227, 739, 658, 988],
[422, 582, 547, 115, 888, 856, 178, 495],
[446, 107, 507, 160, 1009, 145, 741, 351],
[31, 582, 835, 879, 947, 169, 452, 136],
[273, 466, 189, 845, 326, 94, 11, 973],
[861, 64, 315, 776, 594, 482, 630, 940],
[422, 346, 984, 931, 299, 435, 331, 832],
[944, 691, 283, 185, 461, 731, 1008, 206],
[854, 582, 835, 425, 458, 56, 438, 302],
[65, 582, 820, 713, 298, 187, 835, 652],
[549, 245, 466, 716, 710, 381, 10, 179],
[874, 452, 394, 623, 595, 349, 881, 859],
[979, 309, 507, 33, 171, 316, 354, 326],
[422, 741, 517, 357, 554, 482, 496, 883],
[874, 311, 719, 142, 554, 616, 50, 652],
[902, 277, 548, 505, 581, 226, 537, 100],
[472, 895, 835, 0, 595, 967, 437, 130],
[176, 356, 673, 700, 745, 627, 877, 714],
[273, 582, 517, 366, 50, 980, 790, 454],
[854, 582, 295, 380, 175, 268, 452, 752],
[65, 64, 835, 623, 1009, 548, 568, 746]
]
]
)
# fmt: on
torch.testing.assert_close(outputs.cpu(), EXPECTED_OUTPUT_TOKENS)
@slow
@require_torch_accelerator
def test_zero_shot_voice_cloning(self):
torch.manual_seed(0)
conversation = [
{"role": "system", "content": [{"type": "text", "text": "Generate audio following instruction."}]},
{"role": "scene", "content": [{"type": "text", "text": "Audio is recorded from a quiet room."}]},
{
"role": "user",
"content": [
{
"type": "text",
"text": "Twas the night before my birthday. Hooray! It's almost here! It may not be a holiday, but it's the best day of the year.",
}
],
},
{
"role": "assistant",
"content": [
{
"type": "audio",
"url": "https://huggingface.co/datasets/eustlb/dummy-audio-samples-higgs/resolve/main/belinda.wav",
}
],
},
{
"role": "user",
"content": [
{
"type": "text",
"text": "The sun rises in the east and sets in the west. This simple fact has been observed by humans for thousands of years.",
}
],
},
]
model = HiggsAudioV2ForConditionalGeneration.from_pretrained(self.checkpoint_name, device_map=torch_device)
inputs = self.processor.apply_chat_template(
conversation,
add_generation_prompt=True,
tokenize=True,
return_dict=True,
sampling_rate=24000,
return_tensors="pt",
).to(model.device)
outputs = model.generate(**inputs, do_sample=False, max_new_tokens=50)
outputs = outputs[:, inputs.audio_input_ids.shape[1] :, :]
# fmt: off
EXPECTED_OUTPUT_TOKENS = torch.tensor(
[
[
[1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024],
[800, 1024, 1024, 1024, 1024, 1024, 1024, 1024],
[800, 354, 1024, 1024, 1024, 1024, 1024, 1024],
[800, 74, 427, 1024, 1024, 1024, 1024, 1024],
[800, 74, 468, 351, 1024, 1024, 1024, 1024],
[800, 74, 468, 467, 433, 1024, 1024, 1024],
[800, 74, 136, 467, 433, 552, 1024, 1024],
[800, 478, 427, 467, 433, 764, 926, 1024],
[800, 202, 52, 467, 433, 764, 926, 858],
[800, 74, 468, 343, 433, 552, 926, 858],
[321, 74, 136, 550, 513, 552, 83, 669],
[321, 74, 468, 161, 433, 890, 602, 858],
[846, 641, 468, 467, 799, 653, 926, 419],
[151, 717, 478, 161, 362, 890, 602, 419],
[29, 878, 490, 862, 799, 904, 485, 981],
[71, 212, 677, 723, 283, 342, 926, 858],
[114, 357, 299, 486, 646, 764, 926, 669],
[299, 1004, 91, 894, 208, 440, 986, 981],
[299, 203, 91, 224, 793, 649, 986, 867],
[487, 282, 619, 517, 250, 571, 42, 209],
[246, 218, 830, 260, 838, 576, 181, 736],
[487, 907, 216, 323, 9, 778, 53, 958],
[784, 739, 93, 719, 374, 472, 924, 765],
[367, 984, 554, 757, 120, 1016, 995, 819],
[14, 950, 204, 266, 672, 557, 598, 1013],
[808, 947, 474, 543, 120, 906, 327, 317],
[614, 282, 277, 769, 895, 198, 909, 417],
[135, 185, 276, 649, 895, 658, 81, 360],
[487, 968, 462, 188, 824, 740, 286, 723],
[487, 879, 394, 212, 682, 824, 546, 397],
[487, 96, 273, 517, 1018, 211, 113, 261],
[614, 251, 75, 414, 969, 1016, 645, 76],
[286, 643, 141, 281, 672, 446, 578, 107],
[114, 351, 44, 517, 769, 924, 688, 334],
[71, 677, 769, 440, 665, 794, 793, 864],
[759, 278, 286, 972, 635, 794, 129, 1012],
[263, 392, 123, 67, 365, 651, 117, 62],
[114, 274, 780, 661, 911, 20, 219, 12],
[946, 677, 394, 808, 544, 671, 795, 230],
[314, 555, 286, 238, 363, 242, 29, 38],
[784, 156, 910, 567, 96, 573, 643, 541],
[784, 708, 474, 425, 624, 314, 405, 1012],
[721, 541, 474, 223, 260, 449, 474, 910],
[386, 99, 44, 823, 495, 212, 84, 4],
[644, 793, 823, 385, 823, 566, 120, 460],
[386, 403, 6, 48, 991, 93, 514, 456],
[330, 341, 258, 902, 229, 212, 597, 656],
[1012, 513, 21, 902, 704, 430, 373, 62],
[330, 513, 903, 659, 719, 691, 701, 536],
[330, 57, 903, 957, 49, 309, 992, 187]
]
]
)
# fmt: on
torch.testing.assert_close(outputs.cpu(), EXPECTED_OUTPUT_TOKENS)
@slow
@require_torch_accelerator
def test_multi_speaker_voice_cloning(self):
torch.manual_seed(0)
conversation = [
{"role": "system", "content": [{"type": "text", "text": "Generate audio following instruction."}]},
{
"role": "scene",
"content": [
{"type": "text", "text": "Audio is recorded from a quiet room."},
{"type": "text", "text": "SPEAKER0:"},
{
"type": "audio",
"url": "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen2-Audio/audio/guess_age_gender.wav",
},
{"type": "text", "text": "SPEAKER1:"},
{
"type": "audio",
"url": "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen2-Audio/audio/1272-128104-0000.flac",
},
],
},
{
"role": "user",
"content": [
{"type": "text", "text": "[SPEAKER0] I can't believe you did that without even asking me first!"}
],
},
]
model = HiggsAudioV2ForConditionalGeneration.from_pretrained(self.checkpoint_name, device_map=torch_device)
inputs = self.processor.apply_chat_template(
conversation,
add_generation_prompt=True,
tokenize=True,
return_dict=True,
sampling_rate=24000,
return_tensors="pt",
).to(model.device)
outputs = model.generate(**inputs, do_sample=False, max_new_tokens=50)
outputs = outputs[:, inputs.audio_input_ids.shape[1] :, :]
# fmt: off
EXPECTED_OUTPUT_TOKENS = torch.tensor(
[
[
[1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024],
[633, 1024, 1024, 1024, 1024, 1024, 1024, 1024],
[242, 908, 1024, 1024, 1024, 1024, 1024, 1024],
[242, 176, 1018, 1024, 1024, 1024, 1024, 1024],
[280, 978, 386, 647, 1024, 1024, 1024, 1024],
[703, 792, 386, 289, 93, 1024, 1024, 1024],
[703, 792, 886, 88, 272, 812, 1024, 1024],
[242, 808, 639, 424, 807, 654, 680, 1024],
[846, 808, 639, 138, 617, 334, 737, 165],
[846, 442, 446, 838, 995, 473, 419, 479],
[749, 537, 446, 184, 337, 1021, 509, 440],
[321, 537, 287, 214, 252, 1021, 350, 453],
[160, 712, 981, 865, 843, 890, 64, 453],
[160, 818, 862, 880, 609, 153, 805, 29],
[721, 642, 283, 312, 421, 890, 721, 1021],
[888, 186, 82, 983, 283, 325, 417, 215],
[749, 357, 110, 956, 161, 243, 182, 1023],
[252, 839, 824, 609, 484, 49, 505, 620],
[867, 99, 231, 534, 322, 990, 443, 964],
[21, 194, 726, 395, 886, 692, 354, 532],
[25, 352, 139, 766, 887, 855, 921, 391],
[851, 486, 30, 37, 482, 456, 19, 740],
[912, 465, 158, 583, 607, 610, 565, 639],
[12, 389, 80, 956, 450, 471, 532, 130],
[798, 928, 277, 788, 642, 946, 353, 83],
[401, 288, 277, 979, 879, 323, 491, 268],
[30, 314, 877, 190, 626, 335, 630, 906],
[14, 538, 80, 461, 790, 348, 106, 779],
[723, 938, 255, 12, 863, 632, 302, 855],
[177, 924, 737, 745, 825, 94, 302, 609],
[725, 521, 500, 22, 104, 473, 566, 462],
[721, 194, 983, 743, 1005, 324, 119, 764],
[12, 811, 112, 1023, 611, 370, 960, 334],
[727, 1007, 164, 517, 1005, 902, 342, 828],
[683, 186, 681, 180, 4, 803, 151, 327],
[286, 601, 164, 543, 113, 72, 210, 448],
[652, 341, 600, 737, 891, 86, 412, 1003],
[842, 600, 241, 488, 499, 781, 457, 626],
[66, 719, 1003, 419, 813, 415, 990, 35],
[165, 39, 754, 276, 399, 615, 556, 448],
[29, 240, 609, 1012, 368, 202, 643, 384],
[413, 408, 956, 64, 748, 626, 204, 1012],
[842, 601, 232, 906, 612, 291, 189, 762],
[707, 180, 729, 462, 673, 803, 366, 860],
[747, 579, 500, 428, 997, 948, 33, 158],
[851, 96, 983, 565, 633, 444, 630, 738],
[851, 138, 507, 195, 428, 739, 921, 663],
[12, 96, 750, 897, 379, 810, 740, 648],
[12, 682, 590, 156, 370, 86, 178, 436],
[12, 164, 383, 979, 229, 865, 860, 325]
]
]
)
# fmt: on
torch.testing.assert_close(outputs.cpu(), EXPECTED_OUTPUT_TOKENS)
    @slow
    @require_torch_accelerator
    def test_batched_inference(self):
        """Batched voice-cloning integration test.

        Builds two chat conversations, each ending with an assistant audio clip that
        serves as the reference voice to clone, batches them through the chat
        template, runs greedy decoding (``do_sample=False``) for 50 new tokens, and
        compares the generated audio codebook tokens against golden values recorded
        from a reference run.
        """
        torch.manual_seed(0)
        conversation1 = [
            {"role": "system", "content": [{"type": "text", "text": "Generate audio following instruction."}]},
            {"role": "scene", "content": [{"type": "text", "text": "Audio is recorded from a quiet room."}]},
            {
                "role": "user",
                "content": [
                    {
                        "type": "text",
                        "text": "Twas the night before my birthday. Hooray! It's almost here! It may not be a holiday, but it's the best day of the year.",
                    }
                ],
            },
            {
                "role": "assistant",
                "content": [
                    {
                        "type": "audio",
                        "url": "https://huggingface.co/datasets/eustlb/dummy-audio-samples-higgs/resolve/main/belinda.wav",
                    }
                ],
            },
            {
                "role": "user",
                "content": [
                    {
                        "type": "text",
                        "text": "The sun rises in the east and sets in the west. This simple fact has been observed by humans for thousands of years.",
                    }
                ],
            },
        ]
        conversation2 = [
            {"role": "system", "content": [{"type": "text", "text": "Generate audio following instruction."}]},
            {"role": "scene", "content": [{"type": "text", "text": "Audio is recorded from a quiet room."}]},
            {
                "role": "user",
                "content": [
                    {
                        "type": "text",
                        "text": " It's super important to assess fairly the fact that our former model is over. And this is not a question of adjustment. This is not the same world, 2024, 2025. And on top of that, we are making the same mistakes, on top of the key elements I mentioned. We are over-regulating and under-investing. So just if, in the two to three years to come, if we follow our classical agenda, we will be out of the market. I have no doubts.",
                    }
                ],
            },
            {
                "role": "assistant",
                "content": [
                    {
                        "type": "audio",
                        "url": "https://huggingface.co/datasets/eustlb/dummy-audio-samples-higgs/resolve/main/macron.wav",
                    }
                ],
            },
            {"role": "user", "content": [{"type": "text", "text": "Hey, here is a clone from the given voice."}]},
        ]
        model = HiggsAudioV2ForConditionalGeneration.from_pretrained(self.checkpoint_name, device_map=torch_device)
        inputs = self.processor.apply_chat_template(
            [conversation1, conversation2],
            add_generation_prompt=True,
            tokenize=True,
            return_dict=True,
            sampling_rate=24000,
            return_tensors="pt",
        ).to(model.device)
        outputs = model.generate(**inputs, do_sample=False, max_new_tokens=50)
        # Keep only the newly generated audio codes; drop the prompt portion.
        outputs = outputs[:, inputs.audio_input_ids.shape[1] :, :]
        # Golden tokens of shape (batch, steps, codebooks). The leading diagonal of
        # 1024s presumably comes from the codebook delay pattern — TODO confirm.
        # fmt: off
        EXPECTED_OUTPUT_TOKENS = torch.tensor(
            [
                [
                    [1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024],
                    [800, 1024, 1024, 1024, 1024, 1024, 1024, 1024],
                    [800, 354, 1024, 1024, 1024, 1024, 1024, 1024],
                    [800, 74, 427, 1024, 1024, 1024, 1024, 1024],
                    [800, 74, 468, 351, 1024, 1024, 1024, 1024],
                    [800, 74, 468, 467, 433, 1024, 1024, 1024],
                    [800, 74, 136, 467, 433, 552, 1024, 1024],
                    [800, 354, 427, 467, 433, 552, 926, 1024],
                    [800, 202, 11, 467, 799, 764, 926, 858],
                    [800, 74, 468, 865, 433, 764, 926, 858],
                    [321, 74, 998, 351, 799, 552, 22, 669],
                    [321, 641, 468, 584, 433, 904, 926, 1023],
                    [846, 641, 998, 467, 609, 153, 581, 419],
                    [29, 717, 300, 584, 609, 890, 405, 215],
                    [683, 142, 490, 664, 324, 904, 926, 708],
                    [187, 204, 91, 483, 1012, 552, 44, 708],
                    [114, 878, 251, 371, 143, 890, 442, 390],
                    [752, 755, 332, 177, 711, 440, 442, 606],
                    [752, 267, 462, 470, 277, 521, 986, 719],
                    [114, 267, 986, 555, 434, 617, 464, 727],
                    [299, 1011, 986, 436, 838, 412, 685, 574],
                    [414, 152, 286, 436, 513, 254, 418, 296],
                    [414, 313, 826, 51, 513, 254, 83, 1021],
                    [270, 911, 496, 779, 10, 778, 624, 539],
                    [9, 435, 189, 543, 607, 771, 624, 852],
                    [927, 408, 96, 491, 841, 566, 49, 77],
                    [450, 849, 509, 955, 605, 929, 552, 407],
                    [861, 790, 474, 64, 608, 25, 327, 762],
                    [727, 282, 164, 522, 759, 458, 256, 701],
                    [614, 8, 848, 17, 94, 673, 612, 126],
                    [135, 185, 903, 591, 256, 561, 1003, 970],
                    [487, 968, 462, 517, 149, 968, 618, 207],
                    [487, 52, 394, 193, 62, 478, 641, 145],
                    [797, 696, 51, 79, 1018, 472, 466, 488],
                    [314, 785, 884, 146, 812, 483, 311, 547],
                    [721, 355, 279, 266, 322, 446, 913, 375],
                    [873, 429, 745, 266, 780, 758, 332, 252],
                    [299, 910, 909, 223, 80, 833, 806, 465],
                    [572, 26, 398, 4, 921, 280, 1013, 397],
                    [583, 693, 337, 953, 698, 795, 307, 844],
                    [318, 710, 844, 132, 208, 848, 515, 186],
                    [299, 894, 715, 823, 208, 441, 183, 596],
                    [845, 32, 394, 192, 843, 383, 142, 476],
                    [881, 355, 91, 517, 202, 437, 436, 199],
                    [679, 792, 930, 555, 684, 568, 662, 280],
                    [679, 849, 892, 763, 760, 254, 827, 707],
                    [356, 596, 474, 198, 374, 341, 722, 415],
                    [588, 274, 283, 195, 484, 314, 959, 488],
                    [785, 69, 91, 759, 956, 945, 719, 363],
                    [644, 793, 1010, 784, 796, 362, 147, 663]
                ],
                [
                    [1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024],
                    [244, 1024, 1024, 1024, 1024, 1024, 1024, 1024],
                    [624, 518, 1024, 1024, 1024, 1024, 1024, 1024],
                    [497, 945, 427, 1024, 1024, 1024, 1024, 1024],
                    [207, 963, 270, 841, 1024, 1024, 1024, 1024],
                    [207, 172, 468, 865, 799, 1024, 1024, 1024],
                    [497, 914, 270, 514, 362, 243, 1024, 1024],
                    [666, 1021, 427, 550, 609, 653, 579, 1024],
                    [998, 415, 270, 880, 362, 552, 53, 981],
                    [515, 9, 65, 351, 609, 890, 581, 364],
                    [497, 415, 265, 351, 984, 890, 798, 340],
                    [207, 873, 0, 550, 799, 890, 22, 229],
                    [272, 172, 427, 433, 916, 243, 798, 719],
                    [207, 1003, 270, 161, 843, 342, 760, 1023],
                    [497, 983, 52, 974, 984, 342, 118, 708],
                    [526, 1021, 982, 584, 609, 552, 296, 418],
                    [82, 349, 265, 157, 433, 666, 53, 340],
                    [717, 14, 270, 675, 421, 270, 494, 364],
                    [792, 438, 350, 550, 609, 347, 83, 419],
                    [883, 820, 672, 741, 483, 75, 798, 438],
                    [609, 662, 321, 196, 574, 826, 417, 544],
                    [1012, 657, 653, 430, 921, 812, 53, 981],
                    [442, 438, 669, 745, 253, 584, 201, 438],
                    [498, 800, 431, 23, 214, 602, 668, 1006],
                    [207, 306, 838, 542, 247, 503, 850, 151],
                    [207, 1020, 93, 283, 835, 37, 860, 976],
                    [272, 37, 265, 928, 935, 889, 613, 571],
                    [911, 810, 265, 113, 156, 243, 363, 946],
                    [280, 624, 270, 149, 912, 537, 201, 860],
                    [758, 576, 321, 865, 916, 636, 663, 1022],
                    [179, 136, 854, 595, 541, 937, 737, 802],
                    [106, 359, 98, 28, 916, 544, 330, 25],
                    [213, 52, 321, 970, 403, 544, 405, 341],
                    [16, 457, 134, 408, 622, 883, 53, 320],
                    [1012, 45, 15, 608, 345, 1023, 53, 729],
                    [815, 45, 299, 227, 814, 851, 422, 589],
                    [317, 545, 817, 670, 814, 68, 275, 10],
                    [708, 204, 817, 138, 540, 781, 624, 943],
                    [526, 136, 817, 444, 348, 95, 248, 193],
                    [15, 681, 497, 68, 550, 788, 294, 989],
                    [412, 229, 567, 478, 196, 1021, 743, 458],
                    [444, 933, 265, 984, 589, 168, 996, 727],
                    [165, 135, 56, 10, 253, 754, 349, 584],
                    [326, 795, 412, 663, 877, 168, 905, 925],
                    [1013, 860, 800, 520, 128, 20, 472, 651],
                    [355, 434, 299, 85, 891, 626, 272, 80],
                    [150, 70, 1016, 72, 819, 521, 670, 536],
                    [150, 623, 621, 541, 577, 763, 505, 906],
                    [517, 944, 586, 207, 147, 248, 843, 243],
                    [907, 77, 726, 75, 745, 746, 620, 653]
                ]
            ]
        )
        # fmt: on
        torch.testing.assert_close(outputs.cpu(), EXPECTED_OUTPUT_TOKENS)
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/higgs_audio_v2/test_modeling_higgs_audio_v2.py",
"license": "Apache License 2.0",
"lines": 912,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:tests/models/higgs_audio_v2_tokenizer/test_modeling_higgs_audio_v2_tokenizer.py | # Copyright 2025 Boson AI and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import math
import os
import tempfile
import unittest
import numpy as np
from pytest import mark
from tests.test_configuration_common import ConfigTester
from tests.test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from transformers import DacConfig, HiggsAudioV2TokenizerConfig, HubertConfig
from transformers.testing_utils import (
is_flaky,
is_torch_available,
require_flash_attn,
require_torch,
require_torch_gpu,
slow,
torch_device,
)
if is_torch_available():
import torch
from transformers import HiggsAudioV2TokenizerModel
@require_torch
class HiggsAudioV2TokenizerModelTester:
    """Builds tiny configs and random inputs for ``HiggsAudioV2TokenizerModel`` tests."""

    def __init__(
        self,
        parent,
        batch_size=4,
        num_channels=1,
        sample_rate=16000,
        codebook_size=1024,
        num_samples=256,
        is_training=False,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.sample_rate = sample_rate
        self.codebook_size = codebook_size
        self.num_samples = num_samples
        self.is_training = is_training
        # Deliberately tiny sub-model configs so the tests stay fast.
        self.acoustic_model_config = DacConfig(
            decoder_hidden_size=8,
            encoder_hidden_size=8,
            codebook_size=16,
            downsampling_ratios=[16, 16],
        )
        self.semantic_model_config = HubertConfig(
            hidden_size=32,
            num_hidden_layers=2,
            num_attention_heads=2,
            intermediate_size=12,
            conv_dim=(4, 4, 4, 4, 4, 4, 4),
        )

    def get_config(self):
        """Wrap the tiny sub-configs in a ``HiggsAudioV2TokenizerConfig``."""
        return HiggsAudioV2TokenizerConfig(
            sample_rate=self.sample_rate,
            audio_channels=self.num_channels,
            codebook_size=self.codebook_size,
            acoustic_model_config=self.acoustic_model_config,
            semantic_model_config=self.semantic_model_config,
        )

    def prepare_config_and_inputs(self):
        """Return a config plus random float waveforms of shape (batch, channels, samples)."""
        waveform_shape = [self.batch_size, self.num_channels, self.num_samples]
        return self.get_config(), {"input_values": floats_tensor(waveform_shape, scale=1.0)}

    def prepare_config_and_inputs_for_common(self):
        return self.prepare_config_and_inputs()

    def prepare_config_and_inputs_for_model_class(self, model_class):
        """Additionally provide random ``audio_codes`` sized from the config's hop length."""
        config, inputs_dict = self.prepare_config_and_inputs()
        # Number of code frames produced for num_samples audio samples.
        codes_length = math.ceil(self.num_samples / config.hop_length)
        inputs_dict["audio_codes"] = ids_tensor(
            [self.batch_size, config.num_quantizers, codes_length], config.codebook_size
        )
        return config, inputs_dict

    def create_and_check_model_forward(self, config, inputs_dict):
        """Round-trip: encoding then decoding must reproduce the input waveform shape."""
        model = HiggsAudioV2TokenizerModel(config=config).to(torch_device).eval()
        result = model(input_values=inputs_dict["input_values"])
        self.parent.assertEqual(result.audio_values.shape, (self.batch_size, self.num_channels, self.num_samples))
@require_torch
class HiggsAudioV2TokenizerModelTest(ModelTesterMixin, unittest.TestCase):
    """Common-model-test suite for ``HiggsAudioV2TokenizerModel``.

    The tokenizer model is an audio codec (waveform in, waveform out), so the
    generic tests that rely on attentions, hidden states or text embeddings are
    skipped or overridden below.
    """

    all_model_classes = (HiggsAudioV2TokenizerModel,) if is_torch_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_headmasking = False
    test_resize_embeddings = False
    test_torchscript = False
    test_can_init_all_missing_weights = False

    # The quantizer module takes ~78% of model size, so default split percents (0.5, 0.7, 0.9)
    # are too low — at 0.7 the GPU budget can't fit any module and everything lands on a single
    # device, preventing accelerate from creating a multi-device map.
    model_split_percents = [0.5, 0.8, 0.9]

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        # model does not support returning hidden states
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if "output_attentions" in inputs_dict:
            inputs_dict.pop("output_attentions")
        if "output_hidden_states" in inputs_dict:
            inputs_dict.pop("output_hidden_states")
        return inputs_dict

    def setUp(self):
        self.model_tester = HiggsAudioV2TokenizerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=HiggsAudioV2TokenizerConfig, common_properties=[], has_text_modality=False
        )

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model_forward(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_forward(*config_and_inputs)

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["input_values", "audio_codes", "bandwidth", "return_dict"]
            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)

    def test_batching_equivalence(self, atol=2e-4, rtol=2e-4):
        # Looser tolerances than the common test: conv stacks accumulate small numeric drift.
        super().test_batching_equivalence(atol=atol, rtol=rtol)

    def test_gradient_checkpointing_backward_compatibility(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            if not model_class.supports_gradient_checkpointing:
                continue
            # NOTE(review): this config exposes acoustic_model_config / semantic_model_config,
            # not text_encoder / audio_encoder / decoder — these writes look copied from another
            # model's test and would only run if gradient checkpointing were supported; confirm.
            config.text_encoder.gradient_checkpointing = True
            config.audio_encoder.gradient_checkpointing = True
            config.decoder.gradient_checkpointing = True
            model = model_class(config)
            self.assertTrue(model.is_gradient_checkpointing)

    @unittest.skip("HiggsAudioV2TokenizerModel cannot be tested with meta device")
    def test_can_load_with_meta_device_context_manager(self):
        pass

    @unittest.skip(reason="We cannot configure to output a smaller model.")
    def test_model_is_small(self):
        pass

    @unittest.skip(reason="The HiggsAudioV2TokenizerModel does not have `inputs_embeds` logics")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="The HiggsAudioV2TokenizerModel does not have `inputs_embeds` logics")
    def test_model_get_set_embeddings(self):
        pass

    @unittest.skip(reason="The HiggsAudioV2TokenizerModel does not have the usual `attention` logic")
    def test_retain_grad_hidden_states_attentions(self):
        pass

    @unittest.skip(reason="The HiggsAudioV2TokenizerModel does not have the usual `attention` logic")
    def test_torchscript_output_attentions(self):
        pass

    @unittest.skip(reason="The HiggsAudioV2TokenizerModel does not have the usual `hidden_states` logic")
    def test_torchscript_output_hidden_state(self):
        pass

    # Adapted from transformers.tests.encodec.test_modeling_encodec.XcodecModelTest._create_and_check_torchscript
    # (the accidentally duplicated non-persistent-buffer verification loop was removed)
    def _create_and_check_torchscript(self, config, inputs_dict):
        if not self.test_torchscript:
            self.skipTest(reason="test_torchscript is set to False")
        configs_no_init = _config_zero_init(config)  # To be sure we have no Nan
        configs_no_init.torchscript = True
        configs_no_init.return_dict = False
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            model.to(torch_device)
            model.eval()
            inputs = self._prepare_for_class(inputs_dict, model_class)
            main_input_name = model_class.main_input_name
            try:
                main_input = inputs[main_input_name]
                model(main_input)
                traced_model = torch.jit.trace(model, main_input)
            except RuntimeError:
                self.fail("Couldn't trace module.")
            with tempfile.TemporaryDirectory() as tmp_dir_name:
                pt_file_name = os.path.join(tmp_dir_name, "traced_model.pt")
                try:
                    torch.jit.save(traced_model, pt_file_name)
                except Exception:
                    self.fail("Couldn't save module.")
                try:
                    loaded_model = torch.jit.load(pt_file_name)
                except Exception:
                    self.fail("Couldn't load module.")
            model.to(torch_device)
            model.eval()
            loaded_model.to(torch_device)
            loaded_model.eval()
            model_state_dict = model.state_dict()
            loaded_model_state_dict = loaded_model.state_dict()
            # Tracing may add non-persistent buffers to the state dict; collect and strip them
            # so the key sets of the eager and traced models can be compared directly.
            non_persistent_buffers = {}
            for key in loaded_model_state_dict.keys():
                if key not in model_state_dict.keys():
                    non_persistent_buffers[key] = loaded_model_state_dict[key]
            loaded_model_state_dict = {
                key: value for key, value in loaded_model_state_dict.items() if key not in non_persistent_buffers
            }
            self.assertEqual(set(model_state_dict.keys()), set(loaded_model_state_dict.keys()))
            # Each non-persistent buffer of the traced model must match one buffer of the
            # eager model; matched buffers are popped so duplicates must pair up 1:1.
            model_buffers = list(model.buffers())
            for non_persistent_buffer in non_persistent_buffers.values():
                found_buffer = False
                for i, model_buffer in enumerate(model_buffers):
                    if torch.equal(non_persistent_buffer, model_buffer):
                        found_buffer = True
                        break
                self.assertTrue(found_buffer)
                model_buffers.pop(i)
            models_equal = True
            for layer_name, p1 in model_state_dict.items():
                if layer_name in loaded_model_state_dict:
                    p2 = loaded_model_state_dict[layer_name]
                    if p1.data.ne(p2.data).sum() > 0:
                        models_equal = False
            self.assertTrue(models_equal)
            # Avoid memory leak. Without this, each call increase RAM usage by ~20MB.
            # (Even with this call, there are still memory leak by ~0.04MB)
            self.clear_torch_jit_class_registry()

    @unittest.skip(reason="The HiggsAudioV2TokenizerModel does not have the usual `attention` logic")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="The HiggsAudioV2TokenizerModel does not have the usual `hidden_states` logic")
    def test_hidden_states_output(self):
        pass

    # Copied from transformers.tests.encodec.test_modeling_encodec.EncodecModelTest.test_determinism
    def test_determinism(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def check_determinism(first, second):
            # outputs are not tensors but list (since each sequence don't have the same frame_length)
            out_1 = first.cpu().numpy()
            out_2 = second.cpu().numpy()
            out_1 = out_1[~np.isnan(out_1)]
            out_2 = out_2[~np.isnan(out_2)]
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                first = model(**self._prepare_for_class(inputs_dict, model_class))[0]
                second = model(**self._prepare_for_class(inputs_dict, model_class))[0]
            if isinstance(first, tuple) and isinstance(second, tuple):
                for tensor1, tensor2 in zip(first, second):
                    check_determinism(tensor1, tensor2)
            else:
                check_determinism(first, second)

    # Copied from transformers.tests.encodec.test_modeling_encodec.EncodecModelTest.test_model_outputs_equivalence
    def test_model_outputs_equivalence(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def set_nan_tensor_to_zero(t):
            t[t != t] = 0
            return t

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            with torch.no_grad():
                tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs)
                dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs)
                self.assertTrue(isinstance(tuple_output, tuple))
                self.assertTrue(isinstance(dict_output, dict))
                # The dict output carries named fields; align them with the tuple ordering.
                dict_to_tuple_output = (dict_output["audio_codes"], dict_output["audio_values"])
                for tuple_value, dict_value in zip(tuple_output, dict_to_tuple_output):
                    self.assertTrue(
                        torch.allclose(
                            set_nan_tensor_to_zero(tuple_value), set_nan_tensor_to_zero(dict_value), atol=1e-5
                        ),
                        msg=(
                            "Tuple and dict output are not equal. Difference:"
                            f" {torch.max(torch.abs(tuple_value - dict_value))}. Tuple has `nan`:"
                            f" {torch.isnan(tuple_value).any()} and `inf`: {torch.isinf(tuple_value)}. Dict has"
                            f" `nan`: {torch.isnan(dict_value).any()} and `inf`: {torch.isinf(dict_value)}."
                        ),
                    )

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                # skipping the parametrizations original0 tensor
                if name == "semantic_model.encoder.pos_conv_embed.conv.parametrizations.weight.original0":
                    continue
                uniform_init_parms = ["conv"]
                if param.requires_grad:
                    if any(x in name for x in uniform_init_parms):
                        self.assertTrue(
                            -1.0 <= ((param.data.mean() * 1e9).round() / 1e9).item() <= 1.0,
                            msg=f"Parameter {name} of {model_class.__name__} seems not properly initialized",
                        )

    @require_flash_attn
    @require_torch_gpu
    @mark.flash_attn_test
    @slow
    @is_flaky()
    def test_flash_attn_2_inference_equivalence(self):
        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model_fa = model_class.from_pretrained(
                    tmpdirname, torch_dtype=torch.bfloat16, attn_implementation="flash_attention_2"
                )
                model_fa.to(torch_device)
                model = model_class.from_pretrained(tmpdirname, torch_dtype=torch.bfloat16)
                model.to(torch_device)
                dummy_input = inputs_dict[model.main_input_name][:1]
                if dummy_input.dtype in [torch.float32, torch.float16]:
                    dummy_input = dummy_input.to(torch.bfloat16)
                outputs = model(dummy_input)
                outputs_fa = model_fa(dummy_input)
                logits = outputs[1]
                logits_fa = outputs_fa[1]
                assert torch.allclose(logits_fa, logits, atol=4e-2, rtol=4e-2)

    @unittest.skip(reason="The HiggsAudioV2TokenizerModel does not support right padding")
    def test_flash_attn_2_inference_equivalence_right_padding(self):
        pass

    @unittest.skip(reason="The HiggsAudioV2TokenizerModel does not have support dynamic compile yet")
    def test_sdpa_can_compile_dynamic(self):
        pass
# @slow
# @require_torch
# class HiggsAudioV2TokenizerIntegrationTest(unittest.TestCase):
# TODO: @eustlb, requires first fixing Xcodec integration !!!
# for now, it is indirectly tested in HiggsAudioV2 integration test
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/higgs_audio_v2_tokenizer/test_modeling_higgs_audio_v2_tokenizer.py",
"license": "Apache License 2.0",
"lines": 349,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:src/transformers/integrations/sinq.py | # Copyright 2026 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from typing import Any
from transformers.utils import is_torch_available, logging
from ..core_model_loading import ConversionOps
from ..quantizers.quantizers_utils import get_module_from_name, should_convert_module
logger = logging.get_logger(__name__)
if is_torch_available():
import torch
import torch.nn as nn
def replace_with_sinq_linear(
    model: torch.nn.Module,
    modules_to_not_convert: list[str] | None = None,
    quant_config: dict | None = None,
    compute_dtype: torch.dtype | None = None,
    device: str = "cuda:0",
    pre_quantized: bool = False,
) -> torch.nn.Module:
    """
    Replace eligible ``nn.Linear`` modules with empty ``SINQLinear`` modules.

    The created layers are *empty*: for fresh quantization the real weights are
    quantized later (see ``SinqQuantize``), and for pre-quantized checkpoints
    the packed state is loaded later (see ``SinqDeserialize``).

    Args:
        model: The model to modify in place.
        modules_to_not_convert: List of module names to skip.
        quant_config: SINQ quantization config dict (None for pre-quantized models).
        compute_dtype: Computation dtype for the quantized layers.
        device: Device string for the quantized layers.
        pre_quantized: Whether loading a pre-quantized checkpoint. In that case the
            layer shapes come from the checkpoint, so ``None`` placeholders are
            passed instead of the original layer's dimensions.

    Returns:
        The modified model with SINQLinear modules (the same object as ``model``).
    """
    from sinq.sinqlinear_hf import SINQLinear

    if modules_to_not_convert is None:
        modules_to_not_convert = []

    # Snapshot the module list first: we mutate the tree while iterating.
    for full_name, module in list(model.named_modules()):
        if not isinstance(module, nn.Linear):
            continue
        if not should_convert_module(full_name, modules_to_not_convert):
            continue
        parent_path, _, child_name = full_name.rpartition(".")
        parent = model.get_submodule(parent_path) if parent_path else model
        sinq_layer = SINQLinear(
            in_features=module.in_features if not pre_quantized else None,
            out_features=module.out_features if not pre_quantized else None,
            bias=(module.bias is not None) if not pre_quantized else False,
            quant_config=quant_config,
            compute_dtype=compute_dtype,
            device=device,
            use_unpack_kernel=True,
        )
        setattr(parent, child_name, sinq_layer)
    return model
class SinqQuantize(ConversionOps):
    """
    Param-level ConversionOp that quantizes FP weights into SINQ format at load time.

    For each `Linear.weight` that should be quantized, the target `SINQLinear`
    module already exists (it was created in `_process_model_before_weight_loading`);
    this op simply feeds it the loaded weight tensor via `quantize()`.
    """

    def __init__(self, hf_quantizer):
        self.hf_quantizer = hf_quantizer

    def convert(
        self,
        input_dict: dict[str, Any],
        model: torch.nn.Module | None = None,
        full_layer_name: str | None = None,
        missing_keys=None,
        **kwargs,
    ) -> dict[str, torch.Tensor]:
        # input_dict carries a single entry: the checkpoint weight (possibly list-wrapped).
        first_value = next(iter(input_dict.values()))
        weight = first_value[0] if isinstance(first_value, list) else first_value

        target_module, _ = get_module_from_name(model, full_layer_name)
        target_module.quantize(weight)

        # The fp weight key has been consumed here, so it is no longer "missing".
        if missing_keys is not None:
            missing_keys.discard(full_layer_name)
        target_module._is_hf_initialized = True
        # Nothing to hand back to the generic loader: the state lives inside the module.
        return {}
class SinqDeserialize(ConversionOps):
    """
    ConversionOp for loading *pre-quantized* SINQ checkpoints.

    Per module, the checkpoint (produced by `SINQLinear.state_dict`) contains:
        <prefix>.W_q
        <prefix>.bias
        <prefix>.meta

    The quantizer's WeightConverter groups ".W_q", ".meta" and ".bias" into one
    input_dict, conceptually mapped to "<prefix>.weight", and calls this op to
    load that state into the existing SINQLinear. An empty dict is returned
    because the state is loaded directly into the module.
    """

    def __init__(self, hf_quantizer):
        self.hf_quantizer = hf_quantizer

    def convert(
        self,
        input_dict: dict[str, Any],
        model: torch.nn.Module | None = None,
        full_layer_name: str | None = None,
        **kwargs,
    ) -> dict[str, torch.Tensor]:
        # Unwrap list-wrapped values in place (the loader may hand us singleton lists).
        for key in input_dict:
            value = input_dict[key]
            if isinstance(value, list):
                input_dict[key] = value[0]

        W_q = input_dict.get(".W_q")
        meta = input_dict.get(".meta")
        bias = input_dict.get(".bias")

        # Fallback: without both W_q and meta this is not a valid SINQ entry;
        # hand the raw tensor back so standard HF weight loading handles it.
        if W_q is None or meta is None:
            fallback = next(iter(input_dict.values()))
            if isinstance(fallback, list):
                fallback = fallback[0]
            return {full_layer_name: fallback}

        target_module, _ = get_module_from_name(model, full_layer_name)
        state = {"W_q": W_q, "meta": meta}
        if bias is not None:
            state["bias"] = bias
        target_module.load_state_dict(state)
        target_module._is_hf_initialized = True
        return {}
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/integrations/sinq.py",
"license": "Apache License 2.0",
"lines": 133,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/quantizers/quantizer_sinq.py | # Copyright 2026 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from typing import TYPE_CHECKING
from ..utils import is_torch_available, logging
from ..utils.quantization_config import SinqConfig
from .base import HfQuantizer
from .quantizers_utils import get_module_from_name
if is_torch_available():
import torch
if TYPE_CHECKING:
from ..modeling_utils import PreTrainedModel
logger = logging.get_logger(__name__)
class SinqHfQuantizer(HfQuantizer):
"""
HF v5 quantizer for SINQ.
Modes:
- method="sinq" (default):
* weight-only SINQ
* param-level ConversionOps (`SinqQuantize`) during load for pure language models
(each Linear.weight is turned into a SINQLinear module)
* module-level quantization after load for multimodal models
- method="asinq":
* A-SINQ (activation-aware) SINQ quantization
"""
    # SINQ builds quantized params from fp weights at load time, so the loader must
    # route eligible params through get_quantize_ops().
    requires_parameters_quantization: bool = True

    def __init__(self, quantization_config: SinqConfig, **kwargs):
        """Store the config; the flags below are finalized in _process_model_before_weight_loading."""
        super().__init__(quantization_config, **kwargs)
        # Normalized device string cache — set elsewhere if needed; starts unset.
        self._normalized_device_str: str | None = None
        # True only for method="sinq" on a non-pre-quantized checkpoint
        # (decided in _process_model_before_weight_loading).
        self._do_param_level_sinq: bool = False
    def is_serializable(self) -> bool:
        # SINQ-quantized models can be saved: SINQLinear.state_dict emits W_q/meta/bias
        # which SinqDeserialize can reload.
        return True
    @property
    def is_trainable(self) -> bool:
        # Training on top of SINQ-quantized models is permitted.
        return True
def update_device_map(self, device_map):
if device_map is None:
if torch.cuda.is_available():
device_map = {"": torch.cuda.current_device()}
else:
device_map = {"": "cpu"}
logger.info(
"The device_map was not initialized. "
f"Setting device_map to {device_map}. "
"If you want to use the model for inference, please set device_map='auto'"
)
return device_map
def update_dtype(self, dtype: torch.dtype) -> torch.dtype:
if dtype is None:
dtype = torch.bfloat16
self.dtype = dtype
return dtype
def validate_environment(self, *args, **kwargs) -> None:
from ..utils import is_sinq_available
if not is_sinq_available():
raise ImportError("The 'sinq' package is not installed. Please install it with: pip install sinq")
if not torch.cuda.is_available():
logger.warning(
"No CUDA device is available. Quantization and inference will run on the CPU. Please note that this will significantly slow down inference speed and increase quantization time."
)
device_map = kwargs.get("device_map")
if isinstance(device_map, dict):
device_map_values = set(device_map.values())
if len(device_map_values) > 1:
raise RuntimeError(
"SinqHfQuantizer: multi-GPU device_map detected, but SINQ currently supports only a single CUDA "
f"device. Got {sorted(device_map_values)}. Please use device_map=None."
)
if self.quantization_config.method == "asinq" and not self.pre_quantized:
raise ValueError(
"You are using `method='asinq'` in the quantization config. Right now the calibrated version of SINQ"
" is not supported in Hugging Face, please refer and use the official SINQ repository "
"`to quantize a model with this method. "
)
def _build_sinq_quant_dict(self, cfg: SinqConfig) -> dict:
"""
Build the dict that SINQLinear expects as quant_config.
"""
from sinq.sinqlinear_hf import sinq_base_quant_config as sinq_base_quant_config_fn
method = cfg.method
return sinq_base_quant_config_fn(
nbits=int(cfg.nbits),
group_size=int(cfg.group_size) if cfg.group_size is not None else None,
quant_zero=False,
quant_scale=False,
view_as_float=False,
axis=1,
tiling_mode=str(cfg.tiling_mode),
method=method,
)
def param_needs_quantization(self, model: PreTrainedModel, param_name: str, **kwargs) -> bool:
"""
Called per-parameter to decide whether to run `SinqQuantize` on it.
- If `self.pre_quantized`, we do *not* quantize again (handled by SinqDeserialize instead).
- For method="asinq": return False (ASINQ is not supported in Hugging Face).
- For method="sinq": True only for SINQLinear.weight not in modules_to_not_convert.
Note: After _process_model_before_weight_loading(), the modules are already SINQLinear,
not nn.Linear. We check for SINQLinear modules that are not yet quantized (ready=False).
"""
from sinq.sinqlinear_hf import SINQLinear
if self.pre_quantized:
return False
if self.quantization_config.method == "asinq":
return False
# SINQ param-level only if deemed safe
if not self._do_param_level_sinq:
return False
module, tensor_name = get_module_from_name(model, param_name)
if tensor_name != "weight":
return False
# Check if it's an unquantized SINQLinear
is_sinq = isinstance(module, SINQLinear)
is_ready = getattr(module, "ready", True)
result = is_sinq and not is_ready
return result
    def get_quantize_ops(self):
        """
        Return the ConversionOps used for param-level quantization (Sinq).
        The actual SINQLinear construction is in integrations/sinq.py.
        """
        # Imported lazily to avoid import-time coupling with the integrations module.
        from ..integrations.sinq import SinqQuantize
        return SinqQuantize(self)
def get_weight_conversions(self):
"""
If `pre_quantized=True`, interpret a checkpoint produced by SINQLinear.state_dict:
<prefix>.W_q
<prefix>.bias
<prefix>.meta
via a WeightConverter + SinqDeserialize so that we reconstruct a SINQLinear
module instead of a plain nn.Linear.
"""
from ..core_model_loading import WeightConverter
if self.pre_quantized:
from ..integrations.sinq import SinqDeserialize
return [
WeightConverter(
source_patterns=[
".W_q",
".meta",
".bias",
],
target_patterns=[".weight"],
operations=[SinqDeserialize(self)],
)
]
return []
def _process_model_before_weight_loading(
self,
model: PreTrainedModel,
device_map,
keep_in_fp32_modules: list[str] | None = None,
**kwargs,
):
"""
Called on meta-initialized model, before loading any weights.
For SINQ, we replace nn.Linear modules with empty SINQLinear modules here.
The actual quantization happens later in SinqQuantize.convert() when weights are loaded.
"""
from ..integrations.sinq import replace_with_sinq_linear
self.modules_to_not_convert = self.get_modules_to_not_convert(
model, (self.quantization_config.modules_to_not_convert or []), keep_in_fp32_modules
)
# Enable param-level quantization for SINQ method
self._do_param_level_sinq = self.quantization_config.method == "sinq" and not self.pre_quantized
sinq_quant_dict = None if self.pre_quantized else self._build_sinq_quant_dict(self.quantization_config)
# Extract device from device_map (guaranteed to be set by update_device_map)
if isinstance(device_map, dict):
first_device = next(iter(device_map.values()), 0)
if isinstance(first_device, int):
device_str = f"cuda:{first_device}"
else:
device_str = str(first_device)
else:
device_str = "cuda:0" if torch.cuda.is_available() else "cpu"
model = replace_with_sinq_linear(
model,
modules_to_not_convert=self.modules_to_not_convert,
quant_config=sinq_quant_dict,
compute_dtype=self.dtype,
device=device_str,
pre_quantized=self.pre_quantized,
)
def _process_model_after_weight_loading(
self,
model: PreTrainedModel,
**kwargs,
):
"""
Called after *all* weights have been loaded.
For SINQ:
1. Move non-SINQLinear modules to GPU (embeddings, norms, lm_head, etc.)
- SINQLinear modules already have GemLite buffers on GPU
- We skip moving SINQLinear's W_q/meta to avoid memory duplication
2. Patch HF save/load methods for SINQ serialization
"""
from sinq.hf_io import patch_hf_pretrained_io
# Patch HF save/load methods for SINQ serialization
patch_hf_pretrained_io()
return model
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/quantizers/quantizer_sinq.py",
"license": "Apache License 2.0",
"lines": 210,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:tests/quantization/sinq/test_sinq.py | # Copyright 2026 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import tempfile
import unittest
from unittest.mock import patch
from transformers import AutoModelForCausalLM, AutoTokenizer, SinqConfig
from transformers.testing_utils import (
backend_empty_cache,
require_torch_gpu,
slow,
torch_device,
)
from transformers.utils import is_torch_available
if is_torch_available():
import torch
class SinqConfigTest(unittest.TestCase):
    """Unit tests for the SinqConfig quantization configuration."""

    def test_default_config(self):
        """Defaults: 4-bit, group size 64, 1D tiling, 'sinq' method."""
        cfg = SinqConfig()
        for attr, expected in (("nbits", 4), ("group_size", 64), ("tiling_mode", "1D"), ("method", "sinq")):
            self.assertEqual(getattr(cfg, attr), expected)

    def test_custom_config(self):
        """Explicit constructor arguments are stored verbatim."""
        cfg = SinqConfig(
            nbits=8,
            group_size=128,
            tiling_mode="2D",
            method="sinq",
        )
        self.assertEqual(cfg.nbits, 8)
        self.assertEqual(cfg.group_size, 128)
        self.assertEqual(cfg.tiling_mode, "2D")
        self.assertEqual(cfg.method, "sinq")

    def test_modules_to_not_convert(self):
        """modules_to_not_convert is kept exactly as passed."""
        skip_list = ["layer1", "layer2.weight"]
        cfg = SinqConfig(modules_to_not_convert=skip_list)
        self.assertEqual(cfg.modules_to_not_convert, skip_list)

    def test_to_dict(self):
        """Every key of to_dict() mirrors the matching config attribute."""
        cfg = SinqConfig()
        for key, value in cfg.to_dict().items():
            self.assertEqual(getattr(cfg, key), value)

    def test_from_dict(self):
        """from_dict() round-trips the provided values."""
        cfg = SinqConfig.from_dict(
            {
                "nbits": 8,
                "group_size": 128,
                "method": "sinq",
            }
        )
        self.assertEqual(cfg.nbits, 8)
        self.assertEqual(cfg.group_size, 128)
        self.assertEqual(cfg.method, "sinq")

    def test_method_validation(self):
        """An unknown quantization method must be rejected at construction time."""
        with self.assertRaises(ValueError):
            SinqConfig(method="invalid_method")
@slow
@require_torch_gpu
class SinqTest(unittest.TestCase):
    """Integration tests for SINQ quantization.

    Requires a CUDA GPU and network access to download the checkpoint; the
    quantized model is built once in ``setUpClass`` and shared across tests.
    """
    # Small checkpoint to keep the (slow) integration run manageable.
    model_name = "Qwen/Qwen3-0.6B"
    input_text = "What is the capital of France?"
    max_new_tokens = 10
    device_map = torch_device
    # Greedy decoding is deterministic for a given build, but kernel/version
    # differences can shift the continuation — accept any known-good output.
    EXPECTED_OUTPUTS = {
        "What is the capital of France? Paris.",
        "What is the capital of France? The capital of France is Paris.",
        "What is the capital of France? The capital of France is Paris. The statement is",
        "What is the capital of France? Paris is the capital and most populous city of France.",
    }
    @classmethod
    def setUpClass(cls):
        """Setup quantized model and tokenizer once for all tests."""
        cls.quantization_config = SinqConfig(
            nbits=4,
            group_size=64,
            method="sinq",
            # lm_head stays in full precision.
            modules_to_not_convert=["lm_head"],
        )
        cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name)
        cls.quantized_model = AutoModelForCausalLM.from_pretrained(
            cls.model_name,
            torch_dtype=torch.bfloat16,
            quantization_config=cls.quantization_config,
        )
    def tearDown(self):
        # Free GPU memory between tests so later tests don't OOM.
        gc.collect()
        backend_empty_cache(torch_device)
        gc.collect()
    def test_quantizer_validation_no_cuda(self):
        """Test that quantizer logs warning when CUDA is not available."""
        from transformers.quantizers.quantizer_sinq import SinqHfQuantizer
        config = SinqConfig()
        quantizer = SinqHfQuantizer(quantization_config=config)
        with patch("torch.cuda.is_available", return_value=False):
            with self.assertLogs("transformers", level="WARNING") as cm:
                quantizer.validate_environment()
            self.assertTrue(any("No CUDA is available" in msg for msg in cm.output))
    def test_asinq_not_supported(self):
        """Test that asinq method raises error for non-pre-quantized models."""
        from transformers.quantizers.quantizer_sinq import SinqHfQuantizer
        config = SinqConfig(method="asinq")
        quantizer = SinqHfQuantizer(quantization_config=config)
        quantizer.pre_quantized = False
        with self.assertRaises(ValueError):
            quantizer.validate_environment()
    def test_quantized_model_conversion(self):
        """Test that Linear modules are converted to SINQLinear."""
        from sinq.sinqlinear_hf import SINQLinear
        nb_sinq_linear = 0
        for module in self.quantized_model.modules():
            if isinstance(module, SINQLinear):
                nb_sinq_linear += 1
        self.assertGreater(nb_sinq_linear, 0)
        # lm_head was listed in modules_to_not_convert, so it must stay Linear.
        self.assertNotIsInstance(self.quantized_model.lm_head, SINQLinear)
    def test_quantized_model(self):
        """Test that quantized model can generate text."""
        input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(self.device_map)
        output = self.quantized_model.generate(**input_ids, max_new_tokens=self.max_new_tokens, do_sample=False)
        decoded = self.tokenizer.decode(output[0], skip_special_tokens=True)
        self.assertIsNotNone(decoded)
        self.assertGreater(len(decoded), len(self.input_text))
        self.assertIn(decoded, self.EXPECTED_OUTPUTS)
    def test_save_pretrained(self):
        """Test that quantized model can be saved and loaded."""
        with tempfile.TemporaryDirectory() as tmpdirname:
            self.quantized_model.save_pretrained(tmpdirname)
            loaded_model = AutoModelForCausalLM.from_pretrained(
                tmpdirname,
                device_map=self.device_map,
            )
            input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(self.device_map)
            output = loaded_model.generate(**input_ids, max_new_tokens=self.max_new_tokens, do_sample=False)
            decoded = self.tokenizer.decode(output[0], skip_special_tokens=True)
            self.assertIsNotNone(decoded)
            self.assertGreater(len(decoded), len(self.input_text))
            del loaded_model
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/quantization/sinq/test_sinq.py",
"license": "Apache License 2.0",
"lines": 155,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:src/transformers/models/voxtral_realtime/configuration_voxtral_realtime.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ...configuration_utils import PreTrainedConfig
from ...modeling_rope_utils import RopeParameters, RotaryEmbeddingConfigMixin
from ..auto import CONFIG_MAPPING, AutoConfig
from ..mistral.configuration_mistral import MistralConfig
class VoxtralRealtimeTextConfig(MistralConfig):
    r"""
    Configuration for the text decoder of a Voxtral Realtime model ([`VoxtralRealtimeText`]).

    A [`MistralConfig`] specialization: instantiating it with the defaults yields a configuration
    similar to the text decoder of the Voxtral Realtime architecture, e.g.
    [mistralai/Voxtral-Mini-4B-Realtime-2602](https://huggingface.co/mistralai/Voxtral-Mini-4B-Realtime-2602).
    """

    model_type = "voxtral_realtime_text"
class VoxtralRealtimeEncoderConfig(PreTrainedConfig, RotaryEmbeddingConfigMixin):
    r"""
    This is the configuration class to store the configuration of a [`VoxtralRealtimeEncoder`]. It is used to instantiate a
    Voxtral Realtime audio encoder according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the audio encoder of the Voxtral Realtime
    architecture.
    e.g. [mistralai/Voxtral-Mini-4B-Realtime-2602](https://huggingface.co/mistralai/Voxtral-Mini-4B-Realtime-2602)
    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.
    Args:
        vocab_size (`int`, *optional*, defaults to 131072):
            Vocabulary size of the model.
        hidden_size (`int`, *optional*, defaults to 1280):
            Dimensionality of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 5120):
            Dimension of the MLP representations.
        num_hidden_layers (`int`, *optional*, defaults to 32):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 32):
            Number of attention heads for each attention layer in the Transformer encoder.
        activation_function (`str`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler.
        num_mel_bins (`int`, *optional*, defaults to 128):
            Number of mel features used per input features. Should correspond to the value used in the
            `VoxtralRealtimeProcessor` class.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        hidden_act (`str`, *optional*, defaults to `"silu"`):
            The activation function used in the MLP layers.
        max_position_embeddings (`int`, *optional*, defaults to 1500):
            The maximum sequence length that this model might ever be used with.
        rms_norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon used by the RMS normalization layers.
        rope_parameters (`Union[RopeParameters, dict]`, *optional*):
            The parameters for the rotary position embeddings.
        sliding_window (`int`, *optional*, defaults to 750):
            The sliding window size for local attention.
        head_dim (`int`, *optional*, defaults to 64):
            The dimension of each attention head.
    ```python
    >>> from transformers import VoxtralRealtimeEncoderConfig, VoxtralRealtimeEncoder
    >>> # Initializing a VoxtralRealtimeEncoderConfig
    >>> configuration = VoxtralRealtimeEncoderConfig()
    >>> # Initializing a VoxtralRealtimeEncoder (with random weights)
    >>> model = VoxtralRealtimeEncoder(configuration)
    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "voxtral_realtime_encoder"
    # Legacy Whisper-style attribute aliases so code written against the old
    # encoder naming keeps working.
    # NOTE(review): "encoder_layerdrop" aliases "layerdrop", but no `layerdrop`
    # attribute is set in __init__ — accessing it would fail; confirm intended.
    attribute_map = {
        "d_model": "hidden_size",
        "encoder_layers": "num_hidden_layers",
        "encoder_attention_heads": "num_attention_heads",
        "encoder_ffn_dim": "intermediate_size",
        "encoder_layerdrop": "layerdrop",
    }

    def __init__(
        self,
        vocab_size=131072,
        hidden_size=1280,
        intermediate_size=5120,
        num_hidden_layers=32,
        num_attention_heads=32,
        activation_function="gelu",
        num_mel_bins=128,
        initializer_range=0.02,
        attention_dropout=0.0,
        hidden_act="silu",
        max_position_embeddings=1500,
        rms_norm_eps=1e-05,
        rope_parameters: RopeParameters | dict[str, RopeParameters] | None = None,
        sliding_window=750,
        head_dim=64,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.activation_function = activation_function
        self.num_mel_bins = num_mel_bins
        self.initializer_range = initializer_range
        # The encoder uses plain multi-head attention: KV heads == query heads.
        self.num_key_value_heads = num_attention_heads
        self.rms_norm_eps = rms_norm_eps
        self.max_position_embeddings = max_position_embeddings
        self.rope_parameters = rope_parameters
        self.hidden_act = hidden_act
        self.sliding_window = sliding_window
        # Fall back to an even split of hidden_size if head_dim is not given.
        self.head_dim = head_dim if head_dim is not None else hidden_size // num_attention_heads
        self.attention_dropout = attention_dropout
        super().__init__(**kwargs)
class VoxtralRealtimeConfig(PreTrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`VoxtralRealtimeForConditionalGeneration`]. It is used to instantiate a
    Voxtral Realtime model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of the Voxtral Realtime.
    e.g. [mistralai/Voxtral-Mini-4B-Realtime-2602](https://huggingface.co/mistralai/Voxtral-Mini-4B-Realtime-2602)
    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.
    Args:
        audio_config (`Union[AutoConfig, dict]`, *optional*):
            The config object or dictionary of the audio encoder.
        text_config (`Union[AutoConfig, dict]`, *optional*):
            The config object or dictionary of the text model.
        projector_hidden_act (`str`, *optional*, defaults to `"gelu"`):
            The activation function (function or string) in the multi-modal projector.
        audio_length_per_tok (`int`, *optional*, defaults to 8):
            The number of audio frames corresponding to each text token.
        default_num_delay_tokens (`int`, *optional*, defaults to 6):
            The default number of delay tokens used for streaming.
        downsample_factor (`int`, *optional*, defaults to 4):
            The downsampling factor applied to audio features before projection.
    ```python
    >>> from transformers import VoxtralRealtimeForConditionalGeneration, VoxtralRealtimeConfig
    >>> # Initializing a VoxtralRealtime configuration
    >>> configuration = VoxtralRealtimeConfig()
    >>> # Initializing a model with random weights
    >>> model = VoxtralRealtimeForConditionalGeneration(configuration)
    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "voxtral_realtime"
    sub_configs = {"text_config": AutoConfig, "audio_config": AutoConfig}
    # Baseline text-decoder hyperparameters; user-supplied dict values override these.
    _default_text_config_kwargs = {
        "vocab_size": 131072,
        "hidden_size": 3072,
        "intermediate_size": 9216,
        "num_hidden_layers": 26,
        "num_attention_heads": 32,
        "num_key_value_heads": 8,
        "max_position_embeddings": 131072,
        "rms_norm_eps": 1e-05,
        "use_cache": True,
        "rope_theta": 1000000.0,
        "head_dim": 128,
        "tie_word_embeddings": True,
        "sliding_window": 8192,
    }

    def __init__(
        self,
        audio_config=None,
        text_config=None,
        projector_hidden_act="gelu",
        audio_length_per_tok=8,
        default_num_delay_tokens=6,
        downsample_factor=4,
        **kwargs,
    ):
        # Audio sub-config: instantiate from dict or fall back to defaults;
        # an already-built config object is used as-is.
        if audio_config is None:
            audio_config = CONFIG_MAPPING["voxtral_realtime_encoder"]()
        elif isinstance(audio_config, dict):
            audio_model_type = audio_config.get("model_type", "voxtral_realtime_encoder")
            audio_config["model_type"] = audio_model_type
            audio_config = CONFIG_MAPPING[audio_model_type](**audio_config)
        self.audio_config = audio_config

        # Text sub-config: defaults are merged under any user-provided values.
        if text_config is None:
            text_config = CONFIG_MAPPING["voxtral_realtime_text"](**self._default_text_config_kwargs)
        elif isinstance(text_config, dict):
            text_model_type = text_config.get("model_type", "voxtral_realtime_text")
            text_config["model_type"] = text_model_type
            merged_kwargs = {**self._default_text_config_kwargs, **text_config}
            text_config = CONFIG_MAPPING[text_model_type](**merged_kwargs)
        self.text_config = text_config

        self.hidden_size = text_config.hidden_size
        self.projector_hidden_act = projector_hidden_act
        self.audio_length_per_tok = audio_length_per_tok
        self.default_num_delay_tokens = default_num_delay_tokens
        self.downsample_factor = downsample_factor
        super().__init__(**kwargs)
# Public API of this module, consumed by transformers' lazy-import machinery.
__all__ = ["VoxtralRealtimeEncoderConfig", "VoxtralRealtimeConfig", "VoxtralRealtimeTextConfig"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/voxtral_realtime/configuration_voxtral_realtime.py",
"license": "Apache License 2.0",
"lines": 198,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/voxtral_realtime/convert_voxtral_realtime_weights_to_hf.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import gc
import json
import os
import re
import torch
from safetensors.torch import load_file
from transformers import (
MistralCommonBackend,
VoxtralRealtimeConfig,
VoxtralRealtimeFeatureExtractor,
VoxtralRealtimeForConditionalGeneration,
VoxtralRealtimeProcessor,
)
from transformers.utils.hub import cached_file
# fmt: off
# Regex -> replacement mapping from original (Mistral-native) state-dict keys to HF keys.
# Literal dots in the text-model patterns are escaped so each pattern matches exactly
# the intended key, consistent with the audio-model patterns below.
STATE_DICT_MAPPING = {
    # Text model keys
    r"^output\.weight": r"language_model.lm_head.weight",
    r"^norm\.weight": r"language_model.model.norm.weight",
    r"^tok_embeddings\.weight": r"language_model.model.embed_tokens.weight",
    r"^layers\.(\d+)\.attention_norm\.weight": r"language_model.model.layers.\1.input_layernorm.weight",
    r"^layers\.(\d+)\.ffn_norm\.weight": r"language_model.model.layers.\1.post_attention_layernorm.weight",
    r"^layers\.(\d+)\.attention\.w(q|k|v|o)\.weight": r"language_model.model.layers.\1.self_attn.\2_proj.weight",
    r"^layers\.(\d+)\.feed_forward\.w1\.weight": r"language_model.model.layers.\1.mlp.gate_proj.weight",
    r"^layers\.(\d+)\.feed_forward\.w2\.weight": r"language_model.model.layers.\1.mlp.down_proj.weight",
    r"^layers\.(\d+)\.feed_forward\.w3\.weight": r"language_model.model.layers.\1.mlp.up_proj.weight",
    r"mm_streams_embeddings\.embedding_module\.tok_embeddings\.weight": r"language_model.model.embed_tokens.weight",
    r"^layers\.(\d+)\.ada_rms_norm_t_cond\.0\.weight": r"language_model.model.layers.\1.ada_rms_norm.linear1.weight",
    r"^layers\.(\d+)\.ada_rms_norm_t_cond\.2\.weight": r"language_model.model.layers.\1.ada_rms_norm.linear2.weight",
    # Audio model keys
    r"mm_streams_embeddings\.embedding_module\.whisper_encoder\.conv_layers\.0\.conv\.(weight|bias)": r"audio_tower.embedder.conv1.\1",
    r"mm_streams_embeddings\.embedding_module\.whisper_encoder\.conv_layers\.1\.conv\.(weight|bias)": r"audio_tower.embedder.conv2.\1",
    r"mm_streams_embeddings\.embedding_module\.whisper_encoder\.transformer\.norm\.(weight|bias)": r"audio_tower.norm.\1",
    r"mm_streams_embeddings\.embedding_module\.whisper_encoder\.transformer\.layers\.(\d+)\.attention\.w([qkv])\.(weight|bias)": r"audio_tower.layers.\1.self_attn.\2_proj.\3",
    r"mm_streams_embeddings\.embedding_module\.whisper_encoder\.transformer\.layers\.(\d+)\.attention\.wo\.(weight|bias)": r"audio_tower.layers.\1.self_attn.o_proj.\2",
    r"mm_streams_embeddings\.embedding_module\.whisper_encoder\.transformer\.layers\.(\d+)\.attention_norm\.(weight|bias)": r"audio_tower.layers.\1.self_attn_layer_norm.\2",
    r"mm_streams_embeddings\.embedding_module\.whisper_encoder\.transformer\.layers\.(\d+)\.feed_forward\.w1\.(weight|bias)": r"audio_tower.layers.\1.mlp.gate_proj.\2",
    r"mm_streams_embeddings\.embedding_module\.whisper_encoder\.transformer\.layers\.(\d+)\.feed_forward\.w2\.(weight|bias)": r"audio_tower.layers.\1.mlp.down_proj.\2",
    r"mm_streams_embeddings\.embedding_module\.whisper_encoder\.transformer\.layers\.(\d+)\.feed_forward\.w3\.(weight|bias)": r"audio_tower.layers.\1.mlp.up_proj.\2",
    r"mm_streams_embeddings\.embedding_module\.whisper_encoder\.transformer\.layers\.(\d+)\.ffn_norm\.(weight|bias)": r"audio_tower.layers.\1.final_layer_norm.\2",
    r"mm_streams_embeddings\.embedding_module\.audio_language_projection\.0\.weight": r"multi_modal_projector.linear_1.weight",
    r"mm_streams_embeddings\.embedding_module\.audio_language_projection\.2\.weight": r"multi_modal_projector.linear_2.weight",
}
# fmt: on
def convert_config(original_config: dict, max_position_embeddings: int = 131072):
    """Translate the original `params.json` dict into a `VoxtralRealtimeConfig`.

    Note: mutates `original_config` (pops the "multimodal" entry).
    """
    audio_args = original_config.pop("multimodal")["whisper_model_args"]["encoder_args"]
    text_args = original_config

    # Keys that are renamed between the original format and HF.
    renamed_text_keys = {
        "hidden_size": "dim",
        "num_hidden_layers": "n_layers",
        "intermediate_size": "hidden_dim",
        "num_attention_heads": "n_heads",
        "num_key_value_heads": "n_kv_heads",
        "rms_norm_eps": "norm_eps",
    }
    # Keys carried over verbatim when present.
    passthrough_keys = ("head_dim", "vocab_size", "rope_theta")

    text_kwargs = {new: text_args[old] for new, old in renamed_text_keys.items()}
    text_kwargs.update({k: text_args[k] for k in passthrough_keys if k in text_args})

    # These are not always defined depending on `params.json`.
    text_kwargs["sliding_window"] = text_args.get("sliding_window", None)
    text_kwargs["max_position_embeddings"] = text_args.get("max_seq_len", max_position_embeddings)
    # `sliding_window` may sometimes be serialized as a string.
    if text_kwargs["sliding_window"] is not None:
        text_kwargs["sliding_window"] = int(text_kwargs["sliding_window"])

    renamed_audio_keys = {
        "hidden_size": "dim",
        "num_hidden_layers": "n_layers",
        "intermediate_size": "hidden_dim",
        "num_attention_heads": "n_heads",
        "num_key_value_heads": "n_heads",
    }
    audio_kwargs = {new: audio_args[old] for new, old in renamed_audio_keys.items()}
    audio_kwargs.update({k: audio_args[k] for k in passthrough_keys if k in audio_args})

    return VoxtralRealtimeConfig(
        audio_config=audio_kwargs,
        text_config=text_kwargs,
        projector_hidden_act="gelu",
    )
def map_old_key_to_new(old_key):
    """Translate one original state-dict key into its HF-format equivalent.

    Raises ValueError if no pattern in STATE_DICT_MAPPING matches.
    """
    for pattern, replacement in STATE_DICT_MAPPING.items():
        new_key, hits = re.subn(pattern, replacement, old_key)
        if hits:
            # First matching pattern wins.
            return new_key
    raise ValueError(f"Key: {old_key} could not be mapped (check the mapping).")
def permute_for_rope(tensor, n_heads, dim1, dim2=None):
    """Reorder interleaved RoPE weights into the layout HF models expect.

    Supports 2D weights of shape (dim1, dim2) and, when `dim2` is None,
    1D biases of shape (dim1,).
    """
    pairs_per_head = dim1 // n_heads // 2
    if dim2 is None:
        # Bias: (dim1,) -> (heads, pairs, 2) -> swap the pair axis -> flatten.
        return tensor.view(n_heads, pairs_per_head, 2).transpose(1, 2).reshape(dim1)
    # Weight: (dim1, dim2) -> (heads, pairs, 2, dim2) -> swap -> flatten rows.
    return tensor.view(n_heads, pairs_per_head, 2, dim2).transpose(1, 2).reshape(dim1, dim2)
def convert_state_dict(original_state_dict, config):
    """Convert a full original state dict to HF format.

    Assumes a single `nn.Module` is never sharded across files (the usual case).
    Q/K/V projection tensors are additionally permuted for the HF RoPE layout,
    using the attention geometry of the audio or text sub-config depending on
    which sub-model the target key belongs to. (Previously the two branches
    were copy-pasted duplicates; they are now shared.)
    """
    new_dict = {}
    for old_key, tensor in original_state_dict.items():
        new_key = map_old_key_to_new(old_key)

        # Pick the sub-config describing this tensor's attention geometry.
        if "audio_tower" in new_key:
            sub_config = config.audio_config
        elif "language_model" in new_key:
            sub_config = config.text_config
        else:
            sub_config = None

        if sub_config is not None:
            num_attention_heads = sub_config.num_attention_heads
            num_key_value_heads = sub_config.num_key_value_heads
            head_dim = sub_config.head_dim
            tensor = _permute_projection_weights(
                tensor,
                new_key,
                num_attention_heads,
                num_key_value_heads,
                head_dim,
                sub_config.hidden_size,
                head_dim * num_attention_heads,   # query_dim
                head_dim * num_key_value_heads,   # key_value_dim
            )
        new_dict[new_key] = tensor
    return new_dict
def _permute_projection_weights(
    tensor, key, num_attention_heads, num_key_value_heads, head_dim, hidden_size, query_dim, key_value_dim
):
    """Reshape (and RoPE-permute for q/k) a projection tensor.

    Keys that are not q/k/v projections are returned unchanged.
    """
    # Per-projection geometry: (head count, flattened output dim, needs RoPE permute).
    if "q_proj" in key:
        heads, out_dim, needs_rope = num_attention_heads, query_dim, True
    elif "k_proj" in key:
        heads, out_dim, needs_rope = num_key_value_heads, key_value_dim, True
    elif "v_proj" in key:
        heads, out_dim, needs_rope = num_key_value_heads, key_value_dim, False
    else:
        return tensor

    if "weight" in key:
        tensor = tensor.view(heads, head_dim, hidden_size).reshape(out_dim, hidden_size)
        if needs_rope:
            tensor = permute_for_rope(tensor, heads, out_dim, hidden_size)
    elif "bias" in key:
        tensor = tensor.view(heads, head_dim).reshape(out_dim)
        if needs_rope:
            tensor = permute_for_rope(tensor, heads, out_dim)
    return tensor
def write_model(
    input_path_or_repo,
    model_name,
    config_name,
    output_dir,
):
    """Convert an original checkpoint to HF format and save it to `output_dir`.

    Downloads/locates the original config and weights, converts both, loads
    them into a `VoxtralRealtimeForConditionalGeneration`, saves the result,
    then reloads it as a sanity check.
    """
    print("Converting the model.")
    os.makedirs(output_dir, exist_ok=True)
    # --------------
    # convert config
    # --------------
    config_path = cached_file(input_path_or_repo, config_name)
    with open(config_path, "r") as f:
        original_config = json.load(f)
    config = convert_config(original_config)
    # ---------------
    # convert weights
    # ---------------
    model_path = cached_file(input_path_or_repo, model_name)
    print(f"Fetching all parameters from the checkpoint at {model_path}...")
    state_dict = load_file(model_path)
    print("Converting model...")
    converted_state_dict = convert_state_dict(state_dict, config)
    # The original checkpoint has no separate lm_head; materialize it from the
    # embedding weights so strict loading succeeds (they are tied below anyway).
    converted_state_dict["language_model.lm_head.weight"] = converted_state_dict[
        "language_model.model.embed_tokens.weight"
    ].clone()
    # -------------------------
    # load the weights and save
    # -------------------------
    print("Loading the checkpoint in a VoxtralRealtime model.")
    # Instantiate on the meta device so no memory is allocated until
    # load_state_dict(assign=True) swaps in the real tensors.
    with torch.device("meta"):
        model = VoxtralRealtimeForConditionalGeneration(config)
    model.load_state_dict(converted_state_dict, strict=True, assign=True)
    model.tie_weights()
    print("Checkpoint loaded successfully.")
    # Strip conversion-time metadata so it is not serialized with the model.
    del model.config._name_or_path
    del model.generation_config._from_model_config
    # NOTE(review): pad_token_id=11 is a model-specific constant — confirm it
    # matches the tokenizer's pad token for this checkpoint.
    model.generation_config.pad_token_id = 11
    print("Saving the model.")
    model.save_pretrained(output_dir)
    del state_dict, model
    # Safety check: reload the converted model
    gc.collect()
    print("Reloading the model to check if it's saved correctly.")
    VoxtralRealtimeForConditionalGeneration.from_pretrained(output_dir, dtype=torch.bfloat16, device_map="auto")
    print("Model reloaded successfully.")
def write_processor(input_path_or_repo: str, output_dir: str):
    """Build the VoxtralRealtimeProcessor (tokenizer + feature extractor) and save it."""
    tokenizer = MistralCommonBackend.from_pretrained(input_path_or_repo)
    feature_extractor = VoxtralRealtimeFeatureExtractor()
    print("Creating the processor...")
    # Bundle both components into a single processor and persist it.
    processor = VoxtralRealtimeProcessor(
        tokenizer=tokenizer,
        feature_extractor=feature_extractor,
    )
    processor.save_pretrained(output_dir)
    print("Processor saved successfully.")
def main():
    """CLI entry point: convert the model weights, then write the processor."""
    parser = argparse.ArgumentParser(description="Convert VoxtralRealtime weights to Hugging Face format")
    parser.add_argument(
        "--input_path_or_repo",
        type=str,
        required=True,
        help="Path or repo containing the original weights",
    )
    parser.add_argument(
        "--model_name",
        type=str,
        required=True,
        help="Name of the model in input_path_or_repo",
    )
    parser.add_argument(
        "--config_name",
        type=str,
        required=True,
        help="Name of the config in input_path_or_repo",
    )
    parser.add_argument(
        "--output_dir",
        help="Location to write HF model and tokenizer",
    )
    args = parser.parse_args()

    write_model(args.input_path_or_repo, args.model_name, args.config_name, args.output_dir)
    write_processor(args.input_path_or_repo, args.output_dir)
# Allow running the converter directly as a script.
if __name__ == "__main__":
    main()
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/voxtral_realtime/convert_voxtral_realtime_weights_to_hf.py",
"license": "Apache License 2.0",
"lines": 287,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/voxtral_realtime/feature_extraction_voxtral_realtime.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ...audio_utils import mel_filter_bank
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
from ...utils.import_utils import requires
logger = logging.get_logger(__name__)
@requires(backends=("torch",))
class VoxtralRealtimeFeatureExtractor(SequenceFeatureExtractor):
    r"""
    Constructs a VOXTRAL_REALTIME feature extractor.

    This feature extractor inherits from [`~feature_extraction_sequence_utils.SequenceFeatureExtractor`] which contains
    most of the main methods. Users should refer to this superclass for more information regarding those methods.

    This class extracts mel-filter bank features from raw speech using a custom numpy implementation of the `Short Time
    Fourier Transform` which should match pytorch's `torch.stft` equivalent.

    Args:
        feature_size (`int`, *optional*, defaults to 128):
            The feature dimension of the extracted features.
        sampling_rate (`int`, *optional*, defaults to 16000):
            The sampling rate at which the audio files should be digitalized expressed in hertz (Hz).
        hop_length (`int`, *optional*, defaults to 160):
            Length of the overlapping windows for the STFT used to obtain the Mel Frequency coefficients.
        n_fft (`int`, *optional*, defaults to 400):
            Size of the Fourier transform.
        win_length (`int`, *optional*, defaults to 400):
            The window length for the STFT computation.
        padding_value (`float`, *optional*, defaults to 0.0):
            Padding value used to pad the audio. Should correspond to silences.
        global_log_mel_max (`float`, *optional*, defaults to 1.5):
            Fixed upper bound used when clamping the dynamic range of the log-mel spectrogram. When `None`, the
            per-input maximum is used instead.
    """

    model_input_names = ["input_features", "attention_mask"]

    def __init__(
        self,
        feature_size=128,
        sampling_rate=16000,
        hop_length=160,
        n_fft=400,
        win_length=400,
        padding_value=0.0,
        global_log_mel_max=1.5,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.hop_length = hop_length
        self.n_fft = n_fft
        self.win_length = win_length
        # Mel filter bank projecting the STFT bins (1 + n_fft // 2) onto `feature_size` mel bands.
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=8000.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )
        self.global_log_mel_max = global_log_mel_max

    def _torch_extract_fbank_features(self, waveform, device: str = "cpu", center: bool = True):
        """Compute the log-mel spectrogram of `waveform` with torch.stft.

        The dynamic range is clamped to 8 (log10) below either `self.global_log_mel_max`
        or the per-input maximum, then rescaled by `(x + 4) / 4`.
        """
        window = torch.hann_window(self.n_fft, device=device)
        stft = torch.stft(waveform, self.n_fft, self.hop_length, window=window, return_complex=True, center=center)
        # Drop the last frame and take the power spectrum.
        magnitudes = stft[..., :-1].abs() ** 2
        mel_filters = torch.from_numpy(self.mel_filters).to(device, torch.float32)
        mel_spec = mel_filters.T @ magnitudes
        log_spec = torch.clamp(mel_spec, min=1e-10).log10()
        if self.global_log_mel_max is not None:
            # Fixed global maximum keeps the normalization identical across streaming chunks.
            log_spec_max = torch.tensor(
                self.global_log_mel_max,
                device=log_spec.device,
                dtype=log_spec.dtype,
            )
        else:
            log_spec_max = log_spec.max()
        log_spec = torch.maximum(log_spec, log_spec_max - 8.0)
        log_spec = (log_spec + 4.0) / 4.0
        if device != "cpu":
            log_spec = log_spec.detach().cpu()
        return log_spec

    def __call__(
        self,
        raw_speech: np.ndarray | list[float] | list[np.ndarray] | list[list[float]],
        truncation: bool = False,
        pad_to_multiple_of: int | None = None,
        return_tensors: str | TensorType | None = None,
        return_attention_mask: bool | None = None,
        padding: str | None = "longest",
        max_length: int | None = None,
        sampling_rate: int | None = None,
        do_normalize: bool | None = None,
        device: str | None = "cpu",
        return_token_timestamps: bool | None = None,
        center: bool = True,
        **kwargs,
    ) -> BatchFeature:
        """
        Main method to featurize and prepare for the model one or several sequence(s). Implementation uses PyTorch for
        the STFT computation if available, otherwise a slower NumPy based one.

        Args:
            raw_speech (`np.ndarray`, `list[float]`, `list[np.ndarray]`, `list[list[float]]`):
                The sequence or batch of sequences to be padded. Each sequence can be a numpy array, a list of float
                values, a list of numpy arrays or a list of list of float values. Must be mono channel audio, not
                stereo, i.e. single float per timestep.
            truncation (`bool`, *optional*, defaults to `False`):
                Activates truncation to cut input sequences longer than *max_length* to *max_length*.
            pad_to_multiple_of (`int`, *optional*, defaults to None):
                If set will pad the sequence to a multiple of the provided value.
                This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability
                `>= 7.5` (Volta), or on TPUs which benefit from having sequence lengths be a multiple of 128.
            return_attention_mask (`bool`, *optional*):
                Whether to return the attention mask. If left to the default, will return the attention mask according
                to the specific feature_extractor's default.
                [What are attention masks?](../glossary#attention-mask)

                <Tip>

                `attention_mask` should always be passed for batched inference, to avoid subtle bugs.

                </Tip>

            return_tensors (`str` or [`~utils.TensorType`], *optional*):
                If set, will return tensors instead of list of python integers. Acceptable values are:
                - `'tf'`: Return TensorFlow `tf.constant` objects.
                - `'pt'`: Return PyTorch `torch.Tensor` objects.
                - `'np'`: Return Numpy `np.ndarray` objects.
            sampling_rate (`int`, *optional*):
                The sampling rate at which the `raw_speech` input was sampled. It is strongly recommended to pass
                `sampling_rate` at the forward call to prevent silent errors and allow automatic speech recognition
                pipeline.
            padding_value (`float`, *optional*, defaults to 0.0):
                The value that is used to fill the padding values / vectors.
            do_normalize (`bool`, *optional*, defaults to `False`):
                Whether or not to zero-mean unit-variance normalize the input. Normalizing can help to significantly
                improve the performance of the model.
            device (`str`, *optional*, defaults to `'cpu'`):
                Specifies the device for computation of the log-mel spectrogram of audio signals in the
                `_torch_extract_fbank_features` method. (e.g., "cpu", "cuda")
            return_token_timestamps (`bool`, *optional*, defaults to `None`):
                Deprecated. Use `return_attention_mask` instead from which the number of frames can be inferred.
                Whether or not to return the number of frames of the input raw_speech.
                These num_frames can be used by the model to compute word level timestamps.
            center (`bool`, *optional*, defaults to `True`):
                Whether to use centering for the STFT computation.
        """
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                f"It is strongly recommended to pass the `sampling_rate` argument to `{self.__class__.__name__}()`. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        # Convert to torch tensor
        if isinstance(raw_speech, np.ndarray):
            raw_speech = torch.tensor(raw_speech)
        elif isinstance(raw_speech, (list, tuple)):
            if isinstance(raw_speech[0], (list, np.ndarray)):
                raw_speech = [torch.tensor(speech) for speech in raw_speech]
            else:  # list[float]
                raw_speech = torch.tensor(raw_speech)

        is_batched_torch = isinstance(raw_speech, torch.Tensor) and len(raw_speech.shape) > 1
        if is_batched_torch and len(raw_speech.shape) > 2:
            logger.warning(
                f"Only mono-channel audio is supported for input to {self.__class__.__name__}. "
                "We will take the mean of the channels to convert to mono."
            )
            raw_speech = raw_speech.mean(-1)

        is_batched_sequence = isinstance(raw_speech, (list, tuple))
        if is_batched_sequence:
            # Bug fix: rebuild the list so the mono conversion is actually kept —
            # previously `speech = speech.mean(-1)` only rebound the loop variable
            # and the converted tensor was discarded.
            mono_speech = []
            for speech in raw_speech:
                if len(speech.shape) > 1:
                    logger.warning(
                        f"Only mono-channel audio is supported for input to {self.__class__.__name__}. "
                        "We will take the mean of the channels to convert to mono."
                    )
                    speech = speech.mean(-1)
                mono_speech.append(speech)
            raw_speech = mono_speech

        if is_batched_torch or is_batched_sequence:
            raw_speech = [speech[:, None].to(torch.float32) for speech in raw_speech]
        else:
            raw_speech = [raw_speech[:, None].to(torch.float32)]

        batched_speech = BatchFeature({"input_features": raw_speech})
        padded_inputs = self.pad(
            batched_speech,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            return_tensors="pt",
        )
        input_features = padded_inputs.input_features.squeeze(-1)
        input_features = self._torch_extract_fbank_features(input_features, device, center)
        data = {
            "input_features": input_features.to(torch.float32),
        }
        if return_attention_mask:
            # Downsample the sample-level mask to frame rate: one entry per hop, starting
            # at the first fully-formed analysis window.
            attention_mask = padded_inputs.attention_mask[:, self.win_length - 1 :: self.hop_length]
            data["attention_mask"] = attention_mask.to(torch.bool)
        return BatchFeature(data=data, tensor_type=return_tensors)
__all__ = ["VoxtralRealtimeFeatureExtractor"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/voxtral_realtime/feature_extraction_voxtral_realtime.py",
"license": "Apache License 2.0",
"lines": 211,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/voxtral_realtime/modular_voxtral_realtime.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from dataclasses import dataclass
from functools import cached_property
from types import GeneratorType
import torch
import torch.nn as nn
from ... import initialization as init
from ...activations import ACT2FN
from ...cache_utils import Cache, DynamicCache, StaticCache
from ...generation import GenerationMixin
from ...masking_utils import create_causal_mask, create_sliding_window_causal_mask
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import BaseModelOutputWithPast, BaseModelOutputWithPooling, CausalLMOutputWithPast
from ...modeling_utils import PreTrainedModel
from ...models.llama.modeling_llama import LlamaRotaryEmbedding
from ...models.mistral.modeling_mistral import (
MistralAttention,
MistralDecoderLayer,
MistralForCausalLM,
MistralMLP,
MistralModel,
MistralRMSNorm,
)
from ...models.voxtral.modeling_voxtral import (
VoxtralForConditionalGeneration,
VoxtralMultiModalProjector,
VoxtralPreTrainedModel,
)
from ...processing_utils import Unpack
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, is_torchdynamo_compiling, logging
from ...utils.output_capturing import capture_outputs
from .configuration_voxtral_realtime import VoxtralRealtimeEncoderConfig
logger = logging.get_logger(__name__)
class VoxtralRealtimeConv1dCacheLayer:
    """Streaming cache for one causal convolution: holds the trailing `left_pad`
    samples of the previous chunk so the next chunk can be left-padded with real
    context instead of zeros.

    The buffer is allocated lazily on the first `update` call; its batch size,
    device and dtype are frozen at that point.
    NOTE(review): a later chunk with a different batch size would break the
    in-place `copy_` — presumably callers keep the batch constant across chunks;
    confirm.
    """

    def __init__(self):
        # Allocated lazily in `lazy_initialization`; shape (batch, in_channels, left_pad).
        self.cache: torch.Tensor | None = None
        self.is_initialized: bool = False

    def lazy_initialization(self, hidden_states: torch.Tensor, conv_module) -> None:
        """Allocate the zero-filled left-context buffer, sized from `conv_module`."""
        self.left_pad = conv_module.left_pad
        self.in_channels = conv_module.in_channels
        self.cache = torch.zeros(
            hidden_states.shape[0],
            self.in_channels,
            self.left_pad,
            device=hidden_states.device,
            dtype=hidden_states.dtype,
        )
        # Pin the buffer's address for torch.compile so in-place updates stay valid
        # across compiled replays; skipped while dynamo is already tracing.
        if not is_torchdynamo_compiling():
            torch._dynamo.mark_static_address(self.cache)
        self.is_initialized = True

    def update(self, hidden_states: torch.Tensor, conv_module=None) -> torch.Tensor:
        """Return the cached left context and store this chunk's trailing samples.

        Args:
            hidden_states: current chunk, shape (batch, in_channels, time).
            conv_module: required on the first call to size the buffer; may be
                omitted afterwards.

        Returns:
            The previous cache content — the left padding to prepend to `hidden_states`.

        Raises:
            ValueError: if called before initialization without a `conv_module`.
        """
        if not self.is_initialized and conv_module is not None:
            self.lazy_initialization(hidden_states, conv_module)
        elif not self.is_initialized:
            raise ValueError(
                "VoxtralRealtimeConv1dCacheLayer is not initialized. Make sure to provide conv_module to the update method."
            )
        # get the padding states
        if self.left_pad > 0:
            # If the chunk is shorter than left_pad, re-use the oldest part of the
            # existing cache to top the new context up to exactly left_pad samples.
            shortfall = max(0, self.left_pad - hidden_states.shape[-1])
            if shortfall > 0:
                padding_states = torch.cat([self.cache[:, :, -shortfall:], hidden_states], dim=-1)
            else:
                padding_states = hidden_states[:, :, -self.left_pad :]
        else:
            # No left context needed; keep an empty tensor so copy_ below is a no-op.
            padding_states = torch.empty(
                hidden_states.shape[0], self.in_channels, 0, dtype=hidden_states.dtype, device=hidden_states.device
            )
        # Hand back the old context, then overwrite the (static-address) buffer in place.
        current_cache = self.cache.clone()
        self.cache.copy_(padding_states)
        return current_cache
class VoxtralRealtimeConv1dPaddingCache:
    """Registry of per-convolution streaming caches, keyed by each conv's `cache_key`."""

    def __init__(self):
        # One VoxtralRealtimeConv1dCacheLayer per causal convolution.
        self.layers = {}

    def update(self, hidden_states, cache_key, conv_module):
        """Roll the cache for `cache_key` forward and return `hidden_states` with the
        previous chunk's trailing samples prepended on the time axis."""
        layer = self.layers.setdefault(cache_key, VoxtralRealtimeConv1dCacheLayer())
        left_context = layer.update(hidden_states, conv_module)
        return torch.cat([left_context, hidden_states], dim=-1)
@dataclass
class VoxtralRealtimeEncoderOutput(BaseModelOutputWithPast):
    r"""
    Args:
        padding_cache (`VoxtralRealtimeConv1dPaddingCache`, *optional*):
            Cache for padding in convolutional layers to maintain state across streaming chunks.
    """

    padding_cache: VoxtralRealtimeConv1dPaddingCache | None = None
@dataclass
class VoxtralRealtimeCausalLMOutputWithPast(CausalLMOutputWithPast):
    r"""
    Args:
        encoder_past_key_values (`Cache`, *optional*):
            Pre-computed hidden-states (key and value in the self-attention blocks) for the audio encoder
            that can be used to speed up sequential decoding.
        padding_cache (`VoxtralRealtimeConv1dPaddingCache`, *optional*):
            Cache for padding in convolutional layers to maintain state across streaming chunks.
    """

    # Streaming state of the audio tower — kept separate from the text decoder's `past_key_values`.
    encoder_past_key_values: Cache | None = None
    padding_cache: VoxtralRealtimeConv1dPaddingCache | None = None
class VoxtralRealtimeRotaryEmbedding(LlamaRotaryEmbedding): ...
class VoxtralRealtimeCausalConv1d(nn.Conv1d):
    """1D convolution that only looks at past samples.

    Causality is obtained by left-padding the input — with zeros in offline mode,
    or with the tail of the previous chunk from a `VoxtralRealtimeConv1dPaddingCache`
    in streaming mode.
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: int,
        cache_key: str,
        stride: int = 1,
        dilation: int = 1,
        bias: bool = True,
    ):
        super().__init__(in_channels, out_channels, kernel_size, stride=stride, dilation=dilation, bias=bias)
        # Identifies this convolution's slot inside the shared padding cache.
        self.cache_key = cache_key

    @cached_property
    def left_pad(self):
        # Dilated receptive field of the kernel, minus the stride: the number of
        # past samples each output position needs on the left.
        receptive_field = 1 + (self.kernel_size[0] - 1) * self.dilation[0]
        return receptive_field - self.stride[0]

    def forward(
        self,
        x: torch.Tensor,
        padding_cache: VoxtralRealtimeConv1dPaddingCache | None = None,
    ) -> torch.Tensor:
        if padding_cache is None:
            # Offline: causal zero-padding on the left only.
            padded = nn.functional.pad(x, (self.left_pad, 0))
        else:
            # Streaming: prepend the previous chunk's trailing samples.
            padded = padding_cache.update(x, self.cache_key, self)
        return super().forward(padded)
class VoxtralRealtimeRMSNorm(MistralRMSNorm): ...
class VoxtralRealtimeAttention(MistralAttention):
    """Encoder self-attention with Whisper-style projection biases (none on k_proj)."""

    def __init__(self, config, layer_idx: int):
        super().__init__(config, layer_idx)
        query_dim = config.num_attention_heads * self.head_dim
        kv_dim = config.num_key_value_heads * self.head_dim
        self.q_proj = nn.Linear(config.hidden_size, query_dim, bias=True)
        # similar to Whisper's original implementation the k projection does **not** have a bias
        self.k_proj = nn.Linear(config.hidden_size, kv_dim, bias=False)
        self.v_proj = nn.Linear(config.hidden_size, kv_dim, bias=True)
        self.o_proj = nn.Linear(query_dim, config.hidden_size, bias=True)
class VoxtralRealtimeMLP(MistralMLP):
    """Mistral MLP whose down projection carries a bias term."""

    def __init__(self, config):
        super().__init__(config)
        # Override the parent's down projection to add a bias.
        self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=True)
class VoxtralRealtimeEmbedder(nn.Module):
    """Conv front-end: maps a log-mel spectrogram to encoder hidden states.

    Two causal convolutions with GELU activations; the second has stride 2,
    halving the time resolution. Output is (batch, time, hidden_size).
    """

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.conv1 = VoxtralRealtimeCausalConv1d(
            config.num_mel_bins, config.hidden_size, kernel_size=3, cache_key="conv1"
        )
        self.conv2 = VoxtralRealtimeCausalConv1d(
            config.hidden_size, config.hidden_size, kernel_size=3, stride=2, cache_key="conv2"
        )

    def forward(self, input_features, padding_cache=None):
        hidden = self.conv1(input_features, padding_cache=padding_cache)
        hidden = nn.functional.gelu(hidden)
        hidden = self.conv2(hidden, padding_cache=padding_cache)
        hidden = nn.functional.gelu(hidden)
        # (batch, channels, time) -> (batch, time, channels)
        return hidden.permute(0, 2, 1)
class VoxtralRealtimeEncoderLayer(GradientCheckpointingLayer):
    """Pre-norm transformer encoder block: self-attention then MLP, each with a residual."""

    def __init__(self, config, layer_idx: int):
        super().__init__()
        self.self_attn = VoxtralRealtimeAttention(config, layer_idx)
        self.self_attn_layer_norm = VoxtralRealtimeRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.activation_fn = ACT2FN[config.activation_function]
        self.final_layer_norm = VoxtralRealtimeRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.mlp = VoxtralRealtimeMLP(config)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: Cache | None = None,
        use_cache: bool | None = False,
        cache_position: torch.LongTensor | None = None,
        position_embeddings: tuple[torch.Tensor, torch.Tensor] | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> torch.Tensor:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`): attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
        """
        # Attention sub-block: pre-norm, attend, add residual.
        attn_output, _ = self.self_attn(
            hidden_states=self.self_attn_layer_norm(hidden_states),
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            use_cache=use_cache,
            cache_position=cache_position,
            position_embeddings=position_embeddings,
            **kwargs,
        )
        hidden_states = hidden_states + attn_output

        # Feed-forward sub-block: pre-norm, MLP, add residual.
        mlp_output = self.mlp(self.final_layer_norm(hidden_states))
        return hidden_states + mlp_output
class VoxtralRealtimePreTrainedModel(VoxtralPreTrainedModel, PreTrainedModel):
    # TODO: @eustlb, this should be enabled soon
    _can_compile_fullgraph = False

    @torch.no_grad()
    def _init_weights(self, module):
        # NOTE(review): modular-transformers convention — the unbound call without `self`
        # is presumably rewritten to a proper `super()` call by the modular converter;
        # confirm before "fixing" it.
        PreTrainedModel._init_weights(module)
        if isinstance(module, VoxtralRealtimeTimeEmbedding):
            # Deterministically re-create the sinusoidal inverse frequencies (same formula
            # as VoxtralRealtimeTimeEmbedding.__init__) instead of leaving the buffer random.
            inv_freq = torch.exp(-math.log(module.theta) * torch.arange(module.dim // 2).float() / (module.dim // 2))
            init.copy_(module.inv_freq, inv_freq)
@auto_docstring(
    custom_intro="""
    The VoxtralRealtime encoder, which is a Whisper encoder.
    """
)
class VoxtralRealtimeEncoder(VoxtralRealtimePreTrainedModel):
    """
    Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
    [`VoxtralRealtimeEncoderLayer`].

    Args:
        config: VoxtralRealtimeEncoderConfig
    """

    config: VoxtralRealtimeEncoderConfig
    main_input_name = "input_features"
    input_modalities = "audio"
    _no_split_modules = ["VoxtralRealtimeEncoderLayer"]
    _can_record_outputs = {
        "attentions": VoxtralRealtimeAttention,
        "hidden_states": VoxtralRealtimeEncoderLayer,
    }

    def __init__(self, config):
        super().__init__(config)
        # Conv front-end turning log-mel features into hidden states.
        self.embedder = VoxtralRealtimeEmbedder(config)
        self.layers = nn.ModuleList(
            [VoxtralRealtimeEncoderLayer(config, layer_idx) for layer_idx in range(config.encoder_layers)]
        )
        self.norm = VoxtralRealtimeRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.rotary_emb = VoxtralRealtimeRotaryEmbedding(config=config)
        self.gradient_checkpointing = False
        # Initialize weights and apply final processing
        self.post_init()

    @capture_outputs
    @auto_docstring
    def forward(
        self,
        input_features: torch.FloatTensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: Cache | None = None,
        padding_cache: VoxtralRealtimeConv1dPaddingCache | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        cache_position: torch.LongTensor | None = None,
        use_cache: bool | None = None,
        use_padding_cache: bool | None = None,
        attention_mask: torch.Tensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple | BaseModelOutputWithPooling:
        r"""
        padding_cache (`VoxtralRealtimeConv1dPaddingCache`, *optional*):
            Cache for padding in convolutional layers to maintain state across streaming chunks.
        use_padding_cache (`bool`, *optional*):
            Whether to use the padding cache.
        """
        if (input_features is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_features or inputs_embeds")
        # Lazily create the conv padding cache on the first streaming chunk.
        if use_padding_cache and padding_cache is None:
            padding_cache = VoxtralRealtimeConv1dPaddingCache()
        if inputs_embeds is None:
            inputs_embeds = self.embedder(input_features, padding_cache)
        if use_cache and past_key_values is None:
            past_key_values = DynamicCache(config=self.config)
        if cache_position is None:
            # Continue positions after whatever is already stored in the KV cache.
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position = torch.arange(
                past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
            )
        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)
        # Causal attention over audio frames; sliding-window variant when configured.
        mask_function = create_causal_mask if self.config.sliding_window is None else create_sliding_window_causal_mask
        causal_mask = mask_function(
            config=self.config,
            inputs_embeds=inputs_embeds,
            attention_mask=attention_mask,
            cache_position=cache_position,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )
        hidden_states = inputs_embeds
        position_embeddings = self.rotary_emb(hidden_states, position_ids=position_ids)
        for encoder_layer in self.layers:
            hidden_states = encoder_layer(
                hidden_states,
                attention_mask=causal_mask,
                position_embeddings=position_embeddings,
                position_ids=position_ids,
                past_key_values=past_key_values,
                use_cache=use_cache,
                cache_position=cache_position,
                **kwargs,
            )
        hidden_states = self.norm(hidden_states)
        return VoxtralRealtimeEncoderOutput(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values,
            padding_cache=padding_cache,
        )
class VoxtralRealtimeTextAdaRmsNorm(nn.Module):
    """Bias-free bottleneck MLP (hidden -> 32 -> hidden with GELU in between).

    Produces the adaptive scale that modulates the post-attention normed
    activations in the text decoder, conditioned on the time embedding.
    """

    def __init__(self, config):
        super().__init__()
        self.linear1 = nn.Linear(config.hidden_size, 32, bias=False)
        self.linear2 = nn.Linear(32, config.hidden_size, bias=False)

    def forward(self, hidden_states):
        bottleneck = nn.functional.gelu(self.linear1(hidden_states))
        return self.linear2(bottleneck)
class VoxtralRealtimeTextAttention(MistralAttention): ...
class VoxtralRealtimeTextMLP(MistralMLP): ...
class VoxtralRealtimeTextDecoderLayer(MistralDecoderLayer):
    """Mistral decoder layer with a time-conditioned adaptive scale on the MLP input."""

    def __init__(self, config, layer_idx):
        super().__init__(config, layer_idx)
        self.input_layernorm = VoxtralRealtimeRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.post_attention_layernorm = VoxtralRealtimeRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.ada_rms_norm = VoxtralRealtimeTextAdaRmsNorm(config)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: Cache | None = None,
        use_cache: bool | None = False,
        cache_position: torch.LongTensor | None = None,
        position_embeddings: tuple[torch.Tensor, torch.Tensor] | None = None,
        t_cond: torch.Tensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> torch.Tensor:
        # Self-attention sub-block (pre-norm + residual).
        attn_output, _ = self.self_attn(
            hidden_states=self.input_layernorm(hidden_states),
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            use_cache=use_cache,
            cache_position=cache_position,
            position_embeddings=position_embeddings,
            **kwargs,
        )
        hidden_states = hidden_states + attn_output

        # MLP sub-block; its normed input is rescaled by (1 + ada(t_cond)).
        normed = self.post_attention_layernorm(hidden_states)
        normed = normed * (1 + self.ada_rms_norm(t_cond))
        return hidden_states + self.mlp(normed)
class VoxtralRealtimeTextModel(MistralModel):
    """Mistral text backbone using the Voxtral-specific norm and rotary-embedding classes."""

    def __init__(self, config):
        super().__init__(config)
        self.norm = VoxtralRealtimeRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.rotary_emb = VoxtralRealtimeRotaryEmbedding(config=config)
class VoxtralRealtimeTextForCausalLM(MistralForCausalLM):
    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: Cache | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        labels: torch.LongTensor | None = None,
        use_cache: bool | None = None,
        cache_position: torch.LongTensor | None = None,
        logits_to_keep: int | torch.Tensor = 0,
        **kwargs: Unpack[TransformersKwargs],
    ) -> CausalLMOutputWithPast:
        r"""
        Example:

        ```python
        >>> from transformers import AutoTokenizer, VoxtralRealtimeTextForCausalLM

        >>> model = VoxtralRealtimeTextForCausalLM.from_pretrained("mistralai/Voxtral-Mini-4B-Realtime-2602")
        >>> tokenizer = AutoTokenizer.from_pretrained("mistralai/Voxtral-Mini-4B-Realtime-2602")

        >>> prompt = "Hey, are you conscious? Can you talk to me?"
        >>> inputs = tokenizer(prompt, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
        ```"""
        # Extra kwargs (e.g. `t_cond` for the adaptive norm) flow through to the decoder layers.
        outputs: BaseModelOutputWithPast = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            cache_position=cache_position,
            **kwargs,
        )
        hidden_states = outputs.last_hidden_state
        # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
        slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
        logits = self.lm_head(hidden_states[:, slice_indices, :])

        loss = None
        if labels is not None:
            loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
class VoxtralRealtimeTimeEmbedding(nn.Module):
    """Sinusoidal embedding of a scalar time value.

    Inverse frequencies follow the transformer convention
    ``theta ** (-i / (dim // 2))`` for ``i in [0, dim // 2)``; the output
    concatenates the cosines followed by the sines of ``time * inv_freq``.
    """

    def __init__(self, dim: int, theta: float = 10000.0) -> None:
        super().__init__()
        self.dim = dim
        self.theta = theta
        half_dim = dim // 2
        exponents = torch.arange(half_dim).float() / half_dim
        inv_freq = torch.exp(-math.log(theta) * exponents)
        # Non-persistent: re-derived deterministically, not stored in checkpoints.
        self.register_buffer("inv_freq", inv_freq, persistent=False)

    def forward(self, time_tensor: torch.Tensor) -> torch.Tensor:
        freqs = self.inv_freq.to(device=time_tensor.device, dtype=time_tensor.dtype)
        phase = time_tensor * freqs
        return torch.cat((phase.cos(), phase.sin()))
class VoxtralRealtimeMultiModalProjector(VoxtralMultiModalProjector):
    def __init__(self, config):
        super().__init__(config)
        # Input stacks `downsample_factor` consecutive encoder frames, hence the
        # widened input dimension of the first projection.
        self.linear_1 = nn.Linear(
            config.audio_config.hidden_size * config.downsample_factor, config.text_config.hidden_size, bias=False
        )
class VoxtralRealtimeForConditionalGeneration(VoxtralForConditionalGeneration, GenerationMixin):
_keep_in_fp32_modules_strict = None
    def __init__(self, config):
        super().__init__(config)
        # Swap in the realtime text decoder and add the time-delay conditioning embedding.
        self.language_model = VoxtralRealtimeTextForCausalLM(config.text_config)
        self.time_embedding = VoxtralRealtimeTimeEmbedding(config.text_config.hidden_size)
    @can_return_tuple
    @auto_docstring(
        custom_intro="This method is used to get the audio embeddings from input features (a log mel spectrogram), meaning inferring the audio encoder and the multi-modal projector."
    )
    def get_audio_features(
        self,
        input_features: torch.FloatTensor = None,
        padding_cache: VoxtralRealtimeConv1dPaddingCache | None = None,
        encoder_inputs_embeds: torch.FloatTensor | None = None,
        past_key_values: Cache | None = None,
        use_cache: bool | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple | BaseModelOutputWithPooling:
        r"""
        input_features (`torch.FloatTensor`):
            Float values of mel features extracted from the raw speech waveform. Raw speech waveform can be
            obtained by loading a `.flac` or `.wav` audio file into an array of type `list[float]` or a
            `numpy.ndarray`, *e.g.* via the soundfile library (`pip install soundfile`). To prepare the array into
            `input_features`, the [`AutoFeatureExtractor`] should be used for extracting the mel features, padding
            and conversion into a tensor of type `torch.FloatTensor`. See [`~VoxtralRealtimeFeatureExtractor.__call__`]
        padding_cache (`VoxtralRealtimeConv1dPaddingCache`, *optional*):
            Cache for padding in convolutional layers to maintain state across streaming chunks.
        encoder_inputs_embeds (`torch.FloatTensor`, *optional*):
            Optionally, instead of passing `input_features` you can choose to directly pass an embedded representation for the encoder.
        """
        # Padding cache is only useful when decoding chunk-by-chunk, so it follows use_cache.
        audio_outputs = self.audio_tower(
            input_features=input_features,
            inputs_embeds=encoder_inputs_embeds,
            past_key_values=past_key_values,
            padding_cache=padding_cache,
            return_dict=True,
            use_cache=use_cache,
            use_padding_cache=use_cache,
            **kwargs,
        )
        audio_hidden_states = audio_outputs.last_hidden_state
        # Stack `downsample_factor` consecutive encoder frames into one vector, shrinking
        # the time axis by that factor before projecting to the text hidden size.
        audio_hidden_states = audio_hidden_states.reshape(
            audio_hidden_states.shape[0], -1, self.config.audio_config.hidden_size * self.config.downsample_factor
        )
        audio_embeds = self.multi_modal_projector(audio_hidden_states)
        # Expose the projected embeddings on the standard pooler_output field.
        audio_outputs.pooler_output = audio_embeds
        return audio_outputs
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: torch.LongTensor | None = None,
input_features: torch.FloatTensor | None = None,
attention_mask: torch.Tensor | None = None,
position_ids: torch.LongTensor | None = None,
past_key_values: Cache | None = None,
encoder_past_key_values: Cache | None = None,
padding_cache: VoxtralRealtimeConv1dPaddingCache | None = None,
inputs_embeds: torch.FloatTensor | None = None,
encoder_inputs_embeds: torch.FloatTensor | None = None,
labels: torch.LongTensor | None = None,
use_cache: bool | None = None,
cache_position: torch.LongTensor | None = None,
logits_to_keep: int | torch.Tensor = 0,
num_delay_tokens: int | torch.Tensor = None,
**kwargs: Unpack[TransformersKwargs],
) -> VoxtralRealtimeCausalLMOutputWithPast:
r"""
encoder_past_key_values (`Cache`, *optional*):
Pre-computed hidden-states (key and value in the self-attention blocks) for the encoder that can be used to speed up sequential decoding.
padding_cache (`VoxtralRealtimeConv1dPaddingCache`, *optional*):
Cache for padding in convolutional layers to maintain state across streaming chunks.
encoder_inputs_embeds (`torch.FloatTensor`, *optional*):
Optionally, instead of passing `input_features` you can choose to directly pass an embedded representation for the encoder.
num_delay_tokens (`int` or `torch.Tensor`, *optional*):
Number of delay tokens used when preparing inputs, see [`~VoxtralRealtimeProcessor`] for more details.
Example:
```python
>>> import torch
>>> from transformers import VoxtralRealtimeForConditionalGeneration, AutoProcessor
>>> from datasets import load_dataset
>>> repo_id = "mistralai/Voxtral-Mini-4B-Realtime-2602"
>>> processor = AutoProcessor.from_pretrained(repo_id)
>>> model = VoxtralRealtimeForConditionalGeneration.from_pretrained(repo_id, dtype=torch.bfloat16, device_map="auto")
>>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
>>> audio = ds[0]["audio"]["array"]
>>> inputs = processor(audio, return_tensors="pt")
>>> inputs = inputs.to(model.device, dtype=model.dtype)
>>> outputs = model.generate(**inputs)
>>> processor.batch_decode(outputs, skip_special_tokens=True)
```"""
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
if (input_features is None) ^ (encoder_inputs_embeds is not None):
raise ValueError("You must specify exactly one of input_features or encoder_inputs_embeds")
if inputs_embeds is None:
inputs_embeds = self.get_input_embeddings()(input_ids)
if input_features is not None or encoder_inputs_embeds is not None:
audio_outputs = self.get_audio_features(
input_features=input_features,
encoder_inputs_embeds=encoder_inputs_embeds,
past_key_values=encoder_past_key_values,
padding_cache=padding_cache,
use_cache=use_cache,
return_dict=True,
)
inputs_embeds += audio_outputs.pooler_output.to(inputs_embeds.device)
if num_delay_tokens is None:
num_delay_tokens = self.config.default_num_delay_tokens
logger.warning_once(
f"`num_delay_tokens` was not provided. "
f"Falling back to `config.default_num_delay_tokens={num_delay_tokens}`. "
f"Consider preparing inputs with [`~VoxtralRealtimeProcessor.__call__`] which automatically sets this parameter."
)
time_tensor = torch.full(
(1,),
num_delay_tokens,
device=inputs_embeds.device,
dtype=inputs_embeds.dtype,
)
t_cond = self.time_embedding(time_tensor)
t_cond = t_cond[None, ...] # broadcastable to batch size
outputs: CausalLMOutputWithPast = self.language_model(
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
labels=labels,
use_cache=use_cache,
cache_position=cache_position,
logits_to_keep=logits_to_keep,
t_cond=t_cond,
**kwargs,
)
return VoxtralRealtimeCausalLMOutputWithPast(
loss=outputs.loss,
logits=outputs.logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
encoder_past_key_values=audio_outputs.past_key_values if use_cache else None,
padding_cache=audio_outputs.padding_cache if use_cache else None,
)
def prepare_inputs_for_generation(
    self,
    *args,
    encoder_inputs_embeds: torch.Tensor | None = None,
    **kwargs,
):
    """Prepare decoder inputs and slice the audio-embedding window for the current step.

    On top of the default preparation, slices `encoder_inputs_embeds` along the time
    dimension so that only the frames aligned with the current `cache_position` window
    (scaled by `config.downsample_factor`) are forwarded.
    """
    # NOTE: in modular files, unbound parent-class calls like this one are rewritten to
    # `super()` calls by the modular converter, which injects `self`.
    model_inputs = GenerationMixin.prepare_inputs_for_generation(*args, **kwargs)
    if encoder_inputs_embeds is not None:
        # Each decoder position corresponds to `downsample_factor` encoder frames.
        start_idx = model_inputs["cache_position"][0] * self.config.downsample_factor
        end_idx = (model_inputs["cache_position"][-1] + 1) * self.config.downsample_factor
        model_inputs["encoder_inputs_embeds"] = encoder_inputs_embeds[:, start_idx:end_idx, :]
    return model_inputs
def _prepare_model_inputs(
    self,
    inputs: torch.Tensor | None = None,
    bos_token_id: torch.Tensor | None = None,
    model_kwargs: dict[str, torch.Tensor] | None = None,
) -> tuple[torch.Tensor, str | None, dict[str, torch.Tensor]]:
    """Resolve generation inputs, pre-embedding audio or setting up the streaming generator.

    Two audio paths:
    - Tensor `input_features`: embed them once up-front into `encoder_inputs_embeds`.
    - Generator `input_features` (streaming): keep the generator under
      `input_features_generator` and pull the first chunk; an immediately exhausted
      stream sets `_stream_exhausted` so generation stops right away.
    """
    inputs, input_name, model_kwargs = GenerationMixin._prepare_model_inputs(inputs, bos_token_id, model_kwargs)
    input_features = model_kwargs.get("input_features")
    if input_features is not None and not isinstance(input_features, GeneratorType):
        # Non-streaming: precompute audio embeddings once for the whole clip.
        model_kwargs["encoder_inputs_embeds"] = self.audio_tower.embedder(model_kwargs.pop("input_features"))
    elif isinstance(input_features, GeneratorType):
        # Streaming: stash the generator and fetch the first audio chunk now.
        input_features_generator = model_kwargs.pop("input_features")
        model_kwargs["input_features_generator"] = input_features_generator
        try:
            model_kwargs["input_features"] = next(input_features_generator)
        except StopIteration:
            self._stream_exhausted = True
    return inputs, input_name, model_kwargs
def _has_unfinished_sequences(self, this_peer_finished: bool, synced_gpus: bool, device: torch.device) -> bool:
    """Stop the generation loop as soon as the audio stream is exhausted.

    The `_stream_exhausted` flag is consumed (reset to False) so a subsequent
    `generate` call on the same model starts fresh.
    """
    if getattr(self, "_stream_exhausted", False):
        self._stream_exhausted = False
        return False
    return GenerationMixin._has_unfinished_sequences(this_peer_finished, synced_gpus, device)
def _update_model_kwargs_for_generation(
    self,
    outputs,
    model_kwargs,
    is_encoder_decoder: bool = False,
    num_new_tokens: int = 1,
):
    """Propagate encoder caches and pull the next streaming audio chunk between steps."""
    model_kwargs = GenerationMixin._update_model_kwargs_for_generation(
        outputs, model_kwargs, is_encoder_decoder, num_new_tokens
    )
    # Carry the audio-encoder caches forward so the encoder does not recompute past frames.
    if hasattr(outputs, "encoder_past_key_values"):
        model_kwargs["encoder_past_key_values"] = outputs.encoder_past_key_values
    if hasattr(outputs, "padding_cache"):
        model_kwargs["padding_cache"] = outputs.padding_cache
    # In streaming mode, fetch the next audio chunk; exhaustion ends generation
    # via `_has_unfinished_sequences`.
    input_features_generator = model_kwargs.get("input_features_generator")
    if input_features_generator is not None:
        try:
            model_kwargs["input_features"] = next(input_features_generator)
        except StopIteration:
            self._stream_exhausted = True
    return model_kwargs
def _prepare_cache_for_generation(
    self,
    generation_config,
    model_kwargs: dict,
    generation_mode,
    batch_size: int,
    max_cache_length: int,
):
    """Set up the decoder cache as usual, plus a dedicated static cache for the audio encoder."""
    GenerationMixin._prepare_cache_for_generation(
        generation_config, model_kwargs, generation_mode, batch_size, max_cache_length
    )
    # NOTE: we use the "encoder" prefix although this is not a classical encoder-decoder
    # model (no cross-attention). The model is better seen as a VLM / AudioLM whose encoder
    # can take past_key_values for its own forward pass.
    if generation_config.cache_implementation is not None:
        if generation_config.cache_implementation in ("static", "offloaded_static"):
            model_kwargs["encoder_past_key_values"] = self._get_encoder_cache(
                cache_implementation=generation_config.cache_implementation,
                batch_size=batch_size,
                # The encoder attends within a sliding window, bounding its cache length.
                max_cache_len=self.config.audio_config.sliding_window,
            )
        else:
            raise ValueError(f"{generation_config.cache_implementation} is not supported for VoxtralRealtime")
def _get_encoder_cache(self, cache_implementation: str, batch_size: int, max_cache_len: int) -> Cache:
    """Return a (possibly reused) static cache for the audio encoder.

    An existing cache is reused (after `reset()`) only when its offloading mode,
    batch size and capacity are still compatible; otherwise a new `StaticCache`
    is allocated.
    """
    offload_cache = "offloaded" in cache_implementation
    if hasattr(self, "_encoder_cache"):
        cache_to_check = self._encoder_cache
    # Short-circuit on `not hasattr` keeps `cache_to_check` from being read when unset.
    need_new_cache = (
        not hasattr(self, "_encoder_cache")
        or cache_to_check.offloading != offload_cache
        or cache_to_check.max_batch_size != batch_size
        or cache_to_check.max_cache_len < max_cache_len
    )
    if need_new_cache:
        # NOTE(review): `batch_size` is compared above but not passed to `StaticCache` —
        # presumably the cache infers it lazily; confirm against the StaticCache API.
        self_attention_cache_kwargs = {
            "config": self.config.audio_config,
            "max_cache_len": max_cache_len,
            "offloading": offload_cache,
        }
        self._encoder_cache = StaticCache(**self_attention_cache_kwargs)
    else:
        self._encoder_cache.reset()
    return self._encoder_cache
def _prepare_generation_config(
    self,
    generation_config,
    **kwargs,
):
    """Derive generation length from the audio duration unless the user set one explicitly.

    Non-streaming: one generated token per `audio_length_per_tok` mel frames, so the
    default is to generate exactly `num_audio_tokens`. Streaming: length is effectively
    unbounded and controlled by stream exhaustion instead.
    """
    # Check if user explicitly provided max_length or max_new_tokens BEFORE
    # the base class applies defaults
    user_set_max_length = kwargs.get("max_length") is not None or (
        generation_config is not None and generation_config.max_length is not None
    )
    user_set_max_new_tokens = kwargs.get("max_new_tokens") is not None or (
        generation_config is not None and generation_config.max_new_tokens is not None
    )
    generation_config, model_kwargs = GenerationMixin._prepare_generation_config(generation_config, **kwargs)
    input_features = model_kwargs.get("input_features")
    if input_features is not None and not isinstance(input_features, GeneratorType):
        audio_length = input_features.shape[-1]
        num_audio_tokens = math.ceil(audio_length / self.config.audio_length_per_tok)
        # Stash for use in _prepare_generated_length
        generation_config._num_audio_tokens = num_audio_tokens
        if not user_set_max_length and not user_set_max_new_tokens:
            # Default: generate exactly num_audio_tokens
            generation_config.max_length = num_audio_tokens
            generation_config.max_new_tokens = None
            generation_config._voxtral_set_max_length = True
        else:
            generation_config._voxtral_set_max_length = False
    elif isinstance(input_features, GeneratorType):
        # In streaming mode, generation length is controlled by stream exhaustion only
        generation_config.max_new_tokens = None
        generation_config.max_length = int(1e9)
        generation_config._voxtral_set_max_length = True
    return generation_config, model_kwargs
def _prepare_generated_length(
    self,
    generation_config,
    has_default_max_length,
    has_default_min_length,
    model_input_name,
    input_ids_length,
    inputs_tensor,
):
    """Resolve generation length, honoring the audio-derived bound set in `_prepare_generation_config`."""
    # If we set max_length ourselves (user didn't provide any length param),
    # prevent the base class from overriding it
    if getattr(generation_config, "_voxtral_set_max_length", False):
        has_default_max_length = False
    generation_config = GenerationMixin._prepare_generated_length(
        generation_config,
        has_default_max_length,
        has_default_min_length,
        model_input_name,
        input_ids_length,
        inputs_tensor,
    )
    # num_audio_tokens is a hard upper bound: we can never generate more
    # tokens than there are in the audio. Clamp after the base class has
    # resolved max_new_tokens into max_length.
    num_audio_tokens = getattr(generation_config, "_num_audio_tokens", None)
    if num_audio_tokens is not None:
        generation_config.max_length = min(generation_config.max_length, num_audio_tokens)
    return generation_config
# Public symbols exported by this module.
__all__ = [
    "VoxtralRealtimeForConditionalGeneration",
    "VoxtralRealtimeEncoder",
    "VoxtralRealtimePreTrainedModel",
]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/voxtral_realtime/modular_voxtral_realtime.py",
"license": "Apache License 2.0",
"lines": 749,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/voxtral_realtime/processing_voxtral_realtime.py | # Copyright 2026 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ...tokenization_mistral_common import MistralCommonBackend
from ...utils import auto_docstring, is_mistral_common_available, is_soundfile_available, is_torch_available, logging
from ...utils.import_utils import requires
if is_torch_available():
pass
if is_soundfile_available():
pass
if is_mistral_common_available():
from mistral_common.audio import Audio
from mistral_common.protocol.instruct.chunk import RawAudio
from mistral_common.protocol.transcription.request import StreamingMode, TranscriptionRequest
from ...audio_utils import AudioInput, make_list_of_audio
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessingKwargs, ProcessorMixin, Unpack
logger = logging.get_logger(__name__)
class VoxtralRealtimeProcessorKwargs(ProcessingKwargs, total=False):
    """Default keyword arguments merged into `VoxtralRealtimeProcessor.__call__` calls."""

    _defaults = {
        "text_kwargs": {
            "padding": True,
            "add_special_tokens": False,
        },
        "audio_kwargs": {
            # 16 kHz audio, padded but never truncated.
            "sampling_rate": 16000,
            "padding": True,
            "truncation": False,
        },
    }
@auto_docstring
@requires(backends=("mistral-common",))
class VoxtralRealtimeProcessor(ProcessorMixin):
    """Processor pairing a mel feature extractor with a `MistralCommonBackend` tokenizer.

    Validates at construction time that the feature extractor's STFT parameters match the
    audio configuration carried by the mistral-common tokenizer, and exposes the timing
    constants needed to chunk raw audio for (streaming) transcription.
    """

    def __init__(self, feature_extractor, tokenizer):
        if not isinstance(tokenizer, MistralCommonBackend):
            raise ValueError("`tokenizer` must be a `MistralCommonBackend` tokenizer.")
        super().__init__(feature_extractor, tokenizer)
        # The feature extractor and the tokenizer's audio encoder must agree on the STFT
        # parameters, otherwise tokens and mel frames would be misaligned.
        encoding_config = self.mistral_common_audio_config.encoding_config
        consistency_checks = [
            ("win_length", feature_extractor.win_length, "window_size", encoding_config.window_size),
            ("hop_length", feature_extractor.hop_length, "hop_length", encoding_config.hop_length),
            ("feature_size", feature_extractor.feature_size, "num_mel_bins", encoding_config.num_mel_bins),
            (
                "sampling_rate",
                feature_extractor.sampling_rate,
                "sampling_rate",
                self.mistral_common_audio_config.sampling_rate,
            ),
        ]
        for fe_name, fe_value, config_name, config_value in consistency_checks:
            if fe_value != config_value:
                raise ValueError(
                    f"feature_extractor.{fe_name} ({fe_value}) "
                    f"and tokenizer.tokenizer.instruct_tokenizer.audio_encoder.audio_config.{config_name} "
                    f"({config_value}) must be equal"
                )

    @property
    def mistral_common_audio_config(self):
        # Audio configuration owned by the mistral-common tokenizer's audio encoder.
        return self.tokenizer.tokenizer.instruct_tokenizer.audio_encoder.audio_config

    @property
    def num_delay_tokens(self):
        return self.mistral_common_audio_config.num_delay_tokens

    @property
    def num_right_pad_tokens(self):
        return self.mistral_common_audio_config.n_right_pad_tokens

    @property
    def audio_length_per_tok(self):
        return self.mistral_common_audio_config.audio_length_per_tok

    @property
    def raw_audio_length_per_tok(self):
        return self.mistral_common_audio_config.raw_audio_length_per_tok

    @property
    def num_mel_frames_first_audio_chunk(self):
        # it is actually num_left_pad_tokens + num_delay_tokens + 1
        # but the call to `encode_transcription` will add the left pad token
        num_prefill_tokens = self.num_delay_tokens + 1
        num_prefill_mel_frames = num_prefill_tokens * self.audio_length_per_tok
        return num_prefill_mel_frames

    @property
    def num_samples_first_audio_chunk(self) -> int:
        # Convert the prefill mel-frame count back to raw audio samples:
        # (frames - 1) * hop_length plus half a window.
        num_prefill_mel_frames = self.num_mel_frames_first_audio_chunk
        num_prefill_audio_samples = (
            num_prefill_mel_frames - 1
        ) * self.feature_extractor.hop_length + self.feature_extractor.win_length // 2
        return num_prefill_audio_samples

    @property
    def num_samples_per_audio_chunk(self) -> int:
        # Raw samples consumed per generated token in steady state.
        return self.audio_length_per_tok * self.feature_extractor.hop_length + self.feature_extractor.win_length

    def __call__(
        self,
        audio: AudioInput | None = None,
        is_streaming: bool = False,
        is_first_audio_chunk: bool | None = True,
        **kwargs: Unpack[VoxtralRealtimeProcessorKwargs],
    ):
        r"""
        Main method to prepare audio input for the Voxtral Realtime model. This method encodes the audio into
        a transcription request using `mistral_common`, tokenizes the resulting text, and extracts mel spectrogram
        features using the feature extractor. Supports both streaming and non-streaming modes.
        Args:
            audio (`AudioInput`, *optional*):
                Input audio or batch of audios as NumPy arrays or PyTorch tensors.
            is_streaming (`bool`, *optional*, defaults to `False`):
                Whether to process audio in streaming mode. When `True`, audio can be passed in chunks
                using `is_first_audio_chunk` to distinguish the first chunk from subsequent ones.
            is_first_audio_chunk (`bool`, *optional*, defaults to `True`):
                Whether the current audio is the first chunk in a streaming session. When `True`, the audio
                is encoded into a full transcription request with tokenized text. When `False`, only audio
                features are extracted (text encoding is skipped). Must be `True` when `is_streaming=False`.
        Returns:
            [`BatchFeature`]: A [`BatchFeature`] with the following fields:
            - **input_ids** -- List of token ids to be fed to the model. Returned when `is_first_audio_chunk=True`.
            - **attention_mask** -- List of indices specifying which tokens should be attended to by the model.
              Returned when `is_first_audio_chunk=True`.
            - **input_features** -- Mel spectrogram features extracted from the audio input.
            - **num_delay_tokens** -- The number of delay tokens used for streaming.
        """
        output_kwargs = self._merge_kwargs(VoxtralRealtimeProcessorKwargs, **kwargs)
        if not is_streaming and not is_first_audio_chunk:
            raise ValueError("In non-streaming mode (`is_streaming=False`), `is_first_audio_chunk` must be `True`.")
        audio = make_list_of_audio(audio)
        input_ids, texts, audio_arrays = [], [], []
        if is_first_audio_chunk:
            for audio_el in audio:
                # NOTE: format here is used only for serialization and therefore we can use wav for any audio array.
                # Use a dedicated name instead of rebinding the `audio` list we are iterating over.
                audio_obj = Audio(
                    audio_array=audio_el, sampling_rate=output_kwargs["audio_kwargs"]["sampling_rate"], format="wav"
                )
                transcription_request = TranscriptionRequest(
                    audio=RawAudio.from_audio(audio_obj),
                    streaming=StreamingMode.ONLINE if is_streaming else StreamingMode.OFFLINE,
                    language=None,
                )
                tokenized_transcription_request = self.tokenizer.tokenizer.encode_transcription(transcription_request)
                input_ids.append(tokenized_transcription_request.tokens)
                texts.append(tokenized_transcription_request.text)
                audio_arrays.extend([el.audio_array for el in tokenized_transcription_request.audios])
            text_encoding = self.tokenizer(input_ids, **output_kwargs["text_kwargs"])
        else:
            # when not the first audio chunk, we only encode audio
            audio_arrays = audio
            text_encoding = {}
        audio_encoding = self.feature_extractor(
            audio_arrays,
            # Only the first chunk is centered (padded on the left by the STFT window).
            center=is_first_audio_chunk,
            **output_kwargs["audio_kwargs"],
        )
        encoding = {**text_encoding, **audio_encoding, "num_delay_tokens": self.num_delay_tokens}
        return_tensors = output_kwargs["text_kwargs"].pop("return_tensors", None)
        return BatchFeature(data=encoding, tensor_type=return_tensors)
__all__ = ["VoxtralRealtimeProcessor"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/voxtral_realtime/processing_voxtral_realtime.py",
"license": "Apache License 2.0",
"lines": 166,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:tests/models/voxtral_realtime/test_modeling_voxtral_realtime.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch VoxtralRealtime model."""
import functools
import unittest
from transformers import (
AutoProcessor,
VoxtralRealtimeConfig,
VoxtralRealtimeForConditionalGeneration,
is_datasets_available,
is_torch_available,
)
from transformers.audio_utils import load_audio
from transformers.testing_utils import (
cleanup,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_datasets_available():
import datasets
if is_torch_available():
import torch
class VoxtralRealtimeModelTester:
    """Builds a tiny `VoxtralRealtimeConfig` plus random inputs for the common model tests."""

    def __init__(
        self,
        parent,
        ignore_index=-100,
        audio_token_id=0,
        seq_length=5,
        feat_seq_length=40,
        # NOTE(review): dict defaults are mutable default arguments shared across instances;
        # safe only as long as they are never mutated in place.
        text_config={
            "model_type": "voxtral_realtime_text",
            "intermediate_size": 36,
            "initializer_range": 0.02,
            "hidden_size": 32,
            "max_position_embeddings": 52,
            "num_hidden_layers": 2,
            "num_attention_heads": 4,
            "num_key_value_heads": 2,
            "use_labels": True,
            "vocab_size": 99,
            "head_dim": 8,
            "pad_token_id": 1,  # can't be the same as the audio token id
            "hidden_act": "silu",
            "rms_norm_eps": 1e-6,
            "attention_dropout": 0.0,
            "rope_parameters": {
                "rope_type": "default",
                "rope_theta": 10000.0,
            },
        },
        is_training=True,
        audio_config={
            "model_type": "voxtral_realtime_encoder",
            "hidden_size": 16,
            "num_attention_heads": 4,
            "num_key_value_heads": 2,
            "intermediate_size": 64,
            "encoder_layers": 2,
            "num_mel_bins": 80,
            "max_position_embeddings": 100,
            "initializer_range": 0.02,
            "rms_norm_eps": 1e-6,
            "activation_function": "silu",
            "activation_dropout": 0.0,
            "attention_dropout": 0.0,
            "head_dim": 4,
            "rope_parameters": {
                "rope_type": "default",
                "rope_theta": 10000.0,
            },
        },
    ):
        self.parent = parent
        self.ignore_index = ignore_index
        self.audio_token_id = audio_token_id
        self.text_config = text_config
        self.audio_config = audio_config
        self.seq_length = seq_length
        self.feat_seq_length = feat_seq_length
        self.num_hidden_layers = text_config["num_hidden_layers"]
        self.vocab_size = text_config["vocab_size"]
        self.hidden_size = text_config["hidden_size"]
        self.num_attention_heads = text_config["num_attention_heads"]
        self.is_training = is_training
        self.batch_size = 3
        self.encoder_seq_length = seq_length
        # Set temporarily by the `_with_max_new_tokens` decorator in the test class to
        # grow the audio features accordingly in `prepare_config_and_inputs`.
        self._max_new_tokens = None

    def get_config(self):
        """Return a small composite config built from the text/audio sub-configs."""
        return VoxtralRealtimeConfig(
            text_config=self.text_config,
            audio_config=self.audio_config,
            ignore_index=self.ignore_index,
            audio_token_id=self.audio_token_id,
        )

    def prepare_config_and_inputs(self):
        """Create the config and random mel features, lengthened when generation tests need more tokens."""
        if self._max_new_tokens is not None:
            # 8 mel frames per generated token (see `_with_max_new_tokens` usage) —
            # the audio must be long enough to cover the requested generation length.
            feat_seq_length = self.feat_seq_length + self._max_new_tokens * 8
        else:
            feat_seq_length = self.feat_seq_length
        input_features_values = floats_tensor(
            [
                self.batch_size,
                self.audio_config["num_mel_bins"],
                feat_seq_length,
            ]
        )
        config = self.get_config()
        return config, input_features_values

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) with audio placeholder tokens spliced into input_ids."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_features_values = config_and_inputs
        num_audio_tokens_per_batch_idx = 30
        # Shift ids by 1 so none collide with the audio token id (0).
        input_ids = ids_tensor([self.batch_size, self.seq_length], config.text_config.vocab_size - 1) + 1
        attention_mask = torch.ones(input_ids.shape, dtype=torch.long).to(torch_device)
        attention_mask[:, :1] = 0
        input_ids[:, 1 : 1 + num_audio_tokens_per_batch_idx] = config.audio_token_id
        inputs_dict = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "input_features": input_features_values,
        }
        return config, inputs_dict
@require_torch
class VoxtralRealtimeForConditionalGenerationModelTest(
    ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    """
    Model tester for `VoxtralRealtimeForConditionalGeneration`.
    """

    additional_model_inputs = ["input_features"]
    all_model_classes = (VoxtralRealtimeForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = {"any-to-any": VoxtralRealtimeForConditionalGeneration} if is_torch_available() else {}
    _is_composite = True

    def setUp(self):
        self.model_tester = VoxtralRealtimeModelTester(self)
        self.config_tester = ConfigTester(self, config_class=VoxtralRealtimeConfig, has_text_modality=False)

    # Class-body decorator factory: temporarily sets `model_tester._max_new_tokens` so the
    # random audio features are long enough for the wrapped generation test.
    def _with_max_new_tokens(max_new_tokens):
        def decorator(test_func):
            @functools.wraps(test_func)
            def wrapper(self, *args, **kwargs):
                try:
                    self.model_tester._max_new_tokens = max_new_tokens
                    return test_func(self, *args, **kwargs)
                finally:
                    # Always restore, even if the wrapped test raises.
                    self.model_tester._max_new_tokens = None

            return wrapper

        return decorator

    def prepare_config_and_inputs_for_generate(self, batch_size=2):
        """Lengthen the audio features (8 mel frames per new token) for generation tests."""
        original_feat_seq_length = self.model_tester.feat_seq_length
        try:
            self.model_tester.feat_seq_length += self.max_new_tokens * 8
            config, inputs_dict = super().prepare_config_and_inputs_for_generate(batch_size=batch_size)
        finally:
            self.model_tester.feat_seq_length = original_feat_seq_length
        return config, inputs_dict

    @_with_max_new_tokens(max_new_tokens=10)
    def test_generate_methods_with_logits_to_keep(self):
        super().test_generate_methods_with_logits_to_keep()

    @_with_max_new_tokens(max_new_tokens=5)
    def test_generate_compile_model_forward_fullgraph(self):
        super().test_generate_compile_model_forward_fullgraph()

    @_with_max_new_tokens(max_new_tokens=5)
    def test_generate_with_and_without_position_ids(self):
        super().test_generate_with_and_without_position_ids()

    @unittest.skip(reason="VoxtralRealtime does not have a base model")
    def test_model_base_model_prefix(self):
        pass

    @unittest.skip(
        reason="This test does not apply to VoxtralRealtime since input_features must be provided along input_ids"
    )
    def test_flash_attention_2_continue_generate_with_position_ids(self):
        pass

    @unittest.skip(
        reason="This test does not apply to VoxtralRealtime since input_features must be provided along input_ids"
    )
    def test_custom_4d_attention_mask(self):
        pass

    @unittest.skip(
        reason="This test does not apply to VoxtralRealtime since input_features must be provided along input_ids"
    )
    def test_flash_attn_2_from_config(self):
        pass

    # NOTE(review): the next two overrides lack the `test_` prefix — confirm they match the
    # exact names of the mixin helpers they intend to disable.
    @unittest.skip(
        reason="This test does not apply to VoxtralRealtime since input_features must be provided along input_ids"
    )
    def attention_mask_padding_matches_padding_free_with_position_ids(self):
        pass

    @unittest.skip(
        reason="This test does not apply to VoxtralRealtime since input_features must be provided along input_ids"
    )
    def flash_attn_inference_equivalence(self):
        pass

    @unittest.skip(
        reason="This test does not apply to VoxtralRealtime for now since encoder_past_key_values AND padding_cache are returned by generate"
    )
    def test_generate_continue_from_past_key_values(self):
        pass

    @unittest.skip(
        reason="This test does not apply to VoxtralRealtime since prepare_inputs_for_generation is overwritten"
    )
    def test_prepare_inputs_for_generation_kwargs_forwards(self):
        pass

    @unittest.skip(
        reason="This test does not apply to VoxtralRealtime since input_features must be provided along input_ids"
    )
    def test_generate_without_input_ids(self):
        pass

    @unittest.skip(
        reason="VoxtralRealtime does not fall in the paradigm of assisted decoding (at least for the way it is implemented in generate)"
    )
    def test_assisted_decoding_sample(self):
        pass

    @unittest.skip(
        reason="VoxtralRealtime does not fall in the paradigm of assisted decoding (at least for the way it is implemented in generate)"
    )
    def test_assisted_decoding_matches_greedy_search_0_random(self):
        pass

    @unittest.skip(
        reason="VoxtralRealtime does not fall in the paradigm of assisted decoding (at least for the way it is implemented in generate)"
    )
    def test_assisted_decoding_matches_greedy_search_1_same(self):
        pass

    @unittest.skip(
        reason="This test does not apply to VoxtralRealtime since in only pads input_ids but input_features should also be padded"
    )
    def test_left_padding_compatibility(self):
        pass

    @unittest.skip(
        reason="VoxtralRealtime output contains non-tensor padding_cache state that is incompatible with DataParallel gather"
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass
@require_torch
class VoxtralRealtimeForConditionalGenerationIntegrationTest(unittest.TestCase):
    """Slow end-to-end transcription tests against the released Voxtral-Mini realtime checkpoint.

    These tests download the checkpoint and audio fixtures from the Hub and compare
    greedy-decoded transcripts against pinned expected strings.
    """

    def setUp(self):
        self.checkpoint_name = "mistralai/Voxtral-Mini-4B-Realtime-2602"
        self.processor = AutoProcessor.from_pretrained(self.checkpoint_name)

    def tearDown(self):
        # Free accelerator memory between tests.
        cleanup(torch_device, gc_collect=True)

    @slow
    def test_single_longform(self):
        """
        reproducer: https://gist.github.com/eustlb/980bade49311336509985f9a308e80af
        """
        model = VoxtralRealtimeForConditionalGeneration.from_pretrained(self.checkpoint_name, device_map=torch_device)
        audio = load_audio(
            "https://huggingface.co/datasets/hf-internal-testing/dummy-audio-samples/resolve/main/dude_where_is_my_car.wav",
            self.processor.feature_extractor.sampling_rate,
        )
        inputs = self.processor(audio, return_tensors="pt")
        inputs.to(model.device, dtype=model.dtype)
        outputs = model.generate(**inputs)
        decoded_outputs = self.processor.batch_decode(outputs, skip_special_tokens=True)
        EXPECTED_OUTPUT = [
            " Come on! Dude. You got a tattoo. So did you, dude. No. Oh, dude, what does my tattoo say? Sweet! What about mine? Dude, what does mine say? Sweet! What about mine? Dude, what does mine say? Sweet! What about mine? Dude, what does mine say? Sweet! What about mine? Dude, what does mine say? Sweet! What about mine? Dude, what does mine say? Sweet! What about mine? Dude! What does mine say? Sweet! Idiot! Your tattoo says dude. Your tattoo says sweet. Got it? Sorry. Hey, sorry.",
        ]
        self.assertEqual(decoded_outputs, EXPECTED_OUTPUT)

    @slow
    def test_batched(self):
        """
        reproducer: https://gist.github.com/eustlb/980bade49311336509985f9a308e80af
        """
        model = VoxtralRealtimeForConditionalGeneration.from_pretrained(self.checkpoint_name, device_map=torch_device)
        # Load dataset manually
        ds = datasets.load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        speech_samples = ds.sort("id")[:5]["audio"]
        input_speech = [x["array"] for x in speech_samples]
        inputs = self.processor(input_speech, return_tensors="pt")
        inputs.to(model.device, dtype=model.dtype)
        outputs = model.generate(**inputs)
        decoded_outputs = self.processor.batch_decode(outputs, skip_special_tokens=True)
        EXPECTED_OUTPUT = [
            " Mr. Quilter is the apostle of the middle classes, and we are glad to welcome his gospel.",
            " nor is mr quilter's manner less interesting than his matter",
            " He tells us that at this festive season of the year, with Christmas and roast beef looming before us, similes drawn from eating and its results occur most readily to the mind.",
            " He has grave doubts whether Sir Frederick Leighton's work is really Greek after all, and can discover in it but little of rocky Ithaca.",
            " Linnell's pictures are a sort of up-guards-and-atom paintings, and Mason's exquisite idylls are as national as a jingo poem. Mr. Burkett Foster's landscapes smile at one much in the same way that Mr. Carker used to flash his teeth. And Mr. John Collier gives his sitter a cheerful slap on the back before he says, like a shampooer in a Turkish bath, Next man!",
        ]
        self.assertEqual(decoded_outputs, EXPECTED_OUTPUT)

    @slow
    def test_batched_longform(self):
        """
        reproducer: https://gist.github.com/eustlb/980bade49311336509985f9a308e80af
        """
        model = VoxtralRealtimeForConditionalGeneration.from_pretrained(self.checkpoint_name, device_map=torch_device)
        audio1 = load_audio(
            "https://huggingface.co/datasets/hf-internal-testing/dummy-audio-samples/resolve/main/dude_where_is_my_car.wav",
            self.processor.feature_extractor.sampling_rate,
        )
        audio2 = load_audio(
            "https://huggingface.co/datasets/hf-internal-testing/dummy-audio-samples/resolve/main/obama_first_45_secs.mp3",
            self.processor.feature_extractor.sampling_rate,
        )
        inputs = self.processor([audio1, audio2], return_tensors="pt")
        inputs.to(model.device, dtype=model.dtype)
        outputs = model.generate(**inputs)
        decoded_outputs = self.processor.batch_decode(outputs, skip_special_tokens=True)
        EXPECTED_OUTPUT = [
            " Come on. Dude. You got a tattoo. So did you, dude. No. Oh, dude, what does my tattoo say? Sweet! What about mine? Dude, what does mine say? Sweet! What about mine? Dude, what does mine say? Sweet! What about mine? Dude, what does mine say? Sweet! What about mine? Dude, what does mine say? Sweet! What about mine? Dude, what does mine say? Sweet! What about mine? Dude! What does mine say? Sweet! Idiot! Your tattoo says dude. Your tattoo says sweet. Got it? Sorry. Hey, sorry.",
            " This week, I traveled to Chicago to deliver my final farewell address to the nation, following in the tradition of presidents before me. It was an opportunity to say thank you. Whether we've seen eye to eye or rarely agreed at all, my conversations with you, the American people, in living rooms and schools, at farms and on factory floors, at diners and on distant military outposts, All these conversations are what have kept me honest, kept me inspired, and kept me going. Every day, I learned from you. You made me a better president, and you made me a better man. Over the course of these eight years, I've seen the goodness, the resilience, and the hope of the",
        ]
        self.assertEqual(decoded_outputs, EXPECTED_OUTPUT)
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/voxtral_realtime/test_modeling_voxtral_realtime.py",
"license": "Apache License 2.0",
"lines": 319,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:src/transformers/integrations/liger.py | # Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Liger Kernel integration for applying optimized Triton kernels to transformer models.
See https://github.com/linkedin/Liger-Kernel for details.
"""
from ..modeling_utils import PreTrainedModel
from ..trainer_utils import unwrap_peft_model
from ..utils import is_liger_kernel_available, logging
logger = logging.get_logger(__name__)
def apply_liger_kernel(model, kernel_config):
    """
    Patch a model instance in-place with Liger Kernel's optimized Triton kernels.

    Liger Kernel provides optimized Triton kernels for common transformer operations;
    see https://github.com/linkedin/Liger-Kernel for details.

    Args:
        model: The model to patch. Must be a `PreTrainedModel` or a PEFT wrapper around one.
        kernel_config: Kernel configuration.
    """
    if not is_liger_kernel_available():
        raise ImportError(
            "You have set `use_liger_kernel` to `True` but liger-kernel >= 0.3.0 is not available. "
            "Please install it with `pip install liger-kernel`"
        )

    from liger_kernel.transformers import _apply_liger_kernel_to_instance

    base_model = unwrap_peft_model(model)
    if not isinstance(base_model, PreTrainedModel):
        # Nothing to patch: Liger only knows how to handle PreTrainedModel instances.
        logger.warning("The model is not an instance of PreTrainedModel. No liger kernels will be applied.")
        return
    _apply_liger_kernel_to_instance(model=base_model, **(kernel_config or {}))
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/integrations/liger.py",
"license": "Apache License 2.0",
"lines": 42,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/trainer_optimizer.py | # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Optimizer utilities for the Trainer class.
"""
from __future__ import annotations
import importlib.metadata
import logging
from collections.abc import Callable
from dataclasses import dataclass
from typing import TYPE_CHECKING, Any
import torch
from packaging import version
from torch import nn
from .optimization import Adafactor
from .trainer_pt_utils import LayerWiseDummyOptimizer
from .trainer_utils import check_target_module_exists
from .training_args import OptimizerNames, ParallelMode
from .utils import (
is_apollo_torch_available,
is_bitsandbytes_available,
is_galore_torch_available,
is_grokadamw_available,
is_lomo_available,
is_schedulefree_available,
is_torch_optimi_available,
is_torchao_available,
strtobool,
)
if TYPE_CHECKING:
from .modeling_utils import PreTrainedModel
from .training_args import TrainingArguments
logger = logging.getLogger(__name__)
@dataclass
class OptimizerContext:
    """Context object passed to all optimizer handlers."""
    # Training arguments the handler reads hyperparameters and the optimizer name from.
    args: TrainingArguments
    # Model being optimized; required by handlers that scan modules (LOMO, GaLore, ...), may be None otherwise.
    model: PreTrainedModel | None
    # Base keyword arguments (e.g. learning rate) that each handler extends and returns.
    optimizer_kwargs: dict[str, Any]
    # Adam-family kwargs shared by Adam-style handlers (contents built by the caller).
    adam_kwargs: dict[str, Any]
    # Extra options parsed from `args.optim_args` ("key=value" pairs; values are kept as strings).
    optim_args: dict[str, str]
def _parse_optim_args(optim_args_str: str | None) -> dict[str, str]:
"""Parse optimizer arguments from a comma-separated string."""
if not optim_args_str:
return {}
optim_args = {}
for mapping in optim_args_str.replace(" ", "").split(","):
key, value = mapping.split("=")
optim_args[key] = value
return optim_args
# Type alias for optimizer handler functions
# A handler consumes an OptimizerContext and returns (optimizer class or factory, optimizer kwargs).
OptimizerHandler = Callable[[OptimizerContext], tuple[Any, dict[str, Any]]]
def is_optimizer_factory(optimizer_cls_or_factory: Any) -> bool:
    """
    Tell whether a handler returned a factory callable instead of an Optimizer class.
    Factories are used by complex optimizers (e.g. Muon, Dion) that must split parameters
    across several internal optimizers, deal with sharding, or inspect the whole model when
    grouping parameters.
    Args:
        optimizer_cls_or_factory: The first element of a handler's return value.
    Returns:
        `bool`: False when the value is a `torch.optim.Optimizer` subclass, True otherwise
        (i.e. the value is presumably a factory).
    """
    is_optimizer_class = isinstance(optimizer_cls_or_factory, type) and issubclass(
        optimizer_cls_or_factory, torch.optim.Optimizer
    )
    return not is_optimizer_class
def _setup_low_rank_optimizer(
    args: TrainingArguments,
    model: PreTrainedModel,
    optimizer_name: str,
    optimizer_mapping: dict[str, Any],
    optim_kwargs: dict[str, Any],
    optimizer_kwargs: dict[str, Any],
    is_layerwise_supported: bool = True,
) -> tuple[Any, dict[str, Any]]:
    """
    Helper function to set up low-rank optimizers like GaLore and Apollo.
    These optimizers apply low-rank projections to specific target modules (typically linear layers).
    Args:
        args: Training arguments; `optim_target_modules` selects which linear layers are targeted.
        model: The model whose named modules/parameters are scanned. Must not be None.
        optimizer_name: Key into `optimizer_mapping`; a name ending in "layerwise" enables the
            per-parameter (layer-wise) update scheme.
        optimizer_mapping: Maps optimizer names to optimizer classes.
        optim_kwargs: Low-rank-specific kwargs (rank, scale, ...) applied only to targeted params.
        optimizer_kwargs: Base kwargs (lr, ...) applied to every optimizer instance; mutated in place.
        is_layerwise_supported: If True, layer-wise variants are rejected under DDP.
    Returns:
        `tuple[Any, dict[str, Any]]`: Optimizer class and fully populated kwargs (including `params`).
    Raises:
        NotImplementedError: For layer-wise variants under distributed (DDP) training.
        ValueError: For a missing model, missing `optim_target_modules`, no matching target
            modules, or gradient accumulation combined with a layer-wise variant.
        TypeError: If `optim_target_modules` has an unsupported type.
    """
    is_layerwise = optimizer_name.lower().endswith("layerwise")
    if is_layerwise and args.parallel_mode == ParallelMode.DISTRIBUTED and is_layerwise_supported:
        raise NotImplementedError(f"Layer-wise {optimizer_name} does not support DDP at this time")
    optimizer_cls = optimizer_mapping[optimizer_name]
    if args.optim_target_modules is None:
        raise ValueError(f"You need to define `optim_target_modules` to use {optimizer_name} optimizers")
    if not isinstance(args.optim_target_modules, (list, str)):
        raise TypeError(
            f"`optim_target_modules` must be a list of strings, a regex string, or 'all-linear'. "
            f"Got: {args.optim_target_modules}"
        )
    if model is None:
        raise ValueError(f"You need to pass a model to initialize {optimizer_name} optimizer.")
    # "all-linear" (or "all_linear") targets every nn.Linear module in the model.
    all_linear = (
        isinstance(args.optim_target_modules, str) and args.optim_target_modules.replace("_", "-") == "all-linear"
    )
    # Collect fully-qualified parameter names ("<module>.weight") of every targeted nn.Linear.
    target_params_names = []
    for module_name, module in model.named_modules():
        target_module_exists, is_regex = check_target_module_exists(
            args.optim_target_modules, module_name, return_is_regex=True
        )
        if not isinstance(module, nn.Linear):
            # An exact (non-regex) match on a non-linear module is almost certainly a user mistake.
            if target_module_exists and not is_regex:
                logger.warning(f"{module_name} matched but ignored. {optimizer_name} only supports linear layers.")
            continue
        if not target_module_exists and not all_linear:
            continue
        target_params_names.append(module_name + ".weight")
    if len(target_params_names) == 0:
        raise ValueError(f"No target modules found for {optimizer_name} ({args.optim_target_modules}).")
    # Low-rank kwargs apply only to the targeted weights; all other params use the base settings.
    target_params = [p for n, p in model.named_parameters() if n in target_params_names]
    non_target_params = [p for n, p in model.named_parameters() if n not in target_params_names]
    param_groups = [
        {"params": non_target_params},
        {"params": target_params, **optim_kwargs},
    ]
    if is_layerwise:
        # Layer-wise mode creates one optimizer per parameter and steps it from a gradient hook,
        # so gradients must be applied on every step (no accumulation possible).
        if args.gradient_accumulation_steps != 1:
            raise ValueError(f"Layerwise {optimizer_name} does not support gradient accumulation!")
        optimizer_dict = {}
        for param in non_target_params:
            optimizer_dict[param] = optimizer_cls([{"params": [param]}], **optimizer_kwargs)
        for param in target_params:
            optimizer_dict[param] = optimizer_cls([{"params": [param], **optim_kwargs}], **optimizer_kwargs)
        def optimizer_hook(param):
            # Step and reset this parameter's dedicated optimizer as soon as its grad is accumulated.
            if param.grad is not None:
                optimizer_dict[param].step()
                optimizer_dict[param].zero_grad()
        for param in model.parameters():
            if param.requires_grad:
                param.register_post_accumulate_grad_hook(optimizer_hook)
        # Hand back a dummy optimizer wrapping the per-parameter dict (real stepping happens in the hooks).
        optimizer_cls = LayerWiseDummyOptimizer
        optimizer_kwargs.update({"optimizer_dict": optimizer_dict})
    optimizer_kwargs.update({"params": param_groups})
    return optimizer_cls, optimizer_kwargs
# =============================================================================
# Individual optimizer handlers
# =============================================================================
def _get_adafactor(ctx: OptimizerContext) -> tuple[Any, dict[str, Any]]:
    """Return the Adafactor optimizer class configured for fixed (non-relative) step sizes."""
    ctx.optimizer_kwargs["scale_parameter"] = False
    ctx.optimizer_kwargs["relative_step"] = False
    return Adafactor, ctx.optimizer_kwargs
def _get_adamw_torch(ctx: OptimizerContext) -> tuple[Any, dict[str, Any]]:
    """Return the PyTorch AdamW optimizer, enabling the fused variant when requested."""
    from torch.optim import AdamW
    kwargs = ctx.optimizer_kwargs
    kwargs.update(ctx.adam_kwargs)
    # The fused flag is selected purely from the optimizer name.
    if ctx.args.optim == OptimizerNames.ADAMW_TORCH_FUSED:
        kwargs["fused"] = True
    return AdamW, kwargs
def _get_adamw_torch_xla(ctx: OptimizerContext) -> tuple[Any, dict[str, Any]]:
"""Get Torch XLA syncfree AdamW optimizer."""
try:
from torch_xla.amp.syncfree import AdamW
ctx.optimizer_kwargs.update(ctx.adam_kwargs)
return AdamW, ctx.optimizer_kwargs
except ImportError:
raise ValueError("Trainer failed to import syncfree AdamW from torch_xla.")
def _get_adamw_torch_npu_fused(ctx: OptimizerContext) -> tuple[Any, dict[str, Any]]:
"""Get NPU Fused AdamW optimizer."""
try:
from torch_npu.optim import NpuFusedAdamW
ctx.optimizer_kwargs.update(ctx.adam_kwargs)
return NpuFusedAdamW, ctx.optimizer_kwargs
except ImportError:
raise ValueError("Trainer failed to import FusedAdamW from torch_npu.")
def _get_adamw_apex_fused(ctx: OptimizerContext) -> tuple[Any, dict[str, Any]]:
"""Get Apex Fused Adam optimizer."""
try:
from apex.optimizers import FusedAdam
ctx.optimizer_kwargs.update(ctx.adam_kwargs)
return FusedAdam, ctx.optimizer_kwargs
except ImportError:
raise ValueError("Trainer tried to instantiate apex FusedAdam but apex is not installed!")
def _get_bitsandbytes_optimizer(ctx: OptimizerContext) -> tuple[Any, dict[str, Any]]:
    """Get bitsandbytes optimizer (AdamW, Lion, RMSprop variants).
    The concrete class, bitness (8 vs 32) and paging are all derived from substrings of
    `ctx.args.optim` (e.g. "paged_adamw_8bit").
    Raises:
        ImportError: If `bitsandbytes` is not installed.
    """
    if not is_bitsandbytes_available():
        raise ImportError(
            "You need to install `bitsandbytes` in order to use bitsandbytes optimizers: `pip install -U bitsandbytes`"
        )
    from bitsandbytes.optim import AdamW, Lion, RMSprop
    optim_name = ctx.args.optim
    is_paged = "paged" in optim_name
    # "8bit" in the name selects 8-bit optimizer state; otherwise the 32-bit variant is used.
    optim_bits = 8 if "8bit" in optim_name else 32
    optimizer_cls = None
    additional_optim_kwargs = ctx.adam_kwargs
    if "adam" in optim_name:
        optimizer_cls = AdamW
    elif "lion" in optim_name:
        optimizer_cls = Lion
        # The Lion variant is configured with betas only here.
        additional_optim_kwargs = {"betas": (ctx.args.adam_beta1, ctx.args.adam_beta2)}
    elif "rmsprop" in optim_name:
        optimizer_cls = RMSprop
        # For RMSprop, user-provided `optim_args` are forwarded as-is (string values).
        additional_optim_kwargs = ctx.optim_args
    elif "ademamix" in optim_name:
        # AdEMAMix does not exist in all bitsandbytes releases; import lazily inside the branch.
        from bitsandbytes.optim import AdEMAMix
        optimizer_cls = AdEMAMix
        additional_optim_kwargs = {
            "betas": (
                float(ctx.optim_args.get("beta1", ctx.args.adam_beta1)),
                float(ctx.optim_args.get("beta2", ctx.args.adam_beta2)),
                float(ctx.optim_args.get("beta3", 0.9999)),
            ),
            "alpha": float(ctx.optim_args.get("alpha", 5.0)),
            "eps": float(ctx.optim_args.get("eps", ctx.args.adam_epsilon)),
        }
        # Optional integer schedule parameters, forwarded only when provided.
        if "t_alpha" in ctx.optim_args:
            additional_optim_kwargs["t_alpha"] = int(ctx.optim_args["t_alpha"])
        if "t_beta3" in ctx.optim_args:
            additional_optim_kwargs["t_beta3"] = int(ctx.optim_args["t_beta3"])
    bnb_kwargs = {"optim_bits": optim_bits}
    # The RMSprop variant is not passed `is_paged` here.
    if "rmsprop" not in optim_name:
        bnb_kwargs["is_paged"] = is_paged
    ctx.optimizer_kwargs.update(additional_optim_kwargs)
    ctx.optimizer_kwargs.update(bnb_kwargs)
    return optimizer_cls, ctx.optimizer_kwargs
def _get_adamw_anyprecision(ctx: OptimizerContext) -> tuple[Any, dict[str, Any]]:
"""Get AnyPrecision AdamW optimizer."""
try:
from torchdistx.optimizers import AnyPrecisionAdamW
ctx.optimizer_kwargs.update(ctx.adam_kwargs)
ctx.optimizer_kwargs.update(
{
"use_kahan_summation": strtobool(ctx.optim_args.get("use_kahan_summation", "False")),
"momentum_dtype": getattr(torch, ctx.optim_args.get("momentum_dtype", "float32")),
"variance_dtype": getattr(torch, ctx.optim_args.get("variance_dtype", "float32")),
"compensation_buffer_dtype": getattr(
torch, ctx.optim_args.get("compensation_buffer_dtype", "bfloat16")
),
}
)
return AnyPrecisionAdamW, ctx.optimizer_kwargs
except ImportError:
raise ValueError("Please install https://github.com/pytorch/torchdistx")
def _get_sgd(ctx: OptimizerContext) -> tuple[Any, dict[str, Any]]:
"""Get SGD optimizer."""
kwargs = ctx.optimizer_kwargs.copy()
if ctx.optim_args:
for key in ("momentum", "dampening", "weight_decay"):
if key in ctx.optim_args:
kwargs[key] = float(ctx.optim_args[key])
if "nesterov" in ctx.optim_args:
kwargs["nesterov"] = ctx.optim_args["nesterov"].lower() in ("true", "1", "yes")
return torch.optim.SGD, kwargs
def _get_adagrad(ctx: OptimizerContext) -> tuple[Any, dict[str, Any]]:
"""Get Adagrad optimizer."""
kwargs = ctx.optimizer_kwargs.copy()
if ctx.optim_args:
for key in ("lr_decay", "weight_decay", "eps"):
if key in ctx.optim_args:
kwargs[key] = float(ctx.optim_args[key])
return torch.optim.Adagrad, kwargs
def _get_rmsprop(ctx: OptimizerContext) -> tuple[Any, dict[str, Any]]:
"""Get RMSprop optimizer."""
kwargs = ctx.optimizer_kwargs.copy()
if ctx.optim_args:
for key in ("momentum", "alpha", "eps", "weight_decay"):
if key in ctx.optim_args:
kwargs[key] = float(ctx.optim_args[key])
if "centered" in ctx.optim_args:
kwargs["centered"] = ctx.optim_args["centered"].lower() in ("true", "1", "yes")
return torch.optim.RMSprop, kwargs
def _get_galore_optimizer(ctx: OptimizerContext) -> tuple[Any, dict[str, Any]]:
    """Get GaLore optimizer.
    Optional `optim_args` keys: `rank`, `update_proj_gap`, `scale`, `proj_type`.
    Raises:
        ImportError: If `galore_torch` is not installed.
    """
    if not is_galore_torch_available():
        raise ImportError(
            "You need to install `galore_torch` in order to use GaLore optimizers. "
            "Install it with `pip install git+https://github.com/jiaweizzhao/GaLore`"
        )
    from galore_torch import GaLoreAdafactor, GaLoreAdamW, GaLoreAdamW8bit
    optimizer_mapping = {
        OptimizerNames.GALORE_ADAMW: GaLoreAdamW,
        OptimizerNames.GALORE_ADAMW_8BIT: GaLoreAdamW8bit,
        OptimizerNames.GALORE_ADAFACTOR: GaLoreAdafactor,
        OptimizerNames.GALORE_ADAMW_LAYERWISE: GaLoreAdamW,
        OptimizerNames.GALORE_ADAMW_8BIT_LAYERWISE: GaLoreAdamW8bit,
        OptimizerNames.GALORE_ADAFACTOR_LAYERWISE: GaLoreAdafactor,
    }
    # GaLore-specific hyperparameters are popped out of `optim_args` (string values coerced here).
    galore_optim_kwargs = {
        "rank": int(ctx.optim_args.pop("rank", 128)),
        "update_proj_gap": int(ctx.optim_args.pop("update_proj_gap", 200)),
        "scale": float(ctx.optim_args.pop("scale", 0.25)),
        "proj_type": ctx.optim_args.pop("proj_type", "std"),
    }
    optimizer_cls, optimizer_kwargs = _setup_low_rank_optimizer(
        ctx.args, ctx.model, ctx.args.optim, optimizer_mapping, galore_optim_kwargs, ctx.optimizer_kwargs
    )
    # Match plain Adafactor's fixed-step configuration (see `_get_adafactor`).
    if ctx.args.optim == OptimizerNames.GALORE_ADAFACTOR:
        optimizer_kwargs.update({"scale_parameter": False, "relative_step": False})
    return optimizer_cls, optimizer_kwargs
def _get_apollo_optimizer(ctx: OptimizerContext) -> tuple[Any, dict[str, Any]]:
    """Get Apollo optimizer.
    Optional `optim_args` keys: `rank`, `proj`, `scale_type`, `update_proj_gap`, `scale`, `proj_type`.
    Raises:
        ImportError: If `apollo_torch` is not installed.
    """
    if not is_apollo_torch_available():
        raise ImportError(
            "You need to install `apollo_torch` in order to use APOLLO optimizers. "
            "Install it with `pip install git+https://github.com/zhuhanqing/APOLLO`"
        )
    from apollo_torch import APOLLOAdamW
    optimizer_mapping = {
        OptimizerNames.APOLLO_ADAMW: APOLLOAdamW,
        OptimizerNames.APOLLO_ADAMW_LAYERWISE: APOLLOAdamW,
    }
    # APOLLO-specific hyperparameters are popped out of `optim_args` (string values coerced here).
    apollo_optim_kwargs = {
        "rank": int(ctx.optim_args.pop("rank", 128)),
        "proj": ctx.optim_args.pop("proj", "random"),
        "scale_type": ctx.optim_args.pop("scale_type", "channel"),
        "update_proj_gap": int(ctx.optim_args.pop("update_proj_gap", 200)),
        "scale": float(ctx.optim_args.pop("scale", 1.0)),
        "proj_type": ctx.optim_args.pop("proj_type", "std"),
    }
    # Unlike GaLore, APOLLO also receives the Adam-style kwargs in the target param group.
    apollo_optim_kwargs.update(ctx.adam_kwargs)
    return _setup_low_rank_optimizer(
        ctx.args, ctx.model, ctx.args.optim, optimizer_mapping, apollo_optim_kwargs, ctx.optimizer_kwargs
    )
def _get_lomo_optimizer(ctx: OptimizerContext) -> tuple[Any, dict[str, Any]]:
    """Get LOMO optimizer.
    Selects `AdaLomo` when the optimizer name contains "ada", otherwise `Lomo`.
    Raises:
        ImportError: If `lomo_optim` is not installed.
        ValueError: If `ctx.model` is None.
    """
    if not is_lomo_available():
        raise ImportError(
            "You need to install `lomo_optim` in order to use LOMO optimizers. "
            "Install it with `pip install lomo-optim`"
        )
    if ctx.model is None:
        raise ValueError("You need to pass a `model` in order to correctly initialize a LOMO optimizer.")
    from lomo_optim import AdaLomo, Lomo
    optimizer_cls = AdaLomo if "ada" in ctx.args.optim else Lomo
    # LOMO optimizers take the model itself as a constructor argument.
    ctx.optimizer_kwargs.update({"model": ctx.model})
    return optimizer_cls, ctx.optimizer_kwargs
def _get_grokadamw(ctx: OptimizerContext) -> tuple[Any, dict[str, Any]]:
    """Get GrokAdamW optimizer, reading float hyperparameters from `optim_args` with defaults."""
    if not is_grokadamw_available():
        raise ValueError("Please install grokadamw with `pip install grokadamw`")
    from grokadamw import GrokAdamW
    # Every tunable is a float; pull each from `optim_args` (string values) or fall back to its default.
    defaults = {
        "alpha_init": 0.98,
        "lamb": 2.0,
        "gamma": 0.1,
        "grokking_signal_decay_rate": 0.1,
        "gradient_clipping": 1.0,
    }
    ctx.optimizer_kwargs.update(
        {name: float(ctx.optim_args.get(name, fallback)) for name, fallback in defaults.items()}
    )
    return GrokAdamW, ctx.optimizer_kwargs
def _get_torchao_optimizer(ctx: OptimizerContext) -> tuple[Any, dict[str, Any]]:
    """Get TorchAO 4-bit or 8-bit optimizer.
    Optional `optim_args` keys: `block_size` (int), `bf16_stochastic_round` (bool string).
    Raises:
        ImportError: If `torchao>=0.4.0` is missing or `torch<=2.4` is installed.
    """
    if not is_torchao_available() or version.parse(importlib.metadata.version("torchao")) < version.parse("0.4.0"):
        raise ImportError(
            "You need to have `torchao>=0.4.0` in order to use torch 4-bit optimizers. "
            "Install it with `pip install torchao` or follow the instructions here: "
            "https://github.com/pytorch/ao"
        )
    if version.parse(importlib.metadata.version("torch")) <= version.parse("2.4"):
        raise ImportError(
            "You need to have `torch>2.4` in order to use torch 4-bit optimizers. "
            "Install it with `pip install --upgrade torch` it is available on pipy. "
            "Otherwise, you need to install torch nightly."
        )
    # The low-bit optimizers moved out of `prototype` as of torchao 0.11.
    if version.parse(importlib.metadata.version("torchao")) >= version.parse("0.11.0"):
        from torchao.optim import AdamW4bit, AdamW8bit
    else:
        from torchao.prototype.low_bit_optim import AdamW4bit, AdamW8bit
    if ctx.args.optim == OptimizerNames.ADAMW_TORCH_4BIT:
        optimizer_cls = AdamW4bit
    else:
        optimizer_cls = AdamW8bit
    ctx.optimizer_kwargs.update(
        {
            # `optim_args` values arrive as strings, so coerce explicitly; the default stays an int.
            "block_size": int(ctx.optim_args.get("block_size", 256)),
            "bf16_stochastic_round": strtobool(ctx.optim_args.get("bf16_stochastic_round", "False")),
        }
    )
    ctx.optimizer_kwargs.update(ctx.adam_kwargs)
    return optimizer_cls, ctx.optimizer_kwargs
def _get_schedule_free_optimizer(ctx: OptimizerContext) -> tuple[Any, dict[str, Any]]:
    """Get ScheduleFree optimizer.
    Supports the RAdam, AdamW and SGD schedule-free variants selected via `ctx.args.optim`.
    Optional `optim_args` keys: `weight_lr_power`, `r`.
    Raises:
        ImportError: If `schedulefree` (or `schedulefree>=1.4.0` for RAdam) is not installed.
        ValueError: If `ctx.args.optim` is not a schedule-free optimizer name.
    """
    if not is_schedulefree_available():
        raise ImportError(
            "You need to install `schedulefree` in order to use schedulefree optimizers. "
            "Install it with `pip install schedulefree.`"
        )
    from schedulefree import AdamWScheduleFree, SGDScheduleFree
    additional_optim_kwargs = {}
    require_warmup = True
    if ctx.args.optim == OptimizerNames.SCHEDULE_FREE_RADAM:
        if not is_schedulefree_available("1.4.0"):
            raise ImportError(
                "You need to install `schedulefree>=1.4.0` in order to use RAdamScheduleFree optimizer. "
                "Install it with `pip install schedulefree.`"
            )
        # RAdamScheduleFree only exists from schedulefree 1.4.0; import lazily inside the branch.
        from schedulefree import RAdamScheduleFree
        optimizer_cls = RAdamScheduleFree
        additional_optim_kwargs = ctx.adam_kwargs
        # The RAdam variant is used without warmup (no `warmup_steps` forwarded below).
        require_warmup = False
    elif ctx.args.optim == OptimizerNames.SCHEDULE_FREE_ADAMW:
        optimizer_cls = AdamWScheduleFree
        additional_optim_kwargs = ctx.adam_kwargs
    elif ctx.args.optim == OptimizerNames.SCHEDULE_FREE_SGD:
        optimizer_cls = SGDScheduleFree
    else:
        raise ValueError("Invalid schedulefree optimizer")
    additional_optim_kwargs["weight_decay"] = ctx.args.weight_decay
    if require_warmup:
        additional_optim_kwargs["warmup_steps"] = ctx.args.warmup_steps
    additional_optim_kwargs.update(
        {
            "weight_lr_power": float(ctx.optim_args.get("weight_lr_power", 2.0)),
            "r": float(ctx.optim_args.get("r", 0.0)),
        }
    )
    ctx.optimizer_kwargs.update(additional_optim_kwargs)
    return optimizer_cls, ctx.optimizer_kwargs
def _get_stable_adamw(ctx: OptimizerContext) -> tuple[Any, dict[str, Any]]:
    """Get StableAdamW optimizer from torch-optimi.
    Optional `optim_args` keys: `decouple_lr` (bool string), `max_lr` (float), `kahan_sum` (bool string).
    Raises:
        ImportError: If `torch-optimi` is not installed.
    """
    if not is_torch_optimi_available():
        raise ImportError(
            "You need to install `torch-optimi` in order to use stable_adamw optimizers. "
            "Install it with `pip install torch-optimi`."
        )
    from optimi import StableAdamW
    max_lr = ctx.optim_args.pop("max_lr", None)
    if max_lr is not None:
        max_lr = float(max_lr)
    # `optim_args` values are strings, so a plain bool() would turn "False" into True.
    # Use strtobool (as the other handlers do) for correct parsing.
    kahan_sum = ctx.optim_args.pop("kahan_sum", None)
    if kahan_sum is not None:
        kahan_sum = bool(strtobool(str(kahan_sum)))
    decouple_lr = ctx.optim_args.pop("decouple_lr", None)
    decouple_lr = bool(strtobool(str(decouple_lr))) if decouple_lr is not None else False
    ctx.adam_kwargs["weight_decay"] = ctx.args.weight_decay
    stable_adamw_kwargs = {
        "decouple_lr": decouple_lr,
        "max_lr": max_lr,
        "kahan_sum": kahan_sum,
    }
    ctx.optimizer_kwargs.update(ctx.adam_kwargs)
    ctx.optimizer_kwargs.update(stable_adamw_kwargs)
    return StableAdamW, ctx.optimizer_kwargs
# =============================================================================
# Dispatch table
# =============================================================================
# All bitsandbytes-backed names share one handler, which derives the concrete
# class / bitness / paging from substrings of the optimizer name.
_BITSANDBYTES_OPTIMIZERS = [
    OptimizerNames.ADAMW_BNB,
    OptimizerNames.ADAMW_8BIT,
    OptimizerNames.PAGED_ADAMW,
    OptimizerNames.PAGED_ADAMW_8BIT,
    OptimizerNames.ADEMAMIX,
    OptimizerNames.ADEMAMIX_8BIT,
    OptimizerNames.PAGED_ADEMAMIX,
    OptimizerNames.PAGED_ADEMAMIX_8BIT,
    OptimizerNames.LION,
    OptimizerNames.LION_8BIT,
    OptimizerNames.PAGED_LION,
    OptimizerNames.PAGED_LION_8BIT,
    OptimizerNames.RMSPROP_BNB,
    OptimizerNames.RMSPROP_8BIT,
    OptimizerNames.RMSPROP_32BIT,
]
# GaLore variants (including the layer-wise ones) handled by `_get_galore_optimizer`.
_GALORE_OPTIMIZERS = [
    OptimizerNames.GALORE_ADAMW,
    OptimizerNames.GALORE_ADAMW_8BIT,
    OptimizerNames.GALORE_ADAFACTOR,
    OptimizerNames.GALORE_ADAMW_LAYERWISE,
    OptimizerNames.GALORE_ADAMW_8BIT_LAYERWISE,
    OptimizerNames.GALORE_ADAFACTOR_LAYERWISE,
]
# APOLLO variants handled by `_get_apollo_optimizer`.
_APOLLO_OPTIMIZERS = [
    OptimizerNames.APOLLO_ADAMW,
    OptimizerNames.APOLLO_ADAMW_LAYERWISE,
]
# TorchAO low-bit AdamW variants handled by `_get_torchao_optimizer`.
_TORCHAO_OPTIMIZERS = [
    OptimizerNames.ADAMW_TORCH_4BIT,
    OptimizerNames.ADAMW_TORCH_8BIT,
]
# ScheduleFree variants handled by `_get_schedule_free_optimizer`.
_SCHEDULE_FREE_OPTIMIZERS = [
    OptimizerNames.SCHEDULE_FREE_RADAM,
    OptimizerNames.SCHEDULE_FREE_ADAMW,
    OptimizerNames.SCHEDULE_FREE_SGD,
]
# =============================================================================
# Built-in optimizer handlers registry
# =============================================================================
# Maps every supported optimizer name to its handler. Families that share a
# handler are expanded via `dict.fromkeys` over the lists above.
_OPTIMIZER_HANDLERS: dict[str, OptimizerHandler] = {
    OptimizerNames.ADAFACTOR: _get_adafactor,
    OptimizerNames.ADAMW_TORCH: _get_adamw_torch,
    OptimizerNames.ADAMW_TORCH_FUSED: _get_adamw_torch,
    OptimizerNames.ADAMW_TORCH_XLA: _get_adamw_torch_xla,
    OptimizerNames.ADAMW_TORCH_NPU_FUSED: _get_adamw_torch_npu_fused,
    OptimizerNames.ADAMW_APEX_FUSED: _get_adamw_apex_fused,
    OptimizerNames.ADAMW_ANYPRECISION: _get_adamw_anyprecision,
    OptimizerNames.SGD: _get_sgd,
    OptimizerNames.ADAGRAD: _get_adagrad,
    OptimizerNames.RMSPROP: _get_rmsprop,
    OptimizerNames.GROKADAMW: _get_grokadamw,
    OptimizerNames.STABLE_ADAMW: _get_stable_adamw,
    OptimizerNames.LOMO: _get_lomo_optimizer,
    OptimizerNames.ADALOMO: _get_lomo_optimizer,
    **dict.fromkeys(_BITSANDBYTES_OPTIMIZERS, _get_bitsandbytes_optimizer),
    **dict.fromkeys(_GALORE_OPTIMIZERS, _get_galore_optimizer),
    **dict.fromkeys(_APOLLO_OPTIMIZERS, _get_apollo_optimizer),
    **dict.fromkeys(_TORCHAO_OPTIMIZERS, _get_torchao_optimizer),
    **dict.fromkeys(_SCHEDULE_FREE_OPTIMIZERS, _get_schedule_free_optimizer),
}
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/trainer_optimizer.py",
"license": "Apache License 2.0",
"lines": 509,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:tests/trainer/test_training_args.py | import dataclasses
import os
import tempfile
import unittest
import torch
from transformers import TrainingArguments
from transformers.debug_utils import DebugOption
from transformers.trainer_utils import HubStrategy, IntervalStrategy, SaveStrategy, SchedulerType
from transformers.training_args import OptimizerNames
class TestTrainingArguments(unittest.TestCase):
    """Unit tests for `TrainingArguments` validation, string-to-enum coercion, and defaulting logic."""
    def test_default_output_dir(self):
        """Test that output_dir defaults to 'trainer_output' when not specified."""
        args = TrainingArguments(output_dir=None)
        self.assertEqual(args.output_dir, "trainer_output")
    def test_custom_output_dir(self):
        """Test that output_dir is respected when specified."""
        with tempfile.TemporaryDirectory() as tmp_dir:
            args = TrainingArguments(output_dir=tmp_dir)
            self.assertEqual(args.output_dir, tmp_dir)
    def test_output_dir_creation(self):
        """Test that output_dir is created only when needed."""
        with tempfile.TemporaryDirectory() as tmp_dir:
            output_dir = os.path.join(tmp_dir, "test_output")
            # Directory should not exist before creating args
            self.assertFalse(os.path.exists(output_dir))
            # Create args with save_strategy="no" - should not create directory
            args = TrainingArguments(
                output_dir=output_dir,
                do_train=True,
                save_strategy="no",
                report_to=None,
            )
            self.assertFalse(os.path.exists(output_dir))
            # Now set save_strategy="steps" - should create directory when needed
            args.save_strategy = "steps"
            args.save_steps = 1
            self.assertFalse(os.path.exists(output_dir))  # Still shouldn't exist
            # Directory should be created when actually needed (e.g. in Trainer)
    def test_torch_empty_cache_steps_requirements(self):
        """Test that torch_empty_cache_steps is a positive integer or None."""
        # None is acceptable (feature is disabled):
        args = TrainingArguments(torch_empty_cache_steps=None)
        self.assertIsNone(args.torch_empty_cache_steps)
        # non-int is unacceptable:
        with self.assertRaises(ValueError):
            TrainingArguments(torch_empty_cache_steps=1.0)
        with self.assertRaises(ValueError):
            TrainingArguments(torch_empty_cache_steps="none")
        # negative int is unacceptable:
        with self.assertRaises(ValueError):
            TrainingArguments(torch_empty_cache_steps=-1)
        # zero is unacceptable:
        with self.assertRaises(ValueError):
            TrainingArguments(torch_empty_cache_steps=0)
        # positive int is acceptable:
        args = TrainingArguments(torch_empty_cache_steps=1)
        self.assertEqual(args.torch_empty_cache_steps, 1)
    def test_output_dir_expands_user(self):
        """Test that ~ in output_dir is expanded to the user's home directory."""
        args = TrainingArguments(output_dir="~/foo", report_to=None)
        self.assertEqual(args.output_dir, os.path.expanduser("~/foo"))
    def test_enum_coercions(self):
        """Test that string values are correctly converted to their enum types."""
        args = TrainingArguments(
            output_dir="tmp",
            eval_strategy="steps",
            eval_steps=10,
            logging_strategy="steps",
            save_strategy="epoch",
            hub_strategy="end",
            lr_scheduler_type="linear",
            optim="adamw_torch",
            report_to=None,
        )
        self.assertEqual(args.eval_strategy, IntervalStrategy.STEPS)
        self.assertEqual(args.logging_strategy, IntervalStrategy.STEPS)
        self.assertEqual(args.save_strategy, SaveStrategy.EPOCH)
        self.assertEqual(args.hub_strategy, HubStrategy.END)
        self.assertEqual(args.lr_scheduler_type, SchedulerType.LINEAR)
        self.assertEqual(args.optim, OptimizerNames.ADAMW_TORCH)
        # Invalid string should raise ValueError
        with self.assertRaises(ValueError):
            TrainingArguments(output_dir="tmp", eval_strategy="invalid_strategy", report_to=None)
    def test_do_eval_auto_enabled(self):
        """Test that do_eval is automatically set to True when eval_strategy is not 'no'."""
        args = TrainingArguments(
            output_dir="tmp",
            do_eval=False,
            eval_strategy="steps",
            eval_steps=10,
            report_to=None,
        )
        self.assertTrue(args.do_eval)
    def test_eval_steps_fallback_to_logging_steps(self):
        """Test that eval_steps falls back to logging_steps when not specified."""
        args = TrainingArguments(
            output_dir="tmp",
            eval_strategy="steps",
            logging_steps=10,
            report_to=None,
        )
        self.assertEqual(args.eval_steps, 10)
    def test_eval_steps_required_when_strategy_steps(self):
        """Test that eval_strategy='steps' with logging_steps=0 raises ValueError."""
        with self.assertRaises(ValueError):
            TrainingArguments(
                output_dir="tmp",
                eval_strategy="steps",
                logging_steps=0,
                report_to=None,
            )
    def test_logging_steps_required_nonzero(self):
        """Test that logging_strategy='steps' with logging_steps=0 raises ValueError."""
        with self.assertRaises(ValueError):
            TrainingArguments(
                output_dir="tmp",
                logging_strategy="steps",
                logging_steps=0,
                report_to=None,
            )
    def test_steps_must_be_integer_when_greater_than_one(self):
        """Test that fractional steps >1 raise ValueError, but <=1 are allowed."""
        with self.assertRaises(ValueError):
            TrainingArguments(
                output_dir="tmp",
                logging_strategy="steps",
                logging_steps=10.5,
                report_to=None,
            )
        with self.assertRaises(ValueError):
            TrainingArguments(
                output_dir="tmp",
                eval_strategy="steps",
                eval_steps=10.5,
                report_to=None,
            )
        with self.assertRaises(ValueError):
            TrainingArguments(
                output_dir="tmp",
                save_strategy="steps",
                save_steps=10.5,
                report_to=None,
            )
        # Fractional values <=1 (ratios) are allowed
        args = TrainingArguments(
            output_dir="tmp",
            logging_strategy="steps",
            logging_steps=0.5,
            report_to=None,
        )
        self.assertEqual(args.logging_steps, 0.5)
    def test_load_best_model_requires_matching_strategies(self):
        """Test load_best_model_at_end validation for strategy and step compatibility."""
        # Mismatched eval/save strategy should raise
        with self.assertRaises(ValueError):
            TrainingArguments(
                output_dir="tmp",
                load_best_model_at_end=True,
                eval_strategy="steps",
                eval_steps=10,
                save_strategy="epoch",
                report_to=None,
            )
        # save_steps not a multiple of eval_steps should raise
        with self.assertRaises(ValueError):
            TrainingArguments(
                output_dir="tmp",
                load_best_model_at_end=True,
                eval_strategy="steps",
                eval_steps=10,
                save_strategy="steps",
                save_steps=15,
                report_to=None,
            )
        # Valid: matching strategies with compatible steps should not raise
        args = TrainingArguments(
            output_dir="tmp",
            load_best_model_at_end=True,
            eval_strategy="steps",
            eval_steps=10,
            save_strategy="steps",
            save_steps=20,
            report_to=None,
        )
        self.assertTrue(args.load_best_model_at_end)
    def test_metric_for_best_model_defaults(self):
        """Test default metric_for_best_model and greater_is_better behavior."""
        # load_best_model_at_end with no metric → defaults to "loss"
        args = TrainingArguments(
            output_dir="tmp",
            load_best_model_at_end=True,
            eval_strategy="epoch",
            save_strategy="epoch",
            report_to=None,
        )
        self.assertEqual(args.metric_for_best_model, "loss")
        self.assertFalse(args.greater_is_better)
        # metric ending in "loss" → greater_is_better is False
        args = TrainingArguments(
            output_dir="tmp",
            load_best_model_at_end=True,
            eval_strategy="epoch",
            save_strategy="epoch",
            metric_for_best_model="eval_loss",
            report_to=None,
        )
        self.assertFalse(args.greater_is_better)
        # metric not ending in "loss" → greater_is_better is True
        args = TrainingArguments(
            output_dir="tmp",
            load_best_model_at_end=True,
            eval_strategy="epoch",
            save_strategy="epoch",
            metric_for_best_model="accuracy",
            report_to=None,
        )
        self.assertTrue(args.greater_is_better)
    def test_fp16_bf16_mutual_exclusivity(self):
        """Test that fp16 and bf16 cannot both be True."""
        with self.assertRaises(ValueError):
            TrainingArguments(output_dir="tmp", fp16=True, bf16=True, report_to=None)
        with self.assertRaises(ValueError):
            TrainingArguments(output_dir="tmp", fp16_full_eval=True, bf16_full_eval=True, report_to=None)
    def test_reduce_on_plateau_requires_eval(self):
        """Test that reduce_lr_on_plateau scheduler requires an eval strategy."""
        with self.assertRaises(ValueError):
            TrainingArguments(
                output_dir="tmp",
                lr_scheduler_type="reduce_lr_on_plateau",
                eval_strategy="no",
                report_to=None,
            )
    def test_torch_compile_auto_enable(self):
        """Test that torch_compile is auto-enabled when mode or backend is set."""
        args = TrainingArguments(
            output_dir="tmp",
            torch_compile_mode="reduce-overhead",
            report_to=None,
        )
        self.assertTrue(args.torch_compile)
        args = TrainingArguments(
            output_dir="tmp",
            torch_compile_backend="inductor",
            report_to=None,
        )
        self.assertTrue(args.torch_compile)
        # Default backend when torch_compile=True
        args = TrainingArguments(
            output_dir="tmp",
            torch_compile=True,
            report_to=None,
        )
        self.assertEqual(args.torch_compile_backend, "inductor")
    def test_report_to_none_handling(self):
        """Test report_to normalization for 'none' and string values."""
        args = TrainingArguments(output_dir="tmp", report_to="none")
        self.assertEqual(args.report_to, [])
        args = TrainingArguments(output_dir="tmp", report_to=["none"])
        self.assertEqual(args.report_to, [])
        args = TrainingArguments(output_dir="tmp", report_to="tensorboard")
        self.assertEqual(args.report_to, ["tensorboard"])
    def test_warmup_steps_validation(self):
        """Test warmup_steps validation for negative values."""
        with self.assertRaises(ValueError):
            TrainingArguments(output_dir="tmp", warmup_steps=-1, report_to=None)
        # Zero and fractional values are valid
        args = TrainingArguments(output_dir="tmp", warmup_steps=0, report_to=None)
        self.assertEqual(args.warmup_steps, 0)
        args = TrainingArguments(output_dir="tmp", warmup_steps=0.5, report_to=None)
        self.assertEqual(args.warmup_steps, 0.5)
    def test_debug_option_parsing(self):
        """Test debug string parsing into DebugOption enum list."""
        args = TrainingArguments(output_dir="tmp", debug="underflow_overflow", report_to=None)
        self.assertEqual(args.debug, [DebugOption.UNDERFLOW_OVERFLOW])
        args = TrainingArguments(output_dir="tmp", debug=None, report_to=None)
        self.assertEqual(args.debug, [])
    def test_dataloader_prefetch_requires_workers(self):
        """Test that dataloader_prefetch_factor requires num_workers > 0."""
        with self.assertRaises(ValueError):
            TrainingArguments(
                output_dir="tmp",
                dataloader_prefetch_factor=2,
                dataloader_num_workers=0,
                report_to=None,
            )
        # Valid: prefetch with workers > 0
        args = TrainingArguments(
            output_dir="tmp",
            dataloader_prefetch_factor=2,
            dataloader_num_workers=2,
            report_to=None,
        )
        self.assertEqual(args.dataloader_prefetch_factor, 2)
    def test_use_cpu_disables_pin_memory(self):
        """Test that use_cpu=True disables dataloader_pin_memory."""
        args = TrainingArguments(output_dir="tmp", use_cpu=True, report_to=None)
        self.assertFalse(args.dataloader_pin_memory)
    def test_include_num_input_tokens_seen_coercion(self):
        """Test bool-to-string coercion for include_num_input_tokens_seen."""
        args = TrainingArguments(output_dir="tmp", include_num_input_tokens_seen=True, report_to=None)
        self.assertEqual(args.include_num_input_tokens_seen, "all")
        args = TrainingArguments(output_dir="tmp", include_num_input_tokens_seen=False, report_to=None)
        self.assertEqual(args.include_num_input_tokens_seen, "no")
    def test_dict_field_parsing(self):
        """Test that JSON string dict fields are parsed into dicts."""
        args = TrainingArguments(output_dir="tmp", lr_scheduler_kwargs='{"factor": 0.5}', report_to=None)
        self.assertEqual(args.lr_scheduler_kwargs, {"factor": 0.5})
    def test_dtype_to_json(self):
        """Test that a torch.dtype field serializes to its plain string name in to_dict()."""
        @dataclasses.dataclass
        class TorchDtypeTrainingArguments(TrainingArguments):
            dtype: torch.dtype = dataclasses.field(
                default=torch.float32,
            )
        # Round-trip every supported dtype through to_dict and check the JSON-friendly string form.
        for dtype in [
            "float32",
            "float64",
            "complex64",
            "complex128",
            "float16",
            "bfloat16",
            "uint8",
            "int8",
            "int16",
            "int32",
            "int64",
            "bool",
        ]:
            torch_dtype = getattr(torch, dtype)
            with tempfile.TemporaryDirectory() as tmp_dir:
                args = TorchDtypeTrainingArguments(output_dir=tmp_dir, dtype=torch_dtype)
                args_dict = args.to_dict()
                self.assertIn("dtype", args_dict)
                self.assertEqual(args_dict["dtype"], dtype)
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/trainer/test_training_args.py",
"license": "Apache License 2.0",
"lines": 333,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:src/transformers/models/glm_moe_dsa/modular_glm_moe_dsa.py | # Copyright 2026 the HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections.abc import Callable
import torch
import torch.nn as nn
import torch.nn.functional as F
from ...cache_utils import Cache
from ...configuration_utils import PreTrainedConfig, layer_type_validation
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from ...modeling_rope_utils import RopeParameters
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS
from ...models.llama.modeling_llama import rotate_half
from ...processing_utils import Unpack
from ...utils import logging
from ...utils.generic import is_flash_attention_requested
from ..glm4_moe.modeling_glm4_moe import (
Glm4MoeForCausalLM,
Glm4MoeModel,
Glm4MoePreTrainedModel,
Glm4MoeRMSNorm,
)
from ..glm4_moe_lite.modeling_glm4_moe_lite import (
Glm4MoeLiteDecoderLayer,
eager_attention_forward,
)
logger = logging.get_logger(__name__)


def apply_rotary_pos_emb(
    x: torch.Tensor,
    cos: torch.Tensor,
    sin: torch.Tensor,
    unsqueeze_dim: int = 1,
) -> torch.Tensor:
    """
    Apply rotary position embeddings (split-half / NeoX style) to a single tensor.

    Transformers' counterpart of DeepSeek V3.2's `apply_rotary_emb(x, freqs_cis, interleaved)`:
    instead of complex-valued `freqs_cis`, the rotation uses the pre-split `(cos, sin)` tensors
    produced by the RotaryEmbedding module.

    Args:
        x (`torch.Tensor`): Tensor of shape `[..., head_dim]` to rotate.
        cos (`torch.Tensor`): Cosine table, shape `[batch, seq_len, head_dim]`.
        sin (`torch.Tensor`): Sine table, shape `[batch, seq_len, head_dim]`.
        unsqueeze_dim (`int`): Axis on which cos/sin get a broadcast dimension —
            use `1` for `[B, H, S, D]` inputs and `2` for `[B, S, H, D]` inputs.

    Returns:
        `torch.Tensor`: Rotated tensor, same shape as `x`.
    """
    cos_broadcast = cos.unsqueeze(unsqueeze_dim)
    sin_broadcast = sin.unsqueeze(unsqueeze_dim)
    # Same split-half rotation as llama's apply_rotary_pos_emb: (x[:d/2], x[d/2:]).
    return x * cos_broadcast + rotate_half(x) * sin_broadcast
class GlmMoeDsaConfig(PreTrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`GlmMoeDsaModel`]. It is used to instantiate a
    GLM-5 model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
    defaults will yield a similar configuration to that of the GLM-5.
    e.g. [zai-org/GLM-5](https://huggingface.co/zai-org/GLM-5)

    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 154880):
            Vocabulary size of the model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`GlmMoeDsaModel`].
        hidden_size (`int`, *optional*, defaults to 6144):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 12288):
            Dimension of the dense MLP representations.
        moe_intermediate_size (`int`, *optional*, defaults to 2048):
            Dimension of the MoE expert representations.
        num_hidden_layers (`int`, *optional*, defaults to 78):
            Number of hidden layers in the Transformer decoder.
        num_attention_heads (`int`, *optional*, defaults to 64):
            Number of attention heads for each attention layer in the Transformer decoder.
        num_key_value_heads (`int`, *optional*, defaults to 64):
            Number of key-value heads for Grouped Query Attention. If equal to `num_attention_heads`, uses MHA.
        n_shared_experts (`int`, *optional*, defaults to 1):
            Number of shared experts in MoE layers.
        n_routed_experts (`int`, *optional*, defaults to 256):
            Number of routed experts in MoE layers.
        routed_scaling_factor (`float`, *optional*, defaults to 2.5):
            Scaling factor for routed experts.
        kv_lora_rank (`int`, *optional*, defaults to 512):
            Rank of the LoRA matrices for key and value projections (MLA).
        q_lora_rank (`int`, *optional*, defaults to 2048):
            Rank of the LoRA matrices for query projections (MLA).
        qk_rope_head_dim (`int`, *optional*, defaults to 64):
            Dimension of the query/key heads that use rotary position embeddings.
        qk_nope_head_dim (`int`, *optional*, defaults to 192):
            Dimension of the query/key heads that don't use rotary position embeddings.
        v_head_dim (`int`, *optional*, defaults to 256):
            Dimension of the value heads.
        n_group (`int`, *optional*, defaults to 1):
            Number of groups for routed experts.
        topk_group (`int`, *optional*, defaults to 1):
            Number of selected groups for each token.
        num_experts_per_tok (`int`, *optional*, defaults to 8):
            Number of experts selected per token.
        norm_topk_prob (`bool`, *optional*, defaults to `True`):
            Whether to normalize the weights of the routed experts.
        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
            The non-linear activation function in the decoder.
        max_position_embeddings (`int`, *optional*, defaults to 202752):
            The maximum sequence length that this model might ever be used with.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        rms_norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon used by the rms normalization layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions.
        pad_token_id (`int`, *optional*):
            Padding token id.
        bos_token_id (`int`, *optional*, defaults to 0):
            Beginning of stream token id.
        eos_token_id (`int`, *optional*, defaults to 1):
            End of stream token id.
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether to tie weight embeddings.
        rope_parameters (`RopeParameters`, *optional*):
            Configuration parameters for the RoPE embeddings, including `rope_theta` and optional scaling parameters.
        mlp_layer_types (`list`, *optional*):
            MLP type pattern for each layer (`"dense"` or `"sparse"`). Defaults to 3 dense + rest sparse.
        attention_bias (`bool`, *optional*, defaults to `False`):
            Whether to use a bias in the query, key, value and output projection layers during self-attention.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        index_topk (`int`, *optional*, defaults to 2048):
            Number of top tokens selected by the indexer for sparse attention.
        index_head_dim (`int`, *optional*, defaults to 128):
            Head dimension for the indexer projections (DSA).
        index_n_heads (`int | None`, *optional*, defaults to 32):
            Number of heads for the indexer projections (DSA).

    ```python
    >>> from transformers import GlmMoeDsaConfig, GlmMoeDsaModel
    >>> # Initializing a GLM-MoE-DSA configuration
    >>> configuration = GlmMoeDsaConfig()
    >>> # Initializing a model from the configuration
    >>> model = GlmMoeDsaModel(configuration)
    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "glm_moe_dsa"
    keys_to_ignore_at_inference = ["past_key_values"]
    # Tensor-parallel sharding plan for the MoE/attention projections.
    base_model_tp_plan = {
        "layers.*.self_attn.o_proj": "rowwise",
        "layers.*.mlp.experts.gate_up_proj": "packed_colwise",
        "layers.*.mlp.experts.down_proj": "rowwise",
        "layers.*.mlp.experts": "moe_tp_experts",
        "layers.*.mlp.gate_proj": "colwise",
        "layers.*.mlp.up_proj": "colwise",
        "layers.*.mlp.down_proj": "rowwise",
    }
    # Pipeline-parallel plan: module name -> (input names, output names).
    base_model_pp_plan = {
        "embed_tokens": (["input_ids"], ["inputs_embeds"]),
        "layers": (["hidden_states", "attention_mask"], ["hidden_states"]),
        "norm": (["hidden_states"], ["hidden_states"]),
    }
    attribute_map = {
        "num_local_experts": "n_routed_experts",
    }

    def __init__(
        self,
        vocab_size: int | None = 154880,
        hidden_size: int | None = 6144,
        intermediate_size: int | None = 12288,
        moe_intermediate_size: int | None = 2048,
        num_hidden_layers: int | None = 78,
        num_attention_heads: int | None = 64,
        num_key_value_heads: int | None = 64,
        n_shared_experts: int | None = 1,
        n_routed_experts: int | None = 256,
        routed_scaling_factor: float | None = 2.5,
        kv_lora_rank: int | None = 512,
        q_lora_rank: int | None = 2048,
        qk_rope_head_dim: int | None = 64,
        qk_nope_head_dim: int | None = 192,
        v_head_dim: int | None = 256,
        n_group: int | None = 1,
        topk_group: int | None = 1,
        num_experts_per_tok: int | None = 8,
        norm_topk_prob: bool | None = True,
        hidden_act: str | None = "silu",
        max_position_embeddings: int | None = 202752,
        initializer_range: float | None = 0.02,
        rms_norm_eps: float | None = 1e-5,
        use_cache: bool | None = True,
        pad_token_id: int | None = None,
        bos_token_id: int | None = 0,
        eos_token_id: int | None = 1,
        tie_word_embeddings: bool | None = False,
        rope_parameters: RopeParameters | dict[str, RopeParameters] | None = None,
        mlp_layer_types=None,
        attention_bias: bool | None = False,
        attention_dropout: float | None = 0.0,
        index_topk: int | None = 2048,
        index_head_dim: int | None = 128,
        index_n_heads: int | None = 32,
        **kwargs,
    ):
        # Model dimensions
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.moe_intermediate_size = moe_intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.max_position_embeddings = max_position_embeddings
        # Attention dimensions (MLA)
        self.num_attention_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.kv_lora_rank = kv_lora_rank
        self.q_lora_rank = q_lora_rank
        self.qk_rope_head_dim = qk_rope_head_dim
        self.qk_nope_head_dim = qk_nope_head_dim
        # Full per-head query/key width is the concatenation of the nope and rope parts.
        self.qk_head_dim = qk_nope_head_dim + qk_rope_head_dim
        self.v_head_dim = v_head_dim
        # NOTE(review): `head_dim` deliberately tracks only the RoPE part, not `qk_head_dim` —
        # presumably consumed by the rotary embedding; confirm in the generated modeling code.
        self.head_dim = qk_rope_head_dim
        # MoE parameters
        self.n_shared_experts = n_shared_experts
        self.n_routed_experts = n_routed_experts
        self.routed_scaling_factor = routed_scaling_factor
        self.n_group = n_group
        self.topk_group = topk_group
        self.num_experts_per_tok = num_experts_per_tok
        self.norm_topk_prob = norm_topk_prob
        # MLP layer types: first 3 dense, rest sparse
        self.mlp_layer_types = mlp_layer_types
        if self.mlp_layer_types is None:
            self.mlp_layer_types = ["dense"] * min(3, num_hidden_layers) + ["sparse"] * (num_hidden_layers - 3)
        layer_type_validation(self.mlp_layer_types, self.num_hidden_layers, attention=False)
        # Indexer (DSA) parameters
        self.index_topk = index_topk
        self.index_head_dim = index_head_dim
        self.index_n_heads = index_n_heads
        # General config
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.attention_bias = attention_bias
        self.attention_dropout = attention_dropout
        self.rope_parameters = rope_parameters
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )
class GlmMoeDsaRMSNorm(Glm4MoeRMSNorm):
    """RMS normalization layer; reuses the GLM-4-MoE implementation unchanged."""
class GlmMoeDsaIndexer(nn.Module):
    """
    Dynamic Sparse Attention (DSA) indexer for selecting top-k tokens.

    The Indexer has its own lightweight projections (wq_b, wk) separate from the
    main MLA attention. It uses non-interleaved (NeoX/Llama) RoPE, unlike the main attention
    which uses interleaved RoPE.

    **Cache strategy**: The Indexer manages its own key cache (`_cached_keys`) separately
    from the DynamicCache used by MLA attention, since DynamicCache is sized for exactly
    `num_hidden_layers` attention layers. Keys are concatenated along the sequence dimension
    during autoregressive decode.
    """

    def __init__(self, config: "GlmMoeDsaConfig", layer_idx: int):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        # Dimensions pulled from the config; `head_dim` here is the *indexer* head dim
        # (config.index_head_dim), not the attention head dim.
        self.hidden_size: int = config.hidden_size
        self.n_heads: int = config.index_n_heads
        self.head_dim: int = config.index_head_dim
        self.qk_rope_head_dim: int = config.qk_rope_head_dim
        self.index_topk: int = config.index_topk
        self.q_lora_rank: int = config.q_lora_rank
        # Named to match checkpoint: wq_b, wk, k_norm
        self.wq_b = nn.Linear(self.q_lora_rank, self.n_heads * self.head_dim, bias=False)
        self.wk = nn.Linear(self.hidden_size, self.head_dim, bias=False)
        self.k_norm = nn.LayerNorm(self.head_dim, eps=1e-6)
        # Named to match checkpoint: weights_proj
        # In the reference, this is fp32; the HF FP8 checkpoint stores a bf16 tensor.
        # Keeping it as a plain Linear prevents FP8 conversion (see `_keep_in_fp32_modules`).
        self.weights_proj = nn.Linear(self.hidden_size, self.n_heads, bias=False)
        self.softmax_scale = self.head_dim**-0.5
        # Indexer maintains its own key cache (not in DynamicCache, which is sized for attention layers only)
        self._cached_keys: torch.Tensor | None = None

    @torch.no_grad()
    def forward(
        self,
        hidden_states: torch.Tensor,  # [B, S, hidden]
        q_resid: torch.Tensor,  # [B, S, q_lora_rank]
        position_embeddings: tuple[torch.Tensor, torch.Tensor],
        attention_mask: torch.Tensor | None,
        use_cache: bool = False,
    ) -> torch.LongTensor:
        """
        Computes top-k token indices for sparse attention (DSA).

        This is the bf16 equivalent of the reference Indexer which uses `rotate_activation` (Hadamard transform)
        and `fp8_index` (FP8 quantized scoring kernel). Since the Hadamard transform is orthogonal (dot products
        are preserved: Hq·Hk = q·k), and FP8 quantization is a precision optimization, we skip both and compute
        scores directly in bf16/fp32.

        The scoring logic computes:
            index_score[b,s,t] = Σ_h (weight[b,s,h] · softmax_scale · q[b,s,h,:] · k[b,t,:])

        Args:
            hidden_states: Input hidden states `[B, S, hidden_size]`.
            q_resid: Query residual from `q_a_layernorm(q_a_proj(x))`, shape `[B, S, q_lora_rank]`.
            position_embeddings: `(cos, sin)` from RotaryEmbedding.
            attention_mask: Causal mask, broadcastable to `[B, S, T]`.
            use_cache: Whether to store/update the indexer's own key cache for autoregressive decode.

        Returns:
            `torch.LongTensor`: Top-k token indices of shape `[B, S, topk]`.
        """
        batch_size, seq_len, _ = hidden_states.shape
        cos, sin = position_embeddings

        # === Queries ===
        q = self.wq_b(q_resid)  # [B, S, H*D]
        q = q.view(batch_size, seq_len, self.n_heads, self.head_dim)  # [B, S, H, D]
        # RoPE is applied to the *first* qk_rope_head_dim channels; the remainder stays positional-free.
        q_pe, q_nope = torch.split(q, [self.qk_rope_head_dim, self.head_dim - self.qk_rope_head_dim], dim=-1)
        q_pe = apply_rotary_pos_emb(q_pe, cos, sin, unsqueeze_dim=2)  # [B, S, H, rope_D]
        q = torch.cat([q_pe, q_nope], dim=-1)  # [B, S, H, D]

        # === Keys ===
        # Single-head key stream, normalized before the rope/nope split (same split order as queries).
        k = self.k_norm(self.wk(hidden_states))  # [B, S, D]
        k_pe, k_nope = torch.split(k, [self.qk_rope_head_dim, self.head_dim - self.qk_rope_head_dim], dim=-1)
        k_pe = apply_rotary_pos_emb(k_pe.unsqueeze(2), cos, sin, unsqueeze_dim=2).squeeze(2)  # [B, S, rope_D]
        k = torch.cat([k_pe, k_nope], dim=-1)  # [B, S, D]

        # === Key cache (managed by the indexer, not DynamicCache) ===
        # Reset cache on prefill (new prompt) to avoid stale keys / batch-size mismatch
        if seq_len > 1:
            self._cached_keys = None
        if use_cache:
            if self._cached_keys is not None:
                k_cached = torch.cat([self._cached_keys, k], dim=1)  # [B, T, D]
            else:
                k_cached = k
            self._cached_keys = k_cached
        else:
            k_cached = k

        # === Scoring ===
        # Reference: weights = weights_proj(x.float()) * n_heads^(-0.5)
        # Reference: weights = weights.unsqueeze(-1) * q_scale * softmax_scale
        # Reference: index_score = fp8_index(q_fp8, weights, k_cache, k_scale_cache)
        #
        # In bf16 mode (no FP8), q_scale = 1. The fp8_index kernel computes:
        #   score[b,s,t] = sum_h(weights[b,s,h] * dot(q[b,s,h,:], k[b,t,:]))
        # where weights already absorbs n_heads^(-0.5) and softmax_scale.
        # Don't force fp32 inputs here: the checkpoint stores `weights_proj.weight` in bf16.
        # Use native dtype for matmul, then upcast the result for scoring stability.
        weights = self.weights_proj(hidden_states).float() * (self.n_heads**-0.5)  # [B, S, H]
        # q·k^T per head: [B, S, H, D] @ [B, T, D]^T → [B, S, H, T]
        scores = torch.einsum("bshd,btd->bsht", q.float(), k_cached.float()) * self.softmax_scale
        # Weight per head and sum across heads → [B, S, T]
        index_scores = torch.einsum("bsht,bsh->bst", scores, weights)
        if attention_mask is not None:
            index_scores = index_scores + attention_mask

        total_len = index_scores.shape[-1]
        # Clamp top-k to the available context (short prompts / early decode steps).
        topk = min(self.index_topk, total_len)
        topk_indices = index_scores.topk(topk, dim=-1).indices  # [B, S, topk]
        return topk_indices
class GlmMoeDsaAttention(nn.Module):
    """
    Multi-head Latent Attention (MLA) with Dynamic Sparse Attention (DSA) indexer.

    This follows the same architecture as DeepSeek V3.2's MLA:
    - Query: x → q_a_proj → RMSNorm → q_b_proj → split(q_nope, q_pe) → RoPE(q_pe)
    - KV: x → kv_a_proj → split(kv_compressed, k_pe) → RMSNorm(kv_compressed) → kv_b_proj
      → RoPE(k_pe)
    - Cache: fully expanded key_states [B, H, T, qk_head_dim] and value_states [B, H, T, v_head_dim]
    - Indexer: selects top-k tokens via DSA, applied as an additive -inf mask on attention scores

    **Caching strategy**: follows the DeepSeek V3 transformers convention of fully expanding K/V
    before caching. This ensures compatibility with DynamicCache, StaticCache, flash attention,
    and SDPA backends. The reference's compressed-cache decode path (which avoids the kv_b_proj
    expansion at decode time) is a future optimization that would require a dedicated MLA cache class.

    **FP8 compatibility**: all weight accesses use standard nn.Linear forward calls (never
    raw `.weight` access), so FP8-quantized checkpoints work transparently.
    """

    def __init__(self, config: GlmMoeDsaConfig, layer_idx: int):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
        self.attention_dropout = config.attention_dropout
        self.num_heads = config.num_attention_heads
        self.q_lora_rank = config.q_lora_rank
        self.qk_rope_head_dim = config.qk_rope_head_dim
        self.kv_lora_rank = config.kv_lora_rank
        self.v_head_dim = config.v_head_dim
        self.qk_nope_head_dim = config.qk_nope_head_dim
        self.qk_head_dim = config.qk_head_dim
        self.is_causal = True
        # Query projection (with optional LoRA)
        if self.q_lora_rank is None:
            self.q_proj = nn.Linear(config.hidden_size, self.num_heads * self.qk_head_dim, bias=False)
        else:
            self.q_a_proj = nn.Linear(config.hidden_size, config.q_lora_rank, bias=config.attention_bias)
            self.q_a_layernorm = GlmMoeDsaRMSNorm(config.q_lora_rank)
            self.q_b_proj = nn.Linear(config.q_lora_rank, self.num_heads * self.qk_head_dim, bias=False)
        # Key-Value projections (MLA compressed path)
        self.kv_a_proj_with_mqa = nn.Linear(
            config.hidden_size,
            self.kv_lora_rank + self.qk_rope_head_dim,
            bias=config.attention_bias,
        )
        self.kv_a_layernorm = GlmMoeDsaRMSNorm(self.kv_lora_rank)
        self.kv_b_proj = nn.Linear(
            self.kv_lora_rank,
            self.num_heads * (self.qk_nope_head_dim + self.v_head_dim),
            bias=False,
        )
        # Output projection
        self.o_proj = nn.Linear(
            self.num_heads * self.v_head_dim,
            config.hidden_size,
            bias=config.attention_bias,
        )
        self.scaling = self.qk_head_dim ** (-0.5)
        self.indexer = GlmMoeDsaIndexer(config, layer_idx)

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: tuple[torch.Tensor, torch.Tensor],
        attention_mask: torch.Tensor | None,
        past_key_values: Cache | None = None,
        cache_position: torch.LongTensor | None = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> tuple[torch.Tensor, torch.Tensor | None]:
        batch_size, seq_length = hidden_states.shape[:-1]
        cos, sin = position_embeddings
        # ===== Query path =====
        if self.q_lora_rank is None:
            query_states = self.q_proj(hidden_states)
            # No LoRA residual available for the indexer in this configuration.
            q_resid = None
        else:
            q_resid = self.q_a_layernorm(self.q_a_proj(hidden_states))  # [B, S, q_lora_rank]
            query_states = self.q_b_proj(q_resid)
        query_states = query_states.view(batch_size, seq_length, self.num_heads, self.qk_head_dim).transpose(1, 2)
        # Split nope/rope, apply RoPE, recombine — layout: [B, H, S, D]
        q_nope, q_pe = torch.split(query_states, [self.qk_nope_head_dim, self.qk_rope_head_dim], dim=-1)
        q_pe = apply_rotary_pos_emb(q_pe, cos, sin, unsqueeze_dim=1)  # BHSD format
        # ===== KV path =====
        compressed_kv = self.kv_a_proj_with_mqa(hidden_states)  # [B, S, kv_rank + rope_D]
        k_compressed, k_pe = torch.split(compressed_kv, [self.kv_lora_rank, self.qk_rope_head_dim], dim=-1)
        k_compressed = self.kv_a_layernorm(k_compressed)  # [B, S, kv_rank]
        # Expand KV through kv_b_proj
        kv_expanded = self.kv_b_proj(k_compressed)  # [B, S, H * (nope_D + v_D)]
        kv_expanded = kv_expanded.view(batch_size, seq_length, self.num_heads, self.qk_nope_head_dim + self.v_head_dim)
        k_nope, value_states = torch.split(kv_expanded, [self.qk_nope_head_dim, self.v_head_dim], dim=-1)
        k_nope = k_nope.transpose(1, 2)  # [B, H, S, nope_D]
        value_states = value_states.transpose(1, 2)  # [B, H, S, v_D]
        # RoPE on k_pe (single-head rope stream)
        k_pe = k_pe.view(batch_size, 1, seq_length, self.qk_rope_head_dim)  # [B, 1, S, rope_D]
        k_pe = apply_rotary_pos_emb(k_pe, cos, sin, unsqueeze_dim=1)  # BHSD format
        k_pe = k_pe.expand(-1, self.num_heads, -1, -1)  # [B, H, S, rope_D]
        # Assemble full Q and K
        query_states = torch.cat([q_nope, q_pe], dim=-1)  # [B, H, S, qk_head_dim]
        key_states = torch.cat([k_nope, k_pe], dim=-1)  # [B, H, S, qk_head_dim]
        # Cache update
        if past_key_values is not None:
            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
            key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
        # ===== Indexer (DSA sparse mask) =====
        # attention_mask is [B, 1, S, T] (4D) for eager and (2D) otherwise but indexer works with [B, S, T] (3D)
        indexer_mask = (
            attention_mask[:, 0, :, :]
            if attention_mask is not None and attention_mask.dim() == 4
            else attention_mask.unsqueeze(1)
            if attention_mask is not None
            else None
        )
        topk_indices = self.indexer(
            hidden_states,
            q_resid,
            position_embeddings,
            indexer_mask,
            use_cache=past_key_values is not None,
        )  # [B, S, topk]
        # Build combined DSA + causal mask: -inf everywhere except selected top-k positions
        total_len = key_states.shape[2]
        index_mask = torch.full(
            (batch_size, seq_length, total_len),
            float("-inf"),
            device=hidden_states.device,
            dtype=query_states.dtype,
        )
        index_mask.scatter_(-1, topk_indices, 0.0)  # [B, S, T]
        index_mask = index_mask.unsqueeze(1)  # [B, 1, S, T]
        if attention_mask is not None and attention_mask.dim() == 4:
            # 4D causal mask: truncate to the cached length and add (both are additive -inf masks).
            causal_mask = attention_mask[..., :total_len]
            combined_mask = index_mask + causal_mask
        else:
            # NOTE(review): for a non-4D attention_mask this relies on `masked_fill` broadcasting the
            # [B, 1, S, T] index_mask against the 2D mask — verify this path with a padded SDPA batch.
            combined_mask = (
                attention_mask.masked_fill(index_mask == float("-inf"), float("-inf"))
                if attention_mask is not None
                else index_mask
            )
        # Flash attention head_dim padding (qk_head_dim != v_head_dim)
        if is_flash_attention_requested(self.config) and self.qk_head_dim != self.v_head_dim:
            value_states = F.pad(value_states, [0, self.qk_head_dim - self.v_head_dim])
        attention_interface: Callable = ALL_ATTENTION_FUNCTIONS.get_interface(
            self.config._attn_implementation, eager_attention_forward
        )
        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            combined_mask,
            dropout=0.0 if not self.training else self.attention_dropout,
            scaling=self.scaling,
            indices=topk_indices,  # flash_mla_with_kvcache
            **kwargs,
        )
        # Undo the flash-attention value padding before the output projection.
        if is_flash_attention_requested(self.config) and self.qk_head_dim != self.v_head_dim:
            attn_output = attn_output[:, :, :, : self.v_head_dim]
        attn_output = attn_output.reshape(batch_size, seq_length, -1).contiguous()
        attn_output = self.o_proj(attn_output)
        return attn_output, attn_weights
class GlmMoeDsaDecoderLayer(Glm4MoeLiteDecoderLayer):
    """Decoder layer; reuses the GLM-4-MoE-Lite implementation unchanged."""
class GlmMoeDsaPreTrainedModel(Glm4MoePreTrainedModel):
    """Base class holding loading/quantization/attention-backend flags for GlmMoeDsa models."""

    # NOTE: FP8 quantization uses `_keep_in_fp32_modules` (not `_strict`) to decide which modules to NOT convert.
    # We must keep `indexer.weights_proj` as a plain Linear to match the checkpoint (no `weight_scale_inv`).
    _keep_in_fp32_modules = ["indexer.weights_proj"]
    _keep_in_fp32_modules_strict = ["e_score_correction_bias"]
    # Ignore weights for layer index 78 found in the checkpoint but not instantiated here
    # (num_hidden_layers defaults to 78, i.e. indices 0..77) — presumably an extra head/MTP
    # layer shipped with the checkpoint; TODO confirm.
    _keys_to_ignore_on_load_unexpected = [r"model\.layers\.78.*"]
    _supports_flash_attn = False  # flash-mla kernels need a bit more work in the way we enable them!
    _supports_sdpa = True
    _supports_flex_attn = False
    _compatible_flash_implementations = ["kernels-community/flash-mla"]
class GlmMoeDsaModel(Glm4MoeModel):
    """Bare GlmMoeDsa transformer; reuses the GLM-4-MoE model implementation unchanged."""
class GlmMoeDsaForCausalLM(Glm4MoeForCausalLM):
    """GlmMoeDsa with a causal language-modeling head; reuses the GLM-4-MoE implementation unchanged."""
# Public names re-exported from this modular file (picked up by the generated modeling module).
__all__ = [
    "GlmMoeDsaConfig",
    "GlmMoeDsaPreTrainedModel",
    "GlmMoeDsaModel",
    "GlmMoeDsaForCausalLM",
]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/glm_moe_dsa/modular_glm_moe_dsa.py",
"license": "Apache License 2.0",
"lines": 547,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:tests/models/glm_moe_dsa/test_modeling_glm_moe_dsa.py | # Copyright 2026 the HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch GlmMoeDsa model."""
import unittest
import torch
from parameterized import parameterized
from transformers import (
AutoModelForCausalLM,
AutoTokenizer,
Cache,
FineGrainedFP8Config,
GlmMoeDsaConfig,
is_torch_available,
set_seed,
)
from transformers.testing_utils import (
require_torch,
require_torch_accelerator,
slow,
)
from ...causal_lm_tester import CausalLMModelTest, CausalLMModelTester
from ...test_modeling_common import (
TEST_EAGER_MATCHES_SDPA_INFERENCE_PARAMETERIZATION,
)
if is_torch_available():
from transformers import GlmMoeDsaForCausalLM, GlmMoeDsaModel
class GlmMoeDsaModelTester(CausalLMModelTester):
    """Tester wiring a tiny GlmMoeDsa model (small MLA ranks, 2 layers) into the shared causal-LM test harness."""

    if is_torch_available():
        base_model_class = GlmMoeDsaModel
        causal_lm_class = GlmMoeDsaForCausalLM

    def __init__(
        self,
        parent,
        n_routed_experts=8,
        kv_lora_rank=32,
        q_lora_rank=16,
        qk_nope_head_dim=64,
        qk_rope_head_dim=64,
        v_head_dim=128,
        num_hidden_layers=2,
        mlp_layer_types=None,
    ):
        """
        Args mirror the (reduced) GlmMoeDsaConfig attributes used to build the tiny test model.

        mlp_layer_types: per-layer MLP pattern; defaults to `["sparse", "dense"]` when not given.
        """
        super().__init__(parent=parent, num_hidden_layers=num_hidden_layers)
        self.n_routed_experts = n_routed_experts
        self.kv_lora_rank = kv_lora_rank
        self.q_lora_rank = q_lora_rank
        self.qk_nope_head_dim = qk_nope_head_dim
        self.qk_rope_head_dim = qk_rope_head_dim
        self.v_head_dim = v_head_dim
        # Avoid a shared mutable default argument (ruff B006): build a fresh list per instance.
        self.mlp_layer_types = ["sparse", "dense"] if mlp_layer_types is None else mlp_layer_types
@require_torch
class GlmMoeDsaModelTest(CausalLMModelTest, unittest.TestCase):
    model_tester_class = GlmMoeDsaModelTester
    # NOTE(review): presumably disabled because MoE routing leaves some experts without
    # gradients on tiny inputs — confirm.
    test_all_params_have_gradient = False
    model_split_percents = [0.5, 0.7, 0.8]

    def _check_past_key_values_for_generate(self, batch_size, past_key_values, seq_length, config):
        """Needs to be overridden as GLM-4.7-Flash has special MLA cache format (though we don't really use the MLA)"""
        self.assertIsInstance(past_key_values, Cache)
        # (batch, head, seq_length, head_features)
        expected_common_shape = (
            batch_size,
            getattr(config, "num_key_value_heads", config.num_attention_heads),
            seq_length,
        )
        # Cached keys carry both the nope and rope parts; values only the value head dim.
        expected_key_shape = expected_common_shape + (config.qk_nope_head_dim + config.qk_rope_head_dim,)
        expected_value_shape = expected_common_shape + (config.v_head_dim,)
        for layer in past_key_values.layers:
            self.assertEqual(layer.keys.shape, expected_key_shape)
            self.assertEqual(layer.values.shape, expected_value_shape)

    def test_default_mlp_layer_types(self):
        # Default pattern built in GlmMoeDsaConfig.__init__: first 3 layers dense, the rest sparse.
        config = GlmMoeDsaConfig(num_hidden_layers=8)
        self.assertEqual(
            config.mlp_layer_types, ["dense", "dense", "dense", "sparse", "sparse", "sparse", "sparse", "sparse"]
        )

    @parameterized.expand(TEST_EAGER_MATCHES_SDPA_INFERENCE_PARAMETERIZATION)
    # NOTE(review): skip reason looks copy-pasted from the Blip2 tests — update the message.
    @unittest.skip("Won't fix: Blip2 + T5 backbone needs custom input preparation for this test")
    def test_eager_matches_sdpa_inference(self, *args):
        pass

    @unittest.skip("Not sure MoE can pass this + indexer outputs are not deterministic wrt padding")
    def test_left_padding_compatibility(
        self,
    ):
        pass

    @unittest.skip("Not sure MoE can pass this + indexer outputs are not deterministic wrt padding")
    def test_sdpa_padding_matches_padding_free_with_position_ids(
        self,
    ):
        pass

    @unittest.skip("Not sure MoE can pass this + indexer outputs are not deterministic wrt padding")
    def test_training_overfit(
        self,
    ):
        pass

    @require_torch_accelerator
    @slow
    def test_flash_attn_2_inference_equivalence_right_padding(self):
        # NOTE(review): reason mentions Qwen2Moe — looks copy-pasted; update the message.
        self.skipTest(reason="Qwen2Moe flash attention does not support right padding")

    @unittest.skip("DSA indexer mask shape mismatch with assisted decoding")
    @parameterized.expand([("random",), ("same",)])
    def test_assisted_decoding_matches_greedy_search(self, assistant_type):
        pass

    @unittest.skip("DSA indexer mask shape mismatch with assisted decoding")
    def test_assisted_decoding_sample(self):
        pass

    @unittest.skip("Requires torch>=2.9.0 for grouped MM")
    def test_eager_matches_batched_and_grouped_inference(self):
        pass

    @unittest.skip("DSA indexer mask shape mismatch with static cache")
    def test_generate_from_inputs_embeds_with_static_cache(self):
        pass

    @unittest.skip("DSA indexer mask shape mismatch with compiled forward")
    def test_generate_compile_model_forward_fullgraph(self):
        pass

    @unittest.skip("DSA indexer mask shape mismatch with compilation")
    def test_generate_compilation_all_outputs(self):
        pass

    @unittest.skip("DSA indexer mask shape mismatch with static cache")
    def test_generate_with_static_cache(self):
        pass
@require_torch_accelerator
@slow
class GlmMoeDsaIntegrationTest(unittest.TestCase):
    @unittest.skip("Test requires 2 nodes")
    def test_glm_moe_dsa_fp8_inference(self):
        """Multi-node FP8 generation smoke test for GLM-5 (kept skipped: needs 2 nodes)."""
        # TORCH_DISTRIBUTED_DEBUG=DETAIL python -m torch.distributed.run --nnodes=2 --nproc_per_node=8 --node_rank=0 --master_addr=ip-26-0-169-86 --master_port=29500
        set_seed(0)  # different ranks need the same seed
        model_id = "zai-org/GLM-5-FP8"
        quantization_config = FineGrainedFP8Config(
            modules_to_not_convert=[
                "model.layers.*.mlp.gate$",
                "model.layers.*.self_attn.indexer.weights_proj$",
                "lm_head",
            ],
            weight_block_size=(128, 128),
        )
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        model = AutoModelForCausalLM.from_pretrained(
            model_id,
            quantization_config=quantization_config,
            tp_plan="auto",
            attn_implementation="eager",
        )
        prompt = ["Hi, introduce yourself", "The capital of France is known for"]
        inputs = tokenizer(prompt, return_tensors="pt", padding=True).to(model.device)
        with torch.no_grad():
            outputs = model.generate(
                **inputs,
                max_new_tokens=16,
            )
        # Fix: `generate` returns a batch of sequences and the expected value below is a list of
        # per-prompt strings, so decode each sequence with `batch_decode` (plain `decode` expects 1D ids).
        output = tokenizer.batch_decode(outputs, skip_special_tokens=False)
        # Fix: `assertqual` was a typo (AttributeError at runtime) — `assertEqual`.
        self.assertEqual(
            output,
            [
                "<|endoftext|><|endoftext|><|endoftext|>Hi, introduce yourself!\nI'm a 18 years old boy from Italy and I'm a student",
                "The capital of France is known for its rich history, culture, and the city of the of the of the of",
            ],
        )
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/glm_moe_dsa/test_modeling_glm_moe_dsa.py",
"license": "Apache License 2.0",
"lines": 168,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:src/transformers/models/qwen3_5/modular_qwen3_5.py | # Copyright 2025 The Qwen Team and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch Qwen3.5 model."""
from typing import Optional
import torch
import torch.nn.functional as F
from torch import nn
from ... import initialization as init
from ...cache_utils import Cache
from ...masking_utils import create_causal_mask
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import BaseModelOutputWithPast, BaseModelOutputWithPooling
from ...modeling_rope_utils import RopeParameters
from ...modeling_utils import PreTrainedModel
from ...processing_utils import Unpack
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, logging
from ...utils.generic import merge_with_config_defaults
from ...utils.output_capturing import capture_outputs
from ..qwen3.modeling_qwen3 import Qwen3ForCausalLM
from ..qwen3_next.configuration_qwen3_next import Qwen3NextConfig
from ..qwen3_next.modeling_qwen3_next import (
Qwen3NextAttention,
Qwen3NextDynamicCache,
Qwen3NextGatedDeltaNet,
Qwen3NextMLP,
Qwen3NextModel,
Qwen3NextPreTrainedModel,
Qwen3NextRMSNorm,
apply_mask_to_padding_states,
)
from ..qwen3_vl.configuration_qwen3_vl import Qwen3VLConfig, Qwen3VLVisionConfig
from ..qwen3_vl.modeling_qwen3_vl import (
Qwen3VLForConditionalGeneration,
Qwen3VLModel,
Qwen3VLModelOutputWithPast,
Qwen3VLTextRotaryEmbedding,
Qwen3VLVisionModel,
Qwen3VLVisionRotaryEmbedding,
)
logger = logging.get_logger(__name__)
class Qwen3_5TextConfig(Qwen3NextConfig):
    r"""
    This is the configuration class to store the configuration of a [`Qwen3_5TextModel`]. It is used to instantiate a
    Qwen3_5 model according to the specified arguments, defining the model architecture.
    Instantiating a configuration with the defaults will yield a similar configuration to that of
    Qwen3.5-9B-Instruct [Qwen/Qwen3.5-9B-Instruct](https://huggingface.co/Qwen/Qwen3.5-9B-Instruct).

    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 248320):
            Vocabulary size of the model. Defines the number of different tokens that can be represented by the
            `inputs_ids`.
        hidden_size (`int`, *optional*, defaults to 4096):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 12288):
            Dimension of the MLP representations.
        num_hidden_layers (`int`, *optional*, defaults to 32):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 16):
            Number of attention heads for each attention layer in the Transformer encoder.
        num_key_value_heads (`int`, *optional*, defaults to 4):
            This is the number of key_value heads that should be used to implement Grouped Query Attention. If
            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
            `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
            by meanpooling all the original heads within that group. For more details checkout [this
            paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to `32`.
        hidden_act (`str`, *optional*, defaults to `"silu"`):
            The non-linear activation function in the decoder.
        max_position_embeddings (`int`, *optional*, defaults to 32768):
            The maximum sequence length that this model might ever be used with.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        rms_norm_eps (`float`, *optional*, defaults to 1e-06):
            The epsilon used by the rms normalization layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether the model's input and output word embeddings should be tied.
        rope_parameters (`RopeParameters`, *optional*):
            Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain
            a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE
            with longer `max_position_embeddings`.
        attention_bias (`bool`, *optional*, defaults to `False`):
            Whether to use a bias in the query, key, value and output projection layers during self-attention.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        head_dim (`int`, *optional*, defaults to 256):
            Projection weights dimension in multi-head attention.
        linear_conv_kernel_dim (`int`, *optional*, defaults to 4):
            Kernel size of the convolution used in linear attention layers.
        linear_key_head_dim (`int`, *optional*, defaults to 128):
            Dimension of each key head in linear attention.
        linear_value_head_dim (`int`, *optional*, defaults to 128):
            Dimension of each value head in linear attention.
        linear_num_key_heads (`int`, *optional*, defaults to 16):
            Number of key heads used in linear attention layers.
        linear_num_value_heads (`int`, *optional*, defaults to 32):
            Number of value heads used in linear attention layers.
        layer_types (`list[str]`, *optional*):
            Types of each layer (attention or linear).
        pad_token_id (`int`, *optional*):
            Padding token id.
        bos_token_id (`int`, *optional*):
            Beginning of stream token id.
        eos_token_id (`int`, *optional*):
            End of stream token id.

    ```python
    >>> from transformers import Qwen3_5TextModel, Qwen3_5TextConfig

    >>> # Initializing a Qwen3.5 style configuration
    >>> configuration = Qwen3_5TextConfig()

    >>> # Initializing a model from the Qwen3.5-9B style configuration
    >>> model = Qwen3_5TextModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```
    """

    model_type = "qwen3_5_text"
    base_config_key = "text_config"
    # Tensor-parallel sharding plan: attention and MLP input projections are
    # split column-wise, output projections row-wise.
    base_model_tp_plan = {
        "layers.*.self_attn.q_proj": "colwise",
        "layers.*.self_attn.k_proj": "colwise",
        "layers.*.self_attn.v_proj": "colwise",
        "layers.*.self_attn.o_proj": "rowwise",
        "layers.*.mlp.gate_proj": "colwise",
        "layers.*.mlp.up_proj": "colwise",
        "layers.*.mlp.down_proj": "rowwise",
    }

    def __init__(
        self,
        vocab_size=248320,
        hidden_size=4096,
        intermediate_size=12288,
        num_hidden_layers=32,
        num_attention_heads=16,
        num_key_value_heads=4,
        hidden_act="silu",
        max_position_embeddings=32768,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        tie_word_embeddings=False,
        rope_parameters: RopeParameters | dict[str, RopeParameters] | None = None,
        attention_bias=False,
        attention_dropout=0.0,
        head_dim=256,
        linear_conv_kernel_dim=4,
        linear_key_head_dim=128,
        linear_value_head_dim=128,
        linear_num_key_heads=16,
        linear_num_value_heads=32,
        layer_types=None,
        pad_token_id: int | None = None,
        bos_token_id: int | None = None,
        eos_token_id: int | None = None,
        **kwargs,
    ):
        # Allow multimodal-RoPE keys inside `rope_parameters` without tripping
        # the generic RoPE validation; they are consumed by the text rotary
        # embedding instead.
        kwargs["ignore_keys_at_rope_validation"] = {"mrope_section", "mrope_interleaved"}
        # NOTE(review): only `tie_word_embeddings` is forwarded explicitly; in
        # this modular source the remaining signature arguments are expected to
        # be wired up by the modular conversion tooling — confirm against the
        # generated configuration_qwen3_5.py.
        super().__init__(
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )
        # Qwen3.5 (dense) has no MoE blocks: drop the MoE-specific attributes
        # inherited from Qwen3NextConfig.
        del self.decoder_sparse_step
        del self.norm_topk_prob
        del self.mlp_only_layers
        del self.moe_intermediate_size
        del self.shared_expert_intermediate_size
        del self.num_experts_per_tok
        del self.num_experts
        del self.output_router_logits
        del self.router_aux_loss_coef
class Qwen3_5VisionConfig(Qwen3VLVisionConfig):
    """Vision-backbone configuration for Qwen3.5.

    Identical to the Qwen3-VL vision configuration except that the deepstack
    feature-injection indexes are removed (Qwen3.5 does not use deepstack).
    """

    model_type = "qwen3_5"

    def __init__(
        self,
        depth=27,
        hidden_size=1152,
        hidden_act="gelu_pytorch_tanh",
        intermediate_size=4304,
        num_heads=16,
        in_channels=3,
        patch_size=16,
        spatial_merge_size=2,
        temporal_patch_size=2,
        out_hidden_size=3584,
        num_position_embeddings=2304,
        initializer_range=0.02,
        **kwargs,
    ):
        # NOTE(review): the explicit signature arguments are not forwarded here;
        # in this modular source they are expected to be merged by the modular
        # conversion tooling — confirm against the generated configuration file.
        super().__init__(**kwargs)
        # Qwen3.5 does not use deepstack multi-level feature injection.
        del self.deepstack_visual_indexes
class Qwen3_5Config(Qwen3VLConfig):
    r"""
    This is the configuration class to store the configuration of a [`Qwen3_5Model`]. It is used to instantiate a
    Qwen3.5 model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of
    Qwen3.5-9B-Instruct [Qwen/Qwen3.5-9B-Instruct](https://huggingface.co/Qwen/Qwen3.5-9B-Instruct).

    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.

    Args:
        text_config (`Union[PreTrainedConfig, dict]`, *optional*, defaults to `Qwen3_5TextConfig`):
            The config object or dictionary of the text backbone.
        vision_config (`Union[PreTrainedConfig, dict]`, *optional*, defaults to `Qwen3_5VisionConfig`):
            The config object or dictionary of the vision backbone.
        image_token_id (`int`, *optional*, defaults to 248056):
            The image token index to encode the image prompt.
        video_token_id (`int`, *optional*, defaults to 248057):
            The video token index to encode the image prompt.
        vision_start_token_id (`int`, *optional*, defaults to 248053):
            The start token index to encode the image prompt.
        vision_end_token_id (`int`, *optional*, defaults to 248054):
            The end token index to encode the image prompt.
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether to tie the word embeddings.

    ```python
    >>> from transformers import Qwen3_5ForConditionalGeneration, Qwen3_5Config

    >>> # Initializing a Qwen3.5 style configuration
    >>> configuration = Qwen3_5Config()

    >>> # Initializing a model from the Qwen3.5-9B style configuration
    >>> model = Qwen3_5ForConditionalGeneration(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "qwen3_5"
    sub_configs = {"vision_config": Qwen3_5VisionConfig, "text_config": Qwen3_5TextConfig}

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        image_token_id=248056,
        video_token_id=248057,
        vision_start_token_id=248053,
        vision_end_token_id=248054,
        tie_word_embeddings=False,
        **kwargs,
    ):
        # Pure pass-through to the Qwen3-VL composite config; only the default
        # sub-config classes and token ids differ.
        super().__init__(
            text_config=text_config,
            vision_config=vision_config,
            image_token_id=image_token_id,
            video_token_id=video_token_id,
            vision_start_token_id=vision_start_token_id,
            vision_end_token_id=vision_end_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )
class Qwen3_5DynamicCache(Qwen3NextDynamicCache):
    """Hybrid cache (attention KV states + linear-attention conv/recurrent states), unchanged from Qwen3-Next."""

    pass
class Qwen3_5VisionRotaryEmbedding(Qwen3VLVisionRotaryEmbedding):
    """2D rotary embedding for the vision tower, unchanged from Qwen3-VL."""

    pass
class Qwen3_5TextRotaryEmbedding(Qwen3VLTextRotaryEmbedding):
    """Multimodal (mrope) rotary embedding for the Qwen3.5 text decoder."""

    def __init__(self, config: Qwen3_5TextConfig, device=None):
        # NOTE(review): bare `super().__init__()` — in this modular source the
        # parent initialization is expanded by the conversion tooling; confirm
        # against the generated modeling file.
        super().__init__()
        # How many rotary frequency pairs are assigned to each of the
        # (temporal, height, width) mrope components.
        self.mrope_section = config.rope_parameters.get("mrope_section", [11, 11, 10])
def compute_default_rope_parameters(
    config: Qwen3_5TextConfig | None = None,
    device: Optional["torch.device"] = None,
    seq_len: int | None = None,
) -> tuple["torch.Tensor", float]:
    """Compute the standard (unscaled) RoPE inverse frequencies for *config*.

    Returns a `(inv_freq, attention_factor)` pair, where `inv_freq` holds one
    inverse frequency per rotary dimension pair and `attention_factor` is the
    post-processing scaling applied to attention (always 1.0 for default RoPE).
    `seq_len` is accepted for signature compatibility but unused.
    """
    rope_params = config.rope_parameters
    theta = rope_params["rope_theta"]
    rotary_fraction = rope_params.get("partial_rotary_factor", 1.0)
    # Per-head dimension: explicit `head_dim` wins, otherwise derive it.
    per_head_dim = getattr(config, "head_dim", None) or config.hidden_size // config.num_attention_heads
    rotary_dim = int(per_head_dim * rotary_fraction)
    # Default RoPE applies no extra attention scaling.
    attention_scaling = 1.0
    # inv_freq[i] = theta ** (-2i / rotary_dim), one entry per dimension pair.
    exponents = torch.arange(0, rotary_dim, 2, dtype=torch.int64).to(device=device, dtype=torch.float)
    inv_freq = 1.0 / (theta ** (exponents / rotary_dim))
    return inv_freq, attention_scaling
class Qwen3_5GatedDeltaNet(Qwen3NextGatedDeltaNet):
    """Gated DeltaNet ("linear attention") mixer for Qwen3.5.

    Differs from the Qwen3-Next parent by using four separate input
    projections (qkv, z, b, a) instead of the fused qkvz/ba projections.
    """

    def __init__(self, config: Qwen3_5Config, layer_idx: int):
        super().__init__(config, layer_idx)
        # Remove the parent's fused-projection locals/modules. The bare `del`
        # of undefined names (hence `noqa: F821`) is a modular-converter
        # pattern: the names exist only after the parent __init__ is inlined
        # by the conversion tooling.
        del projection_size_qkvz  # noqa: F821
        del projection_size_ba  # noqa: F821
        del self.in_proj_qkvz
        del self.in_proj_ba
        # Separate projections: fused q/k/v, output gate z, and per-value-head
        # scalars b (beta gate input) and a (decay input).
        self.in_proj_qkv = nn.Linear(self.hidden_size, self.key_dim * 2 + self.value_dim, bias=False)
        self.in_proj_z = nn.Linear(self.hidden_size, self.value_dim, bias=False)
        self.in_proj_b = nn.Linear(self.hidden_size, self.num_v_heads, bias=False)
        self.in_proj_a = nn.Linear(self.hidden_size, self.num_v_heads, bias=False)

    def fix_query_key_value_ordering(self):
        # The parent's fused-projection reordering helper does not apply to the
        # split-projection layout used here.
        raise AttributeError("Not needed for Qwen3.5 Series")

    def forward(
        self,
        hidden_states: torch.Tensor,
        cache_params: Qwen3_5DynamicCache | None = None,
        cache_position: torch.LongTensor | None = None,
        attention_mask: torch.Tensor | None = None,
    ):
        # Zero out padded positions so they do not leak into the recurrent state.
        hidden_states = apply_mask_to_padding_states(hidden_states, attention_mask)

        # Set up dimensions for reshapes later
        batch_size, seq_len, _ = hidden_states.shape

        # Single-token decode with an existing cache: take the fast recurrent path.
        use_precomputed_states = (
            cache_params is not None
            and cache_params.has_previous_state
            and seq_len == 1
            and cache_position is not None
        )

        # getting projected states from cache if it exists
        if cache_params is not None:
            conv_state = cache_params.conv_states[self.layer_idx]
            recurrent_state = cache_params.recurrent_states[self.layer_idx]

        # Project inputs; qkv is moved to (batch, channels, seq) for the conv.
        mixed_qkv = self.in_proj_qkv(hidden_states)
        mixed_qkv = mixed_qkv.transpose(1, 2)

        z = self.in_proj_z(hidden_states)
        z = z.reshape(batch_size, seq_len, -1, self.head_v_dim)

        b = self.in_proj_b(hidden_states)
        a = self.in_proj_a(hidden_states)

        if use_precomputed_states:
            # 2. Convolution sequence transformation
            # NOTE: the conv state is updated in `causal_conv1d_update`
            mixed_qkv = self.causal_conv1d_update(
                mixed_qkv,
                conv_state,
                self.conv1d.weight.squeeze(1),
                self.conv1d.bias,
                self.activation,
            )
        else:
            if cache_params is not None:
                # Left-pad to the conv kernel size and store as the new conv state.
                conv_state = F.pad(mixed_qkv, (self.conv_kernel_size - mixed_qkv.shape[-1], 0))
                cache_params.conv_states[self.layer_idx] = conv_state
            if self.causal_conv1d_fn is not None:
                # Fused causal-conv kernel when available.
                mixed_qkv = self.causal_conv1d_fn(
                    x=mixed_qkv,
                    weight=self.conv1d.weight.squeeze(1),
                    bias=self.conv1d.bias,
                    activation=self.activation,
                    seq_idx=None,
                )
            else:
                # Eager fallback: truncate the causal conv output to seq_len.
                mixed_qkv = F.silu(self.conv1d(mixed_qkv)[:, :, :seq_len])

        # Back to (batch, seq, channels), then split into per-head q/k/v.
        mixed_qkv = mixed_qkv.transpose(1, 2)
        query, key, value = torch.split(
            mixed_qkv,
            [
                self.key_dim,
                self.key_dim,
                self.value_dim,
            ],
            dim=-1,
        )
        query = query.reshape(batch_size, seq_len, -1, self.head_k_dim)
        key = key.reshape(batch_size, seq_len, -1, self.head_k_dim)
        value = value.reshape(batch_size, seq_len, -1, self.head_v_dim)

        # Per-token, per-head gates: beta in (0, 1), g < 0 is the log decay.
        beta = b.sigmoid()
        # If the model is loaded in fp16, without the .float() here, A might be -inf
        g = -self.A_log.float().exp() * F.softplus(a.float() + self.dt_bias)

        # GQA-style sharing: repeat k/q heads to match the number of value heads.
        if self.num_v_heads // self.num_k_heads > 1:
            query = query.repeat_interleave(self.num_v_heads // self.num_k_heads, dim=2)
            key = key.repeat_interleave(self.num_v_heads // self.num_k_heads, dim=2)

        if not use_precomputed_states:
            # Prefill: chunked delta-rule scan over the whole sequence.
            core_attn_out, last_recurrent_state = self.chunk_gated_delta_rule(
                query,
                key,
                value,
                g=g,
                beta=beta,
                initial_state=None,
                output_final_state=cache_params is not None,
                use_qk_l2norm_in_kernel=True,
            )
        else:
            # Decode: single-step recurrent update from the cached state.
            core_attn_out, last_recurrent_state = self.recurrent_gated_delta_rule(
                query,
                key,
                value,
                g=g,
                beta=beta,
                initial_state=recurrent_state,
                output_final_state=cache_params is not None,
                use_qk_l2norm_in_kernel=True,
            )

        # Update cache
        if cache_params is not None:
            cache_params.recurrent_states[self.layer_idx] = last_recurrent_state

        # reshape input data into 2D tensor
        core_attn_out = core_attn_out.reshape(-1, self.head_v_dim)
        z = z.reshape(-1, self.head_v_dim)
        # Gated normalization with z, then back to (batch, seq, hidden).
        core_attn_out = self.norm(core_attn_out, z)
        core_attn_out = core_attn_out.reshape(batch_size, seq_len, -1)

        output = self.out_proj(core_attn_out)
        return output
class Qwen3_5Attention(Qwen3NextAttention):
    """Full (softmax) attention layer, unchanged from Qwen3-Next."""

    pass
class Qwen3_5MLP(Qwen3NextMLP):
    """Gated MLP, unchanged from Qwen3-Next except the intermediate size is recorded on the instance."""

    def __init__(self, config: Qwen3_5Config, intermediate_size: int):
        super().__init__(config, intermediate_size)
        self.intermediate_size = intermediate_size
class Qwen3_5RMSNorm(Qwen3NextRMSNorm):
    """RMSNorm with the (1 + weight) parameterization, unchanged from Qwen3-Next."""

    pass
class Qwen3_5DecoderLayer(GradientCheckpointingLayer):
    """Hybrid decoder layer: a token mixer (linear attention or full attention,
    chosen per layer by `config.layer_types`) followed by a gated MLP, each with
    a pre-norm residual connection."""

    def __init__(self, config: Qwen3_5TextConfig, layer_idx: int):
        super().__init__()
        self.hidden_size = config.hidden_size
        # Per-layer mixer type: "linear_attention" or "full_attention".
        self.layer_type = config.layer_types[layer_idx]
        if self.layer_type == "linear_attention":
            self.linear_attn = Qwen3_5GatedDeltaNet(config, layer_idx)
        elif self.layer_type == "full_attention":
            self.self_attn = Qwen3_5Attention(config, layer_idx)

        self.mlp = Qwen3_5MLP(config, config.intermediate_size)
        self.input_layernorm = Qwen3_5RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.post_attention_layernorm = Qwen3_5RMSNorm(config.hidden_size, eps=config.rms_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: tuple[torch.Tensor, torch.Tensor],
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: Cache | None = None,
        cache_position: torch.LongTensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> torch.FloatTensor:
        residual = hidden_states
        hidden_states = self.input_layernorm(hidden_states)

        # Token Mixer
        if self.layer_type == "linear_attention":
            hidden_states = self.linear_attn(
                hidden_states=hidden_states,
                cache_params=past_key_values,
                cache_position=cache_position,
                attention_mask=attention_mask,
            )
        elif self.layer_type == "full_attention":
            # Self Attention
            hidden_states, _ = self.self_attn(
                hidden_states=hidden_states,
                attention_mask=attention_mask,
                position_ids=position_ids,
                past_key_values=past_key_values,
                cache_position=cache_position,
                position_embeddings=position_embeddings,
                **kwargs,
            )
        hidden_states = residual + hidden_states

        # Fully Connected
        residual = hidden_states
        hidden_states = self.post_attention_layernorm(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states
        return hidden_states
class Qwen3_5PreTrainedModel(Qwen3NextPreTrainedModel):
    """Base class wiring config type, device-map splitting, and weight init for Qwen3.5 models."""

    config: Qwen3_5Config
    _no_split_modules = ["Qwen3_5DecoderLayer", "Qwen3_5VisionBlock"]
    _can_record_outputs = {
        "hidden_states": Qwen3_5DecoderLayer,
        "attentions": Qwen3_5Attention,
    }

    @torch.no_grad()
    def _init_weights(self, module):
        # Generic init first, then the Qwen3.5-specific parameters.
        PreTrainedModel._init_weights(self, module)
        if isinstance(module, Qwen3_5GatedDeltaNet):
            init.ones_(module.dt_bias)
            # A_log ~ log(Uniform(0, 16)) per value head.
            init.copy_(module.A_log, torch.empty_like(module.A_log).uniform_(0, 16).log_())
        # We initialize with 0s to be 1 centered as the RMSNorm here does (1 + weight)
        elif isinstance(module, Qwen3_5RMSNorm):
            init.zeros_(module.weight)
        elif isinstance(module, Qwen3_5VisionRotaryEmbedding):
            # Standard RoPE inverse-frequency table for the vision rotary buffer.
            inv_freq = 1.0 / (module.theta ** (torch.arange(0, module.dim, 2, dtype=torch.float) / module.dim))
            init.copy_(module.inv_freq, inv_freq)
class Qwen3_5VisionModel(Qwen3VLVisionModel):
    """Qwen3.5 vision tower: the Qwen3-VL vision model without deepstack feature injection."""

    config: Qwen3_5VisionConfig
    _no_split_modules = ["Qwen3_5VisionBlock"]

    def __init__(self, config, *inputs, **kwargs) -> None:
        super().__init__(config, *inputs, **kwargs)
        # Qwen3.5 does not expose intermediate (deepstack) vision features.
        del self.deepstack_visual_indexes
        del self.deepstack_merger_list

    @merge_with_config_defaults
    @capture_outputs
    def forward(self, hidden_states: torch.Tensor, grid_thw: torch.Tensor, **kwargs) -> torch.Tensor:
        """
        Args:
            hidden_states (`torch.Tensor` of shape `(seq_len, hidden_size)`):
                Flattened vision patches; they are passed through `patch_embed` first.
            grid_thw (`torch.Tensor` of shape `(num_images_or_videos, 3)`):
                The temporal, height and width of feature shape of each image in LLM.

        Returns:
            `BaseModelOutputWithPooling` with `last_hidden_state` (per-patch features)
            and `pooler_output` (spatially merged features from `self.merger`).
        """
        # Patch embedding plus interpolated learned position embeddings.
        hidden_states = self.patch_embed(hidden_states)
        pos_embeds = self.fast_pos_embed_interpolate(grid_thw)
        hidden_states = hidden_states + pos_embeds
        rotary_pos_emb = self.rot_pos_emb(grid_thw)

        seq_len, _ = hidden_states.size()
        hidden_states = hidden_states.reshape(seq_len, -1)
        rotary_pos_emb = rotary_pos_emb.reshape(seq_len, -1)
        # Duplicate the rotary table to cover both halves of the head dimension.
        emb = torch.cat((rotary_pos_emb, rotary_pos_emb), dim=-1)
        position_embeddings = (emb.cos(), emb.sin())

        # Cumulative patch counts per image/video, used as attention segment boundaries.
        cu_seqlens = torch.repeat_interleave(grid_thw[:, 1] * grid_thw[:, 2], grid_thw[:, 0]).cumsum(
            dim=0,
            # Select dtype based on the following factors:
            #  - FA2 requires that cu_seqlens_q must have dtype int32
            #  - torch.onnx.export requires that cu_seqlens_q must have same dtype as grid_thw
            # See https://github.com/huggingface/transformers/pull/34852 for more information
            dtype=grid_thw.dtype if torch.jit.is_tracing() else torch.int32,
        )
        cu_seqlens = F.pad(cu_seqlens, (1, 0), value=0)

        for blk in self.blocks:
            hidden_states = blk(
                hidden_states,
                cu_seqlens=cu_seqlens,
                position_embeddings=position_embeddings,
                **kwargs,
            )

        merged_hidden_states = self.merger(hidden_states)
        return BaseModelOutputWithPooling(
            last_hidden_state=hidden_states,
            pooler_output=merged_hidden_states,
        )
class Qwen3_5ModelOutputWithPast(Qwen3VLModelOutputWithPast):
    """Model output with past key values and rope deltas, unchanged from Qwen3-VL."""

    pass
class Qwen3_5TextModel(Qwen3NextModel):
    """Qwen3.5 text decoder: the Qwen3-Next stack with a multimodal (mrope) rotary embedding."""

    def __init__(self, config: Qwen3_5TextConfig):
        super().__init__(config)
        # Replace the parent's rotary embedding with the mrope variant.
        self.rotary_emb = Qwen3_5TextRotaryEmbedding(config=config)

    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: Cache | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        use_cache: bool | None = None,
        cache_position: torch.LongTensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> BaseModelOutputWithPast:
        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)

        if use_cache and past_key_values is None:
            # Hybrid cache: attention KV states plus linear-attention conv/recurrent states.
            past_key_values = Qwen3_5DynamicCache(config=self.config)

        if cache_position is None:
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position = torch.arange(
                past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
            )

        # mrope: the hard coded `4` is for text, temporal, height and width.
        if position_ids is None:
            position_ids = cache_position.view(1, 1, -1).expand(4, inputs_embeds.shape[0], -1)
        elif position_ids.ndim == 2:
            position_ids = position_ids[None, ...].expand(4, position_ids.shape[0], -1)

        if position_ids.ndim == 3 and position_ids.shape[0] == 4:
            # Split the plain text positions (used for mask construction) from
            # the 3D rope components (temporal, height, width).
            text_position_ids = position_ids[0]
            position_ids = position_ids[1:]
        else:
            text_position_ids = None

        causal_mask = create_causal_mask(
            config=self.config,
            inputs_embeds=inputs_embeds,
            attention_mask=attention_mask,
            cache_position=cache_position,
            past_key_values=past_key_values,
            position_ids=text_position_ids,
        )
        # Linear-attention layers take a padding-style mask, not the causal mask.
        linear_attn_mask = self._update_linear_attn_mask(attention_mask, cache_position)

        hidden_states = inputs_embeds
        position_embeddings = self.rotary_emb(hidden_states, position_ids)

        for layer_idx, decoder_layer in enumerate(self.layers[: self.config.num_hidden_layers]):
            # Pick the mask matching the layer's mixer type.
            layer_mask = linear_attn_mask if decoder_layer.layer_type == "linear_attention" else causal_mask
            hidden_states = decoder_layer(
                hidden_states,
                position_embeddings=position_embeddings,
                attention_mask=layer_mask,
                position_ids=position_ids,
                past_key_values=past_key_values,
                use_cache=use_cache,
                cache_position=cache_position,
                **kwargs,
            )

        hidden_states = self.norm(hidden_states)
        return Qwen3_5ModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values,
        )
class Qwen3_5Model(Qwen3VLModel):
    """Multimodal Qwen3.5 model: vision tower + hybrid text decoder."""

    def get_video_features(
        self,
        **super_kwargs,
    ) -> tuple | BaseModelOutputWithPooling:
        # Same implementation as for images
        return super().get_video_features(**super_kwargs)

    def get_image_features(
        self,
        pixel_values: torch.FloatTensor,
        image_grid_thw: torch.LongTensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple | BaseModelOutputWithPooling:
        """Run the vision tower and split the merged features per image."""
        pixel_values = pixel_values.type(self.visual.dtype)
        vision_output: BaseModelOutputWithPooling = self.visual(
            pixel_values, grid_thw=image_grid_thw, return_dict=True, **kwargs
        )
        image_embeds = vision_output.pooler_output
        # One chunk per image: total patches divided by the spatial merge factor.
        split_sizes = (image_grid_thw.prod(-1) // self.visual.spatial_merge_size**2).tolist()
        image_embeds = torch.split(image_embeds, split_sizes)
        vision_output.pooler_output = image_embeds
        return vision_output

    @auto_docstring
    @can_return_tuple
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: Cache | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        pixel_values: torch.Tensor | None = None,
        pixel_values_videos: torch.FloatTensor | None = None,
        image_grid_thw: torch.LongTensor | None = None,
        video_grid_thw: torch.LongTensor | None = None,
        mm_token_type_ids: torch.IntTensor | None = None,
        cache_position: torch.LongTensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple | Qwen3_5ModelOutputWithPast:
        r"""
        image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*):
            The temporal, height and width of feature shape of each image in LLM.
        video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*):
            The temporal, height and width of feature shape of each video in LLM.
        """
        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

        if inputs_embeds is None:
            inputs_embeds = self.get_input_embeddings()(input_ids)

        if pixel_values is not None:
            # Encode images and scatter the features into the image placeholder tokens.
            image_outputs: BaseModelOutputWithPooling = self.get_image_features(
                pixel_values, image_grid_thw, return_dict=True
            )
            image_embeds = image_outputs.pooler_output
            image_embeds = torch.cat(image_embeds, dim=0).to(inputs_embeds.device, inputs_embeds.dtype)
            image_mask, _ = self.get_placeholder_mask(
                input_ids, inputs_embeds=inputs_embeds, image_features=image_embeds
            )
            inputs_embeds = inputs_embeds.masked_scatter(image_mask, image_embeds)

        if pixel_values_videos is not None:
            # Same scatter procedure for video placeholder tokens.
            video_outputs: BaseModelOutputWithPooling = self.get_video_features(
                pixel_values_videos, video_grid_thw, return_dict=True
            )
            video_embeds = video_outputs.pooler_output
            video_embeds = torch.cat(video_embeds, dim=0).to(inputs_embeds.device, inputs_embeds.dtype)
            _, video_mask = self.get_placeholder_mask(
                input_ids, inputs_embeds=inputs_embeds, video_features=video_embeds
            )
            inputs_embeds = inputs_embeds.masked_scatter(video_mask, video_embeds)

        if position_ids is None:
            # Build the 3D (temporal/height/width) mrope position ids.
            position_ids = self.compute_3d_position_ids(
                input_ids=input_ids,
                image_grid_thw=image_grid_thw,
                video_grid_thw=video_grid_thw,
                inputs_embeds=inputs_embeds,
                attention_mask=attention_mask,
                past_key_values=past_key_values,
                mm_token_type_ids=mm_token_type_ids,
            )

        outputs = self.language_model(
            input_ids=None,
            position_ids=position_ids,
            attention_mask=attention_mask,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            cache_position=cache_position,
            **kwargs,
        )

        return Qwen3_5ModelOutputWithPast(
            **outputs,
            rope_deltas=self.rope_deltas,
        )
class Qwen3_5ForCausalLM(Qwen3ForCausalLM):
    """Text-only causal LM head on top of the Qwen3.5 hybrid decoder."""

    config: Qwen3_5TextConfig
    # Checkpoint keys matching these patterns are ignored on load — presumably
    # the multi-token-prediction head and the vision tower from full VL
    # checkpoints; confirm against the released checkpoints.
    _keys_to_ignore_on_load_unexpected = [r"^mtp.*", r"^model.visual.*"]

    def __init__(self, config):
        super().__init__(config)
        # Swap in the Qwen3.5 hybrid (linear + full attention) decoder.
        self.model = Qwen3_5TextModel(config)
class Qwen3_5ForConditionalGeneration(Qwen3VLForConditionalGeneration):
    """Multimodal generation model; pure delegations to the Qwen3-VL parent.

    The explicit pass-through overrides are a modular-source pattern so the
    conversion tooling regenerates these methods for this model.
    """

    def get_video_features(
        self,
        **super_kwargs,
    ) -> tuple | BaseModelOutputWithPooling:
        return super().get_video_features(**super_kwargs)

    def get_image_features(
        self,
        **super_kwargs,
    ) -> tuple | BaseModelOutputWithPooling:
        return super().get_image_features(**super_kwargs)
# Public symbols exported by this module.
__all__ = [
    "Qwen3_5Config",
    "Qwen3_5TextConfig",
    "Qwen3_5VisionModel",
    "Qwen3_5TextModel",
    "Qwen3_5Model",
    "Qwen3_5ForCausalLM",
    "Qwen3_5ForConditionalGeneration",
    "Qwen3_5PreTrainedModel",
]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/qwen3_5/modular_qwen3_5.py",
"license": "Apache License 2.0",
"lines": 706,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/qwen3_5/tokenization_qwen3_5.py | # Copyright 2024 The Qwen team, Alibaba Group and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes for Qwen3.5."""
from tokenizers import Regex, Tokenizer, decoders, normalizers, pre_tokenizers
from tokenizers.models import BPE
from ...tokenization_utils_tokenizers import TokenizersBackend
from ...utils import logging
logger = logging.get_logger(__name__)

# GPT-2-style byte-level pre-tokenization split pattern: case-insensitive
# English contractions, runs of letters (with combining marks), single digits,
# punctuation runs, and whitespace/newline handling.
PRETOKENIZE_REGEX = r"""(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\r\n\p{L}\p{N}]?[\p{L}\p{M}]+|\p{N}| ?[^\s\p{L}\p{M}\p{N}]+[\r\n]*|\s*[\r\n]+|\s+(?!\S)|\s+"""
class Qwen3_5Tokenizer(TokenizersBackend):
    """Byte-level BPE tokenizer for Qwen3.5, built on the `tokenizers` backend.

    Uses NFC normalization, a byte-level BPE model, and a two-step
    pre-tokenization chain: a Qwen split pattern followed by byte-level mapping.
    """

    model_input_names = ["input_ids", "attention_mask"]
    model = BPE

    def __init__(
        self,
        vocab: str | dict[str, int] | None = None,
        merges: str | list[str] | None = None,
        vocab_file=None,
        merges_file=None,
        unk_token: str = "<|endoftext|>",
        bos_token=None,
        eos_token: str = "<|endoftext|>",
        pad_token: str = "<|endoftext|>",
        add_prefix_space=None,
        **kwargs,
    ):
        # `None` means "use the default", which for this tokenizer is False.
        self.add_prefix_space = False if add_prefix_space is None else add_prefix_space
        # Minimal fallback vocabulary containing only the end-of-text token.
        self._vocab = {"<|endoftext|>": 0} if vocab is None else vocab
        self._merges = [] if not merges else merges

        bpe_model = BPE(
            vocab=self._vocab,
            merges=self._merges,
            dropout=None,
            unk_token=None,
            continuing_subword_prefix="",
            end_of_word_suffix="",
            fuse_unk=False,
            byte_fallback=False,
        )
        backend = Tokenizer(bpe_model)
        backend.decoder = decoders.ByteLevel()
        backend.normalizer = normalizers.NFC()
        # First isolate contraction/word/number/punctuation chunks with the
        # Qwen split pattern, then map each chunk to byte-level tokens.
        split_step = pre_tokenizers.Split(
            Regex(PRETOKENIZE_REGEX),
            behavior="isolated",
            invert=False,
        )
        byte_level_step = pre_tokenizers.ByteLevel(
            add_prefix_space=self.add_prefix_space,
            use_regex=False,
        )
        backend.pre_tokenizer = pre_tokenizers.Sequence([split_step, byte_level_step])
        self._tokenizer = backend

        super().__init__(
            vocab_file=vocab_file,
            merges_file=merges_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
# Public symbols exported by this module.
__all__ = ["Qwen3_5Tokenizer"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/qwen3_5/tokenization_qwen3_5.py",
"license": "Apache License 2.0",
"lines": 83,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/qwen3_5_moe/modular_qwen3_5_moe.py | # Copyright 2025 The Qwen Team and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch Qwen3.5Moe model."""
import torch
from ... import initialization as init
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import BaseModelOutputWithPooling
from ...modeling_rope_utils import RopeParameters
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from ..qwen3_5.configuration_qwen3_5 import Qwen3_5VisionConfig
from ..qwen3_5.modeling_qwen3_5 import (
Qwen3_5GatedDeltaNet,
Qwen3_5MLP,
Qwen3_5Model,
Qwen3_5TextModel,
Qwen3_5TextRotaryEmbedding,
Qwen3_5VisionModel,
Qwen3_5VisionRotaryEmbedding,
)
from ..qwen3_next.configuration_qwen3_next import Qwen3NextConfig
from ..qwen3_next.modeling_qwen3_next import (
Qwen3NextAttention,
Qwen3NextDecoderLayer,
Qwen3NextDynamicCache,
Qwen3NextExperts,
Qwen3NextForCausalLM,
Qwen3NextPreTrainedModel,
Qwen3NextRMSNorm,
Qwen3NextSparseMoeBlock,
)
from ..qwen3_vl.configuration_qwen3_vl import Qwen3VLConfig
from ..qwen3_vl_moe.modeling_qwen3_vl_moe import (
Qwen3VLMoeCausalLMOutputWithPast,
Qwen3VLMoeForConditionalGeneration,
Qwen3VLMoeModelOutputWithPast,
Qwen3VLMoeTextTopKRouter,
)
logger = logging.get_logger(__name__)
class Qwen3_5MoeTextConfig(Qwen3NextConfig):
    r"""
    This is the configuration class to store the configuration of a [`Qwen3_5MoeTextModel`]. It is used to instantiate a
    Qwen3.5-MoE model according to the specified arguments, defining the model architecture.
    Instantiating a configuration with the defaults will yield a similar configuration to that of
    Qwen3.5-35B-A3B-Instruct [Qwen/Qwen3.5-35B-A3B-Instruct](https://huggingface.co/Qwen/Qwen3.5-35B-A3B-Instruct).
    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.
    Args:
        vocab_size (`int`, *optional*, defaults to 248320):
            Vocabulary size of the model. Defines the number of different tokens that can be represented by the
            `inputs_ids`.
        hidden_size (`int`, *optional*, defaults to 2048):
            Dimension of the hidden representations.
        num_hidden_layers (`int`, *optional*, defaults to 40):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 16):
            Number of attention heads for each attention layer in the Transformer encoder.
        num_key_value_heads (`int`, *optional*, defaults to 2):
            This is the number of key_value heads that should be used to implement Grouped Query Attention. If
            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
            `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
            by meanpooling all the original heads within that group. For more details checkout [this
            paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to `32`.
        hidden_act (`str`, *optional*, defaults to `"silu"`):
            The non-linear activation function in the decoder.
        max_position_embeddings (`int`, *optional*, defaults to 32768):
            The maximum sequence length that this model might ever be used with.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        rms_norm_eps (`float`, *optional*, defaults to 1e-06):
            The epsilon used by the rms normalization layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether the model's input and output word embeddings should be tied.
        rope_parameters (`RopeParameters`, *optional*):
            Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain
            a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE
            with longer `max_position_embeddings`.
        attention_bias (`bool`, *optional*, defaults to `False`):
            Whether to use a bias in the query, key, value and output projection layers during self-attention.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        head_dim (`int`, *optional*, defaults to 256):
            Projection weights dimension in multi-head attention.
        linear_conv_kernel_dim (`int`, *optional*, defaults to 4):
            Kernel size of the convolution used in linear attention layers.
        linear_key_head_dim (`int`, *optional*, defaults to 128):
            Dimension of each key head in linear attention.
        linear_value_head_dim (`int`, *optional*, defaults to 128):
            Dimension of each value head in linear attention.
        linear_num_key_heads (`int`, *optional*, defaults to 16):
            Number of key heads used in linear attention layers.
        linear_num_value_heads (`int`, *optional*, defaults to 32):
            Number of value heads used in linear attention layers.
        moe_intermediate_size (`int`, *optional*, defaults to 512):
            Intermediate size of the routed expert.
        shared_expert_intermediate_size (`int`, *optional*, defaults to 512):
            Intermediate size of the shared expert.
        num_experts_per_tok (`int`, *optional*, defaults to 8):
            Number of selected experts.
        num_experts (`int`, *optional*, defaults to 256):
            Number of routed experts.
        output_router_logits (`bool`, *optional*, defaults to `False`):
            Whether or not the router logits should be returned by the model. Enabling this will also
            allow the model to output the auxiliary loss, including load balancing loss and router z-loss.
        router_aux_loss_coef (`float`, *optional*, defaults to 0.001):
            The aux loss factor for the total loss.
        layer_types (`list[str]`, *optional*):
            Types of each layer (attention or linear).
        pad_token_id (`int`, *optional*):
            Padding token id.
        bos_token_id (`int`, *optional*):
            Beginning of stream token id.
        eos_token_id (`int`, *optional*):
            End of stream token id.
    ```python
    >>> from transformers import Qwen3_5MoeTextModel, Qwen3_5MoeTextConfig
    >>> # Initializing a Qwen3.5-MoE style configuration
    >>> configuration = Qwen3_5MoeTextConfig()
    >>> # Initializing a model from the Qwen3.5-35B-A3B style configuration
    >>> model = Qwen3_5MoeTextModel(configuration)
    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```
    """
    model_type = "qwen3_5_moe_text"
    base_config_key = "text_config"
    # Tensor-parallel sharding plan: column-sharded projections feed row-sharded
    # outputs; the fused expert gate_up weight uses the packed column-wise layout.
    base_model_tp_plan = {
        "layers.*.self_attn.q_proj": "colwise",
        "layers.*.self_attn.k_proj": "colwise",
        "layers.*.self_attn.v_proj": "colwise",
        "layers.*.self_attn.o_proj": "rowwise",
        "layers.*.mlp.experts.gate_up_proj": "packed_colwise",
        "layers.*.mlp.experts.down_proj": "rowwise",
        "layers.*.mlp.shared_expert.gate_proj": "colwise",
        "layers.*.mlp.shared_expert.up_proj": "colwise",
        "layers.*.mlp.shared_expert.down_proj": "rowwise",
    }
    def __init__(
        self,
        vocab_size=248320,
        hidden_size=2048,
        num_hidden_layers=40,
        num_attention_heads=16,
        num_key_value_heads=2,
        hidden_act="silu",
        max_position_embeddings=32768,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        tie_word_embeddings=False,
        rope_parameters: RopeParameters | dict[str, RopeParameters] | None = None,
        attention_bias=False,
        attention_dropout=0.0,
        head_dim=256,
        linear_conv_kernel_dim=4,
        linear_key_head_dim=128,
        linear_value_head_dim=128,
        linear_num_key_heads=16,
        linear_num_value_heads=32,
        moe_intermediate_size=512,
        shared_expert_intermediate_size=512,
        num_experts_per_tok=8,
        num_experts=256,
        output_router_logits=False,
        router_aux_loss_coef=0.001,
        layer_types=None,
        pad_token_id: int | None = None,
        bos_token_id: int | None = None,
        eos_token_id: int | None = None,
        **kwargs,
    ):
        # Exclude multimodal-only rope keys from the rope-parameter validation.
        kwargs["ignore_keys_at_rope_validation"] = {"mrope_section", "mrope_interleaved"}
        # NOTE(review): only `tie_word_embeddings` and **kwargs are forwarded to the
        # parent initializer here; the explicit parameters above appear to rely on
        # the modular conversion pipeline — confirm the generated configuration
        # class assigns them to the instance.
        super().__init__(
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )
        # Remove Qwen3Next-only attributes set by the parent config.
        del self.intermediate_size
        del self.decoder_sparse_step
        del self.norm_topk_prob
        del self.mlp_only_layers
class Qwen3_5MoeVisionConfig(Qwen3_5VisionConfig):
    """Vision-backbone configuration; unchanged re-export of `Qwen3_5VisionConfig`."""
    pass
class Qwen3_5MoeConfig(Qwen3VLConfig):
    r"""
    This is the configuration class to store the configuration of a [`Qwen3_5MoeModel`]. It is used to instantiate a
    Qwen3.5-MoE model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of
    Qwen3.5-35B-A3B-Instruct [Qwen/Qwen3.5-35B-A3B-Instruct](https://huggingface.co/Qwen/Qwen3.5-35B-A3B-Instruct).
    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.
    Args:
        text_config (`Union[PreTrainedConfig, dict]`, *optional*, defaults to `Qwen3_5TextConfig`):
            The config object or dictionary of the text backbone.
        vision_config (`Union[PreTrainedConfig, dict]`, *optional*, defaults to `Qwen3_5VisionConfig`):
            The config object or dictionary of the vision backbone.
        image_token_id (`int`, *optional*, defaults to 248056):
            The image token index to encode the image prompt.
        video_token_id (`int`, *optional*, defaults to 248057):
            The video token index to encode the image prompt.
        vision_start_token_id (`int`, *optional*, defaults to 248053):
            The start token index to encode the image prompt.
        vision_end_token_id (`int`, *optional*, defaults to 248054):
            The end token index to encode the image prompt.
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether to tie the word embeddings.
    ```python
    >>> from transformers import Qwen3_5MoeForConditionalGeneration, Qwen3_5MoeConfig
    >>> # Initializing a Qwen3.5-MoE style configuration
    >>> configuration = Qwen3_5MoeConfig()
    >>> # Initializing a model from the Qwen3.5-35B-A3B style configuration
    >>> model = Qwen3_5MoeForConditionalGeneration(configuration)
    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""
    model_type = "qwen3_5_moe"
    # Nested config classes used when (de)serializing `text_config` / `vision_config`.
    sub_configs = {"vision_config": Qwen3_5MoeVisionConfig, "text_config": Qwen3_5MoeTextConfig}
    def __init__(
        self,
        text_config=None,
        vision_config=None,
        image_token_id=248056,
        video_token_id=248057,
        vision_start_token_id=248053,
        vision_end_token_id=248054,
        tie_word_embeddings=False,
        **kwargs,
    ):
        # Pure forwarding constructor: overrides only the default special-token ids.
        super().__init__(
            text_config=text_config,
            vision_config=vision_config,
            image_token_id=image_token_id,
            video_token_id=video_token_id,
            vision_start_token_id=vision_start_token_id,
            vision_end_token_id=vision_end_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )
class Qwen3_5MoeVisionRotaryEmbedding(Qwen3_5VisionRotaryEmbedding):
    """Unchanged re-export of `Qwen3_5VisionRotaryEmbedding` under the MoE namespace."""
    pass
class Qwen3_5MoeTextRotaryEmbedding(Qwen3_5TextRotaryEmbedding):
    """Unchanged re-export of `Qwen3_5TextRotaryEmbedding` under the MoE namespace."""
    pass
class Qwen3_5MoeDynamicCache(Qwen3NextDynamicCache):
    """Unchanged re-export of `Qwen3NextDynamicCache` under the MoE namespace."""
    pass
class Qwen3_5MoeGatedDeltaNet(Qwen3_5GatedDeltaNet):
    """Unchanged re-export of `Qwen3_5GatedDeltaNet` under the MoE namespace."""
    pass
class Qwen3_5MoeAttention(Qwen3NextAttention):
    """Unchanged re-export of `Qwen3NextAttention` under the MoE namespace."""
    pass
class Qwen3_5MoeMLP(Qwen3_5MLP):
    """Unchanged re-export of `Qwen3_5MLP` under the MoE namespace."""
    pass
class Qwen3_5MoeExperts(Qwen3NextExperts):
    """Unchanged re-export of `Qwen3NextExperts` under the MoE namespace."""
    pass
class Qwen3_5MoeTopKRouter(Qwen3VLMoeTextTopKRouter):
    """Unchanged re-export of `Qwen3VLMoeTextTopKRouter` under the MoE namespace."""
    pass
class Qwen3_5MoeSparseMoeBlock(Qwen3NextSparseMoeBlock):
    """Unchanged re-export of `Qwen3NextSparseMoeBlock` under the MoE namespace."""
    pass
class Qwen3_5MoeRMSNorm(Qwen3NextRMSNorm):
    """Unchanged re-export of `Qwen3NextRMSNorm` under the MoE namespace."""
    pass
class Qwen3_5MoeDecoderLayer(Qwen3NextDecoderLayer):
    def __init__(self, config: Qwen3_5MoeTextConfig, layer_idx: int):
        """Build one decoder layer: a token mixer (full or linear attention) plus a sparse-MoE MLP."""
        # Initialize only the gradient-checkpointing machinery; this layer wires
        # its own sub-modules instead of running Qwen3NextDecoderLayer.__init__.
        GradientCheckpointingLayer.__init__(self)
        hidden = config.hidden_size
        layer_type = config.layer_types[layer_idx]
        self.hidden_size = hidden
        self.layer_type = layer_type
        # The per-layer type decides which mixer attribute exists on this layer.
        if layer_type == "full_attention":
            self.self_attn = Qwen3_5MoeAttention(config, layer_idx)
        elif layer_type == "linear_attention":
            self.linear_attn = Qwen3_5MoeGatedDeltaNet(config, layer_idx)
        self.mlp = Qwen3_5MoeSparseMoeBlock(config)
        self.input_layernorm = Qwen3_5MoeRMSNorm(hidden, eps=config.rms_norm_eps)
        self.post_attention_layernorm = Qwen3_5MoeRMSNorm(hidden, eps=config.rms_norm_eps)
class Qwen3_5MoePreTrainedModel(Qwen3NextPreTrainedModel):
    # Modules that must stay on a single device when a `device_map` is used.
    _no_split_modules = ["Qwen3_5MoeDecoderLayer", "Qwen3_5MoeVisionBlock"]
    def _init_weights(self, module):
        """Initialize `module`, extending the generic scheme for Qwen3.5-MoE-specific modules."""
        # Calls the base PreTrainedModel initializer directly (bypasses the
        # Qwen3NextPreTrainedModel override).
        PreTrainedModel._init_weights(self, module)
        if isinstance(module, Qwen3_5MoeGatedDeltaNet):
            init.ones_(module.dt_bias)
            # A_log is set to the log of Uniform(0, 16) samples.
            init.copy_(module.A_log, torch.empty_like(module.A_log).uniform_(0, 16).log_())
        # We initialize with 0s to be 1 centered as the RMSNorm here does (1 + weight)
        elif isinstance(module, Qwen3_5MoeRMSNorm):
            init.zeros_(module.weight)
        elif isinstance(module, Qwen3_5MoeExperts):
            # Fused expert tensors are initialized with the configured std.
            init.normal_(module.gate_up_proj, mean=0.0, std=self.config.initializer_range)
            init.normal_(module.down_proj, mean=0.0, std=self.config.initializer_range)
        elif isinstance(module, Qwen3_5MoeSparseMoeBlock):
            init.normal_(module.gate.weight, mean=0.0, std=self.config.initializer_range)
        elif isinstance(module, Qwen3_5MoeVisionRotaryEmbedding):
            # Recompute the rotary inverse frequencies from (theta, dim).
            inv_freq = 1.0 / (module.theta ** (torch.arange(0, module.dim, 2, dtype=torch.float) / module.dim))
            init.copy_(module.inv_freq, inv_freq)
class Qwen3_5MoeVisionModel(Qwen3_5VisionModel):
    """Unchanged re-export of `Qwen3_5VisionModel` under the MoE namespace."""
    pass
class Qwen3_5MoeModelOutputWithPast(Qwen3VLMoeModelOutputWithPast):
    # Optional per-layer router logits; `None` unless the model populates them.
    router_logits: tuple[torch.FloatTensor] | None = None
class Qwen3_5MoeCausalLMOutputWithPast(Qwen3VLMoeCausalLMOutputWithPast):
    """Unchanged re-export of `Qwen3VLMoeCausalLMOutputWithPast` under the MoE namespace."""
    pass
class Qwen3_5MoeTextModel(Qwen3_5TextModel):
    """Unchanged re-export of `Qwen3_5TextModel` under the MoE namespace."""
    pass
class Qwen3_5MoeModel(Qwen3_5Model):
    """Unchanged re-export of `Qwen3_5Model` under the MoE namespace."""
    pass
class Qwen3_5MoeForCausalLM(Qwen3NextForCausalLM):
    config: Qwen3_5MoeTextConfig
    # Checkpoint keys for MTP heads and the vision tower are ignored when loading
    # this text-only model.
    _keys_to_ignore_on_load_unexpected = [r"^mtp.*", r"^model.visual.*"]
    def __init__(self, config):
        """Text-only causal LM head over the Qwen3.5-MoE text backbone."""
        super().__init__(config)
        self.model = Qwen3_5MoeTextModel(config)
class Qwen3_5MoeForConditionalGeneration(Qwen3VLMoeForConditionalGeneration):
    def forward(self, **super_kwargs):
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
        image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*):
            The temporal, height and width of feature shape of each image in LLM.
        video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*):
            The temporal, height and width of feature shape of each video in LLM.
        Example:
        ```python
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import AutoProcessor, Qwen3_5MoeForConditionalGeneration
        >>> model = Qwen3_5MoeForConditionalGeneration.from_pretrained("Qwen/Qwen3.5-35B-A3B-Instruct", dtype="auto", device_map="auto")
        >>> processor = AutoProcessor.from_pretrained("Qwen/Qwen3.5-35B-A3B-Instruct")
        >>> messages = [
            {
                "role": "user",
                "content": [
                    {
                        "type": "image",
                        "image": "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-VL/assets/demo.jpeg",
                    },
                    {"type": "text", "text": "Describe this image in short."},
                ],
            }
        ]
        >>> # Preparation for inference
        >>> inputs = processor.apply_chat_template(
            messages,
            tokenize=True,
            add_generation_prompt=True,
            return_dict=True,
            return_tensors="pt"
        )
        >>> inputs = inputs.to(model.device)
        >>> # Generate
        >>> generated_ids = model.generate(**inputs, max_new_tokens=128)
        >>> generated_ids_trimmed = [
            out_ids[len(in_ids) :] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
        ]
        >>> processor.batch_decode(generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "A woman in a plaid shirt sits on a sandy beach at sunset, smiling as she gives a high-five to a yellow Labrador Retriever wearing a harness. The ocean waves roll in the background."
        ```"""
        # Fix: propagate the parent's output. Without `return`, this override
        # always handed `None` back to callers, unlike the sibling
        # `get_video_features` / `get_image_features` delegations below.
        return super().forward(**super_kwargs)
    def get_video_features(
        self,
        **super_kwargs,
    ) -> tuple | BaseModelOutputWithPooling:
        """Delegate video feature extraction to the parent implementation."""
        return super().get_video_features(**super_kwargs)
    def get_image_features(
        self,
        **super_kwargs,
    ) -> tuple | BaseModelOutputWithPooling:
        """Delegate image feature extraction to the parent implementation."""
        return super().get_image_features(**super_kwargs)
# Public symbols exported from this module.
__all__ = [
    "Qwen3_5MoeConfig",
    "Qwen3_5MoeTextConfig",
    "Qwen3_5MoeVisionModel",
    "Qwen3_5MoeTextModel",
    "Qwen3_5MoeModel",
    "Qwen3_5MoeForCausalLM",
    "Qwen3_5MoeForConditionalGeneration",
    "Qwen3_5MoePreTrainedModel",
]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/qwen3_5_moe/modular_qwen3_5_moe.py",
"license": "Apache License 2.0",
"lines": 386,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:tests/models/qwen3_5/test_modeling_qwen3_5.py | # Copyright 2026 The Qwen Team and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch Qwen3.5 model."""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import (
require_torch,
torch_device,
)
from ...causal_lm_tester import CausalLMModelTest, CausalLMModelTester
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import (
ModelTesterMixin,
floats_tensor,
ids_tensor,
)
if is_torch_available():
import torch
from transformers import (
Qwen3_5Config,
Qwen3_5ForCausalLM,
Qwen3_5ForConditionalGeneration,
Qwen3_5Model,
Qwen3_5TextConfig,
Qwen3_5TextModel,
)
from transformers.models.qwen3_5.modeling_qwen3_5 import Qwen3_5DynamicCache
class Qwen3_5TextModelTester(CausalLMModelTester):
    """Config/input builder for the tiny Qwen3.5 text models used in this suite."""
    if is_torch_available():
        base_model_class = Qwen3_5TextModel
        causal_lm_class = Qwen3_5ForCausalLM
    def __init__(self, parent):
        super().__init__(parent=parent)
        # Shrunken hybrid-layer geometry so the alternating deltanet/attention
        # stack stays small enough for fast tests.
        overrides = {
            "layer_types": ["full_attention", "linear_attention"],
            "linear_conv_kernel_dim": 2,
            "linear_key_head_dim": 16,
            "linear_value_head_dim": 16,
            "linear_num_key_heads": 4,
            "linear_num_value_heads": 8,
        }
        for attribute, value in overrides.items():
            setattr(self, attribute, value)
@require_torch
class Qwen3_5TextModelTest(CausalLMModelTest, unittest.TestCase):
    model_tester_class = Qwen3_5TextModelTester
    config_class = Qwen3_5TextConfig
    model_split_percents = [0.5, 0.8, 0.9]
    def _check_past_key_values_for_generate(self, batch_size, past_key_values, seq_length, config):
        "Qwen3.5 has a special Cache as it alternates with gated deltanet layers"
        self.assertIsInstance(past_key_values, Qwen3_5DynamicCache)
        # (batch, kv heads, seq_length, head_dim)
        num_heads = getattr(config, "num_key_value_heads", config.num_attention_heads)
        head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
        expected_shape = (batch_size, num_heads, seq_length, head_dim)
        # Only the layer indices tracked in `transformer_layers` hold KV tensors.
        attention_layer_indices = past_key_values.transformer_layers
        self.assertListEqual(
            [past_key_values.key_cache[idx].shape for idx in attention_layer_indices],
            [expected_shape] * len(attention_layer_indices),
        )
        self.assertListEqual(
            [past_key_values.value_cache[idx].shape for idx in attention_layer_indices],
            [expected_shape] * len(attention_layer_indices),
        )
    def _check_caches_are_equal(self, cache1, cache2):
        "Qwen3.5 has a special Cache as it alternates with gated deltanet layers"
        if not len(cache1) == len(cache2):
            raise ValueError("Both caches do not have the same number of layers.")
        num_layers = len(cache1)
        for idx in range(num_layers):
            # Entries can be None for non-attention (deltanet) layer slots; only
            # compare layers that actually carry key/value tensors.
            if cache1.key_cache[idx] is not None:
                torch.testing.assert_close(cache1.key_cache[idx], cache2.key_cache[idx])
                torch.testing.assert_close(cache1.value_cache[idx], cache2.value_cache[idx])
    def test_attention_outputs(self):
        "Needs to be overwritten as Qwen3.5 alternates between attention layers and gated deltanet layers."
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        # force eager attention to support output attentions
        config._attn_implementation = "eager"
        seq_len = getattr(self.model_tester, "seq_length", None)
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class._from_config(config, attn_implementation="eager")
            config = model.config
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            # One attention map per full-attention layer only.
            self.assertEqual(len(attentions), sum(layer == "full_attention" for layer in config.layer_types))
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), sum(layer == "full_attention" for layer in config.layer_types))
            self.assertListEqual(list(attentions[0].shape[-3:]), [config.num_attention_heads, seq_len, seq_len])
            out_len = len(outputs)
            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            self_attentions = outputs.attentions
            # Adding hidden states grows the output tuple by exactly one entry.
            self.assertEqual(out_len + 1, len(outputs))
            self.assertEqual(len(self_attentions), sum(layer == "full_attention" for layer in config.layer_types))
            self.assertListEqual(list(self_attentions[0].shape[-3:]), [config.num_attention_heads, seq_len, seq_len])
    @unittest.skip("The specific cache format cannot be instantiated from dp/ddp data.")
    def test_multi_gpu_data_parallel_forward(self):
        pass
    @unittest.skip("Intentionally not reversable (no changes) as only load time within a VLM depends on this")
    def test_reverse_loading_mapping(self, check_keys_were_modified=True):
        pass
class Qwen3_5VisionText2TextModelTester:
    """Builds a tiny Qwen3.5 vision+text config and matching dummy inputs for the common tests."""
    def __init__(
        self,
        parent,
        batch_size=3,
        seq_length=7,
        num_channels=3,
        ignore_index=-100,
        image_size=16,
        text_config=None,
        vision_config=None,
        image_token_id=3,
        video_token_id=4,
        vision_start_token_id=5,
        vision_end_token_id=6,
        tie_word_embeddings=True,
        is_training=True,
    ):
        # Fix: the config dicts used to be mutable default arguments, shared across
        # every tester instance; build a fresh dict per instance instead so one
        # test cannot leak config mutations into another.
        if text_config is None:
            text_config = {
                "bos_token_id": 0,
                "eos_token_id": 1,
                "pad_token_id": 2,
                "hidden_act": "silu",
                "head_dim": 8,
                "hidden_size": 32,
                "vocab_size": 99,
                "intermediate_size": 37,
                "max_position_embeddings": 512,
                "model_type": "qwen3_vl",
                "num_attention_heads": 4,
                "num_hidden_layers": 2,
                "layer_types": ["full_attention", "linear_attention"],
                "num_key_value_heads": 2,
                "rope_theta": 10000,
                "tie_word_embeddings": True,
                "rope_parameters": {"rope_type": "default", "mrope_section": [16, 8, 8], "mrope_interleaved": True},
                "linear_conv_kernel_dim": 2,
                "linear_key_head_dim": 16,
                "linear_value_head_dim": 16,
                "linear_num_key_heads": 4,
                "linear_num_value_heads": 8,
            }
        if vision_config is None:
            vision_config = {
                "depth": 2,
                "in_chans": 3,
                "hidden_act": "gelu_pytorch_tanh",
                "intermediate_size": 32,
                "out_hidden_size": 32,
                "hidden_size": 32,
                "num_heads": 4,
                "patch_size": 16,
                "spatial_merge_size": 1,
                "temporal_patch_size": 2,
                "num_position_embeddings": 16,
            }
        self.parent = parent
        self.ignore_index = ignore_index
        self.is_training = is_training
        self.vision_config = vision_config
        self.text_config = text_config
        # Mirror the text-config entries as flat attributes for the common tests.
        self.vocab_size = text_config["vocab_size"]
        self.bos_token_id = text_config["bos_token_id"]
        self.eos_token_id = text_config["eos_token_id"]
        self.pad_token_id = text_config["pad_token_id"]
        self.head_dim = text_config["head_dim"]
        self.hidden_size = text_config["hidden_size"]
        self.intermediate_size = text_config["intermediate_size"]
        self.num_hidden_layers = text_config["num_hidden_layers"]
        self.num_attention_heads = text_config["num_attention_heads"]
        self.num_key_value_heads = text_config["num_key_value_heads"]
        self.rope_theta = text_config["rope_theta"]
        self.rope_parameters = text_config["rope_parameters"]
        self.hidden_act = text_config["hidden_act"]
        self.max_position_embeddings = text_config["max_position_embeddings"]
        self.model_type = text_config["model_type"]
        self.vision_start_token_id = vision_start_token_id
        self.vision_end_token_id = vision_end_token_id
        self.image_token_id = image_token_id
        self.video_token_id = video_token_id
        self.tie_word_embeddings = tie_word_embeddings
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.num_image_tokens = 32
        # Text positions plus the placeholder slots reserved for image tokens.
        self.seq_length = seq_length + self.num_image_tokens
    def get_config(self):
        """Assemble the full multimodal config from the stored sub-config dicts."""
        return Qwen3_5Config(
            text_config=self.text_config,
            vision_config=self.vision_config,
            image_token_id=self.image_token_id,
            video_token_id=self.video_token_id,
            vision_start_token_id=self.vision_start_token_id,
            vision_end_token_id=self.vision_end_token_id,
            tie_word_embeddings=self.tie_word_embeddings,
        )
    def prepare_config_and_inputs(self):
        """Return the config and random pixel values shaped for the vision patch embed."""
        config = self.get_config()
        patch_size = config.vision_config.patch_size
        temporal_patch_size = config.vision_config.temporal_patch_size
        pixel_values = floats_tensor(
            [
                self.batch_size * (self.image_size**2) // (patch_size**2),
                self.num_channels * (patch_size**2) * temporal_patch_size,
            ]
        )
        return config, pixel_values
    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) with vision placeholder tokens spliced into the ids."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
        input_ids[:, -1] = self.pad_token_id
        # Scrub any randomly-drawn special tokens, then place exactly one
        # vision_start + image token pair at a fixed position.
        input_ids[input_ids == self.video_token_id] = self.pad_token_id
        input_ids[input_ids == self.image_token_id] = self.pad_token_id
        input_ids[input_ids == self.vision_start_token_id] = self.pad_token_id
        input_ids[:, self.num_image_tokens] = self.image_token_id
        input_ids[:, self.num_image_tokens - 1] = self.vision_start_token_id
        mm_token_type_ids = torch.zeros_like(input_ids)
        mm_token_type_ids[:, self.num_image_tokens] = 1
        inputs_dict = {
            "pixel_values": pixel_values,
            "image_grid_thw": torch.tensor([[1, 1, 1]] * self.batch_size, device=torch_device),
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "mm_token_type_ids": mm_token_type_ids,
        }
        return config, inputs_dict
@require_torch
class Qwen3_5ModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
    """
    Model tester for `Qwen3_5ForConditionalGeneration`.
    """
    all_model_classes = (
        (
            Qwen3_5Model,
            Qwen3_5ForConditionalGeneration,
        )
        if is_torch_available()
        else ()
    )
    model_split_percents = [0.5, 0.8, 0.9]
    def setUp(self):
        self.model_tester = Qwen3_5VisionText2TextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=Qwen3_5Config, has_text_modality=False)
    def test_config(self):
        self.config_tester.run_common_tests()
    def _check_past_key_values_for_generate(self, batch_size, past_key_values, seq_length, config):
        "Qwen3.5 has a special Cache as it alternates with gated deltanet layers"
        self.assertIsInstance(past_key_values, Qwen3_5DynamicCache)
        # (batch, kv heads, seq_length, head_dim)
        num_heads = getattr(config, "num_key_value_heads", config.num_attention_heads)
        head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
        expected_shape = (batch_size, num_heads, seq_length, head_dim)
        # Only the layer indices tracked in `transformer_layers` hold KV tensors.
        attention_layer_indices = past_key_values.transformer_layers
        self.assertListEqual(
            [past_key_values.key_cache[idx].shape for idx in attention_layer_indices],
            [expected_shape] * len(attention_layer_indices),
        )
        self.assertListEqual(
            [past_key_values.value_cache[idx].shape for idx in attention_layer_indices],
            [expected_shape] * len(attention_layer_indices),
        )
    def _check_caches_are_equal(self, cache1, cache2):
        "Qwen3.5 has a special Cache as it alternates with gated deltanet layers"
        if not len(cache1) == len(cache2):
            raise ValueError("Both caches do not have the same number of layers.")
        num_layers = len(cache1)
        for idx in range(num_layers):
            # Entries can be None for non-attention (deltanet) layer slots; only
            # compare layers that actually carry key/value tensors.
            if cache1.key_cache[idx] is not None:
                torch.testing.assert_close(cache1.key_cache[idx], cache2.key_cache[idx])
                torch.testing.assert_close(cache1.value_cache[idx], cache2.value_cache[idx])
    def test_attention_outputs(self):
        "Needs to be overwritten as Qwen3.5 alternates between attention layers and gated deltanet layers."
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        # force eager attention to support output attentions
        config._attn_implementation = "eager"
        seq_len = getattr(self.model_tester, "seq_length", None)
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class._from_config(config, attn_implementation="eager")
            config = model.config
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            # One attention map per full-attention layer of the text backbone only.
            self.assertEqual(
                len(attentions), sum(layer == "full_attention" for layer in config.text_config.layer_types)
            )
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.text_config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(
                len(attentions), sum(layer == "full_attention" for layer in config.text_config.layer_types)
            )
            self.assertListEqual(
                list(attentions[0].shape[-3:]), [config.text_config.num_attention_heads, seq_len, seq_len]
            )
            out_len = len(outputs)
            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            self_attentions = outputs.attentions
            # Adding hidden states grows the output tuple by exactly one entry.
            self.assertEqual(out_len + 1, len(outputs))
            self.assertEqual(
                len(self_attentions), sum(layer == "full_attention" for layer in config.text_config.layer_types)
            )
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]), [config.text_config.num_attention_heads, seq_len, seq_len]
            )
    @unittest.skip("The specific cache format cannot be instantiated from dp/ddp data.")
    def test_multi_gpu_data_parallel_forward(self):
        pass
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/qwen3_5/test_modeling_qwen3_5.py",
"license": "Apache License 2.0",
"lines": 355,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:tests/models/qwen3_5_moe/test_modeling_qwen3_5_moe.py | # Copyright 2026 The Qwen Team and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch Qwen3.5 model."""
import copy
import os
import re
import tempfile
import unittest
from safetensors.torch import load_file
from transformers import is_torch_available
from transformers.conversion_mapping import get_model_conversion_mapping
from transformers.core_model_loading import WeightRenaming, process_target_pattern
from transformers.testing_utils import (
require_torch,
torch_device,
)
from ...causal_lm_tester import CausalLMModelTest, CausalLMModelTester
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import (
ModelTesterMixin,
compare_state_dicts,
floats_tensor,
ids_tensor,
)
if is_torch_available():
import torch
from transformers import (
Qwen3_5MoeConfig,
Qwen3_5MoeForCausalLM,
Qwen3_5MoeForConditionalGeneration,
Qwen3_5MoeModel,
Qwen3_5MoeTextConfig,
Qwen3_5MoeTextModel,
)
from transformers.models.qwen3_5_moe.modeling_qwen3_5_moe import Qwen3_5MoeDynamicCache
class Qwen3_5MoeTextModelTester(CausalLMModelTester):
    """Config/input builder for the tiny Qwen3.5-MoE text models used in this suite."""
    # Bind the real model classes only when torch is importable.
    if is_torch_available():
        base_model_class = Qwen3_5MoeTextModel
        causal_lm_class = Qwen3_5MoeForCausalLM
    def __init__(self, parent):
        super().__init__(parent=parent)
        # Shrunken hybrid-layer geometry keeps the deltanet/attention stack tiny.
        self.layer_types = ["full_attention", "linear_attention"]
        self.linear_conv_kernel_dim = 2
        self.linear_key_head_dim = 16
        self.linear_value_head_dim = 16
        self.linear_num_key_heads = 4
        self.linear_num_value_heads = 8
@require_torch
class Qwen3_5MoeTextModelTest(CausalLMModelTest, unittest.TestCase):
    """Common-test suite for the text-only Qwen3.5 MoE model classes."""

    model_tester_class = Qwen3_5MoeTextModelTester
    config_class = Qwen3_5MoeTextConfig

    def _check_past_key_values_for_generate(self, batch_size, past_key_values, seq_length, config):
        "Qwen3.5 Moe has a special Cache as it alternates with gated deltanet layers"
        self.assertIsInstance(past_key_values, Qwen3_5MoeDynamicCache)
        # (batch, kv heads, seq_length, head_dim)
        num_heads = getattr(config, "num_key_value_heads", config.num_attention_heads)
        head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
        expected_shape = (batch_size, num_heads, seq_length, head_dim)
        # Only full-attention layers populate the key/value caches.
        attention_layer_indices = past_key_values.transformer_layers
        self.assertListEqual(
            [past_key_values.key_cache[idx].shape for idx in attention_layer_indices],
            [expected_shape] * len(attention_layer_indices),
        )
        self.assertListEqual(
            [past_key_values.value_cache[idx].shape for idx in attention_layer_indices],
            [expected_shape] * len(attention_layer_indices),
        )

    def _check_caches_are_equal(self, cache1, cache2):
        "Qwen3.5 Moe has a special Cache as it alternates with gated deltanet layers"
        if not len(cache1) == len(cache2):
            raise ValueError("Both caches do not have the same number of layers.")
        num_layers = len(cache1)
        for idx in range(num_layers):
            # Deltanet layers leave their cache entries as None, so skip them.
            if cache1.key_cache[idx] is not None:
                torch.testing.assert_close(cache1.key_cache[idx], cache2.key_cache[idx])
                torch.testing.assert_close(cache1.value_cache[idx], cache2.value_cache[idx])

    def test_attention_outputs(self):
        "Needs to be overwritten as Qwen3.5 Moe alternates between attention layers and gated deltanet layers."
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        # force eager attention to support output attentions
        config._attn_implementation = "eager"
        seq_len = getattr(self.model_tester, "seq_length", None)
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class._from_config(config, attn_implementation="eager")
            config = model.config
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            # Only full-attention layers emit attention maps.
            self.assertEqual(len(attentions), sum(layer == "full_attention" for layer in config.layer_types))
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), sum(layer == "full_attention" for layer in config.layer_types))
            self.assertListEqual(list(attentions[0].shape[-3:]), [config.num_attention_heads, seq_len, seq_len])
            out_len = len(outputs)
            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            self_attentions = outputs.attentions
            self.assertEqual(out_len + 1, len(outputs))
            self.assertEqual(len(self_attentions), sum(layer == "full_attention" for layer in config.layer_types))
            self.assertListEqual(list(self_attentions[0].shape[-3:]), [config.num_attention_heads, seq_len, seq_len])

    def test_reverse_loading_mapping(self, check_keys_were_modified=True):
        """
        Overwritten to check for the moe portion but ignore the prefix as it results into a noop
        (except we have a VLM struct initially)
        """
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        # Some MoE models alternate between a classic MLP and a MoE layer, in which case we want to have at
        # lest one MoE layer here to check the mapping
        config_to_set = config.get_text_config(decoder=True)
        config_to_set.first_k_dense_replace = 1  # means that the first layer (idx 0) will be MLP, then MoE
        config_to_set.moe_layer_start_index = 1  # same as above but for Ernie 4.5...
        config_to_set.mlp_only_layers = [0]  # same but for qwens
        config_to_set.num_dense_layers = 1  # lfm2_moe
        for model_class in self.all_model_classes:
            # Each individual model is a subtest
            with self.subTest(model_class.__name__):
                model = model_class(copy.deepcopy(config))
                # Skip if no conversions
                conversions = get_model_conversion_mapping(model, add_legacy=False)
                if len(conversions) == 0:
                    self.skipTest("No conversion found for this model")
                # Find the model keys, so the targets according to the conversions
                model_keys = list(model.state_dict().keys())
                with tempfile.TemporaryDirectory() as tmpdirname:
                    # Serialize with reverse mapping
                    model.save_pretrained(tmpdirname)
                    state_dict = load_file(os.path.join(tmpdirname, "model.safetensors"))
                # Get all the serialized keys that we just saved according to the reverse mapping
                serialized_keys = list(state_dict.keys())
                if check_keys_were_modified:
                    # They should be different, otherwise we did not perform any mapping
                    self.assertNotEqual(sorted(serialized_keys), sorted(model_keys), "No key mapping was performed!")
                # Check that for each conversion entry, we at least map to one key
                for conversion in conversions:
                    for source_pattern in conversion.source_patterns:
                        # Sometimes the mappings specify keys that are tied, so absent from the saved state dict
                        if isinstance(conversion, WeightRenaming):
                            # We need to revert the target pattern to make it compatible with regex search
                            target_pattern_reversed = conversion.target_patterns[0]
                            captured_group = process_target_pattern(source_pattern)[1]
                            if captured_group:
                                target_pattern_reversed = target_pattern_reversed.replace(r"\1", captured_group)
                            if any(re.search(target_pattern_reversed, k) for k in model.all_tied_weights_keys.keys()):
                                continue
                        num_matches = sum(re.search(source_pattern, key) is not None for key in serialized_keys)
                        # Key change: special case to load causal lm within vlm
                        if source_pattern == "^model.language_model":
                            continue
                        # Fixed garbled failure message ("indicates whether that ... ot that").
                        self.assertTrue(
                            num_matches > 0,
                            f"`{source_pattern}` in `{conversion}` did not match any of the source keys. "
                            "This indicates that the pattern is not properly written, or that it could not be reversed correctly",
                        )
                # If everything is still good at this point, let's test that we perform the same operations both when
                # reverting ops from `from_pretrained` and from `__init__`
                with tempfile.TemporaryDirectory() as tmpdirname:
                    # The model was instantiated from __init__ before being saved
                    model.save_pretrained(tmpdirname)
                    state_dict_saved_from_init = load_file(os.path.join(tmpdirname, "model.safetensors"))
                    # Now reload it
                    model_reloaded = model_class.from_pretrained(tmpdirname)
                    # Make sure both loaded state_dict are identical
                    self.assertTrue(compare_state_dicts(model_reloaded.state_dict(), model.state_dict()))
                    # The model was instantiated from `from_pretrained` before being saved
                    model_reloaded.save_pretrained(tmpdirname)
                    state_dict_saved_from_pretrained = load_file(os.path.join(tmpdirname, "model.safetensors"))
                    # Make sure both saved state_dict are identical
                    self.assertTrue(compare_state_dicts(state_dict_saved_from_init, state_dict_saved_from_pretrained))

    @unittest.skip("The specific cache format cannot be instantiated from dp/ddp data.")
    def test_multi_gpu_data_parallel_forward(self):
        pass
class Qwen3_5MoeVisionText2TextModelTester:
    """Model tester for the vision-text (conditional generation) Qwen3.5 MoE classes.

    Builds a tiny VLM config plus matching dummy pixel/text inputs. The sub-config
    defaults now use `None` sentinels instead of mutable dict defaults, so instances
    can no longer share (and mutate) a single default dict across calls.
    """

    def __init__(
        self,
        parent,
        batch_size=3,
        seq_length=7,
        num_channels=3,
        ignore_index=-100,
        image_size=16,
        text_config=None,
        vision_config=None,
        image_token_id=3,
        video_token_id=4,
        vision_start_token_id=5,
        vision_end_token_id=6,
        tie_word_embeddings=True,
        is_training=True,
    ):
        # Build the default sub-configs lazily to avoid the shared-mutable-default pitfall.
        if text_config is None:
            text_config = {
                "bos_token_id": 0,
                "eos_token_id": 1,
                "pad_token_id": 2,
                "hidden_act": "silu",
                "head_dim": 8,
                "hidden_size": 32,
                "vocab_size": 99,
                "intermediate_size": 37,
                "max_position_embeddings": 512,
                "model_type": "qwen3_vl",
                "num_attention_heads": 4,
                "num_hidden_layers": 2,
                "layer_types": ["full_attention", "linear_attention"],
                "num_key_value_heads": 2,
                "rope_theta": 10000,
                "tie_word_embeddings": True,
                "rope_parameters": {"rope_type": "default", "mrope_section": [16, 8, 8], "mrope_interleaved": True},
                "linear_conv_kernel_dim": 2,
                "linear_key_head_dim": 16,
                "linear_value_head_dim": 16,
                "linear_num_key_heads": 4,
                "linear_num_value_heads": 8,
                "moe_intermediate_size": 16,
                "shared_expert_intermediate_size": 36,
                "num_experts_per_tok": 2,
                "num_experts": 8,
            }
        if vision_config is None:
            vision_config = {
                "depth": 2,
                "in_chans": 3,
                "hidden_act": "gelu_pytorch_tanh",
                "intermediate_size": 32,
                "out_hidden_size": 32,
                "hidden_size": 32,
                "num_heads": 4,
                "patch_size": 16,
                "spatial_merge_size": 1,
                "temporal_patch_size": 2,
                "num_position_embeddings": 16,
            }
        self.parent = parent
        self.ignore_index = ignore_index
        self.is_training = is_training
        self.vision_config = vision_config
        self.text_config = text_config
        # Mirror the most commonly accessed text-config values as attributes.
        self.vocab_size = text_config["vocab_size"]
        self.bos_token_id = text_config["bos_token_id"]
        self.eos_token_id = text_config["eos_token_id"]
        self.pad_token_id = text_config["pad_token_id"]
        self.head_dim = text_config["head_dim"]
        self.hidden_size = text_config["hidden_size"]
        self.intermediate_size = text_config["intermediate_size"]
        self.num_hidden_layers = text_config["num_hidden_layers"]
        self.num_attention_heads = text_config["num_attention_heads"]
        self.num_key_value_heads = text_config["num_key_value_heads"]
        self.rope_theta = text_config["rope_theta"]
        self.rope_parameters = text_config["rope_parameters"]
        self.hidden_act = text_config["hidden_act"]
        self.max_position_embeddings = text_config["max_position_embeddings"]
        self.model_type = text_config["model_type"]
        self.vision_start_token_id = vision_start_token_id
        self.vision_end_token_id = vision_end_token_id
        self.image_token_id = image_token_id
        self.video_token_id = video_token_id
        self.tie_word_embeddings = tie_word_embeddings
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.num_image_tokens = 32
        # Total sequence: text tokens plus the image placeholder tokens.
        self.seq_length = seq_length + self.num_image_tokens

    def get_config(self):
        """Return a full composite config built from the text/vision sub-configs."""
        return Qwen3_5MoeConfig(
            text_config=self.text_config,
            vision_config=self.vision_config,
            image_token_id=self.image_token_id,
            video_token_id=self.video_token_id,
            vision_start_token_id=self.vision_start_token_id,
            vision_end_token_id=self.vision_end_token_id,
            tie_word_embeddings=self.tie_word_embeddings,
        )

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values) with flattened-patch pixel values."""
        config = self.get_config()
        patch_size = config.vision_config.patch_size
        temporal_patch_size = config.vision_config.temporal_patch_size
        # Shape: [num_patches_total, channels * patch_area * temporal_patch_size]
        pixel_values = floats_tensor(
            [
                self.batch_size * (self.image_size**2) // (patch_size**2),
                self.num_channels * (patch_size**2) * temporal_patch_size,
            ]
        )
        return config, pixel_values

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) with input ids laid out around image placeholders."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
        input_ids[:, -1] = self.pad_token_id
        # Scrub any randomly sampled special tokens, then place them deterministically.
        input_ids[input_ids == self.video_token_id] = self.pad_token_id
        input_ids[input_ids == self.image_token_id] = self.pad_token_id
        input_ids[input_ids == self.vision_start_token_id] = self.pad_token_id
        input_ids[:, self.num_image_tokens] = self.image_token_id
        input_ids[:, self.num_image_tokens - 1] = self.vision_start_token_id
        inputs_dict = {
            "pixel_values": pixel_values,
            "image_grid_thw": torch.tensor([[1, 1, 1]] * self.batch_size, device=torch_device),
            "input_ids": input_ids,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict
@require_torch
class Qwen3_5MoeModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
    """
    Model tester for `Qwen3_5MoeForConditionalGeneration`.
    """
    all_model_classes = (
        (
            Qwen3_5MoeModel,
            Qwen3_5MoeForConditionalGeneration,
        )
        if is_torch_available()
        else ()
    )
    def setUp(self):
        # has_text_modality=False: the top-level config is a composite VLM config.
        self.model_tester = Qwen3_5MoeVisionText2TextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=Qwen3_5MoeConfig, has_text_modality=False)
    def test_config(self):
        self.config_tester.run_common_tests()
    def _check_past_key_values_for_generate(self, batch_size, past_key_values, seq_length, config):
        "Qwen3.5 Moe has a special Cache as it alternates with gated deltanet layers"
        self.assertIsInstance(past_key_values, Qwen3_5MoeDynamicCache)
        # (batch, kv heads, seq_length, head_dim)
        num_heads = getattr(config, "num_key_value_heads", config.num_attention_heads)
        head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
        expected_shape = (batch_size, num_heads, seq_length, head_dim)
        # Only the full-attention layer indices carry key/value caches.
        attention_layer_indices = past_key_values.transformer_layers
        self.assertListEqual(
            [past_key_values.key_cache[idx].shape for idx in attention_layer_indices],
            [expected_shape] * len(attention_layer_indices),
        )
        self.assertListEqual(
            [past_key_values.value_cache[idx].shape for idx in attention_layer_indices],
            [expected_shape] * len(attention_layer_indices),
        )
    def _check_caches_are_equal(self, cache1, cache2):
        "Qwen3.5 Moe has a special Cache as it alternates with gated deltanet layers"
        if not len(cache1) == len(cache2):
            raise ValueError("Both caches do not have the same number of layers.")
        num_layers = len(cache1)
        for idx in range(num_layers):
            # Deltanet layers have no key/value entries, so skip the None slots.
            if cache1.key_cache[idx] is not None:
                torch.testing.assert_close(cache1.key_cache[idx], cache2.key_cache[idx])
                torch.testing.assert_close(cache1.value_cache[idx], cache2.value_cache[idx])
    def test_attention_outputs(self):
        "Needs to be overwritten as Qwen3.5 Moe alternates between attention layers and gated deltanet layers."
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        # force eager attention to support output attentions
        config._attn_implementation = "eager"
        seq_len = getattr(self.model_tester, "seq_length", None)
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class._from_config(config, attn_implementation="eager")
            config = model.config
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            # One attention map per full-attention layer (text sub-config drives the layer types).
            self.assertEqual(
                len(attentions), sum(layer == "full_attention" for layer in config.text_config.layer_types)
            )
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.text_config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(
                len(attentions), sum(layer == "full_attention" for layer in config.text_config.layer_types)
            )
            self.assertListEqual(
                list(attentions[0].shape[-3:]), [config.text_config.num_attention_heads, seq_len, seq_len]
            )
            out_len = len(outputs)
            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            self_attentions = outputs.attentions
            # Requesting hidden states adds exactly one extra entry to the output tuple.
            self.assertEqual(out_len + 1, len(outputs))
            self.assertEqual(
                len(self_attentions), sum(layer == "full_attention" for layer in config.text_config.layer_types)
            )
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]), [config.text_config.num_attention_heads, seq_len, seq_len]
            )
    @unittest.skip("The specific cache format cannot be instantiated from dp/ddp data.")
    def test_multi_gpu_data_parallel_forward(self):
        pass
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/qwen3_5_moe/test_modeling_qwen3_5_moe.py",
"license": "Apache License 2.0",
"lines": 427,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:src/transformers/utils/output_capturing.py | # Copyright 2026 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Contains the logic for automatic additional output capture with our forward decorators.
This mostly describe the hooks used and the logic to make capture thread/context safe.
"""
from __future__ import annotations
import threading
from contextvars import ContextVar
from dataclasses import dataclass
from functools import wraps
from typing import TYPE_CHECKING
from .import_utils import is_torchdynamo_compiling, requires
if TYPE_CHECKING:
from torch import nn
from ..modeling_utils import PreTrainedModel
# Maps a model class (stringified) to its `_can_record_outputs` capture flags.
# Populated elsewhere; read by `install_all_output_capturing_hooks` and `capture_outputs`.
_CAN_RECORD_REGISTRY = {}
@dataclass
@requires(backends=("torch",))
class OutputRecorder:
    """
    Configuration for recording outputs from a model via hooks.
    Attributes:
        target_class (Type): The class (e.g., nn.Module) to which the hook will be attached.
        index (Optional[int]): If the output is a tuple/list, optionally record only at a specific index.
        layer_name (Optional[str]): Name of the submodule to target (if needed), e.g., "transformer.layer.3.attn".
        class_name (Optional[str]): Name of the class to which the hook will be attached. Could be the suffix of class name in some cases.
    """
    # Class to match with `isinstance` when walking the module tree.
    target_class: type[nn.Module]
    # Tuple index of the output element to record (0 by default).
    index: int = 0
    # Optional substring that must appear in the module's qualified name.
    layer_name: str | None = None
    # Optional class-name suffix match, used when `target_class` is unavailable (e.g. multimodal backbones).
    class_name: str | None = None
class CompileableContextVar:
    """
    Convenience wrapper around a ContextVar for usage with `torch.compile`.

    This behaves exactly as a `ContextVar`, except when compilation is triggered, in which case it behaves as a
    simple global variable. This is useful as `torch.compile` cannot trace the `get` method of `ContextVar`. This
    however means that access to the underlying variable is not thread-safe when compilation is triggered.
    """

    def __init__(self, name, default):
        self.context_var = ContextVar(name, default=default)
        # Remember the construction default so `reset` can restore it in compile mode.
        self.default = default
        self.global_var = default
        self.compiling = False

    def get(self):
        """Return the current value (global fast-path under compilation, ContextVar otherwise)."""
        # Set was called before and compilation was already detected
        if self.compiling:
            return self.global_var
        # Set was maybe never called, so still check it here
        if is_torchdynamo_compiling():
            # Bug fix: this used to write `self.is_compiling`, a never-read attribute,
            # so the compile fast-path above was never latched from `get`.
            self.compiling = True
            return self.global_var
        return self.context_var.get()

    def set(self, value):
        """Set the value; returns a reset token outside compilation, `None` under compilation."""
        if is_torchdynamo_compiling():
            self.global_var = value
            self.compiling = True
            return None
        return self.context_var.set(value)

    def reset(self, token):
        """Undo a previous `set`. Under compilation, restore the construction default."""
        if self.compiling:
            # Restore the original default instead of hard-coding `None`.
            self.global_var = self.default
            self.compiling = False
        else:
            self.context_var.reset(token)
# Thread/context-safe holder for the dict currently collecting outputs (or None when
# no capture is active). Hooks read it via `.get()`; `capture_outputs` sets/resets it.
_active_collector = CompileableContextVar("output_collector", default=None)
def install_output_capuring_hook(module: nn.Module, key: str, index: int) -> None:
    """Register a forward hook on `module` that records the output described by `key` and `index`."""

    def _capture(hooked_module, hook_args, hook_output):
        # Fetch the thread-local collector; a missing collector or key means the hook is dormant.
        active = _active_collector.get()
        if active is None:
            return
        bucket = active.get(key)
        if bucket is None:
            return
        # For hidden states, the very first recorded entry is the layer *input* (the embeddings).
        if key == "hidden_states" and not bucket:
            bucket.append(hook_args[0])
        if isinstance(hook_output, tuple):
            selected = hook_output[index]
            if selected is not None:
                bucket.append(selected)
        else:
            bucket.append(hook_output)

    module.register_forward_hook(_capture)
def recursively_install_hooks(
    parent_module: nn.Module, module_name: str, capture_tasks: list[tuple[str, OutputRecorder]]
) -> None:
    """
    Recursively install all output capturing hooks on all submodules of `parent_module`.
    Note that we need to use this recursive approach instead of simply iterating over all modules, because we want
    to respect the `capture_tasks` of all individual submodels (`PreTrainedModel` instances) in the graph. That is, once
    we reach a submodel in the graph, its children should use this submodel's `capture_tasks`, but other parts of the graph
    should not.
    """
    # Imported lazily to avoid a circular import with modeling_utils.
    from ..modeling_utils import PreTrainedModel
    # First dispatch to children if needed
    for name, module in parent_module.named_children():
        # Keep dispatching the same `capture_tasks`
        if not isinstance(module, PreTrainedModel):
            recursively_install_hooks(module, f"{module_name}.{name}", capture_tasks)
        # New Submodel: we need to dispatch its own `capture_tasks`
        else:
            install_all_output_capturing_hooks(module, prefix=f"{module_name}.{name}")
    # Potentially install the hook on current `parent_module`
    for key, specs in capture_tasks:
        # The second check is for multimodals where only backbone layer suffix is available
        if (specs.target_class is not None and isinstance(parent_module, specs.target_class)) or (
            specs.class_name is not None and module_name.endswith(specs.class_name)
        ):
            # An optional layer-name filter restricts the hook to matching submodule paths.
            if specs.layer_name is not None and specs.layer_name not in module_name:
                continue
            install_output_capuring_hook(parent_module, key, specs.index)
def install_all_output_capturing_hooks(model: PreTrainedModel, prefix: str | None = None) -> None:
    """
    Install the output recording hooks on all the modules in `model`. This will take care of correctly dispatching
    the `_can_record_outputs` property of each individual submodels in case of composite models.
    """
    # _can_record_outputs is None by default
    capture_flags = _CAN_RECORD_REGISTRY.get(str(model.__class__)) or {}  # there is a weak ref for executorch
    capture_tasks = []
    for key, layer_specs in capture_flags.items():
        if not isinstance(layer_specs, list):
            layer_specs = [layer_specs]
        for specs in layer_specs:
            # Normalize bare classes / class-name strings into OutputRecorder entries.
            if not isinstance(specs, OutputRecorder):
                # Hidden states live at tuple index 0; attentions at index 1 by convention here.
                index = 0 if "hidden_states" in key else 1
                class_name = None if not isinstance(specs, str) else specs
                target_class = specs if not isinstance(specs, str) else None
                specs = OutputRecorder(target_class=target_class, index=index, class_name=class_name)
            capture_tasks.append((key, specs))
    # Install the hooks
    prefix = prefix if prefix is not None else ""
    recursively_install_hooks(model, prefix, capture_tasks)
    # Mark the model as already hooked
    setattr(model, "_output_capturing_hooks_installed", True)
# Guards hook installation so concurrent threads cannot install the hooks twice
# on the same model (see `maybe_install_capturing_hooks` for the double-check).
_hook_installation_lock = threading.Lock()
def maybe_install_capturing_hooks(model: PreTrainedModel) -> None:
    """
    Install the output capturing hooks on `model` if they are not installed yet.

    Thread-safe: uses double-checked locking so that several threads requesting
    installation concurrently result in exactly one installation.
    """
    # Fast path: already hooked, nothing to do.
    if getattr(model, "_output_capturing_hooks_installed", False):
        return
    with _hook_installation_lock:
        # Re-check under the lock: another thread may have installed the hooks
        # between our first check and acquiring the lock.
        if not getattr(model, "_output_capturing_hooks_installed", False):
            # This installs the hooks and flags the model as hooked.
            install_all_output_capturing_hooks(model)
def capture_outputs(func=None, *, tie_last_hidden_states=True):
    """
    Decorator to intercept specific layer outputs through hooks. The hooks are installed only once and lazily,
    the first time output capture is requested with the `output_xxx` kwargs/config.
    The implementation is fully context/thread safe, except when using `torch.compile`, as dynamo is unable to trace
    through `ContextVar` methods.
    Args:
        tie_last_hidden_states (`bool`, *optional*, defaults to `True`):
            Whether to overwrite `out.hidden_states[-1]` with the `out.last_hidden_state`.
            This is true for all language models and should be toggled off only if
            `out.hidden_states[-1]` has to be the hidden state before last layer norm, which
            is needed for some vision models (e.g. CLIP, SigLIP)
    """
    def wrapped_fn(func):
        @wraps(func)
        def wrapper(self, *args, **kwargs):
            # Pop it so that internal modules always return a dict even if False is requested
            return_dict = kwargs.pop("return_dict", getattr(self.config, "return_dict", True))
            # _can_record_outputs is None by default
            capturable_flags = _CAN_RECORD_REGISTRY.get(str(self.__class__)) or {}
            # For each capturable key, resolve the `output_xxx` request (kwarg wins over config).
            recordable_keys = {
                f"output_{k}": kwargs.get(f"output_{k}", getattr(self.config, f"output_{k}", False))
                for k in capturable_flags
            }
            # For BC as cross-attentions used to be captured with `output_attentions`
            if "cross_attentions" in capturable_flags:
                recordable_keys["output_cross_attentions"] = kwargs.get(
                    "output_attentions", getattr(self.config, "output_attentions", False)
                )
            # The sam model variants need this annoying exception as well...
            if "mask_decoder_attentions" in capturable_flags:
                recordable_keys["output_mask_decoder_attentions"] = kwargs.get(
                    "output_attentions", getattr(self.config, "output_attentions", False)
                )
            # One empty list per requested key; hooks append captured tensors into these.
            collected_outputs = {k.replace("output_", ""): [] for k, v in recordable_keys.items() if v}
            # Make sure hooks are installed if we need to collect outputs
            if len(collected_outputs) > 0:
                maybe_install_capturing_hooks(self)
            # Let's activate the output collector hooks if needed!
            output_token = _active_collector.set(collected_outputs)
            # Run the forward
            try:
                outputs = func(self, *args, **kwargs)
            # Reset the states
            finally:
                _active_collector.reset(output_token)
            # Inject collected outputs into model output (return everything as tuples for BC)
            for key in collected_outputs:
                if key == "hidden_states":
                    if not tie_last_hidden_states:
                        pass
                    # Replace the last captured hidden state with the model's final one (post layer norm).
                    elif hasattr(outputs, "vision_hidden_states"):
                        collected_outputs[key] = collected_outputs[key][:-1]
                        collected_outputs[key].append(outputs.vision_hidden_states)
                    elif hasattr(outputs, "last_hidden_state"):
                        collected_outputs[key] = collected_outputs[key][:-1]
                        collected_outputs[key].append(outputs.last_hidden_state)
                    outputs[key] = tuple(collected_outputs[key])
                elif key == "attentions":
                    # In this case, the second item are cross attentions
                    if isinstance(capturable_flags[key], list) and len(capturable_flags[key]) == 2:
                        # Captures alternate self/cross attention; de-interleave them.
                        outputs[key] = tuple(collected_outputs[key][0::2])
                        outputs["cross_" + key] = tuple(collected_outputs[key][1::2])
                    else:
                        outputs[key] = tuple(collected_outputs[key])
                else:
                    outputs[key] = tuple(collected_outputs[key])
            if return_dict is False:
                outputs = outputs.to_tuple()
            return outputs
        return wrapper
    # Support both bare `@capture_outputs` and parameterized `@capture_outputs(...)` usage.
    if func is not None:
        return wrapped_fn(func)
    return wrapped_fn
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/utils/output_capturing.py",
"license": "Apache License 2.0",
"lines": 239,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/vibevoice_acoustic_tokenizer/configuration_vibevoice_acoustic_tokenizer.py | # Copyright 2026 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ...configuration_utils import PretrainedConfig
class VibeVoiceAcousticTokenizerConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`VibeVoiceAcousticTokenizerModel`]. It is used to
    instantiate a VibeVoice acoustic tokenizer model according to the specified arguments, defining the model
    architecture. Instantiating a configuration with the defaults will yield a similar configuration of the acoustic
    tokenizer within the VibeVoice architecture.
    e.g. [microsoft/VibeVoice-1.5B](https://huggingface.co/microsoft/VibeVoice-1.5B)
    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.
    Args:
        channels (`int`, *optional*, defaults to 1):
            Number of input channels.
        hidden_size (`int`, *optional*, defaults to 64):
            Dimensionality of latent representations.
        kernel_size (`int`, *optional*, defaults to 7):
            Kernel size for convolutional layers.
        rms_norm_eps (`float`, *optional*, defaults to 1e-05):
            Epsilon value for RMSNorm layers.
        layer_scale_init_value (`float`, *optional*, defaults to 1e-06):
            Initial value for layer scaling.
        initializer_range (`float`, *optional*, defaults to 0.01):
            Standard deviation for weight initialization.
        num_filters (`int`, *optional*, defaults to 32):
            Number of filters in initial convolutional layer, and doubles after each downsampling.
        downsampling_ratios (`List[int]`, *optional*, defaults to `[2, 2, 4, 5, 5, 8]`):
            Downsampling ratios for each layer.
        depths (`List[int]`, *optional*, defaults to `[3, 3, 3, 3, 3, 3, 8]`):
            Number of ConvNeXt blocks at each stage.
        hidden_act (`str`, *optional*, defaults to `"gelu"`):
            Activation function to use.
        ffn_expansion (`int`, *optional*, defaults to 4):
            Expansion factor for feed-forward networks.
        vae_std (`float`, *optional*, defaults to 0.625):
            Standard deviation used for VAE sampling after encoder.
    Example:
    ```python
    >>> from transformers import VibeVoiceAcousticTokenizerModel, VibeVoiceAcousticTokenizerConfig
    >>> # Initializing a VibeVoice Acoustic Tokenizer configuration
    >>> configuration = VibeVoiceAcousticTokenizerConfig()
    >>> # Initializing a model (with random weights)
    >>> model = VibeVoiceAcousticTokenizerModel(configuration)
    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "vibevoice_acoustic_tokenizer"

    def __init__(
        self,
        channels=1,
        hidden_size=64,
        kernel_size=7,
        rms_norm_eps=1e-5,
        layer_scale_init_value=1e-6,
        initializer_range=1e-2,
        # `None` sentinels avoid sharing one mutable default list across all instances;
        # the documented defaults are applied below.
        downsampling_ratios=None,
        depths=None,
        hidden_act="gelu",
        ffn_expansion=4,
        vae_std=0.625,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.channels = channels
        self.hidden_size = hidden_size
        self.hidden_act = hidden_act
        self.kernel_size = kernel_size
        self.rms_norm_eps = rms_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.ffn_expansion = ffn_expansion
        self.initializer_range = initializer_range
        self.num_filters = num_filters
        self.downsampling_ratios = [2, 2, 4, 5, 5, 8] if downsampling_ratios is None else downsampling_ratios
        self.depths = [3, 3, 3, 3, 3, 3, 8] if depths is None else depths
        self.vae_std = vae_std

    @property
    def upsampling_ratios(self):
        # The decoder mirrors the encoder, so it upsamples in reverse order.
        return self.downsampling_ratios[::-1]

    @property
    def decoder_depths(self):
        # Decoder stage depths mirror the encoder stage depths.
        return self.depths[::-1]


__all__ = ["VibeVoiceAcousticTokenizerConfig"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/vibevoice_acoustic_tokenizer/configuration_vibevoice_acoustic_tokenizer.py",
"license": "Apache License 2.0",
"lines": 95,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/vibevoice_acoustic_tokenizer/convert_vibevoice_acoustic_tokenizer_to_hf.py | # Copyright 2026 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import gc
import json
import logging
import re
from typing import Any
import torch
from safetensors.torch import load_file
from transformers import (
AutoFeatureExtractor,
AutoModel,
VibeVoiceAcousticTokenizerConfig,
VibeVoiceAcousticTokenizerFeatureExtractor,
VibeVoiceAcousticTokenizerModel,
)
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO, format="%(levelname)s: %(message)s")
# fmt: off
STATE_DICT_MAPPING = {
    # Encoder
    r"^model\.acoustic_tokenizer\.encoder\.downsample_layers\.0\.0\.conv\.": r"encoder.stem.conv.conv.",
    r"^model\.acoustic_tokenizer\.encoder\.stages\.0\.": r"encoder.stem.stage.",
    r"^model\.acoustic_tokenizer\.encoder\.downsample_layers\.(\d+)\.0\.conv\.": (r"encoder.conv_layers.\1.conv.conv.", -1),
    r"^model\.acoustic_tokenizer\.encoder\.stages\.(\d+)\.": (r"encoder.conv_layers.\1.stage.", -1),
    r"^model\.acoustic_tokenizer\.encoder\.head\.conv\.": r"encoder.head.",
    # Decoder
    r"^model\.acoustic_tokenizer\.decoder\.upsample_layers\.0\.0\.conv\.conv\.": r"decoder.stem.conv.conv.",
    r"^model\.acoustic_tokenizer\.decoder\.stages\.0\.": r"decoder.stem.stage.",
    r"^model\.acoustic_tokenizer\.decoder\.upsample_layers\.(\d+)\.0\.convtr\.convtr\.": (r"decoder.conv_layers.\1.convtr.convtr.", -1),
    r"^model\.acoustic_tokenizer\.decoder\.stages\.(\d+)\.": (r"decoder.conv_layers.\1.stage.", -1),
    r"^model\.acoustic_tokenizer\.decoder\.head\.conv\.": r"decoder.head.",
    # Common patterns (apply after specific patterns)
    r"mixer\.conv\.conv\.conv\.": r"mixer.conv.",
    r"\.conv\.conv\.conv\.": r".conv.conv.",
}
# fmt: on


def map_old_key_to_new(old_key: str) -> str:
    """Map a key from the original VibeVoice state dict to its HF equivalent.

    Patterns in `STATE_DICT_MAPPING` are applied in dict order, each on the result of
    the previous substitution. A tuple value `(replacement, shift)` means numeric
    capture groups are shifted by `shift` before substitution (the HF layout moves
    stage 0 into a dedicated stem, so remaining stage indices are off by one).
    Keys matching no pattern are returned unchanged.
    """
    new_key = old_key
    for pattern, replacement in STATE_DICT_MAPPING.items():
        if isinstance(replacement, tuple):
            replacement_pattern, index_shift = replacement

            # Bind the loop variables as defaults so the callback does not depend on
            # late-binding closure semantics (same behavior, safer idiom).
            def shift_index(match, replacement_pattern=replacement_pattern, index_shift=index_shift):
                result = replacement_pattern
                for i, group in enumerate(match.groups(), 1):
                    if group and group.isdigit():
                        result = result.replace(f"\\{i}", str(int(group) + index_shift))
                    else:
                        result = result.replace(f"\\{i}", group)
                return result

            # re.sub instead of re.subn: the match count was never used.
            new_key = re.sub(pattern, shift_index, new_key)
        else:
            new_key = re.sub(pattern, replacement, new_key)
    return new_key
def convert_state_dict(original_state_dict: dict[str, Any]) -> dict[str, Any]:
    """Return a copy of `original_state_dict` with every key renamed to the HF layout."""
    converted = {}
    for source_key, value in original_state_dict.items():
        target_key = map_old_key_to_new(source_key)
        if source_key != target_key:
            logger.debug(f"Converted: {source_key} -> {target_key}")
        converted[target_key] = value
    return converted
def convert_checkpoint(checkpoint, config_path, push_to_hub, bfloat16, processor_config=None):
    """Convert an original VibeVoice safetensors checkpoint into a HF acoustic tokenizer.

    Args:
        checkpoint: Path to the original safetensors checkpoint file.
        config_path: Path to the original `config.json`; must contain an
            `acoustic_tokenizer_config` section.
        push_to_hub: Hub repo id to upload the converted model and feature extractor
            to; if falsy, nothing is uploaded.
        bfloat16: Cast the model to bfloat16 when True, otherwise keep float32.
        processor_config: Optional path to `preprocessor_config.json`; its
            `audio_processor` section seeds the feature extractor config.

    Raises:
        ValueError: If the converted state dict has missing or unexpected keys.
    """
    if bfloat16:
        dtype = torch.bfloat16
    else:
        dtype = torch.float32
    # 1) Load state dict from safetensors checkpoint
    logger.info(f"Loading checkpoint from {checkpoint}")
    original_state_dict = load_file(checkpoint)
    # 2) Prepare feature extractor
    audio_config = {}
    if processor_config is not None:
        with open(processor_config, "r") as f:
            processor_config = json.load(f)
        audio_config = processor_config.get("audio_processor", {})
    # Fill in defaults for any keys the original preprocessor config did not provide.
    if "sampling_rate" not in audio_config:
        audio_config["sampling_rate"] = 24000
    if "normalize_audio" not in audio_config:
        audio_config["normalize_audio"] = True
    if "target_dB_FS" not in audio_config:
        audio_config["target_dB_FS"] = -25
    if "eps" not in audio_config:
        audio_config["eps"] = 1e-6
    feature_extractor = VibeVoiceAcousticTokenizerFeatureExtractor(**audio_config)
    # 3) Prepare model configuration
    with open(config_path, "r") as f:
        model_config = json.load(f)
    # Clean up acoustic tokenizer config
    acoustic_config_dict = model_config["acoustic_tokenizer_config"].copy()
    # Original stores depths as a "3-3-3-..." string; parse it into a list of ints.
    if "encoder_depths" in acoustic_config_dict and isinstance(acoustic_config_dict["encoder_depths"], str):
        acoustic_config_dict["encoder_depths"] = list(map(int, acoustic_config_dict["encoder_depths"].split("-")))
    # Rename original config keys to the HF config's parameter names.
    if "layernorm_eps" in acoustic_config_dict:
        acoustic_config_dict["rms_norm_eps"] = acoustic_config_dict.pop("layernorm_eps")
    if "encoder_ratios" in acoustic_config_dict:
        acoustic_config_dict["downsampling_ratios"] = list(reversed(acoustic_config_dict.pop("encoder_ratios")))
    if "encoder_n_filters" in acoustic_config_dict:
        acoustic_config_dict["num_filters"] = acoustic_config_dict.pop("encoder_n_filters")
    if "encoder_depths" in acoustic_config_dict:
        acoustic_config_dict["depths"] = acoustic_config_dict.pop("encoder_depths")
    if "vae_dim" in acoustic_config_dict:
        acoustic_config_dict["hidden_size"] = acoustic_config_dict.pop("vae_dim")
    if "fix_std" in acoustic_config_dict:
        # Original hardcodes a scaling factor for vae_std
        acoustic_config_dict["vae_std"] = acoustic_config_dict.pop("fix_std") / 0.8
    # Remove unused/constant parameters
    for key in [
        "decoder_depths",
        "decoder_n_filters",
        "decoder_ratios",
        "std_dist_type",
        "pad_mode",
        "conv_bias",
        "causal",
        "mixer_layer",
        "layernorm",
        "disable_last_norm",
        "conv_norm",
        "corpus_normalize",
        "layernorm_elementwise_affine",
    ]:
        acoustic_config_dict.pop(key, None)
    # 4) Convert state dict to match HF model structure
    logger.info("Converting state dict")
    converted_state_dict = convert_state_dict(original_state_dict)
    # 5) Filter for acoustic tokenizer weights
    acoustic_state_dict = {
        k: v for k, v in converted_state_dict.items() if k.startswith("encoder.") or k.startswith("decoder.")
    }
    # 6) Create and save acoustic tokenizer
    logger.info("Creating acoustic tokenizer model")
    acoustic_config = VibeVoiceAcousticTokenizerConfig(**acoustic_config_dict)
    acoustic_model = VibeVoiceAcousticTokenizerModel(acoustic_config).to(dtype)
    # Load weights into HF model
    logger.info("Loading weights into model")
    # strict=False so we can report missing/unexpected keys ourselves below.
    missing, unexpected = acoustic_model.load_state_dict(acoustic_state_dict, strict=False)
    if len(unexpected) != 0:
        raise ValueError(f"Unexpected keys: {unexpected}")
    if len(missing) != 0:
        raise ValueError(f"Missing keys: {missing}")
    if push_to_hub:
        logger.info(f"Pushing to hub as {push_to_hub}")
        feature_extractor.push_to_hub(push_to_hub)
        acoustic_model.push_to_hub(push_to_hub)
        # Reclaim memory before re-downloading the model for verification.
        gc.collect()
        logger.info("Verifying conversion by reloading model")
        AutoFeatureExtractor.from_pretrained(push_to_hub)
        AutoModel.from_pretrained(push_to_hub, dtype=torch.bfloat16, device_map="auto")
        logger.info("Model reloaded successfully!")
    logger.info("Conversion complete!")
"""
Conversion script to extract the acoustic tokenizer from the original VibeVoice model checkpoint and push a checkpoint
for a `VibeVoiceAcousticTokenizerModel` object.
1) download 1.5B model.
```bash
# -- download checkpoint and configs
# -- download script here: https://gist.github.com/ebezzam/507dfd544e0a0f12402966503cbc73e6#file-download_vibevoice_checkpoint-py
python src/transformers/models/vibevoice/download_vibevoice_checkpoint.py
wget https://huggingface.co/microsoft/VibeVoice-1.5B/resolve/main/config.json -P /raid/eric/vibevoice
wget https://huggingface.co/microsoft/VibeVoice-1.5B/resolve/main/preprocessor_config.json -P /raid/eric/vibevoice
```
2) run conversion with:
```
python src/transformers/models/vibevoice_acoustic_tokenizer/convert_vibevoice_acoustic_tokenizer_to_hf.py \
--checkpoint /raid/eric/vibevoice/VibeVoice-1.5B-combined.safetensors \
--config_path /raid/eric/vibevoice/config.json \
--processor_config /raid/eric/vibevoice/preprocessor_config.json \
--push_to_hub bezzam/VibeVoice-AcousticTokenizer
```
A checkpoint will be pushed to `bezzam/VibeVoice-AcousticTokenizer` on the HF Hub.
"""
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # `required=True` already makes argparse reject a missing value, so the
    # redundant `default=None` is dropped.
    parser.add_argument(
        "--checkpoint", required=True, type=str, help="Original VibeVoice model checkpoint."
    )
    parser.add_argument("--config_path", default=None, type=str, help="Path to config.json of model to convert")
    parser.add_argument(
        "--processor_config", default=None, type=str, help="Path to preprocessor_config.json of model to convert"
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )
    parser.add_argument(
        "--float32", action="store_true", help="Whether to use float32 precision. Default is bfloat16."
    )
    args = parser.parse_args()
    convert_checkpoint(
        args.checkpoint,
        args.config_path,
        args.push_to_hub,
        bfloat16=not args.float32,
        processor_config=args.processor_config,
    )
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/vibevoice_acoustic_tokenizer/convert_vibevoice_acoustic_tokenizer_to_hf.py",
"license": "Apache License 2.0",
"lines": 210,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/vibevoice_acoustic_tokenizer/feature_extraction_vibevoice_acoustic_tokenizer.py | # Copyright 2026 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ...audio_utils import AudioInput, make_list_of_audio
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, logging
from ...utils.import_utils import is_torch_available, requires
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
@requires(backends=("torch",))
class VibeVoiceAcousticTokenizerFeatureExtractor(SequenceFeatureExtractor):
    r"""
    Constructs a VibeVoiceAcousticTokenizer feature extractor.
    Args:
        feature_size (`int`, *optional*, defaults to 1):
            The number of channels.
        sampling_rate (`int`, *optional*, defaults to 24000):
            The sampling rate at which the audio waveform should be digitalized, expressed in hertz (Hz).
        padding_value (`float`, *optional*, defaults to 0.0):
            The value that is used for padding.
        normalize_audio (`bool`, *optional*, defaults to `True`):
            Whether to normalize audio to a target dB FS.
        target_dB_FS (`float`, *optional*, defaults to -25):
            Target dB FS for normalization.
        eps (`float`, *optional*, defaults to 1e-06):
            A small value to avoid division by zero when normalizing.
    """

    # "padding_mask" is the renamed attention mask produced in `__call__`.
    model_input_names = ["input_values", "padding_mask"]

    def __init__(
        self,
        feature_size=1,
        sampling_rate=24000,
        padding_value=0.0,
        normalize_audio=True,
        target_dB_FS=-25,
        eps=1e-6,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.normalize_audio = normalize_audio
        self.target_dB_FS = target_dB_FS
        self.eps = eps

    def __call__(
        self,
        audio: AudioInput,
        sampling_rate: int | None = None,
        padding: bool | str | PaddingStrategy | None = True,
        pad_to_multiple_of: int | None = None,
        return_attention_mask: bool | None = True,
    ) -> BatchFeature:
        """
        Args:
            audio (`np.ndarray`, `torch.Tensor`, `list[np.ndarray]`, `list[torch.Tensor]`:
                The sequence or batch of sequences to be processed. Each sequence can be a numpy array, a torch tensor,
                a list of numpy arrays or a list of torch tensors.
            sampling_rate (`int`, *optional*):
                The sampling rate at which the `audio` input was sampled. It is strongly recommended to pass
                `sampling_rate` at the forward call to prevent silent errors.
            padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):
                Select a strategy to pad the returned sequences (according to the model's padding side and padding
                index) among:
                - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
                  sequence if provided).
                - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
                  acceptable input length for the model if that argument is not provided.
                - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
                  lengths).
            pad_to_multiple_of (`int`, *optional*):
                If set will pad the sequence to a multiple of the provided value.
        """
        # Reject mismatched sampling rates; only warn when none was provided.
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                f"It is strongly recommended to pass the `sampling_rate` argument to `{self.__class__.__name__}()`. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )
        # Ensure batch of mono tensors
        audio = make_list_of_audio(audio)
        for idx, example in enumerate(audio):
            # torch.tensor copies, so later in-place normalization never mutates caller data.
            example = torch.tensor(example, dtype=torch.float32)
            if example.ndim != 1:
                raise ValueError(f"Audio should be mono, got shape: {example.shape}")
            audio[idx] = example
        if self.normalize_audio:
            for idx, example in enumerate(audio):
                # Scale each example so its RMS level matches target_dB_FS.
                rms = torch.sqrt(torch.mean(example**2))
                example *= 10 ** (self.target_dB_FS / 20) / (rms + self.eps)
                # If gain pushed peaks beyond [-1, 1], rescale by the peak to avoid clipping.
                max_val = torch.max(torch.abs(example))
                if max_val > 1.0:
                    example = example / (max_val + self.eps)
                audio[idx] = example
        output_values = BatchFeature({"input_values": audio})
        if padding or pad_to_multiple_of:
            output_values = self.pad(
                output_values,
                padding=padding,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
            )
        if return_attention_mask:
            # This model consumes the mask under the name "padding_mask".
            output_values["padding_mask"] = output_values.pop("attention_mask")
        # add channel dimension
        output_values["input_values"] = output_values["input_values"][:, None, :]
        return output_values
__all__ = ["VibeVoiceAcousticTokenizerFeatureExtractor"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/vibevoice_acoustic_tokenizer/feature_extraction_vibevoice_acoustic_tokenizer.py",
"license": "Apache License 2.0",
"lines": 123,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/vibevoice_acoustic_tokenizer/modular_vibevoice_acoustic_tokenizer.py | # Copyright 2026 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import Optional
import torch
import torch.nn as nn
from ... import initialization as init
from ...activations import ACT2FN
from ...modeling_utils import PreTrainedModel
from ...utils import ModelOutput, auto_docstring, can_return_tuple
from ..llama.modeling_llama import LlamaRMSNorm
from ..mimi.modeling_mimi import MimiConv1dPaddingCache
from .configuration_vibevoice_acoustic_tokenizer import VibeVoiceAcousticTokenizerConfig
@dataclass
@auto_docstring
class VibeVoiceAcousticTokenizerOutput(ModelOutput):
    r"""
    audio (`torch.FloatTensor` of shape `(batch_size, channels, sequence_length)`):
        Decoded audio.
    latents (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
        Projected latents (continuous representations for acoustic tokens) at the output of the encoder.
    padding_cache (`VibeVoiceAcousticTokenizerConv1dPaddingCache`, *optional*, returned when `use_cache=True` is passed):
        A [`VibeVoiceAcousticTokenizerConv1dPaddingCache`] instance containing cached convolution states for each decoder
        layer that can be passed to subsequent forward calls.
    """

    # All fields default to None so the output remains valid when a field is not produced.
    audio: torch.FloatTensor | None = None
    latents: torch.FloatTensor | None = None
    padding_cache: Optional["VibeVoiceAcousticTokenizerConv1dPaddingCache"] = None
@dataclass
@auto_docstring
class VibeVoiceAcousticTokenizerEncoderOutput(ModelOutput):
    r"""
    latents (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
        Projected latents (continuous representations for acoustic tokens) at the output of the encoder.
    padding_cache (`VibeVoiceAcousticTokenizerConv1dPaddingCache`, *optional*, returned when `use_cache=True` is passed):
        A [`VibeVoiceAcousticTokenizerConv1dPaddingCache`] instance containing cached convolution states for each encoder
        layer that can be passed to subsequent forward calls.
    """

    # Defaults to None so the output remains valid when a field is not produced.
    latents: torch.FloatTensor | None = None
    padding_cache: Optional["VibeVoiceAcousticTokenizerConv1dPaddingCache"] = None
@dataclass
@auto_docstring
class VibeVoiceAcousticTokenizerDecoderOutput(ModelOutput):
    r"""
    audio (`torch.FloatTensor` of shape `(batch_size, channels, sequence_length)`):
        Decoded audio.
    padding_cache (`VibeVoiceAcousticTokenizerConv1dPaddingCache`, *optional*, returned when `use_cache=True` is passed):
        A [`VibeVoiceAcousticTokenizerConv1dPaddingCache`] instance containing cached convolution states for each decoder
        layer that can be passed to subsequent forward calls.
    """

    # Defaults to None so the output remains valid when a field is not produced.
    audio: torch.FloatTensor | None = None
    padding_cache: Optional["VibeVoiceAcousticTokenizerConv1dPaddingCache"] = None
class VibeVoiceAcousticTokenizerRMSNorm(LlamaRMSNorm):
    # Behaviorally identical to LlamaRMSNorm; subclassed only to expose the layer
    # under this model's name (modular-transformers convention).
    pass
class VibeVoiceAcousticTokenizerFeedForward(nn.Module):
    """Position-wise feed-forward block: expand by `ffn_expansion`, activate, project back."""

    def __init__(self, config, hidden_size):
        super().__init__()
        expanded_size = config.ffn_expansion * hidden_size
        self.linear1 = nn.Linear(hidden_size, expanded_size)
        self.activation = ACT2FN[config.hidden_act]
        self.linear2 = nn.Linear(expanded_size, hidden_size)

    def forward(self, hidden_states):
        expanded = self.activation(self.linear1(hidden_states))
        return self.linear2(expanded)
class VibeVoiceAcousticTokenizerConv1dPaddingCache(MimiConv1dPaddingCache):
    # Behaviorally identical to MimiConv1dPaddingCache; subclassed only to expose the
    # cache under this model's name (modular-transformers convention).
    pass
class VibeVoiceAcousticTokenizerCausalConv1d(nn.Module):
    """Conv1d that left-pads its input so outputs never depend on future samples.

    In streaming mode the left context comes from `padding_cache` (the tail of the
    previous chunk) instead of zeros, so chunked calls match the full-sequence output.
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: int,
        stride: int = 1,
        dilation: int = 1,
        groups: int = 1,
        layer_idx: int | None = None,
    ):
        super().__init__()
        self.conv = nn.Conv1d(in_channels, out_channels, kernel_size, stride, dilation=dilation, groups=groups)
        # Width of the left context required for causality.
        self.causal_padding = (kernel_size - 1) * dilation - (stride - 1)
        if self.causal_padding < 0:
            raise ValueError(
                f"Invalid causal padding {self.causal_padding} for kernel_size={kernel_size}, "
                f"dilation={dilation}, stride={stride}."
            )
        self.layer_idx = layer_idx

    def forward(
        self,
        hidden_states: torch.Tensor,
        padding_cache: "VibeVoiceAcousticTokenizerConv1dPaddingCache | None" = None,
    ) -> torch.Tensor:
        if padding_cache is None:
            batch_size, num_channels = hidden_states.shape[0], hidden_states.shape[1]
            left_context = hidden_states.new_zeros(batch_size, num_channels, self.causal_padding)
        else:
            left_context = padding_cache.update(hidden_states, self.layer_idx)
        padded = torch.cat([left_context, hidden_states], dim=-1)
        return self.conv(padded)
class VibeVoiceAcousticTokenizerCausalConvTranspose1d(nn.Module):
"""ConvTranspose1d with built-in causal padding and optional streaming support through a cache."""
def __init__(
self,
in_channels: int,
out_channels: int,
kernel_size: int,
stride: int = 1,
layer_idx: int | None = None,
):
super().__init__()
self.convtr = nn.ConvTranspose1d(in_channels, out_channels, kernel_size, stride)
self.stride = stride
self.layer_idx = layer_idx
self.padding_total = kernel_size - stride
self.causal_padding = kernel_size - 1
def forward(
self,
hidden_states: torch.Tensor,
padding_cache: Optional["VibeVoiceAcousticTokenizerConv1dPaddingCache"] = None,
) -> torch.Tensor:
time_dim = hidden_states.shape[-1]
if padding_cache is not None:
layer_padding = padding_cache.update(hidden_states, self.layer_idx)
hidden_states = torch.cat([layer_padding, hidden_states], dim=-1)
hidden_states = self.convtr(hidden_states)
# Remove extra padding at the right side
if self.padding_total > 0:
hidden_states = hidden_states[..., : -self.padding_total]
if padding_cache is not None and layer_padding.shape[2] != 0:
# For first chunk (layer_padding.shape[2] == 0) return full output
# for subsequent chunks return only new output
expected_new_output = time_dim * self.stride
if hidden_states.shape[2] >= expected_new_output:
hidden_states = hidden_states[:, :, -expected_new_output:]
return hidden_states
class VibeVoiceAcousticTokenizerConvNext1dLayer(nn.Module):
    """ConvNeXt-like block adapted for 1D convolutions.

    Two pre-normed residual branches: a depthwise causal conv ("mixer") and a
    position-wise feed-forward, each scaled by a learned per-channel gamma.
    """

    def __init__(self, config, hidden_size, dilation=1, stride=1, layer_idx=None):
        super().__init__()
        init_scale = config.layer_scale_init_value
        self.norm = VibeVoiceAcousticTokenizerRMSNorm(hidden_size, eps=config.rms_norm_eps)
        self.ffn_norm = VibeVoiceAcousticTokenizerRMSNorm(hidden_size, eps=config.rms_norm_eps)
        self.ffn = VibeVoiceAcousticTokenizerFeedForward(config, hidden_size)
        self.gamma = nn.Parameter(init_scale * torch.ones(hidden_size), requires_grad=True)
        self.ffn_gamma = nn.Parameter(init_scale * torch.ones(hidden_size), requires_grad=True)
        # groups=hidden_size makes the mixer a depthwise convolution.
        self.mixer = VibeVoiceAcousticTokenizerCausalConv1d(
            in_channels=hidden_size,
            out_channels=hidden_size,
            kernel_size=config.kernel_size,
            groups=hidden_size,
            dilation=dilation,
            stride=stride,
            layer_idx=layer_idx,
        )

    def forward(self, hidden_states, padding_cache=None):
        # Depthwise mixer branch; RMSNorm expects channels last, conv expects channels first.
        mixer_input = self.norm(hidden_states.transpose(1, 2)).transpose(1, 2)
        mixer_output = self.mixer(mixer_input, padding_cache=padding_cache)
        hidden_states = hidden_states + mixer_output * self.gamma.unsqueeze(-1)
        # Feed-forward branch.
        ffn_input = self.ffn_norm(hidden_states.transpose(1, 2))
        ffn_output = self.ffn(ffn_input).transpose(1, 2)
        return hidden_states + ffn_output * self.ffn_gamma.unsqueeze(-1)
class VibeVoiceAcousticTokenizerEncoderStem(nn.Module):
    """Encoder input stem: one causal conv followed by the first stack of ConvNeXt blocks."""

    def __init__(self, config):
        super().__init__()
        self.conv = VibeVoiceAcousticTokenizerCausalConv1d(
            in_channels=config.channels,
            out_channels=config.num_filters,
            kernel_size=config.kernel_size,
            layer_idx=0,
        )
        # layer_idx 0 belongs to the stem conv, so blocks start at 1.
        blocks = [
            VibeVoiceAcousticTokenizerConvNext1dLayer(config, hidden_size=config.num_filters, layer_idx=idx)
            for idx in range(1, config.depths[0] + 1)
        ]
        self.stage = nn.ModuleList(blocks)

    def forward(self, hidden_states, padding_cache=None):
        hidden_states = self.conv(hidden_states, padding_cache=padding_cache)
        for convnext_block in self.stage:
            hidden_states = convnext_block(hidden_states, padding_cache=padding_cache)
        return hidden_states
class VibeVoiceAcousticTokenizerEncoderLayer(nn.Module):
    """One encoder stage: a strided causal conv (downsampling) followed by ConvNeXt blocks."""

    def __init__(self, config, stage_idx):
        super().__init__()
        depth_idx = stage_idx + 1  # first depth is for stem layer
        # Flat layer index across the whole encoder (each prior stage owns depth + 1 convs).
        layer_idx = sum(depth + 1 for depth in config.depths[:depth_idx])
        out_channels = int(config.num_filters * (2 ** (depth_idx)))
        ratio = config.downsampling_ratios[stage_idx]
        self.conv = VibeVoiceAcousticTokenizerCausalConv1d(
            in_channels=int(config.num_filters * (2**stage_idx)),
            out_channels=out_channels,
            kernel_size=int(ratio * 2),
            stride=ratio,
            layer_idx=layer_idx,
        )
        blocks = [
            VibeVoiceAcousticTokenizerConvNext1dLayer(config, hidden_size=out_channels, layer_idx=layer_idx + offset)
            for offset in range(1, config.depths[depth_idx] + 1)
        ]
        self.stage = nn.ModuleList(blocks)

    def forward(self, hidden_states, padding_cache=None):
        hidden_states = self.conv(hidden_states, padding_cache=padding_cache)
        for convnext_block in self.stage:
            hidden_states = convnext_block(hidden_states, padding_cache=padding_cache)
        return hidden_states
class VibeVoiceAcousticTokenizerEncoder(nn.Module):
    """Full encoder: stem, downsampling stages, and a projection head to the latent size."""

    def __init__(self, config):
        super().__init__()
        num_stages = len(config.downsampling_ratios)
        self.stem = VibeVoiceAcousticTokenizerEncoderStem(config)
        self.conv_layers = nn.ModuleList(
            VibeVoiceAcousticTokenizerEncoderLayer(config, stage_idx) for stage_idx in range(num_stages)
        )
        self.head = VibeVoiceAcousticTokenizerCausalConv1d(
            in_channels=int(config.num_filters * (2**num_stages)),
            out_channels=config.hidden_size,
            kernel_size=config.kernel_size,
            layer_idx=sum(depth + 1 for depth in config.depths),
        )

    def forward(self, hidden_states, padding_cache=None):
        hidden_states = self.stem(hidden_states, padding_cache=padding_cache)
        for stage in self.conv_layers:
            hidden_states = stage(hidden_states, padding_cache=padding_cache)
        hidden_states = self.head(hidden_states, padding_cache=padding_cache)
        # (batch, channels, time) -> (batch, time, channels)
        return hidden_states.permute(0, 2, 1)
class VibeVoiceAcousticTokenizerDecoderStem(nn.Module):
    """Decoder input stem: projects latents up to the widest channel count, then ConvNeXt blocks."""

    def __init__(self, config):
        super().__init__()
        stem_channels = int(config.num_filters * 2 ** (len(config.decoder_depths) - 1))
        self.conv = VibeVoiceAcousticTokenizerCausalConv1d(
            in_channels=config.hidden_size,
            out_channels=stem_channels,
            kernel_size=config.kernel_size,
            layer_idx=0,
        )
        # layer_idx 0 belongs to the stem conv, so blocks start at 1.
        blocks = [
            VibeVoiceAcousticTokenizerConvNext1dLayer(config, hidden_size=stem_channels, layer_idx=idx)
            for idx in range(1, config.decoder_depths[0] + 1)
        ]
        self.stage = nn.ModuleList(blocks)

    def forward(self, hidden_states, padding_cache=None):
        hidden_states = self.conv(hidden_states, padding_cache=padding_cache)
        for convnext_block in self.stage:
            hidden_states = convnext_block(hidden_states, padding_cache=padding_cache)
        return hidden_states
class VibeVoiceAcousticTokenizerDecoderLayer(nn.Module):
    """One decoder stage: a transposed causal conv (upsampling) followed by ConvNeXt blocks."""

    def __init__(self, config, stage_idx):
        super().__init__()
        depth_idx = stage_idx + 1  # first depth is for stem layer
        # Flat layer index across the whole decoder (each prior stage owns depth + 1 convs).
        layer_idx = sum(depth + 1 for depth in config.decoder_depths[:depth_idx])
        num_stages = len(config.decoder_depths)
        out_channels = int(config.num_filters * (2 ** (num_stages - 2 - stage_idx)))
        ratio = config.upsampling_ratios[stage_idx]
        self.convtr = VibeVoiceAcousticTokenizerCausalConvTranspose1d(
            in_channels=int(config.num_filters * (2 ** (num_stages - 1 - stage_idx))),
            out_channels=out_channels,
            kernel_size=int(ratio * 2),
            stride=ratio,
            layer_idx=layer_idx,
        )
        blocks = [
            VibeVoiceAcousticTokenizerConvNext1dLayer(config, hidden_size=out_channels, layer_idx=layer_idx + offset)
            for offset in range(1, config.decoder_depths[depth_idx] + 1)
        ]
        self.stage = nn.ModuleList(blocks)

    def forward(self, hidden_states, padding_cache=None):
        hidden_states = self.convtr(hidden_states, padding_cache=padding_cache)
        for convnext_block in self.stage:
            hidden_states = convnext_block(hidden_states, padding_cache=padding_cache)
        return hidden_states
class VibeVoiceAcousticTokenizerDecoder(nn.Module):
    """Full decoder: stem, upsampling stages, and a head projecting back to audio channels."""

    def __init__(self, config):
        super().__init__()
        self.stem = VibeVoiceAcousticTokenizerDecoderStem(config)
        self.conv_layers = nn.ModuleList(
            VibeVoiceAcousticTokenizerDecoderLayer(config, stage_idx)
            for stage_idx in range(len(config.upsampling_ratios))
        )
        self.head = VibeVoiceAcousticTokenizerCausalConv1d(
            in_channels=config.num_filters,
            out_channels=config.channels,
            kernel_size=config.kernel_size,
            layer_idx=sum(depth + 1 for depth in config.decoder_depths),
        )

    def forward(self, hidden_states, padding_cache=None):
        hidden_states = self.stem(hidden_states, padding_cache=padding_cache)
        for stage in self.conv_layers:
            hidden_states = stage(hidden_states, padding_cache=padding_cache)
        return self.head(hidden_states, padding_cache=padding_cache)
@auto_docstring
class VibeVoiceAcousticTokenizerPreTrainedModel(PreTrainedModel):
    config: VibeVoiceAcousticTokenizerConfig
    base_model_prefix = "vibevoice_acoustic_tokenizer"
    main_input_name = "input_values"
    _no_split_modules = ["VibeVoiceAcousticTokenizerEncoder", "VibeVoiceAcousticTokenizerDecoder"]

    def _init_weights(self, module):
        # Default PreTrainedModel init first, then reset the ConvNeXt layer-scale
        # parameters to their configured constant.
        super()._init_weights(module)
        if isinstance(module, VibeVoiceAcousticTokenizerConvNext1dLayer):
            init.constant_(module.gamma, self.config.layer_scale_init_value)
            init.constant_(module.ffn_gamma, self.config.layer_scale_init_value)
@auto_docstring(
    custom_intro="""
    VibeVoice acoustic tokenizer with an encoder and decoder for continuous acoustic tokens.
    """
)
class VibeVoiceAcousticTokenizerModel(VibeVoiceAcousticTokenizerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.encoder = VibeVoiceAcousticTokenizerEncoder(config)
        self.decoder = VibeVoiceAcousticTokenizerDecoder(config)
        self.post_init()

    def _build_padding_cache(self, coder):
        """Create a fresh padding cache covering every causal conv in `coder`.

        `coder` is either `self.encoder` or `self.decoder`; both expose the same
        stem / conv_layers / head layout, except encoder stages downsample through a
        `conv` wrapper while decoder stages upsample through a `convtr` wrapper.
        Layers are registered in forward order so cache slots line up with each
        module's `layer_idx`.
        """
        per_layer_padding = []
        per_layer_in_channels = []

        def register(wrapper):
            # Both causal wrappers expose `causal_padding`; the wrapped torch module
            # is `conv` (Conv1d) or `convtr` (ConvTranspose1d).
            inner = wrapper.conv if hasattr(wrapper, "conv") else wrapper.convtr
            per_layer_padding.append(wrapper.causal_padding)
            per_layer_in_channels.append(inner.in_channels)

        register(coder.stem.conv)
        for block in coder.stem.stage:
            register(block.mixer)
        for layer in coder.conv_layers:
            register(layer.conv if hasattr(layer, "conv") else layer.convtr)
            for block in layer.stage:
                register(block.mixer)
        register(coder.head)
        return VibeVoiceAcousticTokenizerConv1dPaddingCache(
            num_layers=len(per_layer_padding),
            per_layer_padding=per_layer_padding,
            per_layer_padding_mode=["constant"] * len(per_layer_padding),
            per_layer_in_channels=per_layer_in_channels,
        )

    @can_return_tuple
    @auto_docstring
    def encode(self, input_values, padding_cache=None, use_cache=False, sample=True):
        r"""
        input_values (`torch.FloatTensor` of shape `(batch_size, channels, sequence_length)`):
            Input audio waveform to be encoded into latent representation.
        padding_cache (`VibeVoiceAcousticTokenizerConv1dPaddingCache`, *optional*):
            Cache object for streaming mode to maintain convolution states across layers.
        use_cache (`bool`, *optional*):
            Whether to use caching for convolution states.
        sample (`bool`, *optional*):
            Whether to sample from the VAE. If False, no noise is added.
        """
        # Default changed from None to False for consistency with `decode`;
        # both values are falsy so behavior is unchanged.
        if use_cache and padding_cache is None:
            padding_cache = self._build_padding_cache(self.encoder)
        latents = self.encoder(input_values, padding_cache=padding_cache)
        if sample:
            # Reparameterized VAE sampling with a per-example noise scale.
            noise_std = self.config.vae_std * torch.randn(latents.shape[0], device=latents.device, dtype=latents.dtype)
            latents = latents + noise_std[:, None, None] * torch.randn_like(latents)
        return VibeVoiceAcousticTokenizerEncoderOutput(latents=latents, padding_cache=padding_cache)

    @can_return_tuple
    @auto_docstring
    def decode(self, latents, padding_cache=None, use_cache=False):
        r"""
        latents (`torch.FloatTensor` of shape `(batch_size, channels, sequence_length)`):
            Input latent representation to be decoded back into audio.
        padding_cache (`VibeVoiceAcousticTokenizerConv1dPaddingCache`, *optional*):
            Cache object for streaming mode to maintain convolution states across layers.
        use_cache (`bool`, *optional*):
            Whether to use caching for convolution states.
        """
        if use_cache and padding_cache is None:
            padding_cache = self._build_padding_cache(self.decoder)
        # (batch, time, channels) -> (batch, channels, time) for the conv decoder.
        latents = latents.permute(0, 2, 1)
        audio = self.decoder(latents, padding_cache=padding_cache)
        return VibeVoiceAcousticTokenizerDecoderOutput(audio=audio, padding_cache=padding_cache)

    @can_return_tuple
    @auto_docstring
    def forward(self, input_values, padding_cache=None, use_cache=False, sample=True, **kwargs):
        r"""
        input_values (`torch.FloatTensor` of shape `(batch_size, channels, sequence_length)`):
            Input audio waveform to be encoded into latent representation.
        padding_cache (`VibeVoiceAcousticTokenizerConv1dPaddingCache`, *optional*):
            Cache object for streaming mode to maintain convolution states across layers. Note only used by decoder.
        use_cache (`bool`, *optional*):
            Whether to use caching for convolution states.
        sample (`bool`, *optional*):
            Whether to sample from the VAE latent distribution. If False, no noise is added to the latents.
        """
        # The cache is deliberately applied to the decoder only (see docstring).
        encoder_output = self.encode(input_values, sample=sample)
        decoder_output = self.decode(encoder_output.latents, padding_cache=padding_cache, use_cache=use_cache)
        return VibeVoiceAcousticTokenizerOutput(
            audio=decoder_output.audio,
            latents=encoder_output.latents,
            padding_cache=decoder_output.padding_cache,
        )
# Explicit public surface of this module; everything else is considered internal.
__all__ = [
    "VibeVoiceAcousticTokenizerModel",
    "VibeVoiceAcousticTokenizerPreTrainedModel",
]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/vibevoice_acoustic_tokenizer/modular_vibevoice_acoustic_tokenizer.py",
"license": "Apache License 2.0",
"lines": 439,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:tests/models/vibevoice_acoustic_tokenizer/test_feature_extraction_vibevoice_acoustic_tokenizer.py | # Copyright 2026 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import random
import unittest
import numpy as np
from transformers import VibeVoiceAcousticTokenizerFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
# Module-level RNG used by `floats_list` when the caller does not pass an explicit `rng`.
global_rng = random.Random()
# Adapted from tests.models.whisper.test_feature_extraction_whisper.floats_list
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Return a `shape[0] x shape[1]` nested list of random floats in `[0, scale)`.

    `rng` defaults to the module-level `global_rng`; `name` is unused and kept for API compatibility.
    """
    generator = global_rng if rng is None else rng
    # Row-major fill: one `random()` draw per element, scaled.
    return [[generator.random() * scale for _ in range(shape[1])] for _ in range(shape[0])]
@require_torch
class VibeVoiceAcousticTokenizerFeatureExtractionTester:
    """Builds feature-extractor kwargs and raw audio batches for the tests below."""

    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=24000,
        normalize_audio=True,
        target_dB_FS=-25,
        eps=1e-6,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        # Step between consecutive sample lengths so the batch spans [min, max).
        self.seq_length_diff = (max_seq_length - min_seq_length) // (batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.normalize_audio = normalize_audio
        self.target_dB_FS = target_dB_FS
        self.eps = eps

    def prepare_feat_extract_dict(self):
        """Return the kwargs used to instantiate the feature extractor under test."""
        keys = ("feature_size", "padding_value", "sampling_rate", "normalize_audio", "target_dB_FS", "eps")
        return {key: getattr(self, key) for key in keys}

    # Adapted from tests.models.encodec.test_feature_extraction_encodec.EnCodecFeatureExtractionTester.prepare_inputs_for_common
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        """Create a batch of raw float audio inputs, optionally as numpy arrays."""
        if equal_length:
            audio_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # Sample lengths grow from min_seq_length towards max_seq_length in fixed steps.
            lengths = range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            audio_inputs = [
                list(itertools.chain.from_iterable(floats_list((length, self.feature_size))))
                for length in lengths
            ]
        if numpify:
            return [np.asarray(audio) for audio in audio_inputs]
        return audio_inputs
@require_torch
class VibeVoiceAcousticTokenizerFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    # Feature-extractor class exercised by the common mixin tests.
    feature_extraction_class = VibeVoiceAcousticTokenizerFeatureExtractor
    def setUp(self):
        self.feat_extract_tester = VibeVoiceAcousticTokenizerFeatureExtractionTester(self)
    def test_call(self):
        """Torch and numpy inputs must produce (numerically) identical features, batched or not."""
        TOL = 1e-6
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        sampling_rate = feature_extractor.sampling_rate
        # Three mono clips of increasing length: 800, 1000, 1200 samples.
        audio_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_audio_inputs = [np.asarray(audio_input) for audio_input in audio_inputs]
        torch_audio_inputs = [torch.tensor(audio_input) for audio_input in audio_inputs]
        # Test non-batched input
        encoded_sequences_1 = feature_extractor(torch_audio_inputs[0], sampling_rate=sampling_rate).input_values
        encoded_sequences_2 = feature_extractor(np_audio_inputs[0], sampling_rate=sampling_rate).input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=TOL))
        # Test batched input
        encoded_sequences_1 = feature_extractor(torch_audio_inputs, sampling_rate=sampling_rate).input_values
        encoded_sequences_2 = feature_extractor(np_audio_inputs, sampling_rate=sampling_rate).input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=TOL))
    def _load_datasamples(self, num_samples):
        """Load `num_samples` raw waveforms from the dummy LibriSpeech dataset (id-sorted)."""
        from datasets import load_dataset
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # NOTE(review): indexes an "input_values" column — confirm the dummy dataset exposes it
        # (this dataset historically exposed the waveform under an "audio" column).
        audio_samples = ds.sort("id")[:num_samples]["input_values"]
        return [x["array"] for x in audio_samples]
    def test_normalize_audio(self):
        """Test audio normalization functionality specific to VibeVoice."""
        # Test with normalization enabled (default)
        feature_extractor = VibeVoiceAcousticTokenizerFeatureExtractor(normalize_audio=True, target_dB_FS=-25)
        # Test with very low amplitude audio (should increase amplitude)
        low_amplitude_audio = np.random.randn(1000).astype(np.float32) * 0.01
        result = feature_extractor([low_amplitude_audio])
        normalized_audio = result.input_values.squeeze()
        self.assertGreater(
            torch.abs(normalized_audio).max().item(), torch.abs(torch.tensor(low_amplitude_audio)).max().item()
        )
        # Test with normalization disabled (should be close to original)
        feature_extractor_no_norm = VibeVoiceAcousticTokenizerFeatureExtractor(normalize_audio=False)
        result_no_norm = feature_extractor_no_norm([low_amplitude_audio])
        torch.testing.assert_close(
            result_no_norm.input_values.squeeze(), torch.tensor(low_amplitude_audio), rtol=1e-5, atol=1e-5
        )
    def test_sampling_rate_validation(self):
        """Test that sampling rate validation works correctly."""
        feature_extractor = VibeVoiceAcousticTokenizerFeatureExtractor(sampling_rate=24000)
        input_audio = np.random.randn(1000).astype(np.float32)
        result = feature_extractor([input_audio], sampling_rate=24000)
        self.assertIsInstance(result.input_values, torch.Tensor)
        # A mismatched sampling rate must be rejected rather than silently resampled.
        with self.assertRaises(ValueError):
            feature_extractor([input_audio], sampling_rate=16000)
    def test_padding_mask_generation(self):
        """Test that padding masks are generated correctly."""
        feature_extractor = VibeVoiceAcousticTokenizerFeatureExtractor()
        audio1 = np.random.randn(100).astype(np.float32)
        audio2 = np.random.randn(200).astype(np.float32)
        result = feature_extractor([audio1, audio2], padding=True, return_attention_mask=True)
        self.assertIn("padding_mask", result)
        self.assertEqual(result.padding_mask.shape, result.input_values.squeeze(1).shape)
        # First sample should have some padding (False values at the end)
        self.assertTrue(torch.any(~result.padding_mask[0]))
        # Second sample should have no padding (all True values)
        self.assertTrue(torch.all(result.padding_mask[1]))
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/vibevoice_acoustic_tokenizer/test_feature_extraction_vibevoice_acoustic_tokenizer.py",
"license": "Apache License 2.0",
"lines": 148,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:tests/models/vibevoice_acoustic_tokenizer/test_modeling_vibevoice_acoustic_tokenizer.py | # Copyright 2026 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import json
import unittest
from pathlib import Path
import numpy as np
from transformers import (
AutoFeatureExtractor,
AutoModel,
VibeVoiceAcousticTokenizerConfig,
VibeVoiceAcousticTokenizerModel,
)
from transformers.audio_utils import load_audio_librosa
from transformers.testing_utils import cleanup, is_torch_available, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
@require_torch
class VibeVoiceAcousticTokenizerModelTester:
    """Produces tiny configurations and deterministic inputs for fast unit tests."""

    def __init__(
        self,
        parent,
        batch_size=2,
        channels=1,
        hidden_size=32,
        kernel_size=3,
        n_filters=4,
        downsampling_ratios=None,
        depths=None,
        is_training=False,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.channels = channels
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.kernel_size = kernel_size
        self.n_filters = n_filters
        # Avoid mutable default arguments (shared list objects across calls); the fallbacks
        # below reproduce the previous defaults `[2]` and `[1, 1]`.
        self.downsampling_ratios = [2] if downsampling_ratios is None else downsampling_ratios
        self.depths = [1, 1] if depths is None else depths

    def prepare_config_and_inputs(self):
        """Return a tiny config plus inputs with VAE sampling disabled for determinism."""
        input_values = floats_tensor([self.batch_size, self.channels, self.hidden_size], scale=1.0)
        config = self.get_config()
        # disable sampling for deterministic tests
        inputs_dict = {"input_values": input_values, "sample": False}
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        # Same recipe as `prepare_config_and_inputs`; kept as a separate entry point for the
        # common tester protocol (previously duplicated the body verbatim).
        return self.prepare_config_and_inputs()

    def prepare_config_and_inputs_for_model_class(self, model_class):
        # `model_class` is unused but kept for API compatibility with the common tester protocol
        # (previously duplicated the body of `prepare_config_and_inputs` verbatim).
        return self.prepare_config_and_inputs()

    def get_config(self):
        """Instantiate the small `VibeVoiceAcousticTokenizerConfig` shared by all tests."""
        return VibeVoiceAcousticTokenizerConfig(
            channels=self.channels,
            hidden_size=self.hidden_size,
            kernel_size=self.kernel_size,
            n_filters=self.n_filters,
            downsampling_ratios=self.downsampling_ratios,
            depths=self.depths,
        )

    def create_and_check_model_forward(self, config, inputs_dict):
        """Run a forward pass and verify the latent and reconstructed-audio shapes."""
        model = VibeVoiceAcousticTokenizerModel(config=config).to(torch_device).eval()
        input_values = inputs_dict["input_values"]
        result = model(input_values)
        # The latent sequence length shrinks by the product of all downsampling ratios.
        expected_seq_len = self.hidden_size // np.prod(self.downsampling_ratios)
        self.parent.assertEqual(result.latents.shape, (self.batch_size, expected_seq_len, self.hidden_size))
        self.parent.assertEqual(result.audio.shape, (self.batch_size, self.channels, self.hidden_size))
@require_torch
class VibeVoiceAcousticTokenizerModelTest(ModelTesterMixin, unittest.TestCase):
    """Common mixin tests plus tokenizer-specific encode/decode/cache checks."""

    all_model_classes = (VibeVoiceAcousticTokenizerModel,) if is_torch_available() else ()
    is_encoder_decoder = False
    # The tokenizer has no token embeddings or attention heads to resize/mask/prune,
    # and offloading is not supported for this architecture.
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    test_cpu_offload = False
    test_disk_offload_safetensors = False
    test_disk_offload_bin = False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        """Strip `output_attentions`/`output_hidden_states`: this model exposes neither."""
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if "output_attentions" in inputs_dict:
            inputs_dict.pop("output_attentions")
        if "output_hidden_states" in inputs_dict:
            inputs_dict.pop("output_hidden_states")
        return inputs_dict

    def setUp(self):
        self.model_tester = VibeVoiceAcousticTokenizerModelTester(self)
        self.config_tester = ConfigTester(
            self,
            config_class=VibeVoiceAcousticTokenizerConfig,
            common_properties=[],
            has_text_modality=False,
        )

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model_forward(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_forward(*config_and_inputs)

    def test_forward_signature(self):
        """`forward` must start with the documented positional arguments, in order."""
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["input_values", "padding_cache", "use_cache", "sample"]
            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)

    @unittest.skip("VibeVoiceAcousticTokenizerModel does not have `inputs_embeds` logic")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("VibeVoiceAcousticTokenizerModel does not have `inputs_embeds` logic")
    def test_model_get_set_embeddings(self):
        pass

    @unittest.skip("VibeVoiceAcousticTokenizerModel does not have the usual `attention` logic")
    def test_retain_grad_hidden_states_attentions(self):
        pass

    @unittest.skip("VibeVoiceAcousticTokenizerModel does not have the usual `attention` logic")
    def test_attention_outputs(self):
        pass

    @unittest.skip("VibeVoiceAcousticTokenizerModel does not have the usual `hidden_states` logic")
    def test_hidden_states_output(self):
        pass

    # Fixed the skip reason grammar (was the double negative "does not has no attribute").
    @unittest.skip("VibeVoiceAcousticTokenizerModel does not have a `hf_device_map` attribute")
    def test_model_parallelism(self):
        pass

    def test_determinism(self):
        """Two eval-mode forward passes on the same inputs must agree to 1e-5."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def check_determinism(first, second):
            out_1 = first.cpu().numpy()
            out_2 = second.cpu().numpy()
            # NaN entries (if any) are excluded from the comparison.
            out_1 = out_1[~np.isnan(out_1)]
            out_2 = out_2[~np.isnan(out_2)]
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                first = model(**self._prepare_for_class(inputs_dict, model_class)).latents
                second = model(**self._prepare_for_class(inputs_dict, model_class)).latents
            if isinstance(first, tuple) and isinstance(second, tuple):
                for tensor1, tensor2 in zip(first, second):
                    check_determinism(tensor1, tensor2)
            else:
                check_determinism(first, second)

    def test_model_outputs_equivalence(self):
        """`return_dict=False` tuples must match the `return_dict=True` dataclass fields."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def set_nan_tensor_to_zero(t):
            t[t != t] = 0
            return t

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs=None):
            # Avoid a mutable default argument for the kwargs dict.
            additional_kwargs = {} if additional_kwargs is None else additional_kwargs
            with torch.no_grad():
                tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs)
                dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

            def recursive_check(tuple_object, dict_object):
                if isinstance(tuple_object, (list, tuple)):
                    for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif isinstance(tuple_object, dict):
                    for tuple_iterable_value, dict_iterable_value in zip(
                        tuple_object.values(), dict_object.values()
                    ):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        torch.allclose(
                            set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5
                        ),
                        msg=(
                            "Tuple and dict output are not equal. Difference:"
                            f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:"
                            # Fixed: the tuple `inf` check previously printed the whole tensor (missing `.any()`).
                            f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object).any()}. Dict has"
                            f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object).any()}."
                        ),
                    )

            recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)

    def test_encode_method(self):
        """`encode` alone returns latents of shape (batch, downsampled_seq, hidden)."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        model = VibeVoiceAcousticTokenizerModel(config=config).to(torch_device).eval()
        audio = inputs_dict["input_values"]
        with torch.no_grad():
            output = model.encode(audio)
        self.assertIsNotNone(output.latents)
        expected_seq_len = self.model_tester.hidden_size // np.prod(self.model_tester.downsampling_ratios)
        self.assertEqual(
            output.latents.shape, (self.model_tester.batch_size, expected_seq_len, self.model_tester.hidden_size)
        )

    def test_decode_method(self):
        """`decode(encode(x).latents)` reconstructs audio with the input shape."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        model = VibeVoiceAcousticTokenizerModel(config=config).to(torch_device).eval()
        audio = inputs_dict["input_values"]
        with torch.no_grad():
            encode_output = model.encode(audio)
            decode_output = model.decode(encode_output.latents)
        self.assertIsNotNone(decode_output.audio)
        self.assertEqual(
            decode_output.audio.shape,
            (self.model_tester.batch_size, self.model_tester.channels, self.model_tester.hidden_size),
        )

    def test_use_cache(self):
        """Streaming mode must return a padding cache alongside latents and audio."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        model = VibeVoiceAcousticTokenizerModel(config=config).to(torch_device).eval()
        input_values = inputs_dict["input_values"]
        with torch.no_grad():
            output = model(input_values, use_cache=True)
        self.assertIsNotNone(output.padding_cache)
        self.assertIsNotNone(output.latents)
        self.assertIsNotNone(output.audio)
class VibeVoiceAcousticTokenizerIntegrationTest(unittest.TestCase):
    # End-to-end checks against the released checkpoint; requires network access and is `@slow`.
    def setUp(self):
        self.model_checkpoint = "bezzam/VibeVoice-AcousticTokenizer"
        self.sampling_rate = 24000
    def tearDown(self):
        # Release model memory (and GPU cache, if any) between tests.
        cleanup(torch_device, gc_collect=True)
    @slow
    @require_torch
    def test_batch_integration(self):
        """
        Reproducer which generates JSON of expected outputs:
        https://gist.github.com/ebezzam/507dfd544e0a0f12402966503cbc73e6#file-reproducer_tokenizer-py
        NOTE (ebezzam): had to compute expected outputs on CI runners for passing tests
        """
        dtype = torch.bfloat16
        # Load expected outputs
        RESULTS_PATH = (
            Path(__file__).parent.parent.parent / "fixtures/vibevoice/expected_acoustic_tokenizer_results.json"
        )
        with open(RESULTS_PATH, "r") as f:
            expected_results = json.load(f)
        expected_encoder = torch.tensor(expected_results["encoder"]).to(dtype)
        expected_decoder = torch.tensor(expected_results["decoder"]).to(dtype)
        # Prepare inputs
        audio_paths = [
            "https://huggingface.co/datasets/bezzam/vibevoice_samples/resolve/main/voices/en-Carter_man.wav",
            "https://huggingface.co/datasets/bezzam/vibevoice_samples/resolve/main/voices/en-Frank_man.wav",
        ]
        audio_arrays = [load_audio_librosa(path, sampling_rate=self.sampling_rate) for path in audio_paths]
        feature_extractor = AutoFeatureExtractor.from_pretrained(self.model_checkpoint)
        # apply model and compare
        model = AutoModel.from_pretrained(
            self.model_checkpoint,
            dtype=dtype,
            device_map=torch_device,
        ).eval()
        processed_audio = feature_extractor(audio_arrays, sampling_rate=self.sampling_rate).to(
            torch_device, dtype=dtype
        )
        with torch.no_grad():
            encoder_out = model.encode(processed_audio["input_values"], sample=False).latents
            acoustic_decoder_out = model.decode(encoder_out).audio
        # Compare only the leading slice stored in the fixture (flattened per batch item).
        encoder_out_flat = encoder_out.reshape(encoder_out.shape[0], -1)
        encoder_out = encoder_out_flat[..., : expected_encoder.shape[-1]].cpu()
        decoder_out = acoustic_decoder_out[..., : expected_decoder.shape[-1]].cpu()
        torch.testing.assert_close(encoder_out, expected_encoder, rtol=1e-6, atol=1e-6)
        torch.testing.assert_close(decoder_out, expected_decoder, rtol=1e-6, atol=1e-6)
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/vibevoice_acoustic_tokenizer/test_modeling_vibevoice_acoustic_tokenizer.py",
"license": "Apache License 2.0",
"lines": 281,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:src/transformers/integrations/neftune.py | # Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
NEFTune: Noisy Embeddings for Fine-Tuning.
Implementation based on https://github.com/neelsjain/NEFTune
Paper: https://huggingface.co/papers/2310.05914
"""
import torch
from ..trainer_utils import _is_peft_model
def neftune_post_forward_hook(module, input, output):
    """
    Forward hook implementing NEFTune: during training, adds uniform noise to the embedding
    output, scaled by `module.neftune_noise_alpha / sqrt(seq_len * hidden_dim)`. Works only on
    `torch.nn.Embedding`-like modules. Slightly adapted from the original source at
    https://github.com/neelsjain/NEFTune. Usage:

    ```python
    from transformers.integrations.neftune import neftune_post_forward_hook

    model = ...
    model.embed_tokens.neftune_noise_alpha = 0.1
    model.embed_tokens.register_forward_hook(neftune_post_forward_hook)
    ```

    Args:
        module (`torch.nn.Module`):
            The embedding module the hook is attached to; must carry a `neftune_noise_alpha`
            attribute controlling the noise magnitude.
        input (`torch.Tensor`):
            The input tensor to the module (unused).
        output (`torch.Tensor`):
            The module output (the embeddings), of shape `(batch, seq_len, hidden_dim)`.
    """
    if not module.training:
        # Eval/inference: embeddings pass through untouched.
        return output
    # Scale the per-element noise bound by 1/sqrt(seq_len * hidden_dim).
    num_elements = torch.tensor(output.size(1) * output.size(2))
    mag_norm = module.neftune_noise_alpha / torch.sqrt(num_elements)
    noise = torch.zeros_like(output).uniform_(-mag_norm, mag_norm)
    return output + noise
def activate_neftune(model, neftune_noise_alpha, accelerator=None):
    """
    Enable NEFTune (Noisy Embeddings for Fine-Tuning) on `model`.

    Registers `neftune_post_forward_hook` on the model's input embeddings so that noise is
    injected during training. See https://huggingface.co/papers/2310.05914 for details.

    Args:
        model (`torch.nn.Module`):
            The model to activate NEFTune on.
        neftune_noise_alpha (`float`):
            The noise alpha value controlling the magnitude of the noise.
        accelerator (`Accelerator`, *optional*):
            If provided, the model is unwrapped before accessing embeddings. Required when
            using distributed training.

    Returns:
        `torch.utils.hooks.RemovableHandle`: Handle usable to deactivate NEFTune later.
    """
    target = model if accelerator is None else accelerator.unwrap_model(model)
    if _is_peft_model(target):
        # PEFT wraps the transformer; reach through to the base model's embeddings.
        embeddings = target.base_model.model.get_input_embeddings()
    else:
        embeddings = target.get_input_embeddings()
    # The forward hook reads this attribute to scale the injected noise.
    embeddings.neftune_noise_alpha = neftune_noise_alpha
    return embeddings.register_forward_hook(neftune_post_forward_hook)
def deactivate_neftune(model, hook_handle, accelerator=None):
    """
    Disable NEFTune on `model`: remove the forward hook and drop the noise-alpha attribute.

    Args:
        model (`torch.nn.Module`):
            The model to deactivate NEFTune on.
        hook_handle (`torch.utils.hooks.RemovableHandle`):
            The hook handle returned by `activate_neftune`.
        accelerator (`Accelerator`, *optional*):
            If provided, the model is unwrapped before accessing embeddings.
    """
    target = model if accelerator is None else accelerator.unwrap_model(model)
    if _is_peft_model(target):
        # PEFT wraps the transformer; reach through to the base model's embeddings.
        embeddings = target.base_model.model.get_input_embeddings()
    else:
        embeddings = target.get_input_embeddings()
    hook_handle.remove()
    if hasattr(embeddings, "neftune_noise_alpha"):
        del embeddings.neftune_noise_alpha
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/integrations/neftune.py",
"license": "Apache License 2.0",
"lines": 95,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/moonshine_streaming/configuration_moonshine_streaming.py | # Copyright 2026 the HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ...configuration_utils import PreTrainedConfig
from ...modeling_rope_utils import RopeParameters
from ..auto import CONFIG_MAPPING
class MoonshineStreamingEncoderConfig(PreTrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`MoonshineStreamingEncoder`]. It is used to
    instantiate a Moonshine Streaming encoder according to the specified arguments, defining the encoder architecture.
    Instantiating a configuration with the defaults will yield a similar configuration to that of the Moonshine Streaming tiny model.
    e.g. [UsefulSensors/moonshine-streaming-tiny](https://huggingface.co/UsefulSensors/moonshine-streaming-tiny)
    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.
    Args:
        hidden_size (`int`, *optional*, defaults to 320):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 1280):
            Dimension of the MLP representations.
        hidden_act (`str`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder.
        num_hidden_layers (`int`, *optional*, defaults to 6):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 8):
            Number of attention heads for each attention layer in the Transformer encoder.
        num_key_value_heads (`int`, *optional*, defaults to 8):
            This is the number of key_value heads that should be used to implement Grouped Query Attention. If
            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
            `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used.
        max_position_embeddings (`int`, *optional*, defaults to 4096):
            The maximum sequence length that this model might ever be used with.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        attention_bias (`bool`, *optional*, defaults to `False`):
            Whether to use a bias in the query, key, value and output projection layers during self-attention.
        sample_rate (`int`, *optional*, defaults to 16000):
            The sample rate of the audio input in Hz.
        frame_ms (`float`, *optional*, defaults to 5.0):
            The frame duration in milliseconds for audio processing.
        sliding_windows (`list[tuple[int, int]]`, *optional*, defaults to `[(16, 4), (16, 4), (16, 0), (16, 0), (16, 4), (16, 4)]`):
            List of sliding window configurations for each encoder layer. Each tuple contains (window_size, shift).
        head_dim (`int`, *optional*):
            The attention head dimension. If None, it will default to hidden_size // num_attention_heads.
    ```python
    >>> from transformers import MoonshineStreamingEncoder, MoonshineStreamingEncoderConfig
    >>> # Initializing a Moonshine Streaming encoder configuration
    >>> configuration = MoonshineStreamingEncoderConfig()
    >>> # Initializing a model from the configuration
    >>> model = MoonshineStreamingEncoder(configuration)
    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "moonshine_streaming_encoder"

    def __init__(
        self,
        hidden_size: int | None = 320,
        intermediate_size: int | None = 1280,
        hidden_act: str | None = "gelu",
        num_hidden_layers: int | None = 6,
        num_attention_heads: int | None = 8,
        num_key_value_heads: int | None = 8,
        max_position_embeddings: int | None = 4096,
        attention_dropout: float | None = 0.0,
        attention_bias: bool | None = False,
        sample_rate: int = 16000,
        frame_ms: float = 5.0,
        sliding_windows: list[tuple[int, int]] | None = None,
        head_dim: int | None = None,
        **kwargs,
    ):
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.max_position_embeddings = max_position_embeddings
        self.attention_dropout = attention_dropout
        self.attention_bias = attention_bias
        # Default head dim follows the usual hidden_size / num_heads split.
        self.head_dim = head_dim if head_dim is not None else self.hidden_size // self.num_attention_heads
        self.sample_rate = sample_rate
        self.frame_ms = frame_ms
        # Materialize the default here instead of using a mutable default argument (a list
        # default is a single object shared across every call of `__init__`).
        if sliding_windows is None:
            sliding_windows = [(16, 4), (16, 4), (16, 0), (16, 0), (16, 4), (16, 4)]
        # Stored as lists (not tuples) so the config round-trips through JSON serialization.
        self.sliding_windows = [list(window) for window in sliding_windows]
        super().__init__(**kwargs)
class MoonshineStreamingConfig(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`MoonshineStreamingModel`]. It is used to
instantiate a Moonshine Streaming model according to the specified arguments, defining the model architecture.
Instantiating a configuration with the defaults will yield a similar configuration to that of the Moonshine
Streaming tiny model.
e.g. [UsefulSensors/moonshine-streaming-tiny](https://huggingface.co/UsefulSensors/moonshine-streaming-tiny)
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
encoder_config (`MoonshineStreamingEncoderConfig`, *optional*):
Configuration of the encoder. If not provided, a default `MoonshineStreamingEncoderConfig` will be
instantiated.
vocab_size (`int`, *optional*, defaults to 32768):
Vocabulary size of the Moonshine Streaming decoder model. Defines the number of different tokens that can
be represented by the `inputs_ids` passed when calling [`MoonshineStreamingModel`].
hidden_size (`int`, *optional*, defaults to 320):
Dimension of the hidden representations.
intermediate_size (`int`, *optional*, defaults to 1280):
Dimension of the MLP representations.
num_hidden_layers (`int`, *optional*, defaults to 6):
Number of hidden layers in the Transformer decoder.
num_attention_heads (`int`, *optional*, defaults to 8):
Number of attention heads for each attention layer in the Transformer decoder.
hidden_act (`str`, *optional*, defaults to `"silu"`):
The non-linear activation function (function or string) in the decoder.
max_position_embeddings (`int`, *optional*, defaults to 4096):
The maximum sequence length that this model might ever be used with.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
pad_token_id (`int`, *optional*, defaults to 0):
Padding token id.
bos_token_id (`int`, *optional*, defaults to 1):
Beginning of stream token id.
eos_token_id (`int`, *optional*, defaults to 2):
End of stream token id.
rope_parameters (`RopeParameters` or `dict`, *optional*, defaults to `{'rope_type': 'default', 'rope_theta': 10000.0, 'partial_rotary_factor': 0.8}`):
Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain
a value for `rope_theta`, `rope_type`, and optionally `partial_rotary_factor` for partial RoPE application.
attention_bias (`bool`, *optional*, defaults to `False`):
Whether to use a bias in the query, key, value and output projection layers during self-attention.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
decoder_start_token_id (`int`, *optional*):
The decoder start token id. If not specified, it will default to `bos_token_id`.
head_dim (`int`, *optional*):
The attention head dimension. If None, it will default to hidden_size // num_attention_heads.
pad_head_dim_to_multiple_of (`int`, *optional*):
If set, the head dimension will be padded to a multiple of this value.
tie_word_embeddings (`bool`, *optional*, defaults to `False`):
Whether to tie weight embeddings
is_encoder_decoder (`bool`, *optional*, defaults to `True`):
Whether the model is used as an encoder/decoder or not.
```python
>>> from transformers import MoonshineStreamingModel, MoonshineStreamingConfig
>>> # Initializing a Moonshine Streaming configuration
>>> configuration = MoonshineStreamingConfig()
>>> # Initializing a model from the configuration
>>> model = MoonshineStreamingModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "moonshine_streaming"
sub_configs = {"encoder_config": MoonshineStreamingEncoderConfig}
keys_to_ignore_at_inference = ["past_key_values"]
def __init__(
    self,
    encoder_config: MoonshineStreamingEncoderConfig = None,
    vocab_size: int = 32768,
    hidden_size: int | None = 320,
    intermediate_size: int | None = 1280,
    num_hidden_layers: int | None = 6,
    num_attention_heads: int | None = 8,
    hidden_act: str | None = "silu",
    max_position_embeddings: int = 4096,
    use_cache: bool | None = True,
    pad_token_id: int = 0,
    bos_token_id: int = 1,
    eos_token_id: int = 2,
    rope_parameters: RopeParameters | dict[str, RopeParameters] | None = {
        "rope_type": "default",
        "rope_theta": 10000.0,
        "partial_rotary_factor": 0.8,
    },
    attention_bias: bool = False,
    attention_dropout: float = 0.0,
    decoder_start_token_id: int | None = None,
    head_dim: int | None = None,
    pad_head_dim_to_multiple_of: int | None = None,
    tie_word_embeddings: bool = False,
    is_encoder_decoder: bool = True,
    **kwargs,
):
    # Resolve the encoder sub-config: serialized dicts are rebuilt through
    # CONFIG_MAPPING, and a missing value falls back to the default encoder config.
    if isinstance(encoder_config, dict):
        encoder_config["model_type"] = encoder_config.get("model_type", "moonshine_streaming_encoder")
        encoder_config = CONFIG_MAPPING[encoder_config["model_type"]](**encoder_config)
    elif encoder_config is None:
        encoder_config = CONFIG_MAPPING["moonshine_streaming_encoder"]()
    self.encoder_config = encoder_config
    self.vocab_size = vocab_size
    self.max_position_embeddings = max_position_embeddings
    self.hidden_size = hidden_size
    self.intermediate_size = intermediate_size
    self.num_hidden_layers = num_hidden_layers
    self.num_attention_heads = num_attention_heads
    # Decoder attention uses as many key/value heads as query heads (no GQA).
    self.num_key_value_heads = num_attention_heads
    self.hidden_act = hidden_act
    self.use_cache = use_cache
    self.attention_bias = attention_bias
    self.attention_dropout = attention_dropout
    self.head_dim = head_dim if head_dim is not None else self.hidden_size // self.num_attention_heads
    # Copy dict-valued rope parameters: the signature default is a shared mutable
    # dict, and storing it by reference would let one config instance mutate the
    # default seen by every future instance.
    self.rope_parameters = dict(rope_parameters) if isinstance(rope_parameters, dict) else rope_parameters
    self.pad_head_dim_to_multiple_of = pad_head_dim_to_multiple_of
    super().__init__(
        bos_token_id=bos_token_id,
        eos_token_id=eos_token_id,
        pad_token_id=pad_token_id,
        decoder_start_token_id=decoder_start_token_id,
        tie_word_embeddings=tie_word_embeddings,
        is_encoder_decoder=is_encoder_decoder,
        **kwargs,
    )
__all__ = ["MoonshineStreamingConfig", "MoonshineStreamingEncoderConfig"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/moonshine_streaming/configuration_moonshine_streaming.py",
"license": "Apache License 2.0",
"lines": 218,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/moonshine_streaming/modular_moonshine_streaming.py | # Copyright 2026 the HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections.abc import Callable
from dataclasses import dataclass
import torch
import torch.nn as nn
from torch import Tensor
from ...cache_utils import Cache
from ...masking_utils import create_bidirectional_mask
from ...modeling_outputs import (
BaseModelOutput,
BaseModelOutputWithPast,
)
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS
from ...processing_utils import ProcessingKwargs, Unpack
from ...utils import TransformersKwargs, auto_docstring, logging
from ...utils.generic import merge_with_config_defaults
from ...utils.output_capturing import OutputRecorder, capture_outputs
from ..llama.modeling_llama import LlamaMLP, eager_attention_forward
from ..moonshine.modeling_moonshine import (
MoonshineDecoder,
MoonshineEncoderLayer,
MoonshineEncoderMLP,
MoonshineForConditionalGeneration,
MoonshineModel,
MoonshinePreTrainedModel,
)
from ..wav2vec2.processing_wav2vec2 import Wav2Vec2Processor
from .configuration_moonshine_streaming import MoonshineStreamingConfig, MoonshineStreamingEncoderConfig
logger = logging.get_logger(__name__)
class MoonshineStreamingProcessorKwargs(ProcessingKwargs, total=False):
    """Keyword-argument schema (with defaults) for the MoonshineStreaming processor."""

    # Defaults applied when the caller does not override them.
    _defaults = {
        "audio_kwargs": {
            # NOTE(review): 80 presumably matches the encoder's audio frame length so
            # frame slicing divides evenly — confirm against the feature extractor.
            "pad_to_multiple_of": 80,
            "padding": True,
        },
        "common_kwargs": {"return_tensors": "pt"},
    }
class MoonshineStreamingProcessor(Wav2Vec2Processor): ...
@dataclass
@auto_docstring(
    custom_intro="""
    Extends [~modeling_outputs.BaseModelOutput] to include the output attention mask since sequence length is not preserved in the model's forward.
    """
)
class MoonshineStreamingEncoderModelOutput(BaseModelOutput):
    # Frame-level padding mask aligned with `last_hidden_state`; the embedder
    # subsamples the audio, so the caller's sample-level mask no longer applies.
    attention_mask: torch.Tensor | None = None
class MoonshineStreamingFrameCMVN(nn.Module):
    """Per-frame mean/variance normalization: zero mean and unit RMS over the last dim."""

    def __init__(self, eps: float = 1e-6):
        super().__init__()
        # Small constant added to the variance for numerical stability.
        self.eps = eps

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        centered = x - x.mean(dim=-1, keepdim=True)
        variance = centered.pow(2).mean(dim=-1, keepdim=True)
        return centered / (variance + self.eps).sqrt()
class MoonshineStreamingAsinhCompression(nn.Module):
    """Learnable amplitude compression: y = asinh(k * x), with k stored in log space."""

    def __init__(self, k_init: float = 0.75):
        super().__init__()
        # Log-space parameterization keeps the effective gain k positive under training.
        self.log_k = nn.Parameter(torch.tensor(k_init).log())

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        k = self.log_k.exp()
        return (k * x).asinh()
class MoonshineStreamingCausalConv1d(nn.Conv1d):
def __init__(
self,
in_channels: int,
out_channels: int,
kernel_size: int,
stride: int = 1,
dilation: int = 1,
bias: bool = True,
):
super().__init__(in_channels, out_channels, kernel_size, stride=stride, dilation=dilation, bias=bias)
self.left_pad = (kernel_size - 1) * dilation
def forward(self, x: torch.Tensor, mask: torch.Tensor | None = None) -> torch.Tensor:
x = nn.functional.pad(x, (self.left_pad, 0))
x = super().forward(x)
if mask is not None:
mask = nn.functional.pad(mask, (self.left_pad, 0))[:, None, :]
weight = torch.ones(1, 1, self.kernel_size[0], device=mask.device)
mask = nn.functional.conv1d(mask.float(), weight, stride=self.stride)
mask = mask > 0
x *= mask
if mask is not None:
mask = mask.squeeze(1)
return x, mask
class MoonshineStreamingLayerNorm(nn.Module):
    """LayerNorm with an offset gain: out = LN(x) * (gamma + unit_offset)."""

    def __init__(self, dim: int, unit_offset: bool = True, device=None, dtype=None):
        super().__init__()
        # With unit_offset=True a zero-valued gamma corresponds to an identity gain
        # of 1 (see `_init_weights`, which initializes gamma to 1 - unit_offset).
        self.unit_offset = float(unit_offset)
        self.ln = nn.LayerNorm(dim, elementwise_affine=False, device=device, dtype=dtype)
        self.gamma = nn.Parameter(torch.ones(dim, device=device, dtype=dtype))

    def forward(self, x: Tensor) -> Tensor:
        effective_gain = self.gamma + self.unit_offset
        return self.ln(x) * effective_gain
class MoonshineStreamingEncoderMLP(MoonshineEncoderMLP): ...
class MoonshineStreamingEncoderAttention(nn.Module):
    """Multi-headed self-attention for the streaming encoder (bidirectional, non-causal)."""

    def __init__(self, config: MoonshineStreamingConfig, layer_idx: int):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
        self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
        self.scaling = self.head_dim**-0.5
        self.attention_dropout = config.attention_dropout
        self.is_causal = False

        q_dim = config.num_attention_heads * self.head_dim
        kv_dim = config.num_key_value_heads * self.head_dim
        self.q_proj = nn.Linear(config.hidden_size, q_dim, bias=config.attention_bias)
        self.k_proj = nn.Linear(config.hidden_size, kv_dim, bias=config.attention_bias)
        self.v_proj = nn.Linear(config.hidden_size, kv_dim, bias=config.attention_bias)
        self.o_proj = nn.Linear(q_dim, config.hidden_size, bias=config.attention_bias)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: torch.Tensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple[torch.Tensor, torch.Tensor]:
        batch_dims = hidden_states.shape[:-1]
        per_head_shape = (*batch_dims, -1, self.head_dim)

        # Project, split the hidden dim into (heads, head_dim), and move heads forward.
        queries = self.q_proj(hidden_states).view(per_head_shape).transpose(1, 2)
        keys = self.k_proj(hidden_states).view(per_head_shape).transpose(1, 2)
        values = self.v_proj(hidden_states).view(per_head_shape).transpose(1, 2)

        # Dispatch to the configured attention backend (eager / sdpa / flash-attn ...).
        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]

        attn_output, attn_weights = attention_interface(
            self,
            queries,
            keys,
            values,
            attention_mask,
            dropout=self.attention_dropout if self.training else 0.0,
            scaling=self.scaling,
            **kwargs,
        )

        attn_output = attn_output.reshape(*batch_dims, -1).contiguous()
        return self.o_proj(attn_output), attn_weights
class MoonshineStreamingEncoderLayer(MoonshineEncoderLayer):
    """Moonshine encoder layer with streaming-specific submodules swapped in."""

    def __init__(self, config: MoonshineStreamingConfig, layer_idx: int):
        # Reuse the Moonshine layer wiring, then replace attention/MLP/norms with the
        # streaming variants (offset LayerNorm, sliding-window-capable attention).
        super().__init__(config, layer_idx)
        self.self_attn = MoonshineStreamingEncoderAttention(config, layer_idx)
        self.mlp = MoonshineStreamingEncoderMLP(config, config.hidden_act)
        self.input_layernorm = MoonshineStreamingLayerNorm(config.hidden_size)
        self.post_attention_layernorm = MoonshineStreamingLayerNorm(config.hidden_size)
class MoonshineStreamingEncoderEmbedder(nn.Module):
    """Turns raw waveforms into encoder hidden states.

    Pipeline: slice into fixed-length frames -> per-frame CMVN -> asinh compression
    -> linear projection + SiLU -> two causal stride-2 convolutions (4x temporal
    downsampling overall). The padding mask is downsampled alongside the features.
    """

    def __init__(self, config):
        super().__init__()
        self.cmvn = MoonshineStreamingFrameCMVN()
        self.comp = MoonshineStreamingAsinhCompression()
        self.conv1 = MoonshineStreamingCausalConv1d(
            config.hidden_size, config.hidden_size * 2, kernel_size=5, stride=2
        )
        self.conv2 = MoonshineStreamingCausalConv1d(
            config.hidden_size * 2, config.hidden_size, kernel_size=5, stride=2
        )
        # Number of waveform samples per frame (sample_rate * frame_ms / 1000).
        self.frame_len = int(round(config.sample_rate * config.frame_ms / 1000.0))
        self.linear = nn.Linear(self.frame_len, config.hidden_size, bias=False)

    def forward(self, input_values, padding_mask=None):
        # Slice the waveform into non-overlapping frames, then normalize/compress.
        # NOTE(review): assumes the audio length is a multiple of frame_len (the
        # processor pads to a multiple of 80) — confirm against the feature extractor.
        hidden_states = self.cmvn(input_values.reshape(input_values.shape[0], -1, self.frame_len))
        hidden_states = self.comp(hidden_states)
        hidden_states = nn.functional.silu(self.linear(hidden_states))
        if padding_mask is not None:
            # Convert the sample-level mask to a frame-level mask and zero padded
            # frames. NOTE(review): counting valid samples assumes right-padding
            # (a contiguous valid prefix) — confirm.
            num_frames = padding_mask.sum(-1) // self.frame_len
            padding_mask = (
                torch.arange(hidden_states.shape[1], device=padding_mask.device)[None, :] < num_frames[:, None]
            )
            hidden_states *= padding_mask[..., None]
        # Convolutions run channel-first; each causal conv also downsamples the mask.
        hidden_states = hidden_states.transpose(1, 2)
        hidden_states, padding_mask = self.conv1(hidden_states, padding_mask)
        hidden_states = nn.functional.silu(hidden_states)
        hidden_states, padding_mask = self.conv2(hidden_states, padding_mask)
        hidden_states = hidden_states.transpose(1, 2)
        return hidden_states, padding_mask
class MoonshineStreamingPreTrainedModel(MoonshinePreTrainedModel):
    def _get_feat_extract_output_lengths(self, input_lengths: torch.LongTensor) -> torch.LongTensor:
        """Map raw-audio lengths to encoder sequence lengths (framing + two stride-2 convs)."""
        samples_per_frame = int(
            round(self.config.encoder_config.sample_rate * self.config.encoder_config.frame_ms / 1000.0)
        )
        lengths = input_lengths // samples_per_frame
        # Each of the two causal convolutions halves the length (ceil division).
        for _ in range(2):
            lengths = (lengths - 1) // 2 + 1
        return lengths

    def _init_weights(self, module: nn.Module):
        if isinstance(module, MoonshineStreamingLayerNorm):
            # Initialize gamma so the effective gain (gamma + unit_offset) starts at 1.
            nn.init.constant_(module.gamma, 1.0 - module.unit_offset)
        else:
            super()._init_weights(module)
def sliding_window_mask_function(sliding_window: tuple[int, int]) -> Callable:
    """
    Build a mask callback that allows attention within an asymmetric sliding window.

    A query at position ``q`` may attend to key position ``kv`` when ``kv`` trails
    ``q`` by fewer than ``left`` steps (including ``q`` itself) or leads it by fewer
    than ``right`` steps. Works elementwise on ints or tensors.
    """
    left, right = sliding_window

    def inner_mask(batch_idx: int, head_idx: int, q_idx: int, kv_idx: int) -> bool:
        offset = q_idx - kv_idx
        looks_back = (offset >= 0) & (offset < left)
        looks_ahead = (offset < 0) & (-offset < right)
        return looks_back | looks_ahead

    return inner_mask
class MoonshineStreamingEncoder(MoonshineStreamingPreTrainedModel):
    """Audio encoder: embedder + stack of sliding-window attention layers + final norm."""

    config: MoonshineStreamingEncoderConfig
    # Hooks for `capture_outputs`: where attentions / hidden states are recorded from.
    _can_record_outputs = {
        "attentions": OutputRecorder(MoonshineStreamingEncoderAttention, index=1, layer_name="self_attn"),
        "hidden_states": MoonshineStreamingEncoderLayer,
    }

    def __init__(self, config: MoonshineStreamingEncoderConfig):
        super().__init__(config)
        self.embedder = MoonshineStreamingEncoderEmbedder(config)
        self.layers = nn.ModuleList(
            [MoonshineStreamingEncoderLayer(config, idx) for idx in range(config.num_hidden_layers)]
        )
        self.final_norm = MoonshineStreamingLayerNorm(config.hidden_size)
        self.gradient_checkpointing = False
        self.post_init()

    @merge_with_config_defaults
    @capture_outputs
    def forward(
        self,
        input_values: torch.FloatTensor,
        attention_mask: torch.Tensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> BaseModelOutputWithPast:
        r"""
        Args:
            input_values (`torch.FloatTensor` of shape `(batch_size, audio_length)`):
                Float values of the raw speech waveform. Raw speech waveform can be
                obtained by loading a `.flac` or `.wav` audio file into an array of type `list[float]`, a
                `numpy.ndarray` or a `torch.Tensor`, *e.g.* via the torchcodec library (`pip install torchcodec`) or
                the soundfile library (`pip install soundfile`). To prepare the array into
                `input_values`, the [`AutoFeatureExtractor`] should be used for padding
                and conversion into a tensor of type `torch.FloatTensor`.
            attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding indices in `input_values`. Mask values selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                [What are attention masks?](../glossary#attention-mask)
        """
        # The embedder downsamples the waveform and returns a matching frame-level mask.
        inputs_embeds, attention_mask = self.embedder(input_values, padding_mask=attention_mask)

        if attention_mask is not None:
            mask_kwargs = {
                "config": self.config,
                "inputs_embeds": inputs_embeds,
                "attention_mask": attention_mask,
            }
            # One bidirectional mask per layer, each restricted to that layer's
            # (left, right) sliding window from the config.
            per_layer_attention_mask = [
                create_bidirectional_mask(
                    and_mask_function=sliding_window_mask_function(self.config.sliding_windows[layer_idx]),
                    **mask_kwargs,
                )
                for layer_idx in range(self.config.num_hidden_layers)
            ]

        hidden_states = inputs_embeds
        for layer_idx, encoder_layer in enumerate(self.layers):
            hidden_states = encoder_layer(
                hidden_states,
                attention_mask=per_layer_attention_mask[layer_idx] if attention_mask is not None else None,
                **kwargs,
            )

        hidden_states = self.final_norm(hidden_states)
        return MoonshineStreamingEncoderModelOutput(last_hidden_state=hidden_states, attention_mask=attention_mask)
class MoonshinMoonshineStreamingDecoderMLP(LlamaMLP): ...
class MoonshineStreamingDecoder(MoonshineDecoder):
    """Moonshine decoder that adds learned absolute position embeddings to the encoder
    states and optionally projects them when encoder/decoder widths differ."""

    def __init__(self, config):
        super().__init__(config)
        # Learned absolute positions for the encoder sequence, added before cross-attention.
        self.pos_emb = nn.Embedding(self.config.max_position_embeddings, config.encoder_config.hidden_size)
        if config.encoder_config.hidden_size != self.config.hidden_size:
            self.proj = nn.Linear(config.encoder_config.hidden_size, self.config.hidden_size, bias=False)
        else:
            self.proj = nn.Identity()

    @merge_with_config_defaults
    @capture_outputs
    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: Cache | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        use_cache: bool | None = None,
        cache_position: torch.LongTensor | None = None,
        encoder_hidden_states: torch.FloatTensor | None = None,
        encoder_attention_mask: torch.Tensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple | BaseModelOutputWithPast:
        r"""
        encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
            Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
            of the decoder.
        encoder_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding indices in `encoder_hidden_states`. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        """
        position_embeddings = self.pos_emb(
            torch.arange(encoder_hidden_states.shape[1], device=encoder_hidden_states.device)
        )
        # Out-of-place add: an in-place `+=` would mutate the caller's tensor, so
        # repeated decoder calls over the same cached encoder output (e.g. each step
        # of generation) would accumulate the position embeddings once per call.
        encoder_hidden_states = encoder_hidden_states + position_embeddings
        encoder_hidden_states = self.proj(encoder_hidden_states)
        return super().forward(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            cache_position=cache_position,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            **kwargs,
        )
class MoonshineStreamingModel(MoonshineModel):
    """Moonshine encoder-decoder model with the streaming encoder/decoder variants."""

    def __init__(self, config):
        super().__init__(config)
        # Replace the base Moonshine submodules with the streaming implementations.
        self.encoder = MoonshineStreamingEncoder(config.encoder_config)
        self.decoder = MoonshineStreamingDecoder(config)
class MoonshineStreamingForConditionalGeneration(MoonshineForConditionalGeneration): ...
# Public API of this modular file (picked up by the modular converter).
__all__ = [
    "MoonshineStreamingPreTrainedModel",
    "MoonshineStreamingModel",
    "MoonshineStreamingForConditionalGeneration",
    "MoonshineStreamingProcessor",
]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/moonshine_streaming/modular_moonshine_streaming.py",
"license": "Apache License 2.0",
"lines": 340,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:tests/models/moonshine_streaming/test_modeling_moonshine_streaming.py | # Copyright 2026 the HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch MoonshineStreaming model."""
import copy
import unittest
from transformers import MoonshineStreamingConfig, MoonshineStreamingEncoderConfig, is_torch_available
from transformers.testing_utils import cleanup, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import (
ModelTesterMixin,
floats_tensor,
random_attention_mask,
)
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
AutoProcessor,
MoonshineStreamingForConditionalGeneration,
MoonshineStreamingModel,
)
from datasets import load_dataset
class MoonshineStreamingModelTester:
    """Builds tiny configs and random inputs for the MoonshineStreaming common tests."""

    def __init__(
        self,
        parent,
        batch_size=3,  # need batch_size != num_hidden_layers
        seq_length=1040,
        is_training=False,
        use_labels=False,
        vocab_size=147,
        hidden_size=8,
        intermediate_size=32,
        num_hidden_layers=2,
        num_attention_heads=2,
        num_key_value_heads=2,
        head_dim=4,
        decoder_start_token_id=85,
        bos_token_id=98,
        eos_token_id=98,
        pad_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.head_dim = head_dim
        self.decoder_start_token_id = decoder_start_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id

    def prepare_config_and_inputs(self):
        # Random waveform + mask; the decoder starts from a single start token.
        input_values = floats_tensor([self.batch_size, self.seq_length], scale=1.0)
        attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        decoder_input_ids = torch.tensor(self.batch_size * [[self.decoder_start_token_id]], device=torch_device)
        decoder_attention_mask = decoder_input_ids.ne(self.pad_token_id)

        config = self.get_config()

        return config, input_values, attention_mask, decoder_input_ids, decoder_attention_mask

    def get_config(self):
        encoder_config = MoonshineStreamingEncoderConfig(
            hidden_size=self.hidden_size,
            intermediate_size=self.intermediate_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            num_key_value_heads=self.num_attention_heads,
            head_dim=self.head_dim,
        )
        return MoonshineStreamingConfig(
            encoder_config=encoder_config,
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            intermediate_size=self.intermediate_size,
            head_dim=self.head_dim,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            num_key_value_heads=self.num_key_value_heads,
            decoder_start_token_id=self.decoder_start_token_id,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
        )

    def check_output_attentions(self, config, input_values, attention_mask):
        model = MoonshineStreamingModel(config=config)
        model.to(torch_device)
        model.train()

        outputs = model(input_values, attention_mask=attention_mask, output_attentions=True)
        self.parent.assertTrue(len(outputs.attentions) > 0)

    def prepare_config_and_inputs_for_common(self):
        config, input_values, attention_mask, decoder_input_ids, decoder_attention_mask = (
            self.prepare_config_and_inputs()
        )
        inputs_dict = {
            "input_values": input_values,
            "attention_mask": attention_mask,
            "decoder_input_ids": decoder_input_ids,
            "decoder_attention_mask": decoder_attention_mask,
        }
        return config, inputs_dict
@require_torch
class MoonshineStreamingModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-API tests for MoonshineStreaming.

    Several shape checks use the *subsampled* encoder sequence length because the
    encoder embedder downsamples the raw waveform before attention.
    """

    all_model_classes = (
        (MoonshineStreamingModel, MoonshineStreamingForConditionalGeneration) if is_torch_available() else ()
    )
    # Doesn't run generation tests. TODO (eustache): remove this line and then make CI green
    all_generative_model_classes = ()
    pipeline_model_mapping = (
        {
            "automatic-speech-recognition": MoonshineStreamingForConditionalGeneration,
            "feature-extraction": MoonshineStreamingModel,
        }
        if is_torch_available()
        else {}
    )

    def setUp(self):
        self.model_tester = MoonshineStreamingModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MoonshineStreamingConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_can_init_all_missing_weights(self):
        self.skipTest("MoonshineStreaming uses special parameter initialization that conflicts with this test")

    def test_init_weights_can_init_buffers(self):
        self.skipTest("MoonshineStreaming uses special buffer initialization that conflicts with this test")

    def test_attention_outputs(self):
        # Overridden from the mixin: attention shapes must use the downsampled
        # (feature-extract) audio lengths rather than the raw sequence length.
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        seq_len = getattr(self.model_tester, "seq_length", None)
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", 1)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
        decoder_key_length = getattr(self.model_tester, "decoder_key_length", 1)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class._from_config(config, attn_implementation="eager")
            config = model.config
            model.to(torch_device)
            model.eval()

            subsampled_encoder_seq_length = model._get_feat_extract_output_lengths(encoder_seq_length)
            subsampled_encoder_key_length = model._get_feat_extract_output_lengths(encoder_key_length)

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            config.encoder_config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, subsampled_encoder_seq_length, subsampled_encoder_key_length],
            )
            out_len = len(outputs)

            correct_outlen = 5

            # loss is at first position
            if "labels" in inputs_dict:
                correct_outlen += 1  # loss is added to beginning
            if "past_key_values" in outputs:
                correct_outlen += 1  # past_key_values have been returned

            self.assertEqual(out_len, correct_outlen)

            # decoder attentions
            decoder_attentions = outputs.decoder_attentions
            self.assertIsInstance(decoder_attentions, (list, tuple))
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length],
            )

            # cross attentions
            cross_attentions = outputs.cross_attentions
            self.assertIsInstance(cross_attentions, (list, tuple))
            self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(cross_attentions[0].shape[-3:]),
                [
                    self.model_tester.num_attention_heads,
                    decoder_seq_length,
                    subsampled_encoder_key_length,
                ],
            )

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            added_hidden_states = 2
            self.assertEqual(out_len + added_hidden_states, len(outputs))

            self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions

            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, subsampled_encoder_seq_length, subsampled_encoder_key_length],
            )

    def test_hidden_states_output(self):
        # Overridden from the mixin for the same reason as test_attention_outputs:
        # hidden-state lengths are the subsampled encoder lengths.
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
            )
            self.assertEqual(len(hidden_states), expected_num_layers)

            if hasattr(self.model_tester, "encoder_seq_length"):
                seq_length = self.model_tester.encoder_seq_length
            else:
                seq_length = self.model_tester.seq_length

            subsampled_seq_length = model._get_feat_extract_output_lengths(seq_length)

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [subsampled_seq_length, self.model_tester.hidden_size],
            )

            if config.is_encoder_decoder:
                hidden_states = outputs.decoder_hidden_states

                self.assertIsInstance(hidden_states, (list, tuple))
                self.assertEqual(len(hidden_states), expected_num_layers)

                decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", 1)

                self.assertListEqual(
                    list(hidden_states[0].shape[-2:]),
                    [decoder_seq_length, self.model_tester.hidden_size],
                )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            config.encoder_config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    # Copied from tests.models.whisper.test_modeling_whisper.WhisperModelTest.test_inputs_embeds
    def test_inputs_embeds(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))

            decoder_input_ids = inputs.pop("decoder_input_ids", None)
            inputs.pop("decoder_attention_mask", None)

            wte = model.get_input_embeddings()
            inputs["decoder_inputs_embeds"] = wte(decoder_input_ids)

            with torch.no_grad():
                model(**inputs)[0]

    # Copied from tests.models.whisper.test_modeling_whisper.WhisperModelTest.test_resize_tokens_embeddings
    def test_resize_tokens_embeddings(self):
        (
            original_config,
            inputs_dict,
        ) = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.test_resize_embeddings:
            self.skipTest(reason="test_resize_embeddings is False")

        for model_class in self.all_model_classes:
            config = copy.deepcopy(original_config)
            model = model_class(config)
            model.to(torch_device)
            if self.model_tester.is_training is False:
                model.eval()

            model_vocab_size = config.vocab_size
            # Retrieve the embeddings and clone theme
            model_embed = model.resize_token_embeddings(model_vocab_size)
            cloned_embeddings = model_embed.weight.clone()

            # Check that resizing the token embeddings with a larger vocab size increases the model's vocab size
            model_embed = model.resize_token_embeddings(model_vocab_size + 10)
            self.assertEqual(model.config.vocab_size, model_vocab_size + 10)
            # Check that it actually resizes the embeddings matrix
            self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] + 10)
            # Check that the model can still do a forward pass successfully (every parameter should be resized)
            model(**self._prepare_for_class(inputs_dict, model_class))

            # Check that resizing the token embeddings with a smaller vocab size decreases the model's vocab size
            model_embed = model.resize_token_embeddings(model_vocab_size - 15)
            self.assertEqual(model.config.vocab_size, model_vocab_size - 15)
            # Check that it actually resizes the embeddings matrix
            self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] - 15)

            # make sure that decoder_input_ids are resized
            if "decoder_input_ids" in inputs_dict:
                inputs_dict["decoder_input_ids"].clamp_(max=model_vocab_size - 15 - 1)
            model(**self._prepare_for_class(inputs_dict, model_class))

            # Check that adding and removing tokens has not modified the first part of the embedding matrix.
            models_equal = True
            for p1, p2 in zip(cloned_embeddings, model_embed.weight):
                if p1.data.ne(p2.data).sum() > 0:
                    models_equal = False

            self.assertTrue(models_equal)

    # Copied from tests.models.whisper.test_modeling_whisper.WhisperModelTest.test_resize_embeddings_untied
    def test_resize_embeddings_untied(self):
        (
            original_config,
            inputs_dict,
        ) = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.test_resize_embeddings:
            self.skipTest(reason="test_resize_embeddings is False")

        original_config.tie_word_embeddings = False

        # if model cannot untied embeddings -> leave test
        if original_config.tie_word_embeddings:
            self.skipTest(reason="Model cannot untie embeddings")

        for model_class in self.all_model_classes:
            config = copy.deepcopy(original_config)
            model = model_class(config).to(torch_device)
            model.eval()

            # if no output embeddings -> leave test
            if model.get_output_embeddings() is None:
                continue

            # Check that resizing the token embeddings with a larger vocab size increases the model's vocab size
            model_vocab_size = config.vocab_size
            model.resize_token_embeddings(model_vocab_size + 10)
            self.assertEqual(model.config.vocab_size, model_vocab_size + 10)
            output_embeds = model.get_output_embeddings()
            self.assertEqual(output_embeds.weight.shape[0], model_vocab_size + 10)
            # Check bias if present
            if output_embeds.bias is not None:
                self.assertEqual(output_embeds.bias.shape[0], model_vocab_size + 10)
            # Check that the model can still do a forward pass successfully (every parameter should be resized)
            model(**self._prepare_for_class(inputs_dict, model_class))

            # Check that resizing the token embeddings with a smaller vocab size decreases the model's vocab size
            model.resize_token_embeddings(model_vocab_size - 15)
            self.assertEqual(model.config.vocab_size, model_vocab_size - 15)
            # Check that it actually resizes the embeddings matrix
            output_embeds = model.get_output_embeddings()
            self.assertEqual(output_embeds.weight.shape[0], model_vocab_size - 15)
            # Check bias if present
            if output_embeds.bias is not None:
                self.assertEqual(output_embeds.bias.shape[0], model_vocab_size - 15)
            # Check that the model can still do a forward pass successfully (every parameter should be resized)
            if "decoder_input_ids" in inputs_dict:
                inputs_dict["decoder_input_ids"].clamp_(max=model_vocab_size - 15 - 1)
            # Check that the model can still do a forward pass successfully (every parameter should be resized)
            model(**self._prepare_for_class(inputs_dict, model_class))
@require_torch
class MoonshineStreamingModelIntegrationTests(unittest.TestCase):
    """Slow integration tests for the pretrained Moonshine streaming checkpoints
    (tiny / small / medium): first-decoding-step logits are compared against
    hardcoded reference values, and greedy transcriptions are compared against
    reference strings, on LibriSpeech dummy audio samples."""
    def setUp(self):
        # Load processors for all three checkpoint sizes up front; each test uses the one matching its model.
        self.processor_tiny = AutoProcessor.from_pretrained("UsefulSensors/moonshine-streaming-tiny")
        self.processor_small = AutoProcessor.from_pretrained("UsefulSensors/moonshine-streaming-small")
        self.processor_medium = AutoProcessor.from_pretrained("UsefulSensors/moonshine-streaming-medium")
    def tearDown(self):
        # Free accelerator memory between tests (the loaded models are large).
        cleanup(torch_device, gc_collect=True)
    def _load_datasamples(self, num_samples):
        """Return the raw audio arrays of the first `num_samples` samples (sorted by id)
        from the LibriSpeech dummy validation split."""
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id")[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]
    @slow
    def test_tiny_logits_single(self):
        """Check the tiny model's first-step logits on a single sample against reference values."""
        model = MoonshineStreamingForConditionalGeneration.from_pretrained("UsefulSensors/moonshine-streaming-tiny")
        model.to(torch_device)
        inputs = self.processor_tiny(self._load_datasamples(1), sampling_rate=16000)
        inputs.to(torch_device)
        outputs = model.generate(**inputs, max_new_tokens=1, return_dict_in_generate=True, output_logits=True)
        # fmt: off
        EXPECTED_LOGITS = torch.tensor([
            -13.847891807556152, -0.18819725513458252, 3.1453802585601807, -13.759804725646973, -13.689135551452637,
            -13.750009536743164, -13.690473556518555, -13.681711196899414, -13.769899368286133, -13.692444801330566,
            -13.809157371520996, -13.810665130615234, -13.652420043945312, -13.789128303527832, -13.746649742126465,
            -13.74869155883789, -13.79692268371582, -13.63906192779541, -13.665060997009277, -13.634946823120117,
            -13.711505889892578, -13.777567863464355, -13.721321105957031, -13.677959442138672, -13.754849433898926,
            -13.712194442749023, -13.79233169555664, -13.687705039978027, -13.664924621582031, -13.779203414916992,
        ])
        # fmt: on
        torch.testing.assert_close(outputs.logits[0][0, :30].cpu(), EXPECTED_LOGITS, rtol=2e-4, atol=2e-4)
    @slow
    def test_small_logits_single(self):
        """Check the small model's first-step logits on a single sample against reference values."""
        model = MoonshineStreamingForConditionalGeneration.from_pretrained("UsefulSensors/moonshine-streaming-small")
        model.to(torch_device)
        inputs = self.processor_small(self._load_datasamples(1), sampling_rate=16000)
        inputs.to(torch_device)
        outputs = model.generate(**inputs, max_new_tokens=1, return_dict_in_generate=True, output_logits=True)
        # fmt: off
        EXPECTED_LOGITS = torch.tensor([
            -9.193448066711426, -1.3106095790863037, 2.4847524166107178, -9.474504470825195, -9.443048477172852,
            -9.465521812438965, -9.475011825561523, -9.474539756774902, -9.452878952026367, -9.46949577331543,
            -9.46340560913086, -9.48450756072998, -9.512656211853027, -9.460539817810059, -9.464164733886719,
            -9.46074104309082, -9.420138359069824, -9.48065185546875, -9.467584609985352, -9.43082332611084,
            -9.467816352844238, -9.473931312561035, -9.462691307067871, -9.438430786132812, -9.448503494262695,
            -9.438905715942383, -9.440755844116211, -9.487390518188477, -9.487754821777344, -9.472284317016602,
        ])
        # fmt: on
        torch.testing.assert_close(outputs.logits[0][0, :30].cpu(), EXPECTED_LOGITS, rtol=1e-4, atol=1e-4)
    @slow
    def test_medium_logits_single(self):
        """Check the medium model's first-step logits on a single sample against reference values."""
        model = MoonshineStreamingForConditionalGeneration.from_pretrained("UsefulSensors/moonshine-streaming-medium")
        model.to(torch_device)
        inputs = self.processor_medium(self._load_datasamples(1), sampling_rate=16000)
        inputs.to(torch_device)
        outputs = model.generate(**inputs, max_new_tokens=1, return_dict_in_generate=True, output_logits=True)
        # fmt: off
        EXPECTED_LOGITS = torch.tensor([
            -9.380514144897461, -1.8016688823699951, 1.309783935546875, -9.992443084716797, -10.047298431396484,
            -9.993546485900879, -10.00343132019043, -10.052844047546387, -10.095193862915039, -9.937813758850098,
            -9.995306968688965, -10.06312370300293, -10.039563179016113, -10.00948715209961, -10.04725170135498,
            -10.08010196685791, -10.043283462524414, -10.06112289428711, -9.989591598510742, -10.034473419189453,
            -9.958343505859375, -9.956878662109375, -10.006301879882812, -10.032047271728516, -9.969188690185547,
            -10.00571060180664, -10.043065071105957, -9.983331680297852, -9.988570213317871, -9.935394287109375,
        ])
        # fmt: on
        torch.testing.assert_close(outputs.logits[0][0, :30].cpu(), EXPECTED_LOGITS, rtol=1e-4, atol=1e-4)
    @slow
    def test_tiny_logits_batch(self):
        """Check the tiny model's first-step logits on a batch of 4 samples against reference values."""
        model = MoonshineStreamingForConditionalGeneration.from_pretrained("UsefulSensors/moonshine-streaming-tiny")
        model.to(torch_device)
        inputs = self.processor_tiny(self._load_datasamples(4), sampling_rate=16000)
        inputs.to(torch_device)
        outputs = model.generate(**inputs, max_new_tokens=1, return_dict_in_generate=True, output_logits=True)
        # fmt: off
        EXPECTED_LOGITS = torch.tensor(
            [
                [-12.441858291625977, -0.2812096178531647, 2.7568106651306152, -12.284578323364258, -12.205985069274902, -12.262890815734863, -12.224806785583496, -12.220057487487793, -12.314021110534668, -12.228297233581543],
                [-13.319320678710938, -3.6359996795654297, 4.0685296058654785, -13.046940803527832, -13.122637748718262, -13.096488952636719, -13.141905784606934, -13.038910865783691, -13.136741638183594, -13.037278175354004],
                [-10.126669883728027, -4.161841869354248, 4.4407429695129395, -10.040196418762207, -10.065054893493652, -10.001801490783691, -9.991734504699707, -10.037150382995605, -10.0549898147583, -10.101166725158691],
                [-11.697093963623047, -3.0441789627075195, 3.8363659381866455, -11.45719051361084, -11.495401382446289, -11.519722938537598, -11.482342720031738, -11.529292106628418, -11.5482177734375, -11.483217239379883],
            ],
        )
        # fmt: on
        torch.testing.assert_close(outputs.logits[0][:, :10].cpu(), EXPECTED_LOGITS, rtol=2e-4, atol=2e-4)
    @slow
    def test_small_logits_batch(self):
        """Check the small model's first-step logits on a batch of 4 samples against reference values."""
        model = MoonshineStreamingForConditionalGeneration.from_pretrained("UsefulSensors/moonshine-streaming-small")
        model.to(torch_device)
        inputs = self.processor_small(self._load_datasamples(4), sampling_rate=16000)
        inputs.to(torch_device)
        outputs = model.generate(**inputs, max_new_tokens=1, return_dict_in_generate=True, output_logits=True)
        # fmt: off
        EXPECTED_LOGITS = torch.tensor(
            [
                [-9.596293449401855, -1.297331690788269, 2.817121982574463, -9.826224327087402, -9.802359580993652, -9.802471160888672, -9.81285285949707, -9.82018756866455, -9.801692962646484, -9.809906005859375],
                [-9.602995872497559, 0.32756108045578003, 3.0864665508270264, -9.754168510437012, -9.803014755249023, -9.832489013671875, -9.785274505615234, -9.750894546508789, -9.827933311462402, -9.816366195678711],
                [-10.247313499450684, -0.4231721254699707, 3.1179518699645996, -9.989541053771973, -10.001238822937012, -10.040529251098633, -9.996538162231445, -10.052029609680176, -9.986088752746582, -10.036115646362305],
                [-9.98245906829834, -1.4063411709259033, 3.539100170135498, -9.433758735656738, -9.444565773010254, -9.49752426147461, -9.452383995056152, -9.457331657409668, -9.432816505432129, -9.439447402954102],
            ]
        )
        # fmt: on
        torch.testing.assert_close(outputs.logits[0][:, :10].cpu(), EXPECTED_LOGITS, rtol=2e-4, atol=2e-4)
    @slow
    def test_medium_logits_batch(self):
        """Check the medium model's first-step logits on a batch of 4 samples against reference values."""
        model = MoonshineStreamingForConditionalGeneration.from_pretrained("UsefulSensors/moonshine-streaming-medium")
        model.to(torch_device)
        inputs = self.processor_medium(self._load_datasamples(4), sampling_rate=16000)
        inputs.to(torch_device)
        outputs = model.generate(**inputs, max_new_tokens=1, return_dict_in_generate=True, output_logits=True)
        # fmt: off
        EXPECTED_LOGITS = torch.tensor(
            [
                [-9.423518180847168, -1.6021490097045898, 1.3190011978149414, -10.032197952270508, -10.08576774597168, -10.04221248626709, -10.057312965393066, -10.089818000793457, -10.141901969909668, -10.003352165222168],
                [-9.891376495361328, -2.268763542175293, 2.4474310874938965, -10.193374633789062, -10.256990432739258, -10.184536933898926, -10.223142623901367, -10.29221248626709, -10.325952529907227, -10.256648063659668],
                [-9.396651268005371, -0.7291030287742615, 2.299491403982544, -9.815659523010254, -9.854050636291504, -9.821599006652832, -9.81181812286377, -9.838842391967773, -9.854424476623535, -9.855895042419434],
                [-8.918790817260742, -0.6990604400634766, 1.3242177963256836, -8.931782722473145, -9.016800880432129, -8.92956829071045, -8.945950508117676, -8.984317779541016, -8.983695030212402, -8.945679664611816],
            ]
        )
        # fmt: on
        torch.testing.assert_close(outputs.logits[0][:, :10].cpu(), EXPECTED_LOGITS, rtol=2e-4, atol=2e-4)
    @slow
    def test_tiny_generation_single(self):
        """Check the tiny model's greedy transcription of a single sample."""
        model = MoonshineStreamingForConditionalGeneration.from_pretrained("UsefulSensors/moonshine-streaming-tiny")
        model.to(torch_device)
        audio_array = self._load_datasamples(1)
        inputs = self.processor_tiny(audio_array, sampling_rate=16000)
        inputs.to(torch_device)
        generated_ids = model.generate(**inputs, max_new_tokens=20)
        transcript = self.processor_tiny.batch_decode(generated_ids, skip_special_tokens=True)[0]
        EXPECTED_TRANSCRIPT = "Mr. Quilter is the apostle of the Middle Classes, and we are glad to"
        self.assertEqual(transcript, EXPECTED_TRANSCRIPT)
    @slow
    def test_small_generation_single(self):
        """Check the small model's greedy transcription of a single sample."""
        model = MoonshineStreamingForConditionalGeneration.from_pretrained("UsefulSensors/moonshine-streaming-small")
        model.to(torch_device)
        audio_array = self._load_datasamples(1)
        inputs = self.processor_small(audio_array, sampling_rate=16000)
        inputs.to(torch_device)
        generated_ids = model.generate(**inputs, max_new_tokens=20)
        transcript = self.processor_small.batch_decode(generated_ids, skip_special_tokens=True)[0]
        EXPECTED_TRANSCRIPT = "Mister Quilter is the apostle of the middle classes, and we are glad to welcome"
        self.assertEqual(transcript, EXPECTED_TRANSCRIPT)
    @slow
    def test_medium_generation_single(self):
        """Check the medium model's greedy transcription of a single sample."""
        model = MoonshineStreamingForConditionalGeneration.from_pretrained("UsefulSensors/moonshine-streaming-medium")
        model.to(torch_device)
        audio_array = self._load_datasamples(1)
        inputs = self.processor_medium(audio_array, sampling_rate=16000)
        inputs.to(torch_device)
        generated_ids = model.generate(**inputs, max_new_tokens=20)
        transcript = self.processor_medium.batch_decode(generated_ids, skip_special_tokens=True)[0]
        EXPECTED_TRANSCRIPT = "Mister Quilter is the apostle of the middle classes, and we are glad to welcome"
        self.assertEqual(transcript, EXPECTED_TRANSCRIPT)
    @slow
    def test_tiny_generation_batch(self):
        """Check the tiny model's greedy transcriptions on a batch of 4 samples."""
        model = MoonshineStreamingForConditionalGeneration.from_pretrained("UsefulSensors/moonshine-streaming-tiny")
        model.to(torch_device)
        audio_array = self._load_datasamples(4)
        inputs = self.processor_tiny(audio_array, sampling_rate=16000)
        inputs.to(torch_device)
        generated_ids = model.generate(**inputs, max_new_tokens=20)
        transcript = self.processor_tiny.batch_decode(generated_ids, skip_special_tokens=True)
        # fmt: off
        EXPECTED_TRANSCRIPT = [
            "Mr. Quilter is the apostle of the Middle Classes, and we are glad to",
            "Nor is Mr. Quilter's manner less interesting than his matter.",
            "He tells us that at this festive season of the year, with Christmas and a roast be",
            "He has grieved doubts whether Sir Frederick Layton's work is really Greek after all",
        ]
        # fmt: on
        self.assertListEqual(transcript, EXPECTED_TRANSCRIPT)
    @slow
    def test_small_generation_batch(self):
        """Check the small model's greedy transcriptions on a batch of 4 samples."""
        model = MoonshineStreamingForConditionalGeneration.from_pretrained("UsefulSensors/moonshine-streaming-small")
        model.to(torch_device)
        audio_array = self._load_datasamples(4)
        inputs = self.processor_small(audio_array, sampling_rate=16000)
        inputs.to(torch_device)
        generated_ids = model.generate(**inputs, max_new_tokens=20)
        transcript = self.processor_small.batch_decode(generated_ids, skip_special_tokens=True)
        # fmt: off
        EXPECTED_TRANSCRIPT = [
            "Mister Quilter is the apostle of the middle classes, and we are glad to welcome",
            "Nor is Mister Quilter's manner less interesting than his matter.",
            "He tells us that at this festive season of the year, with Christmas and roast beef",
            "He has grave doubts whether Sir Frederick Layton's work is really Greek after all,",
        ]
        # fmt: on
        self.assertListEqual(transcript, EXPECTED_TRANSCRIPT)
    @slow
    def test_medium_generation_batch(self):
        """Check the medium model's greedy transcriptions on a batch of 4 samples."""
        model = MoonshineStreamingForConditionalGeneration.from_pretrained("UsefulSensors/moonshine-streaming-medium")
        model.to(torch_device)
        audio_array = self._load_datasamples(4)
        inputs = self.processor_medium(audio_array, sampling_rate=16000)
        inputs.to(torch_device)
        generated_ids = model.generate(**inputs, max_new_tokens=20)
        transcript = self.processor_medium.batch_decode(generated_ids, skip_special_tokens=True)
        # fmt: off
        EXPECTED_TRANSCRIPT = [
            "Mister Quilter is the apostle of the middle classes, and we are glad to welcome",
            "Nor is Mister Quilter's manner less interesting than his matter.",
            "He tells us that at this festive season of the year, with Christmas and roast beef",
            "He has grave doubts whether Sir Frederick Leighton's work is really Greek after all,",
        ]
        # fmt: on
        self.assertListEqual(transcript, EXPECTED_TRANSCRIPT)
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/moonshine_streaming/test_modeling_moonshine_streaming.py",
"license": "Apache License 2.0",
"lines": 566,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:src/transformers/backbone_utils.py | # Copyright 2026 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Collection of utils to be used by backbones and their components."""
import enum
import inspect
from huggingface_hub import repo_exists
from .utils import logging
logger = logging.get_logger(__name__)
class BackboneType(enum.Enum):
    """Identifies which library a backbone implementation originates from."""

    TIMM = "timm"
    TRANSFORMERS = "transformers"
class BackboneConfigMixin:
"""
A Mixin to support handling the `out_features` and `out_indices` attributes for the backbone configurations.
"""
def set_output_features_output_indices(
self,
out_features: list | None,
out_indices: list | None,
):
"""
Sets output indices and features to new values and aligns them with the given `stage_names`.
If one of the inputs is not given, find the corresponding `out_features` or `out_indices`
for the given `stage_names`.
Args:
out_features (`list[str]`, *optional*):
The names of the features for the backbone to output. Defaults to `config._out_features` if not provided.
out_indices (`list[int]` or `tuple[int]`, *optional*):
The indices of the features for the backbone to output. Defaults to `config._out_indices` if not provided.
"""
self._out_features = out_features
self._out_indices = list(out_indices) if isinstance(out_indices, tuple) else out_indices
# First verify that the out_features and out_indices are valid
self.verify_out_features_out_indices()
# Align output features with indices
out_features, out_indices = self._out_features, self._out_indices
if out_indices is None and out_features is None:
out_indices = [len(self.stage_names) - 1]
out_features = [self.stage_names[-1]]
elif out_indices is None and out_features is not None:
out_indices = [self.stage_names.index(layer) for layer in out_features]
elif out_features is None and out_indices is not None:
out_features = [self.stage_names[idx] for idx in out_indices]
# Update values and verify that the aligned out_features and out_indices are valid
self._out_features, self._out_indices = out_features, out_indices
self.verify_out_features_out_indices()
def verify_out_features_out_indices(self):
"""
Verify that out_indices and out_features are valid for the given stage_names.
"""
if self.stage_names is None:
raise ValueError("Stage_names must be set for transformers backbones")
if self._out_features is not None:
if not isinstance(self._out_features, (list,)):
raise ValueError(f"out_features must be a list got {type(self._out_features)}")
if any(feat not in self.stage_names for feat in self._out_features):
raise ValueError(
f"out_features must be a subset of stage_names: {self.stage_names} got {self._out_features}"
)
if len(self._out_features) != len(set(self._out_features)):
raise ValueError(f"out_features must not contain any duplicates, got {self._out_features}")
if self._out_features != (
sorted_feats := [feat for feat in self.stage_names if feat in self._out_features]
):
raise ValueError(
f"out_features must be in the same order as stage_names, expected {sorted_feats} got {self._out_features}"
)
if self._out_indices is not None:
if not isinstance(self._out_indices, list):
raise ValueError(f"out_indices must be a list, got {type(self._out_indices)}")
# Convert negative indices to their positive equivalent: [-1,] -> [len(stage_names) - 1,]
positive_indices = tuple(idx % len(self.stage_names) if idx < 0 else idx for idx in self._out_indices)
if any(idx for idx in positive_indices if idx not in range(len(self.stage_names))):
raise ValueError(
f"out_indices must be valid indices for stage_names {self.stage_names}, got {self._out_indices}"
)
if len(positive_indices) != len(set(positive_indices)):
msg = f"out_indices must not contain any duplicates, got {self._out_indices}"
msg += f"(equivalent to {positive_indices}))" if positive_indices != self._out_indices else ""
raise ValueError(msg)
if positive_indices != tuple(sorted(positive_indices)):
sorted_negative = [
idx for _, idx in sorted(zip(positive_indices, self._out_indices), key=lambda x: x[0])
]
raise ValueError(
f"out_indices must be in the same order as stage_names, expected {sorted_negative} got {self._out_indices}"
)
if self._out_features is not None and self._out_indices is not None:
if len(self._out_features) != len(self._out_indices):
raise ValueError("out_features and out_indices should have the same length if both are set")
if self._out_features != [self.stage_names[idx] for idx in self._out_indices]:
raise ValueError("out_features and out_indices should correspond to the same stages if both are set")
@property
def out_features(self):
return self._out_features
@out_features.setter
def out_features(self, out_features: list[str]):
"""
Set the out_features attribute. This will also update the out_indices attribute to match the new out_features.
"""
self.set_output_features_output_indices(out_features=out_features, out_indices=None)
@property
def out_indices(self):
return self._out_indices
@out_indices.setter
def out_indices(self, out_indices: tuple[int, ...] | list[int]):
"""
Set the out_indices attribute. This will also update the out_features attribute to match the new out_indices.
"""
out_indices = list(out_indices) if out_indices is not None else out_indices
self.set_output_features_output_indices(out_features=None, out_indices=out_indices)
def to_dict(self):
"""
Serializes this instance to a Python dictionary. Override the default `to_dict()` from `PreTrainedConfig` to
include the `out_features` and `out_indices` attributes.
"""
output = super().to_dict()
output["out_features"] = output.pop("_out_features", None)
output["out_indices"] = output.pop("_out_indices", None)
return output
class BackboneMixin:
    """Mixin providing the shared backbone API (out_features / out_indices / channels) on models."""

    backbone_type: BackboneType | None = None
    # Attribute to indicate if the backbone has attention and can return attention outputs.
    # Should be set to `False` for conv-based models to be able to run `forward_with_filtered_kwargs`
    has_attentions: bool = True

    def __init__(self, *args, **kwargs) -> None:
        """
        Method to initialize the backbone. This method is called by the constructor of the base class after the
        pretrained model weights have been loaded.
        """
        super().__init__(*args, **kwargs)
        timm_backbone = kwargs.pop("timm_backbone", None)
        self.backbone_type = BackboneType.TIMM if timm_backbone is not None else BackboneType.TRANSFORMERS
        if self.backbone_type == BackboneType.TIMM:
            self._init_timm_backbone(backbone=timm_backbone)
        elif self.backbone_type == BackboneType.TRANSFORMERS:
            self._init_transformers_backbone()
        else:
            raise ValueError(f"backbone_type {self.backbone_type} not supported.")

    def _init_timm_backbone(self, backbone) -> None:
        """
        Initialize the backbone model from timm. The backbone must already be loaded to backbone
        """
        config_out_features = getattr(self.config, "out_features", None)
        config_stage_names = getattr(self.config, "stage_names", None)
        # These will disagree with the defaults for the transformers models e.g. for resnet50
        # the transformer model has out_features = ['stem', 'stage1', 'stage2', 'stage3', 'stage4']
        # the timm model has out_features = ['act', 'layer1', 'layer2', 'layer3', 'layer4']
        feature_info = backbone.feature_info
        self.stage_names = [entry["module"] for entry in feature_info.info]
        self.num_features = [entry["num_chs"] for entry in feature_info.info]
        out_indices = list(feature_info.out_indices)
        out_features = feature_info.module_name()
        if config_out_features is not None and config_out_features != out_features:
            raise ValueError(
                f"Config has `out_features` set to {config_out_features} which doesn't match `out_features` "
                "from backbone's feature_info. Please check if your checkpoint has correct out features/indices saved."
            )
        if config_stage_names is not None and config_stage_names != self.stage_names:
            raise ValueError(
                f"Config has `stage_names` set to {config_stage_names} which doesn't match `stage_names` "
                "from backbone's feature_info. Please check if your checkpoint has correct `stage_names` saved."
            )
        # We set, align and verify out indices, out features and stage names
        self.config.stage_names = self.stage_names
        self.config.set_output_features_output_indices(out_features, out_indices)

    def _init_transformers_backbone(self) -> None:
        """Initialize bookkeeping for a transformers-native backbone from its own config."""
        self.stage_names = self.config.stage_names
        self.config.verify_out_features_out_indices()
        # Number of channels for each stage. This is set in the transformer backbone model init
        self.num_features = None

    @property
    def out_features(self):
        """Names of the stages the backbone outputs (delegates to the config)."""
        return self.config._out_features

    @out_features.setter
    def out_features(self, out_features: list[str]):
        """
        Set the out_features attribute. This will also update the out_indices attribute to match the new out_features.
        """
        self.config.out_features = out_features

    @property
    def out_indices(self):
        """Indices of the stages the backbone outputs (delegates to the config)."""
        return self.config._out_indices

    @out_indices.setter
    def out_indices(self, out_indices: tuple[int] | list[int]):
        """
        Set the out_indices attribute. This will also update the out_features attribute to match the new out_indices.
        """
        self.config.out_indices = out_indices

    @property
    def out_feature_channels(self):
        # the current backbones will output the number of channels for each stage
        # even if that stage is not in the out_features list.
        return {name: self.num_features[position] for position, name in enumerate(self.stage_names)}

    @property
    def channels(self):
        """Channel counts of the selected `out_features`, in order."""
        channel_map = self.out_feature_channels
        return [channel_map[feature] for feature in self.out_features]

    def forward_with_filtered_kwargs(self, *args, **kwargs):
        """Call the model, dropping kwargs the underlying (timm) forward does not accept."""
        if not self.has_attentions:
            kwargs.pop("output_attentions", None)
        if self.backbone_type == BackboneType.TIMM:
            # timm forwards declare a fixed signature; keep only the kwargs it accepts
            accepted = inspect.signature(self.forward).parameters
            kwargs = {name: value for name, value in kwargs.items() if name in accepted}
        return self(*args, **kwargs)

    def forward(
        self,
        pixel_values,
        output_hidden_states: bool | None = None,
        output_attentions: bool | None = None,
        return_dict: bool | None = None,
    ):
        raise NotImplementedError("This method should be implemented by the derived class.")
def consolidate_backbone_kwargs_to_config(
    backbone_config,
    default_backbone: str | None = None,
    default_config_type: str | None = None,
    default_config_kwargs: dict | None = None,
    timm_default_kwargs: dict | None = None,
    **kwargs,
):
    """
    Resolve the various backbone-related kwargs (`backbone`, `backbone_kwargs`, `use_timm_backbone`, ...)
    into a single backbone config object.

    Returns:
        A `(backbone_config, remaining_kwargs)` tuple, where the consumed backbone kwargs have been
        popped from `kwargs`.
    """
    # Lazy import to avoid circular import issues. Can be imported properly
    # after deleting ref to `BackboneMixin` in `utils/backbone_utils.py`
    from .configuration_utils import PreTrainedConfig
    from .models.auto import CONFIG_MAPPING

    use_timm_backbone = kwargs.pop("use_timm_backbone", True)
    backbone_kwargs = kwargs.pop("backbone_kwargs", {})
    # Only pop `backbone` when it carries a real value; otherwise fall back to the provided default
    backbone = kwargs.pop("backbone") if kwargs.get("backbone") is not None else default_backbone
    # Accepted for backward compatibility but ignored here
    kwargs.pop("use_pretrained_backbone", None)

    # Init timm backbone with hardcoded values for BC. If everything is set to `None` and there is
    # a default timm config, we use it to init the backbone.
    prefer_default_timm = (
        timm_default_kwargs is not None
        and use_timm_backbone
        and backbone is not None
        and backbone_config is None
        and not backbone_kwargs
    )
    if prefer_default_timm:
        backbone_config = CONFIG_MAPPING["timm_backbone"](backbone=backbone, **timm_default_kwargs)
    elif backbone is not None and backbone_config is None:
        if repo_exists(backbone):
            # `backbone` names a hub repo: load its config dict and overlay `backbone_kwargs`
            config_dict, _ = PreTrainedConfig.get_config_dict(backbone)
            config_class = CONFIG_MAPPING[config_dict["model_type"]]
            config_dict.update(backbone_kwargs)
            backbone_config = config_class(**config_dict)
        else:
            # Otherwise treat `backbone` as a timm architecture name
            backbone_config = CONFIG_MAPPING["timm_backbone"](backbone=backbone, **backbone_kwargs)
    elif backbone_config is None and default_config_type is not None:
        logger.info(
            f"`backbone_config` is `None`. Initializing the config with the default `{default_config_type}` vision config."
        )
        backbone_config = CONFIG_MAPPING[default_config_type](**(default_config_kwargs or {}))
    elif isinstance(backbone_config, dict):
        # A serialized config was passed in: rebuild the proper config class from its `model_type`
        config_class = CONFIG_MAPPING[backbone_config.get("model_type")]
        backbone_config = config_class.from_dict(backbone_config)
    return backbone_config, kwargs
def load_backbone(config):
    """
    Loads the backbone model from a config object.

    If the config is from the backbone model itself, then we return a backbone model with randomly initialized
    weights.

    If the config is from the parent model of the backbone model itself, then we load the pretrained backbone weights
    if specified.
    """
    from transformers import AutoBackbone

    nested_config = getattr(config, "backbone_config", None)
    # A parent-model config nests the backbone config; a backbone config is used directly.
    effective_config = config if nested_config is None else nested_config
    return AutoBackbone.from_config(config=effective_config)
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/backbone_utils.py",
"license": "Apache License 2.0",
"lines": 285,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/exaone_moe/modular_exaone_moe.py | # Copyright 2026 The LG AI Research and HuggingFace Inc. team. All rights reserved.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""LG AI Research EXAONE Lab"""
import torch
import torch.nn as nn
from ... import initialization as init
from ...cache_utils import Cache
from ...configuration_utils import PreTrainedConfig, layer_type_validation
from ...modeling_outputs import CausalLMOutputWithPast
from ...modeling_utils import PreTrainedModel
from ...processing_utils import Unpack
from ...utils import TransformersKwargs
from ..deepseek_v3.modeling_deepseek_v3 import (
DeepseekV3MoE,
DeepseekV3NaiveMoe,
DeepseekV3TopkRouter,
)
from ..exaone4.configuration_exaone4 import Exaone4Config
from ..exaone4.modeling_exaone4 import (
Exaone4Attention,
Exaone4ForCausalLM,
Exaone4Model,
Exaone4PreTrainedModel,
)
from ..olmoe.modeling_olmoe import (
OlmoeDecoderLayer,
)
from ..qwen2_moe.modeling_qwen2_moe import Qwen2MoeMLP
class ExaoneMoeConfig(Exaone4Config):
model_type = "exaone_moe"
r"""
This is the configuration class to store the configuration of a [`ExaoneMoeModel`]. It is used to
instantiate a EXAONE MoE model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the K-EXAONE-236B-A23B [LGAI-EXAONE/K-EXAONE-236B-A23B](https://huggingface.co/LGAI-EXAONE/K-EXAONE-236B-A23B)
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model
outputs. Read the documentation from [`PreTrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 102400):
Vocabulary size of the EXAONE MoE model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`ExaoneMoeModel`].
hidden_size (`int`, *optional*, defaults to 4096):
Dimension of the hidden representations.
intermediate_size (`int`, *optional*, defaults to 16384):
Dimensionality of the MLP representations.
num_hidden_layers (`int`, *optional*, defaults to 32):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 32):
Number of attention heads for each attention layer in the Transformer decoder.
num_key_value_heads (`int`, *optional*, defaults to 32):
This is the number of key_value heads that should be used to implement Grouped Query Attention. If
`num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
`num_key_value_heads=1 the model will use Multi Query Attention (MQA) otherwise GQA is used. When
converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
by meanpooling all the original heads within that group. For more details checkout [this
paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to
`num_attention_heads`.
hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
The non-linear activation function (function or string) in the decoder.
max_position_embeddings (`int`, *optional*, defaults to 2048):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 32768 for EXAONE 3.5).
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
rms_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the layer normalization layers.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if ``config.is_decoder=True``.
bos_token_id (`int`, *optional*, defaults to 1):
Beginning of stream token id.
eos_token_id (`int`, *optional*, defaults to 53):
End of stream token id.
pad_token_id (`int`, *optional*, defaults to 0):
Padding token id.
tie_word_embeddings (`bool`, *optional*, defaults to `False`):
Whether to tie weight embeddings
rope_parameters (`RopeParameters`, *optional*):
Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain
a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE
with longer `max_position_embeddings`.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
sliding_window (`int`, *optional*, defaults to 4096):
The size of the sliding window for the sliding window attention.
sliding_window_pattern (`str`, *optional*, defaults to 4):
The pattern to use for sliding window attention. Can be one of:
- `None`: No sliding window attention is used
- `int`: Every `sliding_window` layers, use global attention, else use local attention.
- `str`: A sequence of "L" (local attention) and "G" (global attention) characters that defines the
attention pattern. The pattern starts from layer 0 and repeats every `sliding_window` layers. The
final layer always uses global attention regardless of the pattern.
For instance, sliding_window_pattern="LLLG" same as sliding_window=4, which means:
- Layer 0, 1, 2: local attention,
- Layer 3: global attention,
...(repeated)
layer_types (`list`, *optional*):
Attention pattern for each layer. Prioritized over `sliding_window_pattern`.
mlp_layer_types (`list`, *optional*):
MLP pattern for each layer. Prioritized over `first_k_dense_replace`.
first_k_dense_replace (`int`, *optional*, defaults to 1):
Number of dense layers in shallow layers(embed->dense->dense->...->dense->moe->moe...->lm_head).
\--k dense layers--/
moe_intermediate_size (`int`, *optional*, defaults to 1024):
Dimension of the MoE representations.
num_experts (`int`, *optional*, defaults to 64):
Number of routed experts.
num_experts_per_tok (`int`, *optional*, defaults to 8):
Number of selected experts, None means dense model.
num_shared_experts (`int`, *optional*, defaults to 1):
Number of shared experts.
norm_topk_prob (`bool`, *optional*, defaults to `True`):
Whether to normalize the weights of the routed experts.
routed_scaling_factor (`float`, *optional*, defaults to 2.5):
Scaling factor or routed experts.
n_group (`int`, *optional*, defaults to 1):
Number of groups for routed experts.
topk_group (`int`, *optional*, defaults to 1):
Number of selected groups for each token(for each token, ensuring the selected experts is only within `topk_group` groups).
Example:
```python
>>> from transformers import ExaoneMoeModel, ExaoneMoeConfig
>>> # Initializing a EXAONE configuration
>>> configuration = ExaoneMoeConfig()
>>> # Initializing a model from configuration
>>> model = ExaoneMoeModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
def __init__(
    self,
    vocab_size=102400,
    hidden_size=4096,
    intermediate_size=16384,
    num_hidden_layers=32,
    num_attention_heads=32,
    num_key_value_heads=32,
    hidden_act="silu",
    max_position_embeddings=2048,
    initializer_range=0.02,
    rms_norm_eps=1e-5,
    use_cache=True,
    bos_token_id=1,
    eos_token_id=53,
    pad_token_id=0,
    tie_word_embeddings=False,
    rope_parameters=None,
    attention_dropout=0.0,
    sliding_window=4096,
    sliding_window_pattern=4,
    layer_types=None,
    mlp_layer_types=None,
    first_k_dense_replace=1,
    moe_intermediate_size=1024,
    num_experts=64,
    num_experts_per_tok=8,
    num_shared_experts=1,
    norm_topk_prob=True,
    routed_scaling_factor=2.5,
    n_group=1,
    topk_group=1,
    **kwargs,
):
    """Build the EXAONE-MoE configuration; derives `layer_types` and `mlp_layer_types` when not given."""
    self.vocab_size = vocab_size
    self.hidden_size = hidden_size
    self.num_hidden_layers = num_hidden_layers
    self.num_attention_heads = num_attention_heads
    self.num_key_value_heads = num_key_value_heads
    self.intermediate_size = intermediate_size
    self.hidden_act = hidden_act
    self.max_position_embeddings = max_position_embeddings
    self.initializer_range = initializer_range
    self.rms_norm_eps = rms_norm_eps
    self.use_cache = use_cache
    self.attention_dropout = attention_dropout
    self.sliding_window = sliding_window
    self.sliding_window_pattern = sliding_window_pattern
    self.first_k_dense_replace = first_k_dense_replace
    self.moe_intermediate_size = moe_intermediate_size
    self.num_experts = num_experts
    self.num_experts_per_tok = num_experts_per_tok
    self.num_shared_experts = num_shared_experts
    self.norm_topk_prob = norm_topk_prob
    self.routed_scaling_factor = routed_scaling_factor
    self.n_group = n_group
    self.topk_group = topk_group
    self.rope_parameters = rope_parameters
    self.layer_types = layer_types
    if self.sliding_window is None:
        # No sliding window configured: force the derivation below to produce
        # full attention for every layer.
        sliding_window_pattern = 0
    if self.layer_types is None:
        # NOTE(review): the class docstring also allows a string pattern such as
        # "LLLG" for `sliding_window_pattern`; only the integer form is handled
        # here — confirm that string patterns are expanded into `layer_types`
        # upstream. The truthiness guard below avoids a ZeroDivisionError when
        # the pattern is 0 (i.e. sliding_window is None): all layers then use
        # full attention.
        self.layer_types = [
            "sliding_attention"
            if (sliding_window_pattern and (i + 1) % sliding_window_pattern != 0 and i < self.num_hidden_layers)
            else "full_attention"
            for i in range(self.num_hidden_layers)
        ]
    layer_type_validation(self.layer_types)
    self.mlp_layer_types = mlp_layer_types
    if self.mlp_layer_types is None:
        # The first `first_k_dense_replace` layers use dense MLPs, the rest are MoE.
        self.mlp_layer_types = [
            "dense" if i < self.first_k_dense_replace else "sparse" for i in range(self.num_hidden_layers)
        ]
    layer_type_validation(self.mlp_layer_types, self.num_hidden_layers, attention=False)
    self.bos_token_id = bos_token_id
    self.eos_token_id = eos_token_id
    self.pad_token_id = pad_token_id
    self.tie_word_embeddings = tie_word_embeddings
    # Bug fix: the unbound __init__ call must receive the instance explicitly.
    PreTrainedConfig.__init__(self, **kwargs)
class ExaoneMoeAttention(Exaone4Attention):
    # Attention is inherited unchanged from EXAONE-4.
    pass


class ExaoneMoeMLP(Qwen2MoeMLP):
    # The dense / shared-expert MLP reuses the Qwen2-MoE implementation as-is.
    pass
class ExaoneMoeTopkRouter(DeepseekV3TopkRouter):
    """Top-k expert router; redefines only the parameters, inheriting routing logic from DeepseekV3."""

    def __init__(self, config):
        # Intentionally bypass DeepseekV3TopkRouter.__init__ and initialize
        # nn.Module directly. Bug fix: the unbound call requires `self`.
        nn.Module.__init__(self)
        self.config = config
        # Routing projection: one score per (expert, hidden) pair.
        self.weight = nn.Parameter(torch.empty((config.num_experts, config.hidden_size)))
        # Per-expert score correction, stored as a buffer (not trained via this module).
        self.register_buffer("e_score_correction_bias", torch.zeros(config.num_experts))
class ExaoneMoeExperts(DeepseekV3NaiveMoe):
    """Routed experts; identical to the DeepseekV3 naive MoE apart from the expert-count attribute."""

    def __init__(self, config):
        super().__init__(config)
        # Expose the routed-expert count under the name the rest of this model uses.
        self.num_experts = config.num_experts
class ExaoneMoeSparseMoEBlock(DeepseekV3MoE):
    """Sparse MoE block: routed experts plus a shared-expert MLP."""

    def __init__(self, config):
        # NOTE(review): super().__init__() is called without `config` — confirm the
        # (modular-resolved) parent __init__ accepts zero arguments.
        super().__init__()
        self.experts = ExaoneMoeExperts(config)
        # Shared experts are fused into a single MLP with a scaled intermediate size.
        self.shared_experts = ExaoneMoeMLP(
            config=config, intermediate_size=config.moe_intermediate_size * config.num_shared_experts
        )
        self.n_routed_experts = config.num_experts
class ExaoneMoeDecoderLayer(OlmoeDecoderLayer):
    """Decoder layer that picks a dense MLP or a sparse MoE block per `mlp_layer_types`."""

    def __init__(self, config: ExaoneMoeConfig, layer_idx: int):
        super().__init__(config, layer_idx)
        # Replace the parent's MLP according to this layer's configured type.
        if config.mlp_layer_types[layer_idx] == "sparse":
            self.mlp = ExaoneMoeSparseMoEBlock(config)
        else:
            self.mlp = ExaoneMoeMLP(config)
class ExaoneMoePreTrainedModel(Exaone4PreTrainedModel):
    config: ExaoneMoeConfig
    # Modules whose outputs the recording hooks may collect.
    _can_record_outputs = {
        "hidden_states": ExaoneMoeDecoderLayer,
        "attentions": ExaoneMoeAttention,
        "router_logits": ExaoneMoeSparseMoEBlock,
    }
    # Keep the router's score-correction bias in full precision.
    _keep_in_fp32_modules_strict = ["e_score_correction_bias"]
    # Checkpoints may ship extra multi-token-prediction ("mtp") weights; ignore them on load.
    _keys_to_ignore_on_load_unexpected = [r"mtp.*"]

    @torch.no_grad()
    def _init_weights(self, module):
        """Default init plus normal init for parameters created outside standard layers."""
        PreTrainedModel._init_weights(self, module)
        if isinstance(module, ExaoneMoeTopkRouter):
            init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
            init.zeros_(module.e_score_correction_bias)
        elif isinstance(module, ExaoneMoeExperts):
            # Expert weights are raw nn.Parameters, so the generic init skips them.
            init.normal_(module.gate_up_proj, mean=0.0, std=self.config.initializer_range)
            init.normal_(module.down_proj, mean=0.0, std=self.config.initializer_range)
class ExaoneMoeModel(Exaone4Model):
    # The backbone is inherited unchanged from EXAONE-4.
    pass
class ExaoneMoeForCausalLM(Exaone4ForCausalLM):
    """EXAONE-MoE causal language model head on top of `ExaoneMoeModel`."""

    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: Cache | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        labels: torch.LongTensor | None = None,
        use_cache: bool | None = None,
        cache_position: torch.LongTensor | None = None,
        logits_to_keep: int | torch.Tensor = 0,
        **kwargs: Unpack[TransformersKwargs],
    ) -> CausalLMOutputWithPast:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Example:

        ```python
        >>> from transformers import AutoModelForCausalLM, AutoTokenizer

        >>> model = AutoModelForCausalLM.from_pretrained("LGAI-EXAONE/K-EXAONE-236B-A23B")
        >>> tokenizer = AutoTokenizer.from_pretrained("LGAI-EXAONE/K-EXAONE-236B-A23B")

        >>> prompt = "Explain how wonderful you are"
        >>> messages = [
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": prompt}
        ]
        >>> input_ids = tokenizer.apply_chat_template(
            messages,
            tokenize=True,
            add_generation_prompt=True,
            return_tensors="pt",
            enable_thinking=False,
        )

        >>> output = model.generate(**input_ids.to(model.device), max_new_tokens=128)
        >>> tokenizer.decode(output[0], skip_special_tokens=False)
        "<|system|>\nYou are a helpful assistant.<|endofturn|>\n<|user|>\nExplain how wonderful you are<|endofturn|>\n<|assistant|>\n<think>\n\n</think>\n\nThank you for the kind question! While I can't feel emotions or take pride in the way humans do, I *can* share what makes me uniquely helpful and capable—qualities that many people find wonderful.\n\nHere’s how I can support you:\n\n🌟 **Knowledge at Your Fingertips** \nI have access to a vast amount of information across countless topics—from science and history to technology and creative writing. Whether you're curious, learning, or solving a problem, I can help explain things clearly and accurately.\n\n💬 **Clear, Helpful Communication** \nI aim to respond in a way that's easy to understand, whether you need a simple explanation or a detailed analysis. I adapt my tone and depth to match"
        ```
        """
        # Bug fix: the parent's output must be returned — the original dropped it,
        # making forward() return None despite the declared return type.
        return super().forward(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            labels=labels,
            use_cache=use_cache,
            cache_position=cache_position,
            logits_to_keep=logits_to_keep,
            **kwargs,
        )
# Public API of this module (consumed by the modular-model conversion tooling).
__all__ = [
    "ExaoneMoeConfig",
    "ExaoneMoePreTrainedModel",
    "ExaoneMoeModel",
    "ExaoneMoeForCausalLM",
]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/exaone_moe/modular_exaone_moe.py",
"license": "Apache License 2.0",
"lines": 327,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:tests/models/exaone_moe/test_modeling_exaone_moe.py | # Copyright 2026 The LG AI Research and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch EXAONE MoE model."""
import unittest
from pytest import mark
from transformers import (
AutoTokenizer,
is_torch_available,
)
from transformers.testing_utils import (
Expectations,
cleanup,
require_flash_attn,
require_torch,
slow,
torch_device,
)
from ...causal_lm_tester import CausalLMModelTest, CausalLMModelTester
if is_torch_available():
import torch
from transformers import (
ExaoneMoeForCausalLM,
ExaoneMoeModel,
)
class ExaoneMoeModelTester(CausalLMModelTester):
    """Shared-tester configuration for EXAONE-MoE; only the base model class differs."""

    if is_torch_available():
        base_model_class = ExaoneMoeModel
@require_torch
class ExaoneMoeModelTest(CausalLMModelTest, unittest.TestCase):
    """Common causal-LM test suite instantiated for EXAONE-MoE."""

    model_tester_class = ExaoneMoeModelTester
    # Split points used by the model-parallel placement tests.
    model_split_percents = [0.5, 0.8, 0.9]
@slow
@require_torch
class ExaoneMoeIntegrationTest(unittest.TestCase):
    """Slow integration tests that run the dummy EXAONE-MoE checkpoint end to end."""

    TEST_MODEL_ID = "hf-internal-testing/EXAONE-MoE-Dummy-7B-A1B"

    @classmethod
    def setUpClass(cls):
        cls.model = None

    @classmethod
    def tearDownClass(cls):
        del cls.model
        cleanup(torch_device, gc_collect=True)

    # Bug fix: unittest's per-test hook is `setUp` (camel case); the previous
    # lowercase `setup` was never invoked.
    def setUp(self):
        cleanup(torch_device, gc_collect=True)

    def tearDown(self):
        cleanup(torch_device, gc_collect=True)

    @classmethod
    def get_model(cls):
        # Lazily load and cache the model so all tests in the class share one copy.
        if cls.model is None:
            cls.model = ExaoneMoeForCausalLM.from_pretrained(
                cls.TEST_MODEL_ID,
                device_map="auto",
                experts_implementation="eager",
            )
        return cls.model

    def test_model_logits(self):
        input_ids = [405, 7584, 36608, 892, 95714, 2907, 1492, 758, 373, 582]
        model = self.get_model()
        input_ids = torch.tensor([input_ids]).to(model.device)
        with torch.no_grad():
            out = model(input_ids).logits.float().cpu()
        # fmt: off
        EXPECTED_MEAN = Expectations(
            {
                ("xpu", None): torch.tensor(
                    [[-2.2315, -3.0070, -3.2105, -3.2688, -3.2211, -3.3958, -3.1049, -3.2591, -3.8714, -0.6801]]
                ),
                ("cuda", None): torch.tensor(
                    [[-2.2491, -3.0824, -3.2191, -3.2712, -3.1991, -3.4087, -3.1384, -3.2601, -3.8869, -0.6940]]
                ),
            }
        ).get_expectation()
        EXPECTED_SLICE = Expectations(
            {
                ("xpu", None): torch.tensor(
                    [-2.3750, -3.0156, 2.6875, -3.0000, 0.5078, -1.4141, -1.8516, -2.6719, -1.7578, -2.0781]
                ),
                ("cuda", None): torch.tensor(
                    [-2.3906, -3.0469, 2.6875, -3.0156, 0.4941, -1.4219, -1.8672, -2.6719, -1.7656, -2.0938]
                ),
            }
        ).get_expectation()
        # fmt: on
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        torch.testing.assert_close(out[0, 0, :10], EXPECTED_SLICE, atol=1e-4, rtol=1e-4)

    def test_model_generation_sdpa(self):
        EXPECTED_TEXT = "The deep learning is 100% accurate.\n\nThe 100% accurate is 100%"
        prompt = "The deep learning is "
        tokenizer = AutoTokenizer.from_pretrained(self.TEST_MODEL_ID)
        model = self.get_model()
        input_ids = tokenizer(prompt, return_tensors="pt").to(model.device)
        with torch.no_grad():
            generated_ids = model.generate(**input_ids, max_new_tokens=20, do_sample=False)
        text = tokenizer.decode(generated_ids[0], skip_special_tokens=False)
        self.assertEqual(EXPECTED_TEXT, text)

    @require_flash_attn
    @mark.flash_attn_test
    def test_model_generation_beyond_sliding_window_flash(self):
        # Prompt longer than the sliding window exercises the local/global mix.
        EXPECTED_OUTPUT_TOKEN_IDS = [373, 686, 373, 115708, 373, 885]
        input_ids = [72861, 2711] + [21605, 2711] * 2048
        model = self.get_model()
        model.set_attn_implementation("flash_attention_2")
        input_ids = torch.tensor([input_ids]).to(model.device)
        with torch.no_grad():
            generated_ids = model.generate(input_ids, max_new_tokens=6, do_sample=False)
        self.assertEqual(EXPECTED_OUTPUT_TOKEN_IDS, generated_ids[0][-6:].tolist())
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/exaone_moe/test_modeling_exaone_moe.py",
"license": "Apache License 2.0",
"lines": 117,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:src/transformers/models/eomt_dinov3/convert_eomt_dinov3_to_hf.py | # Copyright 2026 Mobile Perception Systems Lab at TU/e and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Conversion script for EoMT-DINOv3 checkpoints.
To convert one of the official checkpoints directly from the Hugging Face Hub you can run:
```bash
HF_TOKEN=your_token_here \
python -m transformers.models.eomt_dinov3.convert_eomt_dinov3_to_hf \
--model-id tue-mps/coco_panoptic_eomt_large_640_dinov3 \
--output-dir /tmp/eomt_converted \
--verify \
--original-repo-path /tmp/eomt
```
Make sure the token used above has been granted access to the gated DINOv3 weights.
"""
from __future__ import annotations
import argparse
import json
import re
import sys
from collections.abc import Iterable
from pathlib import Path
from typing import NamedTuple
import requests
import torch
from accelerate import init_empty_weights
from huggingface_hub import hf_hub_download
from PIL import Image
from safetensors.torch import load_file
from transformers import EomtDinov3Config, EomtDinov3ForUniversalSegmentation, EomtImageProcessorFast
CAT_URL = "http://images.cocodataset.org/val2017/000000039769.jpg"
DEFAULT_BACKBONE_REPO_ID = "facebook/dinov3-vitl16-pretrain-lvd1689m"
DEFAULT_IMAGE_SIZE = 640
class CheckpointSpec(NamedTuple):
    """Metadata describing how to convert an official EoMT-DINOv3 checkpoint."""

    # Hub id of the EoMT delta checkpoint (e.g. "tue-mps/coco_panoptic_eomt_large_640_dinov3").
    model_id: str
    # Hub id of the DINOv3 backbone the deltas were trained against.
    backbone_repo_id: str
    # Square input resolution the checkpoint was trained at.
    image_size: int
# All officially supported EoMT-DINOv3 checkpoints and their conversion metadata.
CHECKPOINT_CATALOG: tuple[CheckpointSpec, ...] = (
    CheckpointSpec(
        model_id="tue-mps/coco_panoptic_eomt_small_640_dinov3",
        backbone_repo_id="facebook/dinov3-vits16-pretrain-lvd1689m",
        image_size=640,
    ),
    CheckpointSpec(
        model_id="tue-mps/coco_panoptic_eomt_base_640_dinov3",
        backbone_repo_id="facebook/dinov3-vitb16-pretrain-lvd1689m",
        image_size=640,
    ),
    CheckpointSpec(
        model_id="tue-mps/coco_panoptic_eomt_large_640_dinov3",
        backbone_repo_id="facebook/dinov3-vitl16-pretrain-lvd1689m",
        image_size=640,
    ),
    CheckpointSpec(
        model_id="tue-mps/coco_panoptic_eomt_large_1280_dinov3",
        backbone_repo_id="facebook/dinov3-vitl16-pretrain-lvd1689m",
        image_size=1280,
    ),
    CheckpointSpec(
        model_id="tue-mps/ade20k_semantic_eomt_large_512_dinov3",
        backbone_repo_id="facebook/dinov3-vitl16-pretrain-lvd1689m",
        image_size=512,
    ),
    CheckpointSpec(
        model_id="tue-mps/coco_instance_eomt_large_640_dinov3",
        backbone_repo_id="facebook/dinov3-vitl16-pretrain-lvd1689m",
        image_size=640,
    ),
    CheckpointSpec(
        model_id="tue-mps/coco_instance_eomt_large_1280_dinov3",
        backbone_repo_id="facebook/dinov3-vitl16-pretrain-lvd1689m",
        image_size=1280,
    ),
    CheckpointSpec(
        model_id="tue-mps/ade_semantic_eomt_large_512_dinov3",
        backbone_repo_id="facebook/dinov3-vitl16-pretrain-lvd1689m",
        image_size=512,
    ),
)
def _build_checkpoint_index() -> dict[str, CheckpointSpec]:
    """Index the catalog by both the full id and the org-less short id, lowercased."""
    index: dict[str, CheckpointSpec] = {}
    for spec in CHECKPOINT_CATALOG:
        full_key = spec.model_id.lower()
        short_key = spec.model_id.split("/", maxsplit=1)[-1].lower()
        for key in {full_key, short_key}:
            index[key] = spec
    return index


# Lookup table used by `resolve_checkpoint_spec`.
CHECKPOINT_SPECS = _build_checkpoint_index()
def resolve_checkpoint_spec(model_id: str) -> CheckpointSpec:
    """Return the conversion spec for `model_id` (full or short form, case-insensitive)."""
    try:
        return CHECKPOINT_SPECS[model_id.lower()]
    except KeyError:
        available = ", ".join(sorted(spec.model_id for spec in CHECKPOINT_CATALOG))
        raise ValueError(f"Unknown checkpoint '{model_id}'. Available options: {available}.") from None
def print_checkpoint_catalog() -> None:
    """Print every supported checkpoint with its resolution and backbone (for --list-models)."""
    print("Supported checkpoints:")
    for spec in CHECKPOINT_CATALOG:
        print(f"- {spec.model_id} (image_size={spec.image_size}, backbone={spec.backbone_repo_id})")
# Ordered (regex, replacement) pairs mapping original EoMT delta-checkpoint key
# prefixes to HF `EomtDinov3ForUniversalSegmentation` names. Order matters: the
# first matching pattern wins (see `_rename_delta_key`).
DELTA_KEY_REPLACEMENTS: tuple[tuple[str, str], ...] = (
    (r"^network\.encoder\.backbone\.patch_embed\.cls_token$", "embeddings.cls_token"),
    (r"^network\.encoder\.backbone\.patch_embed\.register_tokens$", "embeddings.register_tokens"),
    (
        r"^network\.encoder\.backbone\.patch_embed\.patch_embeddings\.",
        "embeddings.patch_embeddings.",
    ),
    (r"^network\.encoder\.backbone\.blocks\.(\d+)\.", r"layers.\1."),
    (r"^network\.encoder\.backbone\.norm\.", "layernorm."),
    (r"^network\.q\.", "query."),
    (r"^network\.class_head\.", "class_predictor."),
    (r"^network\.mask_head\.0\.", "mask_head.fc1."),
    (r"^network\.mask_head\.2\.", "mask_head.fc2."),
    (r"^network\.mask_head\.4\.", "mask_head.fc3."),
    (r"^network\.upscale\.(\d+)\.conv1\.", r"upscale_block.block.\1.conv1."),
    (r"^network\.upscale\.(\d+)\.conv2\.", r"upscale_block.block.\1.conv2."),
    (r"^network\.upscale\.(\d+)\.norm\.", r"upscale_block.block.\1.layernorm2d."),
    (r"^network\.attn_mask_probs$", "attn_mask_probs"),
    (r"^criterion\.", "criterion."),
)

# Keys in the delta checkpoint that are dropped entirely (normalization constants
# handled by the image processor instead).
SKIP_KEYS = {
    "network.encoder.pixel_mean",
    "network.encoder.pixel_std",
}
def _rename_delta_key(key: str) -> tuple[str | None, bool]:
    """Translate one delta-checkpoint key to HF naming.

    Returns `(new_key, is_backbone)`; `new_key` is None for keys to drop.
    Raises KeyError for a backbone key with no mapping, so silent weight loss
    is impossible.
    """
    if key in SKIP_KEYS:
        return None, False
    is_backbone = key.startswith("network.encoder.backbone")
    for pattern, replacement in DELTA_KEY_REPLACEMENTS:
        if re.match(pattern, key):
            return re.sub(pattern, replacement, key), is_backbone
    if is_backbone:
        raise KeyError(f"Unhandled backbone key: {key}")
    return None, False
def convert_delta_state_dict(state_dict: dict[str, torch.Tensor]) -> tuple[dict[str, torch.Tensor], set[str]]:
    """Rename every delta key to HF naming; also collect which renamed keys belong to the backbone."""
    converted: dict[str, torch.Tensor] = {}
    backbone_keys: set[str] = set()
    for original_key, tensor in state_dict.items():
        renamed, is_backbone = _rename_delta_key(original_key)
        if renamed is None:
            # Key is deliberately skipped (normalization constants etc.).
            continue
        converted[renamed] = tensor
        if is_backbone:
            backbone_keys.add(renamed)
    return converted, backbone_keys
def map_dinov3_state_to_eomt(base_state_dict: dict[str, torch.Tensor]) -> dict[str, torch.Tensor]:
    """Rename DINOv3 backbone keys to the EoMT scheme, dropping the unused mask token."""
    exact_renames = {"norm.weight": "layernorm.weight", "norm.bias": "layernorm.bias"}
    mapped: dict[str, torch.Tensor] = {}
    for key, tensor in base_state_dict.items():
        if key.startswith("layer."):
            new_key = "layers." + key[len("layer.") :]
        else:
            new_key = exact_renames.get(key, key)
        # EoMT has no mask token; skip it instead of carrying dead weight.
        if new_key == "embeddings.mask_token":
            continue
        mapped[new_key] = tensor
    return mapped
def merge_backbone_weights(
    base_backbone: dict[str, torch.Tensor],
    delta_backbone: dict[str, torch.Tensor],
    backbone_delta_keys: Iterable[str],
) -> dict[str, torch.Tensor]:
    """Overlay deltas on the base backbone.

    Keys listed in `backbone_delta_keys` are additive fine-tuning deltas (base + delta);
    every other delta key simply replaces/extends the base entry. The input dicts are
    not mutated.
    """
    additive_keys = set(backbone_delta_keys)
    merged = dict(base_backbone)
    for key, delta_value in delta_backbone.items():
        merged[key] = merged[key] + delta_value if key in additive_keys else delta_value
    return merged
def build_eomt_config(
    *,
    base_config: dict[str, object],
    delta_state_dict: dict[str, torch.Tensor],
    image_size: int,
) -> EomtDinov3Config:
    """Derive an `EomtDinov3Config` from the backbone's config.json plus shapes in the delta weights."""
    # EoMT-specific sizes are read off the delta tensors rather than hard-coded.
    num_queries = delta_state_dict["network.q.weight"].shape[0]
    num_blocks = delta_state_dict["network.attn_mask_probs"].numel()
    num_upscale_blocks = len({int(key.split(".")[2]) for key in delta_state_dict if key.startswith("network.upscale")})
    num_register_tokens = delta_state_dict["network.encoder.backbone.patch_embed.register_tokens"].shape[1]
    # Class head has one extra row; presumably the "no object" class — TODO confirm.
    num_labels = delta_state_dict["network.class_head.weight"].shape[0] - 1
    config = EomtDinov3Config(
        hidden_size=base_config["hidden_size"],
        num_hidden_layers=base_config["num_hidden_layers"],
        num_attention_heads=base_config["num_attention_heads"],
        intermediate_size=base_config["intermediate_size"],
        hidden_act=base_config["hidden_act"],
        hidden_dropout_prob=0.0,
        initializer_range=base_config["initializer_range"],
        layer_norm_eps=base_config["layer_norm_eps"],
        image_size=image_size,
        patch_size=base_config["patch_size"],
        num_channels=base_config.get("num_channels", 3),
        layerscale_value=base_config.get("layerscale_value", 1.0),
        drop_path_rate=base_config.get("drop_path_rate", 0.0),
        attention_dropout=base_config.get("attention_dropout", 0.0),
        num_upscale_blocks=num_upscale_blocks,
        num_blocks=num_blocks,
        num_queries=num_queries,
        num_register_tokens=num_register_tokens,
        rope_parameters={"rope_theta": base_config.get("rope_theta", 100.0)},
        query_bias=base_config.get("query_bias", True),
        key_bias=base_config.get("key_bias", False),
        value_bias=base_config.get("value_bias", True),
        proj_bias=base_config.get("proj_bias", True),
        mlp_bias=base_config.get("mlp_bias", True),
        use_gated_mlp=base_config.get("use_gated_mlp", False),
        pos_embed_shift=base_config.get("pos_embed_shift"),
        pos_embed_jitter=base_config.get("pos_embed_jitter"),
        pos_embed_rescale=base_config.get("pos_embed_rescale"),
        num_labels=num_labels,
    )
    return config
def convert_checkpoint(
    *,
    delta_state_dict: dict[str, torch.Tensor],
    backbone_repo_id: str,
    image_size: int,
) -> tuple[EomtDinov3Config, dict[str, torch.Tensor]]:
    """Download the DINOv3 backbone and merge the EoMT deltas into a full HF state dict."""
    # load model.safetensors
    filepath = hf_hub_download(backbone_repo_id, filename="model.safetensors")
    base_state_dict = load_file(filepath)

    # load config.json
    filepath = hf_hub_download(backbone_repo_id, filename="config.json")
    with open(filepath, "r") as f:
        base_config = json.load(f)

    # Rename both sides to HF naming, then overlay deltas (additive for backbone keys).
    mapped_base = map_dinov3_state_to_eomt(base_state_dict)
    converted_delta, backbone_delta_keys = convert_delta_state_dict(delta_state_dict)
    merged_state_dict = merge_backbone_weights(mapped_base, converted_delta, backbone_delta_keys)
    config = build_eomt_config(
        base_config=base_config,
        delta_state_dict=delta_state_dict,
        image_size=image_size,
    )
    return config, merged_state_dict
def ensure_state_dict(state_dict: dict[str, torch.Tensor]) -> dict[str, torch.Tensor]:
    """Unwrap checkpoints that nest their weights under a top-level 'state_dict' key."""
    return state_dict.get("state_dict", state_dict)
def convert_model(
    *,
    model_id: str,
    output_dir: Path,
    verify: bool,
    original_repo_path: Path | None,
    push_to_hub: bool = False,
) -> None:
    """End-to-end conversion: download, merge, optionally verify, save, and optionally push."""
    # resolve checkpoint spec
    spec = resolve_checkpoint_spec(model_id)
    backbone_repo_id = spec.backbone_repo_id
    image_size = spec.image_size

    # load delta state
    delta_path = hf_hub_download(repo_id=model_id, filename="pytorch_model.bin")
    raw_delta_state = torch.load(delta_path, map_location="cpu")
    delta_state_dict = ensure_state_dict(raw_delta_state)

    # convert checkpoint
    config, merged_state_dict = convert_checkpoint(
        delta_state_dict=delta_state_dict,
        backbone_repo_id=backbone_repo_id,
        image_size=image_size,
    )

    # Materialize the model without allocating weights, then assign the merged tensors.
    with init_empty_weights():
        model = EomtDinov3ForUniversalSegmentation(config)
    model.load_state_dict(merged_state_dict, strict=True, assign=True)

    processor = EomtImageProcessorFast(
        size={"shortest_edge": image_size, "longest_edge": image_size},
        do_split_image=False,
        do_pad=True,
    )

    if verify:
        verify_conversion(
            hf_model=model,
            processor=processor,
            delta_state_dict=delta_state_dict,
            backbone_repo_id=backbone_repo_id,
            image_size=image_size,
            original_repo_path=original_repo_path,
        )

    if output_dir is not None:
        output_dir.mkdir(parents=True, exist_ok=True)
        model.save_pretrained(output_dir)
        processor.save_pretrained(output_dir)

    if push_to_hub:
        # Extract model name from model_id (e.g. "tue-mps/coco_panoptic_eomt_large_640_dinov3" -> "eomt-dinov3-coco-panoptic-large-640")
        base_name = model_id.split("/")[-1]  # e.g. "coco_panoptic_eomt_large_640_dinov3"
        parts = base_name.replace("_dinov3", "").split("_")  # ["coco", "panoptic", "eomt", "large", "640"]
        # Reorder to: eomt-dinov3-{task}-{dataset}-{size}-{resolution}
        if "eomt" in parts:
            parts.remove("eomt")
        repo_name = "eomt-dinov3-" + "-".join(parts)
        model.push_to_hub(repo_id=f"nielsr/{repo_name}")
        processor.push_to_hub(repo_id=f"nielsr/{repo_name}")
def _prepare_image(processor: EomtImageProcessorFast) -> torch.Tensor:
    """Download the COCO cats test image and return resized, UNnormalized pixel values."""
    image = Image.open(requests.get(CAT_URL, stream=True).raw).convert("RGB")
    # Normalization is skipped here because the original model normalizes internally
    # with its own pixel_mean/pixel_std (see verify_conversion).
    inputs = processor(images=image, do_normalize=False, return_tensors="pt")
    return inputs.pixel_values
def _load_original_model(
    *,
    original_repo_path: Path,
    backbone_repo_id: str,
    image_size: int,
    num_labels: int,
    num_queries: int,
    num_blocks: int,
    delta_state_dict: dict[str, torch.Tensor],
) -> torch.nn.Module:
    """Instantiate the reference EoMT model from the original repo and apply the deltas to it."""
    # The original repo is imported dynamically; it must be on sys.path first.
    sys.path.insert(0, str(original_repo_path))
    from models.eomt import EoMT
    from models.vit import ViT

    encoder = ViT((image_size, image_size), backbone_name=backbone_repo_id, ckpt_path=None)
    model = EoMT(encoder=encoder, num_classes=num_labels, num_q=num_queries, num_blocks=num_blocks)
    state_dict = model.state_dict()
    for key, value in delta_state_dict.items():
        if key in SKIP_KEYS or key.startswith("criterion"):
            continue
        target_key = key
        if key.startswith("network."):
            target_key = key.replace("network.", "", 1)
        if target_key.startswith("encoder.backbone"):
            # Backbone deltas are additive on top of the pretrained DINOv3 weights.
            state_dict[target_key] = state_dict[target_key] + value
        elif target_key in state_dict:
            state_dict[target_key] = value
    model.load_state_dict(state_dict)
    model.eval()
    return model
class BackboneVerificationOutputs(NamedTuple):
    """Intermediate activations collected from either model for side-by-side comparison."""

    # Output of the patch embedding stage.
    patch_embeddings: torch.Tensor
    # Rotary position embedding components (cos/sin pair — TODO confirm order).
    rope_embeddings: tuple[torch.Tensor, torch.Tensor]
    # Hidden states after every transformer block.
    hidden_states: list[torch.Tensor]
    # Mask logits predicted at each mask-annealing block plus the final one.
    mask_logits: list[torch.Tensor]
    # Class logits predicted alongside each mask prediction.
    class_logits: list[torch.Tensor]
    # Final layer-normalized sequence output.
    sequence_output: torch.Tensor
def _collect_original_backbone_states(model, pixel_values: torch.Tensor) -> BackboneVerificationOutputs:
    """Re-run the original EoMT forward pass step by step, recording every intermediate."""
    backbone = model.encoder.backbone
    # The original model normalizes internally with its own constants.
    hidden_states = (pixel_values - model.encoder.pixel_mean) / model.encoder.pixel_std
    rope = None
    if hasattr(backbone, "rope_embeddings"):
        rope = backbone.rope_embeddings(hidden_states)
    hidden_states = backbone.patch_embed(hidden_states)
    patch_embeddings = hidden_states.detach().clone()
    if rope is None:
        raise ValueError("Original model is missing rope embeddings")
    outputs = []
    mask_logits_list = []
    class_logits_list = []
    attn_mask = None
    for idx, block in enumerate(backbone.blocks):
        # Query tokens are prepended exactly once, at the first mask-prediction block.
        if idx == len(backbone.blocks) - model.num_blocks:
            query = model.q.weight[None, :, :].expand(hidden_states.shape[0], -1, -1)
            hidden_states = torch.cat((query, hidden_states), dim=1)
        if idx >= len(backbone.blocks) - model.num_blocks:
            # From here on, each block predicts masks/classes and builds an attention mask.
            norm_hidden_states = backbone.norm(hidden_states)
            mask_logits, class_logits = model._predict(norm_hidden_states)
            mask_logits_list.append(mask_logits)
            class_logits_list.append(class_logits)
            attn_mask = model._attn_mask(hidden_states, mask_logits, idx)
        # Different upstream revisions name these submodules differently; handle both.
        attn_module = block.attention if hasattr(block, "attention") else block.attn
        attn_output = model._attn(attn_module, block.norm1(hidden_states), attn_mask, rope=rope)
        if hasattr(block, "layer_scale1"):
            hidden_states = hidden_states + block.layer_scale1(attn_output)
        else:
            hidden_states = hidden_states + block.ls1(attn_output)
        mlp_output = block.mlp(block.norm2(hidden_states))
        if hasattr(block, "layer_scale2"):
            hidden_states = hidden_states + block.layer_scale2(mlp_output)
        else:
            hidden_states = hidden_states + block.ls2(mlp_output)
        outputs.append(hidden_states)
    sequence_output = backbone.norm(hidden_states)
    mask_logits, class_logits = model._predict(sequence_output)
    mask_logits_list.append(mask_logits)
    class_logits_list.append(class_logits)
    return BackboneVerificationOutputs(
        patch_embeddings=patch_embeddings,
        rope_embeddings=rope,
        hidden_states=outputs,
        mask_logits=mask_logits_list,
        class_logits=class_logits_list,
        sequence_output=sequence_output,
    )
def _collect_hf_backbone_states(
    model: EomtDinov3ForUniversalSegmentation, pixel_values: torch.Tensor
) -> BackboneVerificationOutputs:
    """Mirror `_collect_original_backbone_states` for the converted HF model.

    Expects `pixel_values` to already be normalized (the HF processor handles it).
    """
    position_embeddings = model.rope_embeddings(pixel_values)
    hidden_states = model.embeddings(pixel_values)
    patch_embeddings = hidden_states.detach().clone()
    outputs = []
    mask_logits_list = []
    class_logits_list = []
    attention_mask = None
    for idx, layer_module in enumerate(model.layers):
        # Query tokens are prepended exactly once, at the first mask-prediction layer.
        if idx == model.num_hidden_layers - model.config.num_blocks:
            query = model.query.weight[None, :, :].expand(hidden_states.shape[0], -1, -1).to(hidden_states.device)
            hidden_states = torch.cat((query, hidden_states), dim=1)
        if idx >= model.num_hidden_layers - model.config.num_blocks:
            norm_hidden_states = model.layernorm(hidden_states)
            mask_logits, class_logits = model.predict(norm_hidden_states)
            mask_logits_list.append(mask_logits)
            class_logits_list.append(class_logits)
            probs_index = idx - model.num_hidden_layers + model.config.num_blocks
            if model.training or model.attn_mask_probs[probs_index] > 0:
                # Build a boolean attention mask that restricts query tokens to
                # image regions where the current mask prediction is positive.
                attention_mask = torch.ones(
                    hidden_states.shape[0],
                    hidden_states.shape[1],
                    hidden_states.shape[1],
                    device=hidden_states.device,
                    dtype=torch.bool,
                )
                interpolated_logits = torch.nn.functional.interpolate(
                    mask_logits,
                    size=model.grid_size,
                    mode="bilinear",
                ).view(mask_logits.size(0), mask_logits.size(1), -1)
                num_query_tokens = model.config.num_queries
                encoder_start_tokens = num_query_tokens + model.embeddings.num_prefix_tokens
                attention_mask[:, :num_query_tokens, encoder_start_tokens:] = interpolated_logits > 0
                attention_mask = model._disable_attention_mask(
                    attention_mask,
                    prob=model.attn_mask_probs[probs_index],
                    num_query_tokens=num_query_tokens,
                    encoder_start_tokens=encoder_start_tokens,
                    device=hidden_states.device,
                )
                # Broadcast to all heads, then convert bool -> additive float mask.
                attention_mask = attention_mask[:, None, ...].expand(-1, model.config.num_attention_heads, -1, -1)
                bool_attention_mask = attention_mask
                attention_mask = attention_mask.float().masked_fill(~bool_attention_mask, -1e9)
                if attention_mask.dtype != hidden_states.dtype:
                    attention_mask = attention_mask.to(dtype=hidden_states.dtype)
        hidden_states = layer_module(
            hidden_states,
            attention_mask=attention_mask,
            position_embeddings=position_embeddings,
        )
        outputs.append(hidden_states)
    sequence_output = model.layernorm(hidden_states)
    mask_logits, class_logits = model.predict(sequence_output)
    mask_logits_list.append(mask_logits)
    class_logits_list.append(class_logits)
    return BackboneVerificationOutputs(
        patch_embeddings=patch_embeddings,
        rope_embeddings=position_embeddings,
        hidden_states=outputs,
        mask_logits=mask_logits_list,
        class_logits=class_logits_list,
        sequence_output=sequence_output,
    )
def _assert_allclose(reference: Iterable[torch.Tensor], actual: Iterable[torch.Tensor], message: str) -> None:
for idx, (ref_tensor, act_tensor) in enumerate(zip(reference, actual)):
if not torch.allclose(ref_tensor, act_tensor, atol=1e-4, rtol=1e-4):
raise ValueError(f"Mismatch in {message} at index {idx}")
def verify_conversion(
    *,
    hf_model: EomtDinov3ForUniversalSegmentation,
    processor: EomtImageProcessorFast,
    delta_state_dict: dict[str, torch.Tensor],
    backbone_repo_id: str,
    image_size: int,
    original_repo_path: Path | None,
) -> None:
    """Run the original and converted models on the same image and compare every intermediate.

    Raises ValueError on the first mismatch; prints "Looks good!" only after all
    checks pass (bug fix: the success message used to print before the final
    sequence-output check).
    """
    if original_repo_path is None:
        raise ValueError("Original repository path is required for verification")
    torch.manual_seed(0)
    pixel_values = _prepare_image(processor)
    # The HF model expects pre-normalized pixels; the original normalizes internally.
    image_mean = torch.tensor(processor.image_mean, dtype=pixel_values.dtype, device=pixel_values.device)[
        None, :, None, None
    ]
    image_std = torch.tensor(processor.image_std, dtype=pixel_values.dtype, device=pixel_values.device)[
        None, :, None, None
    ]
    normalized_pixel_values = (pixel_values - image_mean) / image_std
    original_model = _load_original_model(
        original_repo_path=original_repo_path,
        backbone_repo_id=backbone_repo_id,
        image_size=image_size,
        num_labels=hf_model.config.num_labels,
        num_queries=hf_model.config.num_queries,
        num_blocks=hf_model.config.num_blocks,
        delta_state_dict=delta_state_dict,
    )
    hf_model.eval()
    with torch.no_grad():
        orig_outputs = _collect_original_backbone_states(original_model, pixel_values)
        hf_outputs = _collect_hf_backbone_states(hf_model, normalized_pixel_values)
    patch_abs_diff = (orig_outputs.patch_embeddings - hf_outputs.patch_embeddings).abs()
    print(f"Patch embedding max abs diff: {patch_abs_diff.max().item():.6e}")
    rope_abs_diffs = [(orig - hf).abs() for orig, hf in zip(orig_outputs.rope_embeddings, hf_outputs.rope_embeddings)]
    rope_max_diffs = [diff.max().item() for diff in rope_abs_diffs]
    print(
        "RoPE embedding max abs diff: "
        + ", ".join(f"component_{idx}={value:.6e}" for idx, value in enumerate(rope_max_diffs))
    )
    if not torch.allclose(orig_outputs.patch_embeddings, hf_outputs.patch_embeddings, atol=1e-4, rtol=1e-4):
        raise ValueError("Mismatch in patch embeddings")
    _assert_allclose(orig_outputs.rope_embeddings, hf_outputs.rope_embeddings, "rope embeddings")
    _assert_allclose(orig_outputs.hidden_states, hf_outputs.hidden_states, "backbone hidden states")
    _assert_allclose(orig_outputs.mask_logits, hf_outputs.mask_logits, "mask logits")
    _assert_allclose(orig_outputs.class_logits, hf_outputs.class_logits, "class logits")
    if not torch.allclose(orig_outputs.sequence_output, hf_outputs.sequence_output, atol=1e-4, rtol=1e-4):
        raise ValueError("Mismatch in final sequence output")
    print("Looks good!")
def parse_args() -> argparse.Namespace:
    """Parse command-line arguments for the conversion script.

    `--model-id` is logically required for a conversion, but it must not be
    declared with `required=True`: that would make the standalone
    `--list-models` mode (which lists checkpoints and exits before any
    conversion) impossible to invoke. The requirement is therefore enforced
    after parsing, only when `--list-models` is absent.
    """
    parser = argparse.ArgumentParser(description="Convert EoMT-DINOv3 checkpoints to 🤗 Transformers format")
    parser.add_argument("--list-models", action="store_true", help="List supported checkpoint names and exit")
    parser.add_argument(
        "--model-id",
        help="Name of an official EoMT-DINOv3 checkpoint to download and convert",
    )
    parser.add_argument("--output-dir", type=Path, help="Directory to save the converted model")
    parser.add_argument("--verify", action="store_true")
    parser.add_argument("--original-repo-path", type=Path, default=None, help="Path to the original EoMT repository")
    parser.add_argument(
        "--push-to-hub", action="store_true", help="Whether to push the converted model to the Hugging Face Hub."
    )
    args = parser.parse_args()
    # Emulate `required=True`, but only when not in listing mode.
    if not args.list_models and args.model_id is None:
        parser.error("--model-id is required unless --list-models is given")
    return args
def main() -> None:
    """CLI entry point: either print the checkpoint catalog or convert a model."""
    args = parse_args()
    if args.list_models:
        print_checkpoint_catalog()
    else:
        convert_model(
            model_id=args.model_id,
            output_dir=args.output_dir,
            verify=args.verify,
            original_repo_path=args.original_repo_path,
            push_to_hub=args.push_to_hub,
        )


if __name__ == "__main__":
    main()
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/eomt_dinov3/convert_eomt_dinov3_to_hf.py",
"license": "Apache License 2.0",
"lines": 539,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/eomt_dinov3/modular_eomt_dinov3.py | # Copyright 2026 the HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch EoMT model backed by DINOv3."""
from collections.abc import Callable
from typing import Optional
import torch
import torch.nn.functional as F
from torch import Tensor, nn
from ... import initialization as init
from ...modeling_rope_utils import RopeParameters
from ...modeling_utils import PreTrainedModel
from ...processing_utils import Unpack
from ...utils import (
TransformersKwargs,
auto_docstring,
)
from ...utils.generic import merge_with_config_defaults
from ...utils.output_capturing import capture_outputs
from ..dinov3_vit.modeling_dinov3_vit import (
DINOv3ViTAttention,
DINOv3ViTEmbeddings,
DINOv3ViTLayer,
DINOv3ViTLayerScale,
DINOv3ViTRopePositionEmbedding,
)
from ..eomt.configuration_eomt import EomtConfig
from ..eomt.modeling_eomt import (
EomtForUniversalSegmentation,
EomtForUniversalSegmentationOutput,
EomtLoss,
EomtPreTrainedModel,
)
class EomtDinov3Config(EomtConfig):
    r"""
    This is the configuration class to store the configuration of a [`EomtDinov3ForUniversalSegmentation`]. It is used to instantiate an EoMT-DINOv3 model
    according to the specified arguments, defining the model architecture. Instantiating a configuration with the
    defaults will yield a similar configuration to that of the EoMT-DINOv3
    [tue-mps/coco_panoptic_eomt_large_640_dinov3](https://huggingface.co/tue-mps/coco_panoptic_eomt_large_640_dinov3)
    architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        hidden_size (`int`, *optional*, defaults to 1024):
            Dimensionality of the hidden representations.
        num_hidden_layers (`int`, *optional*, defaults to 24):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 16):
            Number of attention heads in each attention layer.
        intermediate_size (`int`, *optional*, defaults to 4096):
            The intermediate size of the MLP. If not provided, defaults to `hidden_size * 4`.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder.
        hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
            The dropout probability for all fully connected layers in the embeddings and encoder.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-06):
            The epsilon used by the layer normalization layers.
        image_size (`int`, *optional*, defaults to 640):
            The size (resolution) of each input image.
        patch_size (`int`, *optional*, defaults to 16):
            The size (resolution) of each patch.
        num_channels (`int`, *optional*, defaults to 3):
            The number of input channels.
        layerscale_value (`float`, *optional*, defaults to 1.0):
            Initial value for the LayerScale parameter.
        drop_path_rate (`float`, *optional*, defaults to 0.0):
            The stochastic depth rate (drop path) used during training.
        num_upscale_blocks (`int`, *optional*, defaults to 2):
            Number of upsampling blocks used in the decoder or segmentation head.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            Dropout probability applied after attention projection.
        num_blocks (`int`, *optional*, defaults to 4):
            Number of feature blocks or stages in the architecture.
        no_object_weight (`float`, *optional*, defaults to 0.1):
            Loss weight for the "no object" class in panoptic/instance segmentation.
        class_weight (`float`, *optional*, defaults to 2.0):
            Loss weight for classification targets.
        mask_weight (`float`, *optional*, defaults to 5.0):
            Loss weight for mask prediction.
        dice_weight (`float`, *optional*, defaults to 5.0):
            Loss weight for the dice loss component.
        train_num_points (`int`, *optional*, defaults to 12544):
            Number of points to sample for mask loss computation during training.
        oversample_ratio (`float`, *optional*, defaults to 3.0):
            Oversampling ratio used in point sampling for mask training.
        importance_sample_ratio (`float`, *optional*, defaults to 0.75):
            Ratio of points to sample based on importance during training.
        num_queries (`int`, *optional*, defaults to 200):
            Number of object queries in the Transformer.
        num_register_tokens (`int`, *optional*, defaults to 4):
            Number of learnable register tokens added to the transformer input.
        rope_parameters (`RopeParameters`, *optional*):
            Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain
            a value for `rope_theta` and optionally parameters used for scaling.
        query_bias (`bool`, *optional*, defaults to `True`):
            Whether to use bias in query projection.
        key_bias (`bool`, *optional*, defaults to `False`):
            Whether to use bias in key projection.
        value_bias (`bool`, *optional*, defaults to `True`):
            Whether to use bias in value projection.
        proj_bias (`bool`, *optional*, defaults to `True`):
            Whether to use bias in output projection.
        mlp_bias (`bool`, *optional*, defaults to `True`):
            Whether to use bias in MLP layers.
        use_gated_mlp (`bool`, *optional*, defaults to `False`):
            Whether to use gated MLP layers.
        pos_embed_shift (`float`, *optional*):
            Shift value for position embeddings.
        pos_embed_jitter (`float`, *optional*):
            Jitter value for position embeddings.
        pos_embed_rescale (`float`, *optional*, defaults to 2.0):
            Rescale value for position embeddings.
    """

    model_type = "eomt_dinov3"
    # Default RoPE base frequency (theta). NOTE(review): presumably consumed by
    # the base config when `rope_parameters` omits `rope_theta` — confirm
    # against the parent config's RoPE handling.
    default_theta = 100.0

    def __init__(
        self,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        image_size=640,
        patch_size=16,
        num_channels=3,
        layerscale_value=1.0,
        drop_path_rate=0.0,
        num_upscale_blocks=2,
        attention_dropout=0.0,
        num_blocks=4,
        no_object_weight: float = 0.1,
        class_weight: float = 2.0,
        mask_weight: float = 5.0,
        dice_weight: float = 5.0,
        train_num_points: int = 12544,
        oversample_ratio: float = 3.0,
        importance_sample_ratio: float = 0.75,
        num_queries=200,
        num_register_tokens=4,
        rope_parameters: RopeParameters | dict[str, RopeParameters] | None = None,
        query_bias: bool = True,
        key_bias: bool = False,
        value_bias: bool = True,
        proj_bias: bool = True,
        mlp_bias: bool = True,
        use_gated_mlp: bool = False,
        pos_embed_shift: float | None = None,
        pos_embed_jitter: float | None = None,
        pos_embed_rescale: float | None = 2.0,
        **kwargs,
    ):
        # EoMT/DINOv3-specific hyperparameters stored directly on the instance.
        self.intermediate_size = intermediate_size
        self.attention_dropout = attention_dropout
        self.layerscale_value = layerscale_value
        self.drop_path_rate = drop_path_rate
        self.num_upscale_blocks = num_upscale_blocks
        self.num_blocks = num_blocks
        self.no_object_weight = no_object_weight
        self.class_weight = class_weight
        self.mask_weight = mask_weight
        self.dice_weight = dice_weight
        self.train_num_points = train_num_points
        self.oversample_ratio = oversample_ratio
        self.importance_sample_ratio = importance_sample_ratio
        self.num_queries = num_queries
        self.num_register_tokens = num_register_tokens
        self.rope_parameters = rope_parameters
        self.query_bias = query_bias
        self.key_bias = key_bias
        self.value_bias = value_bias
        self.proj_bias = proj_bias
        self.mlp_bias = mlp_bias
        self.use_gated_mlp = use_gated_mlp
        self.pos_embed_shift = pos_embed_shift
        self.pos_embed_jitter = pos_embed_jitter
        self.pos_embed_rescale = pos_embed_rescale
        # Shared ViT-style options are forwarded to the `EomtConfig` parent.
        super().__init__(
            hidden_size=hidden_size,
            num_hidden_layers=num_hidden_layers,
            num_attention_heads=num_attention_heads,
            hidden_dropout_prob=hidden_dropout_prob,
            hidden_act=hidden_act,
            initializer_range=initializer_range,
            layer_norm_eps=layer_norm_eps,
            image_size=image_size,
            patch_size=patch_size,
            num_channels=num_channels,
            **kwargs,
        )
        # Attributes set by `EomtConfig` that do not apply to the DINOv3-backed
        # variant are removed from the instance.
        del self.qkv_bias
        del self.pooler_act
        del self.pooler_output_size
        del self.encoder_stride
        del self.attention_probs_dropout_prob
        del self.mlp_ratio
        del self.use_swiglu_ffn
class EomtDinov3Attention(DINOv3ViTAttention):
    """Self-attention layer; inherits the DINOv3 ViT attention unchanged."""

    pass
class EomtDinov3Embeddings(DINOv3ViTEmbeddings):
    """DINOv3 patch/token embeddings with the EoMT prefix-token count."""

    def __init__(self, config: EomtDinov3Config):
        super().__init__(config)
        # One CLS token plus the learnable register tokens precede the patch tokens.
        self.num_prefix_tokens = 1 + config.num_register_tokens
class EomtDinov3Layer(DINOv3ViTLayer):
    """Transformer encoder layer; inherits the DINOv3 ViT layer unchanged."""

    pass
class EomtDinov3LayerScale(DINOv3ViTLayerScale):
    """LayerScale module; inherits the DINOv3 ViT implementation unchanged."""

    pass
class EomtDinov3RotaryEmbedding(DINOv3ViTRopePositionEmbedding):
    """RoPE position embedding that only supports the `default` rope type."""

    # Registered as a non-persistent buffer in `__init__`.
    inv_freq: Tensor

    def __init__(self, config: EomtDinov3Config, device=None):
        # Deliberately bypass the DINOv3 parent `__init__`: this class computes
        # its own inverse frequencies below instead of reusing the parent setup.
        nn.Module.__init__(self)
        self.config = config
        self.rope_type = self.config.rope_parameters["rope_type"]
        rope_init_fn: Callable = self.compute_default_rope_parameters
        if self.rope_type != "default":
            raise ValueError("`EomtDinov3` only supports `default` RoPE! Please check your `rope_type`")
        inv_freq, self.attention_scaling = rope_init_fn(self.config, device)
        self.register_buffer("inv_freq", inv_freq, persistent=False)
        # Pristine copy kept so `inv_freq` can be restored after any in-place scaling.
        self.register_buffer("original_inv_freq", inv_freq.clone(), persistent=False)

    @staticmethod
    def compute_default_rope_parameters(
        config: EomtDinov3Config | None = None,
        device: Optional["torch.device"] = None,
        seq_len: int | None = None,
    ) -> tuple[torch.Tensor, float]:
        """
        Computes the inverse frequencies according to the original RoPE implementation
        Args:
            config ([`~transformers.PreTrainedConfig`]):
                The model configuration.
            device (`torch.device`):
                The device to use for initialization of the inverse frequencies.
            seq_len (`int`, *optional*):
                The current sequence length. Unused for this type of RoPE.
        Returns:
            Tuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the
            post-processing scaling factor applied to the computed cos/sin (unused in this type of RoPE).
        """
        base = config.rope_parameters["rope_theta"]
        head_dim = config.hidden_size // config.num_attention_heads
        attention_factor = 1.0  # Unused in this type of RoPE

        # Compute the inverse frequencies
        inv_freq = 1 / base ** torch.arange(0, 1, 4 / head_dim, dtype=torch.float32, device=device)
        return inv_freq, attention_factor
class EomtDinov3Loss(EomtLoss):
    """Segmentation loss; inherits the EoMT loss unchanged."""

    pass
class EomtDinov3ForUniversalSegmentationOutput(EomtForUniversalSegmentationOutput):
    """Output container; inherits the EoMT segmentation output unchanged."""

    pass
class EomtDinov3PreTrainedModel(EomtPreTrainedModel):
    """Base class wiring config/weight-init conventions for EoMT-DINOv3 models."""

    config_class = EomtDinov3Config
    base_model_prefix = "eomt_dinov3"
    _no_split_modules = ["EomtDinov3Layer"]
    _can_record_outputs = {
        "hidden_states": EomtDinov3Layer,
        "attentions": EomtDinov3Attention,
    }

    def _init_weights(self, module: nn.Module) -> None:
        """Initialize the weights of `module`.

        Generic layers are delegated to `PreTrainedModel._init_weights`; the
        EoMT-DINOv3-specific modules are then handled explicitly below.
        """
        # Fix: `_init_weights` is an instance method, so `self` must be passed
        # explicitly when calling it unbound on the base class — the original
        # `PreTrainedModel._init_weights(module)` bound `module` as `self` and
        # raised a TypeError (missing `module` argument) at runtime.
        PreTrainedModel._init_weights(self, module)
        std = self.config.initializer_range
        if isinstance(module, EomtDinov3LayerScale):
            if hasattr(module, "lambda1"):
                init.constant_(module.lambda1, self.config.layerscale_value)
        elif isinstance(module, EomtDinov3Embeddings):
            init.trunc_normal_(module.cls_token, mean=0.0, std=std)
            init.zeros_(module.register_tokens)
        elif isinstance(module, EomtDinov3Loss):
            # Class weights: 1.0 for real classes, `eos_coef` for the trailing
            # "no object" class.
            empty_weight = torch.ones(module.num_labels + 1)
            empty_weight[-1] = module.eos_coef
            init.copy_(module.empty_weight, empty_weight)
        elif isinstance(module, EomtDinov3ForUniversalSegmentation):
            init.ones_(module.attn_mask_probs)
@auto_docstring(
    custom_intro="""
    The EoMT-DINOv3 model with head on top for instance/semantic/panoptic segmentation.
    """,
)
class EomtDinov3ForUniversalSegmentation(EomtDinov3PreTrainedModel, EomtForUniversalSegmentation):
    def __init__(self, config: EomtDinov3Config):
        super().__init__(config)
        # One CLS token plus the learnable register tokens precede patch tokens.
        self.num_prefix_tokens = 1 + config.num_register_tokens
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.embeddings = EomtDinov3Embeddings(config)
        # Registering `None` removes the backbone's mask token from this model
        # (presumably unused for segmentation — confirm against DINOv3 embeddings).
        self.embeddings.register_parameter("mask_token", None)
        self.rope_embeddings = EomtDinov3RotaryEmbedding(config)
        self.layers = nn.ModuleList([EomtDinov3Layer(config) for _ in range(config.num_hidden_layers)])
        self.post_init()

    # We redefine forward here because EoMT-DINOv3 uses DINOv3 backbone components (RoPE embeddings, layers)
    # which require different integration than the base EoMT model that uses a separate encoder.
    @merge_with_config_defaults
    @capture_outputs
    @auto_docstring
    def forward(
        self,
        pixel_values: Tensor,
        mask_labels: list[Tensor] | None = None,
        class_labels: list[Tensor] | None = None,
        patch_offsets: list[Tensor] | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> EomtDinov3ForUniversalSegmentationOutput:
        r"""
        mask_labels (`list[torch.Tensor]`, *optional*):
            list of mask labels of shape `(num_labels, height, width)` to be fed to a model
        class_labels (`list[torch.LongTensor]`, *optional*):
            list of target class labels of shape `(num_labels, height, width)` to be fed to a model. They identify the
            labels of `mask_labels`, e.g. the label of `mask_labels[i][j]` if `class_labels[i][j]`.
        patch_offsets (`list[torch.Tensor]`, *optional*):
            list of tuples indicating the image index and start and end positions of patches for semantic segmentation.
        """
        masks_queries_logits_per_layer, class_queries_logits_per_layer = (), ()
        hidden_states = self.dropout(self.embeddings(pixel_values))
        position_embeddings = self.rope_embeddings(pixel_values.to(hidden_states.dtype))
        attention_mask = None
        for idx, layer_module in enumerate(self.layers):
            # On entering the last `num_blocks` layers, prepend the learnable
            # object queries to the token sequence.
            if idx == self.num_hidden_layers - self.config.num_blocks:
                query = self.query.weight[None, :, :].expand(hidden_states.shape[0], -1, -1).to(hidden_states.device)
                hidden_states = torch.cat((query, hidden_states), dim=1)

            # Inside the query blocks, build a mask restricting query tokens to
            # the spatial positions where the intermediate mask prediction is
            # positive — always during training, and at inference only while the
            # per-block annealing probability is still > 0.
            if idx >= self.num_hidden_layers - self.config.num_blocks and (
                self.training or self.attn_mask_probs[idx - self.num_hidden_layers + self.config.num_blocks] > 0
            ):
                norm_hidden_states = self.layernorm(hidden_states)
                masks_queries_logits, class_queries_logits = self.predict(norm_hidden_states)
                # Collected for deep supervision in the loss below.
                masks_queries_logits_per_layer += (masks_queries_logits,)
                class_queries_logits_per_layer += (class_queries_logits,)

                attention_mask = torch.ones(
                    hidden_states.shape[0],
                    hidden_states.shape[1],
                    hidden_states.shape[1],
                    device=hidden_states.device,
                    dtype=torch.bool,
                )
                interpolated_logits = F.interpolate(masks_queries_logits, size=self.grid_size, mode="bilinear")
                interpolated_logits = interpolated_logits.view(
                    interpolated_logits.size(0), interpolated_logits.size(1), -1
                )
                num_query_tokens = self.config.num_queries
                encoder_start_tokens = num_query_tokens + self.num_prefix_tokens
                # Set attention mask for queries to focus on encoder tokens based on interpolated logits
                attention_mask[:, :num_query_tokens, encoder_start_tokens:] = interpolated_logits > 0
                # Disable attention mask for random query tokens.
                attention_mask = self._disable_attention_mask(
                    attention_mask,
                    prob=self.attn_mask_probs[idx - self.num_hidden_layers + self.config.num_blocks],
                    num_query_tokens=num_query_tokens,
                    encoder_start_tokens=encoder_start_tokens,
                    device=attention_mask.device,
                )
                # Expand attention mask to 4d mask.
                attention_mask = attention_mask[:, None, ...].expand(-1, self.config.num_attention_heads, -1, -1)
                # Convert the boolean mask to an additive float mask (0 where
                # allowed, dtype-min where blocked). Note the RHS evaluates both
                # uses of `attention_mask` against the boolean tensor before the
                # name is rebound.
                dtype_min = torch.finfo(hidden_states.dtype).min
                attention_mask = attention_mask.to(hidden_states.dtype).masked_fill(~attention_mask, dtype_min)

            hidden_states = layer_module(
                hidden_states,
                attention_mask=attention_mask,
                position_embeddings=position_embeddings,
            )

        sequence_output = self.layernorm(hidden_states)
        # Final prediction from the last layer's normalized output.
        masks_queries_logits, class_queries_logits = self.predict(sequence_output)
        masks_queries_logits_per_layer += (masks_queries_logits,)
        class_queries_logits_per_layer += (class_queries_logits,)

        loss = None
        if mask_labels is not None and class_labels is not None:
            # Deep supervision: sum the loss over every intermediate prediction
            # plus the final one.
            loss = 0.0
            for masks_queries_logits, class_queries_logits in zip(
                masks_queries_logits_per_layer, class_queries_logits_per_layer
            ):
                loss_dict = self.get_loss_dict(
                    masks_queries_logits=masks_queries_logits,
                    class_queries_logits=class_queries_logits,
                    mask_labels=mask_labels,
                    class_labels=class_labels,
                    auxiliary_predictions=None,
                )
                loss += self.get_loss(loss_dict)

        return EomtDinov3ForUniversalSegmentationOutput(
            loss=loss,
            masks_queries_logits=masks_queries_logits,
            class_queries_logits=class_queries_logits,
            last_hidden_state=sequence_output,
            patch_offsets=patch_offsets,
        )
# Public API of this module.
__all__ = [
    "EomtDinov3Config",
    "EomtDinov3PreTrainedModel",
    "EomtDinov3ForUniversalSegmentation",
]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/eomt_dinov3/modular_eomt_dinov3.py",
"license": "Apache License 2.0",
"lines": 399,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:utils/aggregate_failure_reports.py | #!/usr/bin/env python
# Copyright 2026 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Aggregate multiple failure report JSON files into a single file.
This script reads all JSON files from a directory and combines them
into a single JSON array.
"""
import argparse
import json
import sys
from pathlib import Path
def aggregate_failures(input_dir, output_file):
    """
    Aggregate failure reports from multiple JSON files.

    Args:
        input_dir: Directory containing failure report JSON files
        output_file: Path to output aggregated JSON file

    Returns:
        Number of failures aggregated
    """
    failures = []
    input_path = Path(input_dir)
    if input_path.exists() and input_path.is_dir():
        # Sort for a deterministic aggregation order — `Path.glob` yields files
        # in a filesystem-dependent order, which made the output unstable.
        for failure_file in sorted(input_path.glob("*.json")):
            try:
                with open(failure_file, encoding="utf-8") as f:
                    failures.append(json.load(f))
            except Exception as e:
                # Best effort: a single malformed report must not abort the run.
                print(f"Error reading {failure_file}: {e}", file=sys.stderr)

    # Write aggregated failures as one JSON array.
    with open(output_file, "w", encoding="utf-8") as f:
        json.dump(failures, f, indent=2)

    print(f"Aggregated {len(failures)} failure(s) from {input_dir} to {output_file}")
    return len(failures)
def main():
    """CLI entry point for the failure-report aggregation script."""
    parser = argparse.ArgumentParser(description="Aggregate failure report JSON files")
    parser.add_argument("--input-dir", required=True, help="Directory containing failure report JSON files")
    parser.add_argument("--output", required=True, help="Output file path for aggregated JSON")
    args = parser.parse_args()

    aggregate_failures(args.input_dir, args.output)
    return 0


if __name__ == "__main__":
    sys.exit(main())
| {
"repo_id": "huggingface/transformers",
"file_path": "utils/aggregate_failure_reports.py",
"license": "Apache License 2.0",
"lines": 64,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:utils/extract_metadata.py | #!/usr/bin/env python
# Copyright 2026 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Extract metadata from setup.py for CI testing.
Usage:
python utils/extract_metadata.py extras # List all extras (one per line)
python utils/extract_metadata.py python-versions # Output JSON array of Python versions
"""
import json
import sys
from pathlib import Path
from types import ModuleType
def get_setup_module() -> ModuleType:
    """Make the repository root importable, then import and return ``setup``."""
    project_root = Path(__file__).parent.parent
    sys.path.insert(0, str(project_root))
    import setup

    return setup
def extract_extras() -> None:
    """Print all extras in definition order (one per line)."""
    setup_module = get_setup_module()
    # Iterating the dict directly yields keys in insertion (definition) order.
    for name in setup_module.extras:
        print(name)
def extract_python_versions() -> None:
    """Print supported Python versions as a JSON array."""
    setup_module = get_setup_module()
    low, high = setup_module.SUPPORTED_PYTHON_VERSIONS
    print(json.dumps([f"3.{minor}" for minor in range(low, high + 1)]))
if __name__ == "__main__":
    usage = "Usage: python utils/extract_metadata.py {extras|python-versions}"
    if len(sys.argv) < 2:
        print(usage, file=sys.stderr)
        sys.exit(1)

    command = sys.argv[1]
    # Dispatch table instead of an if/elif chain.
    handlers = {"extras": extract_extras, "python-versions": extract_python_versions}
    handler = handlers.get(command)
    if handler is None:
        print(f"Unknown command: {command}", file=sys.stderr)
        print(usage, file=sys.stderr)
        sys.exit(1)
    handler()
| {
"repo_id": "huggingface/transformers",
"file_path": "utils/extract_metadata.py",
"license": "Apache License 2.0",
"lines": 55,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:utils/format_extras_slack_message.py | #!/usr/bin/env python
# Copyright 2026 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Format extras smoke test results for Slack notification.
This script reads failure reports from a JSON file and outputs environment
variables for GitHub Actions to use in Slack notifications.
"""
import argparse
import json
import os
import sys
def format_slack_message(failures_file, workflow_url, output_file=None):
    """
    Format extras smoke test results into Slack message components.

    Args:
        failures_file: Path to JSON file containing failure reports
        workflow_url: URL to the GitHub Actions workflow run
        output_file: Optional path to output file (defaults to GITHUB_ENV)

    Returns:
        Dictionary with title, message, and workflow_url
    """
    with open(failures_file) as fh:
        failures = json.load(fh)

    if failures:
        # Group the failing extras by the Python version they failed on.
        extras_by_python = {}
        for entry in failures:
            version = entry.get("python_version", "unknown")
            extras_by_python.setdefault(version, []).append(entry.get("extra", "unknown"))

        title = f"Extras Smoke Test Failed - {len(failures)} failure(s)"
        sections = []
        for version in sorted(extras_by_python):
            bullets = "\n".join(f"• `{name}`" for name in sorted(extras_by_python[version]))
            sections.append(f"*Python {version}*\n{bullets}")
        message = "\n\n".join(sections)
    else:
        title = "Extras Smoke Test - All tests passed"
        message = "All extras installed successfully across all Python versions."

    # Fall back to GITHUB_ENV only when no explicit output file was given.
    if output_file is None:
        output_file = os.environ.get("GITHUB_ENV")
        if not output_file:
            print("Error: GITHUB_ENV not set and no output file specified", file=sys.stderr)
            sys.exit(1)

    with open(output_file, "a") as fh:
        fh.write(f"SLACK_TITLE={title}\n")
        fh.write(f"SLACK_WORKFLOW_URL={workflow_url}\n")
        # Heredoc syntax so the multiline message survives the env-file format.
        fh.write("SLACK_MESSAGE<<EOF\n")
        fh.write(f"{message}\n")
        fh.write("EOF\n")

    return {"title": title, "message": message, "workflow_url": workflow_url}
def main():
    """Command-line wrapper around ``format_slack_message``."""
    parser = argparse.ArgumentParser(description="Format extras smoke test results for Slack")
    parser.add_argument("--failures", required=True, help="Path to JSON file containing failure reports")
    parser.add_argument("--workflow-url", required=True, help="URL to the GitHub Actions workflow run")
    parser.add_argument("--output", help="Output file path (defaults to GITHUB_ENV)")
    args = parser.parse_args()

    result = format_slack_message(args.failures, args.workflow_url, args.output)
    print(f"Formatted Slack message: {result['title']}")
    return 0


if __name__ == "__main__":
    sys.exit(main())
| {
"repo_id": "huggingface/transformers",
"file_path": "utils/format_extras_slack_message.py",
"license": "Apache License 2.0",
"lines": 94,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/pp_doclayout_v3/modular_pp_doclayout_v3.py | # Copyright 2026 The PaddlePaddle Team and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn.functional as F
import torchvision.transforms.v2.functional as tvF
from torch import nn
from ... import initialization as init
from ...backbone_utils import consolidate_backbone_kwargs_to_config
from ...configuration_utils import PreTrainedConfig
from ...image_processing_utils_fast import (
BaseImageProcessorFast,
BatchFeature,
)
from ...image_transforms import (
group_images_by_shape,
reorder_images,
)
from ...image_utils import PILImageResampling, SizeDict
from ...modeling_outputs import BaseModelOutput
from ...processing_utils import Unpack
from ...utils import (
ModelOutput,
TransformersKwargs,
auto_docstring,
is_cv2_available,
logging,
requires_backends,
)
from ...utils.generic import TensorType, can_return_tuple
from ..auto import AutoConfig
from ..resnet.modeling_resnet import ResNetConvLayer
from ..rt_detr.modeling_rt_detr import (
RTDetrDecoder,
RTDetrDecoderOutput,
RTDetrForObjectDetection,
RTDetrHybridEncoder,
RTDetrMLPPredictionHead,
RTDetrModel,
RTDetrModelOutput,
RTDetrMultiscaleDeformableAttention,
RTDetrPreTrainedModel,
get_contrastive_denoising_training_group,
inverse_sigmoid,
)
if is_cv2_available():
import cv2
logger = logging.get_logger(__name__)
class PPDocLayoutV3Config(PreTrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`PP-DocLayoutV3`]. It is used to instantiate a
    PP-DocLayoutV3 model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of the PP-DocLayoutV3
    [PaddlePaddle/PP-DocLayoutV3_safetensors](https://huggingface.co/PaddlePaddle/PP-DocLayoutV3_safetensors) architecture.
    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.
    Args:
        initializer_range (`float`, *optional*, defaults to 0.01):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        initializer_bias_prior_prob (`float`, *optional*):
            The prior probability used by the bias initializer to initialize biases for `enc_score_head` and `class_embed`.
            If `None`, `prior_prob` computed as `prior_prob = 1 / (num_labels + 1)` while initializing model weights.
        layer_norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon used by the layer normalization layers.
        batch_norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon used by the batch normalization layers.
        tie_word_embeddings (`bool`, *optional*, defaults to `True`):
            Whether the model's input and output word embeddings should be tied.
        backbone_config (`Union[dict, "PreTrainedConfig"]`, *optional*):
            The configuration of the backbone model.
        freeze_backbone_batch_norms (`bool`, *optional*, defaults to `True`):
            Whether to freeze the batch normalization layers in the backbone.
        encoder_hidden_dim (`int`, *optional*, defaults to 256):
            Dimension of the layers in hybrid encoder.
        encoder_in_channels (`list`, *optional*, defaults to `[512, 1024, 2048]`):
            Multi level features input for encoder.
        feat_strides (`list[int]`, *optional*, defaults to `[8, 16, 32]`):
            Strides used in each feature map.
        encoder_layers (`int`, *optional*, defaults to 1):
            Total of layers to be used by the encoder.
        encoder_ffn_dim (`int`, *optional*, defaults to 1024):
            Dimension of the "intermediate" (often named feed-forward) layer in decoder.
        encoder_attention_heads (`int`, *optional*, defaults to 8):
            Number of attention heads for each attention layer in the Transformer encoder.
        dropout (`float`, *optional*, defaults to 0.0):
            The ratio for all dropout layers.
        activation_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for activations inside the fully connected layer.
        encode_proj_layers (`list[int]`, *optional*, defaults to `[2]`):
            Indexes of the projected layers to be used in the encoder.
        positional_encoding_temperature (`int`, *optional*, defaults to 10000):
            The temperature parameter used to create the positional encodings.
        encoder_activation_function (`str`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"silu"` and `"gelu_new"` are supported.
        activation_function (`str`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the general layer. If string, `"gelu"`,
            `"relu"`, `"silu"` and `"gelu_new"` are supported.
        eval_size (`tuple[int, int]`, *optional*):
            Height and width used to computes the effective height and width of the position embeddings after taking
            into account the stride.
        normalize_before (`bool`, *optional*, defaults to `False`):
            Determine whether to apply layer normalization in the transformer encoder layer before self-attention and
            feed-forward modules.
        hidden_expansion (`float`, *optional*, defaults to 1.0):
            Expansion ratio to enlarge the dimension size of RepVGGBlock and CSPRepLayer.
        mask_feature_channels (`list[int]`, *optional*, defaults to `[64, 64]`):
            The channels of the multi-level features for mask enhancement.
        x4_feat_dim (`int`, *optional*, defaults to 128):
            The dimension of the x4 feature map.
        d_model (`int`, *optional*, defaults to 256):
            Dimension of the layers exclude hybrid encoder.
        num_prototypes (`int`, *optional*, defaults to 32):
            Dimension of the layers exclude mask query head.
        label_noise_ratio (`float`, *optional*, defaults to 0.4):
            The fraction of denoising labels to which random noise should be added.
        box_noise_scale (`float`, *optional*, defaults to 0.4):
            Scale or magnitude of noise to be added to the bounding boxes.
        mask_enhanced (`bool`, *optional*, defaults to `True`):
            Whether to use enhanced masked attention.
        num_queries (`int`, *optional*, defaults to 300):
            Number of object queries.
        decoder_in_channels (`list`, *optional*, defaults to `[256, 256, 256]`):
            Multi level features dimension for decoder
        decoder_ffn_dim (`int`, *optional*, defaults to 1024):
            Dimension of the "intermediate" (often named feed-forward) layer in decoder.
        num_feature_levels (`int`, *optional*, defaults to 3):
            The number of input feature levels.
        decoder_n_points (`int`, *optional*, defaults to 4):
            The number of sampled keys in each feature level for each attention head in the decoder.
        decoder_layers (`int`, *optional*, defaults to 6):
            Number of decoder layers.
        decoder_attention_heads (`int`, *optional*, defaults to 8):
            Number of attention heads for each attention layer in the Transformer decoder.
        decoder_activation_function (`str`, *optional*, defaults to `"relu"`):
            The non-linear activation function (function or string) in the decoder. If string, `"gelu"`,
            `"relu"`, `"silu"` and `"gelu_new"` are supported.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        num_denoising (`int`, *optional*, defaults to 100):
            The total number of denoising tasks or queries to be used for contrastive denoising.
        learn_initial_query (`bool`, *optional*, defaults to `False`):
            Indicates whether the initial query embeddings for the decoder should be learned during training
        anchor_image_size (`tuple[int, int]`, *optional*):
            Height and width of the input image used during evaluation to generate the bounding box anchors. If None, automatic generate anchor is applied.
        disable_custom_kernels (`bool`, *optional*, defaults to `True`):
            Whether to disable custom kernels.
        is_encoder_decoder (`bool`, *optional*, defaults to `True`):
            Whether the architecture has an encoder decoder structure.
        global_pointer_head_size (`int`, *optional*, defaults to 64):
            The size of the global pointer head.
        gp_dropout_value (`float`, *optional*, defaults to 0.1):
            The dropout probability in the global pointer head.
    Examples:
    ```python
    >>> from transformers import PPDocLayoutV3Config, PPDocLayoutV3ForObjectDetection
    >>> # Initializing a PP-DocLayoutV3 configuration
    >>> configuration = PPDocLayoutV3Config()
    >>> # Initializing a model (with random weights) from the configuration
    >>> model = PPDocLayoutV3ForObjectDetection(configuration)
    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""
    model_type = "pp_doclayout_v3"
    sub_configs = {"backbone_config": AutoConfig}
    layer_types = ("basic", "bottleneck")
    # Allow generic `hidden_size` / `num_attention_heads` access to map onto
    # the model-specific attribute names.
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__(
        self,
        initializer_range=0.01,
        initializer_bias_prior_prob=None,
        layer_norm_eps=1e-5,
        batch_norm_eps=1e-5,
        tie_word_embeddings=True,
        # backbone
        backbone_config=None,
        freeze_backbone_batch_norms=True,
        # encoder PPDocLayoutV3HybridEncoder
        encoder_hidden_dim=256,
        encoder_in_channels=[512, 1024, 2048],
        feat_strides=[8, 16, 32],
        encoder_layers=1,
        encoder_ffn_dim=1024,
        encoder_attention_heads=8,
        dropout=0.0,
        activation_dropout=0.0,
        encode_proj_layers=[2],
        positional_encoding_temperature=10000,
        encoder_activation_function="gelu",
        activation_function="silu",
        eval_size=None,
        normalize_before=False,
        hidden_expansion=1.0,
        mask_feature_channels=[64, 64],
        x4_feat_dim=128,
        # decoder PPDocLayoutV3Transformer
        d_model=256,
        num_prototypes=32,
        label_noise_ratio=0.4,
        box_noise_scale=0.4,
        mask_enhanced=True,
        num_queries=300,
        decoder_in_channels=[256, 256, 256],
        decoder_ffn_dim=1024,
        num_feature_levels=3,
        decoder_n_points=4,
        decoder_layers=6,
        decoder_attention_heads=8,
        decoder_activation_function="relu",
        attention_dropout=0.0,
        num_denoising=100,
        learn_initial_query=False,
        anchor_image_size=None,
        disable_custom_kernels=True,
        is_encoder_decoder=True,
        global_pointer_head_size=64,
        gp_dropout_value=0.1,
        **kwargs,
    ):
        self.initializer_range = initializer_range
        self.initializer_bias_prior_prob = initializer_bias_prior_prob
        self.layer_norm_eps = layer_norm_eps
        self.batch_norm_eps = batch_norm_eps
        self.tie_word_embeddings = tie_word_embeddings
        # Resolve `backbone_config` (dict / config object / None) into a concrete
        # backbone config, defaulting to an HGNet-V2 "L" backbone; the helper
        # returns the kwargs that remain after backbone-related ones are consumed.
        backbone_config, kwargs = consolidate_backbone_kwargs_to_config(
            backbone_config=backbone_config,
            default_config_type="hgnet_v2",
            default_config_kwargs={
                "arch": "L",
                "return_idx": [0, 1, 2, 3],
                "freeze_stem_only": True,
                "freeze_at": 0,
                "freeze_norm": True,
                "lr_mult_list": [0, 0.05, 0.05, 0.05, 0.05],
                "out_features": ["stage1", "stage2", "stage3", "stage4"],
            },
            **kwargs,
        )
        self.backbone_config = backbone_config
        self.freeze_backbone_batch_norms = freeze_backbone_batch_norms
        # ---- encoder ----
        self.encoder_hidden_dim = encoder_hidden_dim
        # List arguments are copied defensively so callers' lists are not shared.
        self.encoder_in_channels = list(encoder_in_channels)
        self.feat_strides = list(feat_strides)
        self.encoder_layers = encoder_layers
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_attention_heads = encoder_attention_heads
        self.dropout = dropout
        self.activation_dropout = activation_dropout
        self.encode_proj_layers = list(encode_proj_layers)
        self.positional_encoding_temperature = positional_encoding_temperature
        self.encoder_activation_function = encoder_activation_function
        self.activation_function = activation_function
        self.eval_size = list(eval_size) if eval_size is not None else None
        self.normalize_before = normalize_before
        self.hidden_expansion = hidden_expansion
        self.mask_feature_channels = mask_feature_channels
        self.x4_feat_dim = x4_feat_dim
        # ---- decoder ----
        self.d_model = d_model
        self.num_queries = num_queries
        self.num_prototypes = num_prototypes
        self.decoder_in_channels = list(decoder_in_channels)
        self.decoder_ffn_dim = decoder_ffn_dim
        self.num_feature_levels = num_feature_levels
        self.decoder_n_points = decoder_n_points
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_activation_function = decoder_activation_function
        self.attention_dropout = attention_dropout
        self.num_denoising = num_denoising
        self.label_noise_ratio = label_noise_ratio
        self.mask_enhanced = mask_enhanced
        self.box_noise_scale = box_noise_scale
        self.learn_initial_query = learn_initial_query
        self.anchor_image_size = list(anchor_image_size) if anchor_image_size is not None else None
        self.disable_custom_kernels = disable_custom_kernels
        self.global_pointer_head_size = global_pointer_head_size
        self.gp_dropout_value = gp_dropout_value
        # Remaining kwargs (e.g. num_labels, id2label) are handled by the base class.
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
@auto_docstring
class PPDocLayoutV3ImageProcessorFast(BaseImageProcessorFast):
    # Defaults: no mean/std shift (identity normalization) and a fixed 800x800 input.
    resample = PILImageResampling.BICUBIC
    image_mean = [0, 0, 0]
    image_std = [1, 1, 1]
    size = {"height": 800, "width": 800}
    do_resize = True
    do_rescale = True
    do_normalize = True
    # We require `self.resize(..., antialias=False)` to approximate the output of `cv2.resize`
    def _preprocess(
        self,
        images: list["torch.Tensor"],
        do_resize: bool,
        size: SizeDict,
        interpolation: Optional["tvF.InterpolationMode"],
        do_center_crop: bool,
        crop_size: SizeDict,
        do_rescale: bool,
        rescale_factor: float,
        do_normalize: bool,
        image_mean: float | list[float] | None,
        image_std: float | list[float] | None,
        do_pad: bool | None,
        pad_size: SizeDict | None,
        disable_grouping: bool | None,
        return_tensors: str | TensorType | None,
        **kwargs,
    ) -> BatchFeature:
        """Resize, rescale/normalize and optionally crop/pad a batch of images.
        Images are grouped by shape so each group can be processed as one stacked
        tensor, then restored to the original batch order.
        """
        # Group images by size for batched resizing
        grouped_images, grouped_images_index = group_images_by_shape(images, disable_grouping=disable_grouping)
        resized_images_grouped = {}
        for shape, stacked_images in grouped_images.items():
            if do_resize:
                stacked_images = self.resize(
                    image=stacked_images, size=size, interpolation=interpolation, antialias=False
                )
            resized_images_grouped[shape] = stacked_images
        resized_images = reorder_images(resized_images_grouped, grouped_images_index)
        # Group images by size for further processing
        # Needed in case do_resize is False, or resize returns images with different sizes
        grouped_images, grouped_images_index = group_images_by_shape(resized_images, disable_grouping=disable_grouping)
        processed_images_grouped = {}
        for shape, stacked_images in grouped_images.items():
            if do_center_crop:
                stacked_images = self.center_crop(stacked_images, crop_size)
            # Fused rescale and normalize
            stacked_images = self.rescale_and_normalize(
                stacked_images, do_rescale, rescale_factor, do_normalize, image_mean, image_std
            )
            processed_images_grouped[shape] = stacked_images
        processed_images = reorder_images(processed_images_grouped, grouped_images_index)
        if do_pad:
            processed_images = self.pad(processed_images, pad_size=pad_size, disable_grouping=disable_grouping)
        return BatchFeature(data={"pixel_values": processed_images}, tensor_type=return_tensors)
    def _get_order_seqs(self, order_logits):
        """
        Computes the order sequences for a batch of inputs based on logits.
        This function takes in the order logits, calculates order scores using a sigmoid activation,
        and determines the order sequences by ranking the votes derived from the scores.
        Args:
            order_logits (`torch.FloatTensor` of shape `(batch_size, num_queries, num_queries)`):
                Stacked order logits.
        Returns:
            torch.Tensor: A tensor of shape `(batch_size, num_queries)`:
                Containing the computed order sequences for each input in the batch. Each row represents the ranked order of elements for the corresponding input in the batch.
        """
        order_scores = torch.sigmoid(order_logits)
        batch_size, sequence_length, _ = order_scores.shape
        # Each entry (i, j) above the diagonal votes "i before j"; the mirrored
        # lower-triangle term adds the complementary vote from the transpose.
        order_votes = order_scores.triu(diagonal=1).sum(dim=1) + (1.0 - order_scores.transpose(1, 2)).tril(
            diagonal=-1
        ).sum(dim=1)
        order_pointers = torch.argsort(order_votes, dim=1)
        order_seq = torch.empty_like(order_pointers)
        # Invert the argsort permutation: order_seq[p] = rank of element p.
        ranks = torch.arange(sequence_length, device=order_pointers.device, dtype=order_pointers.dtype).expand(
            batch_size, -1
        )
        order_seq.scatter_(1, order_pointers, ranks)
        return order_seq
    def extract_custom_vertices(self, polygon, sharp_angle_thresh=45):
        """Post-process a polygon's vertices, smoothing specific sharp corners.
        Vertices where the cross product of the two adjacent edge vectors is
        negative are either replaced by a point pushed outward along the angle
        bisector (when the corner angle is within 1 degree of
        `sharp_angle_thresh`) or dropped entirely; all other vertices are kept.
        """
        poly = np.array(polygon)
        n = len(poly)
        res = []
        i = 0
        while i < n:
            # Neighboring vertices wrap around the polygon.
            previous_point = poly[(i - 1) % n]
            current_point = poly[i]
            next_point = poly[(i + 1) % n]
            vector_1 = previous_point - current_point
            vector_2 = next_point - current_point
            cross_product_value = (vector_1[1] * vector_2[0]) - (vector_1[0] * vector_2[1])
            if cross_product_value < 0:
                angle_cos = np.clip(
                    (vector_1 @ vector_2) / (np.linalg.norm(vector_1) * np.linalg.norm(vector_2)), -1.0, 1.0
                )
                angle = np.degrees(np.arccos(angle_cos))
                if abs(angle - sharp_angle_thresh) < 1:
                    # Calculate the new point based on the direction of two vectors.
                    dir_vec = vector_1 / np.linalg.norm(vector_1) + vector_2 / np.linalg.norm(vector_2)
                    dir_vec = dir_vec / np.linalg.norm(dir_vec)
                    step_size = (np.linalg.norm(vector_1) + np.linalg.norm(vector_2)) / 2
                    new_point = current_point + dir_vec * step_size
                    res.append(tuple(new_point))
                # NOTE(review): vertices with a negative cross product whose angle is
                # not near the threshold are intentionally not appended (dropped).
            else:
                res.append(tuple(current_point))
            i += 1
        return res
    def _mask2polygon(self, mask, epsilon_ratio=0.004):
        """
        Postprocess mask by removing small noise.
        Args:
            mask (ndarray): The input mask of shape [H, W].
            epsilon_ratio (float): The ratio of epsilon.
        Returns:
            ndarray: The output mask after postprocessing.
        """
        contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        if not contours:
            return None
        # Keep only the largest external contour and simplify it.
        contours = max(contours, key=cv2.contourArea)
        epsilon = epsilon_ratio * cv2.arcLength(contours, True)
        approx_contours = cv2.approxPolyDP(contours, epsilon, True)
        polygon_points = approx_contours.squeeze()
        polygon_points = np.atleast_2d(polygon_points)
        polygon_points = self.extract_custom_vertices(polygon_points)
        return polygon_points
    def _extract_polygon_points_by_masks(self, boxes, masks, scale_ratio):
        """Extract a polygon outline for each box from its low-resolution mask.
        Boxes are in absolute pixel coordinates; the `scale_ratio / 4` factors map
        box pixels into mask space (masks presumably are at 1/4 of the processor
        input resolution -- TODO confirm against the model's mask head).
        Falls back to the axis-aligned box rectangle for degenerate boxes or
        polygons with fewer than 4 vertices.
        """
        scale_width, scale_height = scale_ratio[0] / 4, scale_ratio[1] / 4
        mask_height, mask_width = masks.shape[1:]
        polygon_points = []
        for i in range(len(boxes)):
            x_min, y_min, x_max, y_max = boxes[i].astype(np.int32)
            box_w, box_h = x_max - x_min, y_max - y_min
            # default rect
            rect = np.array(
                [[x_min, y_min], [x_max, y_min], [x_max, y_max], [x_min, y_max]],
                dtype=np.float32,
            )
            if box_w <= 0 or box_h <= 0:
                polygon_points.append(rect)
                continue
            # crop mask
            x_coordinates = [int(round((x_min * scale_width).item())), int(round((x_max * scale_width).item()))]
            x_start, x_end = np.clip(x_coordinates, 0, mask_width)
            y_coordinates = [int(round((y_min * scale_height).item())), int(round((y_max * scale_height).item()))]
            y_start, y_end = np.clip(y_coordinates, 0, mask_height)
            cropped_mask = masks[i, y_start:y_end, x_start:x_end]
            # resize mask to match box size
            resized_mask = cv2.resize(cropped_mask.astype(np.uint8), (box_w, box_h), interpolation=cv2.INTER_NEAREST)
            polygon = self._mask2polygon(resized_mask)
            if polygon is not None and len(polygon) < 4:
                polygon_points.append(rect)
                continue
            if polygon is not None and len(polygon) > 0:
                # Shift polygon from box-local back to absolute image coordinates.
                polygon = polygon + np.array([x_min, y_min])
                polygon_points.append(polygon)
        return polygon_points
    def post_process_object_detection(
        self,
        outputs,
        threshold: float = 0.5,
        target_sizes: TensorType | list[tuple] | None = None,
    ):
        """
        Converts the raw output of [`PPDocLayoutV3ForObjectDetection`] into final bounding boxes in (top_left_x, top_left_y,
        bottom_right_x, bottom_right_y) format. Only supports PyTorch.
        Args:
            outputs ([`DetrObjectDetectionOutput`]):
                Raw outputs of the model.
            threshold (`float`, *optional*, defaults to 0.5):
                Score threshold used both to filter detections and to binarize the predicted masks.
            target_sizes (`torch.Tensor` or `list[tuple[int, int]]`, *optional*):
                `(height, width)` of each image in the batch, used to rescale boxes to absolute coordinates.
        Returns:
            `list[Dict]`: A list of dictionaries, each dictionary containing the scores, labels, boxes and polygon_points for an image
            in the batch as predicted by the model.
        """
        requires_backends(self, ["torch", "cv2"])
        boxes = outputs.pred_boxes
        logits = outputs.logits
        order_logits = outputs.order_logits
        masks = outputs.out_masks
        order_seqs = self._get_order_seqs(order_logits)
        # Convert (cx, cy, w, h) boxes to (x0, y0, x1, y1).
        box_centers, box_dims = torch.split(boxes, 2, dim=-1)
        top_left_coords = box_centers - 0.5 * box_dims
        bottom_right_coords = box_centers + 0.5 * box_dims
        boxes = torch.cat([top_left_coords, bottom_right_coords], dim=-1)
        # NOTE(review): `target_sizes=None` would fail at the `zip` below -- callers
        # appear to be expected to always pass it.
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )
            if isinstance(target_sizes, list):
                img_height, img_width = torch.as_tensor(target_sizes).unbind(1)
            else:
                img_height, img_width = target_sizes.unbind(1)
            scale_factor = torch.stack([img_width, img_height, img_width, img_height], dim=1).to(boxes.device)
            boxes = boxes * scale_factor[:, None, :]
        num_top_queries = logits.shape[1]
        num_classes = logits.shape[2]
        # Select top (query, class) pairs over the flattened score matrix.
        scores = torch.nn.functional.sigmoid(logits)
        scores, index = torch.topk(scores.flatten(1), num_top_queries, dim=-1)
        labels = index % num_classes
        index = index // num_classes
        boxes = boxes.gather(dim=1, index=index.unsqueeze(-1).repeat(1, 1, boxes.shape[-1]))
        masks = masks.gather(
            dim=1, index=index.unsqueeze(-1).unsqueeze(-1).repeat(1, 1, masks.shape[-2], masks.shape[-1])
        )
        masks = (masks.sigmoid() > threshold).int()
        order_seqs = order_seqs.gather(dim=1, index=index)
        results = []
        for score, label, box, order_seq, target_size, mask in zip(
            scores, labels, boxes, order_seqs, target_sizes, masks
        ):
            # Keep detections above threshold, sorted by predicted reading order.
            order_seq = order_seq[score >= threshold]
            order_seq, indices = torch.sort(order_seq)
            polygon_points = self._extract_polygon_points_by_masks(
                box[score >= threshold][indices].detach().cpu().numpy(),
                mask[score >= threshold][indices].detach().cpu().numpy(),
                [self.size["width"] / target_size[1], self.size["height"] / target_size[0]],
            )
            results.append(
                {
                    "scores": score[score >= threshold][indices],
                    "labels": label[score >= threshold][indices],
                    "boxes": box[score >= threshold][indices],
                    "polygon_points": polygon_points,
                    "order_seq": order_seq,
                }
            )
        return results
class PPDocLayoutV3GlobalPointer(nn.Module):
    """Pairwise token scoring head producing reading-order logits.
    Each token is projected into a query and a key of size `head_size`; every
    (i, j) pair is scored with a scaled dot product, and the lower triangle
    (diagonal included) is filled with a large negative value.
    """
    def __init__(self, config):
        super().__init__()
        self.head_size = config.global_pointer_head_size
        # One projection produces both halves: [..., 0, :] = query, [..., 1, :] = key.
        self.dense = nn.Linear(config.d_model, self.head_size * 2)
        self.dropout = nn.Dropout(config.gp_dropout_value)
    def forward(self, inputs):
        batch, seq_len = inputs.shape[0], inputs.shape[1]
        projected = self.dense(inputs).reshape(batch, seq_len, 2, self.head_size)
        projected = self.dropout(projected)
        queries = projected[:, :, 0]
        keys = projected[:, :, 1]
        scores = (queries @ keys.transpose(-2, -1)) / (self.head_size**0.5)
        # Mask out j <= i pairs so only "i before j" scores survive.
        lower_triangle = torch.ones(seq_len, seq_len, device=scores.device).tril().bool()
        return scores.masked_fill(lower_triangle[None], -1e4)
class PPDocLayoutV3MultiscaleDeformableAttention(RTDetrMultiscaleDeformableAttention):
    # Behaves identically to the RT-DETR implementation; subclassed only to give
    # the layer a model-specific name.
    pass
@auto_docstring
class PPDocLayoutV3PreTrainedModel(RTDetrPreTrainedModel):
    @torch.no_grad()
    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, PPDocLayoutV3MultiscaleDeformableAttention):
            # Deformable-attention init: zero the offset weights and bias the
            # offsets toward a ring of unit directions (one angle per head),
            # scaled by the sampling-point index.
            init.constant_(module.sampling_offsets.weight, 0.0)
            default_dtype = torch.get_default_dtype()
            thetas = torch.arange(module.n_heads, dtype=torch.int64).to(default_dtype) * (
                2.0 * math.pi / module.n_heads
            )
            grid_init = torch.stack([thetas.cos(), thetas.sin()], -1)
            # Normalize each direction by its largest component, then tile over
            # levels and points.
            grid_init = (
                (grid_init / grid_init.abs().max(-1, keepdim=True)[0])
                .view(module.n_heads, 1, 1, 2)
                .repeat(1, module.n_levels, module.n_points, 1)
            )
            for i in range(module.n_points):
                grid_init[:, :, i, :] *= i + 1
            init.copy_(module.sampling_offsets.bias, grid_init.view(-1))
            init.constant_(module.attention_weights.weight, 0.0)
            init.constant_(module.attention_weights.bias, 0.0)
            init.xavier_uniform_(module.value_proj.weight)
            init.constant_(module.value_proj.bias, 0.0)
            init.xavier_uniform_(module.output_proj.weight)
            init.constant_(module.output_proj.bias, 0.0)
        elif isinstance(module, PPDocLayoutV3Model):
            # Bias the classification heads to logit(prior_prob) so their initial
            # predicted foreground probability matches the prior.
            prior_prob = self.config.initializer_bias_prior_prob or 1 / (self.config.num_labels + 1)
            bias = float(-math.log((1 - prior_prob) / prior_prob))
            init.xavier_uniform_(module.enc_score_head.weight)
            init.constant_(module.enc_score_head.bias, bias)
            init.xavier_uniform_(module.decoder.class_embed.weight)
            init.constant_(module.decoder.class_embed.bias, bias)
        elif isinstance(module, (nn.Linear, nn.Conv2d, nn.BatchNorm2d)):
            init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                init.zeros_(module.bias)
            # BatchNorm running statistics are reset to the identity transform.
            if getattr(module, "running_mean", None) is not None:
                init.zeros_(module.running_mean)
                init.ones_(module.running_var)
                init.zeros_(module.num_batches_tracked)
        elif isinstance(module, nn.LayerNorm):
            init.ones_(module.weight)
            init.zeros_(module.bias)
        # Embeddings are checked independently of the chain above (an Embedding
        # never matches the earlier branches, so this is effectively an elif).
        if isinstance(module, nn.Embedding):
            init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                init.zeros_(module.weight.data[module.padding_idx])
def mask_to_box_coordinate(mask, dtype):
mask = mask.bool()
height, width = mask.shape[-2:]
y_coords, x_coords = torch.meshgrid(
torch.arange(height, device=mask.device), torch.arange(width, device=mask.device), indexing="ij"
)
x_coords = x_coords.to(dtype)
y_coords = y_coords.to(dtype)
x_coords_masked = x_coords * mask
x_max = x_coords_masked.flatten(start_dim=-2).max(dim=-1).values + 1
x_min = (
torch.where(mask, x_coords_masked, torch.tensor(torch.finfo(dtype).max))
.flatten(start_dim=-2)
.min(dim=-1)
.values
)
y_coords_masked = y_coords * mask
y_max = y_coords_masked.flatten(start_dim=-2).max(dim=-1).values + 1
y_min = (
torch.where(mask, y_coords_masked, torch.tensor(torch.finfo(dtype).max))
.flatten(start_dim=-2)
.min(dim=-1)
.values
)
unnormalized_bbox = torch.stack([x_min, y_min, x_max, y_max], dim=-1)
is_mask_non_empty = torch.any(mask, dim=(-2, -1)).unsqueeze(-1)
unnormalized_bbox = unnormalized_bbox * is_mask_non_empty
norm_tensor = torch.tensor([width, height, width, height], device=mask.device, dtype=dtype)
normalized_bbox_xyxy = unnormalized_bbox / norm_tensor
x_min_norm, y_min_norm, x_max_norm, y_max_norm = normalized_bbox_xyxy.unbind(dim=-1)
center_x = (x_min_norm + x_max_norm) / 2
center_y = (y_min_norm + y_max_norm) / 2
box_width = x_max_norm - x_min_norm
box_height = y_max_norm - y_min_norm
return torch.stack([center_x, center_y, box_width, box_height], dim=-1)
@dataclass
class PPDocLayoutV3DecoderOutput(RTDetrDecoderOutput):
    r"""
    intermediate_hidden_states (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, hidden_size)`):
        Stacked intermediate hidden states (output of each layer of the decoder).
    intermediate_logits (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, sequence_length, config.num_labels)`):
        Stacked intermediate logits (logits of each layer of the decoder).
    intermediate_reference_points (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, sequence_length, hidden_size)`):
        Stacked intermediate reference points (reference points of each layer of the decoder).
    intermediate_predicted_corners (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, 4)`):
        Stacked intermediate predicted corners (predicted corners of each layer of the decoder).
    initial_reference_points (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, 4)`):
        Stacked initial reference points (initial reference points of each layer of the decoder).
    cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` and `config.add_cross_attention=True` is passed or when `config.output_attentions=True`):
        Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
        sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax,
        used to compute the weighted average in the cross-attention heads.
    decoder_out_order_logits (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, config.num_queries, config.num_queries)`):
        Stacked order logits (order logits of each layer of the decoder).
    decoder_out_masks (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, config.num_queries, 200, 200)`):
        Stacked masks (masks of each layer of the decoder).
    """
    # Extra decoder outputs specific to PP-DocLayoutV3: reading-order logits and
    # per-layer predicted masks (all inherited fields come from RTDetrDecoderOutput).
    decoder_out_order_logits: torch.FloatTensor | None = None
    decoder_out_masks: torch.FloatTensor | None = None
@dataclass
@auto_docstring(
    custom_intro="""
    Base class for outputs of the PP-DocLayoutV3 model.
    """
)
class PPDocLayoutV3ModelOutput(RTDetrModelOutput):
    r"""
    last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`):
        Sequence of hidden-states at the output of the last layer of the decoder of the model.
    intermediate_hidden_states (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, hidden_size)`):
        Stacked intermediate hidden states (output of each layer of the decoder).
    intermediate_logits (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, sequence_length, config.num_labels)`):
        Stacked intermediate logits (logits of each layer of the decoder).
    intermediate_reference_points (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, 4)`):
        Stacked intermediate reference points (reference points of each layer of the decoder).
    intermediate_predicted_corners (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, 4)`):
        Stacked intermediate predicted corners (predicted corners of each layer of the decoder).
    initial_reference_points (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`):
        Initial reference points used for the first decoder layer.
    init_reference_points (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`):
        Initial reference points sent through the Transformer decoder.
    enc_topk_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.num_labels)`):
        Predicted bounding boxes scores where the top `config.two_stage_num_proposals` scoring bounding boxes are
        picked as region proposals in the encoder stage. Output of bounding box binary classification (i.e.
        foreground and background).
    enc_topk_bboxes (`torch.FloatTensor` of shape `(batch_size, sequence_length, 4)`):
        Logits of predicted bounding boxes coordinates in the encoder stage.
    enc_outputs_class (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.num_labels)`, *optional*, returned when `config.with_box_refine=True` and `config.two_stage=True`):
        Predicted bounding boxes scores where the top `config.two_stage_num_proposals` scoring bounding boxes are
        picked as region proposals in the first stage. Output of bounding box binary classification (i.e.
        foreground and background).
    enc_outputs_coord_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, 4)`, *optional*, returned when `config.with_box_refine=True` and `config.two_stage=True`):
        Logits of predicted bounding boxes coordinates in the first stage.
    denoising_meta_values (`dict`):
        Extra dictionary for the denoising related values.
    out_order_logits (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, config.num_queries, config.num_queries)`):
        Stacked order logits (order logits of each layer of the decoder).
    out_masks (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, config.num_queries, 200, 200)`):
        Stacked masks (masks of each layer of the decoder).
    """
    # Extra model outputs specific to PP-DocLayoutV3: reading-order logits and
    # predicted masks (all inherited fields come from RTDetrModelOutput).
    out_order_logits: torch.FloatTensor | None = None
    out_masks: torch.FloatTensor | None = None
class PPDocLayoutV3MLPPredictionHead(RTDetrMLPPredictionHead):
    # Behaves identically to the RT-DETR MLP prediction head; subclassed only to
    # give it a model-specific name.
    pass
class PPDocLayoutV3ConvLayer(ResNetConvLayer):
    # Behaves identically to the ResNet conv+norm+activation layer; subclassed
    # only to give it a model-specific name.
    pass
class PPDocLayoutV3ScaleHead(nn.Module):
    """Per-level head that upsamples an FPN feature map to the base stride.
    Applies `log2(fpn_stride) - log2(base_stride)` conv stages (at least one),
    each followed by a 2x bilinear upsample when the strides differ.
    """
    def __init__(self, in_channels, feature_channels, fpn_stride, base_stride, align_corners=False):
        super().__init__()
        num_stages = max(1, int(np.log2(fpn_stride) - np.log2(base_stride)))
        needs_upsample = fpn_stride != base_stride
        stages = []
        for stage_idx in range(num_stages):
            # Only the first stage consumes the raw input channels.
            source_channels = in_channels if stage_idx == 0 else feature_channels
            stages.append(PPDocLayoutV3ConvLayer(source_channels, feature_channels, 3, 1, "silu"))
            if needs_upsample:
                stages.append(nn.Upsample(scale_factor=2, mode="bilinear", align_corners=align_corners))
        self.layers = nn.ModuleList(stages)
    def forward(self, x):
        hidden_state = x
        for stage in self.layers:
            hidden_state = stage(hidden_state)
        return hidden_state
class PPDocLayoutV3MaskFeatFPN(nn.Module):
    """Fuses multi-level features into a single high-resolution mask feature map.
    Each level is upsampled to the finest stride by a `PPDocLayoutV3ScaleHead`,
    the results are summed, and a final conv produces the mask features.
    (The mutable list defaults below are only read, never mutated.)
    """
    def __init__(
        self,
        in_channels=[256, 256, 256],
        fpn_strides=[32, 16, 8],
        feature_channels=256,
        dropout_ratio=0.0,
        out_channels=256,
        align_corners=False,
    ):
        super().__init__()
        # Process levels in ascending-stride order; `reorder_index` maps the
        # incoming feature list into that order (finest stride first).
        reorder_index = np.argsort(fpn_strides, axis=0).tolist()
        in_channels = [in_channels[i] for i in reorder_index]
        fpn_strides = [fpn_strides[i] for i in reorder_index]
        self.reorder_index = reorder_index
        self.fpn_strides = fpn_strides
        self.dropout_ratio = dropout_ratio
        self.align_corners = align_corners
        # `self.dropout` only exists when dropout is enabled; forward guards on
        # `dropout_ratio > 0` accordingly.
        if self.dropout_ratio > 0:
            self.dropout = nn.Dropout2d(dropout_ratio)
        self.scale_heads = nn.ModuleList()
        for i in range(len(fpn_strides)):
            self.scale_heads.append(
                PPDocLayoutV3ScaleHead(
                    in_channels=in_channels[i],
                    feature_channels=feature_channels,
                    # All levels are brought to the finest (smallest) stride.
                    fpn_stride=fpn_strides[i],
                    base_stride=fpn_strides[0],
                    align_corners=align_corners,
                )
            )
        self.output_conv = PPDocLayoutV3ConvLayer(feature_channels, out_channels, 3, 1, "silu")
    def forward(self, inputs):
        x = [inputs[i] for i in self.reorder_index]
        output = self.scale_heads[0](x[0])
        for i in range(1, len(self.fpn_strides)):
            # Resize each coarser level to the running output's spatial size
            # before accumulating (scale heads may not land on the exact size).
            output = output + F.interpolate(
                self.scale_heads[i](x[i]), size=output.shape[2:], mode="bilinear", align_corners=self.align_corners
            )
        if self.dropout_ratio > 0:
            output = self.dropout(output)
        output = self.output_conv(output)
        return output
class PPDocLayoutV3EncoderMaskOutput(nn.Module):
    """Projects fused mask features to `num_prototypes` channels.
    A 3x3 conv layer refines the features, then a 1x1 conv emits the prototypes.
    """
    def __init__(self, in_channels, num_prototypes):
        super().__init__()
        self.base_conv = PPDocLayoutV3ConvLayer(in_channels, in_channels, 3, 1, "silu")
        self.conv = nn.Conv2d(in_channels, num_prototypes, kernel_size=1)
    def forward(self, x):
        refined = self.base_conv(x)
        return self.conv(refined)
class PPDocLayoutV3HybridEncoder(RTDetrHybridEncoder):
    """
    Main difference to `RTDetrHybridEncoder`:
    1. Mask Feature Head: Added `PPDocLayoutV3MaskFeatFPN` module (`self.mask_feature_head`) for document-specific mask feature generation.
    2. Extra Conv Layers: Introduced `self.encoder_mask_lateral` and `self.encoder_mask_output` for mask feature processing and output.
    """

    def __init__(self, config: PPDocLayoutV3Config):
        super().__init__()
        feat_strides = config.feat_strides
        mask_feature_channels = config.mask_feature_channels
        # FPN over the PAN outputs producing the raw mask feature map
        # (mask_feature_channels[0] internal width, mask_feature_channels[1] output width).
        self.mask_feature_head = PPDocLayoutV3MaskFeatFPN(
            [self.encoder_hidden_dim] * len(feat_strides),
            feat_strides,
            feature_channels=mask_feature_channels[0],
            out_channels=mask_feature_channels[1],
        )
        # Lateral 3x3 conv projecting the high-resolution backbone feature
        # (config.x4_feat_dim channels) into the mask-feature space so it can be
        # added to the upsampled FPN output in forward().
        self.encoder_mask_lateral = PPDocLayoutV3ConvLayer(config.x4_feat_dim, mask_feature_channels[1], 3, 1, "silu")
        # Final projection to config.num_prototypes mask-logit channels.
        self.encoder_mask_output = PPDocLayoutV3EncoderMaskOutput(
            in_channels=mask_feature_channels[1], num_prototypes=config.num_prototypes
        )

    def forward(
        self,
        inputs_embeds=None,
        x4_feat=None,
        **kwargs: Unpack[TransformersKwargs],
    ):
        r"""
        Args:
            inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
                Flattened feature map (output of the backbone + projection layer) that is passed to the encoder.
            x4_feat (*optional*):
                High-resolution backbone feature used as lateral input to the mask branch;
                only `x4_feat[0]` is consumed here. Presumably the stride-4 feature map with
                `config.x4_feat_dim` channels — confirm against the caller.
        """
        feature_maps = inputs_embeds
        # AIFI: Apply transformer encoder to specified feature levels
        # NOTE: this mutates the caller-provided `inputs_embeds` list in place.
        if self.config.encoder_layers > 0:
            for i, enc_ind in enumerate(self.encode_proj_layers):
                feature_maps[enc_ind] = self.aifi[i](feature_maps[enc_ind], **kwargs)

        # top-down FPN: start from the coarsest level and fuse towards the finer ones
        fpn_feature_maps = [feature_maps[-1]]
        for idx, (lateral_conv, fpn_block) in enumerate(zip(self.lateral_convs, self.fpn_blocks)):
            backbone_feature_map = feature_maps[self.num_fpn_stages - idx - 1]
            top_fpn_feature_map = fpn_feature_maps[-1]
            # apply lateral block
            top_fpn_feature_map = lateral_conv(top_fpn_feature_map)
            # keep the laterally-projected map as this level's FPN output
            fpn_feature_maps[-1] = top_fpn_feature_map
            # apply fpn block: upsample 2x, concatenate with the backbone level, fuse
            top_fpn_feature_map = F.interpolate(top_fpn_feature_map, scale_factor=2.0, mode="nearest")
            fused_feature_map = torch.concat([top_fpn_feature_map, backbone_feature_map], dim=1)
            new_fpn_feature_map = fpn_block(fused_feature_map)
            fpn_feature_maps.append(new_fpn_feature_map)
        # finest level first from here on
        fpn_feature_maps.reverse()

        # bottom-up PAN: downsample and fuse back towards the coarser levels
        pan_feature_maps = [fpn_feature_maps[0]]
        for idx, (downsample_conv, pan_block) in enumerate(zip(self.downsample_convs, self.pan_blocks)):
            top_pan_feature_map = pan_feature_maps[-1]
            fpn_feature_map = fpn_feature_maps[idx + 1]
            downsampled_feature_map = downsample_conv(top_pan_feature_map)
            fused_feature_map = torch.concat([downsampled_feature_map, fpn_feature_map], dim=1)
            new_pan_feature_map = pan_block(fused_feature_map)
            pan_feature_maps.append(new_pan_feature_map)

        # Mask branch: FPN over the PAN outputs, upsample 2x, add the lateral
        # projection of the high-resolution feature, then project to prototypes.
        mask_feat = self.mask_feature_head(pan_feature_maps)
        mask_feat = F.interpolate(mask_feat, scale_factor=2, mode="bilinear", align_corners=False)
        mask_feat += self.encoder_mask_lateral(x4_feat[0])
        mask_feat = self.encoder_mask_output(mask_feat)

        return PPDocLayoutV3HybridEncoderOutput(
            last_hidden_state=pan_feature_maps,
            mask_feat=mask_feat,
        )
class PPDocLayoutV3Decoder(RTDetrDecoder):
    """
    Main difference to `RTDetrDecoder`:
    A new mask generation process is introduced at each decoder layer.
    """

    def __init__(self, config: PPDocLayoutV3Config):
        super().__init__()
        # Used in forward() to slice the order-head input down to the real
        # (non-denoising) queries.
        self.num_queries = config.num_queries

    def forward(
        self,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        reference_points=None,
        spatial_shapes=None,
        spatial_shapes_list=None,
        level_start_index=None,
        order_head=None,
        global_pointer=None,
        mask_query_head=None,
        norm=None,
        mask_feat=None,
        **kwargs: Unpack[TransformersKwargs],
    ):
        r"""
        Args:
            inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`):
                The query embeddings that are passed into the decoder.
            encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
                Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
                of the decoder.
            encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing cross-attention on padding pixel_values of the encoder. Mask values selected
                in `[0, 1]`:
                - 1 for pixels that are real (i.e. **not masked**),
                - 0 for pixels that are padding (i.e. **masked**).
            reference_points (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)` if `as_two_stage` else `(batch_size, num_queries, 2)`, *optional*):
                Reference point in range `[0, 1]`, top-left (0,0), bottom-right (1, 1), including padding area.
            spatial_shapes (`torch.FloatTensor` of shape `(num_feature_levels, 2)`):
                Spatial shapes of the feature maps.
            level_start_index (`torch.LongTensor` of shape `(num_feature_levels)`, *optional*):
                Indexes for the start of each feature level. In range `[0, sequence_length]`.
            mask_feat (`torch.FloatTensor`, *optional*):
                Prototype mask features from the encoder; combined with per-query mask
                embeddings at every layer to produce per-layer mask logits.
        """
        # NOTE(review): if `inputs_embeds` is None, `hidden_states` stays unbound and
        # the first decoder-layer call raises NameError — callers always pass it.
        if inputs_embeds is not None:
            hidden_states = inputs_embeds

        # decoder layers
        intermediate = ()
        intermediate_reference_points = ()
        intermediate_logits = ()
        decoder_out_order_logits = ()
        decoder_out_masks = ()

        # Map the unactivated reference points into [0, 1] box space.
        reference_points = F.sigmoid(reference_points)

        # https://github.com/lyuwenyu/RT-DETR/blob/94f5e16708329d2f2716426868ec89aa774af016/rtdetr_pytorch/src/zoo/rtdetr/rtdetr_decoder.py#L252
        for idx, decoder_layer in enumerate(self.layers):
            reference_points_input = reference_points.unsqueeze(2)
            object_queries_position_embeddings = self.query_pos_head(reference_points)

            hidden_states = decoder_layer(
                hidden_states,
                object_queries_position_embeddings=object_queries_position_embeddings,
                encoder_hidden_states=encoder_hidden_states,
                reference_points=reference_points_input,
                spatial_shapes=spatial_shapes,
                spatial_shapes_list=spatial_shapes_list,
                level_start_index=level_start_index,
                encoder_attention_mask=encoder_attention_mask,
                **kwargs,
            )

            # hack implementation for iterative bounding box refinement:
            # predict a delta, add it in logit space, and detach so the next layer
            # does not backprop through the previous layer's boxes.
            if self.bbox_embed is not None:
                predicted_corners = self.bbox_embed(hidden_states)
                new_reference_points = F.sigmoid(predicted_corners + inverse_sigmoid(reference_points))
                reference_points = new_reference_points.detach()

            intermediate += (hidden_states,)
            intermediate_reference_points += (
                (new_reference_points,) if self.bbox_embed is not None else (reference_points,)
            )

            # get_pred_class_order_and_mask:
            # per-layer mask logits from the query embeddings and the shared mask features
            out_query = norm(hidden_states)
            mask_query_embed = mask_query_head(out_query)
            batch_size, mask_dim, _ = mask_query_embed.shape
            _, _, mask_h, mask_w = mask_feat.shape
            out_mask = torch.bmm(mask_query_embed, mask_feat.flatten(start_dim=2)).reshape(
                batch_size, mask_dim, mask_h, mask_w
            )
            decoder_out_masks += (out_mask,)

            if self.class_embed is not None:
                logits = self.class_embed(out_query)
                intermediate_logits += (logits,)

            # Reading-order prediction: run the per-layer order head on the last
            # num_queries entries (the real queries; denoising queries are prepended).
            if order_head is not None and global_pointer is not None:
                valid_query = out_query[:, -self.num_queries :] if self.num_queries is not None else out_query
                order_logits = global_pointer(order_head[idx](valid_query))
                decoder_out_order_logits += (order_logits,)

        # Keep batch_size as first dimension
        intermediate = torch.stack(intermediate, dim=1)
        intermediate_reference_points = torch.stack(intermediate_reference_points, dim=1)
        if self.class_embed is not None:
            intermediate_logits = torch.stack(intermediate_logits, dim=1)
        if order_head is not None and global_pointer is not None:
            decoder_out_order_logits = torch.stack(decoder_out_order_logits, dim=1)
        decoder_out_masks = torch.stack(decoder_out_masks, dim=1)

        return PPDocLayoutV3DecoderOutput(
            last_hidden_state=hidden_states,
            intermediate_hidden_states=intermediate,
            intermediate_logits=intermediate_logits,
            intermediate_reference_points=intermediate_reference_points,
            decoder_out_order_logits=decoder_out_order_logits,
            decoder_out_masks=decoder_out_masks,
        )
@auto_docstring(
    custom_intro="""
    PP-DocLayoutV3 Model (consisting of a backbone and encoder-decoder) outputting raw hidden states without any head on top.
    """
)
class PPDocLayoutV3Model(RTDetrModel):
    # The encoder-stage heads share weights with the decoder heads.
    _tied_weights_keys = {
        "decoder.class_embed": "enc_score_head",
        "decoder.bbox_embed": "enc_bbox_head",
    }

    def __init__(self, config: PPDocLayoutV3Config):
        super().__init__(config)
        # NOTE(review): modular-transformers pattern — in the auto-generated modeling
        # file these lines interact with the expanded parent `__init__` body (which
        # populates `encoder_input_proj_list`); as written standalone the slice would
        # be empty. Confirm against the generated `modeling_pp_doclayout_v3.py`.
        encoder_input_proj_list = []
        # Drop the first (highest-resolution) projection: that level feeds the mask branch.
        self.encoder_input_proj = nn.ModuleList(encoder_input_proj_list[1:])
        # One order head per decoder layer, feeding the shared global pointer.
        self.decoder_order_head = nn.ModuleList(
            [nn.Linear(config.d_model, config.d_model) for _ in range(config.decoder_layers)]
        )
        self.decoder_global_pointer = PPDocLayoutV3GlobalPointer(config)
        self.decoder_norm = nn.LayerNorm(config.d_model, eps=config.layer_norm_eps)
        self.decoder = PPDocLayoutV3Decoder(config)
        # Heads attached to the decoder; tied to enc_score_head / enc_bbox_head
        # via `_tied_weights_keys` above.
        self.decoder.class_embed = nn.Linear(config.d_model, config.num_labels)
        self.decoder.bbox_embed = PPDocLayoutV3MLPPredictionHead(config.d_model, config.d_model, 4, num_layers=3)
        self.mask_enhanced = config.mask_enhanced
        self.mask_query_head = PPDocLayoutV3MLPPredictionHead(
            config.d_model, config.d_model, config.num_prototypes, num_layers=3
        )

    @auto_docstring
    @can_return_tuple
    def forward(
        self,
        pixel_values: torch.FloatTensor,
        pixel_mask: torch.LongTensor | None = None,
        encoder_outputs: torch.FloatTensor | None = None,
        labels: list[dict] | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple[torch.FloatTensor] | PPDocLayoutV3ModelOutput:
        r"""
        inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing the flattened feature map (output of the backbone + projection layer), you
            can choose to directly pass a flattened representation of an image.
        decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`, *optional*):
            Optionally, instead of initializing the queries with a tensor of zeros, you can choose to directly pass an
            embedded representation.
        labels (`list[Dict]` of len `(batch_size,)`, *optional*):
            Labels for computing the bipartite matching loss. List of dicts, each dictionary containing at least the
            following 2 keys: 'class_labels' and 'boxes' (the class labels and bounding boxes of an image in the batch
            respectively). The class labels themselves should be a `torch.LongTensor` of len `(number of bounding boxes
            in the image,)` and the boxes a `torch.FloatTensor` of shape `(number of bounding boxes in the image, 4)`.

        Examples:

        ```python
        >>> from transformers import AutoImageProcessor, PPDocLayoutV3Model
        >>> from PIL import Image
        >>> import requests

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> image_processor = AutoImageProcessor.from_pretrained("PaddlePaddle/PP-DocLayoutV3_safetensors")
        >>> model = PPDocLayoutV3Model.from_pretrained("PaddlePaddle/PP-DocLayoutV3_safetensors")

        >>> inputs = image_processor(images=image, return_tensors="pt")

        >>> outputs = model(**inputs)

        >>> last_hidden_states = outputs.last_hidden_state
        >>> list(last_hidden_states.shape)
        [1, 300, 256]
        ```"""
        batch_size, num_channels, height, width = pixel_values.shape
        device = pixel_values.device

        if pixel_mask is None:
            # No padding information supplied: treat every pixel as valid.
            pixel_mask = torch.ones(((batch_size, height, width)), device=device)

        features = self.backbone(pixel_values, pixel_mask)
        # The first backbone output is reserved for the mask branch; the remaining
        # levels are (feature, mask) pairs projected for the encoder.
        x4_feat = features.pop(0)

        proj_feats = [self.encoder_input_proj[level](source) for level, (source, mask) in enumerate(features)]

        if encoder_outputs is None:
            encoder_outputs = self.encoder(
                proj_feats,
                x4_feat,
                **kwargs,
            )
        # If the user passed a tuple for encoder_outputs, we wrap it in a PPDocLayoutV3HybridEncoderOutput when return_dict=True
        elif not isinstance(encoder_outputs, PPDocLayoutV3HybridEncoderOutput):
            encoder_outputs = PPDocLayoutV3HybridEncoderOutput(
                last_hidden_state=encoder_outputs[0],
                hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
                attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
                mask_feat=encoder_outputs[-1],
            )

        # Equivalent to def _get_encoder_input
        # https://github.com/lyuwenyu/RT-DETR/blob/94f5e16708329d2f2716426868ec89aa774af016/rtdetr_pytorch/src/zoo/rtdetr/rtdetr_decoder.py#L412
        sources = []
        for level, source in enumerate(encoder_outputs.last_hidden_state):
            sources.append(self.decoder_input_proj[level](source))

        # Lowest resolution feature maps are obtained via 3x3 stride 2 convolutions on the final stage
        if self.config.num_feature_levels > len(sources):
            _len_sources = len(sources)
            sources.append(self.decoder_input_proj[_len_sources](encoder_outputs.last_hidden_state[-1]))
            for i in range(_len_sources + 1, self.config.num_feature_levels):
                sources.append(self.decoder_input_proj[i](encoder_outputs.last_hidden_state[-1]))

        # Prepare encoder inputs (by flattening)
        source_flatten = []
        spatial_shapes_list = []
        spatial_shapes = torch.empty((len(sources), 2), device=device, dtype=torch.long)
        for level, source in enumerate(sources):
            height, width = source.shape[-2:]
            spatial_shapes[level, 0] = height
            spatial_shapes[level, 1] = width
            spatial_shapes_list.append((height, width))
            # (batch, C, H, W) -> (batch, H*W, C)
            source = source.flatten(2).transpose(1, 2)
            source_flatten.append(source)
        source_flatten = torch.cat(source_flatten, 1)
        # Cumulative offsets of each level inside the flattened sequence.
        level_start_index = torch.cat((spatial_shapes.new_zeros((1,)), spatial_shapes.prod(1).cumsum(0)[:-1]))

        # prepare denoising training
        if self.training and self.config.num_denoising > 0 and labels is not None:
            (
                denoising_class,
                denoising_bbox_unact,
                attention_mask,
                denoising_meta_values,
            ) = get_contrastive_denoising_training_group(
                targets=labels,
                num_classes=self.config.num_labels,
                num_queries=self.config.num_queries,
                class_embed=self.denoising_class_embed,
                num_denoising_queries=self.config.num_denoising,
                label_noise_ratio=self.config.label_noise_ratio,
                box_noise_scale=self.config.box_noise_scale,
            )
        else:
            denoising_class, denoising_bbox_unact, attention_mask, denoising_meta_values = None, None, None, None

        batch_size = len(source_flatten)
        device = source_flatten.device
        dtype = source_flatten.dtype

        # prepare input for decoder
        if self.training or self.config.anchor_image_size is None:
            # Pass spatial_shapes as tuple to make it hashable and make sure
            # lru_cache is working for generate_anchors()
            spatial_shapes_tuple = tuple(spatial_shapes_list)
            anchors, valid_mask = self.generate_anchors(spatial_shapes_tuple, device=device, dtype=dtype)
        else:
            anchors, valid_mask = self.anchors, self.valid_mask
            anchors, valid_mask = anchors.to(device, dtype), valid_mask.to(device, dtype)

        # use the valid_mask to selectively retain values in the feature map where the mask is `True`
        memory = valid_mask.to(source_flatten.dtype) * source_flatten

        output_memory = self.enc_output(memory)

        enc_outputs_class = self.enc_score_head(output_memory)
        enc_outputs_coord_logits = self.enc_bbox_head(output_memory) + anchors

        # Select the top num_queries proposals by best class score.
        _, topk_ind = torch.topk(enc_outputs_class.max(-1).values, self.config.num_queries, dim=1)

        reference_points_unact = enc_outputs_coord_logits.gather(
            dim=1, index=topk_ind.unsqueeze(-1).repeat(1, 1, enc_outputs_coord_logits.shape[-1])
        )

        # _get_pred_class_and_mask: mask embeddings for the selected queries
        batch_ind = torch.arange(memory.shape[0], device=output_memory.device).unsqueeze(1)
        target = output_memory[batch_ind, topk_ind]
        out_query = self.decoder_norm(target)
        mask_query_embed = self.mask_query_head(out_query)
        batch_size, mask_dim, _ = mask_query_embed.shape

        enc_topk_bboxes = F.sigmoid(reference_points_unact)
        enc_topk_logits = enc_outputs_class.gather(
            dim=1, index=topk_ind.unsqueeze(-1).repeat(1, 1, enc_outputs_class.shape[-1])
        )

        # extract region features
        # NOTE: `target` is re-derived here (learned embedding or gathered memory),
        # overwriting the advanced-indexed version used for the mask embeddings above.
        if self.config.learn_initial_query:
            target = self.weight_embedding.tile([batch_size, 1, 1])
        else:
            target = output_memory.gather(dim=1, index=topk_ind.unsqueeze(-1).repeat(1, 1, output_memory.shape[-1]))
            target = target.detach()

        if denoising_class is not None:
            target = torch.concat([denoising_class, target], 1)

        # Optionally replace the encoder box proposals with boxes derived from the
        # predicted encoder masks (tighter boxes around the mask support).
        if self.mask_enhanced:
            _, _, mask_h, mask_w = encoder_outputs.mask_feat.shape
            enc_out_masks = torch.bmm(mask_query_embed, encoder_outputs.mask_feat.flatten(start_dim=2)).reshape(
                batch_size, mask_dim, mask_h, mask_w
            )
            reference_points = mask_to_box_coordinate(enc_out_masks > 0, dtype=reference_points_unact.dtype)
            reference_points_unact = inverse_sigmoid(reference_points)

        if denoising_bbox_unact is not None:
            reference_points_unact = torch.concat([denoising_bbox_unact, reference_points_unact], 1)

        init_reference_points = reference_points_unact.detach()

        # decoder
        decoder_outputs = self.decoder(
            inputs_embeds=target,
            encoder_hidden_states=source_flatten,
            encoder_attention_mask=attention_mask,
            reference_points=init_reference_points,
            spatial_shapes=spatial_shapes,
            spatial_shapes_list=spatial_shapes_list,
            level_start_index=level_start_index,
            order_head=self.decoder_order_head,
            global_pointer=self.decoder_global_pointer,
            mask_query_head=self.mask_query_head,
            norm=self.decoder_norm,
            mask_feat=encoder_outputs.mask_feat,
            **kwargs,
        )

        return PPDocLayoutV3ModelOutput(
            last_hidden_state=decoder_outputs.last_hidden_state,
            intermediate_hidden_states=decoder_outputs.intermediate_hidden_states,
            intermediate_logits=decoder_outputs.intermediate_logits,
            intermediate_reference_points=decoder_outputs.intermediate_reference_points,
            intermediate_predicted_corners=decoder_outputs.intermediate_predicted_corners,
            initial_reference_points=decoder_outputs.initial_reference_points,
            decoder_hidden_states=decoder_outputs.hidden_states,
            decoder_attentions=decoder_outputs.attentions,
            cross_attentions=decoder_outputs.cross_attentions,
            out_order_logits=decoder_outputs.decoder_out_order_logits,
            out_masks=decoder_outputs.decoder_out_masks,
            encoder_last_hidden_state=encoder_outputs.last_hidden_state,
            encoder_hidden_states=encoder_outputs.hidden_states,
            encoder_attentions=encoder_outputs.attentions,
            init_reference_points=init_reference_points,
            enc_topk_logits=enc_topk_logits,
            enc_topk_bboxes=enc_topk_bboxes,
            enc_outputs_class=enc_outputs_class,
            enc_outputs_coord_logits=enc_outputs_coord_logits,
            denoising_meta_values=denoising_meta_values,
        )
@dataclass
@auto_docstring
class PPDocLayoutV3HybridEncoderOutput(BaseModelOutput):
    r"""
    mask_feat (`torch.FloatTensor` of shape `(batch_size, config.num_prototypes, mask_height, mask_width)`):
        Prototype mask features produced by the encoder mask branch
        (`PPDocLayoutV3EncoderMaskOutput` outputs `num_prototypes` channels).
    """

    # Optional because the field defaults to None until the encoder fills it in.
    mask_feat: torch.FloatTensor | None = None
@dataclass
@auto_docstring
class PPDocLayoutV3ForObjectDetectionOutput(ModelOutput):
    r"""
    logits (`torch.FloatTensor` of shape `(batch_size, num_queries, num_classes + 1)`):
        Classification logits (including no-object) for all queries.
    pred_boxes (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`):
        Normalized boxes coordinates for all queries, represented as (center_x, center_y, width, height). These
        values are normalized in [0, 1], relative to the size of each individual image in the batch (disregarding
        possible padding). You can use [`~PPDocLayoutV3ImageProcessorFast.post_process_object_detection`] to retrieve the
        unnormalized (absolute) bounding boxes.
    order_logits (`tuple` of `torch.FloatTensor` of shape `(batch_size, num_queries, num_queries)`):
        Order logits of the final layer of the decoder.
    out_masks (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, height, width)`):
        Masks of the final layer of the decoder.
    last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`):
        Sequence of hidden-states at the output of the last layer of the decoder of the model.
    intermediate_hidden_states (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, hidden_size)`):
        Stacked intermediate hidden states (output of each layer of the decoder).
    intermediate_logits (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, config.num_labels)`):
        Stacked intermediate logits (logits of each layer of the decoder).
    intermediate_reference_points (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, 4)`):
        Stacked intermediate reference points (reference points of each layer of the decoder).
    intermediate_predicted_corners (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, 4)`):
        Stacked intermediate predicted corners (predicted corners of each layer of the decoder).
    initial_reference_points (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, 4)`):
        Stacked initial reference points (initial reference points of each layer of the decoder).
    init_reference_points (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`):
        Initial reference points sent through the Transformer decoder.
    enc_topk_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.num_labels)`, *optional*, returned when `config.with_box_refine=True` and `config.two_stage=True`):
        Logits of predicted bounding boxes coordinates in the encoder.
    enc_topk_bboxes (`torch.FloatTensor` of shape `(batch_size, sequence_length, 4)`, *optional*, returned when `config.with_box_refine=True` and `config.two_stage=True`):
        Normalized (sigmoid-activated) predicted bounding box coordinates of the top-k encoder proposals.
    enc_outputs_class (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.num_labels)`, *optional*, returned when `config.with_box_refine=True` and `config.two_stage=True`):
        Predicted bounding boxes scores where the top `config.two_stage_num_proposals` scoring bounding boxes are
        picked as region proposals in the first stage. Output of bounding box binary classification (i.e.
        foreground and background).
    enc_outputs_coord_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, 4)`, *optional*, returned when `config.with_box_refine=True` and `config.two_stage=True`):
        Logits of predicted bounding boxes coordinates in the first stage.
    denoising_meta_values (`dict`):
        Extra dictionary for the denoising related values
    """

    logits: torch.FloatTensor | None = None
    pred_boxes: torch.FloatTensor | None = None
    order_logits: torch.FloatTensor | None = None
    out_masks: torch.FloatTensor | None = None
    last_hidden_state: torch.FloatTensor | None = None
    intermediate_hidden_states: torch.FloatTensor | None = None
    intermediate_logits: torch.FloatTensor | None = None
    intermediate_reference_points: torch.FloatTensor | None = None
    intermediate_predicted_corners: torch.FloatTensor | None = None
    initial_reference_points: torch.FloatTensor | None = None
    decoder_hidden_states: tuple[torch.FloatTensor] | None = None
    decoder_attentions: tuple[torch.FloatTensor] | None = None
    cross_attentions: tuple[torch.FloatTensor] | None = None
    encoder_last_hidden_state: torch.FloatTensor | None = None
    encoder_hidden_states: tuple[torch.FloatTensor] | None = None
    encoder_attentions: tuple[torch.FloatTensor] | None = None
    init_reference_points: tuple[torch.FloatTensor] | None = None
    enc_topk_logits: torch.FloatTensor | None = None
    enc_topk_bboxes: torch.FloatTensor | None = None
    enc_outputs_class: torch.FloatTensor | None = None
    enc_outputs_coord_logits: torch.FloatTensor | None = None
    denoising_meta_values: dict | None = None
@auto_docstring(
    custom_intro="""
    PP-DocLayoutV3 Model (consisting of a backbone and encoder-decoder) outputs bounding boxes and logits sorted according to reading order,
    which are further decoded into scores and classes.
    """
)
class PPDocLayoutV3ForObjectDetection(RTDetrForObjectDetection, PPDocLayoutV3PreTrainedModel):
    # Buffers/bias keys that legitimately may be absent from checkpoints.
    _keys_to_ignore_on_load_missing = ["num_batches_tracked", "rel_pos_y_bias", "rel_pos_x_bias"]

    def __init__(self, config: PPDocLayoutV3Config):
        super().__init__(config)
        # The per-layer class/bbox heads live on the decoder in this model
        # (see PPDocLayoutV3Model.__init__), so the parent-created ones are removed.
        del self.model.decoder.class_embed
        del self.model.decoder.bbox_embed
        # NOTE(review): modular-transformers pattern — `num_pred` is a local of the
        # expanded parent `__init__`; this `del` tells the converter to drop it.
        del num_pred # noqa
        self.model.denoising_class_embed = nn.Embedding(config.num_labels, config.d_model)
        self.num_queries = config.num_queries
        self.post_init()

    @auto_docstring
    @can_return_tuple
    def forward(
        self,
        pixel_values: torch.FloatTensor,
        pixel_mask: torch.LongTensor | None = None,
        encoder_outputs: torch.FloatTensor | None = None,
        labels: list[dict] | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple[torch.FloatTensor] | PPDocLayoutV3ForObjectDetectionOutput:
        r"""
        inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing the flattened feature map (output of the backbone + projection layer), you
            can choose to directly pass a flattened representation of an image.
        decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`, *optional*):
            Optionally, instead of initializing the queries with a tensor of zeros, you can choose to directly pass an
            embedded representation.
        labels (`list[Dict]` of len `(batch_size,)`, *optional*):
            Labels for computing the bipartite matching loss. List of dicts, each dictionary containing at least the
            following 2 keys: 'class_labels' and 'boxes' (the class labels and bounding boxes of an image in the batch
            respectively). The class labels themselves should be a `torch.LongTensor` of len `(number of bounding boxes
            in the image,)` and the boxes a `torch.FloatTensor` of shape `(number of bounding boxes in the image, 4)`.

        Examples:

        ```python
        >>> from transformers import AutoModelForObjectDetection, AutoImageProcessor
        >>> from PIL import Image
        >>> import requests
        >>> import torch

        >>> url = "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/layout_demo.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> model_path = "PaddlePaddle/PP-DocLayoutV3_safetensors"
        >>> image_processor = AutoImageProcessor.from_pretrained(model_path)
        >>> model = AutoModelForObjectDetection.from_pretrained(model_path)

        >>> # prepare image for the model
        >>> inputs = image_processor(images=[image], return_tensors="pt")

        >>> # forward pass
        >>> outputs = model(**inputs)

        >>> # convert outputs (bounding boxes and class logits) to Pascal VOC format (xmin, ymin, xmax, ymax)
        >>> results = image_processor.post_process_object_detection(outputs, target_sizes=torch.tensor([image.size[::-1]]))

        >>> # print outputs
        >>> for result in results:
        ...     for idx, (score, label_id, box) in enumerate(zip(result["scores"], result["labels"], result["boxes"])):
        ...         score, label = score.item(), label_id.item()
        ...         box = [round(i, 2) for i in box.tolist()]
        ...         print(f"Order {idx + 1}: {model.config.id2label[label]}: {score:.2f} {box}")
        Order 1: text: 0.99 [334.95, 184.78, 897.25, 654.83]
        Order 2: paragraph_title: 0.97 [337.28, 683.92, 869.16, 798.35]
        Order 3: text: 0.99 [335.75, 842.82, 892.13, 1454.32]
        Order 4: text: 0.99 [920.18, 185.28, 1476.38, 464.49]
        Order 5: text: 0.98 [920.47, 483.68, 1480.63, 765.72]
        Order 6: text: 0.98 [920.62, 846.8, 1482.09, 1220.67]
        Order 7: text: 0.97 [920.92, 1239.41, 1469.55, 1378.02]
        Order 8: footnote: 0.86 [335.03, 1614.68, 1483.33, 1731.73]
        Order 9: footnote: 0.83 [334.64, 1756.74, 1471.78, 1845.69]
        Order 10: text: 0.81 [336.8, 1910.52, 661.64, 1939.92]
        Order 11: footnote: 0.96 [336.24, 2114.42, 1450.14, 2172.12]
        Order 12: number: 0.88 [106.0, 2257.5, 135.84, 2282.18]
        Order 13: footer: 0.93 [338.4, 2255.52, 986.15, 2284.37]
        ```"""
        outputs = self.model(
            pixel_values,
            pixel_mask=pixel_mask,
            encoder_outputs=encoder_outputs,
            labels=labels,
            **kwargs,
        )

        intermediate_logits = outputs.intermediate_logits
        intermediate_reference_points = outputs.intermediate_reference_points
        order_logits = outputs.out_order_logits
        out_masks = outputs.out_masks

        # The final predictions come from the last decoder layer (index -1 along
        # the stacked per-layer dimension).
        pred_boxes = intermediate_reference_points[:, -1]
        logits = intermediate_logits[:, -1]
        order_logits = order_logits[:, -1]
        out_masks = out_masks[:, -1]

        # Loss computation is not implemented for this model.
        if labels is not None:
            raise ValueError("PPDocLayoutV3ForObjectDetection does not support training")

        return PPDocLayoutV3ForObjectDetectionOutput(
            logits=logits,
            pred_boxes=pred_boxes,
            order_logits=order_logits,
            out_masks=out_masks,
            last_hidden_state=outputs.last_hidden_state,
            intermediate_hidden_states=outputs.intermediate_hidden_states,
            intermediate_logits=outputs.intermediate_logits,
            intermediate_reference_points=outputs.intermediate_reference_points,
            intermediate_predicted_corners=outputs.intermediate_predicted_corners,
            initial_reference_points=outputs.initial_reference_points,
            decoder_hidden_states=outputs.decoder_hidden_states,
            decoder_attentions=outputs.decoder_attentions,
            cross_attentions=outputs.cross_attentions,
            encoder_last_hidden_state=outputs.encoder_last_hidden_state,
            encoder_hidden_states=outputs.encoder_hidden_states,
            encoder_attentions=outputs.encoder_attentions,
            init_reference_points=outputs.init_reference_points,
            enc_topk_logits=outputs.enc_topk_logits,
            enc_topk_bboxes=outputs.enc_topk_bboxes,
            enc_outputs_class=outputs.enc_outputs_class,
            enc_outputs_coord_logits=outputs.enc_outputs_coord_logits,
            denoising_meta_values=outputs.denoising_meta_values,
        )
# Public API of the PP-DocLayoutV3 modular file.
__all__ = [
    "PPDocLayoutV3ForObjectDetection",
    "PPDocLayoutV3ImageProcessorFast",
    "PPDocLayoutV3Config",
    "PPDocLayoutV3Model",
    "PPDocLayoutV3PreTrainedModel",
]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/pp_doclayout_v3/modular_pp_doclayout_v3.py",
"license": "Apache License 2.0",
"lines": 1344,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:tests/models/pp_doclayout_v3/test_modeling_pp_doclayout_v3.py | # coding = utf-8
# Copyright 2026 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PP-DocLayoutV3 model."""
import inspect
import math
import unittest
import requests
from parameterized import parameterized
from transformers import (
PPDocLayoutV3Config,
PPDocLayoutV3ForObjectDetection,
PPDocLayoutV3ImageProcessorFast,
is_torch_available,
is_vision_available,
)
from transformers.testing_utils import (
require_torch,
require_torch_accelerator,
require_vision,
slow,
torch_device,
)
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
class PPDocLayoutV3ModelTester:
def __init__(
    self,
    parent,
    batch_size=3,
    is_training=False,
    n_targets=3,
    num_labels=25,
    initializer_range=0.01,
    layer_norm_eps=1e-5,
    batch_norm_eps=1e-5,
    # backbone
    backbone_config=None,
    # encoder HybridEncoder
    encoder_hidden_dim=32,
    encoder_in_channels=[32, 32, 32],
    feat_strides=[8, 16, 32],
    encoder_layers=1,
    encoder_ffn_dim=64,
    encoder_attention_heads=2,
    dropout=0.0,
    activation_dropout=0.0,
    encode_proj_layers=[2],
    positional_encoding_temperature=10000,
    encoder_activation_function="gelu",
    activation_function="silu",
    eval_size=None,
    normalize_before=False,
    mask_feature_channels=[32, 32],
    x4_feat_dim=32,
    # decoder PPDocLayoutV3Transformer
    d_model=32,
    num_queries=30,
    decoder_in_channels=[32, 32, 32],
    decoder_ffn_dim=8,
    num_feature_levels=3,
    decoder_n_points=4,
    decoder_layers=2,
    decoder_attention_heads=2,
    decoder_activation_function="relu",
    attention_dropout=0.0,
    num_denoising=0,
    label_noise_ratio=0.5,
    box_noise_scale=1.0,
    learn_initial_query=False,
    anchor_image_size=None,
    image_size=128,
    disable_custom_kernels=True,
):
    """Store the (deliberately tiny) model hyper-parameters used by the common tests."""
    self.parent = parent
    self.batch_size = batch_size
    self.num_channels = 3
    self.is_training = is_training
    self.n_targets = n_targets
    self.initializer_range = initializer_range
    self.layer_norm_eps = layer_norm_eps
    self.batch_norm_eps = batch_norm_eps
    # backbone
    self.backbone_config = backbone_config
    # encoder
    self.encoder_hidden_dim = encoder_hidden_dim
    self.encoder_in_channels = encoder_in_channels
    self.feat_strides = feat_strides
    self.num_labels = num_labels
    self.encoder_layers = encoder_layers
    self.encoder_ffn_dim = encoder_ffn_dim
    self.encoder_attention_heads = encoder_attention_heads
    self.dropout = dropout
    self.activation_dropout = activation_dropout
    self.encode_proj_layers = encode_proj_layers
    self.positional_encoding_temperature = positional_encoding_temperature
    self.encoder_activation_function = encoder_activation_function
    self.activation_function = activation_function
    self.eval_size = eval_size
    self.normalize_before = normalize_before
    self.mask_feature_channels = mask_feature_channels
    self.x4_feat_dim = x4_feat_dim
    # decoder
    self.d_model = d_model
    self.num_queries = num_queries
    self.decoder_in_channels = decoder_in_channels
    self.decoder_ffn_dim = decoder_ffn_dim
    self.num_feature_levels = num_feature_levels
    self.decoder_n_points = decoder_n_points
    self.decoder_layers = decoder_layers
    self.decoder_attention_heads = decoder_attention_heads
    self.decoder_activation_function = decoder_activation_function
    self.attention_dropout = attention_dropout
    self.num_denoising = num_denoising
    self.label_noise_ratio = label_noise_ratio
    self.box_noise_scale = box_noise_scale
    self.learn_initial_query = learn_initial_query
    self.anchor_image_size = anchor_image_size
    self.image_size = image_size
    self.disable_custom_kernels = disable_custom_kernels
    # Number of spatial tokens in the stride-32 feature map (H/32 * W/32).
    self.encoder_seq_length = math.ceil(self.image_size / 32) * math.ceil(self.image_size / 32)
def prepare_config_and_inputs(self):
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
config = self.get_config()
return config, pixel_values
    def get_config(self):
        """Instantiate a small `PPDocLayoutV3Config` on top of a tiny HGNet-V2 backbone."""
        hidden_sizes = [10, 20, 30, 40]
        # Minimal HGNet-V2 backbone so the common tests stay fast.
        backbone_config = {
            "model_type": "hgnet_v2",
            "arch": "L",
            "return_idx": [0, 1, 2, 3],
            "hidden_sizes": [32, 32, 32, 32],
            "stem_channels": [3, 32, 32],
            "stage_in_channels": [32, 32, 32, 32],
            "stage_mid_channels": [32, 32, 32, 32],
            "stage_out_channels": [32, 32, 32, 32],
            "freeze_stem_only": True,
            "freeze_at": 0,
            "freeze_norm": True,
            "lr_mult_list": [0, 0.05, 0.05, 0.05, 0.05],
            "out_features": ["stage1", "stage2", "stage3", "stage4"],
        }
        # NOTE(review): `encoder_in_channels` is taken from `hidden_sizes[1:]` ([20, 30, 40]),
        # not from the backbone's `hidden_sizes` above (all 32) — confirm this is intentional.
        return PPDocLayoutV3Config(
            backbone_config=backbone_config,
            encoder_hidden_dim=self.encoder_hidden_dim,
            encoder_in_channels=hidden_sizes[1:],
            feat_strides=self.feat_strides,
            encoder_layers=self.encoder_layers,
            encoder_ffn_dim=self.encoder_ffn_dim,
            encoder_attention_heads=self.encoder_attention_heads,
            dropout=self.dropout,
            activation_dropout=self.activation_dropout,
            encode_proj_layers=self.encode_proj_layers,
            positional_encoding_temperature=self.positional_encoding_temperature,
            encoder_activation_function=self.encoder_activation_function,
            activation_function=self.activation_function,
            eval_size=self.eval_size,
            normalize_before=self.normalize_before,
            mask_feature_channels=self.mask_feature_channels,
            x4_feat_dim=self.x4_feat_dim,
            d_model=self.d_model,
            num_queries=self.num_queries,
            decoder_in_channels=self.decoder_in_channels,
            decoder_ffn_dim=self.decoder_ffn_dim,
            num_feature_levels=self.num_feature_levels,
            decoder_n_points=self.decoder_n_points,
            decoder_layers=self.decoder_layers,
            decoder_attention_heads=self.decoder_attention_heads,
            decoder_activation_function=self.decoder_activation_function,
            attention_dropout=self.attention_dropout,
            num_denoising=self.num_denoising,
            label_noise_ratio=self.label_noise_ratio,
            box_noise_scale=self.box_noise_scale,
            learn_initial_query=self.learn_initial_query,
            anchor_image_size=self.anchor_image_size,
            image_size=self.image_size,
            disable_custom_kernels=self.disable_custom_kernels,
        )
def prepare_config_and_inputs_for_common(self):
config, pixel_values = self.prepare_config_and_inputs()
inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class PPDocLayoutV3ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model tests for PPDocLayoutV3, driven by `PPDocLayoutV3ModelTester`."""

    # Only the object-detection head is exported for this architecture.
    all_model_classes = (PPDocLayoutV3ForObjectDetection,) if is_torch_available() else ()
    pipeline_model_mapping = {"object-detection": PPDocLayoutV3ForObjectDetection} if is_torch_available() else {}
    is_encoder_decoder = True
    test_missing_keys = False
    test_torch_exportable = True

    def setUp(self):
        self.model_tester = PPDocLayoutV3ModelTester(self)
        self.config_tester = ConfigTester(
            self,
            config_class=PPDocLayoutV3Config,
            has_text_modality=False,
        )

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="PPDocLayoutV3 has tied weights.")
    def test_load_save_without_tied_weights(self):
        pass

    @unittest.skip(reason="PPDocLayoutV3 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="PPDocLayoutV3 does not use test_inputs_embeds_matches_input_ids")
    def test_inputs_embeds_matches_input_ids(self):
        pass

    @unittest.skip(reason="PPDocLayoutV3 does not support input and output embeddings")
    def test_model_get_set_embeddings(self):
        pass

    @unittest.skip(reason="PPDocLayoutV3 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="PPDocLayoutV3 does not use token embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    @unittest.skip(reason="Feed forward chunking is not implemented")
    def test_feed_forward_chunking(self):
        pass

    @unittest.skip(reason="PPDocLayoutV3 does not support training")
    def test_retain_grad_hidden_states_attentions(self):
        pass

    def test_forward_signature(self):
        """The forward signature must start with `pixel_values`."""
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @parameterized.expand(["float32", "float16", "bfloat16"])
    @require_torch_accelerator
    @slow
    def test_inference_with_different_dtypes(self, dtype_str):
        """Smoke-test one forward pass per supported dtype on the accelerator."""
        dtype = {
            "float32": torch.float32,
            "float16": torch.float16,
            "bfloat16": torch.bfloat16,
        }[dtype_str]
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device).to(dtype)
            model.eval()
            # Cast the inputs to the same dtype as the model.
            for key, tensor in inputs_dict.items():
                inputs_dict[key] = tensor.to(dtype)
            with torch.no_grad():
                _ = model(**self._prepare_for_class(inputs_dict, model_class))

    # There is no `num_hidden_layers`; use `encoder_in_channels` instead.
    def test_hidden_states_output(self):
        """Check shapes/counts of encoder (and decoder) hidden states."""

        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", len(self.model_tester.encoder_in_channels) - 1
            )
            self.assertEqual(len(hidden_states), expected_num_layers)

            # Spatial size of the deepest feature map: image_size / last stride.
            self.assertListEqual(
                list(hidden_states[1].shape[-2:]),
                [
                    self.model_tester.image_size // self.model_tester.feat_strides[-1],
                    self.model_tester.image_size // self.model_tester.feat_strides[-1],
                ],
            )

            if config.is_encoder_decoder:
                hidden_states = outputs.decoder_hidden_states

                expected_num_layers = getattr(
                    self.model_tester, "expected_num_hidden_layers", self.model_tester.decoder_layers + 1
                )
                self.assertIsInstance(hidden_states, (list, tuple))
                self.assertEqual(len(hidden_states), expected_num_layers)

                # Decoder states are (num_queries, d_model) per layer.
                self.assertListEqual(
                    list(hidden_states[0].shape[-2:]),
                    [self.model_tester.num_queries, self.model_tester.d_model],
                )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_attention_outputs(self):
        """Check counts/shapes of encoder, decoder and cross attentions, and output ordering."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            # Eager attention so attention probabilities are actually returned.
            model = model_class._from_config(config, attn_implementation="eager")
            config = model.config
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions
            self.assertEqual(len(attentions), self.model_tester.encoder_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions
            self.assertEqual(len(attentions), self.model_tester.encoder_layers)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [
                    self.model_tester.encoder_attention_heads,
                    self.model_tester.encoder_seq_length,
                    self.model_tester.encoder_seq_length,
                ],
            )
            out_len = len(outputs)

            # Base number of output fields for this model family.
            correct_outlen = 14

            # loss is at first position
            if "labels" in inputs_dict:
                correct_outlen += 1  # loss is added to beginning
            # Object Detection model returns pred_logits and pred_boxes
            if model_class.__name__ == "PPDocLayoutV3ForObjectDetection":
                correct_outlen += 3

            self.assertEqual(out_len, correct_outlen)

            # decoder attentions
            decoder_attentions = outputs.decoder_attentions
            self.assertIsInstance(decoder_attentions, (list, tuple))
            self.assertEqual(len(decoder_attentions), self.model_tester.decoder_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [
                    self.model_tester.decoder_attention_heads,
                    self.model_tester.num_queries,
                    self.model_tester.num_queries,
                ],
            )

            # cross attentions (deformable: per-level sampling points, not full seq)
            cross_attentions = outputs.cross_attentions
            self.assertIsInstance(cross_attentions, (list, tuple))
            self.assertEqual(len(cross_attentions), self.model_tester.decoder_layers)
            self.assertListEqual(
                list(cross_attentions[0].shape[-3:]),
                [
                    self.model_tester.decoder_attention_heads,
                    self.model_tester.num_feature_levels,
                    self.model_tester.decoder_n_points,
                ],
            )

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            if hasattr(self.model_tester, "num_hidden_states_types"):
                added_hidden_states = self.model_tester.num_hidden_states_types
            else:
                added_hidden_states = 2
            self.assertEqual(out_len + added_hidden_states, len(outputs))

            self_attentions = outputs.encoder_attentions

            self.assertEqual(len(self_attentions), self.model_tester.encoder_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [
                    self.model_tester.encoder_attention_heads,
                    self.model_tester.encoder_seq_length,
                    self.model_tester.encoder_seq_length,
                ],
            )
@require_torch
@require_vision
@slow
class PPDocLayoutV3ModelIntegrationTest(unittest.TestCase):
    """End-to-end inference checks against the released PP-DocLayoutV3 checkpoint."""

    def setUp(self):
        model_path = "PaddlePaddle/PP-DocLayoutV3_safetensors"
        self.model = PPDocLayoutV3ForObjectDetection.from_pretrained(model_path).to(torch_device)
        self.image_processor = (
            PPDocLayoutV3ImageProcessorFast.from_pretrained(model_path) if is_vision_available() else None
        )
        # Demo document-layout image used as the fixed test input.
        url = "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/layout_demo.jpg"
        self.image = Image.open(requests.get(url, stream=True).raw)

    def test_inference_object_detection_head(self):
        """Verify raw head outputs and post-processed detections against golden values."""
        inputs = self.image_processor(images=self.image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = self.model(**inputs)

        # Classification logits: 300 queries x num_labels.
        expected_shape_logits = torch.Size((1, 300, self.model.config.num_labels))
        expected_logits = torch.tensor(
            [[-4.7670, -6.2655, -6.3641], [-4.9534, -5.8549, -6.4938], [-5.1931, -6.2573, -6.6023]]
        ).to(torch_device)
        self.assertEqual(outputs.logits.shape, expected_shape_logits)
        torch.testing.assert_close(outputs.logits[0, :3, :3], expected_logits, rtol=2e-4, atol=2e-2)

        # Box predictions (normalized cxcywh, 4 values per query).
        expected_shape_boxes = torch.Size((1, 300, 4))
        expected_boxes = torch.tensor(
            [[0.3725, 0.1789, 0.3373], [0.7256, 0.2672, 0.3378], [0.7247, 0.1389, 0.3352]]
        ).to(torch_device)
        self.assertEqual(outputs.pred_boxes.shape, expected_shape_boxes)
        torch.testing.assert_close(outputs.pred_boxes[0, :3, :3], expected_boxes, rtol=2e-4, atol=2e-2)

        # Reading-order logits; -10000 entries act as masked (disallowed) pairs.
        expected_shape_order_logits = torch.Size((1, 300, 300))
        expected_order_logits = torch.tensor(
            [
                [-10000.0000, 2333.5664, 1632.4893],
                [-10000.0000, -10000.0000, -1068.3279],
                [-10000.0000, -10000.0000, -10000.0000],
            ]
        ).to(torch_device)
        self.assertEqual(outputs.order_logits.shape, expected_shape_order_logits)
        torch.testing.assert_close(outputs.order_logits[0, :3, :3], expected_order_logits, rtol=2e-2, atol=2e-2)

        # verify postprocessing
        results = self.image_processor.post_process_object_detection(
            outputs, threshold=0.5, target_sizes=[self.image.size[::-1]]
        )[0]
        expected_scores = torch.tensor(
            [0.9605, 0.9050, 0.9517, 0.9482, 0.9640, 0.9519, 0.9216, 0.7799, 0.7979, 0.5582, 0.7412, 0.7018, 0.8377]
        ).to(torch_device)
        torch.testing.assert_close(results["scores"], expected_scores, rtol=2e-2, atol=2e-2)
        expected_labels = [22, 17, 22, 22, 22, 22, 22, 10, 10, 10, 10, 16, 8]
        self.assertSequenceEqual(results["labels"].tolist(), expected_labels)
        # Boxes are back in absolute pixel coordinates after post-processing.
        expected_slice_boxes = torch.tensor(
            [
                [337.0705, 183.0614, 895.0403, 653.6794],
                [337.8179, 684.5647, 868.7692, 798.1080],
                [921.4486, 185.6825, 1475.8827, 464.3206],
                [920.6929, 484.8696, 1479.4470, 765.1530],
            ]
        ).to(torch_device)
        torch.testing.assert_close(results["boxes"][:4], expected_slice_boxes, rtol=2e-2, atol=2e-2)
        # Polygon points are integer pixel coordinates; allow +/-2 px of slack.
        expected_slice_polygon_points = torch.tensor([[867, 684], [636, 684], [337, 696], [337, 797], [867, 797]]).to(
            torch_device
        )
        torch.testing.assert_close(
            torch.tensor(results["polygon_points"][1], device=torch_device, dtype=expected_slice_polygon_points.dtype),
            expected_slice_polygon_points,
            rtol=0,
            atol=2,
        )
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/pp_doclayout_v3/test_modeling_pp_doclayout_v3.py",
"license": "Apache License 2.0",
"lines": 455,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:tests/models/siglip2/test_tokenization_siglip2.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tempfile
import unittest
from transformers import Siglip2Tokenizer
from transformers.testing_utils import require_tokenizers
@require_tokenizers
class Siglip2TokenizerTest(unittest.TestCase):
    """
    Integration checks for `Siglip2Tokenizer`:
    - loading from the hub,
    - default lowercasing behavior,
    - explicit max-length padding/truncation,
    - save/load roundtrip.
    """

    from_pretrained_id = "google/siglip2-base-patch16-224"

    def test_tokenizer(self):
        tokenizer = Siglip2Tokenizer.from_pretrained(self.from_pretrained_id)

        mixed_case = [
            "HELLO WORLD!",
            "Hello World!!",
            "A Picture Of Zürich",
            "San Francisco",
            "MIXED-case: TeSt 123",
        ]
        lower_case = [text.lower() for text in mixed_case]

        # Default lowercasing: mixed-case and pre-lowered text must encode identically,
        # both one text at a time and through the batch path.
        for mixed, lowered in zip(mixed_case, lower_case):
            with self.subTest(text=mixed):
                self.assertListEqual(
                    tokenizer(mixed, truncation=True)["input_ids"],
                    tokenizer(lowered, truncation=True)["input_ids"],
                )
        batch_mixed = tokenizer(mixed_case, truncation=True)
        batch_lower = tokenizer(lower_case, truncation=True)
        self.assertListEqual(batch_mixed["input_ids"], batch_lower["input_ids"])

        # Padding/truncation to an explicit length (avoid relying on model_max_length);
        # every sequence must come out exactly max_len long.
        max_len = 64
        padded = tokenizer(mixed_case, padding="max_length", truncation=True, max_length=max_len)
        for ids in padded["input_ids"]:
            self.assertEqual(len(ids), max_len)

        # A save/load roundtrip must preserve all of the behavior above.
        with tempfile.TemporaryDirectory() as tmpdir:
            tokenizer.save_pretrained(tmpdir)
            reloaded = Siglip2Tokenizer.from_pretrained(tmpdir)

            batch_mixed_reloaded = reloaded(mixed_case, truncation=True)
            batch_lower_reloaded = reloaded(lower_case, truncation=True)
            self.assertListEqual(batch_mixed_reloaded["input_ids"], batch_lower_reloaded["input_ids"])
            self.assertListEqual(batch_mixed["input_ids"], batch_mixed_reloaded["input_ids"])

            padded_reloaded = reloaded(mixed_case, padding="max_length", truncation=True, max_length=max_len)
            for ids in padded_reloaded["input_ids"]:
                self.assertEqual(len(ids), max_len)
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/siglip2/test_tokenization_siglip2.py",
"license": "Apache License 2.0",
"lines": 62,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:src/transformers/models/youtu/modular_youtu.py | # Copyright 2026 the Tencent and HuggingFace Inc. team. All rights reserved.
#
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
# and OPT implementations in this library. It has been modified from its
# original forms to accommodate minor architectural differences compared
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from torch import nn
from ... import initialization as init
from ...modeling_rope_utils import RopeParameters
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from ..deepseek_v3.configuration_deepseek_v3 import DeepseekV3Config
from ..deepseek_v3.modeling_deepseek_v3 import DeepseekV3Attention
from ..llama.modeling_llama import (
LlamaDecoderLayer,
LlamaForCausalLM,
LlamaModel,
LlamaPreTrainedModel,
LlamaRMSNorm,
LlamaRotaryEmbedding,
)
from ..qwen3.modeling_qwen3 import Qwen3MLP
logger = logging.get_logger(__name__)
class YoutuConfig(DeepseekV3Config):
    r"""
    This is the configuration class to store the configuration of a [`YoutuModel`]. It is used to instantiate an Youtu
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
    defaults will yield a similar configuration to that of the Youtu-LLM-2B.
    e.g. [tencent/Youtu-LLM-2B](https://huggingface.co/tencent/Youtu-LLM-2B)

    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 128256):
            Vocabulary size of the Youtu model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`YoutuModel`]
        hidden_size (`int`, *optional*, defaults to 2048):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 6144):
            Dimension of the MLP representations.
        num_hidden_layers (`int`, *optional*, defaults to 32):
            Number of hidden layers in the Transformer decoder.
        num_attention_heads (`int`, *optional*, defaults to 16):
            Number of attention heads for each attention layer in the Transformer decoder.
        num_key_value_heads (`int`, *optional*, defaults to 16):
            In MLA, num_key_value_heads=num_attention_heads.
        kv_lora_rank (`int`, *optional*, defaults to 512):
            Rank of the LoRA matrices for key and value projections.
        q_lora_rank (`int`, *optional*, defaults to 1536):
            Rank of the LoRA matrices for query projections.
        qk_rope_head_dim (`int`, *optional*, defaults to 64):
            Dimension of the query/key heads that use rotary position embeddings.
        v_head_dim (`int`, *optional*, defaults to 128):
            Dimension of the value heads.
        qk_nope_head_dim (`int`, *optional*, defaults to 128):
            Dimension of the query/key heads that don't use rotary position embeddings.
        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the decoder.
        max_position_embeddings (`int`, *optional*, defaults to 131072):
            The maximum sequence length that this model might ever be used with.
        initializer_range (`float`, *optional*):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices, except
            embedding matrices. If `None`, it is set to `2.0 / (5.0 * hidden_size) ** 0.5`.
        embedding_initializer_range (`float`, *optional*):
            The standard deviation of the truncated_normal_initializer for initializing all embedding matrices. If
            `None`, it is set to `2.0 * initializer_range`.
        rms_norm_eps (`float`, *optional*, defaults to 1e-06):
            The epsilon used by the rms normalization layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        pad_token_id (`int`, *optional*):
            Padding token id.
        bos_token_id (`int`, *optional*, defaults to 128000):
            Beginning of stream token id.
        eos_token_id (`int`, *optional*, defaults to 128001):
            End of stream token id.
        tie_word_embeddings (`bool`, *optional*, defaults to `True`):
            Whether to tie weight embeddings
        rope_parameters (`RopeParameters`, *optional*):
            Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain
            a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE
            with longer `max_position_embeddings`.
        rope_interleave (`bool`, *optional*, defaults to `True`):
            Whether to interleave the rotary position embeddings.
        attention_bias (`bool`, *optional*, defaults to `False`):
            Whether to use a bias in the query, key, value and output projection layers during self-attention.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.

    ```python
    >>> from transformers import YoutuModel, YoutuConfig

    >>> # Initializing a Youtu-LLM-2B style configuration
    >>> configuration = YoutuConfig()

    >>> # Initializing a model from the configuration
    >>> model = YoutuModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "youtu"
    # Tensor-parallel plan for the dense MLP projections.
    base_model_tp_plan = {
        "layers.*.mlp.gate_proj": "colwise",
        "layers.*.mlp.up_proj": "colwise",
        "layers.*.mlp.down_proj": "rowwise",
    }
    # Youtu does not remap any attribute names inherited from DeepseekV3.
    attribute_map = {}

    def __init__(
        self,
        vocab_size: int | None = 128256,
        hidden_size: int | None = 2048,
        intermediate_size: int | None = 6144,
        num_hidden_layers: int | None = 32,
        num_attention_heads: int | None = 16,
        num_key_value_heads: int | None = 16,
        kv_lora_rank: int | None = 512,
        q_lora_rank: int | None = 1536,
        qk_rope_head_dim: int | None = 64,
        v_head_dim: int | None = 128,
        qk_nope_head_dim: int | None = 128,
        hidden_act: str | None = "silu",
        max_position_embeddings: int | None = 131072,
        initializer_range: float | None = None,
        embedding_initializer_range: float | None = None,
        rms_norm_eps: float | None = 1e-6,
        use_cache: bool | None = True,
        pad_token_id: int | None = None,
        bos_token_id: int | None = 128000,
        eos_token_id: int | None = 128001,
        tie_word_embeddings: bool | None = True,
        rope_parameters: RopeParameters | dict[str, RopeParameters] = None,
        rope_interleave: bool | None = True,
        attention_bias: bool | None = False,
        attention_dropout: float | None = 0.0,
        **kwargs,
    ):
        super().__init__(
            vocab_size=vocab_size,
            hidden_size=hidden_size,
            intermediate_size=intermediate_size,
            num_hidden_layers=num_hidden_layers,
            num_attention_heads=num_attention_heads,
            num_key_value_heads=num_key_value_heads,
            kv_lora_rank=kv_lora_rank,
            q_lora_rank=q_lora_rank,
            qk_rope_head_dim=qk_rope_head_dim,
            v_head_dim=v_head_dim,
            qk_nope_head_dim=qk_nope_head_dim,
            hidden_act=hidden_act,
            max_position_embeddings=max_position_embeddings,
            rms_norm_eps=rms_norm_eps,
            use_cache=use_cache,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            rope_parameters=rope_parameters,
            rope_interleave=rope_interleave,
            attention_bias=attention_bias,
            attention_dropout=attention_dropout,
            **kwargs,
        )
        # remove unused attribute (Youtu is dense; drop all DeepseekV3 MoE settings)
        del self.n_shared_experts
        del self.n_routed_experts
        del self.routed_scaling_factor
        del self.n_group
        del self.topk_group
        del self.num_experts_per_tok
        del self.first_k_dense_replace
        del self.norm_topk_prob
        del self.pretraining_tp
        del self.moe_intermediate_size
        # if initializer_range is None, set it to 2.0 / (5.0 * self.hidden_size) ** 0.5 (if hidden size is valid)
        if self.initializer_range is None:
            if self.hidden_size != 0:
                self.initializer_range = 2.0 / (5.0 * self.hidden_size) ** 0.5
            else:
                self.initializer_range = 0.02
        # if embedding_initializer_range is None, set it to 2.0 * self.initializer_range
        if embedding_initializer_range is None:
            self.embedding_initializer_range = 2.0 * self.initializer_range
        else:
            self.embedding_initializer_range = embedding_initializer_range

    def convert_rope_params_to_dict(self, ignore_keys_at_rope_validation: set | None = None, **kwargs):
        # Youtu keeps `rope_parameters` as passed; the DeepseekV3 conversion does not apply.
        raise AttributeError("Not overwritten for the Youtu model!")
class YoutuRMSNorm(LlamaRMSNorm):
    """RMS normalization for Youtu; identical to the Llama implementation."""

    pass
class YoutuRotaryEmbedding(LlamaRotaryEmbedding):
    """Rotary position embedding for Youtu; identical to the Llama implementation."""

    pass
class YoutuMLP(Qwen3MLP):
    """Gated MLP block for Youtu; identical to the Qwen3 implementation."""

    pass
class YoutuAttention(DeepseekV3Attention):
    """Multi-head latent attention (MLA) for Youtu; identical to the DeepseekV3 implementation."""

    pass
class YoutuDecoderLayer(LlamaDecoderLayer):
    """Decoder layer for Youtu; identical to the Llama implementation."""

    pass
class YoutuPreTrainedModel(LlamaPreTrainedModel, PreTrainedModel):
    """Base class wiring Youtu into the generic pretrained-model machinery with a custom embedding init."""

    @torch.no_grad()
    def _init_weights(self, module):
        """Run the default init, then re-initialize embeddings with the wider embedding std."""
        # Generic transformers initialization first.
        PreTrainedModel._init_weights(self, module)
        if not isinstance(module, nn.Embedding):
            return
        base_std = getattr(self.config, "initializer_range", 0.02)
        embedding_std = getattr(self.config, "embedding_initializer_range", 2 * base_std)
        init.normal_(module.weight, mean=0.0, std=embedding_std)
        # Keep the padding vector at zero, as the default init does.
        if module.padding_idx is not None:
            init.zeros_(module.weight.data[module.padding_idx])
class YoutuModel(LlamaModel):
    """Bare Youtu decoder stack; structure identical to the Llama model."""

    pass
class YoutuForCausalLM(LlamaForCausalLM):
    """Youtu model with a causal language-modeling head; identical to the Llama implementation."""

    pass
# Public symbols re-exported by the generated modeling module.
__all__ = [
    "YoutuConfig",
    "YoutuPreTrainedModel",
    "YoutuModel",
    "YoutuForCausalLM",
]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/youtu/modular_youtu.py",
"license": "Apache License 2.0",
"lines": 222,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:tests/models/youtu/test_modeling_youtu.py | # Copyright 2026 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch Youtu-LLM model."""
import unittest
import pytest
from transformers import AutoTokenizer, is_torch_available
from transformers.testing_utils import (
cleanup,
require_deterministic_for_xpu,
require_torch,
require_torch_accelerator,
slow,
torch_device,
)
from ...causal_lm_tester import CausalLMModelTest, CausalLMModelTester
if is_torch_available():
import torch
torch.set_float32_matmul_precision("high")
from transformers import (
Cache,
YoutuForCausalLM,
YoutuModel,
)
class YoutuModelTester(CausalLMModelTester):
    """Builds tiny Youtu configurations/models for the shared causal-LM test suite."""

    if is_torch_available():
        base_model_class = YoutuModel

    def __init__(
        self,
        parent,
        # Small MLA dimensions so the common tests stay fast.
        kv_lora_rank=16,
        q_lora_rank=32,
        qk_rope_head_dim=32,
        qk_nope_head_dim=32,
        v_head_dim=32,
    ):
        super().__init__(parent=parent)
        self.kv_lora_rank = kv_lora_rank
        self.q_lora_rank = q_lora_rank
        self.qk_nope_head_dim = qk_nope_head_dim
        self.qk_rope_head_dim = qk_rope_head_dim
        self.v_head_dim = v_head_dim
@require_torch
class YoutuModelTest(CausalLMModelTest, unittest.TestCase):
    """Common causal-LM tests for Youtu, driven by `YoutuModelTester`."""

    model_tester_class = YoutuModelTester

    def _check_past_key_values_for_generate(self, batch_size, past_key_values, seq_length, config):
        """Needs to be overridden as youtu-llm has special MLA cache format (though we don't really use the MLA)"""
        self.assertIsInstance(past_key_values, Cache)

        # (batch, head, seq_length, head_features)
        expected_common_shape = (
            batch_size,
            getattr(config, "num_key_value_heads", config.num_attention_heads),
            seq_length,
        )
        # MLA caches keys as the concatenation of the no-RoPE and RoPE halves;
        # values use their own head dimension.
        expected_key_shape = expected_common_shape + (config.qk_nope_head_dim + config.qk_rope_head_dim,)
        expected_value_shape = expected_common_shape + (config.v_head_dim,)
        for layer in past_key_values.layers:
            self.assertEqual(layer.keys.shape, expected_key_shape)
            self.assertEqual(layer.values.shape, expected_value_shape)

    @unittest.skip(reason="SDPA can't dispatch on flash due to unsupported head dims")
    def test_sdpa_can_dispatch_on_flash(self):
        pass
@slow
class YoutuIntegrationTest(unittest.TestCase):
    """Slow end-to-end greedy-generation tests against the released Youtu-LLM-2B-Base checkpoint.

    The three tests exercise the same prompts and expect the same completions; they differ
    only in the cache implementation (dynamic / static / static + torch.compile). The shared
    fixtures therefore live on the class instead of being repeated in each test.
    """

    NUM_TOKENS_TO_GENERATE = 40
    PROMPTS = [
        "Simply put, the theory of relativity states that ",
        "My favorite all time favorite condiment is ketchup.",
    ]
    # Greedy decoding (do_sample=False) makes these completions deterministic.
    EXPECTED_TEXT_COMPLETION = [
        "Simply put, the theory of relativity states that , the speed of light is constant in all reference frames. This means that if you are traveling at the speed of light, you will never reach the speed of light. This is because the speed of",
        "My favorite all time favorite condiment is ketchup. I love it on everything. I love it on burgers, hot dogs, and even on my fries. I also love it on my french fries. I love it on my french fries. I love",
    ]

    def tearDown(self):
        cleanup(torch_device, gc_collect=False)

    def _load_model_and_inputs(self):
        """Load the tokenizer and fp16 model, and tokenize the shared prompts.

        Returns:
            Tuple of ``(tokenizer, model, inputs)`` where ``inputs`` is the padded batch
            encoding already moved to the model's device.
        """
        tokenizer = AutoTokenizer.from_pretrained("tencent/Youtu-LLM-2B-Base")
        model = YoutuForCausalLM.from_pretrained(
            "tencent/Youtu-LLM-2B-Base", device_map=torch_device, dtype=torch.float16
        )
        inputs = tokenizer(self.PROMPTS, return_tensors="pt", padding=True).to(model.device)
        return tokenizer, model, inputs

    @require_deterministic_for_xpu
    @require_torch_accelerator
    def test_dynamic_cache(self):
        tokenizer, model, inputs = self._load_model_and_inputs()

        # Dynamic Cache (the default cache implementation)
        generated_ids = model.generate(**inputs, max_new_tokens=self.NUM_TOKENS_TO_GENERATE, do_sample=False)
        dynamic_text = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
        self.assertEqual(self.EXPECTED_TEXT_COMPLETION, dynamic_text)

    @require_deterministic_for_xpu
    @require_torch_accelerator
    def test_static_cache(self):
        tokenizer, model, inputs = self._load_model_and_inputs()

        # Static Cache
        generated_ids = model.generate(
            **inputs, max_new_tokens=self.NUM_TOKENS_TO_GENERATE, do_sample=False, cache_implementation="static"
        )
        static_text = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
        self.assertEqual(self.EXPECTED_TEXT_COMPLETION, static_text)

    @require_deterministic_for_xpu
    @slow
    @require_torch_accelerator
    @pytest.mark.torch_compile_test
    def test_compile_static_cache(self):
        tokenizer, model, inputs = self._load_model_and_inputs()

        # Static Cache + compile
        model._cache = None  # clear cache object, initialized when we pass `cache_implementation="static"`
        model.forward = torch.compile(model.forward, mode="reduce-overhead", fullgraph=False, dynamic=True)
        generated_ids = model.generate(
            **inputs, max_new_tokens=self.NUM_TOKENS_TO_GENERATE, do_sample=False, cache_implementation="static"
        )
        static_compiled_text = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
        self.assertEqual(self.EXPECTED_TEXT_COMPLETION, static_compiled_text)
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/youtu/test_modeling_youtu.py",
"license": "Apache License 2.0",
"lines": 147,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:src/transformers/models/glm_ocr/modular_glm_ocr.py | # Copyright 2026 the HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections.abc import Callable
import torch
import torch.nn as nn
import torch.nn.functional as F
from ...modeling_outputs import BaseModelOutputWithPooling
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS
from ..glm4v.configuration_glm4v import Glm4vConfig, Glm4vTextConfig, Glm4vVisionConfig
from ..glm4v.modeling_glm4v import (
Glm4vForConditionalGeneration,
Glm4VisionMlp,
Glm4vModel,
Glm4vModelOutputWithPast,
Glm4vPreTrainedModel,
Glm4vRMSNorm,
Glm4vTextAttention,
Glm4vVisionAttention,
Glm4vVisionBlock,
Glm4vVisionModel,
Glm4vVisionPatchMerger,
apply_rotary_pos_emb_vision,
eager_attention_forward,
is_flash_attention_requested,
)
class GlmOcrRMSNorm(Glm4vRMSNorm):
    # Identical to Glm4vRMSNorm; re-declared so the modular converter emits a
    # GlmOcr-prefixed class in the generated modeling file.
    pass
class GlmOcrVisionMlp(Glm4VisionMlp):
    # Same MLP as Glm4VisionMlp, but `intermediate_size` comes straight from the config.
    def __init__(self, config, bias: bool = True):
        # NOTE(review): `bias` is accepted but not forwarded to the parent
        # constructor here — presumably the modular converter merges this with
        # Glm4VisionMlp.__init__; confirm against the generated modeling file.
        super().__init__(config)
        self.intermediate_size = config.intermediate_size
class GlmOcrVisionConfig(Glm4vVisionConfig):
    r"""
    This is the configuration class to store the configuration of a [`GlmOcrVisionConfig`]. It is used to instantiate a
    GLM-OCR model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of
    GLM-OCR [zai-org/GLM-OCR](https://huggingface.co/zai-org/GLM-OCR).
    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.
    Args:
        depth (`int`, *optional*, defaults to 24):
            Number of layers (depth) in the model.
        hidden_size (`int`, *optional*, defaults to 1024):
            Dimensionality of the encoder layers and the pooler layer.
        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"silu"`,
            `"relu"`, `"selu"` and `"gelu_new"` are supported.
        attention_bias (`bool`, *optional*, defaults to `True`):
            Whether to add a bias to the queries, keys and values.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            Dropout probability for attention weights.
        num_heads (`int`, *optional*, defaults to 16):
            Number of attention heads for each attention layer in the Transformer architecture.
        in_channels (`int`, *optional*, defaults to 3):
            Number of input channels.
        image_size (`int` or `list[int]`, *optional*, defaults to 336):
            The size (resolution) of each image.
        patch_size (`int`, *optional*, defaults to 14):
            The size (resolution) of each patch.
        rms_norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon used by the rms normalization layers.
        spatial_merge_size (`int`, *optional*, defaults to 2):
            The size used for merging spatial dimensions.
        temporal_patch_size (`int`, *optional*, defaults to 2):
            The size used for patches along the temporal dimension.
        out_hidden_size (`int`, *optional*, defaults to 1536):
            The output hidden size of the vision model.
        intermediate_size (`int`, *optional*, defaults to 4096):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
    """
    def __init__(
        self,
        depth=24,
        hidden_size=1024,
        hidden_act="silu",
        attention_bias=True,
        num_heads=16,
        image_size=336,
        out_hidden_size=1536,
        intermediate_size=4096,
        **super_kwargs,
    ):
        # Only `super_kwargs` are forwarded here; the explicitly named defaults
        # above pin GLM-OCR's architecture values and are presumably picked up
        # by the modular converter when generating the final configuration class.
        super().__init__(**super_kwargs)
class GlmOcrTextConfig(Glm4vTextConfig):
    r"""
    This is the configuration class to store the configuration of a [`GlmOcrTextConfig`]. It is used to instantiate a
    GLM-OCR model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of
    GLM-OCR [zai-org/GLM-OCR](https://huggingface.co/zai-org/GLM-OCR).
    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.
    Args:
        vocab_size (`int`, *optional*, defaults to 59392):
            Vocabulary size of the GlmOcr model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`GlmOcrModel`]
        hidden_size (`int`, *optional*, defaults to 1024):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 4096):
            Dimension of the MLP representations.
        num_hidden_layers (`int`, *optional*, defaults to 16):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 16):
            Number of attention heads for each attention layer in the Transformer encoder.
        num_key_value_heads (`int`, *optional*, defaults to 8):
            This is the number of key_value heads that should be used to implement Grouped Query Attention. If
            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
            `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
            by meanpooling all the original heads within that group. For more details checkout [this
            paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to `32`.
        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the decoder.
        max_position_embeddings (`int`, *optional*, defaults to 131072):
            The maximum sequence length that this model might ever be used with.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        rms_norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon used by the rms normalization layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        rope_parameters (`RopeParameters`, *optional*):
            Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain
            a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE
            with longer `max_position_embeddings`.
        pad_token_id (`int`, *optional*):
            The id of the padding token.
    ```python
    >>> from transformers import GlmOcrTextModel, GlmOcrConfig
    >>> # Initializing a GLM-OCR style configuration
    >>> configuration = GlmOcrConfig()
    >>> # Initializing a model from the GLM-OCR style configuration
    >>> model = GlmOcrTextModel(configuration)
    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""
    def __init__(
        self,
        vocab_size: int | None = 59392,
        hidden_size: int | None = 1024,
        intermediate_size: int | None = 4096,
        num_hidden_layers: int | None = 16,
        num_attention_heads: int | None = 16,
        num_key_value_heads: int | None = 8,
        max_position_embeddings: int | None = 131072,
        **super_kwargs,
    ):
        # Only `super_kwargs` are forwarded here; the explicitly named defaults
        # above pin GLM-OCR's architecture values and are presumably picked up
        # by the modular converter when generating the final configuration class.
        super().__init__(**super_kwargs)
class GlmOcrConfig(Glm4vConfig):
    r"""
    This is the configuration class to store the configuration of a [`GlmOcrModel`]. It is used to instantiate a
    GLM-OCR model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of
    GLM-OCR [zai-org/GLM-OCR](https://huggingface.co/zai-org/GLM-OCR).
    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.
    Args:
        text_config (`Union[PreTrainedConfig, dict]`, *optional*, defaults to `GlmOcrTextConfig`):
            The config object or dictionary of the text backbone.
        vision_config (`Union[PreTrainedConfig, dict]`, *optional*, defaults to `GlmOcrVisionConfig`):
            The config object or dictionary of the vision backbone.
        image_token_id (`int`, *optional*, defaults to 59280):
            The image token index to encode the image prompt.
        video_token_id (`int`, *optional*, defaults to 59281):
            The video token index to encode the image prompt.
        image_start_token_id (`int`, *optional*, defaults to 59256):
            The image start token index to encode the start of image.
        image_end_token_id (`int`, *optional*, defaults to 59257):
            The image end token index to encode the end of image.
        video_start_token_id (`int`, *optional*, defaults to 59258):
            The video start token index to encode the start of video.
        video_end_token_id (`int`, *optional*, defaults to 59259):
            The video end token index to encode the end of video.
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether the model's input and output word embeddings should be tied.
    ```python
    >>> from transformers import GlmOcrForConditionalGeneration, GlmOcrConfig
    >>> # Initializing a GLM-OCR style configuration
    >>> configuration = GlmOcrConfig()
    >>> # Initializing a model from the GLM-OCR style configuration
    >>> model = GlmOcrForConditionalGeneration(configuration)
    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""
    def __init__(
        self,
        text_config=None,
        vision_config=None,
        image_token_id=59280,
        video_token_id=59281,
        image_start_token_id=59256,
        image_end_token_id=59257,
        video_start_token_id=59258,
        video_end_token_id=59259,
        tie_word_embeddings=False,
        **super_kwargs,
    ):
        # Only `super_kwargs` are forwarded here; the explicitly named defaults
        # above pin GLM-OCR's special-token ids and are presumably picked up by
        # the modular converter when generating the final configuration class.
        super().__init__(**super_kwargs)
class GlmOcrTextAttention(Glm4vTextAttention, nn.Module):
    # Text-decoder attention; re-declares the q/k/v projections without bias terms.
    def __init__(self, config: GlmOcrTextConfig, layer_idx: int | None = None):
        # NOTE(review): the parent is initialized without arguments; presumably
        # the modular converter merges this body with Glm4vTextAttention.__init__
        # — confirm against the generated modeling file.
        super().__init__()
        # Fall back to hidden_size // num_attention_heads when the config does
        # not define an explicit `head_dim`.
        self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
        self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
        self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
        self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
class GlmOcrPreTrainedModel(Glm4vPreTrainedModel):
    # Drop (rather than warn about) checkpoint weights for language-model layers
    # from index 16 upward; with the default num_hidden_layers=16, layers 0-15
    # are instantiated, so such keys have no matching module.
    _keys_to_ignore_on_load_unexpected = [r"model\.language_model\.layers\.16.*"]
class GlmOcrModelOutputWithPast(Glm4vModelOutputWithPast):
    # Identical to Glm4vModelOutputWithPast; re-declared so the modular converter
    # emits a GlmOcr-prefixed output class in the generated modeling file.
    pass
class GlmOcrVisionAttention(Glm4vVisionAttention):
    # Vision self-attention: fused qkv projection plus per-head q/k RMS normalization.
    def __init__(self, config: GlmOcrVisionConfig) -> None:
        # NOTE(review): the parent is initialized without arguments; presumably
        # the modular converter merges this body with Glm4vVisionAttention.__init__.
        super().__init__()
        self.qkv = nn.Linear(config.hidden_size, config.hidden_size * 3, bias=config.attention_bias)
        self.proj = nn.Linear(config.hidden_size, config.hidden_size, bias=config.attention_bias)
        # Query/key normalization applied per head, before the rotary embedding.
        self.q_norm = GlmOcrRMSNorm(self.head_dim, eps=config.rms_norm_eps)
        self.k_norm = GlmOcrRMSNorm(self.head_dim, eps=config.rms_norm_eps)
    def forward(
        self,
        hidden_states: torch.Tensor,
        cu_seqlens: torch.Tensor,
        rotary_pos_emb: torch.Tensor | None = None,
        position_embeddings: tuple[torch.Tensor, torch.Tensor] | None = None,
        **kwargs,
    ) -> torch.Tensor:
        """Self-attention over a packed sequence of image patches.

        `hidden_states` is `(seq_len, hidden_size)` with all images concatenated;
        `cu_seqlens` holds the cumulative per-image boundaries, and
        `position_embeddings` the precomputed rotary `(cos, sin)` tables.
        """
        seq_length = hidden_states.shape[0]
        # Fused projection -> (3, seq, num_heads, head_dim), then unbind into q/k/v.
        query_states, key_states, value_states = (
            self.qkv(hidden_states).reshape(seq_length, 3, self.num_heads, -1).permute(1, 0, 2, 3).unbind(0)
        )
        query_states = self.q_norm(query_states)
        key_states = self.k_norm(key_states)
        cos, sin = position_embeddings
        query_states, key_states = apply_rotary_pos_emb_vision(query_states, key_states, cos, sin)
        # Add a leading batch dim of 1: (1, num_heads, seq, head_dim).
        query_states = query_states.transpose(0, 1).unsqueeze(0)
        key_states = key_states.transpose(0, 1).unsqueeze(0)
        value_states = value_states.transpose(0, 1).unsqueeze(0)
        attention_interface: Callable = ALL_ATTENTION_FUNCTIONS.get_interface(
            self.config._attn_implementation, eager_attention_forward
        )
        if is_flash_attention_requested(self.config):
            # Flash Attention: Use cu_seqlens for variable length attention
            max_seqlen = (cu_seqlens[1:] - cu_seqlens[:-1]).max()
            attn_output, _ = attention_interface(
                self,
                query_states,
                key_states,
                value_states,
                attention_mask=None,
                scaling=self.scaling,
                dropout=0.0 if not self.training else self.attention_dropout,
                cu_seq_lens_q=cu_seqlens,
                cu_seq_lens_k=cu_seqlens,
                max_length_q=max_seqlen,
                max_length_k=max_seqlen,
                is_causal=False,
                **kwargs,
            )
        else:
            # Other implementations: Process each chunk separately
            lengths = cu_seqlens[1:] - cu_seqlens[:-1]
            splits = [
                torch.split(tensor, lengths.tolist(), dim=2) for tensor in (query_states, key_states, value_states)
            ]
            attn_outputs = [
                attention_interface(
                    self,
                    q,
                    k,
                    v,
                    attention_mask=None,
                    scaling=self.scaling,
                    dropout=0.0 if not self.training else self.attention_dropout,
                    is_causal=False,
                    **kwargs,
                )[0]
                for q, k, v in zip(*splits)
            ]
            attn_output = torch.cat(attn_outputs, dim=1)
        # Merge heads back to (seq_len, hidden_size) and project out.
        attn_output = attn_output.reshape(seq_length, -1).contiguous()
        attn_output = self.proj(attn_output)
        return attn_output
class GlmOcrVisionBlock(Glm4vVisionBlock):
    # Same block layout as Glm4vVisionBlock, but the MLP bias follows the
    # config's `attention_bias` flag.
    def __init__(self, config) -> None:
        super().__init__()
        self.mlp = GlmOcrVisionMlp(config, bias=config.attention_bias)
class GlmOcrVisionPatchMerger(Glm4vVisionPatchMerger):
    # Identical to Glm4vVisionPatchMerger; re-declared so the modular converter
    # emits a GlmOcr-prefixed class in the generated modeling file.
    pass
class GlmOcrVisionModel(Glm4vVisionModel):
    def __init__(self, config) -> None:
        super().__init__(config)
        # GLM-OCR removes the parent's `embeddings` and `post_conv_layernorm`
        # modules and re-parameterizes the patch merger around `out_hidden_size`.
        del self.embeddings
        del self.post_conv_layernorm
        self.merger = GlmOcrVisionPatchMerger(
            dim=config.out_hidden_size,
            context_dim=config.out_hidden_size * config.in_channels,
            hidden_act=config.hidden_act,
        )
    def forward(self, hidden_states: torch.Tensor, grid_thw: torch.Tensor, **kwargs) -> torch.Tensor:
        r"""
        hidden_states (`torch.Tensor` of shape `(seq_len, hidden_size)`):
            The final hidden states of the model.
        grid_thw (`torch.Tensor` of shape `(num_images_or_videos, 3)`):
            The temporal, height and width of feature shape of each image in LLM.
        Returns:
            `torch.Tensor`: hidden_states.
        """
        hidden_states = self.patch_embed(hidden_states)
        rotary_pos_emb, image_type_ids = self.rot_pos_emb(grid_thw)
        # Duplicate the half-dim rotary table so cos/sin cover the full head dim.
        emb = torch.cat((rotary_pos_emb, rotary_pos_emb), dim=-1)
        position_embeddings = (emb.cos(), emb.sin())
        cu_seqlens = torch.repeat_interleave(grid_thw[:, 1] * grid_thw[:, 2], grid_thw[:, 0]).cumsum(
            dim=0,
            # Select dtype based on the following factors:
            #  - FA2 requires that cu_seqlens_q must have dtype int32
            #  - torch.onnx.export requires that cu_seqlens_q must have same dtype as grid_thw
            # See https://github.com/huggingface/transformers/pull/34852 for more information
            dtype=grid_thw.dtype if torch.jit.is_tracing() else torch.int32,
        )
        # Prepend a 0 so cu_seqlens[i]..cu_seqlens[i+1] delimits image i.
        cu_seqlens = F.pad(cu_seqlens, (1, 0), value=0)
        for blk in self.blocks:
            hidden_states = blk(
                hidden_states,
                cu_seqlens=cu_seqlens,
                position_embeddings=position_embeddings,
            )
        hidden_states = self.post_layernorm(hidden_states)
        # Regroup patches into spatial_merge_size x spatial_merge_size tiles,
        # move channels first, and downsample before the final merger projection.
        hidden_states = hidden_states.view(
            -1, self.spatial_merge_size, self.spatial_merge_size, hidden_states.shape[-1]
        )
        hidden_states = hidden_states.permute(0, 3, 1, 2)
        hidden_states = self.downsample(hidden_states).view(-1, self.config.out_hidden_size)
        merged_hidden_states = self.merger(hidden_states)
        return BaseModelOutputWithPooling(
            last_hidden_state=hidden_states,
            pooler_output=merged_hidden_states,
        )
class GlmOcrModel(Glm4vModel):
    # Identical to Glm4vModel; re-declared so the modular converter emits a
    # GlmOcr-prefixed model class in the generated modeling file.
    pass
class GlmOcrForConditionalGeneration(Glm4vForConditionalGeneration):
    # Identical to Glm4vForConditionalGeneration; re-declared so the modular
    # converter emits a GlmOcr-prefixed class in the generated modeling file.
    pass
# Public API of the generated glm_ocr modules. GlmOcrTextModel is not defined in
# this file — it is presumably produced by the modular converter from the Glm4v
# parent — hence the noqa on the undefined name.
__all__ = [
    "GlmOcrConfig",
    "GlmOcrTextConfig",
    "GlmOcrVisionConfig",
    "GlmOcrTextModel", # noqa: F822
    "GlmOcrVisionModel",
    "GlmOcrModel",
    "GlmOcrPreTrainedModel",
    "GlmOcrForConditionalGeneration",
]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/glm_ocr/modular_glm_ocr.py",
"license": "Apache License 2.0",
"lines": 364,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:tests/models/glm_ocr/test_modeling_glm_ocr.py | # Copyright 2026 the HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch GLM-4.6V model."""
import copy
import unittest
import pytest
from transformers import (
AutoProcessor,
GlmOcrConfig,
GlmOcrForConditionalGeneration,
GlmOcrModel,
is_torch_available,
logging,
)
from transformers.testing_utils import (
CaptureLogger,
Expectations,
cleanup,
require_deterministic_for_xpu,
require_flash_attn,
require_torch,
require_torch_accelerator,
require_torch_greater_or_equal,
set_config_for_less_flaky_test,
set_model_for_less_flaky_test,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin, assert_similar_generate_outputs
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import (
ModelTesterMixin,
floats_tensor,
ids_tensor,
)
if is_torch_available():
import torch
from transformers.cache_utils import DynamicCache
from transformers.generation import CompileConfig
class GlmOcrVisionText2TextModelTester:
    """Builds a tiny GLM-OCR config and matching dummy image+text inputs for the common model tests."""
    # NOTE: the dict defaults below are shared across instances (standard pattern
    # in this test suite); they are only read, never mutated, in this class.
    def __init__(
        self,
        parent,
        batch_size=3,
        seq_length=7,
        num_channels=3,
        ignore_index=-100,
        image_size=112,
        video_start_token_id=3,
        video_end_token_id=4,
        image_start_token_id=5,
        image_end_token_id=6,
        image_token_id=7,
        video_token_id=8,
        is_training=True,
        text_config={
            "vocab_size": 99,
            "hidden_size": 16,
            "intermediate_size": 22,
            "num_hidden_layers": 2,
            "num_attention_heads": 2,
            "num_key_value_heads": 1,
            "output_channels": 64,
            "head_dim": 8,
            "hidden_act": "silu",
            "max_position_embeddings": 512,
            "rope_parameters": {"type": "default", "rope_theta": 10000, "mrope_section": [2, 1, 1]},
            "tie_word_embeddings": True,
            "bos_token_id": 0,
            "eos_token_id": 0,
            "pad_token_id": 0,
        },
        vision_config={
            "depth": 2,
            "hidden_act": "silu",
            "hidden_size": 48,
            "num_heads": 12,
            "out_hidden_size": 16,
            "intermediate_size": 22,
            "patch_size": 14,
            "spatial_merge_size": 1,
            "temporal_patch_size": 2,
        },
    ):
        self.parent = parent
        self.ignore_index = ignore_index
        self.bos_token_id = text_config["bos_token_id"]
        self.eos_token_id = text_config["eos_token_id"]
        self.pad_token_id = text_config["pad_token_id"]
        self.video_start_token_id = video_start_token_id
        self.video_end_token_id = video_end_token_id
        self.image_start_token_id = image_start_token_id
        self.image_end_token_id = image_end_token_id
        self.image_token_id = image_token_id
        self.video_token_id = video_token_id
        self.text_config = text_config
        self.vision_config = vision_config
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.is_training = is_training
        self.hidden_size = text_config["hidden_size"]
        self.num_hidden_layers = text_config["num_hidden_layers"]
        self.num_attention_heads = text_config["num_attention_heads"]
        self.vocab_size = text_config["vocab_size"]
        # Each sample carries a fixed-size block of image placeholder tokens; the
        # effective text sequence length includes them.
        self.num_image_tokens = 64
        self.seq_length = seq_length + self.num_image_tokens
    def get_config(self):
        """Return a GlmOcrConfig wired up with this tester's tiny sub-configs and token ids."""
        return GlmOcrConfig(
            text_config=self.text_config,
            vision_config=self.vision_config,
            image_token_id=self.image_token_id,
            video_token_id=self.video_token_id,
            video_start_token_id=self.video_start_token_id,
            video_end_token_id=self.video_end_token_id,
            image_start_token_id=self.image_start_token_id,
            image_end_token_id=self.image_end_token_id,
        )
    def prepare_config_and_inputs(self):
        """Return (config, pixel_values) with pixel_values in packed patch layout: (num_patches, patch_dim)."""
        config = self.get_config()
        patch_size = config.vision_config.patch_size
        temporal_patch_size = config.vision_config.temporal_patch_size
        pixel_values = floats_tensor(
            [
                self.batch_size * (self.image_size**2) // (patch_size**2),
                self.num_channels * (patch_size**2) * temporal_patch_size,
            ]
        )
        return config, pixel_values
    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) with input_ids containing an image-token block per sample."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
        # Scrub any randomly sampled special tokens so placements below are unique.
        input_ids[input_ids == self.video_token_id] = self.pad_token_id
        input_ids[input_ids == self.image_token_id] = self.pad_token_id
        input_ids[input_ids == self.video_start_token_id] = self.pad_token_id
        input_ids[input_ids == self.image_start_token_id] = self.pad_token_id
        input_ids[input_ids == self.video_end_token_id] = self.pad_token_id
        input_ids[input_ids == self.image_end_token_id] = self.pad_token_id
        # Lay out: [image_start, <num_image_tokens placeholders>, image_end, text...]
        input_ids[:, 0] = self.image_start_token_id
        input_ids[:, 1 : 1 + self.num_image_tokens] = self.image_token_id
        input_ids[:, 1 + self.num_image_tokens] = self.image_end_token_id
        patch_size = config.vision_config.patch_size
        patches_per_side = self.image_size // patch_size
        # Token-type 1 marks the multimodal (image placeholder) positions.
        mm_token_type_ids = torch.zeros_like(input_ids)
        mm_token_type_ids[:, 1 : 1 + self.num_image_tokens] = 1
        inputs_dict = {
            "pixel_values": pixel_values,
            "image_grid_thw": torch.tensor(
                [[1, patches_per_side, patches_per_side]] * self.batch_size, device=torch_device
            ),
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "mm_token_type_ids": mm_token_type_ids,
        }
        return config, inputs_dict
@require_torch
class GlmOcrModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
    """Common-suite tests for GLM-OCR, with overrides for its packed (bs*patch_len, dim) image inputs."""
    all_model_classes = (GlmOcrModel, GlmOcrForConditionalGeneration) if is_torch_available() else ()
    model_split_percents = [0.7, 0.9] # model too big to split at 0.5
    _is_composite = True
    def setUp(self):
        self.model_tester = GlmOcrVisionText2TextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GlmOcrConfig, has_text_modality=False)
    def test_config(self):
        self.config_tester.run_common_tests()
    # GLM4V has images shaped as (bs*patch_len, dim) so we can't slice to batches in generate
    def prepare_config_and_inputs_for_generate(self, batch_size=2):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        # We don't want a few model inputs in our model input dictionary for generation tests
        input_keys_to_ignore = [
            # we don't want to mask attention heads
            # we don't want encoder-decoder models to start from filled decoder ids
            "decoder_input_ids",
            "decoder_attention_mask",
            # we'll set cache use in each test differently
            "use_cache",
            # Ignore labels if it is in the input dict
            "labels",
            # model-specific exceptions should overload/overwrite this function
        ]
        # The diff from the general `prepare_config_and_inputs_for_generate` lies here:
        # pixel_values is packed per-patch, so it must be sliced by patch count, not batch.
        patch_size = config.vision_config.patch_size
        filtered_image_length = batch_size * (self.model_tester.image_size**2) // (patch_size**2)
        filtered_inputs_dict = {
            k: v[:batch_size, ...] if isinstance(v, torch.Tensor) else v
            for k, v in inputs_dict.items()
            if k not in input_keys_to_ignore
        }
        filtered_inputs_dict["pixel_values"] = inputs_dict["pixel_values"][:filtered_image_length]
        # It is important set `eos_token_id` to `None` to avoid early stopping (would break for length-based checks)
        text_gen_config = config.get_text_config(decoder=True)
        if text_gen_config.eos_token_id is not None and text_gen_config.pad_token_id is None:
            text_gen_config.pad_token_id = (
                text_gen_config.eos_token_id
                if isinstance(text_gen_config.eos_token_id, int)
                else text_gen_config.eos_token_id[0]
            )
        text_gen_config.eos_token_id = None
        text_gen_config.forced_eos_token_id = None
        return config, filtered_inputs_dict
    def test_inputs_embeds(self):
        """Forward must accept inputs_embeds in place of input_ids (image inputs removed)."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))
            input_ids = inputs["input_ids"]
            del inputs["input_ids"]
            del inputs["pixel_values"]
            del inputs["image_grid_thw"]
            wte = model.get_input_embeddings()
            inputs["inputs_embeds"] = wte(input_ids)
            with torch.no_grad():
                model(**inputs)[0]
    def test_inputs_embeds_matches_input_ids(self):
        """Passing inputs_embeds computed from input_ids must give the same output as input_ids."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            inputs = self._prepare_for_class(inputs_dict, model_class)
            input_ids = inputs["input_ids"]
            del inputs["input_ids"]
            del inputs["pixel_values"]
            del inputs["image_grid_thw"]
            inputs_embeds = model.get_input_embeddings()(input_ids)
            with torch.no_grad():
                out_ids = model(input_ids=input_ids, **inputs)[0]
                out_embeds = model(inputs_embeds=inputs_embeds, **inputs)[0]
            torch.testing.assert_close(out_embeds, out_ids)
    @pytest.mark.generate
    @pytest.mark.torch_compile_test
    @require_torch_greater_or_equal("2.6") # Uses torch.compiler.set_stance
    def test_generate_compile_model_forward_fullgraph(self):
        """
        Tests that `.generate` is compatible with torch.compile, keeping the same results. Also confirms that
        `.forward` called from `.generate` sees no graph breaks or recompilations when compiled.
        ⚠️ Runs two sequential generations to ensure the cache doesn't get stuck after the first compiled run! ⚠️
        """
        # GLM-OCR inputs cannot be split simply by batch size, therefore overridden
        for model_class in self.all_generative_model_classes:
            # 1. Test exclusion criteria
            if not model_class._can_compile_fullgraph:
                self.skipTest("This model doesn't support compilation without graph breaks")
            # 2. Prepares two sets of inputs
            config, inputs_dict = self.prepare_config_and_inputs_for_generate(batch_size=4)
            set_config_for_less_flaky_test(config)
            model = model_class(config).to(torch_device)
            set_model_for_less_flaky_test(model)
            model.eval()  # otherwise `self.training` is `True` -- this flag is used at attn mask creation time
            # Some composite models have a custom generate and will call an inner model's generate -> that inner model
            # is the one that gets compiled.
            # (Note for the future: if BLIP starts causing problems, let's stop testing it)
            if "blip" in model.__class__.__name__.lower():
                model_to_be_compiled = model.language_model
            else:
                model_to_be_compiled = model
            # creates two sets of *different* inputs with the same shape
            main_input = inputs_dict[model.main_input_name].to(torch_device)
            half_batch_size = main_input.shape[0] // 2
            patch_size = config.vision_config.patch_size
            half_image_length = half_batch_size * (self.model_tester.image_size**2) // (patch_size**2)
            input_1 = {}
            input_2 = {}
            for key, value in inputs_dict.items():
                if isinstance(value, torch.Tensor):
                    input_1[key] = value[:half_batch_size, :].to(torch_device)
                    input_2[key] = value[half_batch_size : half_batch_size * 2, :].to(torch_device)
                else:
                    input_1[key] = value
                    input_2[key] = value
            # pixel_values is packed per-patch: re-slice it by patch count, not batch.
            input_1["pixel_values"] = inputs_dict["pixel_values"][:half_image_length]
            input_2["pixel_values"] = inputs_dict["pixel_values"][half_image_length : half_image_length * 2]
            model_input_sets = [input_1, input_2]
            self.assertTrue(
                model_input_sets[0][model.main_input_name].shape == model_input_sets[1][model.main_input_name].shape
            )
            # 3. compilation-specific setup and generation parameterization
            torch.compiler.reset()  # prevent cached compilation from being used in the test
            has_defined_cache_implementation = model.generation_config.cache_implementation is not None
            compile_config = CompileConfig(fullgraph=True, dynamic=False)  # Error out on dynamic shapes
            compile_config._compile_all_devices = True  # force compilation (e.g. fast CI, CPU)
            generation_kwargs = {
                "use_cache": True,
                "do_sample": False,
                "max_new_tokens": 5,
                "return_dict_in_generate": True,
                "output_scores": True,
                "compile_config": compile_config,
            }
            # 4. get eager + dynamic cache results for future comparison
            dynamic_outputs = []
            # Ignores all `torch.compile` usage, useful to test models that that have non-default compilable caches
            # (who would have used compilation in this section)
            with torch.compiler.set_stance("force_eager"):
                for model_inputs in model_input_sets:
                    gen_out = model.generate(**model_inputs, **generation_kwargs)
                    dynamic_outputs.append(gen_out)
                    # sanity checks for the default cache implementation
                    if not has_defined_cache_implementation:
                        decoder_cache = (
                            gen_out.past_key_values.self_attention_cache
                            if config.is_encoder_decoder
                            else gen_out.past_key_values
                        )
                        self.assertTrue(isinstance(decoder_cache, DynamicCache))
                        self.assertFalse(decoder_cache.is_compileable)
                        # our auto compile should NOT have been called
                        self.assertFalse(hasattr(model_to_be_compiled, "_compiled_call"))
            # 5. get compiled results -- relies on the automatic compilation triggered by specific compilable caches
            if not has_defined_cache_implementation:
                generation_kwargs["cache_implementation"] = "static"
            compiled_outputs = []
            # Uses a context manager to catch recompilation logs. If there is any recompilation, this test fails.
            # Try/Finally is used to ensure that the log options are reset even if an error is raised.
            try:
                torch._logging.set_logs(recompiles_verbose=True)
                logger = logging.get_logger("torch._dynamo.guards")
                with CaptureLogger(logger) as cl:
                    for model_inputs in model_input_sets:
                        # with torch.compiler.set_stance("fail_on_recompile"):
                        gen_out = model.generate(**model_inputs, **generation_kwargs)
                        compiled_outputs.append(gen_out)
                        # sanity checks
                        decoder_cache = (
                            gen_out.past_key_values.self_attention_cache
                            if config.is_encoder_decoder
                            else gen_out.past_key_values
                        )
                        self.assertFalse(isinstance(decoder_cache, DynamicCache))
                        self.assertTrue(decoder_cache.is_compileable)
                        # our auto compile should have been called
                        self.assertTrue(hasattr(model_to_be_compiled, "_compiled_call"))
            finally:
                torch._logging.set_logs()
            # Compilation of sliding layers necessarily has recompiles with `dynamic=False` - however this test
            # still checks that `fullgraph=True` is supported in this case, as compilation with `dynamic=None`
            # is the default and does not actually lead to too many recompiles
            has_sliding_layers = any(decoder_cache.is_sliding)
            has_recompilation = "Recompiling" in cl.out or ("guard" in cl.out and "failure" in cl.out)
            if not has_sliding_layers and has_recompilation:
                raise RuntimeError(
                    f"`torch.compile` recompiled part of the forward pass in {model.__class__.__name__}. "
                    "See the test logs for more details."
                )
            for dynamic_result, compiled_result in zip(dynamic_outputs, compiled_outputs):
                assert_similar_generate_outputs(dynamic_result, compiled_result)
@require_torch
class GlmOcrIntegrationTest(unittest.TestCase):
    """Slow, hub-backed integration tests for GLM-OCR conditional generation.

    Every test downloads the ``zai-org/GLM-OCR`` checkpoint and compares decoded
    generations against hard-coded expected strings, so the exact order of
    input construction, seeding, and `generate` calls must be preserved.
    """

    def setUp(self):
        # Release accelerator memory from previous tests before loading the model.
        cleanup(torch_device, gc_collect=True)
        self.processor = AutoProcessor.from_pretrained("zai-org/GLM-OCR")
        # Single-image chat message; the question intentionally mismatches the image
        # (a cat picture), which the expected outputs below reflect.
        self.message = [
            {
                "role": "user",
                "content": [
                    {
                        "type": "image",
                        "url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg",
                    },
                    {"type": "text", "text": "What kind of dog is this?"},
                ],
            }
        ]
        # Second message with a different image resolution, used by the batch tests.
        self.message2 = [
            {
                "role": "user",
                "content": [
                    {
                        "type": "image",
                        "url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/coco_sample.png",
                    },
                    {"type": "text", "text": "What kind of dog is this?"},
                ],
            }
        ]
    def tearDown(self):
        cleanup(torch_device, gc_collect=True)
    @slow
    def test_small_model_integration_test(self):
        """Single-sample generation: checks input ids, pixel values, and decoded text."""
        model = GlmOcrForConditionalGeneration.from_pretrained("zai-org/GLM-OCR", dtype="auto", device_map="auto")
        inputs = self.processor.apply_chat_template(
            self.message, tokenize=True, add_generation_prompt=True, return_dict=True, return_tensors="pt"
        )
        expected_input_ids = [151331, 151333, 151336, 198, 151339, 151343, 151343, 151343, 151343, 151343, 151343, 151343, 151343, 151343, 151343, 151343, 151343] # fmt: skip
        assert expected_input_ids == inputs.input_ids[0].tolist()[:17]
        expected_pixel_slice = torch.tensor(
            [
                [-0.0988, -0.0842, -0.0842],
                [-0.5660, -0.5514, -0.4200],
                [-0.0259, -0.0259, -0.0259],
                [-0.1280, -0.0988, -0.2010],
                [-0.4638, -0.5806, -0.6974],
                [-1.2083, -1.2229, -1.2083],
            ],
            dtype=torch.float32,
            device="cpu",
        )
        assert torch.allclose(expected_pixel_slice, inputs.pixel_values[:6, :3], atol=3e-3)
        # verify generation
        inputs = inputs.to(torch_device)
        # This model on the hub has `do_sample=True`.
        torch.manual_seed(42)
        output = model.generate(**inputs, max_new_tokens=30)
        EXPECTED_DECODED_TEXT = "\nWhat kind of dog is this?\n<think>Got it, let's look at the image. The animal in the picture doesn't look like a dog; it's actually a cat. Specifically"
        self.assertEqual(
            self.processor.decode(output[0], skip_special_tokens=True),
            EXPECTED_DECODED_TEXT,
        )
    @slow
    def test_small_model_integration_test_batch(self):
        """Batched generation with two identical messages."""
        model = GlmOcrForConditionalGeneration.from_pretrained("zai-org/GLM-OCR", dtype="auto", device_map="auto")
        batch_messages = [self.message] * 2
        inputs = self.processor.apply_chat_template(
            batch_messages, tokenize=True, add_generation_prompt=True, return_dict=True, return_tensors="pt"
        ).to(torch_device)
        # This model on the hub has `do_sample=True`.
        torch.manual_seed(42)
        # it should not matter whether two images are the same size or not
        output = model.generate(**inputs, max_new_tokens=30)
        EXPECTED_DECODED_TEXT = [
            "\nWhat kind of dog is this?\n<think>Got it, let's look at the image. The animal in the picture doesn't look like a dog; it's actually a cat. Specifically",
            "\nWhat kind of dog is this?\n<think>Got it, let's look at the image. The animal in the picture has a stocky body, thick fur, and a face that's"
        ] # fmt: skip
        self.assertEqual(
            self.processor.batch_decode(output, skip_special_tokens=True),
            EXPECTED_DECODED_TEXT,
        )
    @slow
    def test_small_model_integration_test_with_video(self):
        """Video input path, using a reduced `max_image_size` and float16 weights."""
        processor = AutoProcessor.from_pretrained("zai-org/GLM-OCR", max_image_size={"longest_edge": 50176})
        model = GlmOcrForConditionalGeneration.from_pretrained(
            "zai-org/GLM-OCR", dtype=torch.float16, device_map="auto"
        )
        questions = ["Describe this video."]
        video_urls = ["https://huggingface.co/datasets/hf-internal-testing/fixtures_videos/resolve/main/tennis.mp4"]
        messages = [
            [
                {
                    "role": "user",
                    "content": [
                        {
                            "type": "video",
                            "video": video_url,
                        },
                        {"type": "text", "text": question},
                    ],
                }
            ]
            for question, video_url in zip(questions, video_urls)
        ]
        inputs = processor.apply_chat_template(
            messages, tokenize=True, add_generation_prompt=True, return_dict=True, return_tensors="pt", padding=True
        ).to(torch_device)
        # This model on the hub has `do_sample=True`.
        torch.manual_seed(42)
        output = model.generate(**inputs, max_new_tokens=30)
        EXPECTED_DECODED_TEXT = ["\n012345Describe this video.\n<think>Got it, let's analyze the video. First, the scene is an indoor tennis court. There are two players: one in a white shirt"] # fmt: skip
        self.assertEqual(
            processor.batch_decode(output, skip_special_tokens=True),
            EXPECTED_DECODED_TEXT,
        )
    @slow
    @require_deterministic_for_xpu
    def test_small_model_integration_test_expand(self):
        """Beam search with `num_return_sequences=2`; expectations vary per device."""
        model = GlmOcrForConditionalGeneration.from_pretrained("zai-org/GLM-OCR", dtype="auto", device_map="auto")
        inputs = self.processor.apply_chat_template(
            self.message, tokenize=True, add_generation_prompt=True, return_dict=True, return_tensors="pt"
        ).to(torch_device)
        # This model on the hub has `do_sample=True`.
        torch.manual_seed(42)
        output = model.generate(**inputs, max_new_tokens=30, do_sample=False, num_beams=2, num_return_sequences=2)
        # fmt: off
        EXPECTED_DECODED_TEXTS = Expectations(
            {
                (None, None): ["\nWhat kind of dog is this?\n<think>Got it, let's look at the image. The animal in the picture doesn't look like a dog; it's actually a cat. Specifically",
                               "\nWhat kind of dog is this?\n<think>Got it, let's look at the image. The animal in the picture doesn't look like a dog; it's actually a cat, specifically"
                ],
                ("xpu", None): ["\nWhat kind of dog is this?\n<think>Got it, let's look at the image. The animal in the picture is not a dog; it's a cat. Specifically, it looks",
                                "\nWhat kind of dog is this?\n<think>Got it, let's look at the image. The animal in the picture is not a dog; it's a cat, specifically a Pallas"
                ],
            }
        )
        # fmt: on
        EXPECTED_DECODED_TEXT = EXPECTED_DECODED_TEXTS.get_expectation()
        decoded_text = self.processor.batch_decode(output, skip_special_tokens=True)
        self.assertEqual(decoded_text, EXPECTED_DECODED_TEXT)
    @slow
    def test_small_model_integration_test_batch_wo_image(self):
        """Mixed batch: one sample with an image, one text-only sample."""
        model = GlmOcrForConditionalGeneration.from_pretrained("zai-org/GLM-OCR", dtype="auto", device_map="auto")
        message_wo_image = [
            {"role": "user", "content": [{"type": "text", "text": "Who are you?"}]},
        ]
        batched_messages = [self.message, message_wo_image]
        inputs = self.processor.apply_chat_template(
            batched_messages,
            tokenize=True,
            add_generation_prompt=True,
            return_dict=True,
            return_tensors="pt",
            padding=True,
        ).to(torch_device)
        # This model on the hub has `do_sample=True`.
        torch.manual_seed(42)
        # it should not matter whether two images are the same size or not
        output = model.generate(**inputs, max_new_tokens=30)
        EXPECTED_DECODED_TEXT = [
            "\nWhat kind of dog is this?\n<think>Got it, let's look at the image. The animal in the picture doesn't look like a dog; it's actually a cat. Specifically",
            "\nWho are you?\n<think>Got it, let's look at the user's question: \"Who are you?\" This is a common question when someone is just starting a conversation"
        ] # fmt: skip
        self.assertEqual(
            self.processor.batch_decode(output, skip_special_tokens=True),
            EXPECTED_DECODED_TEXT,
        )
    @slow
    def test_small_model_integration_test_batch_different_resolutions(self):
        """Batch with two images of different resolutions."""
        model = GlmOcrForConditionalGeneration.from_pretrained("zai-org/GLM-OCR", dtype="auto", device_map="auto")
        batched_messages = [self.message, self.message2]
        inputs = self.processor.apply_chat_template(
            batched_messages,
            tokenize=True,
            add_generation_prompt=True,
            return_dict=True,
            return_tensors="pt",
            padding=True,
        ).to(torch_device)
        # This model on the hub has `do_sample=True`.
        torch.manual_seed(42)
        # it should not matter whether two images are the same size or not
        output = model.generate(**inputs, max_new_tokens=30)
        EXPECTED_DECODED_TEXT = [
            "\nWhat kind of dog is this?\n<think>Got it, let's look at the image. The animal in the picture doesn't look like a dog; it's actually a cat. Specifically",
            "\nWhat kind of dog is this?\n<think>Got it, let's look at the image. Wait, the animals here are cats, not dogs. The question is about a dog, but",
        ] # fmt: skip
        self.assertEqual(
            self.processor.batch_decode(output, skip_special_tokens=True),
            EXPECTED_DECODED_TEXT,
        )
    @slow
    @require_flash_attn
    @require_torch_accelerator
    def test_small_model_integration_test_batch_flashatt2(self):
        """Same different-resolution batch as above, but with flash-attention 2 + bf16."""
        model = GlmOcrForConditionalGeneration.from_pretrained(
            "zai-org/GLM-OCR",
            dtype=torch.bfloat16,
            attn_implementation="flash_attention_2",
            device_map="auto",
        )
        batched_messages = [self.message, self.message2]
        inputs = self.processor.apply_chat_template(
            batched_messages,
            tokenize=True,
            add_generation_prompt=True,
            return_dict=True,
            return_tensors="pt",
            padding=True,
        ).to(torch_device)
        # This model on the hub has `do_sample=True`.
        torch.manual_seed(42)
        # it should not matter whether two images are the same size or not
        output = model.generate(**inputs, max_new_tokens=30)
        EXPECTED_DECODED_TEXT = [
            "\nWhat kind of dog is this?\n<think>Got it, let's look at the image. The animal in the picture doesn't look like a dog. Wait, it's a cat,",
            "\nWhat kind of dog is this?\n<think>Got it, let's look at the image. Wait, the animals here are cats, not dogs. The question is about a dog, but"
        ] # fmt: skip
        self.assertEqual(
            self.processor.batch_decode(output, skip_special_tokens=True),
            EXPECTED_DECODED_TEXT,
        )
    @slow
    @require_flash_attn
    @require_torch_accelerator
    def test_small_model_integration_test_batch_wo_image_flashatt2(self):
        """Mixed image/text-only batch with flash-attention 2 + bf16."""
        model = GlmOcrForConditionalGeneration.from_pretrained(
            "zai-org/GLM-OCR",
            dtype=torch.bfloat16,
            attn_implementation="flash_attention_2",
            device_map="auto",
        )
        message_wo_image = [
            {"role": "user", "content": [{"type": "text", "text": "Who are you?"}]},
        ]
        batched_messages = [self.message, message_wo_image]
        inputs = self.processor.apply_chat_template(
            batched_messages,
            tokenize=True,
            add_generation_prompt=True,
            return_dict=True,
            return_tensors="pt",
            padding=True,
        ).to(torch_device)
        # This model on the hub has `do_sample=True`.
        torch.manual_seed(42)
        # it should not matter whether two images are the same size or not
        output = model.generate(**inputs, max_new_tokens=30)
        EXPECTED_DECODED_TEXT = [
            "\nWhat kind of dog is this?\n<think>Got it, let's look at the image. The animal in the picture doesn't look like a dog; it's actually a cat. Specifically",
            "\nWho are you?\n<think>Got it, let's look at the user's question: \"Who are you?\" This is a common question when someone is just starting a conversation"
        ] # fmt: skip
        self.assertEqual(
            self.processor.batch_decode(output, skip_special_tokens=True),
            EXPECTED_DECODED_TEXT,
        )
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/glm_ocr/test_modeling_glm_ocr.py",
"license": "Apache License 2.0",
"lines": 617,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:src/transformers/models/solar_open/modular_solar_open.py | # Copyright 2026 Upstage and HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch SolarOpen model."""
from torch import nn
from ...modeling_rope_utils import RopeParameters
from ...utils import logging
from ..glm4_moe.configuration_glm4_moe import Glm4MoeConfig
from ..glm4_moe.modeling_glm4_moe import (
Glm4MoeForCausalLM,
Glm4MoeModel,
Glm4MoeMoE,
Glm4MoePreTrainedModel,
Glm4MoeRMSNorm,
)
from ..llama.modeling_llama import LlamaAttention, LlamaDecoderLayer
logger = logging.get_logger(__name__)
class SolarOpenConfig(Glm4MoeConfig):
    r"""
    This is the configuration class to store the configuration of a [`SolarOpenModel`]. It is used to instantiate a
    SolarOpen model according to the specified arguments, defining the model architecture.
    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.
    Instantiating a configuration defaults will yield a similar configuration to that of
    [upstage/Solar-Open-100B](https://huggingface.co/upstage/Solar-Open-100B) architecture.
    Args:
        vocab_size (`int`, *optional*, defaults to 196608):
            Vocabulary size of the SolarOpen model.
        hidden_size (`int`, *optional*, defaults to 4096):
            Dimension of the hidden representations.
        moe_intermediate_size (`int`, *optional*, defaults to 1280):
            Intermediate size of the routed expert.
        num_hidden_layers (`int`, *optional*, defaults to 48):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 64):
            Number of attention heads for each attention layer.
        num_key_value_heads (`int`, *optional*, defaults to 8):
            Number of key_value heads for Grouped Query Attention.
        n_shared_experts (`int`, *optional*, defaults to 1):
            Number of shared experts.
        n_routed_experts (`int`, *optional*, defaults to 128):
            Number of routed experts.
        head_dim (`int`, *optional*, defaults to 128):
            Dimension of each attention head.
        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
            The non-linear activation function in the decoder.
        max_position_embeddings (`int`, *optional*, defaults to 131072):
            The maximum sequence length that this model might ever be used with.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer.
        rms_norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon used by the rms normalization layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether to return the last key/values attentions.
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether the model's input and output word embeddings should be tied.
        rope_parameters (`RopeParameters`, *optional*):
            Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain
            a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE
            with longer `max_position_embeddings`.
        attention_bias (`bool`, *optional*, defaults to `False`):
            Whether to use a bias in the projection layers.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        num_experts_per_tok (`int`, *optional*, defaults to 8):
            Number of experts per token.
        routed_scaling_factor (`float`, *optional*, defaults to 1.0):
            Scaling factor for routed experts.
        n_group (`int`, *optional*, defaults to 1):
            Number of groups for routed experts.
        topk_group (`int`, *optional*, defaults to 1):
            Number of selected groups for each token.
        norm_topk_prob (`bool`, *optional*, defaults to `True`):
            Whether to normalize the topk probabilities.
        bos_token_id (`int`, *optional*):
            Beginning of stream token id.
        eos_token_id (`int`, *optional*):
            End of stream token id.
        pad_token_id (`int`, *optional*):
            Padding token id.
    """
    model_type = "solar_open"
    # Default RoPE base frequency used when `rope_parameters` does not supply one.
    default_theta = 1_000_000.0
    # Default tensor parallel plan for base model `SolarOpenModel`
    base_model_tp_plan = {
        "layers.*.self_attn.q_proj": "colwise",
        "layers.*.self_attn.k_proj": "colwise",
        "layers.*.self_attn.v_proj": "colwise",
        "layers.*.self_attn.o_proj": "rowwise",
        "layers.*.mlp.experts.gate_up_proj": "packed_colwise",
        "layers.*.mlp.experts.down_proj": "rowwise",
        "layers.*.mlp.experts": "moe_tp_experts",
    }
    def __init__(
        self,
        vocab_size: int = 196608,
        hidden_size: int = 4096,
        moe_intermediate_size: int = 1280,
        num_hidden_layers: int = 48,
        num_attention_heads: int = 64,
        num_key_value_heads: int = 8,
        n_shared_experts: int = 1,
        n_routed_experts: int = 128,
        head_dim: int = 128,
        hidden_act: str = "silu",
        max_position_embeddings: int = 131072,
        initializer_range: float = 0.02,
        rms_norm_eps: float = 1e-5,
        use_cache: bool = True,
        tie_word_embeddings: bool = False,
        rope_parameters: RopeParameters | None = None,
        attention_bias: bool = False,
        attention_dropout: float = 0.0,
        num_experts_per_tok: int = 8,
        routed_scaling_factor: float = 1.0,
        n_group: int = 1,
        topk_group: int = 1,
        norm_topk_prob: bool = True,
        bos_token_id: int | None = None,
        eos_token_id: int | None = None,
        pad_token_id: int | None = None,
        **kwargs,
    ):
        # Default partial_rotary_factor to 1.0 (instead of 0.5 in Glm4MoeConfig).
        # `setdefault` ensures this value is not overridden by subsequent calls.
        # This workaround is required due to modular inheritance limitations.
        kwargs.setdefault("partial_rotary_factor", 1.0)
        self.head_dim = head_dim
        # NOTE(review): `moe_intermediate_size` is forwarded under the name
        # `moe_hidden_size` — confirm this matches Glm4MoeConfig's parameter name.
        super().__init__(
            vocab_size=vocab_size,
            hidden_size=hidden_size,
            moe_hidden_size=moe_intermediate_size,
            num_hidden_layers=num_hidden_layers,
            num_attention_heads=num_attention_heads,
            num_key_value_heads=num_key_value_heads,
            n_shared_experts=n_shared_experts,
            n_routed_experts=n_routed_experts,
            head_dim=head_dim,
            hidden_act=hidden_act,
            max_position_embeddings=max_position_embeddings,
            initializer_range=initializer_range,
            rms_norm_eps=rms_norm_eps,
            use_cache=use_cache,
            tie_word_embeddings=tie_word_embeddings,
            rope_parameters=rope_parameters,
            attention_bias=attention_bias,
            attention_dropout=attention_dropout,
            num_experts_per_tok=num_experts_per_tok,
            routed_scaling_factor=routed_scaling_factor,
            n_group=n_group,
            topk_group=topk_group,
            norm_topk_prob=norm_topk_prob,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            pad_token_id=pad_token_id,
            **kwargs,
        )
        # Drop Glm4Moe-only attributes that SolarOpen does not use; the modular
        # converter interprets these `del` statements when generating the config.
        del self.intermediate_size
        del self.first_k_dense_replace
        del self.use_qk_norm
class SolarOpenDecoderLayer(LlamaDecoderLayer):
    """Llama-style decoder layer whose feed-forward block is the SolarOpen MoE MLP."""

    def __init__(self, config: SolarOpenConfig, layer_idx: int):
        super().__init__(config, layer_idx)
        # Replace the dense MLP built by LlamaDecoderLayer with the MoE block
        # (the config removes `first_k_dense_replace`, so every layer gets MoE).
        self.mlp = SolarOpenMoE(config)
class SolarOpenMoE(Glm4MoeMoE):
    """Mixture-of-experts MLP, identical to the Glm4Moe implementation."""

    pass
class SolarOpenAttention(LlamaAttention):
    """Llama attention with an explicitly re-created output projection."""

    def __init__(self, config: SolarOpenConfig, layer_idx: int):
        super().__init__(config, layer_idx)
        # Re-create o_proj sized from num_attention_heads * head_dim with no bias.
        # NOTE(review): may duplicate LlamaAttention's own o_proj construction —
        # kept explicit here so the generated model pins this exact shape/bias.
        self.o_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=False)
class SolarOpenRMSNorm(Glm4MoeRMSNorm):
    """RMS normalization layer, identical to the Glm4Moe implementation."""

    pass
class SolarOpenPreTrainedModel(Glm4MoePreTrainedModel):
    # Reset the parent's ignore list: no checkpoint keys are expected to be unexpected.
    _keys_to_ignore_on_load_unexpected = None
class SolarOpenModel(Glm4MoeModel):
    """Bare SolarOpen decoder model, identical to Glm4MoeModel."""

    pass
class SolarOpenForCausalLM(Glm4MoeForCausalLM):
    """SolarOpen model with a causal language-modeling head, identical to Glm4Moe."""

    pass
# Public symbols exported by the generated solar_open modeling/config modules.
__all__ = [
    "SolarOpenConfig",
    "SolarOpenPreTrainedModel",
    "SolarOpenModel",
    "SolarOpenForCausalLM",
]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/solar_open/modular_solar_open.py",
"license": "Apache License 2.0",
"lines": 194,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/lighton_ocr/modular_lighton_ocr.py | # Copyright 2026 The LightOn Team and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any
import numpy as np
import torch
from torch import nn
from ...cache_utils import Cache
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import BatchFeature
from ...image_utils import ImageInput
from ...modeling_outputs import BaseModelOutputWithPooling
from ...modeling_utils import PreTrainedModel
from ...processing_utils import (
MultiModalData,
ProcessingKwargs,
ProcessorMixin,
Unpack,
)
from ...tokenization_utils_base import PreTokenizedInput, TextInput
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple
from ..auto import CONFIG_MAPPING, AutoConfig, AutoModel
from ..mistral3.modeling_mistral3 import (
Mistral3ForConditionalGeneration,
Mistral3Model,
Mistral3ModelOutputWithPast,
Mistral3MultiModalProjector,
)
from ..pixtral.image_processing_pixtral import get_resize_output_image_size
class LightOnOcrConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`LightOnOcrForConditionalGeneration`]. It is used to instantiate a
    LightOnOcr model according to the specified arguments, defining the model architecture.
    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information. Instantiating a configuration with the defaults will yield
    a similar configuration to that of the LightOnOcr [lightonocr-hf/lightonocr-9b](https://huggingface.co/lightonocr-hf/lightonocr-9b) architecture.
    Args:
        spatial_merge_size (`int`, *optional*, defaults to 2):
            The size of spatial merging for image patches.
        image_token_id (`int`, *optional*, defaults to 151655):
            The id of the image token in the vocabulary.
        tie_word_embeddings (`bool`, *optional*, defaults to `True`):
            Whether the model's input and output word embeddings should be tied.
        vision_config (`dict` or `LightOnOcrVisionConfig`, *optional*):
            Custom vision configuration or dictionary with vision configuration values.
        text_config (`dict` or `LightOnOcrTextConfig`, *optional*):
            Custom text configuration or dictionary with text configuration values.
    Example:
    ```python
    >>> from transformers import LightOnOcrConfig, LightOnOcrForConditionalGeneration
    >>> # Initializing a LightOnOcr configuration
    >>> configuration = LightOnOcrConfig()
    >>> # Initializing a model from the configuration
    >>> model = LightOnOcrForConditionalGeneration(configuration)
    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```
    """
    model_type = "lighton_ocr"
    sub_configs = {"text_config": AutoConfig, "vision_config": AutoConfig}
    def __init__(
        self,
        spatial_merge_size: int = 2,
        image_token_id: int = 151655,
        tie_word_embeddings: bool = True,
        vision_config: dict[str, Any] | None = None,
        text_config: dict[str, Any] | None = None,
        **kwargs,
    ):
        self.spatial_merge_size = spatial_merge_size
        self.image_token_id = image_token_id
        self.tie_word_embeddings = tie_word_embeddings
        # Vision tower: accept a ready config, a dict (model_type defaults to
        # "pixtral"), or fall back to the default Pixtral configuration below.
        if vision_config is None:
            self.vision_config = CONFIG_MAPPING["pixtral"](
                attention_dropout=0,
                head_dim=64,
                hidden_act="silu",
                hidden_size=1024,
                image_size=1540,
                initializer_range=0.02,
                intermediate_size=4096,
                model_type="pixtral",
                num_attention_heads=16,
                num_channels=3,
                num_hidden_layers=24,
                patch_size=14,
                rope_theta=10000,
            )
        elif isinstance(vision_config, PretrainedConfig):
            self.vision_config = vision_config
        else:
            vision_config["model_type"] = vision_config.get("model_type", "pixtral")
            self.vision_config = CONFIG_MAPPING[vision_config["model_type"]](**vision_config)
        # Language model: same three-way handling, defaulting to a Qwen3 config.
        if text_config is None:
            self.text_config = CONFIG_MAPPING["qwen3"](
                attention_dropout=0,
                head_dim=128,
                hidden_act="silu",
                hidden_size=1024,
                initializer_range=0.02,
                intermediate_size=3072,
                max_position_embeddings=40960,
                num_attention_heads=16,
                num_hidden_layers=28,
                num_key_value_heads=8,
                rms_norm_eps=1e-6,
                rope_theta=1000000,
                sliding_window=None,
                use_cache=True,
                vocab_size=151936,
            )
        elif isinstance(text_config, PretrainedConfig):
            self.text_config = text_config
        else:
            text_config["model_type"] = text_config.get("model_type", "qwen3")
            self.text_config = CONFIG_MAPPING[text_config["model_type"]](**text_config)
        super().__init__(**kwargs)
class LightOnOcrProcessorKwargs(ProcessingKwargs, total=False):
    # Default kwargs merged by `ProcessorMixin._merge_kwargs`:
    # tokenizer gets no padding and no multimodal token-type ids unless asked;
    # all sub-processors default to returning PyTorch tensors.
    _defaults = {
        "text_kwargs": {
            "padding": False,
            "return_mm_token_type_ids": False,
        },
        "common_kwargs": {
            "return_tensors": "pt",
        },
    }
class LightOnOcrProcessor(ProcessorMixin):
    r"""
    Processor that wraps a LightOnOcr image processor and a tokenizer into a
    single object. It expands each image placeholder token in the text into one
    token per spatially-merged patch, then tokenizes.

    Args:
        image_processor: Image processor used to resize/normalize images.
        tokenizer: Tokenizer exposing the special image tokens and their ids
            (`image_token`, `image_break_token`, `image_end_token`).
        patch_size (`int`, *optional*, defaults to 14):
            Vision-encoder patch size in pixels.
        spatial_merge_size (`int`, *optional*, defaults to 2):
            Number of patches merged along each spatial dimension.
        chat_template (`str`, *optional*):
            Jinja template used by `apply_chat_template`.
    """

    def __init__(
        self,
        image_processor=None,
        tokenizer=None,
        patch_size: int = 14,
        spatial_merge_size: int = 2,
        chat_template=None,
        **kwargs,
    ):
        self.patch_size = patch_size
        self.spatial_merge_size = spatial_merge_size
        # One text token stands for a (patch_size * spatial_merge_size)^2 pixel area.
        self.effective_patch_size = patch_size * spatial_merge_size
        # Get special tokens and IDs directly from tokenizer attributes
        self.image_token = tokenizer.image_token
        self.image_break_token = tokenizer.image_break_token
        self.image_end_token = tokenizer.image_end_token
        self.image_token_id = tokenizer.image_token_id
        self.image_break_token_id = tokenizer.image_break_token_id
        self.image_end_token_id = tokenizer.image_end_token_id
        self.image_ids = [self.image_token_id, self.image_break_token_id, self.image_end_token_id]
        super().__init__(image_processor, tokenizer, chat_template=chat_template)

    def __call__(
        self,
        images: ImageInput | None = None,
        text: TextInput | PreTokenizedInput | list[TextInput] | list[PreTokenizedInput] = None,
        **kwargs: Unpack[LightOnOcrProcessorKwargs],
    ) -> BatchFeature:
        """
        Prepare text (and optionally images) for the model.

        Each occurrence of the image placeholder token in `text` is expanded into
        `num_height_tokens * num_width_tokens` copies, matching the processed
        image size, before tokenization.

        Returns:
            `BatchFeature` with the tokenizer outputs merged with the image
            processor outputs (e.g. `pixel_values`, `image_sizes`).

        Raises:
            ValueError: If neither `text` nor `images` is provided.
            TypeError: If `text` is not a string or a list of strings.
        """
        if images is None and text is None:
            raise ValueError("You must provide either text or images")
        output_kwargs = self._merge_kwargs(
            LightOnOcrProcessorKwargs,
            tokenizer_init_kwargs=self.tokenizer.init_kwargs,
            **kwargs,
        )
        if images is not None:
            image_inputs = self.image_processor(images, **output_kwargs["images_kwargs"])
        else:
            image_inputs = {}
        if isinstance(text, str):
            text = [text]
        # Fix: the previous condition (`not isinstance(text, list) and not
        # isinstance(text[0], str)`) could never fire for a list of non-strings
        # and crashed on `text[0]` for non-subscriptable inputs. Validate that
        # `text` is a list whose elements are all strings.
        elif not isinstance(text, list) or not all(isinstance(t, str) for t in text):
            raise TypeError("Invalid input text. Please provide a string, or a list of strings")
        if image_inputs.get("pixel_values") is not None:
            image_sizes_iter = iter(image_inputs["image_sizes"])
            prompt_strings = []
            for sample in text:
                # First pass: compute each image's expansion and mark its position
                # with a temporary placeholder so already-expanded tokens are not
                # re-matched by the `while` loop.
                replace_strings = []
                while self.image_token in sample:
                    image_height, image_width = next(image_sizes_iter)
                    num_height_tokens = image_height // self.effective_patch_size
                    num_width_tokens = image_width // self.effective_patch_size
                    num_patches = num_height_tokens * num_width_tokens
                    replace_str = self.image_token * num_patches
                    replace_strings.append(replace_str)
                    sample = sample.replace(self.image_token, "<placeholder>", 1)
                # Second pass: substitute the expansions back in order.
                while "<placeholder>" in sample:
                    replace_str = replace_strings.pop(0)
                    sample = sample.replace("<placeholder>", replace_str, 1)
                prompt_strings.append(sample)
        else:
            prompt_strings = text
        return_tensors = output_kwargs["text_kwargs"].pop("return_tensors", None)
        return_mm_token_type_ids = output_kwargs["text_kwargs"].pop("return_mm_token_type_ids", False)
        # Tokenize without tensor conversion; the final BatchFeature applies it once.
        text_inputs = self.tokenizer(prompt_strings, **output_kwargs["text_kwargs"], return_tensors=None)
        self._check_special_mm_tokens(prompt_strings, text_inputs, modalities=["image"])
        if return_mm_token_type_ids:
            array_ids = np.array(text_inputs["input_ids"])
            mm_token_type_ids = np.zeros_like(text_inputs["input_ids"])
            # Mark every image-related special token (image / break / end) with 1.
            mm_token_type_ids[np.isin(array_ids, self.image_ids)] = 1
            text_inputs["mm_token_type_ids"] = mm_token_type_ids.tolist()
        return BatchFeature(data={**text_inputs, **image_inputs}, tensor_type=return_tensors)

    def _get_num_multimodal_tokens(self, image_sizes=None, **kwargs):
        """
        Computes the number of placeholder tokens needed for multimodal inputs with the given sizes.
        Args:
            image_sizes (`list[list[int]]`, *optional*):
                The input sizes formatted as (height, width) per each image.
        Returns:
            `MultiModalData`: A `MultiModalData` object holding number of tokens per each of the provided
            input modalities, along with other useful data.
        """
        vision_data = {}
        if image_sizes is not None:
            # Copy before updating: `.get` may return the class-level defaults
            # dict, and updating it in place would mutate shared state.
            images_kwargs = dict(LightOnOcrProcessorKwargs._defaults.get("images_kwargs", {}))
            images_kwargs.update(kwargs)
            size = images_kwargs.get("size", None) or self.image_processor.size
            patch_size = images_kwargs.get("patch_size", None) or self.image_processor.patch_size
            if isinstance(patch_size, dict) and "height" in patch_size and "width" in patch_size:
                patch_size = (patch_size["height"], patch_size["width"])
            num_image_tokens = []
            for height, width in image_sizes:
                # Simulate the resize the image processor would apply, then count
                # how many merged patches fit in the resized image.
                resized_height, resized_width = get_resize_output_image_size(
                    np.zeros((height, width, 3)),
                    size=(size["longest_edge"], size["longest_edge"]),
                    patch_size=patch_size,
                )
                num_height_tokens = resized_height // self.effective_patch_size
                num_width_tokens = resized_width // self.effective_patch_size
                num_image_tokens.append(num_width_tokens * num_height_tokens)
            # Each image is processed as a single (non-tiled) patch group.
            num_image_patches = [1] * len(image_sizes)
            vision_data.update({"num_image_tokens": num_image_tokens, "num_image_patches": num_image_patches})
        return MultiModalData(**vision_data)
class LightOnOcrMultiModalProjector(Mistral3MultiModalProjector):
    """Projects vision-encoder features into the language model's hidden space."""

    def __init__(self, config: LightOnOcrConfig):
        # `config` must be set before the parent __init__ runs (the modular
        # converter inlines the parent body, which reads it).
        self.config = config
        super().__init__()
        # Two bias-free linear layers with a GELU in between, replacing the
        # parent's projector configuration.
        self.act = nn.GELU()
        self.linear_1 = nn.Linear(config.vision_config.hidden_size, config.text_config.hidden_size, bias=False)
        self.linear_2 = nn.Linear(config.text_config.hidden_size, config.text_config.hidden_size, bias=False)
        # LightOnOcr uses a single feature layer; drop the parent-only attribute.
        del self.num_feature_layers
class LightOnOcrModelOutputWithPast(Mistral3ModelOutputWithPast):
    """Model output container, identical to the Mistral3 version."""

    pass
class LightOnOcrModel(Mistral3Model):
    """Vision encoder + projector + language model, without a generation head."""

    base_model_prefix = ""
    _checkpoint_conversion_mapping = {}
    def __init__(self, config: LightOnOcrConfig):
        # Bypass Mistral3Model.__init__ and build the three submodules directly.
        PreTrainedModel.__init__(self, config)
        self.vision_encoder = AutoModel.from_config(config.vision_config)
        self.vision_projection = LightOnOcrMultiModalProjector(config)
        self.language_model = AutoModel.from_config(config.text_config)
        self.post_init()
    @can_return_tuple
    @auto_docstring
    def get_image_features(
        self, pixel_values: torch.Tensor, image_sizes: torch.Tensor | list, **kwargs: Unpack[TransformersKwargs]
    ) -> tuple | BaseModelOutputWithPooling:
        """Encode images and return per-image projected feature chunks in `pooler_output`."""
        image_outputs = self.vision_encoder(pixel_values, image_sizes=image_sizes, return_dict=True, **kwargs)
        image_features = image_outputs.last_hidden_state
        image_features = self.vision_projection(image_features.squeeze(0), image_sizes)
        # Split features per image based on the effective patch size
        downsample_ratio = self.config.vision_config.patch_size * self.config.spatial_merge_size
        split_sizes = [(height // downsample_ratio) * (width // downsample_ratio) for height, width in image_sizes]
        image_features = torch.split(image_features, split_sizes)
        # Stash the per-image tuple in `pooler_output` so callers get one object.
        image_outputs.pooler_output = image_features
        return image_outputs
    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        pixel_values: torch.FloatTensor | None = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: Cache | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        use_cache: bool | None = None,
        output_attentions: bool | None = None,
        output_hidden_states: bool | None = None,
        return_dict: bool | None = None,
        cache_position: torch.LongTensor | None = None,
        image_sizes: torch.Tensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple | LightOnOcrModelOutputWithPast:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
        if inputs_embeds is None:
            inputs_embeds = self.get_input_embeddings()(input_ids)
        if pixel_values is not None:
            image_features = self.get_image_features(
                pixel_values=pixel_values, image_sizes=image_sizes, return_dict=True
            ).pooler_output
            # Concatenate per-image chunks and write them into the positions of
            # the image placeholder tokens in the text embeddings.
            image_features = torch.cat(image_features, dim=0).to(inputs_embeds.device, inputs_embeds.dtype)
            special_image_mask = self.get_placeholder_mask(
                input_ids, inputs_embeds=inputs_embeds, image_features=image_features
            )
            inputs_embeds = inputs_embeds.masked_scatter(special_image_mask, image_features)
        outputs = self.language_model(
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=True,
            cache_position=cache_position,
            **kwargs,
        )
        return LightOnOcrModelOutputWithPast(
            last_hidden_state=outputs.last_hidden_state,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            # Only defined when images were provided on this call.
            image_hidden_states=image_features if pixel_values is not None else None,
        )
class LightOnOcrForConditionalGeneration(Mistral3ForConditionalGeneration):
    """LightOnOcr generation head; inherits the full Mistral3 conditional-generation behavior.

    Only the checkpoint conversion mapping is reset and ``get_image_features``
    is re-exposed as a thin delegate to the inner model.
    """

    # No legacy checkpoint key remapping is needed for LightOnOcr weights.
    _checkpoint_conversion_mapping = {}

    @auto_docstring
    def get_image_features(
        self, pixel_values: torch.FloatTensor, image_sizes: torch.Tensor, **kwargs: Unpack[TransformersKwargs]
    ) -> tuple | BaseModelOutputWithPooling:
        # Pure delegation: the base LightOnOcrModel owns the vision encoder and projection.
        return self.model.get_image_features(pixel_values=pixel_values, image_sizes=image_sizes, **kwargs)
# Public symbols exported by this modular file; the code generator re-exports
# them from the generated modeling/configuration/processing modules.
__all__ = [
    "LightOnOcrPreTrainedModel",  # noqa
    "LightOnOcrForConditionalGeneration",
    "LightOnOcrModel",
    "LightOnOcrConfig",
    "LightOnOcrProcessor",
]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/lighton_ocr/modular_lighton_ocr.py",
"license": "Apache License 2.0",
"lines": 343,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:tests/models/lighton_ocr/test_modeling_lighton_ocr.py | # Copyright 2026 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch LightOnOcr model."""
import copy
import unittest
from difflib import SequenceMatcher
from transformers import (
LightOnOcrConfig,
LightOnOcrForConditionalGeneration,
LightOnOcrModel,
LightOnOcrProcessor,
is_torch_available,
is_vision_available,
)
from transformers.testing_utils import (
cleanup,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
if is_torch_available():
import torch
if is_vision_available():
from transformers.image_utils import load_image
class LightOnOcrVisionText2TextModelTester:
    """
    Builds a tiny `LightOnOcrConfig` together with matching dummy text/image
    inputs for the common model and generation tests.

    The default text/vision sub-configs are stored as class-level templates and
    copied per instance in ``__init__``. The previous version used mutable dict
    default arguments, which Python evaluates once and shares across every call
    and instance, so one test mutating ``self.text_config`` could leak into
    another tester.
    """

    _DEFAULT_TEXT_CONFIG = {
        "model_type": "qwen3",
        "seq_length": 7,
        "is_training": True,
        "use_input_mask": True,
        "use_token_type_ids": False,
        "use_labels": True,
        "vocab_size": 99,
        "hidden_size": 32,
        "num_hidden_layers": 2,
        "num_attention_heads": 4,
        "num_key_value_heads": 2,
        "intermediate_size": 37,
        "hidden_act": "silu",
        "hidden_dropout_prob": 0.1,
        "attention_probs_dropout_prob": 0.1,
        "max_position_embeddings": 512,
        "type_vocab_size": 16,
        "type_sequence_label_size": 2,
        "initializer_range": 0.02,
        "num_labels": 3,
        "num_choices": 4,
        "pad_token_id": 1,
        "bos_token_id": 0,
        "eos_token_id": 2,
        "rms_norm_eps": 1e-6,
        "rope_theta": 10000.0,
        "attention_bias": False,
        "attention_dropout": 0.0,
        "head_dim": 8,
    }
    _DEFAULT_VISION_CONFIG = {
        "image_size": 112,
        "patch_size": 14,
        "num_channels": 3,
        "is_training": True,
        "hidden_size": 32,
        "num_hidden_layers": 2,
        "num_attention_heads": 4,
        "intermediate_size": 37,
        "attention_dropout": 0.0,
        "hidden_act": "silu",
        "initializer_range": 0.02,
        "rope_theta": 10000.0,
    }

    def __init__(
        self,
        parent,
        image_token_index=10,
        spatial_merge_size=2,
        seq_length=7,
        text_config=None,
        is_training=True,
        vision_config=None,
    ):
        """
        Args:
            parent: The `unittest.TestCase` driving this tester (may be `None` in isolation).
            image_token_index: Token id used as the image placeholder.
            spatial_merge_size: Patch-merging factor of the vision projection.
            seq_length: Number of *text* tokens (image tokens are added on top).
            text_config / vision_config: Optional overrides; when `None`, a
                private copy of the class-level defaults is used.
            is_training: Whether inputs should be prepared for training.
        """
        self.parent = parent
        self.image_token_index = image_token_index
        self.spatial_merge_size = spatial_merge_size
        # Shallow copies are sufficient: all template values are scalars/strings.
        self.text_config = dict(self._DEFAULT_TEXT_CONFIG) if text_config is None else text_config
        self.vision_config = dict(self._DEFAULT_VISION_CONFIG) if vision_config is None else vision_config
        self.pad_token_id = self.text_config["pad_token_id"]
        self.num_hidden_layers = self.text_config["num_hidden_layers"]
        self.vocab_size = self.text_config["vocab_size"]
        self.hidden_size = self.text_config["hidden_size"]
        self.num_attention_heads = self.text_config["num_attention_heads"]
        self.is_training = is_training
        self.batch_size = 3
        self.num_channels = 3
        # Image size must be divisible by patch_size
        self.image_size = self.vision_config["image_size"]
        self.patch_size = self.vision_config["patch_size"]
        # Number of patches after patch conv
        num_patches = (self.image_size // self.patch_size) ** 2
        # After spatial merging, number of tokens is reduced by spatial_merge_size**2
        self.num_image_tokens = num_patches // (self.spatial_merge_size**2)
        self.seq_length = seq_length + self.num_image_tokens
        self.encoder_seq_length = self.seq_length

    def get_config(self):
        """Assemble a `LightOnOcrConfig` from the current sub-configs."""
        return LightOnOcrConfig(
            text_config=self.text_config,
            vision_config=self.vision_config,
            image_token_id=self.image_token_index,
            spatial_merge_size=self.spatial_merge_size,
        )

    def prepare_config_and_inputs(self):
        """Return `(config, pixel_values)` with random image pixels."""
        pixel_values = floats_tensor(
            [
                self.batch_size,
                self.vision_config["num_channels"],
                self.vision_config["image_size"],
                self.vision_config["image_size"],
            ]
        )
        config = self.get_config()
        return config, pixel_values

    def _build_text_inputs(self, config, batch_size):
        """Create `(input_ids, attention_mask, image_sizes)` for `batch_size` samples.

        Image placeholder tokens are placed at the beginning of each sequence;
        any random token that happened to equal the image token id is replaced
        with the pad token so the placeholder count is exact. This logic was
        previously duplicated in the two `prepare_config_and_inputs_for_*`
        methods.
        """
        input_ids = ids_tensor([batch_size, self.seq_length], config.text_config.vocab_size - 1) + 1
        # Avoid placing image tokens on positions that would be the pad token
        input_ids[input_ids == config.image_token_id] = self.pad_token_id
        # Place image tokens at the beginning
        input_ids[:, : self.num_image_tokens] = config.image_token_id
        attention_mask = input_ids.ne(self.pad_token_id)
        # Create image_sizes as tensor - must match batch size
        image_sizes = torch.tensor([[self.image_size, self.image_size]] * batch_size, dtype=torch.long)
        return input_ids, attention_mask, image_sizes

    def prepare_config_and_inputs_for_common(self):
        """Return `(config, inputs_dict)` for the shared ModelTesterMixin tests."""
        config, pixel_values = self.prepare_config_and_inputs()
        input_ids, attention_mask, image_sizes = self._build_text_inputs(config, self.batch_size)
        inputs_dict = {
            "pixel_values": pixel_values,
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "image_sizes": image_sizes,
        }
        return config, inputs_dict

    def prepare_config_and_inputs_for_generate(self, batch_size=None):
        """Prepare config and inputs for generation tests."""
        if batch_size is None:
            batch_size = self.batch_size
        config = self.get_config()
        # Create pixel_values with the specified batch size
        pixel_values = floats_tensor(
            [
                batch_size,
                self.vision_config["num_channels"],
                self.vision_config["image_size"],
                self.vision_config["image_size"],
            ]
        )
        input_ids, attention_mask, image_sizes = self._build_text_inputs(config, batch_size)
        inputs_dict = {
            "pixel_values": pixel_values,
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "image_sizes": image_sizes,
        }
        return config, inputs_dict
@require_torch
class LightOnOcrForConditionalGenerationModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
    """
    Model tester for `LightOnOcrForConditionalGeneration`.

    Runs the shared ModelTesterMixin / GenerationTesterMixin suites on tiny
    randomly-initialized models, plus LightOnOcr-specific checks for image
    token counting, spatial merging, and the vision projection.
    """

    all_model_classes = (
        (
            LightOnOcrModel,
            LightOnOcrForConditionalGeneration,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = {"image-text-to-text": LightOnOcrForConditionalGeneration} if is_torch_available() else {}
    # LightOnOcr uses a PixtralVisionModel, which merges batch_size and num_patches in index 1, with index 0 hardcoded to 1
    skip_test_image_features_output_shape = True
    _is_composite = True
    # torch.export compatibility is not supported/claimed for this model.
    test_torch_exportable = False

    def setUp(self):
        # Common harness wiring: inputs builder + config round-trip tester.
        self.model_tester = LightOnOcrVisionText2TextModelTester(self)
        common_properties = ["image_token_id", "spatial_merge_size"]
        self.config_tester = ConfigTester(
            self, config_class=LightOnOcrConfig, has_text_modality=False, common_properties=common_properties
        )

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        """
        Prepare inputs for the model class, ensuring image_sizes matches the batch size.
        """
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        # Ensure image_sizes matches the batch size of pixel_values or input_ids
        if "pixel_values" in inputs_dict and "image_sizes" in inputs_dict:
            batch_size = inputs_dict["pixel_values"].shape[0]
            # If image_sizes doesn't match batch size, adjust it
            if len(inputs_dict["image_sizes"]) != batch_size:
                inputs_dict["image_sizes"] = inputs_dict["image_sizes"][:batch_size]
        return inputs_dict

    def prepare_config_and_inputs_for_generate(self, batch_size=1):
        """Override to use the model_tester's custom method."""
        return self.model_tester.prepare_config_and_inputs_for_generate(batch_size=batch_size)

    def test_config(self):
        # Delegates to ConfigTester (serialization, common properties, etc.).
        self.config_tester.run_common_tests()

    def test_mismatching_num_image_tokens(self):
        """
        Tests that VLMs throw an error with explicit message saying what is wrong
        when number of images doesn't match number of image tokens in the text.
        Also we need to test multi-image cases when one prompt has multiple image tokens.
        """
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            model.eval()
            curr_input_dict = copy.deepcopy(input_dict)  # in-place modifications further
            _ = model(**curr_input_dict)  # successful forward with no modifications
            # remove one image but leave the image token in text
            curr_input_dict["pixel_values"] = curr_input_dict["pixel_values"][-1:, ...]
            curr_input_dict["image_sizes"] = curr_input_dict["image_sizes"][-1:]
            with self.assertRaisesRegex(ValueError, "Image features and image tokens do not match"):
                _ = model(**curr_input_dict)
            # simulate multi-image case by concatenating inputs where each has exactly one image/image-token
            input_ids = curr_input_dict["input_ids"][:1]
            pixel_values = curr_input_dict["pixel_values"][:1]
            image_sizes = curr_input_dict["image_sizes"][:1]
            input_ids = torch.cat([input_ids, input_ids], dim=0)
            # one image and two image tokens raise an error
            with self.assertRaisesRegex(ValueError, "Image features and image tokens do not match"):
                _ = model(input_ids=input_ids, pixel_values=pixel_values, image_sizes=image_sizes)
            # two images and two image tokens don't raise an error
            pixel_values = torch.cat([pixel_values, pixel_values], dim=0)
            image_sizes = torch.cat([image_sizes, image_sizes], dim=0)
            _ = model(input_ids=input_ids, pixel_values=pixel_values, image_sizes=image_sizes)

    def test_spatial_merge_size(self):
        """
        Test that models can be created and initialized with different spatial_merge_size values.
        """
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        # Test that model can be created with different spatial_merge_size values
        for spatial_merge_size in [1, 2, 4]:
            curr_config = copy.deepcopy(config)
            curr_config.spatial_merge_size = spatial_merge_size
            for model_class in self.all_model_classes:
                # Build model with the new config - should not raise any errors
                model = model_class(curr_config).to(torch_device)
                model.eval()
                # Verify the spatial_merge_size is set correctly
                self.assertEqual(model.config.spatial_merge_size, spatial_merge_size)
                # Verify the model has the expected components; the projection lives on
                # `model.model` for the generation head and directly on the base model.
                if hasattr(model, "model"):
                    self.assertTrue(hasattr(model.model, "vision_projection"))
                    self.assertEqual(model.model.vision_projection.config.spatial_merge_size, spatial_merge_size)
                elif hasattr(model, "vision_projection"):
                    self.assertEqual(model.vision_projection.config.spatial_merge_size, spatial_merge_size)

    def test_forward_pass_with_image_sizes(self):
        """
        Test that the model correctly handles variable image sizes.
        """
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            model.eval()
            # Test with different image sizes in the same batch
            batch_size = 2
            pixel_values = floats_tensor(
                [batch_size, 3, self.model_tester.image_size, self.model_tester.image_size]
            ).to(torch_device)
            # Different image sizes (but still need to be divisible by patch_size)
            image_sizes = torch.tensor(
                [[self.model_tester.image_size, self.model_tester.image_size]] * batch_size,
                dtype=torch.long,
                device=torch_device,
            )
            num_patches = (self.model_tester.image_size // self.model_tester.patch_size) ** 2
            num_image_tokens = num_patches // (config.spatial_merge_size**2)
            input_ids = ids_tensor([batch_size, 10 + num_image_tokens], config.text_config.vocab_size - 1) + 1
            # Ensure no tokens accidentally equal image_token_id
            input_ids[input_ids == config.image_token_id] = config.image_token_id + 1
            # Now place image tokens at the beginning
            input_ids[:, :num_image_tokens] = config.image_token_id
            input_ids = input_ids.to(torch_device)
            outputs = model(
                pixel_values=pixel_values,
                input_ids=input_ids,
                image_sizes=image_sizes,
            )
            self.assertIsNotNone(outputs)

    def test_model_outputs_equivalence(self):
        """
        Test that model outputs are consistent across different input configurations.
        """
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs1 = model(**input_dict)
                outputs2 = model(**input_dict)
            # Check that outputs are deterministic
            if hasattr(outputs1, "last_hidden_state") and hasattr(outputs2, "last_hidden_state"):
                self.assertTrue(torch.allclose(outputs1.last_hidden_state, outputs2.last_hidden_state, atol=1e-5))

    def test_vision_projection(self):
        """
        Test that the vision projection correctly transforms vision embeddings to text space.
        """
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        model = LightOnOcrModel(config).to(torch_device)
        model.eval()
        # Convert image_sizes to list for vision_encoder
        if isinstance(input_dict["image_sizes"], torch.Tensor):
            image_sizes_list = [(int(h), int(w)) for h, w in input_dict["image_sizes"]]
        else:
            image_sizes_list = input_dict["image_sizes"]
        with torch.no_grad():
            # Get vision features
            vision_outputs = model.vision_encoder(
                pixel_values=input_dict["pixel_values"].to(torch_device),
                image_sizes=image_sizes_list,
            )
            # Project vision features
            projected = model.vision_projection(
                vision_outputs.last_hidden_state.squeeze(0),
                image_sizes_list,
            )
        # Check output dimensions - should match text hidden size
        self.assertEqual(projected.shape[-1], config.text_config.hidden_size)

    def test_get_image_features(self):
        """
        Test the get_image_features method.
        """
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        model = LightOnOcrModel(config).to(torch_device)
        model.eval()
        with torch.no_grad():
            image_features_list = model.get_image_features(
                pixel_values=input_dict["pixel_values"].to(torch_device),
                image_sizes=input_dict["image_sizes"],
            ).pooler_output
        # Check that features are returned as a list
        self.assertIsNotNone(image_features_list)
        self.assertIsInstance(image_features_list, (list, tuple))
        # Concatenate features and check shape
        image_features = torch.cat(image_features_list, dim=0)
        self.assertEqual(image_features.shape[-1], config.text_config.hidden_size)
@slow
@require_torch
class LightOnOcrForConditionalGenerationIntegrationTest(unittest.TestCase):
    """Integration tests: real-checkpoint OCR plus fast randomly-initialized smoke tests."""

    # Tiny-model hyper-parameters shared by the two randomly-initialized tests.
    # Previously these two dicts were duplicated verbatim inside each test.
    _TINY_TEXT_CONFIG = {
        "vocab_size": 100,
        "hidden_size": 64,
        "num_hidden_layers": 2,
        "num_attention_heads": 4,
        "num_key_value_heads": 2,
        "intermediate_size": 128,
        "max_position_embeddings": 512,
        "rms_norm_eps": 1e-6,
        "head_dim": 16,
    }
    _TINY_VISION_CONFIG = {
        "hidden_size": 64,
        "num_hidden_layers": 2,
        "num_attention_heads": 4,
        "intermediate_size": 128,
        "image_size": 112,
        "patch_size": 14,
    }

    @classmethod
    def _tiny_config(cls):
        """Build a small `LightOnOcrConfig` suitable for fast CPU smoke tests.

        Fresh copies of the template dicts are passed so config construction
        can never mutate the class-level templates.
        """
        return LightOnOcrConfig(
            text_config=dict(cls._TINY_TEXT_CONFIG),
            vision_config=dict(cls._TINY_VISION_CONFIG),
            image_token_id=10,
        )

    def tearDown(self):
        # Free accelerator memory between tests.
        cleanup(torch_device, gc_collect=True)

    @slow
    def test_lightonocr_ocr_integration(self):
        """
        Integration test for LightOnOcr OCR capabilities.
        Tests that the model can perform OCR on a real image and produce expected output.
        """
        model_id = "lightonai/LightOnOCR-1B-1025"
        # Load processor and model from Hub
        processor = LightOnOcrProcessor.from_pretrained(model_id)
        model = LightOnOcrForConditionalGeneration.from_pretrained(model_id, device_map=torch_device)
        model.eval()
        # Load a test OCR image
        # This is a standard OCR test image from HuggingFace fixtures
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/fixtures_ocr/resolve/main/SROIE-receipt.jpeg"
        )
        # Process image and prepare inputs
        # Using chat template as shown in the model's usage pattern
        chat = [
            {
                "role": "user",
                "content": [
                    {"type": "image", "url": image},
                ],
            }
        ]
        inputs = processor.apply_chat_template(
            chat, add_generation_prompt=True, tokenize=True, return_dict=True, return_tensors="pt"
        ).to(torch_device, dtype=torch.bfloat16)
        # Generate OCR output (greedy, so the transcript is deterministic)
        with torch.no_grad():
            generated_ids = model.generate(
                **inputs,
                max_new_tokens=50,
                do_sample=False,
                num_beams=1,
            )
        # Decode output, excluding the input prompt
        decoded_output = processor.decode(generated_ids[0, inputs["input_ids"].shape[1] :], skip_special_tokens=True)
        expected_output = "Document No : TD01167104\n\nDate : 25/12/2018 8:13:39 PM\n\nCashier : MANIS\n\nMember :\n\nCASH BILL\n\n| CODE"
        similarity = SequenceMatcher(None, decoded_output, expected_output).ratio()
        # Require at least 95% similarity to catch regressions while allowing minor variations
        self.assertGreater(
            similarity,
            0.95,
            f"Model output differs too much from expected output (similarity: {similarity:.2%}).\n"
            f"Expected:\n{expected_output}\n\nGot:\n{decoded_output}",
        )

    def test_model_can_generate_without_images(self):
        """
        Test that the model can generate text without image inputs.
        """
        config = self._tiny_config()
        model = LightOnOcrForConditionalGeneration(config).to(torch_device)
        model.eval()
        # Create text-only input (ids shifted by +1 to avoid token id 0)
        input_ids = torch.randint(0, config.text_config.vocab_size - 1, (1, 10), device=torch_device) + 1
        with torch.no_grad():
            outputs = model.generate(input_ids=input_ids, max_new_tokens=5)
        self.assertIsNotNone(outputs)
        self.assertEqual(outputs.shape[0], 1)
        # Generation must have appended at least one new token.
        self.assertGreater(outputs.shape[1], input_ids.shape[1])

    def test_model_forward_with_images(self):
        """
        Test forward pass with image inputs.
        """
        config = self._tiny_config()
        model = LightOnOcrForConditionalGeneration(config).to(torch_device)
        model.eval()
        # Create inputs
        batch_size = 2
        image_size = 112
        pixel_values = torch.randn(batch_size, 3, image_size, image_size, device=torch_device)
        image_sizes = torch.tensor([[image_size, image_size]] * batch_size, dtype=torch.long, device=torch_device)
        # Calculate number of image tokens
        num_patches = (image_size // 14) ** 2  # patch_size = 14
        num_image_tokens = num_patches // (config.spatial_merge_size**2)
        seq_len = num_image_tokens + 10
        input_ids = torch.randint(0, config.text_config.vocab_size - 1, (batch_size, seq_len), device=torch_device) + 1
        # Ensure no tokens accidentally equal image_token_id
        input_ids[input_ids == config.image_token_id] = config.image_token_id + 1
        # Now place image tokens at the beginning
        input_ids[:, :num_image_tokens] = config.image_token_id
        with torch.no_grad():
            outputs = model(
                pixel_values=pixel_values,
                input_ids=input_ids,
                image_sizes=image_sizes,
            )
        self.assertIsNotNone(outputs)
        self.assertIsNotNone(outputs.logits)
        self.assertEqual(outputs.logits.shape[0], batch_size)
        self.assertEqual(outputs.logits.shape[1], seq_len)
        self.assertEqual(outputs.logits.shape[2], config.text_config.vocab_size)
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/lighton_ocr/test_modeling_lighton_ocr.py",
"license": "Apache License 2.0",
"lines": 500,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:tests/models/lighton_ocr/test_processor_lighton_ocr.py | # Copyright 2026 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_processing_common import ProcessorTesterMixin
if is_vision_available():
from PIL import Image
from transformers import LightOnOcrProcessor
if is_torch_available():
import torch
@require_vision
@require_torch
class LightOnOcrProcessorTest(ProcessorTesterMixin, unittest.TestCase):
    """Tests for the LightOnOcr processor (tokenizer + image processor wrapper)."""

    processor_class = LightOnOcrProcessor
    model_id = "lightonai/LightOnOCR-1B-1025"

    def setUp(self):
        """Cache the image placeholder token used by most tests."""
        self.image_token = self.get_processor().image_token

    def prepare_image_inputs(self, batch_size=None):
        """Build one (or ``batch_size`` identical) tiny solid-red RGB images."""
        dummy_image = Image.new("RGB", (112, 112), color="red")
        return dummy_image if batch_size is None else [dummy_image] * batch_size

    def test_processor_creation(self):
        """The processor loads and exposes its tokenizer and image processor."""
        proc = self.get_processor()
        self.assertIsInstance(proc, LightOnOcrProcessor)
        self.assertIsNotNone(proc.tokenizer)
        self.assertIsNotNone(proc.image_processor)

    def test_processor_with_text_only(self):
        """A text-only call yields token ids plus an attention mask."""
        proc = self.get_processor()
        encoded = proc(text="This is a test sentence.", return_tensors="pt")
        self.assertIn("input_ids", encoded)
        self.assertIn("attention_mask", encoded)
        self.assertEqual(encoded["input_ids"].shape[0], 1)  # single sample batch

    def test_processor_with_image_and_text(self):
        """Image + text calls additionally produce pixel values and image sizes."""
        proc = self.get_processor()
        prompt = f"{self.image_token} Extract text from this image."
        encoded = proc(images=self.prepare_image_inputs(), text=prompt, return_tensors="pt")
        for key in ("input_ids", "attention_mask", "pixel_values", "image_sizes"):
            self.assertIn(key, encoded)
        # Shape sanity checks
        self.assertEqual(encoded["input_ids"].shape[0], 1)  # single sample batch
        self.assertEqual(len(encoded["pixel_values"].shape), 4)  # (batch, channels, height, width)
        self.assertEqual(len(encoded["image_sizes"]), 1)  # exactly one image

    def test_processor_image_token_expansion(self):
        """One image placeholder must expand into many per-patch tokens."""
        proc = self.get_processor()
        prompt = f"{self.image_token} Describe this image."
        encoded = proc(images=self.prepare_image_inputs(), text=prompt, return_tensors="pt")
        # Count how many image-token ids ended up in the encoded prompt:
        # one per patch remaining after spatial merging.
        expanded_count = (encoded["input_ids"] == proc.image_token_id).sum().item()
        self.assertGreater(expanded_count, 1)

    def test_processor_batch_processing(self):
        """Batched images and texts keep a consistent batch dimension."""
        proc = self.get_processor()
        batch_images = self.prepare_image_inputs(batch_size=2)
        batch_texts = [f"{self.image_token} Extract text." for _ in range(2)]
        encoded = proc(images=batch_images, text=batch_texts, return_tensors="pt", padding=True)
        self.assertEqual(encoded["input_ids"].shape[0], 2)  # two prompts
        self.assertEqual(encoded["pixel_values"].shape[0], 2)  # two images

    def test_processor_model_input_names(self):
        """`model_input_names` must advertise every key the model consumes."""
        proc = self.get_processor()
        advertised = set(proc.model_input_names)
        for required in ("input_ids", "attention_mask", "pixel_values", "image_sizes"):
            self.assertIn(required, advertised)

    def test_processor_without_images(self):
        """Text-only input must not emit any image-related keys."""
        proc = self.get_processor()
        encoded = proc(text="This is text without any images.", return_tensors="pt")
        self.assertIn("input_ids", encoded)
        self.assertIn("attention_mask", encoded)
        self.assertNotIn("pixel_values", encoded)
        self.assertNotIn("image_sizes", encoded)

    def test_processor_special_tokens(self):
        """The image placeholder/break/end tokens are registered with valid ids."""
        proc = self.get_processor()
        # Literal token strings expected by the checkpoint
        self.assertEqual(proc.image_token, "<|image_pad|>")
        self.assertEqual(proc.image_break_token, "<|vision_pad|>")
        self.assertEqual(proc.image_end_token, "<|vision_end|>")
        # Each token must resolve to an integer vocabulary id
        self.assertIsInstance(proc.image_token_id, int)
        self.assertIsInstance(proc.image_break_token_id, int)
        self.assertIsInstance(proc.image_end_token_id, int)

    def test_processor_return_types(self):
        """The processor honors `return_tensors` values pt / np / None."""
        proc = self.get_processor()
        image = self.prepare_image_inputs()
        prompt_with_image = f"{self.image_token} Test image."
        prompt_text_only = "Test without image."
        # PyTorch tensors (images included - the fast image processor is pt-only)
        as_pt = proc(images=image, text=prompt_with_image, return_tensors="pt")
        self.assertIsInstance(as_pt["input_ids"], torch.Tensor)
        # NumPy arrays (text-only, since the fast image processor doesn't support np)
        as_np = proc(text=prompt_text_only, return_tensors="np")
        self.assertIsInstance(as_np["input_ids"], np.ndarray)
        # Plain lists (text-only, since the fast image processor doesn't support list)
        as_list = proc(text=prompt_text_only, return_tensors=None)
        self.assertIsInstance(as_list["input_ids"], list)

    def test_image_sizes_output(self):
        """`image_sizes` reports one (height, width) pair per image."""
        proc = self.get_processor()
        non_square = Image.new("RGB", (300, 400), color="blue")  # deliberately not 112x112
        prompt = f"{self.image_token} Test."
        encoded = proc(images=non_square, text=prompt, return_tensors="pt")
        self.assertIn("image_sizes", encoded)
        self.assertEqual(len(encoded["image_sizes"]), 1)
        # Each entry is a (height, width) pair
        self.assertEqual(len(encoded["image_sizes"][0]), 2)
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/lighton_ocr/test_processor_lighton_ocr.py",
"license": "Apache License 2.0",
"lines": 140,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:src/transformers/models/glm_image/modular_glm_image.py | # Copyright 2025 the HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from collections.abc import Callable
from typing import Any
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from ...cache_utils import Cache
from ...configuration_utils import PreTrainedConfig
from ...feature_extraction_utils import BatchFeature
from ...generation import GenerationMixin
from ...image_utils import ImageInput
from ...modeling_outputs import BaseModelOutputWithPooling
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import ImagesKwargs, ProcessorMixin, Unpack
from ...tokenization_utils_base import PreTokenizedInput, TextInput
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, is_torch_available, logging
from ...utils.generic import merge_with_config_defaults
from ...utils.output_capturing import capture_outputs
from ..chameleon.modeling_chameleon import ChameleonVQVAE, ChameleonVQVAEModelOutput, ChameleonVQVAEVectorQuantizer
from ..glm4v.configuration_glm4v import Glm4vTextConfig, Glm4vVisionConfig
from ..glm4v.modeling_glm4v import (
Glm4vCausalLMOutputWithPast,
Glm4vModel,
Glm4vModelOutputWithPast,
Glm4vPreTrainedModel,
Glm4vTextModel,
Glm4vVisionAttention,
Glm4vVisionBlock,
Glm4vVisionEmbeddings,
Glm4vVisionModel,
Glm4vVisionPatchEmbed,
)
from ..glm4v_moe.modeling_glm4v_moe import Glm4vMoeTextAttention, eager_attention_forward
from ..qwen2_vl.image_processing_qwen2_vl import Qwen2VLImageProcessor
from ..qwen2_vl.image_processing_qwen2_vl_fast import Qwen2VLImageProcessorFast
from ..qwen2_vl.processing_qwen2_vl import Qwen2VLProcessorKwargs
from ..siglip.modeling_siglip import SiglipMLP
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class GlmImageVQVAEConfig(PreTrainedConfig):
    r"""
    Configuration class for a [`GlmImageVQModel`]. Instantiating it with the
    default values yields a configuration similar to the VQModel of the
    [zai-org/GLM-Image](https://huggingface.co/zai-org/GLM-Image) architecture.

    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.

    Args:
        embed_dim (`int`, *optional*, defaults to 2048):
            Dimensionality of each codebook embedding vector.
        num_embeddings (`int`, *optional*, defaults to 16384):
            Number of entries in the codebook.
        latent_channels (`int`, *optional*, defaults to 1536):
            Number of channels of the latent space.
        in_channels (`int`, *optional*, defaults to 3):
            Number of channels of the input images.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
    """

    model_type = "glm_image_vqmodel"
    base_config_key = "vq_config"

    def __init__(
        self,
        embed_dim: int = 2048,
        num_embeddings: int = 16384,
        latent_channels: int = 1536,
        in_channels: int = 3,
        initializer_range=0.02,
        **kwargs,
    ):
        # Let the base config consume any remaining keyword arguments first.
        super().__init__(**kwargs)
        self.in_channels = in_channels
        self.latent_channels = latent_channels
        self.num_embeddings = num_embeddings
        self.embed_dim = embed_dim
        self.initializer_range = initializer_range
class GlmImageVisionConfig(Glm4vVisionConfig):
    r"""
    This is the configuration class to store the configuration of a [`GlmImageVisionModel`]. It is used to instantiate an GlmImageVisionModel
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield
    a similar configuration to that of
    GLM-Image [zai-org/GLM-Image](https://huggingface.co/zai-org/GLM-Image).

    Args:
        depth (`int`, *optional*, defaults to 40):
            Number of layers (depth) in the model.
        hidden_size (`int`, *optional*, defaults to 1536):
            Dimensionality of the encoder layers and the pooler layer.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"` and `"gelu_new"` are supported.
        attention_bias (`bool`, *optional*, defaults to `True`):
            Whether to add a bias to the queries, keys and values.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            Dropout probability for attention weights.
        num_heads (`int`, *optional*, defaults to 16):
            Number of attention heads for each attention layer in the Transformer architecture.
        in_channels (`int`, *optional*, defaults to 3):
            Number of input channels.
        image_size (`int` or `list[int]`, *optional*, defaults to 2048):
            The size (resolution) of each image.
        patch_size (`int`, *optional*, defaults to 16):
            The size (resolution) of each patch.
        layer_norm_eps (`float`, *optional*, defaults to 1e-06):
            The epsilon used by the layer normalization layers.
        spatial_merge_size (`int`, *optional*, defaults to 1):
            The size used for merging spatial dimensions.
        intermediate_size (`int`, *optional*, defaults to 6144):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
    """

    model_type = "glm_image_vision"
    base_config_key = "vision_config"

    def __init__(
        self,
        depth=40,
        hidden_size=1536,
        hidden_act="gelu",
        attention_bias=True,
        attention_dropout=0.0,
        num_heads=16,
        in_channels=3,
        image_size=2048,
        patch_size=16,
        layer_norm_eps=1e-06,
        spatial_merge_size=1,
        intermediate_size=6144,
        initializer_range=0.02,
        **kwargs,
    ):
        # NOTE(review): the named hyper-parameters above are neither assigned to
        # `self` nor forwarded to `super().__init__` here — presumably the
        # modular conversion tooling inlines the parent `__init__` body under
        # this signature so the new defaults take effect in the generated
        # configuration file; confirm against the generated configuration_glm_image.py.
        super().__init__(**kwargs)
        # Remove Glm4v-specific attributes that do not apply to GLM-Image's vision tower.
        del self.out_hidden_size
        del self.rms_norm_eps
        del self.temporal_patch_size
        # Vision tower uses classic LayerNorm, hence an explicit epsilon here.
        self.layer_norm_eps = layer_norm_eps
class GlmImageTextConfig(Glm4vTextConfig):
    r"""
    This is the configuration class to store the configuration of a [`GlmImageTextModel`]. It is used to instantiate a
    GLM-Image model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of
    GLM-Image [zai-org/GLM-Image](https://huggingface.co/zai-org/GLM-Image).

    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 168064):
            Vocabulary size of the GlmImage model. Defines the number of different tokens that can be represented by
            the `inputs_ids` passed when calling [`GlmImageModel`]
        hidden_size (`int`, *optional*, defaults to 4096):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 13696):
            Dimension of the MLP representations.
        num_hidden_layers (`int`, *optional*, defaults to 40):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 32):
            Number of attention heads for each attention layer in the Transformer encoder.
        num_key_value_heads (`int`, *optional*, defaults to 2):
            This is the number of key_value heads that should be used to implement Grouped Query Attention. If
            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
            `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
            by meanpooling all the original heads within that group. For more details checkout [this
            paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to `32`.
        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the decoder.
        max_position_embeddings (`int`, *optional*, defaults to 131072):
            The maximum sequence length that this model might ever be used with.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        rms_norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon used by the rms normalization layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        rope_parameters (`RopeParameters`, *optional*):
            Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain
            a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE
            with longer `max_position_embeddings`.
        pad_token_id (`int`, *optional*, defaults to 167841):
            The id of the padding token.
        vision_vocab_size (`int`, *optional*, defaults to 16512):
            Vision vocabulary size of the GlmImage model. Defines the number of different tokens that can be
            represented by the `inputs_ids` passed when calling [`GlmImageVisionModel`]
        attention_bias (`bool`, *optional*, defaults to `True`):
            Whether to add a bias to the queries, keys and values.
        eos_token_id (`int`, *optional*, defaults to 16385):
            The id of the end of sequence token.

    ```python
    >>> from transformers import GlmImageTextModel, GlmImageConfig

    >>> # Initializing a GlmImageConfig style configuration
    >>> configuration = GlmImageConfig()

    >>> # Initializing a model from the GlmImageConfig style configuration
    >>> model = GlmImageTextModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    def __init__(
        self,
        vocab_size: int = 168064,
        max_position_embeddings: int = 131072,
        vision_vocab_size: int = 16512,
        attention_bias: bool = True,
        pad_token_id: int = 167841,
        eos_token_id: int = 16385,
        **super_kwargs,
    ):
        # Everything the Glm4v text config already understands goes straight through.
        super().__init__(
            vocab_size=vocab_size, max_position_embeddings=max_position_embeddings, pad_token_id=pad_token_id, **super_kwargs
        )
        # GLM-Image-specific additions on top of the Glm4v text config (set after the
        # parent init so they cannot be overwritten by it).
        self.eos_token_id = eos_token_id
        self.attention_bias = attention_bias
        self.vision_vocab_size = vision_vocab_size
class GlmImageConfig(PreTrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`GlmImageModel`]. It is used to instantiate a
    GLM-Image model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of
    GLM-Image [zai-org/GLM-Image](https://huggingface.co/zai-org/GLM-Image) architecture.

    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.

    Args:
        text_config (`Union[PreTrainedConfig, dict]`, *optional*, defaults to `GlmImageTextConfig`):
            The config object or dictionary of the text backbone.
        vision_config (`Union[PreTrainedConfig, dict]`, *optional*, defaults to `GlmImageVisionConfig`):
            The config object or dictionary of the vision backbone.
        vq_config (`Union[Dict, GlmImageVQVAEConfig]`, *optional*):
            GlmImageVQVAEConfig instance containing the configuration for the VQ-VAE model.
        image_token_id (`int`, *optional*, defaults to 167855):
            The image token index to encode the image prompt.
        image_start_token_id (`int`, *optional*, defaults to 16384):
            The image start token index to encode the start of image.
        image_end_token_id (`int`, *optional*, defaults to 16385):
            The image end token index to encode the end of image.
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether the model's input and output word embeddings should be tied.

    ```python
    >>> from transformers import Glm4vForConditionalGeneration, Glm4vConfig

    >>> # Initializing a GLM-Image style configuration
    >>> configuration = Glm4vConfig()

    >>> # Initializing a model from the GLM-Image style configuration
    >>> model = Glm4vForConditionalGeneration(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "glm_image"
    sub_configs = {
        "vision_config": GlmImageVisionConfig,
        "text_config": GlmImageTextConfig,
        "vq_config": GlmImageVQVAEConfig,
    }
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        vq_config=None,
        image_token_id=167855,
        image_start_token_id=16384,
        image_end_token_id=16385,
        tie_word_embeddings: bool | None = False,
        **kwargs,
    ):
        # Each sub-config may arrive as a dict (from JSON), a ready config object, or
        # be absent entirely — in the last case it is built from the remaining kwargs.
        if isinstance(vision_config, dict):
            vision_config = self.sub_configs["vision_config"](**vision_config)
        elif vision_config is None:
            vision_config = self.sub_configs["vision_config"](**kwargs)
        if isinstance(vq_config, dict):
            vq_config = self.sub_configs["vq_config"](**vq_config)
        elif vq_config is None:
            vq_config = self.sub_configs["vq_config"](**kwargs)
        if isinstance(text_config, dict):
            text_config = self.sub_configs["text_config"](**text_config)
        elif text_config is None:
            text_config = self.sub_configs["text_config"](**kwargs)
        self.image_token_id = image_token_id
        self.image_start_token_id = image_start_token_id
        self.image_end_token_id = image_end_token_id
        self.text_config = text_config
        self.vision_config = vision_config
        self.vq_config = vq_config
        super().__init__(**kwargs)
        # Bug fix: set after super().__init__ — PreTrainedConfig.__init__ assigns
        # `tie_word_embeddings` from kwargs (defaulting to True), which clobbered the
        # explicit `False` when the assignment came before the parent init.
        self.tie_word_embeddings = tie_word_embeddings
class GlmImageVisionMLP(SiglipMLP):
    # Plain SigLIP feed-forward block (fc1 -> activation -> fc2); no GLM-Image-specific changes.
    pass
class GlmImageVisionAttention(Glm4vVisionAttention):
    """Packed-sequence self-attention for the GLM-Image vision tower.

    Differs from the Glm4v parent in that the fused QKV and output projections
    use the configurable `attention_bias`, and no rotary position embeddings are
    applied here (positions are injected earlier by the embeddings module).
    """

    def __init__(self, config: GlmImageVisionConfig) -> None:
        super().__init__(config)
        # Re-create the projections so the bias follows config.attention_bias.
        self.qkv = nn.Linear(config.hidden_size, config.hidden_size * 3, bias=config.attention_bias)
        self.proj = nn.Linear(config.hidden_size, config.hidden_size, bias=config.attention_bias)

    def forward(
        self,
        hidden_states: torch.Tensor,
        cu_seqlens: torch.Tensor,
        **kwargs,
    ) -> torch.Tensor:
        """Attend within each packed image segment.

        Args:
            hidden_states: `[total_patches, hidden_size]` packed patch features.
            cu_seqlens: cumulative segment lengths, shape `[num_segments + 1]`,
                delimiting each image's patches inside the packed sequence.

        Returns:
            `[total_patches, hidden_size]` attended features.
        """
        seq_length = hidden_states.shape[0]
        # Fused QKV -> three [seq, num_heads, head_dim] tensors.
        query_states, key_states, value_states = (
            self.qkv(hidden_states).reshape(seq_length, 3, self.num_heads, -1).permute(1, 0, 2, 3).unbind(0)
        )
        # Add a batch dim of 1 and move heads first: [1, num_heads, seq, head_dim].
        query_states = query_states.transpose(0, 1).unsqueeze(0)
        key_states = key_states.transpose(0, 1).unsqueeze(0)
        value_states = value_states.transpose(0, 1).unsqueeze(0)
        attention_interface: Callable = ALL_ATTENTION_FUNCTIONS.get_interface(
            self.config._attn_implementation, eager_attention_forward
        )
        if "flash" in self.config._attn_implementation:
            # Flash Attention: use cu_seqlens for variable-length attention in one call.
            max_seqlen = (cu_seqlens[1:] - cu_seqlens[:-1]).max()
            attn_output, _ = attention_interface(
                self,
                query_states,
                key_states,
                value_states,
                attention_mask=None,
                scaling=self.scaling,
                dropout=0.0 if not self.training else self.attention_dropout,
                cu_seq_lens_q=cu_seqlens,
                cu_seq_lens_k=cu_seqlens,
                max_length_q=max_seqlen,
                max_length_k=max_seqlen,
                is_causal=False,
                **kwargs,
            )
        else:
            # Other implementations: split along the sequence dim and attend to each
            # image's patches separately so segments never attend across images.
            lengths = cu_seqlens[1:] - cu_seqlens[:-1]
            splits = [
                torch.split(tensor, lengths.tolist(), dim=2) for tensor in (query_states, key_states, value_states)
            ]
            attn_outputs = [
                attention_interface(
                    self,
                    q,
                    k,
                    v,
                    attention_mask=None,
                    scaling=self.scaling,
                    dropout=0.0 if not self.training else self.attention_dropout,
                    is_causal=False,
                    **kwargs,
                )[0]
                for q, k, v in zip(*splits)
            ]
            attn_output = torch.cat(attn_outputs, dim=1)
        attn_output = attn_output.reshape(seq_length, -1).contiguous()
        attn_output = self.proj(attn_output)
        return attn_output
class GlmImageVisionPatchEmbed(Glm4vVisionPatchEmbed):
    """Image-only patch embedding: a 2D convolution over non-overlapping patches.

    Unlike the Glm4v parent there is no temporal axis, so the Conv is 2D and
    `temporal_patch_size` is removed.
    """

    def __init__(self, config: GlmImageVisionConfig) -> None:
        super().__init__(config)
        del self.temporal_patch_size
        patch_kernel = [self.patch_size, self.patch_size]
        self.proj = nn.Conv2d(self.in_channels, self.embed_dim, kernel_size=patch_kernel, stride=patch_kernel)

    def forward(self, hidden_states):
        """Project packed pixel patches into `[total_patches, embed_dim]` embeddings."""
        patches = hidden_states.view(-1, self.in_channels, self.patch_size, self.patch_size)
        patches = patches.to(dtype=self.proj.weight.dtype)
        return self.proj(patches).view(-1, self.embed_dim)
class GlmImageVisionEmbeddings(Glm4vVisionEmbeddings):
    def __init__(self, config: GlmImageVisionConfig) -> None:
        super().__init__(config)
        # Override the parent's position-embedding resize mode to bilinear interpolation.
        self.interpolated_method = "bilinear"
class GlmImageVisionBlock(Glm4vVisionBlock):
    """Pre-norm transformer block (LayerNorm -> attention -> residual, then
    LayerNorm -> MLP -> residual) using GLM-Image's attention and SigLIP MLP."""

    def __init__(self, config: GlmImageVisionConfig):
        super().__init__(config)
        # Replace the parent's norms/attention/MLP with LayerNorm-based variants.
        self.norm1 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.norm2 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.attn = GlmImageVisionAttention(config)
        self.mlp = GlmImageVisionMLP(config)

    def forward(
        self,
        hidden_states: torch.Tensor,
        cu_seqlens: torch.Tensor,
        **kwargs: Unpack[TransformersKwargs],
    ) -> torch.Tensor:
        r"""
        cu_seqlens (`torch.Tensor` of shape `(num_images_or_videos + 1,)`):
            The cumulative sequence lengths of each image or video feature.
        position_embeddings (`tuple(torch.Tensor, torch.Tensor)` of shape `(num_patches, head_dim // 2)`):
            The cosine and sine position embeddings for vision attention.
        """
        # Attention sub-layer with residual connection.
        attn_out = self.attn(self.norm1(hidden_states), cu_seqlens=cu_seqlens, **kwargs)
        hidden_states = hidden_states + attn_out
        # Feed-forward sub-layer with residual connection.
        hidden_states = hidden_states + self.mlp(self.norm2(hidden_states))
        return hidden_states
class GlmImageTextAttention(Glm4vMoeTextAttention):
    # Reuses the Glm4vMoe text attention unchanged.
    pass
class GlmImagePreTrainedModel(Glm4vPreTrainedModel):
    config: GlmImageConfig
    input_modalities = ("image", "text")

    @torch.no_grad()
    def _init_weights(self, module):
        """Initialize `module` using the generic `PreTrainedModel` scheme, bypassing
        any Glm4v-specific init logic from the parent class."""
        # Bug fix: `_init_weights` is an instance method, so an unbound call must pass
        # `self` explicitly. The previous `PreTrainedModel._init_weights(module)` bound
        # `module` as `self` and left the `module` argument missing (TypeError).
        PreTrainedModel._init_weights(self, module)
class GlmImageModelOutputWithPast(Glm4vModelOutputWithPast):
    # Same fields as the Glm4v model output (last_hidden_state, past_key_values, ...).
    pass
class GlmImageVQVAEVectorQuantizer(ChameleonVQVAEVectorQuantizer):
    """Nearest-neighbour vector quantizer with an L2-normalized codebook."""

    def __init__(self, config: GlmImageVQVAEConfig):
        super().__init__(config)
        self.num_embeddings = config.num_embeddings
        self.embedding_dim = config.embed_dim
        # Commitment-loss weight; falls back to the standard VQ-VAE value.
        self.beta = getattr(config, "beta", 0.25)
        self.embedding = nn.Embedding(self.num_embeddings, self.embedding_dim)

    def forward(self, hidden_state: torch.Tensor):
        """Quantize channel-first features against the codebook.

        Args:
            hidden_state: features whose channel dim equals `embedding_dim`
                (permuted 0,2,3,1 then flattened — assumes a 4D [B, C, H, W]
                input; TODO confirm against caller).

        Returns:
            Tuple of (straight-through quantized features in the input layout,
            scalar VQ loss, flat codebook indices).
        """
        hidden_state = hidden_state.permute(0, 2, 3, 1).contiguous()
        hidden_state_flattened = hidden_state.view(-1, self.embedding_dim)
        # L2 normalize both the features and the codebook so that the distance
        # search is effectively cosine similarity.
        hidden_state = F.normalize(hidden_state, p=2, dim=-1)
        hidden_state_flattened = F.normalize(hidden_state_flattened, p=2, dim=-1)
        embedding = F.normalize(self.embedding.weight, p=2, dim=-1)
        # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
        distances = (
            torch.sum(hidden_state_flattened**2, dim=1, keepdim=True)
            + torch.sum(embedding**2, dim=1)
            - 2 * torch.einsum("bd,dn->bn", hidden_state_flattened, embedding.transpose(0, 1))
        )
        min_encoding_indices = torch.argmin(distances, dim=1)
        hidden_state_quant = embedding[min_encoding_indices].view(hidden_state.shape)
        # compute loss for embedding: codebook term + beta-weighted commitment term
        loss = torch.mean((hidden_state_quant.detach() - hidden_state) ** 2) + self.beta * torch.mean(
            (hidden_state_quant - hidden_state.detach()) ** 2
        )
        # preserve gradients (straight-through estimator)
        hidden_state_quant = hidden_state + (hidden_state_quant - hidden_state).detach()
        # reshape back to match original input shape
        hidden_state_quant = hidden_state_quant.permute(0, 3, 1, 2).contiguous()
        return hidden_state_quant, loss, min_encoding_indices
class GlmImageVQVAEModelOutput(ChameleonVQVAEModelOutput):
    # Same fields as Chameleon's VQ-VAE output (last_hidden_state, image_tokens, ...).
    pass
class GlmImageVQVAE(ChameleonVQVAE):
    """VQ-VAE used only as a tokenizer: features come from the vision tower, so the
    Chameleon pixel encoder is removed and only quant_conv + quantize are used."""

    _no_split_modules = [
        "GlmImageVQVAEVectorQuantizer",
    ]
    _can_record_outputs = {}

    def __init__(self, config: GlmImageVQVAEConfig):
        super().__init__(config)
        # GLM-Image feeds pre-computed vision features; the pixel encoder is unused.
        del self.encoder

    def encode(self, hidden_states):
        """Quantize vision-tower features into discrete codebook tokens."""
        conv_hidden_states = self.quant_conv(hidden_states)
        quantized_last_hidden_state, emb_loss, indices = self.quantize(conv_hidden_states)
        # NOTE(review): `last_hidden_state` is the raw input (pre quant_conv), not the
        # conv output — looks intentional but confirm against downstream consumers.
        return GlmImageVQVAEModelOutput(
            last_hidden_state=hidden_states,
            quantized_last_hidden_state=quantized_last_hidden_state,
            image_tokens=indices,
            embedding_loss=emb_loss,
        )
class GlmImageVisionModel(Glm4vVisionModel):
    """GLM-Image vision tower: patch embed + learned embeddings + transformer blocks.

    Several Glm4v components (merger, rotary embeddings, post-norms, downsample)
    are deleted; positions are injected once by `self.embeddings` instead.
    """

    config: GlmImageVisionConfig
    main_input_name = "pixel_values"
    input_modalities = ("image",)

    def __init__(self, config: GlmImageVisionConfig):
        super().__init__(config)
        head_dim = config.hidden_size // config.num_heads
        self.head_dim = head_dim
        # Glm4v modules not used by GLM-Image.
        del self.merger
        del self.rotary_pos_emb
        del self.post_conv_layernorm
        del self.downsample
        del self.post_layernorm

    def rot_pos_emb(self, grid_thw):
        """Return per-patch (h, w) index pairs of shape `[total_patches, 2]`.

        For each image the row/column indices are grouped into
        `spatial_merge_size` blocks (permute of the 4D reshape) so that merged
        patches get contiguous positions, then repeated `t` times.
        """
        pos_ids = []
        for t, h, w in grid_thw:
            hpos_ids = torch.arange(h).unsqueeze(1).expand(-1, w)
            hpos_ids = hpos_ids.reshape(
                h // self.spatial_merge_size,
                self.spatial_merge_size,
                w // self.spatial_merge_size,
                self.spatial_merge_size,
            )
            hpos_ids = hpos_ids.permute(0, 2, 1, 3)
            hpos_ids = hpos_ids.flatten()
            wpos_ids = torch.arange(w).unsqueeze(0).expand(h, -1)
            wpos_ids = wpos_ids.reshape(
                h // self.spatial_merge_size,
                self.spatial_merge_size,
                w // self.spatial_merge_size,
                self.spatial_merge_size,
            )
            wpos_ids = wpos_ids.permute(0, 2, 1, 3)
            wpos_ids = wpos_ids.flatten()
            pos_ids.append(torch.stack([hpos_ids, wpos_ids], dim=-1).repeat(t, 1))
        pos_ids = torch.cat(pos_ids, dim=0)
        return pos_ids

    @merge_with_config_defaults
    @capture_outputs
    @auto_docstring
    def forward(
        self, pixel_values: torch.Tensor, grid_thw: torch.Tensor, **kwargs: Unpack[TransformersKwargs]
    ) -> tuple | BaseModelOutputWithPooling:
        r"""
        pixel_values (`torch.Tensor` of shape `(total_patches, num_channels * patch_size * patch_size)`):
            Packed pixel values.
        grid_thw (`torch.Tensor` of shape `(num_images, 3)`):
            The temporal, height and width of feature shape of each image.

        Returns:
            `torch.Tensor` of shape `(total_patches, hidden_size)`: Hidden states.
        """
        hidden_states = self.patch_embed(pixel_values)
        # (h, w) index per patch; despite the name these are spatial indices.
        image_type_ids = self.rot_pos_emb(grid_thw)
        # Cumulative per-image patch counts delimiting each image in the packed sequence.
        cu_seqlens = torch.repeat_interleave(grid_thw[:, 1] * grid_thw[:, 2], grid_thw[:, 0]).cumsum(
            dim=0,
            dtype=grid_thw.dtype if torch.jit.is_tracing() else torch.int32,
        )
        cu_seqlens = F.pad(cu_seqlens, (1, 0), value=0)
        seqlens = (cu_seqlens[1:] - cu_seqlens[:-1]).tolist()
        # Positions are added here once; blocks below receive only cu_seqlens.
        hidden_states = self.embeddings(
            hidden_states,
            seqlens,
            grid_thw,
            image_type_ids[:, 0].to(hidden_states.device),
            image_type_ids[:, 1].to(hidden_states.device),
        )
        # Transformer blocks (no position_embeddings needed, already added above)
        for blk in self.blocks:
            hidden_states = blk(
                hidden_states,
                cu_seqlens=cu_seqlens,
            )
        return BaseModelOutputWithPooling(last_hidden_state=hidden_states)
class GlmImageTextModel(Glm4vTextModel):
    # Text decoder reused from Glm4v unchanged.
    pass
class GlmImageModel(Glm4vModel):
    """GLM-Image multimodal backbone: vision tower + VQ-VAE image tokenizer + text decoder.

    Source images are encoded by the vision tower, tokenized into discrete VQ ids
    that replace the image placeholder tokens, and the combined sequence is run
    through the language model with 3D (temporal/height/width) position ids.
    """

    def __init__(self, config):
        super().__init__(config)
        self.visual = GlmImageVisionModel._from_config(config.vision_config)
        self.language_model = GlmImageTextModel._from_config(config.text_config)
        self.vqmodel = GlmImageVQVAE._from_config(config.vq_config)
        self.rope_deltas = None  # cache rope_deltas here
        # Per-sample caches for batch processing
        self._cached_decode_position_ids = None  # shape: [batch_size, 3, max_decode_len]
        self._prefill_len = None  # prefill sequence length (same for all samples in batch)
        # Initialize weights and apply final processing
        self.post_init()

    def get_rope_index(
        self,
        input_ids: torch.LongTensor | None = None,
        image_grid_thw: torch.LongTensor | None = None,
        images_per_sample: torch.LongTensor | None = None,
        attention_mask: torch.LongTensor | None = None,
        **kwargs,
    ) -> tuple[torch.Tensor, torch.Tensor]:
        """
        Calculate the 3D rope index for image generation task with full batch support.

        Args:
            input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
                Indices of input sequence tokens in the vocabulary.
            image_grid_thw (`torch.LongTensor` of shape `(total_images_in_batch, 3)`, *optional*):
                The temporal, height and width of feature shape of each image.
                Images are packed across all samples in the batch.
            images_per_sample (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
                Number of images (including target grids) for each sample in the batch.
                Used to split image_grid_thw by sample.
            attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices.

        Returns:
            position_ids (`torch.LongTensor` of shape `(3, batch_size, sequence_length)`):
                Position IDs for temporal, height, and width dimensions.
            mrope_position_deltas (`torch.Tensor` of shape `(batch_size, 1)`):
                Position deltas for multi-modal rotary position embedding.
        """
        batch_size, seq_len = input_ids.shape
        device = input_ids.device
        dtype = input_ids.dtype
        image_start_token_id = self.config.image_start_token_id
        image_end_token_id = self.config.image_end_token_id
        # NOTE(review): initialized with ones, so positions at padded slots stay 1.
        position_ids = torch.ones(3, batch_size, seq_len, dtype=dtype, device=device)
        text_positions = torch.arange(seq_len, device=device)[None, :].repeat(3, 1)
        # Split image_grid_thw by sample if images_per_sample is provided
        if image_grid_thw is not None and images_per_sample is not None:
            grids_per_sample = torch.split(image_grid_thw, images_per_sample.tolist())
        elif image_grid_thw is not None:
            # Fallback: assume all grids belong to first sample (batch_size=1)
            grids_per_sample = [image_grid_thw] * batch_size
        else:
            grids_per_sample = [None] * batch_size
        # Per-sample caches for decode stage
        all_decode_position_ids = []
        for batch_idx in range(batch_size):
            curr_input_ids = input_ids[batch_idx]
            curr_grids = grids_per_sample[batch_idx]
            if attention_mask is not None and attention_mask.shape[1] == seq_len:
                valid_mask = attention_mask[batch_idx] == 1
                curr_input_ids_valid = curr_input_ids[valid_mask]
            else:
                # attention_mask may have different length during assisted decoding
                curr_input_ids_valid = curr_input_ids
                valid_mask = None
            # Find image boundaries in this sample
            image_end_positions = torch.where(curr_input_ids_valid == image_end_token_id)[0]
            image_start_positions = torch.where(curr_input_ids_valid == image_start_token_id)[0] + 1
            num_complete_images = len(image_end_positions)
            current_pos = 0
            prev_image_end = 0
            curr_position_ids = []
            # Process complete images (source images in image-to-image task)
            for img_idx, (start, end) in enumerate(zip(image_start_positions, image_end_positions)):
                if curr_grids is None or img_idx >= len(curr_grids):
                    break
                # Text tokens before this image
                llm_pos_length = start - prev_image_end
                llm_position_ids = text_positions[:, current_pos : current_pos + llm_pos_length].to(device=device)
                current_pos += llm_position_ids.shape[-1]
                # Image tokens with 2D spatial encoding
                # For an image with height H and width W:
                # - position_width cycles [0, 1, ..., W-1] for each row, repeated H times
                # - position_height stays constant per row, [0]*W, [1]*W, ..., [H-1]*W
                vision_position_ids = self.get_vision_position_ids(
                    start_position=current_pos, grid_thw=curr_grids[img_idx], device=device
                )
                # Advance by the larger spatial extent so the next span starts past the image.
                current_pos += max(curr_grids[img_idx][1], curr_grids[img_idx][2])
                prev_image_end = end
                curr_position_ids.append(torch.cat([llm_position_ids, vision_position_ids], dim=-1))
            # Remaining text tokens (including the final image_start token for generation)
            end_position = len(curr_input_ids_valid) - prev_image_end
            llm_position_ids = text_positions[:, current_pos : current_pos + end_position].to(device=device)
            current_pos += llm_position_ids.shape[-1]
            curr_position_ids.append(llm_position_ids)
            # Concatenate all position ids for this sample
            curr_position_ids = torch.cat(curr_position_ids, dim=-1)
            # Store in the main position_ids tensor
            if valid_mask is not None:
                position_ids[:, batch_idx, valid_mask] = curr_position_ids
            else:
                position_ids[:, batch_idx, :] = curr_position_ids
            # Build decode position ids for this sample
            if curr_grids is not None and len(curr_grids) > 0:
                # Grids without a matching image_end token are targets to be generated.
                num_decode_grids = len(curr_grids) - num_complete_images
                num_decode_grids = max(num_decode_grids, 0)
                decode_pos = current_pos
                decode_temporal_list = []
                decode_height_list = []
                decode_width_list = []
                curr_grids_list = curr_grids.tolist()
                for i in range(1, num_decode_grids + 1):
                    grid_idx = -i
                    h = curr_grids_list[grid_idx][1]
                    w = curr_grids_list[grid_idx][2]
                    total_tokens = h * w
                    h_indices = torch.arange(h, device=device).unsqueeze(1).expand(h, w).flatten()
                    w_indices = torch.arange(w, device=device).unsqueeze(0).expand(h, w).flatten()
                    decode_temporal_list.append(
                        torch.full((total_tokens,), decode_pos, device=device, dtype=torch.long)
                    )
                    decode_height_list.append(decode_pos + h_indices)
                    decode_width_list.append(decode_pos + w_indices)
                    decode_pos = decode_pos + max(h, w)
                # End marker
                decode_temporal_list.append(torch.tensor([decode_pos], device=device, dtype=torch.long))
                decode_height_list.append(torch.tensor([decode_pos], device=device, dtype=torch.long))
                decode_width_list.append(torch.tensor([decode_pos], device=device, dtype=torch.long))
                sample_decode_pos_ids = torch.stack(
                    [
                        torch.cat(decode_temporal_list, dim=0),
                        torch.cat(decode_height_list, dim=0),
                        torch.cat(decode_width_list, dim=0),
                    ],
                    dim=0,
                )
                all_decode_position_ids.append(sample_decode_pos_ids)
        # Store prefill length (same for all samples since input_ids is padded to same length)
        self._prefill_len = seq_len
        # Pad decode position ids to same length and stack
        if all_decode_position_ids:
            max_decode_len = max(x.shape[1] for x in all_decode_position_ids)
            padded_decode_pos_ids = [
                F.pad(pos_ids, (0, max_decode_len - pos_ids.shape[1]), mode="replicate")
                for pos_ids in all_decode_position_ids
            ]
            self._cached_decode_position_ids = torch.stack(padded_decode_pos_ids, dim=0)  # [batch, 3, max_decode_len]
        else:
            self._cached_decode_position_ids = None
        mrope_position_deltas = torch.zeros([batch_size, 1], dtype=dtype, device=device)
        return position_ids, mrope_position_deltas

    def get_image_tokens(
        self,
        hidden_states: torch.FloatTensor,
        image_grid_thw: torch.LongTensor,
    ) -> torch.LongTensor:
        """
        Tokenizes image features into discrete tokens with VQVAE module.

        Args:
            hidden_states (`torch.FloatTensor` of shape `(total_patches, hidden_size)`):
                The packed image features from vision encoder.
            image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`):
                The temporal, height and width of feature shape of each image.

        Returns:
            image_tokens (`torch.LongTensor` of shape `(total_patches,)`):
                Discrete token indices from the VQVAE codebook.
        """
        hidden_size = hidden_states.shape[-1]
        split_sizes = (image_grid_thw.prod(dim=-1)).tolist()
        hidden_states_list = torch.split(hidden_states, split_sizes, dim=0)
        all_image_toks = []
        for i, hs in enumerate(hidden_states_list):
            grid_t, grid_h, grid_w = image_grid_thw[i].tolist()
            # Reshape the packed patches into [T, C, H, W] for the VQ-VAE conv stack.
            hs = hs.view(grid_t, grid_h, grid_w, hidden_size)
            hs = hs.permute(0, 3, 1, 2).contiguous()
            vqmodel_outputs: GlmImageVQVAEModelOutput = self.vqmodel.encode(hs)
            all_image_toks.append(vqmodel_outputs.image_tokens)
        return torch.cat(all_image_toks, dim=0)

    def get_video_features(self):
        # GLM-Image is image-only; videos are unsupported by design.
        raise AttributeError("Not needed for GlmImage")

    @can_return_tuple
    @auto_docstring
    def get_image_features(
        self,
        pixel_values: torch.FloatTensor,
        image_grid_thw: torch.LongTensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple | BaseModelOutputWithPooling:
        r"""
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):
            The tensors corresponding to the input images.
        image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*):
            The temporal, height and width of feature shape of each image in LLM.
        """
        pixel_values = pixel_values.type(self.visual.dtype)
        vision_outputs = self.visual(pixel_values, grid_thw=image_grid_thw, return_dict=True, **kwargs)
        # Split the packed hidden states back into one tensor per image.
        split_sizes = (image_grid_thw.prod(-1) // self.visual.spatial_merge_size**2).tolist()
        image_embeds = torch.split(vision_outputs.last_hidden_state, split_sizes)
        vision_outputs.pooler_output = image_embeds
        return vision_outputs

    def get_placeholder_mask(
        self,
        input_ids: torch.LongTensor,
        image_ids: torch.LongTensor,
    ):
        """
        Replace image placeholder tokens in input_ids with actual image token ids from VQVAE.

        Args:
            input_ids (`torch.LongTensor` of shape `(batch_size, seq_len)`):
                Input token ids with image placeholders.
            image_ids (`torch.LongTensor` of shape `(num_images, num_tokens_per_image)` or flattened):
                Discrete token indices from the VQVAE codebook.

        Returns:
            special_image_mask (`torch.LongTensor` of shape `(batch_size, seq_len)`):
                Mask indicating positions in input ids that will be replaced by actual image tokens.

        Raises:
            ValueError: if the number of placeholders and VQ tokens disagree.
        """
        special_image_mask = input_ids == self.config.image_token_id
        n_placeholder_tokens = special_image_mask.sum()
        n_image_tokens = image_ids.shape[0]
        if n_placeholder_tokens != n_image_tokens:
            raise ValueError(
                f"Number of image placeholder tokens ({n_placeholder_tokens.item()}) does not match "
                f"number of image tokens from VQVAE ({n_image_tokens})"
            )
        return special_image_mask

    def compute_3d_position_ids(
        self,
        input_ids: torch.Tensor | None,
        image_grid_thw: torch.Tensor | None,
        images_per_sample: torch.Tensor | None,
        inputs_embeds: torch.Tensor | None,
        attention_mask: torch.Tensor | None,
        past_key_values: torch.Tensor | None,
        cache_position: torch.Tensor | None,
    ) -> torch.Tensor | None:
        """Compute (or look up cached) 3D mRoPE position ids for prefill and decode."""
        past_key_values_length = 0 if past_key_values is None else past_key_values.get_seq_length()
        can_compute_mrope = input_ids is not None and image_grid_thw is not None
        if can_compute_mrope and (self.rope_deltas is None or past_key_values_length == 0):
            # Prefill: build positions from scratch and populate the decode cache.
            position_ids, rope_deltas = self.get_rope_index(
                input_ids,
                image_grid_thw=image_grid_thw,
                attention_mask=attention_mask,
                images_per_sample=images_per_sample,
            )
            self.rope_deltas = rope_deltas
        # Use pre-calculated rope-deltas to infer correct 3D position ids
        elif self.rope_deltas is not None:
            batch_size, seq_length, _ = inputs_embeds.shape
            if self._cached_decode_position_ids is not None:
                # Decode: index into the cache by how far past prefill we are.
                step = cache_position[0].item() - self._prefill_len
                position_ids = self._cached_decode_position_ids[:, :, step : step + seq_length].permute(1, 0, 2)
            else:
                position_ids = cache_position.view(1, 1, -1).repeat(3, batch_size, 1)
        else:
            # Can't build correct 3D positions. Let the model infer it from `cache_position`
            position_ids = None
        return position_ids

    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: Cache | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        pixel_values: torch.Tensor | None = None,
        image_grid_thw: torch.LongTensor | None = None,
        images_per_sample: torch.LongTensor | None = None,
        rope_deltas: torch.LongTensor | None = None,
        cache_position: torch.LongTensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple | GlmImageModelOutputWithPast:
        r"""
        image_grid_thw (`torch.LongTensor` of shape `(total_images_in_batch, 3)`, *optional*):
            The temporal, height and width of feature shape of each image in LLM.
            Images are packed across all samples in the batch.
        images_per_sample (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Number of images (including target grids) for each sample in the batch.
        rope_deltas (`torch.LongTensor` of shape `(batch_size, )`, *optional*):
            The rope index difference between sequence length and multimodal rope.
        """
        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
        batch_size = input_ids.shape[0] if input_ids is not None else inputs_embeds.shape[0]
        if pixel_values is not None:
            # Process source images (image-to-image mode)
            # Source images are identified by counting image_end_token_id in input_ids
            # Note: We must exclude padding tokens since pad_token_id == image_end_token_id
            if images_per_sample is not None:
                grids_per_sample = torch.split(image_grid_thw, images_per_sample.tolist())
                # Create mask for non-padding tokens (attention_mask=1 means non-padding)
                # Handle 4D attention mask (from static cache) by extracting diagonal
                if attention_mask is not None and attention_mask.ndim == 4:
                    non_pad_mask = torch.diagonal(attention_mask[:, 0], dim1=1, dim2=2)
                    if non_pad_mask.dtype.is_floating_point:
                        # Additive float masks use finfo.min at masked slots; map back to {0, 1}.
                        non_pad_mask = non_pad_mask / torch.finfo(non_pad_mask.dtype).min
                        non_pad_mask = (1.0 - non_pad_mask).int()
                    # Only keep columns matching input_ids length
                    non_pad_mask = non_pad_mask[:, -input_ids.shape[1] :]
                else:
                    non_pad_mask = attention_mask if attention_mask is not None else torch.ones_like(input_ids)
                source_grids_list = []
                is_image_end = input_ids == self.config.image_end_token_id
                is_non_pad = non_pad_mask == 1
                num_source_per_sample = (is_image_end & is_non_pad).sum(dim=1).tolist()
                for sample_idx in range(batch_size):
                    num_source = num_source_per_sample[sample_idx]
                    if num_source > 0:
                        source_grids_list.append(grids_per_sample[sample_idx][:num_source])
                if len(source_grids_list) == 0:
                    raise ValueError(
                        "pixel_values provided but no source images found in input_ids. "
                        "Ensure input_ids contains image_end_token_id for each source image."
                    )
                source_grids = torch.cat(source_grids_list, dim=0)
            else:
                # Fallback for batch_size=1: all but last grid are source images
                source_grids = image_grid_thw[:-1]
            image_features = self.get_image_features(pixel_values, source_grids, return_dict=True)
            image_embeds = torch.cat(image_features.pooler_output, dim=0)
            image_ids = self.get_image_tokens(image_embeds, source_grids)
            image_ids = image_ids.view(-1).to(input_ids.device)
            # Splice the discrete VQ ids into the placeholder slots of input_ids.
            special_image_mask = self.get_placeholder_mask(input_ids, image_ids)
            input_ids = input_ids.masked_scatter(special_image_mask, image_ids)
        if inputs_embeds is None:
            inputs_embeds = self.get_input_embeddings()(input_ids)
        if position_ids is None:
            position_ids = self.compute_3d_position_ids(
                input_ids=input_ids,
                image_grid_thw=image_grid_thw,
                images_per_sample=images_per_sample,
                inputs_embeds=inputs_embeds,
                attention_mask=attention_mask,
                past_key_values=past_key_values,
                cache_position=cache_position,
            )
        outputs = self.language_model(
            input_ids=None,
            position_ids=position_ids,
            attention_mask=attention_mask,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            cache_position=cache_position,
            **kwargs,
        )
        return GlmImageModelOutputWithPast(
            last_hidden_state=outputs.last_hidden_state,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            rope_deltas=self.rope_deltas,
        )
class GlmImageCausalLMOutputWithPast(Glm4vCausalLMOutputWithPast):
    """Output type for `GlmImageForConditionalGeneration`.

    Identical fields to `Glm4vCausalLMOutputWithPast` (loss, logits, past_key_values,
    hidden_states, attentions, rope_deltas); subclassed only to give the GLM-Image
    family its own output class name.
    """

    pass
class GlmImageForConditionalGeneration(GlmImagePreTrainedModel, GenerationMixin):
    """GLM-Image model with a language-modeling head, used to generate discrete image tokens.

    Wraps [`GlmImageModel`] and adds an `lm_head` that projects hidden states to the
    *vision* vocabulary (`text_config.vision_vocab_size`), since generation targets
    image tokens rather than text tokens.
    """

    _checkpoint_conversion_mapping = {}
    _tied_weights_keys = {}
    # Reference: fix gemma3 grad acc #37208
    accepts_loss_kwargs = False
    base_model_prefix = "model"
    config: GlmImageConfig

    def __init__(self, config):
        super().__init__(config)
        self.model = GlmImageModel(config)
        # Head maps to the vision vocabulary (image tokens), not the text vocabulary.
        self.lm_head = nn.Linear(config.text_config.hidden_size, config.text_config.vision_vocab_size, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    @auto_docstring
    def get_image_features(
        self,
        pixel_values: torch.FloatTensor,
        image_grid_thw: torch.LongTensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple | BaseModelOutputWithPooling:
        r"""
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):
            The tensors corresponding to the input images.
        image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*):
            The temporal, height and width of feature shape of each image in LLM.
        """
        # Thin delegation to the base model's vision tower.
        return self.model.get_image_features(pixel_values, image_grid_thw, **kwargs)

    def get_image_tokens(self, hidden_states: torch.FloatTensor, image_grid_thw: torch.LongTensor | None = None):
        # Delegates to the base model, which maps vision hidden states to discrete image-token ids.
        return self.model.get_image_tokens(hidden_states, image_grid_thw)

    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: Cache | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        labels: torch.LongTensor | None = None,
        pixel_values: torch.Tensor | None = None,
        image_grid_thw: torch.LongTensor | None = None,
        images_per_sample: torch.LongTensor | None = None,
        cache_position: torch.LongTensor | None = None,
        logits_to_keep: int | torch.Tensor = 0,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple | GlmImageCausalLMOutputWithPast:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
        image_grid_thw (`torch.LongTensor` of shape `(total_images_in_batch, 3)`, *optional*):
            The temporal, height and width of feature shape of each image in LLM.
            Images are packed across all samples in the batch.
        images_per_sample (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Number of images (including target grids) for each sample in the batch.

        Example:

        ```python
        >>> from PIL import Image
        >>> import httpx
        >>> from io import BytesIO
        >>> from transformers import AutoProcessor, GlmImageForConditionalGeneration

        >>> model = GlmImageForConditionalGeneration.from_pretrained("zai-org/GLM-Image")
        >>> processor = AutoProcessor.from_pretrained("zai-org/GLM-Image")

        >>> messages = [
            {
                "role": "user",
                "content": [
                    {"type": "image"},
                    {"type": "text", "text": "Add a truck of this photo.<sop>28 40<eop>"},
                ],
            },
        ]
        >>> url = "https://www.ilankelman.org/stopsigns/australia.jpg"
        >>> with httpx.stream("GET", url) as response:
        ...     image = Image.open(BytesIO(response.read()))

        >>> text = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
        >>> inputs = processor(text=[text], images=[image])

        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "The image shows a street scene with a red stop sign in the foreground. In the background, there is a large red gate with Chinese characters ..."
        ```"""
        outputs = self.model(
            input_ids=input_ids,
            pixel_values=pixel_values,
            image_grid_thw=image_grid_thw,
            images_per_sample=images_per_sample,
            position_ids=position_ids,
            attention_mask=attention_mask,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            cache_position=cache_position,
            **kwargs,
        )

        hidden_states = outputs[0]
        # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
        slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
        logits = self.lm_head(hidden_states[:, slice_indices, :])

        loss = None
        if labels is not None:
            # NOTE(review): logits live in the vision vocabulary (`vision_vocab_size`, see lm_head)
            # while the loss is passed `text_config.vocab_size` — confirm the two are meant to match.
            loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.text_config.vocab_size)

        return GlmImageCausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            rope_deltas=outputs.rope_deltas,
        )

    def prepare_inputs_for_generation(
        self,
        input_ids,
        past_key_values=None,
        attention_mask=None,
        inputs_embeds=None,
        cache_position=None,
        position_ids=None,
        use_cache=True,
        pixel_values=None,
        image_grid_thw=None,
        images_per_sample=None,
        is_first_iteration=False,
        **kwargs,
    ):
        model_inputs = super().prepare_inputs_for_generation(
            input_ids,
            past_key_values=past_key_values,
            attention_mask=attention_mask,
            inputs_embeds=inputs_embeds,
            cache_position=cache_position,
            position_ids=position_ids,
            pixel_values=pixel_values,
            image_grid_thw=image_grid_thw,
            is_first_iteration=is_first_iteration,
            use_cache=use_cache,
            **kwargs,
        )

        # Force the model to recompute 3D RoPE position ids each step
        # (GlmImageModel.forward derives them when position_ids is None).
        model_inputs["position_ids"] = None
        model_inputs["images_per_sample"] = images_per_sample

        # Vision inputs are only consumed on the first forward pass; once a cache
        # exists the image context is already encoded, so drop pixel_values.
        if not is_first_iteration and use_cache:
            model_inputs["pixel_values"] = None

        return model_inputs

    def _get_image_nums(
        self,
        input_ids: torch.LongTensor | None,
    ) -> torch.Tensor:
        """
        Get the number of images for each sample.

        For GLM-Image, only input_ids allow us to get the number of images:
        each image span opens with `image_start_token_id`, so we count those.

        Returns:
            image_counts (`torch.LongTensor` of shape `(batch_size,)`)
        """
        is_image = input_ids == self.config.image_start_token_id
        return is_image.sum(dim=1)

    def _expand_inputs_for_generation(
        self,
        expand_size: int = 1,
        is_encoder_decoder: bool = False,
        input_ids: torch.LongTensor | None = None,
        **model_kwargs,
    ) -> tuple[torch.LongTensor, dict[str, Any]]:
        """Expand inputs `expand_size` times for beam search / multi-return generation."""
        # Overwritten -- Support for expanding tensors without a batch size dimension
        # e.g., pixel_values, image_grid_thw
        # pixel_values.shape[0] is sum(seqlen_images for samples)
        # image_grid_thw.shape[0] is sum(num_images for samples)
        if expand_size == 1:
            return input_ids, model_kwargs

        visual_keys = ["pixel_values", "image_grid_thw", "images_per_sample"]

        def _expand_dict_for_generation_visual(dict_to_expand):
            # Expands the packed (batch-dimension-less) visual tensors sample by sample.
            image_grid_thw = model_kwargs.get("image_grid_thw", None)
            if image_grid_thw is None:
                return dict_to_expand
            images_per_sample = model_kwargs.get("images_per_sample", None)

            # Use images_per_sample if available
            if images_per_sample is not None:
                image_nums = images_per_sample.tolist()
            elif input_ids is not None:
                # Try to infer from image_grid_thw / batch_size
                batch_size = input_ids.shape[0]
                total_grids = image_grid_thw.shape[0]
                if total_grids % batch_size == 0:
                    grids_per_sample = total_grids // batch_size
                    image_nums = [grids_per_sample] * batch_size
                else:
                    # Cannot evenly distribute grids - fall back to simple repeat_interleave
                    # This handles test cases where image_grid_thw has (batch_size + 1) rows
                    dict_to_expand["image_grid_thw"] = image_grid_thw.repeat_interleave(expand_size, dim=0)
                    if dict_to_expand.get("pixel_values") is not None:
                        dict_to_expand["pixel_values"] = dict_to_expand["pixel_values"].repeat_interleave(
                            expand_size, dim=0
                        )
                    return dict_to_expand
            else:
                # NOTE(review): this branch is only reached when `input_ids` is None, yet
                # `_get_image_nums(None)` (and the line below) would fail on a None input —
                # confirm this path is unreachable in practice.
                image_nums = self._get_image_nums(input_ids).tolist()

            # Get source image counts per sample from image_end_token_id count
            source_image_nums = (input_ids == self.config.image_end_token_id).sum(dim=1).tolist()

            def _repeat_interleave_samples(x, lengths, repeat_times):
                # Split the packed tensor into per-sample chunks, repeat each chunk,
                # and re-concatenate — preserving per-sample contiguity.
                samples = torch.split(x, lengths)
                repeat_args = [repeat_times] + [1] * (x.dim() - 1)
                result = torch.cat([sample.repeat(*repeat_args) for sample in samples], dim=0)
                return result

            for key in dict_to_expand:
                if key == "pixel_values":
                    # Split images into samples based on source image counts
                    if sum(source_image_nums) > 0:
                        # Split grids by sample to compute pixel counts
                        grids_per_sample = torch.split(image_grid_thw, image_nums)
                        all_pixel_counts = image_grid_thw.prod(dim=1)
                        pixel_counts_per_sample = torch.split(all_pixel_counts, image_nums)

                        # Build source mask and compute per-sample source pixel counts in one sync
                        source_pixel_counts = torch.zeros(len(grids_per_sample), device=image_grid_thw.device)
                        for batch_idx in range(len(grids_per_sample)):
                            num_source = source_image_nums[batch_idx]
                            if num_source > 0:
                                source_pixel_counts[batch_idx] = pixel_counts_per_sample[batch_idx][:num_source].sum()
                        lengths = source_pixel_counts.to(torch.int64).tolist()
                        dict_to_expand[key] = _repeat_interleave_samples(
                            dict_to_expand[key], lengths=lengths, repeat_times=expand_size
                        )
                elif key == "image_grid_thw":
                    # Expand all grids (source + target) per sample
                    dict_to_expand[key] = _repeat_interleave_samples(
                        dict_to_expand[key], lengths=image_nums, repeat_times=expand_size
                    )
                elif key == "images_per_sample":
                    # Simply repeat the counts
                    if dict_to_expand.get(key) is not None:
                        dict_to_expand[key] = dict_to_expand[key].repeat_interleave(expand_size, dim=0)
            return dict_to_expand

        def _expand_dict_for_generation(dict_to_expand):
            # Regular batched tensors: plain repeat_interleave along dim 0.
            for key in dict_to_expand:
                if (
                    key != "cache_position"
                    and dict_to_expand[key] is not None
                    and isinstance(dict_to_expand[key], torch.Tensor)
                    and key not in visual_keys
                ):
                    dict_to_expand[key] = dict_to_expand[key].repeat_interleave(expand_size, dim=0)
            return dict_to_expand

        model_kwargs = _expand_dict_for_generation_visual(model_kwargs)

        if input_ids is not None:
            input_ids = input_ids.repeat_interleave(expand_size, dim=0)

        model_kwargs = _expand_dict_for_generation(model_kwargs)

        if is_encoder_decoder:
            if model_kwargs.get("encoder_outputs") is None:
                raise ValueError("If `is_encoder_decoder` is True, make sure that `encoder_outputs` is defined.")
            model_kwargs["encoder_outputs"] = _expand_dict_for_generation(model_kwargs["encoder_outputs"])

        return input_ids, model_kwargs
def smart_resize(
    height: int,
    width: int,
    factor: int = 16,
    min_pixels: int = 512 * 512,
    max_pixels: int = 2048 * 2048,
) -> tuple[int, int]:
    """Rescale ``(height, width)`` so that both sides are multiples of ``factor``.

    The side lengths are nudged toward the range implied by ``min_pixels`` /
    ``max_pixels`` (square-root edge bounds) while roughly preserving aspect ratio.

    Raises:
        ValueError: if either side is smaller than ``factor``, or the aspect
            ratio exceeds 4.
    """
    if height < factor or width < factor:
        raise ValueError(f"height:{height} or width:{width} must be larger than factor:{factor}")
    aspect_ratio = max(height, width) / min(height, width)
    if aspect_ratio > 4:
        raise ValueError(f"absolute aspect ratio must be smaller than 4, got {aspect_ratio}")

    shortest_edge = int(round(math.sqrt(min_pixels)))
    longest_edge = int(round(math.sqrt(max_pixels)))

    # Upscale when the short side falls below the minimum edge length.
    scale = shortest_edge / min(height, width) if min(height, width) < shortest_edge else 1.0

    # Cap the long side at the maximum edge length.
    if max(height, width) * scale > longest_edge:
        scale = longest_edge / max(height, width)
        # NOTE(review): the halving below shrinks oversized inputs beyond the cap;
        # kept as-is to preserve the existing behavior — confirm it is intentional.
        height //= 2
        width //= 2

    h_bar = max(factor, int(round(height * scale / factor)) * factor)
    w_bar = max(factor, int(round(width * scale / factor)) * factor)

    # Final guard: if rounding pushed a side past the cap, floor both sides back down.
    if max(h_bar, w_bar) > longest_edge:
        beta = max(h_bar, w_bar) / longest_edge
        h_bar = max(factor, int(math.floor((h_bar / beta) / factor)) * factor)
        w_bar = max(factor, int(math.floor((w_bar / beta) / factor)) * factor)
    return h_bar, w_bar
class GlmImageImageProcessor(Qwen2VLImageProcessor):
    # Same preprocessing as Qwen2-VL; only the declared model inputs differ,
    # adding `images_per_sample` used by the GLM-Image processor/model.
    model_input_names = ["pixel_values", "image_grid_thw", "images_per_sample"]
class GlmImageImageProcessorFast(Qwen2VLImageProcessorFast):
    # Fast (torchvision-backed) variant; mirrors GlmImageImageProcessor's
    # model inputs, including the extra `images_per_sample`.
    model_input_names = ["pixel_values", "image_grid_thw", "images_per_sample"]
class GlmImageImagesKwargs(ImagesKwargs, total=False):
    """
    target_h (`int`):
        Height of the target image to be generated.
    target_w (`int`):
        Width of the target image to be generated.
    """

    # Target output resolution in pixels; the processor floors these to multiples of 32
    # (see `GlmImageProcessor._build_prompt_with_target_shape`).
    target_h: int
    target_w: int
class GlmImageProcessorKwargs(Qwen2VLProcessorKwargs):
    # Extends the Qwen2-VL processor kwargs with GLM-Image target-size options.
    images_kwargs: GlmImageImagesKwargs
    _defaults = {
        "text_kwargs": {
            "padding": False,
            "return_mm_token_type_ids": False,
        },
        # Default generated-image size (portrait 768x1152, both multiples of 32).
        "images_kwargs": {
            "target_h": 1152,
            "target_w": 768,
        },
    }
class GlmImageProcessor(ProcessorMixin):
    r"""
    Constructs a GLM-Image processor which wraps a GLM-Image image processor and a GLM-Image tokenizer into a single processor.

    See [`~GlmImageProcessor.__call__`] and [`~GlmImageProcessor.decode`] for more information.

    Args:
        image_processor ([`GlmImageImageProcessor`], *optional*):
            The image processor is a required input.
        tokenizer ([`PreTrainedTokenizerFast`], *optional*):
            The tokenizer is a required input.
        chat_template (`str`, *optional*): A Jinja template which will be used to convert lists of messages
            in a chat into a tokenizable string.
    """

    model_input_names = ["input_ids", "attention_mask", "pixel_values", "image_grid_thw", "images_per_sample"]

    def __init__(self, image_processor=None, tokenizer=None, chat_template=None, **kwargs):
        # Cache the special tokens (and the image-token id) from the tokenizer up front.
        self.image_token = tokenizer.image_token
        self.grid_bos_token = tokenizer.grid_bos_token
        self.grid_eos_token = tokenizer.grid_eos_token
        self.bos_token = tokenizer.bos_token
        self.image_token_id = tokenizer.convert_tokens_to_ids(self.image_token)
        super().__init__(image_processor, tokenizer, chat_template=chat_template)

    def __call__(
        self,
        images: ImageInput | None = None,
        text: TextInput | PreTokenizedInput | list[TextInput] | list[PreTokenizedInput] = None,
        **kwargs: Unpack[GlmImageProcessorKwargs],
    ) -> BatchFeature:
        """
        Main method to prepare for the model one or several sequences(s) and image(s). This method forwards the `text`
        and `kwargs` arguments to PreTrainedTokenizerFast's [`~PreTrainedTokenizerFast.__call__`] if `text` is not `None` to encode
        the text.

        Args:
            images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`):
                The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
                tensor. Both channels-first and channels-last formats are supported.
            text (`str`, `List[str]`, `List[List[str]]`):
                The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
                (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
                `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
            return_tensors (`str` or [`~utils.TensorType`], *optional*):
                If set, will return tensors of a particular framework. Acceptable values are:
                - `'pt'`: Return PyTorch `torch.Tensor` objects.
                - `'np'`: Return NumPy `np.ndarray` objects.

        Returns:
            [`BatchFeature`]: A [`BatchFeature`] with the following fields:
            - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
            - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
              `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
              `None`).
            - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
            - **image_grid_thw** -- List of image 3D grid in LLM. Returned when `images` is not `None`.
        """
        output_kwargs = self._merge_kwargs(
            GlmImageProcessorKwargs,
            tokenizer_init_kwargs=self.tokenizer.init_kwargs,
            **kwargs,
        )
        # Target size is consumed here; it must not be forwarded to the image processor.
        target_h = output_kwargs["images_kwargs"].pop("target_h", None)
        target_w = output_kwargs["images_kwargs"].pop("target_w", None)

        # No input images => text-to-image (t2i) mode; otherwise image-to-image (i2i) mode.
        is_text_to_image = images is None
        if images is not None:
            image_inputs = self.image_processor(images=images, **output_kwargs["images_kwargs"])
            image_grid_thw = image_inputs["image_grid_thw"]
        else:
            image_inputs = {}
            image_grid_thw = None

        # Handle text=None case (image-only processing)
        if text is None:
            if images is None:
                raise ValueError("You must provide at least one of `text` or `images`.")
            return image_inputs

        if not isinstance(text, list):
            text = [text]

        batch_size = len(text)
        text = text.copy()  # below lines change text in-place

        # Count images per sample by counting image tokens in each text
        images_per_sample = []
        for i in range(batch_size):
            images_per_sample.append(text[i].count(self.image_token))

        # Replace image tokens with the correct number of placeholder tokens
        if not is_text_to_image:
            index = 0
            for i in range(batch_size):
                while self.image_token in text[i]:
                    grid = image_grid_thw[index]
                    # One placeholder per spatial position: grid[1] * grid[2] == h * w
                    # (the temporal dimension is not expanded here).
                    num_image_tokens = int(grid[1] * grid[2])
                    text[i] = text[i].replace(self.image_token, "<|placeholder|>" * num_image_tokens, 1)
                    index += 1
                text[i] = text[i].replace("<|placeholder|>", self.image_token)

        # Build prompt with target shape and combine grids in a single loop
        # Format: [sample0_source_grids..., sample0_target_grids, sample1_source_grids..., sample1_target_grids, ...]
        # Note: In i2i mode, batches are homogeneous (same number of source images per sample)
        num_source_images = images_per_sample[0] if images_per_sample else 0

        # Validate homogeneity for i2i mode
        if not is_text_to_image and images_per_sample and len(set(images_per_sample)) != 1:
            raise ValueError(
                f"In image-to-image mode, all samples must have the same number of source images. "
                f"Got different counts: {images_per_sample}"
            )

        all_grids = []
        for i in range(batch_size):
            text[i], token_h, token_w, prev_h, prev_w = self._build_prompt_with_target_shape(
                text[i], height=target_h, width=target_w, is_text_to_image=is_text_to_image
            )
            # Add source grids for this sample (i2i mode only)
            if not is_text_to_image and num_source_images > 0:
                start_idx = i * num_source_images
                all_grids.append(image_grid_thw[start_idx : start_idx + num_source_images])
            # Add target grid for this sample
            all_grids.append(
                self._build_target_image_grid_thw(
                    token_h=token_h,
                    token_w=token_w,
                    prev_token_h=prev_h,
                    prev_token_w=prev_w,
                    is_text_to_image=is_text_to_image,
                )
            )
        image_inputs["image_grid_thw"] = torch.cat(all_grids, dim=0)

        # Store images_per_sample for later use (add target images count)
        # Each sample will have: source_images + target_images (typically 2 for t2i, 1 for i2i)
        num_target_grids = 2 if is_text_to_image else 1
        image_inputs["images_per_sample"] = torch.tensor(
            [num_source_images + num_target_grids] * batch_size, dtype=torch.long
        )

        return_tensors = output_kwargs["text_kwargs"].pop("return_tensors", None)
        return_mm_token_type_ids = output_kwargs["text_kwargs"].pop("return_mm_token_type_ids", False)
        text_inputs = self.tokenizer(text, **output_kwargs["text_kwargs"])
        self._check_special_mm_tokens(text, text_inputs, modalities=["image"])

        if return_mm_token_type_ids:
            # Token-type 1 marks image-token positions, 0 marks text.
            array_ids = np.array(text_inputs["input_ids"])
            mm_token_type_ids = np.zeros_like(text_inputs["input_ids"])
            mm_token_type_ids[array_ids == self.image_token_id] = 1
            text_inputs["mm_token_type_ids"] = mm_token_type_ids.tolist()

        return BatchFeature(data={**text_inputs, **image_inputs}, tensor_type=return_tensors)

    def _build_prompt_with_target_shape(
        self,
        prompt: str,
        height: int,
        width: int,
        is_text_to_image: bool,
    ) -> tuple[str, int, int, int, int]:
        """Append target-grid (and, in t2i mode, preview-grid) markers to `prompt`.

        Returns `(expanded_prompt, token_h, token_w, prev_token_h, prev_token_w)`:
        the target size in 32-pixel token units plus the dimensions of a small
        aspect-ratio-matched preview grid.
        """
        # Each image token covers a 32x32 pixel area; floor the target to that grid.
        factor = 32
        height = (height // factor) * factor
        width = (width // factor) * factor
        token_h = height // factor
        token_w = width // factor

        # Preview grid: roughly (factor // 2)^2 tokens distributed to match the aspect ratio.
        ratio = token_h / token_w
        prev_token_h = int(math.sqrt(ratio) * (factor // 2))
        prev_token_w = int(math.sqrt(1 / ratio) * (factor // 2))

        if is_text_to_image:
            # t2i: declare both the full target grid and the preview grid, then <bos>.
            expanded_prompt = f"{prompt}{self.grid_bos_token}{token_h} {token_w}{self.grid_eos_token}{self.grid_bos_token}{prev_token_h} {prev_token_w}{self.grid_eos_token}{self.bos_token}"
        else:
            # i2i: only the full target grid is declared.
            expanded_prompt = f"{prompt}{self.grid_bos_token}{token_h} {token_w}{self.grid_eos_token}{self.bos_token}"
        return expanded_prompt, token_h, token_w, prev_token_h, prev_token_w

    @staticmethod
    def _build_target_image_grid_thw(
        token_h: int,
        token_w: int,
        prev_token_h: int,
        prev_token_w: int,
        is_text_to_image: bool = True,
    ):
        """Build the `(t, h, w)` grid rows for the image(s) to be generated."""
        if is_text_to_image:
            # Text-to-image: 2 target grids (large + small preview)
            return torch.tensor(
                [
                    [1, token_h, token_w],
                    [1, prev_token_h, prev_token_w],
                ],
            )
        else:
            # Image-to-image: 1 target grid only
            return torch.tensor(
                [
                    [1, token_h, token_w],
                ],
            )
# Public symbols re-exported by `transformers.models.glm_image`.
__all__ = [
    "GlmImageVQVAEConfig",
    "GlmImageVisionConfig",
    "GlmImageTextConfig",
    "GlmImageConfig",
    "GlmImagePreTrainedModel",
    "GlmImageVQVAE",
    "GlmImageVisionModel",
    "GlmImageTextModel",
    "GlmImageModel",
    "GlmImageForConditionalGeneration",
    "GlmImageImageProcessor",
    "GlmImageImageProcessorFast",
    "GlmImageProcessor",
]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/glm_image/modular_glm_image.py",
"license": "Apache License 2.0",
"lines": 1396,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:tests/models/glm_image/test_modeling_glm_image.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch GLM-Image model."""
import unittest
import pytest
from parameterized import parameterized
from transformers import (
GlmImageConfig,
GlmImageForConditionalGeneration,
GlmImageModel,
GlmImageProcessor,
is_torch_available,
set_seed,
)
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_MAPPING_NAMES
from transformers.testing_utils import (
Expectations,
cleanup,
require_deterministic_for_xpu,
require_flash_attn,
require_torch,
require_torch_accelerator,
run_first,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import (
TEST_EAGER_MATCHES_SDPA_INFERENCE_PARAMETERIZATION,
ModelTesterMixin,
floats_tensor,
ids_tensor,
)
if is_torch_available():
import torch
class GlmImageVisionText2TextModelTester:
    """Builds a tiny GLM-Image configuration and matching dummy inputs for the common model tests."""

    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=7,
        num_channels=3,
        ignore_index=-100,
        image_size=128,
        image_start_token_id=50,
        image_end_token_id=51,
        image_token_id=52,
        is_training=True,
        text_config={
            "vocab_size": 99,
            "vision_vocab_size": 99,
            "hidden_size": 16,
            "intermediate_size": 22,
            "num_hidden_layers": 2,
            "num_attention_heads": 2,
            "num_key_value_heads": 1,
            "output_channels": 64,
            "hidden_act": "silu",
            "max_position_embeddings": 512,
            "rope_parameters": {"type": "default", "mrope_section": [2, 1, 1]},
            "rope_theta": 10000,
            "tie_word_embeddings": True,
            "bos_token_id": 0,
            "eos_token_id": 0,
            "pad_token_id": 0,
            "n_routed_experts": 8,
            "n_shared_experts": 1,
            "n_group": 1,
            "topk_group": 1,
            "num_experts_per_tok": 8,
        },
        vision_config={
            "depth": 2,
            "hidden_act": "gelu",
            "hidden_size": 32,
            "intermediate_size": 22,
            "patch_size": 16,
            "spatial_merge_size": 1,
            "temporal_patch_size": 1,
        },
        vq_config={
            "embed_dim": 48,
            "in_channels": 3,
            "initializer_range": 0.02,
            "latent_channels": 32,
            "num_embeddings": 32,
        },
    ):
        self.parent = parent
        self.ignore_index = ignore_index
        self.bos_token_id = text_config["bos_token_id"]
        self.eos_token_id = text_config["eos_token_id"]
        self.pad_token_id = text_config["pad_token_id"]
        self.image_start_token_id = image_start_token_id
        self.image_end_token_id = image_end_token_id
        self.image_token_id = image_token_id
        self.text_config = text_config
        self.vision_config = vision_config
        self.vq_config = vq_config
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.is_training = is_training
        self.hidden_size = text_config["hidden_size"]
        self.num_hidden_layers = text_config["num_hidden_layers"]
        self.num_attention_heads = text_config["num_attention_heads"]
        self.vision_vocab_size = text_config["vision_vocab_size"]
        self.vocab_size = text_config["vocab_size"]
        # Each dummy image occupies this many placeholder positions in input_ids;
        # the effective sequence length includes them.
        self.num_image_tokens = 64
        self.seq_length = seq_length + self.num_image_tokens
        self.n_routed_experts = text_config["n_routed_experts"]
        self.n_shared_experts = text_config["n_shared_experts"]
        self.num_experts_per_tok = text_config["num_experts_per_tok"]
        self.n_group = text_config["n_group"]
        self.topk_group = text_config["topk_group"]

    def get_config(self):
        # Assemble the composite config from the three sub-configs plus the special token ids.
        return GlmImageConfig(
            text_config=self.text_config,
            vision_config=self.vision_config,
            vq_config=self.vq_config,
            image_token_id=self.image_token_id,
            image_start_token_id=self.image_start_token_id,
            image_end_token_id=self.image_end_token_id,
        )

    def prepare_config_and_inputs(self):
        config = self.get_config()
        patch_size = config.vision_config.patch_size
        temporal_patch_size = config.vision_config.temporal_patch_size
        # Flattened patch layout: (total_patches, channels * patch_area * temporal_patch_size).
        pixel_values = floats_tensor(
            [
                self.batch_size * (self.image_size**2) // (patch_size**2),
                self.num_channels * (patch_size**2) * temporal_patch_size,
            ]
        )
        return config, pixel_values

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)

        # Scrub accidental special tokens from the random ids, then lay out one image
        # span per row: <image_start> <image_token>*num_image_tokens <image_end> ...text...
        input_ids[input_ids == self.image_token_id] = self.pad_token_id
        input_ids[input_ids == self.image_start_token_id] = self.pad_token_id
        input_ids[input_ids == self.image_end_token_id] = self.pad_token_id
        input_ids[:, 0] = self.image_start_token_id
        input_ids[:, 1 : 1 + self.num_image_tokens] = self.image_token_id
        input_ids[:, 1 + self.num_image_tokens] = self.image_end_token_id

        patch_size = config.vision_config.patch_size
        patches_per_side = self.image_size // patch_size

        # For i2i mode: each sample has 1 source image + 1 target grid
        # image_grid_thw layout: [sample0_source, sample0_target, sample1_source, sample1_target, ...]
        # Since batches are homogeneous, all samples have same number of source images
        num_grids_per_sample = 2  # 1 source + 1 target

        inputs_dict = {
            "pixel_values": pixel_values,
            "image_grid_thw": torch.tensor(
                [[1, patches_per_side, patches_per_side]] * (self.batch_size * num_grids_per_sample),
                device=torch_device,
            ),
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "images_per_sample": torch.tensor([num_grids_per_sample] * self.batch_size, device=torch_device),
        }
        return config, inputs_dict
@require_torch
class GlmImageModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
all_model_classes = (GlmImageModel, GlmImageForConditionalGeneration) if is_torch_available() else ()
model_split_percents = [0.7, 0.9] # model too big to split at 0.5
_is_composite = True
def setUp(self):
self.model_tester = GlmImageVisionText2TextModelTester(self)
self.config_tester = ConfigTester(self, config_class=GlmImageConfig, has_text_modality=False)
def test_config(self):
self.config_tester.run_common_tests()
# GlmImage has images shaped as (bs*patch_len, dim) so we can't slice to batches in generate
def prepare_config_and_inputs_for_generate(self, batch_size=2):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
# We don't want a few model inputs in our model input dictionary for generation tests
input_keys_to_ignore = [
# we don't want to mask attention heads
# we don't want encoder-decoder models to start from filled decoder ids
"decoder_input_ids",
"decoder_attention_mask",
# we'll set cache use in each test differently
"use_cache",
# Ignore labels if it is in the input dict
"labels",
# model-specific exceptions should overload/overwrite this function
]
# The diff from the general `prepare_config_and_inputs_for_generate` lies here
patch_size = config.vision_config.patch_size
num_patches_per_image = (self.model_tester.image_size**2) // (patch_size**2)
num_grids_per_sample = 2 # 1 source + 1 target
filtered_inputs_dict = {
k: v[:batch_size, ...]
if isinstance(v, torch.Tensor) and k not in ["pixel_values", "image_grid_thw", "images_per_sample"]
else v
for k, v in inputs_dict.items()
if k not in input_keys_to_ignore
}
# pixel_values: each sample has 1 source image
filtered_inputs_dict["pixel_values"] = inputs_dict["pixel_values"][: batch_size * num_patches_per_image]
# image_grid_thw: each sample has 2 grids (1 source + 1 target)
filtered_inputs_dict["image_grid_thw"] = inputs_dict["image_grid_thw"][: batch_size * num_grids_per_sample]
# images_per_sample: each sample has 2 images
filtered_inputs_dict["images_per_sample"] = torch.tensor(
[num_grids_per_sample] * batch_size, device=torch_device
)
# It is important set `eos_token_id` to `None` to avoid early stopping (would break for length-based checks)
text_gen_config = config.get_text_config(decoder=True)
if text_gen_config.eos_token_id is not None and text_gen_config.pad_token_id is None:
text_gen_config.pad_token_id = (
text_gen_config.eos_token_id
if isinstance(text_gen_config.eos_token_id, int)
else text_gen_config.eos_token_id[0]
)
text_gen_config.eos_token_id = None
text_gen_config.forced_eos_token_id = None
return config, filtered_inputs_dict
def test_training(self):
# Model isn't in any auto-mapping so we need to build labels manually
if not self.model_tester.is_training:
self.skipTest(reason="ModelTester is not configured to run training tests")
for model_class in self.all_model_classes:
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
if model_class.__name__ in [
*get_values(MODEL_MAPPING_NAMES),
]:
continue
model = model_class(config)
model.to(torch_device)
model.train()
inputs_dict["labels"] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
)
loss = model(**inputs_dict).loss
loss.backward()
@unittest.skip(reason="Reequires input ids AND image grid to generate")
def test_generate_without_input_ids(self):
pass
@parameterized.expand(TEST_EAGER_MATCHES_SDPA_INFERENCE_PARAMETERIZATION)
@unittest.skip("Needs special input preparation. Not important test for model, skip for now")
def test_eager_matches_sdpa_inference(
self, name, dtype, padding_side, use_attention_mask, output_attentions, enable_kernels
):
pass
    # NOTE(review): the stubs below opt GLM-Image out of common-suite tests that do
    # not apply to it (multimodal-only inputs, no inputs_embeds path, no assisted
    # decoding, vision tower not trainable). The skip/xfail reason strings are the
    # authoritative explanation for each.
    @unittest.skip(reason="No available kernels - not supported")
    def test_sdpa_can_dispatch_on_flash(self):
        pass

    @unittest.skip(reason="Size mismatch")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @pytest.mark.xfail(
        reason="GlmImage has a VQ module that uses `weight.data` directly in forward which prevent offloading on that module"
    )
    def test_disk_offload_safetensors(self):
        pass

    @pytest.mark.xfail(
        reason="GlmImage has a VQ module that uses `weight.data` directly in forward which prevent offloading on that module"
    )
    def test_disk_offload_bin(self):
        pass

    @pytest.mark.xfail(
        reason="GlmImage has a VQ module that uses `weight.data` directly in forward which prevent offloading on that module"
    )
    def test_cpu_offload(self):
        pass

    @pytest.mark.xfail(
        reason="GlmImage has a VQ module that uses `weight.data` directly in forward which prevent offloading on that module"
    )
    def test_model_parallelism(self):
        pass

    @unittest.skip("Error with compilation")
    def test_generate_from_inputs_embeds_with_static_cache(self):
        pass

    @parameterized.expand([("greedy", 1), ("beam search", 2)])
    @unittest.skip(reason="GLM-Image does not use inputs_embeds")
    def test_generate_from_inputs_embeds(self, _, num_beams):
        pass

    @unittest.skip(reason="GLM-Image input embed is compare with inputs_ids and image_ids")
    def test_inputs_embeds_matches_input_ids(self):
        pass

    @unittest.skip(reason="GLM-Image does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="GLM-Image can't do text-only inference")
    def test_generate_from_random_inputs_embeds(self):
        pass

    @unittest.skip(reason="GLM-Image can't do and does not need assisted generation. Not worth fixing!")
    def test_assisted_decoding_sample(self):
        pass

    @unittest.skip(reason="GLM-Image can't do and does not need assisted generation. Not worth fixing!")
    def test_prompt_lookup_decoding_matches_greedy_search(self):
        pass

    @parameterized.expand([("random",), ("same",)])
    @unittest.skip(reason="GLM-Image can't do and does not need assisted generation. Not worth fixing!")
    def test_assisted_decoding_matches_greedy_search(self, assistant_type):
        pass

    @unittest.skip(reason="GlmImageVisionModel does not support training")
    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(reason="GlmImageVision does not support output_hidden_states test")
    def test_model_outputs_equivalence(self):
        pass

    @unittest.skip(
        reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
    )
    def test_training_gradient_checkpointing_use_reentrant(self):
        pass

    @unittest.skip(
        reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
    )
    def test_training_gradient_checkpointing_use_reentrant_false(self):
        pass

    @unittest.skip(
        reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
    )
    def test_training_gradient_checkpointing_use_reentrant_true(self):
        pass

    @unittest.skip(reason="GlmImageVisionModel does not support training")
    def test_retain_grad_hidden_states_attentions(self):
        pass

    @unittest.skip(reason="GlmImage needs special input preparation to pass this test")
    def test_generate_compile_model_forward_fullgraph(self):
        pass

    @unittest.skip(
        reason="GlmImage is a multimodal model that requires pixel_values and image_grid_thw. "
        "This test drops all inputs except input_ids which causes NoneType iteration error."
    )
    def test_flash_attention_2_continue_generate_with_position_ids(self):
        pass

    @unittest.skip(
        reason="GlmImage is a multimodal model that requires pixel_values and image_grid_thw. "
        "This test only uses input_ids and attention_mask which causes NoneType iteration error."
    )
    def test_flash_attn_2_fp32_ln(self):
        pass

    @unittest.skip(
        reason="GlmImage is a multimodal model that requires pixel_values and image_grid_thw. "
        "This test only uses input_ids and attention_mask which causes NoneType iteration error."
    )
    def test_flash_attn_2_from_config(self):
        pass
def _image_features_prepare_config_and_inputs(self):
"""
Helper method to extract only image-related inputs from the full set of inputs, for testing `get_image_features`.
GlmImage internally preprocesses the image_grid_thw input by selecting source grids,
so we need to prepare inputs accordingly for testing get_image_features. We also discard text-related inputs.
"""
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
# Select only source grids (every other grid starting from index 0)
# Grid layout: [s0_source, s0_target, s1_source, s1_target, ...]
num_grids_per_sample = 2 # 1 source + 1 target
batch_size = self.model_tester.batch_size
source_indices = [i * num_grids_per_sample for i in range(batch_size)]
inputs_dict["image_grid_thw"] = inputs_dict["image_grid_thw"][source_indices]
del inputs_dict["input_ids"]
del inputs_dict["attention_mask"]
return config, inputs_dict
@require_torch
@slow
class GlmImageIntegrationTest(unittest.TestCase):
    """Slow integration tests that exercise the real GLM-Image checkpoint end to end."""

    model_id = "zai-org/GLM-Image"
    # Checkpoint subfolders holding the VLM weights and the processor files respectively.
    model_subfolder = "vision_language_encoder"
    processor_subfolder = "processor"

    @classmethod
    def setUpClass(cls):
        # The model is loaded lazily by `get_model` so processor-only tests do not pay
        # the checkpoint download/load cost.
        cls.model = None

    @classmethod
    def get_model(cls):
        """Load (at most once per test class) and return the conditional-generation model."""
        if cls.model is None:
            cls.model = GlmImageForConditionalGeneration.from_pretrained(
                cls.model_id, subfolder=cls.model_subfolder, torch_dtype=torch.bfloat16, device_map="auto"
            )
        return cls.model

    @classmethod
    def tearDownClass(cls):
        # Drop the cached model and free accelerator memory after the whole class ran.
        if hasattr(cls, "model"):
            del cls.model
        cleanup(torch_device, gc_collect=True)

    def setUp(self):
        cleanup(torch_device, gc_collect=True)
        self.processor = GlmImageProcessor.from_pretrained(self.model_id, subfolder=self.processor_subfolder)
        # Text-to-image generation message
        self.t2i_message = [
            {
                "role": "user",
                "content": [
                    {"type": "text", "text": "A cute cat sitting on a wooden table"},
                ],
            }
        ]
        # Image-to-image generation message
        self.i2i_message = [
            {
                "role": "user",
                "content": [
                    {
                        "type": "image",
                        "url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg",
                    },
                    {"type": "text", "text": "Add a red hat to this cat"},
                ],
            }
        ]

    def tearDown(self):
        cleanup(torch_device, gc_collect=True)

    def test_processor_text_to_image(self):
        """Test processor correctly prepares text-to-image inputs."""
        inputs = self.processor.apply_chat_template(
            self.t2i_message, tokenize=True, add_generation_prompt=True, return_dict=True, return_tensors="pt"
        )
        # For T2I with apply_chat_template, we get basic text inputs.
        # Target grids are added during actual generation when using the processor
        # directly with a target shape.
        self.assertIn("input_ids", inputs)
        self.assertIn("attention_mask", inputs)

    def test_processor_image_to_image(self):
        """Test processor correctly prepares image-to-image inputs."""
        from io import BytesIO

        import requests
        from PIL import Image

        # Load the image
        url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg"
        response = requests.get(url)
        image = Image.open(BytesIO(response.content))
        # Create prompt with target shape and image token
        text = "<|dit_token_16384|><|image|><|dit_token_16385|>Add a red hat to this cat<sop>28 40<eop>"
        # Process with actual images (nested list for batched processing)
        inputs = self.processor(text=[text], images=[[image]], return_tensors="pt")
        # For I2I, there should be pixel_values from the source image
        self.assertIn("input_ids", inputs)
        self.assertIn("attention_mask", inputs)
        self.assertIn("pixel_values", inputs)
        self.assertIn("image_grid_thw", inputs)
        # I2I should have 1 source grid + 1 target grid = 2 grids
        self.assertEqual(inputs["image_grid_thw"].shape[0], 2)
        # images_per_sample should be 2 (1 source + 1 target)
        self.assertEqual(inputs["images_per_sample"].item(), 2)

    def test_text_to_image_generation(self):
        """Test text-to-image generation produces valid image tokens."""
        model = self.get_model()
        inputs = self.processor.apply_chat_template(
            self.t2i_message, tokenize=True, add_generation_prompt=True, return_dict=True, return_tensors="pt"
        ).to(torch_device)
        # Generate image tokens with fixed seed for reproducibility
        set_seed(42)
        output = model.generate(**inputs, max_new_tokens=50, do_sample=False)
        # Output should be longer than input (generated tokens)
        self.assertGreater(output.shape[1], inputs["input_ids"].shape[1])
        # Generated tokens should be within vision vocabulary range
        generated_tokens = output[0, inputs["input_ids"].shape[1] :]
        # Vision tokens are in range [0, vision_vocab_size)
        self.assertTrue(all(t.item() < model.config.text_config.vision_vocab_size for t in generated_tokens))
        # Check actual token values (first 30 tokens) to catch implementation errors
        # fmt: off
        expected_tokens = torch.tensor(
            [
                671, 14581, 1275, 1275, 4508, 4508, 4508, 4508, 1471, 1471,
                1153, 1153, 11241, 3596, 11241, 11942, 9695, 13748, 4508, 4508,
                4508, 3136, 3136, 11241, 11241, 11241, 11241, 1755, 3136, 13748,
            ],
            device=torch_device,
        )
        # fmt: on
        self.assertTrue(
            torch.equal(generated_tokens[:30], expected_tokens),
            f"Expected first 30 tokens:\n{expected_tokens.tolist()}\nGot:\n{generated_tokens[:30].tolist()}",
        )

    @require_deterministic_for_xpu
    def test_image_to_image_generation(self):
        """Test image-to-image generation produces valid image tokens."""
        model = self.get_model()
        inputs = self.processor.apply_chat_template(
            self.i2i_message, tokenize=True, add_generation_prompt=True, return_dict=True, return_tensors="pt"
        ).to(torch_device)
        # Generate image tokens with fixed seed for reproducibility
        set_seed(42)
        output = model.generate(**inputs, max_new_tokens=50, do_sample=False)
        # Output should be longer than input (generated tokens)
        self.assertGreater(output.shape[1], inputs["input_ids"].shape[1])
        # Generated tokens should be within vision vocabulary range
        generated_tokens = output[0, inputs["input_ids"].shape[1] :]
        self.assertTrue(all(t.item() < model.config.text_config.vision_vocab_size for t in generated_tokens))
        # Check actual token values (first 30 tokens) to catch implementation errors.
        # Expected values differ per accelerator backend, hence the Expectations table.
        # fmt: off
        expected_tokens = Expectations(
            {
                ("cuda", None): [9223, 11045, 5705, 14581, 4759, 11667, 1275, 10094, 572, 10543, 9223, 1275, 9223, 10543, 12265, 10543, 2007, 8200, 10543, 1153, 1153, 1153, 10094, 16304, 9223, 11045, 3114, 14581, 4759, 10094],
                ("xpu", 3): [9223, 11045, 11045, 14581, 4759, 11667, 10543, 10094, 572, 10543, 9223, 1275, 9223, 9223, 4759, 10543, 2007, 4759, 10543, 1153, 1153, 1153, 8932, 9223, 10094, 11045, 5705, 14581, 4759, 10094],
            }
        )
        # fmt: on
        expected = torch.tensor(expected_tokens.get_expectation(), device=torch_device)
        self.assertTrue(
            torch.equal(generated_tokens[:30], expected),
            f"Expected first 30 tokens:\n{expected.tolist()}\nGot:\n{generated_tokens[:30].tolist()}",
        )

    @run_first
    @require_flash_attn
    @require_torch_accelerator
    def test_flash_attention_generation(self):
        """Test generation with Flash Attention 2."""
        model = GlmImageForConditionalGeneration.from_pretrained(
            self.model_id,
            subfolder=self.model_subfolder,
            torch_dtype=torch.bfloat16,
            attn_implementation="flash_attention_2",
            device_map="auto",
        )
        inputs = self.processor.apply_chat_template(
            self.t2i_message, tokenize=True, add_generation_prompt=True, return_dict=True, return_tensors="pt"
        ).to(torch_device)
        # Generate image tokens; only a smoke check that generation proceeds under FA2.
        output = model.generate(**inputs, max_new_tokens=5)
        # Output should be longer than input
        self.assertGreater(output.shape[1], inputs["input_ids"].shape[1])
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/glm_image/test_modeling_glm_image.py",
"license": "Apache License 2.0",
"lines": 554,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:tests/models/glm_image/test_processor_glm_image.py | # Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from PIL import Image
from transformers.testing_utils import require_av, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_processing_common import ProcessorTesterMixin
if is_vision_available():
from transformers import AutoImageProcessor, AutoTokenizer, GlmImageProcessor
if is_torch_available():
import torch
@require_vision
@require_torch
class GlmImageProcessorTest(ProcessorTesterMixin, unittest.TestCase):
    """Processor test suite for GLM-Image, specialized from `ProcessorTesterMixin`."""

    processor_class = GlmImageProcessor
    model_id = "zai-org/GLM-Image"

    @classmethod
    def _setup_test_attributes(cls, processor):
        # Cache the image placeholder token for the mixin's shared tests.
        cls.image_token = processor.image_token

    @classmethod
    def _setup_from_pretrained(cls, model_id, **kwargs):
        # Processor files for this checkpoint live in the "processor" subfolder.
        return super()._setup_from_pretrained(
            model_id,
            subfolder="processor",
            **kwargs,
        )

    @classmethod
    def _setup_image_processor(cls):
        # Provide a tiny image-processor config so placeholder expansion stays small
        return AutoImageProcessor.from_pretrained(
            cls.model_id,
            subfolder="processor",
            do_resize=True,
            patch_size=4,
            min_pixels=12 * 12,
            max_pixels=18 * 18,
        )

    @classmethod
    def _setup_tokenizer(cls):
        # Ensure tokenizer is loaded from the correct subfolder when using custom components
        return AutoTokenizer.from_pretrained(cls.model_id, subfolder="processor")

    def prepare_image_inputs(self, batch_size: int | None = None, nested: bool = False):
        """Override to create images with valid aspect ratio (< 4) for GLM-Image."""
        # GLM-Image requires aspect ratio < 4, so use near-square images
        image_inputs = [Image.fromarray(np.random.randint(0, 255, (256, 256, 3), dtype=np.uint8))]
        if batch_size is None:
            return image_inputs
        if nested:
            return [image_inputs] * batch_size
        return image_inputs * batch_size

    @require_torch
    @require_av
    def _test_apply_chat_template(
        self,
        modality: str,
        batch_size: int,
        return_tensors: str,
        input_name: str,
        processor_name: str,
        input_data: list[str],
    ):
        """Shared chat-template driver; overridden to skip the image modality (see below)."""
        # Skip image modality tests for GLM-Image because the processor expands image tokens
        # based on image size, making the tokenized output differ from direct tokenizer call
        if modality == "image":
            self.skipTest(
                "GLM-Image processor expands image tokens based on image size, "
                "making tokenized output differ from direct tokenizer call"
            )
        processor = self.get_processor()
        if processor.chat_template is None:
            self.skipTest("Processor has no chat template")
        if processor_name not in self.processor_class.get_attributes():
            self.skipTest(f"{processor_name} attribute not present in {self.processor_class}")
        batch_messages = [
            [
                {
                    "role": "user",
                    "content": [{"type": "text", "text": "Describe this."}],
                },
            ]
        ] * batch_size
        # Test that jinja can be applied
        formatted_prompt = processor.apply_chat_template(batch_messages, add_generation_prompt=True, tokenize=False)
        self.assertEqual(len(formatted_prompt), batch_size)
        # Test that tokenizing with template and directly with `self.tokenizer` gives same output
        formatted_prompt_tokenized = processor.apply_chat_template(
            batch_messages, add_generation_prompt=True, tokenize=True, return_tensors=return_tensors
        )
        # Avoid double-inserting BOS when the template already emitted it.
        add_special_tokens = True
        if processor.tokenizer.bos_token is not None and formatted_prompt[0].startswith(processor.tokenizer.bos_token):
            add_special_tokens = False
        tok_output = processor.tokenizer(
            formatted_prompt, return_tensors=return_tensors, add_special_tokens=add_special_tokens
        )
        expected_output = tok_output.input_ids
        self.assertListEqual(expected_output.tolist(), formatted_prompt_tokenized.tolist())
        # Test that kwargs passed to processor's `__call__` are actually used
        tokenized_prompt_100 = processor.apply_chat_template(
            batch_messages,
            add_generation_prompt=True,
            tokenize=True,
            padding="max_length",
            truncation=True,
            return_tensors=return_tensors,
            max_length=100,
        )
        self.assertEqual(len(tokenized_prompt_100[0]), 100)
        # Test that `return_dict=True` returns text related inputs in the dict
        out_dict_text = processor.apply_chat_template(
            batch_messages,
            add_generation_prompt=True,
            tokenize=True,
            return_dict=True,
            return_tensors=return_tensors,
        )
        self.assertTrue(all(key in out_dict_text for key in ["input_ids", "attention_mask"]))
        self.assertEqual(len(out_dict_text["input_ids"]), batch_size)
        self.assertEqual(len(out_dict_text["attention_mask"]), batch_size)
        # Test that with modality URLs and `return_dict=True`, we get modality inputs in the dict
        for idx, url in enumerate(input_data[:batch_size]):
            batch_messages[idx][0]["content"] = [batch_messages[idx][0]["content"][0], {"type": modality, "url": url}]
        out_dict = processor.apply_chat_template(
            batch_messages,
            add_generation_prompt=True,
            tokenize=True,
            return_dict=True,
            return_tensors=return_tensors,
            fps=2
            if isinstance(input_data[0], str)
            else None,  # by default no more than 2 frames per second, otherwise too slow
        )
        # `input_name` is rebound from attribute name (str) to the actual model-input key here.
        input_name = getattr(self, input_name)
        self.assertTrue(input_name in out_dict)
        self.assertEqual(len(out_dict["input_ids"]), batch_size)
        self.assertEqual(len(out_dict["attention_mask"]), batch_size)
        # NOTE(review): 4 appears to be the per-sample multimodal feature count for this
        # setup — confirm against the mixin's fixtures if the fixture sizes change.
        mm_len = batch_size * 4
        self.assertEqual(len(out_dict[input_name]), mm_len)
        return_tensor_to_type = {"pt": torch.Tensor, "np": np.ndarray, None: list}
        for k in out_dict:
            self.assertIsInstance(out_dict[k], return_tensor_to_type[return_tensors])

    def test_model_input_names(self):
        """The processor's outputs must match its declared `model_input_names` exactly."""
        processor = self.get_processor()
        text = self.prepare_text_inputs(modalities=["image"])
        image_input = self.prepare_image_inputs()
        inputs_dict = {"text": text, "images": image_input}
        inputs = processor(**inputs_dict, return_tensors="pt")
        self.assertSetEqual(set(inputs.keys()), set(processor.model_input_names))

    @unittest.skip(
        "GlmImageProcessor injects additional special/control tokens around plain text inputs, so "
        "`processor(text=X)` is not equivalent to `tokenizer(X)` for this model."
    )
    def test_tokenizer_defaults(self):
        pass
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/glm_image/test_processor_glm_image.py",
"license": "Apache License 2.0",
"lines": 165,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:src/transformers/models/glm4_moe_lite/modular_glm4_moe_lite.py | # Copyright 2025 the HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch.nn as nn
from ...configuration_utils import PreTrainedConfig, layer_type_validation
from ...modeling_rope_utils import RopeParameters
from ..deepseek_v3.modeling_deepseek_v3 import DeepseekV3Attention
from ..glm4_moe.modeling_glm4_moe import (
Glm4MoeDecoderLayer,
Glm4MoeForCausalLM,
Glm4MoeMLP,
Glm4MoeModel,
Glm4MoeMoE,
Glm4MoeNaiveMoe,
Glm4MoePreTrainedModel,
Glm4MoeRMSNorm,
Glm4MoeRotaryEmbedding,
Glm4MoeTopkRouter,
)
class Glm4MoeLiteConfig(PreTrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`Glm4MoeLiteModel`]. It is used to instantiate a DeepSeek
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
    defaults will yield a similar configuration to that of the DeepSeek-V3.
    e.g. [bzantium/tiny-deepseek-v3](https://huggingface.co/bzantium/tiny-deepseek-v3)

    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 154880):
            Vocabulary size of the model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`Glm4MoeLiteModel`]
        hidden_size (`int`, *optional*, defaults to 2048):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 10240):
            Dimension of the MLP representations.
        moe_intermediate_size (`int`, *optional*, defaults to 1536):
            Dimension of the MoE representations.
        num_hidden_layers (`int`, *optional*, defaults to 47):
            Number of hidden layers in the Transformer decoder.
        num_attention_heads (`int`, *optional*, defaults to 20):
            Number of attention heads for each attention layer in the Transformer decoder.
        num_key_value_heads (`int`, *optional*, defaults to 20):
            This is the number of key_value heads that should be used to implement Grouped Query Attention. If
            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
            `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
            by meanpooling all the original heads within that group. For more details, check out [this
            paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to
            `num_attention_heads`.
        n_shared_experts (`int`, *optional*, defaults to 1):
            Number of shared experts.
        n_routed_experts (`int`, *optional*, defaults to 64):
            Number of routed experts.
        routed_scaling_factor (`float`, *optional*, defaults to 1.8):
            Scaling factor for routed experts.
        kv_lora_rank (`int`, *optional*, defaults to 512):
            Rank of the LoRA matrices for key and value projections.
        q_lora_rank (`int`, *optional*, defaults to 768):
            Rank of the LoRA matrices for query projections.
        qk_rope_head_dim (`int`, *optional*, defaults to 64):
            Dimension of the query/key heads that use rotary position embeddings.
        v_head_dim (`int`, *optional*, defaults to 256):
            Dimension of the value heads.
        qk_nope_head_dim (`int`, *optional*, defaults to 192):
            Dimension of the query/key heads that don't use rotary position embeddings.
        n_group (`int`, *optional*, defaults to 1):
            Number of groups for routed experts.
        topk_group (`int`, *optional*, defaults to 1):
            Number of selected groups for each token (for each token, ensuring the selected experts is only within `topk_group` groups).
        num_experts_per_tok (`int`, *optional*, defaults to 4):
            Number of selected experts, None means dense model.
        norm_topk_prob (`bool`, *optional*, defaults to `True`):
            Whether to normalize the weights of the routed experts.
        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the decoder.
        max_position_embeddings (`int`, *optional*, defaults to 202752):
            The maximum sequence length that this model might ever be used with.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        rms_norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon used by the rms normalization layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        pad_token_id (`int`, *optional*):
            Padding token id.
        bos_token_id (`int`, *optional*, defaults to 0):
            Beginning of stream token id.
        eos_token_id (`int`, *optional*, defaults to 1):
            End of stream token id.
        pretraining_tp (`int`, *optional*, defaults to 1):
            Experimental feature. Tensor parallelism rank used during pretraining. Please refer to [this
            document](https://huggingface.co/docs/transformers/parallelism) to understand more about it. This value is
            necessary to ensure exact reproducibility of the pretraining results. Please refer to [this
            issue](https://github.com/pytorch/pytorch/issues/76232).
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether to tie weight embeddings
        rope_parameters (`RopeParameters`, *optional*):
            Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain
            a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE
            with longer `max_position_embeddings`.
        rope_interleave (`bool`, *optional*, defaults to `True`):
            Whether to interleave the rotary position embeddings.
        mlp_layer_types (`list`, *optional*):
            MLP (Moe vs Dense) pattern for each layer.
        attention_bias (`bool`, *optional*, defaults to `False`):
            Whether to use a bias in the query, key, value and output projection layers during self-attention.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.

    ```python
    >>> from transformers import Glm4MoeLiteModel, Glm4MoeLiteConfig

    >>> # Initializing a Deepseek-V3 style configuration
    >>> configuration = Glm4MoeLiteConfig()

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "glm4_moe_lite"
    keys_to_ignore_at_inference = ["past_key_values"]
    # Tensor-parallel sharding plan for the base model's projections and experts.
    base_model_tp_plan = {
        "layers.*.self_attn.o_proj": "rowwise",
        "layers.*.mlp.experts.gate_up_proj": "packed_colwise",
        "layers.*.mlp.experts.down_proj": "rowwise",
        "layers.*.mlp.experts": "moe_tp_experts",
        "layers.*.mlp.gate_proj": "colwise",
        "layers.*.mlp.up_proj": "colwise",
        "layers.*.mlp.down_proj": "rowwise",
    }
    # Pipeline-parallel plan: (input names, output names) per stage.
    base_model_pp_plan = {
        "embed_tokens": (["input_ids"], ["inputs_embeds"]),
        "layers": (["hidden_states", "attention_mask"], ["hidden_states"]),
        "norm": (["hidden_states"], ["hidden_states"]),
    }
    attribute_map = {
        "num_local_experts": "n_routed_experts",
    }

    def __init__(
        self,
        vocab_size: int | None = 154880,
        hidden_size: int | None = 2048,
        intermediate_size: int | None = 10240,
        moe_intermediate_size: int | None = 1536,
        num_hidden_layers: int | None = 47,
        num_attention_heads: int | None = 20,
        num_key_value_heads: int | None = 20,
        n_shared_experts: int | None = 1,
        n_routed_experts: int | None = 64,
        routed_scaling_factor: float | None = 1.8,
        kv_lora_rank: int | None = 512,
        q_lora_rank: int | None = 768,
        qk_rope_head_dim: int | None = 64,
        v_head_dim: int | None = 256,
        qk_nope_head_dim: int | None = 192,
        n_group: int | None = 1,
        topk_group: int | None = 1,
        num_experts_per_tok: int | None = 4,
        norm_topk_prob: bool | None = True,
        hidden_act: str | None = "silu",
        max_position_embeddings: int | None = 202752,
        initializer_range: float | None = 0.02,
        rms_norm_eps: float | None = 1e-5,
        use_cache: bool | None = True,
        pad_token_id: int | None = None,
        bos_token_id: int | None = 0,
        eos_token_id: int | None = 1,
        pretraining_tp: int | None = 1,
        tie_word_embeddings: bool | None = False,
        rope_parameters: RopeParameters | dict[str, RopeParameters] | None = None,
        rope_interleave: bool | None = True,
        mlp_layer_types: list[str] | None = None,
        attention_bias: bool | None = False,
        attention_dropout: float | None = 0.0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        # Default to MoE from the second layer and on
        self.mlp_layer_types = mlp_layer_types
        if self.mlp_layer_types is None:
            self.mlp_layer_types = ["dense"] + ["sparse"] * (self.num_hidden_layers - 1)
        layer_type_validation(self.mlp_layer_types, self.num_hidden_layers, attention=False)
        self.moe_intermediate_size = moe_intermediate_size
        self.num_attention_heads = num_attention_heads
        self.n_shared_experts = n_shared_experts
        self.n_routed_experts = n_routed_experts
        self.routed_scaling_factor = routed_scaling_factor
        self.kv_lora_rank = kv_lora_rank
        self.q_lora_rank = q_lora_rank
        self.qk_rope_head_dim = qk_rope_head_dim
        self.v_head_dim = v_head_dim
        self.qk_nope_head_dim = qk_nope_head_dim
        # Full query/key head dim combines the RoPE and non-RoPE parts.
        self.qk_head_dim = qk_nope_head_dim + qk_rope_head_dim
        # NOTE(review): `head_dim` is set to the RoPE part only — presumably what the
        # rotary embedding consumes; confirm against the attention implementation.
        self.head_dim = qk_rope_head_dim
        self.n_group = n_group
        self.topk_group = topk_group
        self.num_experts_per_tok = num_experts_per_tok
        self.norm_topk_prob = norm_topk_prob
        self.rope_interleave = rope_interleave
        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.attention_bias = attention_bias
        self.attention_dropout = attention_dropout
        self.rope_parameters = rope_parameters
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.tie_word_embeddings = tie_word_embeddings
        super().__init__(**kwargs)
# The classes below are pure renames: each subclasses its parent implementation with
# no changes, exposing it under the Glm4MoeLite name for this modular definition.
class Glm4MoeLiteRotaryEmbedding(Glm4MoeRotaryEmbedding):
    pass


# Attention comes from DeepseekV3 (MLA-style), unlike the other pieces which come from Glm4Moe.
class Glm4MoeLiteAttention(DeepseekV3Attention):
    pass


class Glm4MoeLiteMLP(Glm4MoeMLP):
    pass


class Glm4MoeLiteTopkRouter(Glm4MoeTopkRouter):
    pass


class Glm4MoeLiteRMSNorm(Glm4MoeRMSNorm):
    pass


class Glm4MoeLiteNaiveMoe(Glm4MoeNaiveMoe):
    pass


class Glm4MoeLiteMoE(Glm4MoeMoE):
    pass
class Glm4MoeLiteDecoderLayer(Glm4MoeDecoderLayer, nn.Module):
    """Decoder layer whose MLP is either a sparse MoE or a dense MLP per `mlp_layer_types`."""

    def __init__(self, config: Glm4MoeLiteConfig, layer_idx: int):
        # Initialize via nn.Module directly (skipping Glm4MoeDecoderLayer.__init__) so
        # only the submodules constructed below exist on this layer.
        nn.Module.__init__(self)
        self.hidden_size = config.hidden_size
        self.self_attn = Glm4MoeLiteAttention(config, layer_idx)
        # Per-layer choice between a sparse MoE block and a dense MLP.
        if config.mlp_layer_types[layer_idx] == "sparse":
            self.mlp = Glm4MoeLiteMoE(config)
        else:
            self.mlp = Glm4MoeLiteMLP(config)
        self.input_layernorm = Glm4MoeLiteRMSNorm(config.hidden_size, config.rms_norm_eps)
        self.post_attention_layernorm = Glm4MoeLiteRMSNorm(config.hidden_size, config.rms_norm_eps)
class Glm4MoeLitePreTrainedModel(Glm4MoePreTrainedModel):
    # NOTE(review): checkpoint weights matching `model.layers.47.*` are deliberately
    # ignored at load time — presumably an extra checkpoint-only layer beyond the 47
    # instantiated decoder layers (indices 0-46); confirm against the released checkpoint.
    _keys_to_ignore_on_load_unexpected = [r"model\.layers\.47.*"]


class Glm4MoeLiteModel(Glm4MoeModel):
    pass


class Glm4MoeLiteForCausalLM(Glm4MoeForCausalLM):
    pass
# Explicit public API of this module.
__all__ = [
    "Glm4MoeLiteConfig",
    "Glm4MoeLitePreTrainedModel",
    "Glm4MoeLiteModel",
    "Glm4MoeLiteForCausalLM",
]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/glm4_moe_lite/modular_glm4_moe_lite.py",
"license": "Apache License 2.0",
"lines": 261,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:tests/models/glm4_moe_lite/test_modeling_glm4_moe_lite.py | # Copyright 2025 the HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch GLM-4-MoE-Lite model."""
import unittest
import pytest
import torch
from transformers import Cache, is_torch_available
from transformers.testing_utils import (
cleanup,
require_torch,
require_torch_accelerator,
slow,
torch_device,
)
from ...causal_lm_tester import CausalLMModelTest, CausalLMModelTester
if is_torch_available():
from transformers import AutoTokenizer, Glm4MoeLiteForCausalLM, Glm4MoeLiteModel
class Glm4MoeLiteModelTester(CausalLMModelTester):
    """Builds tiny Glm4MoeLite configs/models for the common causal-LM test suite."""

    if is_torch_available():
        base_model_class = Glm4MoeLiteModel

    def __init__(
        self,
        parent,
        n_routed_experts=8,
        kv_lora_rank=32,
        q_lora_rank=16,
        qk_nope_head_dim=64,
        qk_rope_head_dim=64,
        v_head_dim=128,
    ):
        # Small MLA/MoE dimensions keep the test model tiny and fast.
        super().__init__(parent=parent)
        self.n_routed_experts = n_routed_experts
        self.kv_lora_rank = kv_lora_rank
        self.q_lora_rank = q_lora_rank
        self.qk_nope_head_dim = qk_nope_head_dim
        self.qk_rope_head_dim = qk_rope_head_dim
        self.v_head_dim = v_head_dim
@require_torch
class Glm4MoeModelTest(CausalLMModelTest, unittest.TestCase):
    model_tester_class = Glm4MoeLiteModelTester
    test_all_params_have_gradient = False
    model_split_percents = [0.5, 0.7, 0.8]

    def _check_past_key_values_for_generate(self, batch_size, past_key_values, seq_length, config):
        """Needs to be overridden as GLM-4.7-Flash has special MLA cache format (though we don't really use the MLA)"""
        self.assertIsInstance(past_key_values, Cache)
        # (batch, head, seq_length, head_features)
        expected_common_shape = (
            batch_size,
            getattr(config, "num_key_value_heads", config.num_attention_heads),
            seq_length,
        )
        # Keys carry both RoPE and non-RoPE parts; values use their own head dim.
        expected_key_shape = expected_common_shape + (config.qk_nope_head_dim + config.qk_rope_head_dim,)
        expected_value_shape = expected_common_shape + (config.v_head_dim,)
        for layer in past_key_values.layers:
            self.assertEqual(layer.keys.shape, expected_key_shape)
            self.assertEqual(layer.values.shape, expected_value_shape)
@require_torch_accelerator
@slow
class Glm4MoeIntegrationTest(unittest.TestCase):
    """Slow integration test running the real GLM-4.7-Flash checkpoint."""

    def tearDown(self):
        # See LlamaIntegrationTest.tearDown(). Can be removed once LlamaIntegrationTest.tearDown() is removed.
        cleanup(torch_device, gc_collect=False)

    @slow
    @require_torch_accelerator
    @pytest.mark.torch_compile_test
    def test_compile_static_cache(self):
        """Greedy decoding must match the reference text with dynamic cache, static cache,
        and static cache under torch.compile."""
        NUM_TOKENS_TO_GENERATE = 40
        EXPECTED_TEXT_COMPLETION = [
            'hello, world!\'\'\')\nprint(\'hello, world!\')\nprint("hello, world!")\nprint("hello, world!")\nprint("hello, world!")\nprint("hello, world!")\nprint("hello, world!")\n',
            "tell me the story of the first Thanksgiving. commonly known as the Pilgrims, arrived in the autumn of 1620. They were seeking religious freedom and a new life in the Plymouth Colony. Their first",
        ]
        prompts = ["[gMASK]<sop>hello", "[gMASK]<sop>tell me"]
        tokenizer = AutoTokenizer.from_pretrained("zai-org/GLM-4.7-Flash")
        model = Glm4MoeLiteForCausalLM.from_pretrained(
            "zai-org/GLM-4.7-Flash", device_map=torch_device, dtype=torch.bfloat16
        )
        inputs = tokenizer(prompts, return_tensors="pt", padding=True).to(model.device)
        # Dynamic Cache
        generated_ids = model.generate(**inputs, max_new_tokens=NUM_TOKENS_TO_GENERATE, do_sample=False)
        dynamic_text = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
        self.assertEqual(EXPECTED_TEXT_COMPLETION, dynamic_text)
        # Static Cache
        generated_ids = model.generate(
            **inputs, max_new_tokens=NUM_TOKENS_TO_GENERATE, do_sample=False, cache_implementation="static"
        )
        static_text = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
        self.assertEqual(EXPECTED_TEXT_COMPLETION, static_text)
        # Static Cache + compile
        model._cache = None  # clear cache object, initialized when we pass `cache_implementation="static"`
        model.forward = torch.compile(model.forward, mode="reduce-overhead", fullgraph=True)
        generated_ids = model.generate(
            **inputs, max_new_tokens=NUM_TOKENS_TO_GENERATE, do_sample=False, cache_implementation="static"
        )
        static_compiled_text = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
        self.assertEqual(EXPECTED_TEXT_COMPLETION, static_compiled_text)
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/glm4_moe_lite/test_modeling_glm4_moe_lite.py",
"license": "Apache License 2.0",
"lines": 106,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:src/transformers/loss/loss_lw_detr.py | # Copyright 2026 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
import torch.nn as nn
from ..utils import is_accelerate_available, is_scipy_available, is_vision_available
from .loss_for_object_detection import (
HungarianMatcher,
_set_aux_loss,
box_iou,
dice_loss,
generalized_box_iou,
nested_tensor_from_tensor_list,
sigmoid_focal_loss,
)
if is_vision_available():
from transformers.image_transforms import center_to_corners_format
if is_scipy_available():
from scipy.optimize import linear_sum_assignment
if is_accelerate_available():
from accelerate import PartialState
from accelerate.utils import reduce
class LwDetrHungarianMatcher(HungarianMatcher):
    """Hungarian matcher for LW-DETR with Group-DETR query groups and a focal-style class cost."""

    @torch.no_grad()
    def forward(self, outputs, targets, group_detr):
        """
        Differences:
        - out_prob = outputs["logits"].flatten(0, 1).sigmoid() instead of softmax
        - class_cost uses alpha and gamma
        """
        batch_size, num_queries = outputs["logits"].shape[:2]

        # Flatten batch and query dims so all pairwise costs are computed at once.
        pred_scores = outputs["logits"].flatten(0, 1).sigmoid()  # [batch_size * num_queries, num_classes]
        pred_boxes = outputs["pred_boxes"].flatten(0, 1)  # [batch_size * num_queries, 4]

        # Concatenate ground-truth labels and boxes across the batch.
        gt_labels = torch.cat([target["class_labels"] for target in targets])
        gt_boxes = torch.cat([target["boxes"] for target in targets])

        # Focal-style classification cost (alpha/gamma weighted log terms).
        alpha, gamma = 0.25, 2.0
        negative_cost = (1 - alpha) * (pred_scores**gamma) * (-(1 - pred_scores + 1e-8).log())
        positive_cost = alpha * ((1 - pred_scores) ** gamma) * (-(pred_scores + 1e-8).log())
        class_cost = positive_cost[:, gt_labels] - negative_cost[:, gt_labels]

        # L1 box cost; cdist only supports float32, so upcast and restore the dtype after.
        original_dtype = pred_boxes.dtype
        pred_boxes = pred_boxes.to(torch.float32)
        gt_boxes = gt_boxes.to(torch.float32)
        bbox_cost = torch.cdist(pred_boxes, gt_boxes, p=1)
        bbox_cost = bbox_cost.to(original_dtype)

        # Generalized IoU cost, negated so that larger overlap means lower cost.
        giou_cost = -generalized_box_iou(center_to_corners_format(pred_boxes), center_to_corners_format(gt_boxes))

        # Weighted sum of the three cost terms, reshaped per image and moved to CPU for scipy.
        cost_matrix = self.bbox_cost * bbox_cost + self.class_cost * class_cost + self.giou_cost * giou_cost
        cost_matrix = cost_matrix.view(batch_size, num_queries, -1).cpu()

        sizes = [len(target["boxes"]) for target in targets]
        queries_per_group = num_queries // group_detr

        # Solve one assignment per query group, offsetting query indices back into the
        # full query range before merging the groups.
        matched = []
        for group_id, group_cost in enumerate(cost_matrix.split(queries_per_group, dim=1)):
            group_matches = [linear_sum_assignment(cost[i]) for i, cost in enumerate(group_cost.split(sizes, -1))]
            if group_id == 0:
                matched = group_matches
            else:
                matched = [
                    (
                        np.concatenate([prev[0], new[0] + queries_per_group * group_id]),
                        np.concatenate([prev[1], new[1]]),
                    )
                    for prev, new in zip(matched, group_matches)
                ]
        return [(torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for i, j in matched]
class LwDetrImageLoss(nn.Module):
    """
    Loss criterion for LW-DETR object detection.

    Matches predictions to ground truth with a Hungarian matcher (duplicating query groups
    during training, per Group-DETR), then computes the requested losses: an IoU-aware
    classification loss, L1 + GIoU box losses, a cardinality diagnostic, and optionally mask
    losses. Auxiliary decoder layers and encoder proposals are handled in `forward`.
    """

    def __init__(self, matcher, num_classes, focal_alpha, losses, group_detr):
        """
        Args:
            matcher: module matching predictions to targets (e.g. `LwDetrHungarianMatcher`).
            num_classes: number of object categories, omitting the special no-object class.
            focal_alpha: alpha factor mixing probability and IoU in the classification targets.
            losses: list of loss names to apply (subset of "labels", "cardinality", "boxes", "masks").
            group_detr: number of duplicated query groups used during training.
        """
        super().__init__()
        self.matcher = matcher
        self.num_classes = num_classes
        self.focal_alpha = focal_alpha
        self.losses = losses
        self.group_detr = group_detr
        # removed logging parameter, which was part of the original implementation

    def loss_labels(self, outputs, targets, indices, num_boxes):
        """
        IoU-aware binary cross-entropy classification loss.

        Matched (positive) entries get weight t = prob^alpha * iou^(1-alpha) (clamped at 0.01,
        detached), unmatched entries are treated as negatives weighted by prob^gamma.
        """
        if "logits" not in outputs:
            raise KeyError("No logits were found in the outputs")
        source_logits = outputs["logits"]

        idx = self._get_source_permutation_idx(indices)
        target_classes_o = torch.cat([t["class_labels"][J] for t, (_, J) in zip(targets, indices)])

        alpha = self.focal_alpha
        gamma = 2

        # IoU between matched predicted and target boxes; predictions are detached so the
        # IoU acts as a fixed soft target rather than a gradient path.
        src_boxes = outputs["pred_boxes"][idx]
        target_boxes = torch.cat([t["boxes"][i] for t, (_, i) in zip(targets, indices)], dim=0)
        iou_targets = torch.diag(
            box_iou(center_to_corners_format(src_boxes.detach()), center_to_corners_format(target_boxes))[0]
        )
        # Convert to the same dtype as the source logits as box_iou upcasts to float32
        iou_targets = iou_targets.to(source_logits.dtype)
        pos_ious = iou_targets.clone().detach()
        prob = source_logits.sigmoid()
        # init positive weights and negative weights
        pos_weights = torch.zeros_like(source_logits)
        neg_weights = prob**gamma
        # Index triple (batch, query, class) selecting the matched positive entries.
        pos_ind = list(idx)
        pos_ind.append(target_classes_o)
        t = prob[pos_ind].pow(alpha) * pos_ious.pow(1 - alpha)
        t = torch.clamp(t, 0.01).detach()
        pos_weights[pos_ind] = t
        neg_weights[pos_ind] = 1 - t
        loss_ce = -pos_weights * prob.log() - neg_weights * (1 - prob).log()
        loss_ce = loss_ce.sum() / num_boxes
        losses = {"loss_ce": loss_ce}
        return losses

    @torch.no_grad()
    def loss_cardinality(self, outputs, targets, indices, num_boxes):
        """
        Compute the cardinality error, i.e. the absolute error in the number of predicted non-empty boxes.

        This is not really a loss, it is intended for logging purposes only. It doesn't propagate gradients.
        """
        logits = outputs["logits"]
        device = logits.device
        target_lengths = torch.as_tensor([len(v["class_labels"]) for v in targets], device=device)
        # Count the number of predictions that are NOT "no-object" (sigmoid > 0.5 threshold)
        card_pred = (logits.sigmoid().max(-1).values > 0.5).sum(1)
        card_err = nn.functional.l1_loss(card_pred.float(), target_lengths.float())
        losses = {"cardinality_error": card_err}
        return losses

    # Copied from loss.loss_for_object_detection.ImageLoss.loss_boxes
    def loss_boxes(self, outputs, targets, indices, num_boxes):
        """
        Compute the losses related to the bounding boxes, the L1 regression loss and the GIoU loss.

        Targets dicts must contain the key "boxes" containing a tensor of dim [nb_target_boxes, 4]. The target boxes
        are expected in format (center_x, center_y, w, h), normalized by the image size.
        """
        if "pred_boxes" not in outputs:
            raise KeyError("No predicted boxes found in outputs")
        idx = self._get_source_permutation_idx(indices)
        source_boxes = outputs["pred_boxes"][idx]
        target_boxes = torch.cat([t["boxes"][i] for t, (_, i) in zip(targets, indices)], dim=0)

        loss_bbox = nn.functional.l1_loss(source_boxes, target_boxes, reduction="none")

        losses = {}
        losses["loss_bbox"] = loss_bbox.sum() / num_boxes

        loss_giou = 1 - torch.diag(
            generalized_box_iou(center_to_corners_format(source_boxes), center_to_corners_format(target_boxes))
        )
        losses["loss_giou"] = loss_giou.sum() / num_boxes
        return losses

    # Copied from loss.loss_for_object_detection.ImageLoss.loss_masks
    def loss_masks(self, outputs, targets, indices, num_boxes):
        """
        Compute the losses related to the masks: the focal loss and the dice loss.

        Targets dicts must contain the key "masks" containing a tensor of dim [nb_target_boxes, h, w].
        """
        if "pred_masks" not in outputs:
            raise KeyError("No predicted masks found in outputs")

        source_idx = self._get_source_permutation_idx(indices)
        target_idx = self._get_target_permutation_idx(indices)
        source_masks = outputs["pred_masks"]
        source_masks = source_masks[source_idx]
        masks = [t["masks"] for t in targets]
        # TODO use valid to mask invalid areas due to padding in loss
        target_masks, valid = nested_tensor_from_tensor_list(masks).decompose()
        target_masks = target_masks.to(source_masks)
        target_masks = target_masks[target_idx]

        # upsample predictions to the target size
        source_masks = nn.functional.interpolate(
            source_masks[:, None], size=target_masks.shape[-2:], mode="bilinear", align_corners=False
        )
        source_masks = source_masks[:, 0].flatten(1)

        target_masks = target_masks.flatten(1)
        target_masks = target_masks.view(source_masks.shape)
        losses = {
            "loss_mask": sigmoid_focal_loss(source_masks, target_masks, num_boxes),
            "loss_dice": dice_loss(source_masks, target_masks, num_boxes),
        }
        return losses

    # Copied from loss.loss_for_object_detection.ImageLoss._get_source_permutation_idx
    def _get_source_permutation_idx(self, indices):
        # permute predictions following indices
        batch_idx = torch.cat([torch.full_like(source, i) for i, (source, _) in enumerate(indices)])
        source_idx = torch.cat([source for (source, _) in indices])
        return batch_idx, source_idx

    # Copied from loss.loss_for_object_detection.ImageLoss._get_target_permutation_idx
    def _get_target_permutation_idx(self, indices):
        # permute targets following indices
        batch_idx = torch.cat([torch.full_like(target, i) for i, (_, target) in enumerate(indices)])
        target_idx = torch.cat([target for (_, target) in indices])
        return batch_idx, target_idx

    def get_loss(self, loss, outputs, targets, indices, num_boxes):
        """Dispatch a loss name ("labels", "cardinality", "boxes", "masks") to its method."""
        loss_map = {
            "labels": self.loss_labels,
            "cardinality": self.loss_cardinality,
            "boxes": self.loss_boxes,
            "masks": self.loss_masks,
        }
        if loss not in loss_map:
            raise ValueError(f"Loss {loss} not supported")
        return loss_map[loss](outputs, targets, indices, num_boxes)

    def forward(self, outputs, targets):
        """
        This performs the loss computation.

        Args:
            outputs (`dict`, *optional*):
                Dictionary of tensors, see the output specification of the model for the format.
            targets (`list[dict]`, *optional*):
                List of dicts, such that `len(targets) == batch_size`. The expected keys in each dict depends on the
                losses applied, see each loss' doc.
        """
        # Group-DETR duplication is only used at training time; evaluation uses one group.
        group_detr = self.group_detr if self.training else 1
        outputs_without_aux_and_enc = {
            k: v for k, v in outputs.items() if k != "enc_outputs" and k != "auxiliary_outputs"
        }

        # Retrieve the matching between the outputs of the last layer and the targets
        indices = self.matcher(outputs_without_aux_and_enc, targets, group_detr)

        # Compute the average number of target boxes across all nodes, for normalization purposes
        num_boxes = sum(len(t["class_labels"]) for t in targets)
        # Each group matches every target once, so scale the normalizer accordingly.
        num_boxes = num_boxes * group_detr
        num_boxes = torch.as_tensor([num_boxes], dtype=torch.float, device=next(iter(outputs.values())).device)
        world_size = 1
        if is_accelerate_available():
            if PartialState._shared_state != {}:
                num_boxes = reduce(num_boxes)
                world_size = PartialState().num_processes
        num_boxes = torch.clamp(num_boxes / world_size, min=1).item()

        # Compute all the requested losses
        losses = {}
        for loss in self.losses:
            losses.update(self.get_loss(loss, outputs, targets, indices, num_boxes))

        # In case of auxiliary losses, we repeat this process with the output of each intermediate layer.
        if "auxiliary_outputs" in outputs:
            for i, auxiliary_outputs in enumerate(outputs["auxiliary_outputs"]):
                indices = self.matcher(auxiliary_outputs, targets, group_detr)
                for loss in self.losses:
                    if loss == "masks":
                        # Intermediate masks losses are too costly to compute, we ignore them.
                        continue
                    l_dict = self.get_loss(loss, auxiliary_outputs, targets, indices, num_boxes)
                    l_dict = {k + f"_{i}": v for k, v in l_dict.items()}
                    losses.update(l_dict)

        # Encoder proposal losses, suffixed "_enc".
        if "enc_outputs" in outputs:
            enc_outputs = outputs["enc_outputs"]
            indices = self.matcher(enc_outputs, targets, group_detr=group_detr)
            for loss in self.losses:
                l_dict = self.get_loss(loss, enc_outputs, targets, indices, num_boxes)
                l_dict = {k + "_enc": v for k, v in l_dict.items()}
                losses.update(l_dict)
        return losses
def LwDetrForObjectDetectionLoss(
    logits,
    labels,
    device,
    pred_boxes,
    config,
    outputs_class=None,
    outputs_coord=None,
    enc_outputs_class=None,
    enc_outputs_coord=None,
    **kwargs,
):
    """
    Compute the LW-DETR object-detection training loss.

    Builds the Hungarian matcher and the `LwDetrImageLoss` criterion from `config`, runs the
    criterion on the final-layer predictions (plus auxiliary decoder layers and encoder
    proposals when available), and returns the weighted total.

    Args:
        logits: final-layer classification logits.
        labels: list of target dicts (one per image) with "class_labels" and "boxes".
        device: device to move the criterion to.
        pred_boxes: final-layer predicted boxes in (cx, cy, w, h) normalized format.
        config: model config providing the matcher costs, loss coefficients, `num_labels`,
            `focal_alpha`, `group_detr`, `decoder_layers` and `auxiliary_loss`.
        outputs_class / outputs_coord: per-decoder-layer predictions for auxiliary losses.
        enc_outputs_class / enc_outputs_coord: encoder proposal predictions; the "_enc"
            losses are only computed when both are provided.

    Returns:
        Tuple of (total weighted loss, dict of individual losses, auxiliary outputs or None).
    """
    # First: create the matcher
    matcher = LwDetrHungarianMatcher(
        class_cost=config.class_cost, bbox_cost=config.bbox_cost, giou_cost=config.giou_cost
    )
    # Second: create the criterion
    losses = ["labels", "boxes", "cardinality"]
    criterion = LwDetrImageLoss(
        matcher=matcher,
        num_classes=config.num_labels,
        focal_alpha=config.focal_alpha,
        losses=losses,
        group_detr=config.group_detr,
    )
    criterion.to(device)
    # Third: compute the losses, based on outputs and labels
    auxiliary_outputs = None
    outputs_loss = {"logits": logits, "pred_boxes": pred_boxes}
    # Only include the encoder branch when its outputs exist; passing None tensors into the
    # criterion's "enc_outputs" branch would crash inside the matcher.
    if enc_outputs_class is not None and enc_outputs_coord is not None:
        outputs_loss["enc_outputs"] = {
            "logits": enc_outputs_class,
            "pred_boxes": enc_outputs_coord,
        }
    if config.auxiliary_loss:
        auxiliary_outputs = _set_aux_loss(outputs_class, outputs_coord)
        outputs_loss["auxiliary_outputs"] = auxiliary_outputs

    loss_dict = criterion(outputs_loss, labels)
    # Fourth: compute total loss, as a weighted sum of the various losses
    weight_dict = {"loss_ce": 1, "loss_bbox": config.bbox_loss_coefficient}
    weight_dict["loss_giou"] = config.giou_loss_coefficient
    if config.auxiliary_loss:
        # Replicate the base weights for every intermediate decoder layer ("_{i}" suffix)...
        aux_weight_dict = {}
        for i in range(config.decoder_layers - 1):
            aux_weight_dict.update({k + f"_{i}": v for k, v in weight_dict.items()})
        weight_dict.update(aux_weight_dict)
        # ...and for the encoder proposals ("_enc" suffix).
        enc_weight_dict = {k + "_enc": v for k, v in weight_dict.items()}
        weight_dict.update(enc_weight_dict)
    loss = sum(loss_dict[k] * weight_dict[k] for k in loss_dict if k in weight_dict)
    return loss, loss_dict, auxiliary_outputs
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/loss/loss_lw_detr.py",
"license": "Apache License 2.0",
"lines": 307,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/lw_detr/modular_lw_detr.py | # Copyright 2026 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from collections.abc import Callable
from dataclasses import dataclass
from typing import Any
import torch
from torch import nn
from ... import initialization as init
from ...activations import ACT2FN
from ...backbone_utils import consolidate_backbone_kwargs_to_config
from ...configuration_utils import PreTrainedConfig
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...utils import ModelOutput, TransformersKwargs, auto_docstring, logging
from ...utils.generic import can_return_tuple, merge_with_config_defaults
from ...utils.output_capturing import capture_outputs
from ..auto import AutoConfig
from ..convnext.modeling_convnext import ConvNextLayerNorm
from ..dab_detr.modeling_dab_detr import gen_sine_position_embeddings
from ..deformable_detr.modeling_deformable_detr import (
DeformableDetrDecoderOutput,
DeformableDetrForObjectDetection,
DeformableDetrMLPPredictionHead,
DeformableDetrModel,
DeformableDetrMultiscaleDeformableAttention,
)
from ..llama.modeling_llama import eager_attention_forward
from ..rt_detr.modeling_rt_detr import RTDetrConvNormLayer
from ..vit.modeling_vit import ViTAttention, ViTEncoder, ViTSelfAttention
from ..vitdet.configuration_vitdet import VitDetConfig
from ..vitdet.modeling_vitdet import (
VitDetBackbone,
VitDetEmbeddings,
VitDetMlp,
VitDetPreTrainedModel,
)
logger = logging.get_logger(__name__)
class LwDetrViTConfig(VitDetConfig):
    r"""
    This is the configuration class to store the configuration of a [`LwDetrViTModel`]. It is used to instantiate an
    LW-DETR ViT model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of the LW-DETR ViT
    [AnnaZhang/lwdetr_small_60e_coco](https://huggingface.co/AnnaZhang/lwdetr_small_60e_coco) architecture.

    LW-DETR ViT is the Vision Transformer backbone used in the LW-DETR model for real-time object detection. It features
    interleaved window and global attention mechanisms to reduce computational complexity while maintaining high performance.
    The model uses a window-major feature map organization for efficient attention computation.

    Configuration objects inherit from [`VitDetConfig`] and can be used to control the model outputs. Read the
    documentation from [`VitDetConfig`] for more information.

    Args:
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        mlp_ratio (`int`, *optional*, defaults to 4):
            Ratio of mlp hidden dim to embedding dim.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"` and `"gelu_new"` are supported.
        dropout_prob (`float`, *optional*, defaults to 0.0):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-06):
            The epsilon used by the layer normalization layers.
        image_size (`int`, *optional*, defaults to 256):
            The size (resolution) of each image.
        pretrain_image_size (`int`, *optional*, defaults to 224):
            The size (resolution) of each image during pretraining.
        patch_size (`int`, *optional*, defaults to 16):
            The size (resolution) of each patch.
        num_channels (`int`, *optional*, defaults to 3):
            The number of input channels.
        qkv_bias (`bool`, *optional*, defaults to `True`):
            Whether to add a bias to the queries, keys and values.
        window_block_indices (`list[int]`, *optional*, defaults to `[]`):
            List of indices of blocks that should have window attention instead of regular global self-attention.
        use_absolute_position_embeddings (`bool`, *optional*, defaults to `True`):
            Whether to add absolute position embeddings to the patch embeddings.
        out_features (`list[str]`, *optional*):
            If used as backbone, list of features to output. Can be any of `"stem"`, `"stage1"`, `"stage2"`, etc.
            (depending on how many stages the model has). If unset and `out_indices` is set, will default to the
            corresponding stages. If unset and `out_indices` is unset, will default to the last stage. Must be in the
            same order as defined in the `stage_names` attribute.
        out_indices (`list[int]`, *optional*):
            If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how
            many stages the model has). If unset and `out_features` is set, will default to the corresponding stages.
            If unset and `out_features` is unset, will default to the last stage. Must be in the
            same order as defined in the `stage_names` attribute.
        cae_init_values (`float`, *optional*, defaults to 0.1):
            Initialization value for CAE parameters when `use_cae` is enabled.
        num_windows (`int`, *optional*, defaults to 16):
            Number of windows for window-based attention. Must be a perfect square and the image size must be
            divisible by the square root of this value. This enables efficient window-major feature map organization.

    Example:

    ```python
    >>> from transformers import LwDetrViTConfig, LwDetrViTModel

    >>> # Initializing a LW-DETR ViT configuration
    >>> configuration = LwDetrViTConfig()

    >>> # Initializing a model (with random weights) from the configuration
    >>> model = LwDetrViTModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "lw_detr_vit"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        mlp_ratio=4,
        hidden_act="gelu",
        dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        image_size=256,
        pretrain_image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        window_block_indices=None,
        use_absolute_position_embeddings=True,
        out_features=None,
        out_indices=None,
        cae_init_values: float = 0.1,
        num_windows=16,
        **kwargs,
    ):
        # Use a None sentinel instead of a mutable `[]` default so the list is never shared
        # across config instances; the effective default remains `[]`.
        window_block_indices = [] if window_block_indices is None else window_block_indices
        super().__init__(
            hidden_size=hidden_size,
            num_hidden_layers=num_hidden_layers,
            num_attention_heads=num_attention_heads,
            mlp_ratio=mlp_ratio,
            hidden_act=hidden_act,
            dropout_prob=dropout_prob,
            initializer_range=initializer_range,
            layer_norm_eps=layer_norm_eps,
            image_size=image_size,
            pretrain_image_size=pretrain_image_size,
            patch_size=patch_size,
            num_channels=num_channels,
            qkv_bias=qkv_bias,
            window_block_indices=window_block_indices,
            use_absolute_position_embeddings=use_absolute_position_embeddings,
            out_features=out_features,
            out_indices=out_indices,
            **kwargs,
        )
        # These VitDetConfig attributes are not used by the LW-DETR ViT backbone.
        del self.residual_block_indices
        del self.use_relative_position_embeddings
        del self.window_size
        del self.drop_path_rate
        self.cae_init_values = cae_init_values
        # Exact integer perfect-square check (math.isqrt) instead of float modulo, which can
        # misclassify values due to floating-point rounding.
        num_windows_side = math.isqrt(num_windows)
        if num_windows_side**2 != num_windows:
            raise ValueError(
                f"`num_windows` has to be a perfect square, where num_windows % math.sqrt(num_windows) != 0, but got {num_windows}."
            )
        if image_size / num_windows % math.sqrt(num_windows) != 0:
            raise ValueError(
                f"`image_size` has to be divisible by `num_windows`, where image_size / num_windows % math.sqrt(num_windows) != 0,but got {image_size} and {num_windows}."
            )
        self.num_windows = num_windows
        self.num_windows_side = num_windows_side
class LwDetrConfig(PreTrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`LwDetrModel`]. It is used to instantiate
    a LW-DETR model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the LW-DETR
    [AnnaZhang/lwdetr_small_60e_coco](https://huggingface.co/AnnaZhang/lwdetr_small_60e_coco) architecture.

    LW-DETR (Lightweight Detection Transformer) is a transformer-based object detection model designed for real-time
    detection tasks. It replaces traditional CNN-based detectors like YOLO with a more efficient transformer architecture
    that achieves competitive performance while being computationally lightweight.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        backbone_config (`PretrainedConfig` or `dict`, *optional*):
            The configuration of the backbone model. If not provided, will default to `LwDetrViTConfig` with
            a small ViT architecture optimized for detection tasks.
        projector_scale_factors (`list[float]`, *optional*, defaults to `[]`):
            Scale factors for the feature pyramid network. Each scale factor determines the resolution of features
            at different levels. Supported values are 0.5, 1.0, and 2.0.
        hidden_expansion (`float`, *optional*, defaults to 0.5):
            Expansion factor for hidden dimensions in the projector layers.
        c2f_num_blocks (`int`, *optional*, defaults to 3):
            Number of blocks in the C2F layer.
        activation_function (`str`, *optional*, defaults to `"silu"`):
            The non-linear activation function in the projector. Supported values are `"silu"`, `"relu"`, `"gelu"`.
        batch_norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon value for batch normalization layers.
        d_model (`int`, *optional*, defaults to 256):
            Dimension of the model layers and the number of expected features in the decoder inputs.
        dropout (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        decoder_ffn_dim (`int`, *optional*, defaults to 2048):
            Dimension of the "intermediate" (often named feed-forward) layer in decoder.
        decoder_n_points (`int`, *optional*, defaults to 4):
            The number of sampled keys in each feature level for each attention head in the decoder.
        decoder_layers (`int`, *optional*, defaults to 3):
            Number of decoder layers in the transformer.
        decoder_self_attention_heads (`int`, *optional*, defaults to 8):
            Number of attention heads for each attention layer in the decoder self-attention.
        decoder_cross_attention_heads (`int`, *optional*, defaults to 16):
            Number of attention heads for each attention layer in the decoder cross-attention.
        decoder_activation_function (`str`, *optional*, defaults to `"relu"`):
            The non-linear activation function in the decoder. Supported values are `"relu"`, `"silu"`, `"gelu"`.
        num_queries (`int`, *optional*, defaults to 300):
            Number of object queries, i.e. detection slots. This is the maximal number of objects
            [`LwDetrModel`] can detect in a single image.
        attention_bias (`bool`, *optional*, defaults to `True`):
            Whether to add bias to the attention layers.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        activation_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for activations inside the fully connected layer.
        group_detr (`int`, *optional*, defaults to 13):
            Number of groups for Group DETR attention mechanism, which helps reduce computational complexity.
        init_std (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        disable_custom_kernels (`bool`, *optional*, defaults to `True`):
            Disable the use of custom CUDA and CPU kernels. This option is necessary for the ONNX export, as custom
            kernels are not supported by PyTorch ONNX export.
        class_cost (`float`, *optional*, defaults to 2):
            Relative weight of the classification error in the Hungarian matching cost.
        bbox_cost (`float`, *optional*, defaults to 5):
            Relative weight of the L1 error of the bounding box coordinates in the Hungarian matching cost.
        giou_cost (`float`, *optional*, defaults to 2):
            Relative weight of the generalized IoU loss of the bounding box in the Hungarian matching cost.
        mask_loss_coefficient (`float`, *optional*, defaults to 1):
            Relative weight of the Focal loss in the panoptic segmentation loss.
        dice_loss_coefficient (`float`, *optional*, defaults to 1):
            Relative weight of the DICE/F-1 loss in the panoptic segmentation loss.
        bbox_loss_coefficient (`float`, *optional*, defaults to 5):
            Relative weight of the L1 bounding box loss in the object detection loss.
        giou_loss_coefficient (`float`, *optional*, defaults to 2):
            Relative weight of the generalized IoU loss in the object detection loss.
        eos_coefficient (`float`, *optional*, defaults to 0.1):
            Relative classification weight of the 'no-object' class in the object detection loss.
        focal_alpha (`float`, *optional*, defaults to 0.25):
            Alpha parameter in the focal loss.
        auxiliary_loss (`bool`, *optional*, defaults to `True`):
            Whether auxiliary decoding losses (loss at each decoder layer) are to be used.

    Examples:

    ```python
    >>> from transformers import LwDetrConfig, LwDetrModel

    >>> # Initializing a LW-DETR AnnaZhang/lwdetr_small_60e_coco style configuration
    >>> configuration = LwDetrConfig()

    >>> # Initializing a model (with random weights) from the AnnaZhang/lwdetr_small_60e_coco style configuration
    >>> model = LwDetrModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "lw_detr"
    sub_configs = {"backbone_config": AutoConfig}

    def __init__(
        self,
        # backbone
        backbone_config=None,
        # projector
        projector_scale_factors=None,
        hidden_expansion=0.5,
        c2f_num_blocks=3,
        activation_function="silu",
        batch_norm_eps=1e-5,
        # decoder
        d_model=256,
        dropout=0.1,
        decoder_ffn_dim=2048,
        decoder_n_points=4,
        decoder_layers: int = 3,
        decoder_self_attention_heads: int = 8,
        decoder_cross_attention_heads: int = 16,
        decoder_activation_function="relu",
        # model
        num_queries=300,
        attention_bias=True,
        attention_dropout=0.0,
        activation_dropout=0.0,
        group_detr: int = 13,
        init_std=0.02,
        disable_custom_kernels=True,
        # loss
        class_cost=2,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        auxiliary_loss=True,
        **kwargs,
    ):
        self.batch_norm_eps = batch_norm_eps
        backbone_config, kwargs = consolidate_backbone_kwargs_to_config(
            backbone_config=backbone_config,
            default_config_type="lw_detr_vit",
            default_config_kwargs={
                "image_size": 1024,
                "hidden_size": 192,
                "num_hidden_layers": 10,
                "window_block_indices": [0, 1, 3, 6, 7, 9],
                "out_indices": [2, 4, 5, 9],
            },
            **kwargs,
        )
        self.backbone_config = backbone_config

        # projector
        # None sentinel instead of a mutable `[]` default so the list is never shared across
        # config instances; the effective default remains `[]`.
        projector_scale_factors = [] if projector_scale_factors is None else projector_scale_factors
        for scale in projector_scale_factors:
            if scale not in [0.5, 1.0, 2.0]:
                raise ValueError(f"Unsupported scale factor: {scale}")
        self.projector_scale_factors = projector_scale_factors
        self.projector_in_channels = [d_model] * len(projector_scale_factors)
        self.projector_out_channels = d_model
        self.activation_function = activation_function
        self.hidden_expansion = hidden_expansion
        self.c2f_num_blocks = c2f_num_blocks

        # decoder
        self.d_model = d_model
        self.dropout = dropout
        self.num_queries = num_queries
        self.decoder_ffn_dim = decoder_ffn_dim
        self.num_feature_levels = len(self.projector_scale_factors)
        self.decoder_n_points = decoder_n_points
        self.decoder_layers = decoder_layers
        self.decoder_activation_function = decoder_activation_function
        self.decoder_self_attention_heads = decoder_self_attention_heads
        self.decoder_cross_attention_heads = decoder_cross_attention_heads
        self.attention_bias = attention_bias
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout

        # model
        self.init_std = init_std
        self.group_detr = group_detr

        # Loss
        self.auxiliary_loss = auxiliary_loss

        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost

        # Loss coefficients
        # `mask_loss_coefficient` was previously accepted but never stored on the config.
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(**kwargs)
class LwDetrViTSelfAttention(ViTSelfAttention):
    """ViT self-attention variant used by the LW-DETR backbone: the key projection has no bias."""
    def __init__(self, config: LwDetrViTConfig):
        super().__init__(config)
        # Replace the parent's key projection with a bias-free one.
        del self.key
        self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=False)
        # Single key/value group (no grouped-query attention).
        self.num_key_value_groups = 1
        self.dropout_prob = config.dropout_prob
    def forward(
        self,
        hidden_states: torch.Tensor,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple[torch.Tensor, torch.Tensor]:
        batch_size = hidden_states.shape[0]
        # (batch, seq, hidden) -> (batch, heads, seq, head_size)
        new_shape = batch_size, -1, self.num_attention_heads, self.attention_head_size
        key_layer = self.key(hidden_states).view(*new_shape).transpose(1, 2)
        value_layer = self.value(hidden_states).view(*new_shape).transpose(1, 2)
        query_layer = self.query(hidden_states).view(*new_shape).transpose(1, 2)
        # Dispatch to the configured attention backend (eager/sdpa/flash/flex).
        attention_interface: Callable = ALL_ATTENTION_FUNCTIONS.get_interface(
            self.config._attn_implementation, eager_attention_forward
        )
        context_layer, attention_probs = attention_interface(
            self,
            query_layer,
            key_layer,
            value_layer,
            None,  # no attention mask
            is_causal=self.is_causal,
            scaling=self.scaling,
            dropout=0.0 if not self.training else self.dropout_prob,
            **kwargs,
        )
        # Merge the head dimension back into the hidden dimension.
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.reshape(new_context_layer_shape)
        return context_layer, attention_probs
class LwDetrViTAttention(ViTAttention):
    """Self-attention followed by an output projection (attention probabilities discarded)."""

    def __init__(self, config: LwDetrViTConfig):
        """
        Args:
            config (`LwDetrViTConfig`):
                Model configuration.
        """
        super().__init__(config)
        self.attention = LwDetrViTSelfAttention(config)
        self.output = nn.Linear(config.hidden_size, config.hidden_size)

    def forward(
        self,
        hidden_states: torch.Tensor,
        **kwargs: Unpack[TransformersKwargs],
    ) -> torch.Tensor:
        # Only the context tensor is used; the probabilities are dropped here.
        attn_context, _ = self.attention(hidden_states, **kwargs)
        return self.output(attn_context)
class LwDetrViTMlp(VitDetMlp):
    # Identical to the VitDet MLP; re-exported under the LwDetr name.
    pass
class LwDetrViTLayer(GradientCheckpointingLayer):
    """Pre-norm ViT block with LayerScale (gamma_1/gamma_2) and optional windowed attention.

    Layers whose index is in `config.window_block_indices` attend within their window
    only; all other layers first merge the window axis back into the sequence so they
    attend globally.
    """

    def __init__(
        self,
        config: LwDetrViTConfig,
        layer_idx,
    ) -> None:
        super().__init__()
        dim = config.hidden_size
        self.attention = LwDetrViTAttention(config)
        self.intermediate = LwDetrViTMlp(config=config, in_features=dim, hidden_features=int(dim * config.mlp_ratio))
        self.layernorm_before = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.layernorm_after = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        # LayerScale parameters. Values are assigned in `_init_weights`
        # (config.cae_init_values), so uninitialized storage is fine here;
        # `torch.empty` replaces the legacy `torch.Tensor(dim)` constructor with
        # identical (uninitialized) semantics.
        self.gamma_1 = nn.Parameter(torch.empty(dim), requires_grad=True)
        self.gamma_2 = nn.Parameter(torch.empty(dim), requires_grad=True)
        self.window = layer_idx in config.window_block_indices
        self.num_windows = config.num_windows

    def forward(
        self,
        hidden_states: torch.Tensor,
        **kwargs: Unpack[TransformersKwargs],
    ) -> torch.Tensor:
        batch_size, seq_len, channels = hidden_states.shape
        hidden_states_norm = self.layernorm_before(hidden_states)
        if not self.window:
            # Global layer: fold the window axis back into the sequence so attention
            # spans the whole image.
            hidden_states_norm = hidden_states_norm.reshape(
                batch_size // self.num_windows, self.num_windows * seq_len, channels
            )
        attention_output = self.attention(hidden_states_norm, **kwargs)
        attention_output = attention_output * self.gamma_1
        if not self.window:
            # Restore the windowed layout expected by the residual connection.
            attention_output = attention_output.reshape(batch_size, seq_len, channels)
        hidden_states = hidden_states + attention_output
        layer_output = self.layernorm_after(hidden_states)
        layer_output = self.intermediate(layer_output)
        layer_output = layer_output * self.gamma_2
        hidden_states = hidden_states + layer_output
        return hidden_states
class LwDetrViTEncoder(ViTEncoder):
    """ViT encoder returning the hidden states of every layer, input included."""

    def __init__(self, config: LwDetrViTConfig) -> None:
        super().__init__(config)
        self.layer = nn.ModuleList([LwDetrViTLayer(config, i) for i in range(config.num_hidden_layers)])

    def forward(
        self,
        hidden_states: torch.Tensor,
        **kwargs: Unpack[TransformersKwargs],
    ) -> list[torch.Tensor]:
        # Collect the input plus each per-layer output so the backbone can
        # select arbitrary stages via `out_features`.
        all_states = [hidden_states]
        for block in self.layer:
            hidden_states = block(hidden_states, **kwargs)
            all_states.append(hidden_states)
        return all_states
class LwDetrViTEmbeddings(VitDetEmbeddings):
    # Identical to the VitDet patch embeddings; re-exported under the LwDetr name.
    pass
class LwDetrViTPreTrainedModel(VitDetPreTrainedModel):
    # Base class wiring config/weight-init/attention-backend support for the ViT backbone.
    config: LwDetrViTConfig
    base_model_prefix = "lw_detr_vit"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True
    _no_split_modules = ["LwDetrViTEmbeddings", "LwDetrViTLayer"]
    _supports_sdpa = True
    _supports_flash_attn = True
    _supports_flex_attn = True
    _supports_attention_backend = True
    _can_record_outputs = {
        "hidden_states": LwDetrViTLayer,
        "attentions": LwDetrViTSelfAttention,
    }
    def _init_weights(self, module) -> None:
        """Initialize the weights"""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            # Truncated-normal init for all linear/conv weights, zeroed biases.
            init.trunc_normal_(module.weight, mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                init.zeros_(module.bias)
        elif isinstance(module, nn.LayerNorm):
            init.zeros_(module.bias)
            init.ones_(module.weight)
        elif isinstance(module, LwDetrViTEmbeddings):
            init.trunc_normal_(module.position_embeddings, mean=0.0, std=self.config.initializer_range)
        # Separate `if` (not part of the elif chain) so LayerScale parameters are
        # always set to the configured constant.
        if isinstance(module, LwDetrViTLayer):
            nn.init.constant_(module.gamma_1, self.config.cae_init_values)
            nn.init.constant_(module.gamma_2, self.config.cae_init_values)
@auto_docstring()
class LwDetrViTBackbone(VitDetBackbone):
    # ViT backbone that runs attention on non-overlapping windows and reassembles
    # the selected stages into (batch, channels, height, width) feature maps.
    @merge_with_config_defaults
    @capture_outputs
    @auto_docstring
    def forward(self, pixel_values: torch.Tensor, **kwargs: Unpack[TransformersKwargs]) -> BackboneOutput:
        r"""
        Examples:
        ```python
        >>> from transformers import LwDetrViTConfig, LwDetrViTBackbone
        >>> import torch
        >>> config = LwDetrViTConfig()
        >>> model = LwDetrViTBackbone(config)
        >>> pixel_values = torch.randn(1, 3, 224, 224)
        >>> with torch.no_grad():
        ...     outputs = model(pixel_values)
        >>> feature_maps = outputs.feature_maps
        >>> list(feature_maps[-1].shape)
        [1, 768, 14, 14]
        ```"""
        embedding_output = self.embeddings(pixel_values)
        batch_size, channels, height, width = embedding_output.shape
        # (batch_size, channels, height, width) -> (batch_size, height, width, channels)
        hidden_states = embedding_output.permute(0, 2, 3, 1)
        window_height = height // self.config.num_windows_side
        window_width = width // self.config.num_windows_side
        # Window partition:
        # (batch_size, height, width, channels) -> (batch_size*num_windows_side**2, window_height*window_width, channels)
        hidden_states = (
            hidden_states.reshape(
                batch_size,
                self.config.num_windows_side,
                window_height,
                self.config.num_windows_side,
                window_width,
                channels,
            )
            .permute(0, 1, 3, 2, 4, 5)
            .reshape(batch_size * self.config.num_windows_side**2, window_height * window_width, channels)
        )
        hidden_states = self.encoder(hidden_states, **kwargs)
        feature_maps = ()
        # The encoder returns one hidden state per stage; keep only the requested ones.
        for stage, hidden_state in zip(self.stage_names, hidden_states):
            if stage in self.out_features:
                # Reverse the window partition back to (batch, channels, height, width).
                hidden_state = (
                    hidden_state.reshape(
                        batch_size,
                        self.config.num_windows_side,
                        self.config.num_windows_side,
                        window_height,
                        window_width,
                        channels,
                    )
                    .permute(0, 5, 1, 3, 2, 4)
                    .reshape(batch_size, channels, height, width)
                )
                feature_maps += (hidden_state,)
        return BackboneOutput(feature_maps=feature_maps)
class LwDetrConvNormLayer(RTDetrConvNormLayer):
    """RT-DETR conv+norm layer whose convolution uses `same`-style padding and no bias."""

    def __init__(
        self,
        config: LwDetrConfig,
        in_channels: int,
        out_channels: int,
        kernel_size: int,
        stride: int,
        activation: str | None = None,
    ):
        super().__init__(config, in_channels, out_channels, kernel_size, stride, activation)
        # Override the parent's convolution: symmetric padding keeps the spatial
        # size (for stride 1), and the bias is disabled.
        self.conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=kernel_size // 2,
            bias=False,
        )
class LwDetrRepVggBlock(nn.Module):
    """Two stacked 3x3 conv-norm layers operating at the C2F hidden width."""

    def __init__(self, config: LwDetrConfig):
        super().__init__()
        width = int(config.d_model * config.hidden_expansion)
        act = config.activation_function
        self.conv1 = LwDetrConvNormLayer(config, width, width, 3, 1, activation=act)
        self.conv2 = LwDetrConvNormLayer(config, width, width, 3, 1, activation=act)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.conv2(self.conv1(x))
class LwDetrC2FLayer(nn.Module):
    """CSP-style fusion layer, inspired by RTDetrCSPRepLayer.

    The input is projected into two chunks; the last chunk is refined by a chain of
    RepVgg blocks, and every intermediate result is concatenated before the final
    1x1 projection.
    """

    def __init__(self, config: LwDetrConfig, in_channels: int):
        super().__init__()
        num_blocks = config.c2f_num_blocks
        activation = config.activation_function
        out_channels = config.d_model
        self.hidden_channels = int(out_channels * config.hidden_expansion)
        # The entry projection emits two chunks; the exit projection sees those
        # two plus one extra chunk per bottleneck block.
        self.conv1 = LwDetrConvNormLayer(
            config, in_channels, 2 * self.hidden_channels, 1, 1, activation=activation
        )
        self.conv2 = LwDetrConvNormLayer(
            config, (2 + num_blocks) * self.hidden_channels, out_channels, 1, 1, activation=activation
        )
        self.bottlenecks = nn.ModuleList(LwDetrRepVggBlock(config) for _ in range(num_blocks))

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        projected = self.conv1(hidden_states)
        chunks = list(projected.split(self.hidden_channels, 1))
        current = chunks[-1]
        for block in self.bottlenecks:
            current = block(current)
            chunks.append(current)
        return self.conv2(torch.cat(chunks, 1))
class LwDetrLayerNorm(ConvNextLayerNorm):
    # ConvNext-style LayerNorm (supports channels_first); re-exported under the LwDetr name.
    pass
class LwDetrSamplingLayer(nn.Module):
    """Rescales a feature map: 2.0 upsamples (transposed conv), 0.5 downsamples
    (strided conv), and any other scale leaves the input untouched."""

    def __init__(self, config: LwDetrConfig, channel_size: int, scale: float):
        super().__init__()
        self.scale = scale
        self.channel_size = channel_size
        modules = []
        if scale == 2.0:
            # Upsampling reduces channels: /4 for wide inputs (>512), /2 otherwise.
            if channel_size > 512:
                modules.append(LwDetrConvNormLayer(config, channel_size, channel_size // 2, 1, 1, activation="relu"))
                modules.append(nn.ConvTranspose2d(channel_size // 2, channel_size // 4, kernel_size=2, stride=2))
            else:
                modules.append(nn.ConvTranspose2d(channel_size, channel_size // 2, 2, 2))
        elif scale == 0.5:
            # Downsampling keeps the channel count.
            modules.append(LwDetrConvNormLayer(config, channel_size, channel_size, 3, 2, activation="relu"))
        self.layers = nn.ModuleList(modules)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        for module in self.layers:
            hidden_states = module(hidden_states)
        return hidden_states
class LwDetrScaleProjector(nn.Module):
    """Resamples every backbone stage to one target scale, fuses them with a C2F
    layer, and applies a channels-first LayerNorm."""

    def __init__(self, config: LwDetrConfig, scale: float):
        super().__init__()
        stage_dims = [config.backbone_config.hidden_size] * len(config.backbone_config.out_indices)
        self.sampling_layers = nn.ModuleList(
            LwDetrSamplingLayer(config, dim, scale) for dim in stage_dims
        )
        # Channel count after resampling (upsampling shrinks channels — see
        # LwDetrSamplingLayer).
        fused_dim = stage_dims[-1]
        if scale == 2.0:
            fused_dim = fused_dim // 4 if fused_dim > 512 else fused_dim // 2
        self.projector_layer = LwDetrC2FLayer(config, fused_dim * len(stage_dims))
        self.layer_norm = LwDetrLayerNorm(config.d_model, data_format="channels_first")

    def forward(self, hidden_states_tuple: tuple[torch.Tensor]) -> torch.Tensor:
        resampled = [
            layer(feature) for layer, feature in zip(self.sampling_layers, hidden_states_tuple)
        ]
        fused = torch.cat(resampled, dim=1)
        return self.layer_norm(self.projector_layer(fused))
class LwDetrMultiScaleProjector(nn.Module):
    """Applies one LwDetrScaleProjector per configured scale factor."""

    def __init__(self, config: LwDetrConfig):
        super().__init__()
        self.config = config
        self.scale_layers = nn.ModuleList(
            LwDetrScaleProjector(config, factor) for factor in config.projector_scale_factors
        )

    def forward(self, hidden_states: tuple[torch.Tensor]) -> list[torch.Tensor]:
        # Every projector receives the full tuple of backbone stages.
        return [scale_layer(hidden_states) for scale_layer in self.scale_layers]
class LwDetrConvEncoder(nn.Module):
    """Backbone + multi-scale projector; pairs each feature map with a matching mask."""

    def __init__(self, config: LwDetrConfig):
        super().__init__()
        self.backbone = LwDetrViTBackbone(config.backbone_config)
        self.projector = LwDetrMultiScaleProjector(config)

    def forward(self, pixel_values: torch.Tensor, pixel_mask: torch.Tensor):
        # Backbone feature maps, then projected to the decoder's scales.
        feature_maps = self.projector(self.backbone(pixel_values).feature_maps)
        outputs = []
        for feature_map in feature_maps:
            # Resize the pixel mask to this feature map's spatial resolution.
            resized_mask = nn.functional.interpolate(
                pixel_mask[None].float(), size=feature_map.shape[-2:]
            ).to(torch.bool)[0]
            outputs.append((feature_map, resized_mask))
        return outputs
class LwDetrAttention(nn.Module):
    """Decoder-query self-attention with Group-DETR support.

    Queries and keys see content + box positional embeddings while values see
    content only. During training, the `config.group_detr` weight-sharing query
    groups are split off the sequence dimension onto the batch dimension before
    attention and merged back afterwards; at inference a single group is used.
    """

    def __init__(self, config: LwDetrConfig, layer_idx: int):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        self.head_dim = getattr(config, "head_dim", config.d_model // config.decoder_self_attention_heads)
        self.scaling = self.head_dim**-0.5
        self.attention_dropout = config.attention_dropout
        self.is_causal = False
        self.num_key_value_groups = 1
        self.q_proj = nn.Linear(
            config.d_model, config.decoder_self_attention_heads * self.head_dim, bias=config.attention_bias
        )
        self.k_proj = nn.Linear(
            config.d_model, config.decoder_self_attention_heads * self.head_dim, bias=config.attention_bias
        )
        self.v_proj = nn.Linear(
            config.d_model, config.decoder_self_attention_heads * self.head_dim, bias=config.attention_bias
        )
        self.o_proj = nn.Linear(
            config.decoder_self_attention_heads * self.head_dim, config.d_model, bias=config.attention_bias
        )

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: torch.Tensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple[torch.Tensor, torch.Tensor]:
        batch_size, seq_len, _ = hidden_states.shape
        input_shape = hidden_states.shape[:-1]
        hidden_shape = (*input_shape, -1, self.head_dim)
        # Values attend over content only; queries/keys also see positions.
        hidden_states_original = hidden_states
        if position_embeddings is not None:
            # (simplified from a nested conditional whose `is None` branch was unreachable)
            hidden_states = hidden_states + position_embeddings
        if self.training:
            # at training, we use group detr technique to add more supervision by using multiple weight-sharing decoders at once for faster convergence
            # at inference, we only use one decoder
            # NOTE(review): `hidden_shape` below is still the pre-split
            # (batch_size, seq_len, ...) shape, so the `.view` re-flattens the
            # groups — confirm this matches the intended group isolation.
            hidden_states_original = torch.cat(
                hidden_states_original.split(seq_len // self.config.group_detr, dim=1), dim=0
            )
            hidden_states = torch.cat(hidden_states.split(seq_len // self.config.group_detr, dim=1), dim=0)
        query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        value_states = self.v_proj(hidden_states_original).view(hidden_shape).transpose(1, 2)
        attention_interface: Callable = ALL_ATTENTION_FUNCTIONS.get_interface(
            self.config._attn_implementation, eager_attention_forward
        )
        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask=None,
            dropout=0.0 if not self.training else self.attention_dropout,
            scaling=self.scaling,
            **kwargs,
        )
        attn_output = attn_output.reshape(*input_shape, -1).contiguous()
        attn_output = self.o_proj(attn_output)
        if self.training:
            # Merge the per-group outputs back onto the sequence dimension.
            attn_output = torch.cat(torch.split(attn_output, batch_size, dim=0), dim=1)
        return attn_output, attn_weights
class LwDetrMultiscaleDeformableAttention(DeformableDetrMultiscaleDeformableAttention):
    # Thin wrapper: forwards every argument unchanged to the DeformableDetr
    # implementation. Exists so the class name (and _can_record_outputs hooks)
    # carry the LwDetr prefix.
    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: torch.Tensor | None = None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        position_embeddings: torch.Tensor | None = None,
        reference_points=None,
        spatial_shapes=None,
        spatial_shapes_list=None,
        level_start_index=None,
        **kwargs: Unpack[TransformersKwargs],
    ):
        return super().forward(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            position_embeddings=position_embeddings,
            reference_points=reference_points,
            spatial_shapes=spatial_shapes,
            spatial_shapes_list=spatial_shapes_list,
            level_start_index=level_start_index,
            **kwargs,
        )
class LwDetrMLP(nn.Module):
    """Two-layer feed-forward block with dropout and a residual connection."""

    def __init__(self, config: LwDetrConfig):
        super().__init__()
        self.dropout = config.dropout
        self.activation_fn = ACT2FN[config.decoder_activation_function]
        self.fc1 = nn.Linear(config.d_model, config.decoder_ffn_dim)
        self.fc2 = nn.Linear(config.decoder_ffn_dim, config.d_model)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        shortcut = hidden_states
        out = self.activation_fn(self.fc1(hidden_states))
        out = nn.functional.dropout(out, p=self.dropout, training=self.training)
        out = self.fc2(out)
        out = nn.functional.dropout(out, p=self.dropout, training=self.training)
        return shortcut + out
class LwDetrDecoderLayer(GradientCheckpointingLayer):
    """Decoder layer: query self-attention, deformable cross-attention, and an MLP,
    each followed by a post-norm LayerNorm."""
    def __init__(self, config: LwDetrConfig, layer_idx: int):
        # Calls nn.Module.__init__ directly rather than super().__init__().
        nn.Module.__init__(self)
        # self-attention
        self.self_attn = LwDetrAttention(config, layer_idx=layer_idx)
        self.dropout = config.dropout
        self.activation_fn = ACT2FN[config.decoder_activation_function]
        self.activation_dropout = config.activation_dropout
        self.self_attn_layer_norm = nn.LayerNorm(config.d_model)
        # cross-attention
        self.cross_attn = LwDetrMultiscaleDeformableAttention(
            config,
            num_heads=config.decoder_cross_attention_heads,
            n_points=config.decoder_n_points,
        )
        self.cross_attn_layer_norm = nn.LayerNorm(config.d_model)
        # mlp
        self.mlp = LwDetrMLP(config)
        self.layer_norm = nn.LayerNorm(config.d_model)
    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: torch.Tensor | None = None,
        reference_points=None,
        spatial_shapes=None,
        spatial_shapes_list=None,
        level_start_index=None,
        encoder_hidden_states: torch.Tensor | None = None,
        encoder_attention_mask: torch.Tensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ):
        # 1) self-attention among queries (+ residual, post-norm)
        self_attention_output, self_attn_weights = self.self_attn(
            hidden_states, position_embeddings=position_embeddings, **kwargs
        )
        self_attention_output = nn.functional.dropout(self_attention_output, p=self.dropout, training=self.training)
        hidden_states = hidden_states + self_attention_output
        hidden_states = self.self_attn_layer_norm(hidden_states)
        # 2) deformable cross-attention into the encoder features (+ residual, post-norm)
        cross_attention_output, cross_attn_weights = self.cross_attn(
            hidden_states=hidden_states,
            attention_mask=encoder_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            position_embeddings=position_embeddings,
            reference_points=reference_points,
            spatial_shapes=spatial_shapes,
            spatial_shapes_list=spatial_shapes_list,
            level_start_index=level_start_index,
            **kwargs,
        )
        cross_attention_output = nn.functional.dropout(cross_attention_output, p=self.dropout, training=self.training)
        hidden_states = hidden_states + cross_attention_output
        hidden_states = self.cross_attn_layer_norm(hidden_states)
        # 3) feed-forward (LwDetrMLP adds its own residual) + final norm
        hidden_states = self.mlp(hidden_states)
        hidden_states = self.layer_norm(hidden_states)
        return hidden_states
@auto_docstring
class LwDetrPreTrainedModel(PreTrainedModel):
    config: LwDetrConfig
    base_model_prefix = "model"
    main_input_name = "pixel_values"
    _no_split_modules = [
        r"LwDetrConvEncoder",
        r"LwDetrDecoderLayer",
    ]
    _supports_sdpa = True
    _supports_flash_attn = True
    _supports_flex_attn = True
    _supports_attention_backend = True
    _can_record_outputs = {
        "attentions": [LwDetrAttention, LwDetrMultiscaleDeformableAttention],
        "hidden_states": [LwDetrDecoderLayer],
    }
    @torch.no_grad()
    def _init_weights(self, module):
        super()._init_weights(module)
        if isinstance(module, LwDetrMultiscaleDeformableAttention):
            # Sampling offsets: zero weights, and biases set to unit direction
            # vectors (one rotation per head) scaled by the point index, so each
            # point starts at a distinct, evenly-rotated offset.
            init.constant_(module.sampling_offsets.weight, 0.0)
            thetas = torch.arange(module.n_heads, dtype=torch.int64).float() * (2.0 * math.pi / module.n_heads)
            grid_init = torch.stack([thetas.cos(), thetas.sin()], -1)
            grid_init = (
                (grid_init / grid_init.abs().max(-1, keepdim=True)[0])
                .view(module.n_heads, 1, 1, 2)
                .repeat(1, module.n_levels, module.n_points, 1)
            )
            for i in range(module.n_points):
                grid_init[:, :, i, :] *= i + 1
            init.copy_(module.sampling_offsets.bias, grid_init.view(-1))
            init.constant_(module.attention_weights.weight, 0.0)
            init.constant_(module.attention_weights.bias, 0.0)
            init.xavier_uniform_(module.value_proj.weight)
            init.constant_(module.value_proj.bias, 0.0)
            init.xavier_uniform_(module.output_proj.weight)
            init.constant_(module.output_proj.bias, 0.0)
        if hasattr(module, "level_embed"):
            init.normal_(module.level_embed)
        if hasattr(module, "refpoint_embed") and module.refpoint_embed is not None:
            init.constant_(module.refpoint_embed.weight, 0)
        if hasattr(module, "class_embed") and module.class_embed is not None:
            # Focal-loss style bias so initial foreground probability is ~0.01.
            prior_prob = 0.01
            bias_value = -math.log((1 - prior_prob) / prior_prob)
            init.constant_(module.class_embed.bias, bias_value)
        if hasattr(module, "bbox_embed") and module.bbox_embed is not None:
            init.constant_(module.bbox_embed.layers[-1].weight, 0)
            init.constant_(module.bbox_embed.layers[-1].bias, 0)
def refine_bboxes(reference_points, deltas):
    """Apply predicted deltas to reference boxes in (cx, cy, w, h) format.

    Centers shift by `deltas[..., :2]` scaled by the reference sizes; sizes are
    multiplied by `exp(deltas[..., 2:])`.
    """
    reference_points = reference_points.to(deltas.device)
    ref_centers = reference_points[..., :2]
    ref_sizes = reference_points[..., 2:]
    new_centers = deltas[..., :2] * ref_sizes + ref_centers
    new_sizes = deltas[..., 2:].exp() * ref_sizes
    return torch.cat((new_centers, new_sizes), -1)
@dataclass
@auto_docstring(
    custom_intro="""
    Base class for outputs of the LwDetrDecoder. This class adds two attributes to
    BaseModelOutputWithCrossAttentions, namely:
    - a stacked tensor of intermediate decoder hidden states (i.e. the output of each decoder layer)
    - a stacked tensor of intermediate reference points.
    """
)
class LwDetrDecoderOutput(DeformableDetrDecoderOutput):
    # Field set is identical to DeformableDetrDecoderOutput; only the name differs.
    pass
class LwDetrDecoder(LwDetrPreTrainedModel):
    """
    Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`DeformableDetrDecoderLayer`].
    The decoder updates the query embeddings through multiple self-attention and deformable cross-attention layers.
    Some tweaks for LwDetr:
    - it uses group detr technique at training for faster convergence.
    Args:
        config: LwDetrConfig
    """
    def __init__(self, config: LwDetrConfig):
        super().__init__(config)
        self.dropout = config.dropout
        self.layers = nn.ModuleList([LwDetrDecoderLayer(config, i) for i in range(config.decoder_layers)])
        self.layernorm = nn.LayerNorm(config.d_model)
        self.gradient_checkpointing = False
        # Maps the sine-embedded reference box (2*d_model) to a d_model query position.
        self.ref_point_head = LwDetrMLPPredictionHead(2 * config.d_model, config.d_model, config.d_model, num_layers=2)
        self.post_init()
    def get_reference(self, reference_points, valid_ratios):
        # (batch_size, num_queries, 4)
        obj_center = reference_points[..., :4]
        # batch_size, num_queries, num_levels, 4
        reference_points_inputs = obj_center[:, :, None] * torch.cat([valid_ratios, valid_ratios], -1)[:, None]
        # batch_size, num_queries, d_model * 2
        query_sine_embed = gen_sine_position_embeddings(reference_points_inputs[:, :, 0, :], self.config.d_model)
        # batch_size, num_queries, d_model
        query_pos = self.ref_point_head(query_sine_embed)
        return reference_points_inputs, query_pos
    @merge_with_config_defaults
    @capture_outputs
    def forward(
        self,
        inputs_embeds: torch.Tensor | None = None,
        reference_points: torch.Tensor | None = None,
        spatial_shapes: torch.Tensor | None = None,
        spatial_shapes_list: torch.Tensor | None = None,
        level_start_index: torch.Tensor | None = None,
        valid_ratios: torch.Tensor | None = None,
        encoder_hidden_states: torch.Tensor | None = None,
        encoder_attention_mask: torch.Tensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ):
        intermediate = ()
        # Reference points are fixed across layers here (no per-layer box refinement).
        intermediate_reference_points = (reference_points,)
        # NOTE(review): if `inputs_embeds` is None, `hidden_states` is never bound
        # and the loop below would raise — callers always pass it.
        if inputs_embeds is not None:
            hidden_states = inputs_embeds
        # Per-query positional embeddings derived from the reference boxes.
        reference_points_inputs, query_pos = self.get_reference(reference_points, valid_ratios)
        for idx, decoder_layer in enumerate(self.layers):
            hidden_states = decoder_layer(
                hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                encoder_attention_mask=encoder_attention_mask,
                position_embeddings=query_pos,
                reference_points=reference_points_inputs,
                spatial_shapes=spatial_shapes,
                spatial_shapes_list=spatial_shapes_list,
                level_start_index=level_start_index,
                **kwargs,
            )
            # Shared final LayerNorm applied to every layer's output for the
            # auxiliary prediction heads.
            intermediate_hidden_states = self.layernorm(hidden_states)
            intermediate += (intermediate_hidden_states,)
        intermediate = torch.stack(intermediate)
        last_hidden_state = intermediate[-1]
        intermediate_reference_points = torch.stack(intermediate_reference_points)
        return LwDetrDecoderOutput(
            last_hidden_state=last_hidden_state,
            intermediate_hidden_states=intermediate,
            intermediate_reference_points=intermediate_reference_points,
        )
@dataclass
@auto_docstring(
    custom_intro="""
    Base class for outputs of the LwDetr backbone-decoder model.
    """
)
class LwDetrModelOutput(ModelOutput):
    r"""
    init_reference_points (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`):
        Initial reference points sent through the Transformer decoder.
    intermediate_hidden_states (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, hidden_size)`):
        Stacked intermediate hidden states (output of each layer of the decoder).
    intermediate_reference_points (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, 4)`):
        Stacked intermediate reference points (reference points of each layer of the decoder).
    enc_outputs_class (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.num_labels)`, *optional*, returned when `config.with_box_refine=True` and `config.two_stage=True`):
        Predicted bounding boxes scores where the top `config.two_stage_num_proposals` scoring bounding boxes are
        picked as region proposals in the first stage. Output of bounding box binary classification (i.e.
        foreground and background).
    enc_outputs_coord_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, 4)`, *optional*, returned when `config.with_box_refine=True` and `config.two_stage=True`):
        Logits of predicted bounding boxes coordinates in the first stage.
    """
    init_reference_points: torch.FloatTensor | None = None
    last_hidden_state: torch.FloatTensor | None = None
    intermediate_hidden_states: torch.FloatTensor | None = None
    intermediate_reference_points: torch.FloatTensor | None = None
    enc_outputs_class: torch.FloatTensor | None = None
    enc_outputs_coord_logits: torch.FloatTensor | None = None
    # The three fields below are populated by the output-recording hooks
    # (`_can_record_outputs`) when requested.
    hidden_states: tuple[torch.FloatTensor, ...] | None = None
    attentions: tuple[torch.FloatTensor, ...] | None = None
    cross_attentions: tuple[torch.FloatTensor, ...] | None = None
@auto_docstring(
    custom_intro="""
    The bare LW Detr Model (consisting of a backbone and decoder Transformer) outputting raw
    hidden-states without any specific head on top.
    """
)
class LwDetrModel(DeformableDetrModel):
    def __init__(self, config: LwDetrConfig):
        # Deliberately skip DeformableDetrModel.__init__ (LwDetr builds its own
        # stack). The unbound call must be given `self` explicitly — previously
        # `config` was passed as `self`, which raised at construction time.
        LwDetrPreTrainedModel.__init__(self, config)
        # Create backbone + positional encoding
        self.backbone = LwDetrConvEncoder(config)
        self.group_detr = config.group_detr
        self.num_queries = config.num_queries
        hidden_dim = config.d_model
        # Learned reference boxes and query features, one set per Group-DETR group.
        self.reference_point_embed = nn.Embedding(self.num_queries * self.group_detr, 4)
        self.query_feat = nn.Embedding(self.num_queries * self.group_detr, hidden_dim)
        self.decoder = LwDetrDecoder(config)
        # Per-group projections and two-stage heads used to score encoder proposals.
        self.enc_output = nn.ModuleList([nn.Linear(hidden_dim, hidden_dim) for _ in range(self.group_detr)])
        self.enc_output_norm = nn.ModuleList([nn.LayerNorm(hidden_dim) for _ in range(self.group_detr)])
        # Should normally be None and then instantiated in the ForObjectDetection class
        self.enc_out_bbox_embed = nn.ModuleList(
            [LwDetrMLPPredictionHead(config.d_model, config.d_model, 4, num_layers=3) for _ in range(self.group_detr)]
        )
        self.enc_out_class_embed = nn.ModuleList(
            [nn.Linear(config.d_model, config.num_labels) for _ in range(self.group_detr)]
        )
        self.post_init()

    def gen_encoder_output_proposals(self, enc_output, padding_mask, spatial_shapes):
        """Generate the encoder output proposals from encoded enc_output.
        Args:
            enc_output (Tensor[batch_size, sequence_length, hidden_size]): Output of the encoder.
            padding_mask (Tensor[batch_size, sequence_length]): Padding mask for `enc_output`.
            spatial_shapes (list[tuple[int, int]]): Spatial shapes of the feature maps.
        Returns:
            `tuple(torch.FloatTensor)`: A tuple of feature map and bbox prediction.
            - object_query (Tensor[batch_size, sequence_length, hidden_size]): Object query features. Later used to
              directly predict a bounding box. (without the need of a decoder)
            - output_proposals (Tensor[batch_size, sequence_length, 4]): Normalized proposals, after an inverse
              sigmoid.
        """
        batch_size = enc_output.shape[0]
        proposals = []
        _cur = 0
        for level, (height, width) in enumerate(spatial_shapes):
            # Recover the 2D mask of this level to measure the valid (unpadded) area.
            mask_flatten_ = padding_mask[:, _cur : (_cur + height * width)].view(batch_size, height, width, 1)
            valid_height = torch.sum(~mask_flatten_[:, :, 0, 0], 1)
            valid_width = torch.sum(~mask_flatten_[:, 0, :, 0], 1)
            # One proposal per pixel: center at the (normalized) cell center ...
            grid_y, grid_x = torch.meshgrid(
                torch.linspace(
                    0,
                    height - 1,
                    height,
                    dtype=enc_output.dtype,
                    device=enc_output.device,
                ),
                torch.linspace(
                    0,
                    width - 1,
                    width,
                    dtype=enc_output.dtype,
                    device=enc_output.device,
                ),
                indexing="ij",
            )
            grid = torch.cat([grid_x.unsqueeze(-1), grid_y.unsqueeze(-1)], -1)
            scale = torch.cat([valid_width.unsqueeze(-1), valid_height.unsqueeze(-1)], 1).view(batch_size, 1, 1, 2)
            grid = (grid.unsqueeze(0).expand(batch_size, -1, -1, -1) + 0.5) / scale
            # ... with a size that doubles at every pyramid level.
            width_height = torch.ones_like(grid) * 0.05 * (2.0**level)
            proposal = torch.cat((grid, width_height), -1).view(batch_size, -1, 4)
            proposals.append(proposal)
            _cur += height * width
        output_proposals = torch.cat(proposals, 1)
        # Keep only proposals comfortably inside the image; invalidate the rest.
        output_proposals_valid = ((output_proposals > 0.01) & (output_proposals < 0.99)).all(-1, keepdim=True)
        output_proposals = output_proposals.masked_fill(padding_mask.unsqueeze(-1), float("inf"))
        output_proposals = output_proposals.masked_fill(~output_proposals_valid, float("inf"))
        # assign each pixel as an object query
        object_query = enc_output
        object_query = object_query.masked_fill(padding_mask.unsqueeze(-1), float(0))
        object_query = object_query.masked_fill(~output_proposals_valid, float(0))
        return object_query, output_proposals

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        pixel_values: torch.FloatTensor = None,
        pixel_mask: torch.LongTensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> LwDetrModelOutput:
        r"""
        Examples:
        ```python
        >>> from transformers import AutoImageProcessor, LwDetrModel
        >>> from PIL import Image
        >>> import httpx
        >>> from io import BytesIO
        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> with httpx.stream("GET", url) as response:
        ...     image = Image.open(BytesIO(response.read()))
        >>> image_processor = AutoImageProcessor.from_pretrained("AnnaZhang/lwdetr_small_60e_coco")
        >>> model = LwDetrModel.from_pretrained("AnnaZhang/lwdetr_small_60e_coco")
        >>> inputs = image_processor(images=image, return_tensors="pt")
        >>> outputs = model(**inputs)
        >>> last_hidden_states = outputs.last_hidden_state
        >>> list(last_hidden_states.shape)
        [1, 300, 256]
        ```"""
        batch_size, num_channels, height, width = pixel_values.shape
        device = pixel_values.device
        if pixel_mask is None:
            pixel_mask = torch.ones(((batch_size, height, width)), dtype=torch.long, device=device)
        # Extract multi-scale feature maps of same resolution `config.d_model` (cf Figure 4 in paper)
        # First, sent pixel_values + pixel_mask through Backbone to obtain the features
        # which is a list of tuples
        features = self.backbone(pixel_values, pixel_mask)
        sources = []
        masks = []
        for source, mask in features:
            sources.append(source)
            masks.append(mask)
            if mask is None:
                raise ValueError("No attention mask was provided")
        if self.training:
            reference_points = self.reference_point_embed.weight
            query_feat = self.query_feat.weight
        else:
            # only use one group in inference
            reference_points = self.reference_point_embed.weight[: self.num_queries]
            query_feat = self.query_feat.weight[: self.num_queries]
        # Prepare encoder inputs (by flattening)
        source_flatten = []
        mask_flatten = []
        spatial_shapes_list = []
        for source, mask in zip(sources, masks):
            batch_size, num_channels, height, width = source.shape
            spatial_shape = (height, width)
            spatial_shapes_list.append(spatial_shape)
            source = source.flatten(2).transpose(1, 2)
            mask = mask.flatten(1)
            source_flatten.append(source)
            mask_flatten.append(mask)
        source_flatten = torch.cat(source_flatten, 1)
        mask_flatten = torch.cat(mask_flatten, 1)
        spatial_shapes = torch.as_tensor(spatial_shapes_list, dtype=torch.long, device=source_flatten.device)
        level_start_index = torch.cat((spatial_shapes.new_zeros((1,)), spatial_shapes.prod(1).cumsum(0)[:-1]))
        valid_ratios = torch.stack([self.get_valid_ratio(m, dtype=source_flatten.dtype) for m in masks], 1)
        target = query_feat.unsqueeze(0).expand(batch_size, -1, -1)
        reference_points = reference_points.unsqueeze(0).expand(batch_size, -1, -1)
        # Two-stage proposal generation: every encoder pixel proposes a box.
        object_query_embedding, output_proposals = self.gen_encoder_output_proposals(
            source_flatten, ~mask_flatten, spatial_shapes_list
        )
        group_detr = self.group_detr if self.training else 1
        topk = self.num_queries
        topk_coords_logits = []
        topk_coords_logits_undetach = []
        object_query_undetach = []
        for group_id in range(group_detr):
            # Score/refine proposals with this group's heads and keep the top-k.
            group_object_query = self.enc_output[group_id](object_query_embedding)
            group_object_query = self.enc_output_norm[group_id](group_object_query)
            group_enc_outputs_class = self.enc_out_class_embed[group_id](group_object_query)
            group_delta_bbox = self.enc_out_bbox_embed[group_id](group_object_query)
            group_enc_outputs_coord = refine_bboxes(output_proposals, group_delta_bbox)
            group_topk_proposals = torch.topk(group_enc_outputs_class.max(-1)[0], topk, dim=1)[1]
            group_topk_coords_logits_undetach = torch.gather(
                group_enc_outputs_coord,
                1,
                group_topk_proposals.unsqueeze(-1).repeat(1, 1, 4),
            )
            # Detach so decoder gradients do not flow into the proposal stage.
            group_topk_coords_logits = group_topk_coords_logits_undetach.detach()
            group_object_query_undetach = torch.gather(
                group_object_query, 1, group_topk_proposals.unsqueeze(-1).repeat(1, 1, self.config.d_model)
            )
            topk_coords_logits.append(group_topk_coords_logits)
            topk_coords_logits_undetach.append(group_topk_coords_logits_undetach)
            object_query_undetach.append(group_object_query_undetach)
        topk_coords_logits = torch.cat(topk_coords_logits, 1)
        topk_coords_logits_undetach = torch.cat(topk_coords_logits_undetach, 1)
        object_query_undetach = torch.cat(object_query_undetach, 1)
        enc_outputs_class = object_query_undetach
        enc_outputs_coord_logits = topk_coords_logits
        # Initial decoder references: learned deltas applied to the top-k proposals.
        reference_points = refine_bboxes(topk_coords_logits_undetach, reference_points)
        init_reference_points = reference_points
        decoder_outputs = self.decoder(
            inputs_embeds=target,
            reference_points=reference_points,
            spatial_shapes=spatial_shapes,
            spatial_shapes_list=spatial_shapes_list,
            level_start_index=level_start_index,
            valid_ratios=valid_ratios,
            encoder_hidden_states=source_flatten,
            encoder_attention_mask=mask_flatten,
            **kwargs,
        )
        return LwDetrModelOutput(
            init_reference_points=init_reference_points,
            last_hidden_state=decoder_outputs.last_hidden_state,
            intermediate_hidden_states=decoder_outputs.intermediate_hidden_states,
            intermediate_reference_points=decoder_outputs.intermediate_reference_points,
            enc_outputs_class=enc_outputs_class,
            enc_outputs_coord_logits=enc_outputs_coord_logits,
            hidden_states=decoder_outputs.hidden_states,
            attentions=decoder_outputs.attentions,
            cross_attentions=decoder_outputs.cross_attentions,
        )
class LwDetrMLPPredictionHead(DeformableDetrMLPPredictionHead):
    """MLP (feed-forward) head used to regress bounding-box deltas; inherited unchanged from Deformable DETR."""

    pass
@dataclass
@auto_docstring(
    custom_intro="""
    Output type of [`LwDetrForObjectDetection`].
    """
)
class LwDetrObjectDetectionOutput(ModelOutput):
    r"""
    loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` are provided)):
        Total loss as a linear combination of a negative log-likehood (cross-entropy) for class prediction and a
        bounding box loss. The latter is defined as a linear combination of the L1 loss and the generalized
        scale-invariant IoU loss.
    loss_dict (`Dict`, *optional*):
        A dictionary containing the individual losses. Useful for logging.
    logits (`torch.FloatTensor` of shape `(batch_size, num_queries, num_classes + 1)`):
        Classification logits (including no-object) for all queries.
    pred_boxes (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`):
        Normalized boxes coordinates for all queries, represented as (center_x, center_y, width, height). These
        values are normalized in [0, 1], relative to the size of each individual image in the batch (disregarding
        possible padding). You can use [`~DeformableDetrProcessor.post_process_object_detection`] to retrieve the
        unnormalized bounding boxes.
    auxiliary_outputs (`list[Dict]`, *optional*):
        Optional, only returned when auxiliary losses are activated (i.e. `config.auxiliary_loss` is set to `True`)
        and labels are provided. It is a list of dictionaries containing the two above keys (`logits` and
        `pred_boxes`) for each decoder layer.
    init_reference_points (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`):
        Initial reference points sent through the Transformer decoder.
    intermediate_hidden_states (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, hidden_size)`):
        Stacked intermediate hidden states (output of each layer of the decoder).
    intermediate_reference_points (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, 4)`):
        Stacked intermediate reference points (reference points of each layer of the decoder).
    enc_outputs_class (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.num_labels)`, *optional*, returned when `config.with_box_refine=True` and `config.two_stage=True`):
        Predicted bounding boxes scores where the top `config.two_stage_num_proposals` scoring bounding boxes are
        picked as region proposals in the first stage. Output of bounding box binary classification (i.e.
        foreground and background).
    enc_outputs_coord_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, 4)`, *optional*, returned when `config.with_box_refine=True` and `config.two_stage=True`):
        Logits of predicted bounding boxes coordinates in the first stage.
    """

    loss: torch.FloatTensor | None = None
    loss_dict: dict | None = None
    logits: torch.FloatTensor | None = None
    pred_boxes: torch.FloatTensor | None = None
    auxiliary_outputs: list[dict] | None = None
    init_reference_points: torch.FloatTensor | None = None
    # Output of the last decoder layer (before the detection heads).
    last_hidden_state: torch.FloatTensor | None = None
    intermediate_hidden_states: torch.FloatTensor | None = None
    intermediate_reference_points: torch.FloatTensor | None = None
    # NOTE(review): annotated `Any` while every sibling field is a tensor; in
    # `LwDetrForObjectDetection.forward` this is assigned a `torch.cat` of
    # per-group class logits — confirm whether a tensor annotation is intended.
    enc_outputs_class: Any = None
    enc_outputs_coord_logits: torch.FloatTensor | None = None
    # Per-layer decoder states / attention maps; populated only when the
    # corresponding `output_hidden_states` / `output_attentions` flags are set.
    hidden_states: tuple[torch.FloatTensor, ...] | None = None
    attentions: tuple[torch.FloatTensor, ...] | None = None
    cross_attentions: tuple[torch.FloatTensor, ...] | None = None
@auto_docstring(
    custom_intro="""
    LW DETR Model (consisting of a backbone and decoder Transformer) with object detection heads on
    top, for tasks such as COCO detection.
    """
)
class LwDetrForObjectDetection(DeformableDetrForObjectDetection):
    # NOTE(review): explicitly clears the parent's `_tied_weights_keys`; LW-DETR
    # uses a single shared head, so no head weights are tied here.
    _tied_weights_keys = None

    def __init__(self, config: LwDetrConfig):
        # Call PreTrainedModel.__init__ directly (not the Deformable DETR parent's
        # __init__) so that only LW-DETR's own heads are built.
        PreTrainedModel.__init__(self, config)
        self.model = LwDetrModel(config)
        # Classification head shared across decoder layers: one logit per label per query.
        self.class_embed = nn.Linear(config.d_model, config.num_labels)
        # 3-layer MLP regressing the 4 box-delta values (cx, cy, w, h) per query.
        self.bbox_embed = LwDetrMLPPredictionHead(config.d_model, config.d_model, 4, num_layers=3)
        self.post_init()

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        pixel_values: torch.FloatTensor = None,
        pixel_mask: torch.LongTensor | None = None,
        labels: list[dict] | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> LwDetrObjectDetectionOutput:
        r"""
        decoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, num_queries)`, *optional*):
            Not used by default. Can be used to mask object queries.
        inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing the flattened feature map (output of the backbone + projection layer), you
            can choose to directly pass a flattened representation of an image.
        decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`, *optional*):
            Optionally, instead of initializing the queries with a tensor of zeros, you can choose to directly pass an
            embedded representation.
        labels (`list[Dict]` of len `(batch_size,)`, *optional*):
            Labels for computing the bipartite matching loss. List of dicts, each dictionary containing at least the
            following 2 keys: 'class_labels' and 'boxes' (the class labels and bounding boxes of an image in the batch
            respectively). The class labels themselves should be a `torch.LongTensor` of len `(number of bounding boxes
            in the image,)` and the boxes a `torch.FloatTensor` of shape `(number of bounding boxes in the image, 4)`.
        Examples:
        ```python
        >>> from transformers import AutoImageProcessor, LwDetrForObjectDetection
        >>> from PIL import Image
        >>> import httpx
        >>> from io import BytesIO
        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> with httpx.stream("GET", url) as response:
        ...     image = Image.open(BytesIO(response.read()))
        >>> image_processor = AutoImageProcessor.from_pretrained("AnnaZhang/lwdetr_small_60e_coco")
        >>> model = LwDetrForObjectDetection.from_pretrained("AnnaZhang/lwdetr_small_60e_coco")
        >>> inputs = image_processor(images=image, return_tensors="pt")
        >>> outputs = model(**inputs)
        >>> # convert outputs (bounding boxes and class logits) to Pascal VOC format (xmin, ymin, xmax, ymax)
        >>> target_sizes = torch.tensor([image.size[::-1]])
        >>> results = image_processor.post_process_object_detection(outputs, threshold=0.5, target_sizes=target_sizes)[
        ...     0
        ... ]
        >>> for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
        ...     box = [round(i, 2) for i in box.tolist()]
        ...     print(
        ...         f"Detected {model.config.id2label[label.item()]} with confidence "
        ...         f"{round(score.item(), 3)} at location {box}"
        ...     )
        Detected cat with confidence 0.8 at location [16.5, 52.84, 318.25, 470.78]
        Detected cat with confidence 0.789 at location [342.19, 24.3, 640.02, 372.25]
        Detected remote with confidence 0.633 at location [40.79, 72.78, 176.76, 117.25]
        ```"""
        # Backbone + projector + decoder forward pass.
        outputs = self.model(
            pixel_values,
            pixel_mask=pixel_mask,
            **kwargs,
        )
        last_hidden_states = outputs.last_hidden_state
        intermediate_reference_points = outputs.intermediate_reference_points
        enc_outputs_class_logits = outputs.enc_outputs_class
        enc_outputs_boxes_logits = outputs.enc_outputs_coord_logits
        # Final predictions: shared heads on the last decoder layer; box deltas are
        # refined against the last set of reference points.
        logits = self.class_embed(last_hidden_states)
        pred_boxes_delta = self.bbox_embed(last_hidden_states)
        pred_boxes = refine_bboxes(intermediate_reference_points[-1], pred_boxes_delta)
        # `outputs.enc_outputs_class` carries the selected top-k object queries, one
        # slice of `num_queries` per detection group. Run each group's first-stage
        # classification head on its own slice; at inference only one group is active.
        enc_outputs_class_logits_list = enc_outputs_class_logits.split(self.config.num_queries, dim=1)
        pred_class = []
        group_detr = self.config.group_detr if self.training else 1
        for group_index in range(group_detr):
            group_pred_class = self.model.enc_out_class_embed[group_index](enc_outputs_class_logits_list[group_index])
            pred_class.append(group_pred_class)
        enc_outputs_class_logits = torch.cat(pred_class, dim=1)
        loss, loss_dict, auxiliary_outputs = None, None, None
        if labels is not None:
            outputs_class, outputs_coord = None, None
            if self.config.auxiliary_loss:
                # Deep supervision: re-run the shared heads on every intermediate
                # decoder layer so the loss can supervise each layer.
                intermediate_hidden_states = outputs.intermediate_hidden_states
                outputs_coord_delta = self.bbox_embed(intermediate_hidden_states)
                outputs_coord = refine_bboxes(intermediate_reference_points, outputs_coord_delta)
                outputs_class = self.class_embed(intermediate_hidden_states)
            loss, loss_dict, auxiliary_outputs = self.loss_function(
                logits,
                labels,
                self.device,
                pred_boxes,
                self.config,
                outputs_class,
                outputs_coord,
                enc_outputs_class_logits,
                enc_outputs_boxes_logits,
            )
        return LwDetrObjectDetectionOutput(
            loss=loss,
            loss_dict=loss_dict,
            logits=logits,
            pred_boxes=pred_boxes,
            auxiliary_outputs=auxiliary_outputs,
            last_hidden_state=outputs.last_hidden_state,
            intermediate_hidden_states=outputs.intermediate_hidden_states,
            intermediate_reference_points=outputs.intermediate_reference_points,
            init_reference_points=outputs.init_reference_points,
            enc_outputs_class=enc_outputs_class_logits,
            enc_outputs_coord_logits=enc_outputs_boxes_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            cross_attentions=outputs.cross_attentions,
        )
# Public symbols exported by this module.
__all__ = [
    "LwDetrConfig",
    "LwDetrPreTrainedModel",
    "LwDetrModel",
    "LwDetrForObjectDetection",
    "LwDetrViTConfig",
    "LwDetrViTPreTrainedModel",
    "LwDetrViTBackbone",
]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/lw_detr/modular_lw_detr.py",
"license": "Apache License 2.0",
"lines": 1393,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:tests/models/lw_detr/test_modeling_lw_detr.py | # Copyright 2026 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from functools import cached_property
from transformers import (
DeformableDetrImageProcessor,
LwDetrConfig,
LwDetrViTConfig,
is_torch_available,
is_vision_available,
)
from transformers.testing_utils import (
Expectations,
require_torch,
require_vision,
slow,
torch_device,
)
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import LwDetrForObjectDetection, LwDetrModel, LwDetrViTBackbone
if is_vision_available():
from PIL import Image
# Hub checkpoints exercised by the slow integration tests below.
CHECKPOINT = {
    "tiny": "stevenbucaille/lwdetr_tiny_30e_objects365",
    "xlarge": "stevenbucaille/lwdetr_xlarge_30e_objects365",
}
class LwDetrVitModelTester:
    """Builds a tiny `LwDetrViTConfig` plus random inputs for the ViT-backbone tests.

    Attribute names mirror the config fields because the common test mixins read
    them by name when checking output shapes.
    """

    def __init__(
        self,
        parent,
        batch_size=3,
        num_labels=3,
        num_channels=3,
        use_labels=True,
        is_training=True,
        image_size=256,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=2,
        window_block_indices=[1],
        out_indices=[0],
        num_windows=16,
        dropout_prob=0.0,
        attn_implementation="eager",
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_labels = num_labels
        self.num_channels = num_channels
        self.use_labels = use_labels
        self.image_size = image_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.window_block_indices = window_block_indices
        self.out_indices = out_indices
        self.num_windows = num_windows
        self.dropout_prob = dropout_prob
        self.attn_implementation = attn_implementation
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        """Return `(config, pixel_values, labels)` with randomly generated inputs."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        """Build a small `LwDetrViTConfig` from the tester's attributes."""
        return LwDetrViTConfig(
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            window_block_indices=self.window_block_indices,
            out_indices=self.out_indices,
            num_windows=self.num_windows,
            hidden_dropout_prob=self.dropout_prob,
            attention_probs_dropout_prob=self.dropout_prob,
            attn_implementation=self.attn_implementation,
        )

    def prepare_config_and_inputs_for_common(self):
        """Adapter for the common mixins: `(config, inputs_dict)` with only pixel values."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict

    def create_and_check_backbone(self, config, pixel_values, labels):
        """Run the backbone and assert feature-map / channel shapes, with and without `out_features`."""
        model = LwDetrViTBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        # NOTE(review): spatial dims here are `num_windows_side ** 2` — confirm this
        # matches the backbone's token-grid layout for these tiny configs.
        self.parent.assertListEqual(
            list(result.feature_maps[0].shape),
            [
                self.batch_size,
                self.hidden_size,
                self.get_config().num_windows_side ** 2,
                self.get_config().num_windows_side ** 2,
            ],
        )
        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, [config.hidden_size])
        # verify backbone works with out_features=None
        config.out_features = None
        model = LwDetrViTBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(
            list(result.feature_maps[0].shape),
            [self.batch_size, config.hidden_size, config.patch_size, config.patch_size],
        )
        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_size])
@require_torch
class LwDetrViTBackboneTest(ModelTesterMixin, BackboneTesterMixin, unittest.TestCase):
    """Common + backbone test suite for `LwDetrViTBackbone`."""

    all_model_classes = (LwDetrViTBackbone,) if is_torch_available() else ()
    config_class = LwDetrViTConfig
    test_resize_embeddings = False
    test_torch_exportable = True
    model_split_percents = [0.5, 0.87, 0.9]

    def setUp(self):
        self.model_tester = LwDetrVitModelTester(self)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_model_get_set_embeddings(self):
        # Backbone exposes input embeddings but no output embedding layer.
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_attention_outputs(self):
        # Overridden: windowed and unwindowed blocks produce attention maps with
        # different shapes, so the generic mixin check does not apply.
        def check_attention_output(inputs_dict, config, model_class):
            config._attn_implementation = "eager"
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            windowed_attentions = [attentions[i] for i in self.model_tester.window_block_indices]
            unwindowed_attentions = [attentions[i] for i in self.model_tester.out_indices]
            # Windowed blocks fold the window count into the batch dimension.
            expected_windowed_attention_shape = [
                self.model_tester.batch_size * self.model_tester.num_windows,
                self.model_tester.num_attention_heads,
                self.model_tester.get_config().num_windows_side ** 2,
                self.model_tester.get_config().num_windows_side ** 2,
            ]
            # NOTE(review): uses `image_size` for the attention dims — with these tiny
            # configs the token count happens to equal image_size; verify the intent.
            expected_unwindowed_attention_shape = [
                self.model_tester.batch_size,
                self.model_tester.num_attention_heads,
                self.model_tester.image_size,
                self.model_tester.image_size,
            ]
            for i, attention in enumerate(windowed_attentions):
                self.assertListEqual(
                    list(attention.shape),
                    expected_windowed_attention_shape,
                )
            for i, attention in enumerate(unwindowed_attentions):
                self.assertListEqual(
                    list(attention.shape),
                    expected_unwindowed_attention_shape,
                )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            check_attention_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            check_attention_output(inputs_dict, config, model_class)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_stages = self.model_tester.num_hidden_layers
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # VitDet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [
                    self.model_tester.hidden_size,
                    self.model_tester.hidden_size,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    # overwrite since LwDetrVitDet only supports retraining gradients of hidden states
    def test_retain_grad_hidden_states_attentions(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions
        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)
        inputs = self._prepare_for_class(inputs_dict, model_class)
        outputs = model(**inputs)
        output = outputs.feature_maps[0]
        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        hidden_states.retain_grad()
        output.flatten()[0].backward(retain_graph=True)
        self.assertIsNotNone(hidden_states.grad)
def prepare_img():
    """Load the fixture COCO image (two cats on a couch) used by the integration tests."""
    return Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
class LwDetrModelTester:
    """Builds a tiny `LwDetrConfig` (with a tiny ViT backbone) plus random inputs.

    Attribute names mirror the config fields because the common test mixins read
    them by name when checking output shapes.
    """

    def __init__(
        self,
        parent,
        batch_size=3,
        is_training=True,
        image_size=256,
        num_labels=5,
        n_targets=4,
        use_labels=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        batch_norm_eps=1e-5,
        # backbone
        backbone_config=None,
        # projector
        projector_scale_factors=[0.5, 2.0],
        # decoder
        d_model=32,
        decoder_ffn_dim=32,
        decoder_layers=2,
        decoder_self_attention_heads=2,
        decoder_cross_attention_heads=4,
        # model
        num_queries=10,
        group_detr=2,
        dropout=0.0,
        activation_dropout=0.0,
        attention_dropout=0.0,
        attn_implementation="eager",
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.num_channels = 3
        self.image_size = image_size
        self.num_labels = num_labels
        self.n_targets = n_targets
        self.use_labels = use_labels
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.batch_norm_eps = batch_norm_eps
        self.backbone_config = backbone_config
        self.projector_scale_factors = projector_scale_factors
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_self_attention_heads = decoder_self_attention_heads
        self.decoder_cross_attention_heads = decoder_cross_attention_heads
        self.num_queries = num_queries
        self.group_detr = group_detr
        self.dropout = dropout
        self.activation_dropout = activation_dropout
        self.attention_dropout = attention_dropout
        self.attn_implementation = attn_implementation

    def prepare_config_and_inputs(self):
        """Return `(config, pixel_values, pixel_mask, labels)` with random detection targets."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        pixel_mask = torch.ones([self.batch_size, self.image_size, self.image_size], device=torch_device)
        labels = None
        if self.use_labels:
            # One target dict per image: `class_labels` (long) and normalized `boxes`.
            labels = []
            for i in range(self.batch_size):
                target = {}
                target["class_labels"] = torch.randint(
                    high=self.num_labels, size=(self.n_targets,), device=torch_device
                )
                target["boxes"] = torch.rand(self.n_targets, 4, device=torch_device, dtype=pixel_values.dtype)
                labels.append(target)
        config = self.get_config()
        config.num_labels = self.num_labels
        return config, pixel_values, pixel_mask, labels

    def get_config(self):
        """Build a small `LwDetrConfig` with a nested tiny `LwDetrViTConfig` backbone."""
        backbone_config = LwDetrViTConfig(
            hidden_size=16,
            num_hidden_layers=4,
            num_attention_heads=2,
            window_block_indices=[0, 2],
            out_indices=[1, 3],
            num_windows=16,
            image_size=self.image_size,
            dropout_prob=self.dropout,
            attn_implementation=self.attn_implementation,
        )
        return LwDetrConfig(
            backbone_config=backbone_config,
            d_model=self.d_model,
            projector_scale_factors=self.projector_scale_factors,
            decoder_ffn_dim=self.decoder_ffn_dim,
            decoder_layers=self.decoder_layers,
            decoder_self_attention_heads=self.decoder_self_attention_heads,
            decoder_cross_attention_heads=self.decoder_cross_attention_heads,
            num_queries=self.num_queries,
            group_detr=self.group_detr,
            dropout=self.dropout,
            activation_dropout=self.activation_dropout,
            attention_dropout=self.attention_dropout,
            attn_implementation=self.attn_implementation,
            _attn_implementation=self.attn_implementation,
        )

    def prepare_config_and_inputs_for_common(self):
        """Adapter for the common mixins: `(config, inputs_dict)` without labels."""
        config, pixel_values, pixel_mask, labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict

    def create_and_check_lw_detr_model(self, config, pixel_values, pixel_mask, labels):
        """Forward `LwDetrModel` (with and without mask) and assert the decoder output shape."""
        model = LwDetrModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.num_queries, self.d_model))

    def create_and_check_lw_detr_object_detection_head_model(self, config, pixel_values, pixel_mask, labels):
        """Forward `LwDetrForObjectDetection`, then again with labels, asserting logits/box/loss shapes."""
        model = LwDetrForObjectDetection(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_queries, self.num_labels))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_queries, 4))
        result = model(pixel_values=pixel_values, pixel_mask=pixel_mask, labels=labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_queries, self.num_labels))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_queries, 4))
@require_torch
class LwDetrModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model/pipeline test suite for `LwDetrModel` and `LwDetrForObjectDetection`."""

    all_model_classes = (LwDetrModel, LwDetrForObjectDetection) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"image-feature-extraction": LwDetrModel, "object-detection": LwDetrForObjectDetection}
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = False
    test_missing_keys = False
    test_torch_exportable = True
    model_split_percents = [0.5, 0.87, 0.9]

    # special case for head models: detection labels are a list of per-image dicts,
    # not a tensor, so build them by hand here.
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "LwDetrForObjectDetection":
                labels = []
                for i in range(self.model_tester.batch_size):
                    target = {}
                    target["class_labels"] = torch.ones(
                        size=(self.model_tester.n_targets,), device=torch_device, dtype=torch.long
                    )
                    target["boxes"] = torch.ones(
                        self.model_tester.n_targets, 4, device=torch_device, dtype=torch.float
                    )
                    labels.append(target)
                inputs_dict["labels"] = labels
        return inputs_dict

    def setUp(self):
        self.model_tester = LwDetrModelTester(self)
        self.config_tester = ConfigTester(
            self,
            config_class=LwDetrConfig,
            has_text_modality=False,
            common_properties=["d_model", "decoder_self_attention_heads"],
        )

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_lw_detr_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lw_detr_model(*config_and_inputs)

    def test_lw_detr_object_detection_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lw_detr_object_detection_head_model(*config_and_inputs)

    @unittest.skip(reason="LwDetr does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="LwDetr does not use test_inputs_embeds_matches_input_ids")
    def test_inputs_embeds_matches_input_ids(self):
        pass

    @unittest.skip(reason="LwDetr does not support input and output embeddings")
    def test_model_get_set_embeddings(self):
        pass

    @unittest.skip(reason="LwDetr does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="LwDetr does not use token embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    @unittest.skip(reason="Feed forward chunking is not implemented")
    def test_feed_forward_chunking(self):
        pass

    def test_attention_outputs(self):
        # Overridden: LW-DETR exposes decoder self-attentions plus deformable
        # cross-attentions with a (levels, points) layout the generic check
        # does not cover.
        def check_attention_outputs(inputs_dict, config, model_class):
            model = model_class._from_config(config, attn_implementation="eager")
            config = model.config
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.decoder_layers)
            expected_attentions_shape = [
                self.model_tester.batch_size,
                self.model_tester.decoder_self_attention_heads,
                self.model_tester.num_queries,
                self.model_tester.num_queries,
            ]
            for i in range(self.model_tester.decoder_layers):
                self.assertEqual(expected_attentions_shape, list(attentions[i].shape))
            # check cross_attentions outputs
            expected_attentions_shape = [
                self.model_tester.batch_size,
                self.model_tester.num_queries,
                self.model_tester.decoder_cross_attention_heads,
                config.num_feature_levels,
                config.decoder_n_points,
            ]
            cross_attentions = outputs.cross_attentions
            self.assertEqual(len(cross_attentions), self.model_tester.decoder_layers)
            for i in range(self.model_tester.decoder_layers):
                self.assertEqual(expected_attentions_shape, list(cross_attentions[i].shape))
            out_len = len(outputs)
            correct_outlen = 8  # 6 + attentions + cross_attentions
            # Object Detection model returns pred_logits, pred_boxes and auxiliary outputs
            if model_class.__name__ == "LwDetrForObjectDetection":
                correct_outlen += 2
                if "labels" in inputs_dict:
                    correct_outlen += 3  # loss, loss_dict and auxiliary outputs is added to beginning
            self.assertEqual(correct_outlen, out_len)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        inputs_dict["output_hidden_states"] = False
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            check_attention_outputs(inputs_dict, config, model_class)
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            check_attention_outputs(inputs_dict, config, model_class)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            # One entry per decoder layer plus the initial query embeddings.
            expected_num_hidden_states = self.model_tester.decoder_layers + 1
            self.assertEqual(len(hidden_states), expected_num_hidden_states)
            for i in range(expected_num_hidden_states):
                self.assertListEqual(
                    list(hidden_states[i].shape),
                    [
                        self.model_tester.batch_size,
                        self.model_tester.num_queries,
                        self.model_tester.d_model,
                    ],
                )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = False
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_retain_grad_hidden_states_attentions(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)
        inputs = self._prepare_for_class(inputs_dict, model_class)
        outputs = model(**inputs)
        # we take the first output since last_hidden_state is the first item
        output = outputs.last_hidden_state
        hidden_states = outputs.hidden_states[0]
        attentions = outputs.attentions[0]
        hidden_states.retain_grad()
        attentions.retain_grad()
        output.flatten()[0].backward(retain_graph=True)
        self.assertIsNotNone(hidden_states.grad)
        self.assertIsNotNone(attentions.grad)

    def test_forward_auxiliary_loss(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.auxiliary_loss = True
        # only test for object detection and segmentation model
        for model_class in self.all_model_classes[1:]:
            model = model_class(config)
            model.to(torch_device)
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            outputs = model(**inputs)
            self.assertIsNotNone(outputs.auxiliary_outputs)
            self.assertEqual(len(outputs.auxiliary_outputs), self.model_tester.decoder_layers - 1)
@require_torch
@require_vision
class LwDetrModelIntegrationTest(unittest.TestCase):
@cached_property
def default_image_processor(self):
if is_vision_available():
return {
"tiny": DeformableDetrImageProcessor.from_pretrained(CHECKPOINT["tiny"]),
"xlarge": DeformableDetrImageProcessor.from_pretrained(CHECKPOINT["xlarge"]),
}
@slow
def test_inference_object_detection_head_tiny(self):
size = "tiny"
model = LwDetrForObjectDetection.from_pretrained(CHECKPOINT[size], attn_implementation="eager").to(
torch_device
)
image_processor = self.default_image_processor[size]
image = prepare_img()
encoding = image_processor(images=image, return_tensors="pt").to(torch_device)
pixel_values = encoding["pixel_values"].to(torch_device)
pixel_mask = encoding["pixel_mask"].to(torch_device)
with torch.no_grad():
outputs = model(pixel_values, pixel_mask)
expected_logits_shape = torch.Size((1, model.config.num_queries, model.config.num_labels))
self.assertEqual(outputs.logits.shape, expected_logits_shape)
expectations = Expectations(
{
("cuda", (8, 0)): [-7.7648, -4.1330, -2.9003, -4.0559, -2.9635],
("xpu", (3, 0)): [-7.7693, -4.1270, -2.9018, -4.0605, -2.9575],
}
)
expected_logits = torch.tensor(expectations.get_expectation()).to(torch_device)
torch.testing.assert_close(outputs.logits.flatten()[:5], expected_logits, rtol=2e-4, atol=2e-4)
expected_boxes_shape = torch.Size((1, model.config.num_queries, 4))
self.assertEqual(outputs.pred_boxes.shape, expected_boxes_shape)
expectations = Expectations(
{
("cuda", (8, 0)): [0.1694, 0.1979, 0.2121, 0.0912, 0.2537],
("xpu", (3, 0)): [0.1694, 0.1979, 0.2121, 0.0912, 0.2537],
}
)
expected_boxes = torch.tensor(expectations.get_expectation()).to(torch_device)
torch.testing.assert_close(outputs.pred_boxes.flatten()[:5], expected_boxes, rtol=2e-4, atol=2e-4)
results = image_processor.post_process_object_detection(
outputs, threshold=0.0, target_sizes=[image.size[::-1]]
)[0]
expectations = Expectations(
{
("cuda", (8, 0)): [0.8684, 0.7492, 0.7146, 0.4362],
("xpu", (3, 0)): [0.8676, 0.7527, 0.7177, 0.4391],
}
)
expected_scores = torch.tensor(expectations.get_expectation()).to(torch_device)
expected_labels = [140, 133, 140, 133]
expectations = Expectations(
{
("cuda", (8, 0)): [
[4.9333, 56.6130, 319.7758, 474.7774],
[40.5547, 73.0968, 176.2951, 116.8605],
[340.3403, 25.1044, 640.2798, 368.7382],
[334.2971, 77.0087, 371.2877, 189.8089],
],
("xpu", (3, 0)): [
[4.8948, 56.5549, 319.8077, 474.7937],
[40.5620, 73.1059, 176.2996, 116.8567],
[340.3327, 25.1026, 640.3193, 368.6754],
[334.2945, 76.9876, 371.2914, 189.8221],
],
}
)
expected_slice_boxes = torch.tensor(expectations.get_expectation()).to(torch_device)
torch.testing.assert_close(results["scores"][:4], expected_scores, atol=1e-3, rtol=2e-4)
self.assertSequenceEqual(results["labels"][:4].tolist(), expected_labels)
torch.testing.assert_close(results["boxes"][:4], expected_slice_boxes, atol=1e-3, rtol=2e-4)
    @slow
    def test_inference_object_detection_head_xlarge(self):
        """Integration test for the LW-DETR `xlarge` checkpoint on a sample image.

        Checks, against device-specific (cuda/xpu) expected values:
        - the shape and a slice of the raw class logits,
        - the shape and a slice of the predicted boxes,
        - the post-processed scores, labels and boxes for the top 4 detections.
        """
        size = "xlarge"
        model = LwDetrForObjectDetection.from_pretrained(CHECKPOINT[size], attn_implementation="eager").to(
            torch_device
        )
        image_processor = self.default_image_processor[size]
        image = prepare_img()
        encoding = image_processor(images=image, return_tensors="pt").to(torch_device)
        pixel_values = encoding["pixel_values"].to(torch_device)
        pixel_mask = encoding["pixel_mask"].to(torch_device)
        with torch.no_grad():
            outputs = model(pixel_values, pixel_mask)
        # --- Raw class logits: shape and first 5 flattened values ---
        expected_logits_shape = torch.Size((1, model.config.num_queries, model.config.num_labels))
        self.assertEqual(outputs.logits.shape, expected_logits_shape)
        expectations = Expectations(
            {
                ("cuda", (8, 0)): [-11.9394, -4.3419, -4.4172, -5.0299, -6.9282],
                ("xpu", (3, 0)): [-11.9292, -4.3307, -4.4075, -5.0207, -6.9211],
            }
        )
        expected_logits = torch.tensor(expectations.get_expectation()).to(torch_device)
        torch.testing.assert_close(outputs.logits.flatten()[:5], expected_logits, rtol=2e-4, atol=2e-4)
        # --- Predicted boxes (normalized cxcywh): shape and first 5 flattened values ---
        expected_boxes_shape = torch.Size((1, model.config.num_queries, 4))
        self.assertEqual(outputs.pred_boxes.shape, expected_boxes_shape)
        expectations = Expectations(
            {
                ("cuda", (8, 0)): [0.7689, 0.4107, 0.4617, 0.7244, 0.2526],
                ("xpu", (3, 0)): [0.7688, 0.4106, 0.4618, 0.7245, 0.2526],
            }
        )
        expected_boxes = torch.tensor(expectations.get_expectation()).to(torch_device)
        torch.testing.assert_close(outputs.pred_boxes.flatten()[:5], expected_boxes, rtol=2e-4, atol=2e-4)
        # --- Post-processed detections (threshold=0.0 keeps everything; we check the top 4) ---
        results = image_processor.post_process_object_detection(
            outputs, threshold=0.0, target_sizes=[image.size[::-1]]
        )[0]
        expectations = Expectations(
            {
                ("cuda", (8, 0)): [0.9746, 0.9717, 0.9344, 0.8182],
                ("xpu", (3, 0)): [0.9745, 0.9715, 0.9339, 0.8163],
            }
        )
        expected_scores = torch.tensor(expectations.get_expectation()).to(torch_device)
        expected_labels = [140, 140, 133, 133]
        expectations = Expectations(
            {
                ("cuda", (8, 0)): [
                    [7.4541, 54.2878, 315.8890, 474.8681],
                    [344.3325, 23.2591, 639.7999, 370.9900],
                    [40.4797, 73.3092, 175.6086, 116.9654],
                    [333.9930, 77.1547, 370.4000, 186.1230],
                ],
                ("xpu", (3, 0)): [
                    [7.4487, 54.2931, 315.8945, 474.8726],
                    [344.2597, 23.2305, 639.8082, 370.9894],
                    [40.4780, 73.3095, 175.6083, 116.9673],
                    [333.9890, 77.1453, 370.4069, 186.1300],
                ],
            }
        )
        expected_slice_boxes = torch.tensor(expectations.get_expectation()).to(torch_device)
        torch.testing.assert_close(results["scores"][:4], expected_scores, atol=1e-3, rtol=2e-4)
        self.assertSequenceEqual(results["labels"][:4].tolist(), expected_labels)
        torch.testing.assert_close(results["boxes"][:4], expected_slice_boxes, atol=1e-3, rtol=2e-4)
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/lw_detr/test_modeling_lw_detr.py",
"license": "Apache License 2.0",
"lines": 669,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:src/transformers/models/minimax_m2/modular_minimax_m2.py | # Copyright 2025 the MiniMax AI Team and HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn.functional as F
from torch import nn
from ... import initialization as init
from ...cache_utils import Cache, DynamicCache
from ...configuration_utils import PreTrainedConfig
from ...masking_utils import create_causal_mask
from ...modeling_outputs import MoeModelOutputWithPast
from ...modeling_rope_utils import RopeParameters
from ...modeling_utils import PreTrainedModel
from ...processing_utils import Unpack
from ...utils import TransformersKwargs, auto_docstring
from ...utils.generic import merge_with_config_defaults
from ...utils.output_capturing import capture_outputs
from ..flex_olmo.modeling_flex_olmo import FlexOlmoAttention
from ..glm4_moe.modeling_glm4_moe import (
Glm4MoeRotaryEmbedding,
apply_rotary_pos_emb, # noqa: F401
)
from ..mixtral.modeling_mixtral import (
MixtralExperts,
MixtralForCausalLM,
MixtralModel,
MixtralPreTrainedModel,
MixtralRMSNorm,
MixtralSparseMoeBlock,
MixtralTopKRouter,
)
class MiniMaxM2Config(PreTrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`MiniMaxM2Model`]. It is used to instantiate an
    MiniMaxM2 model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of the MiniMaxM2.
    [MiniMaxAI/MiniMax-M2](https://huggingface.co/MiniMaxAI/MiniMax-M2)
    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.
    Args:
        vocab_size (`Optional`, *optional*, defaults to 200064):
            Vocabulary size of the MiniMaxM2 model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`MiniMaxM2Model`]
        hidden_size (`Optional`, *optional*, defaults to 3072):
            Dimension of the hidden representations.
        intermediate_size (`Optional`, *optional*, defaults to 1536):
            Dimension of the MLP representations.
        num_hidden_layers (`Optional`, *optional*, defaults to 62):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`Optional`, *optional*, defaults to 48):
            Number of attention heads for each attention layer in the Transformer encoder.
        num_key_value_heads (`Optional`, *optional*, defaults to 8):
            This is the number of key_value heads that should be used to implement Grouped Query Attention. If
            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
            `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
            by meanpooling all the original heads within that group. For more details, check out [this
            paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to `8`.
        head_dim (`Optional`, *optional*, defaults to 128):
            The attention head dimension.
        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the decoder.
        max_position_embeddings (`Optional`, *optional*, defaults to 196608):
            The maximum sequence length that this model might ever be used with. MiniMaxM2's sliding window attention
            allows sequence of up to 196608 tokens.
        initializer_range (`Optional`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        rms_norm_eps (`Optional`, *optional*, defaults to 1e-06):
            The epsilon used by the rms normalization layers.
        use_cache (`Optional`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        pad_token_id (`Optional`, *optional*):
            The id of the padding token.
        bos_token_id (`Optional`, *optional*, defaults to 200034):
            The id of the "beginning-of-sequence" token.
        eos_token_id (`Optional`, *optional*, defaults to 200020):
            The id of the "end-of-sequence" token.
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether the model's input and output word embeddings should be tied.
        attention_dropout (`Optional`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        num_experts_per_tok (`Optional`, *optional*, defaults to 8):
            The number of experts to route per-token, can be also interpreted as the `top-k` routing
            parameter
        num_local_experts (`Optional`, *optional*, defaults to 256):
            Number of experts per Sparse MLP layer.
        output_router_logits (`Optional`, *optional*, defaults to `False`):
            Whether or not the router logits should be returned by the model. Enabling this will also
            allow the model to output the auxiliary loss. See [here]() for more details
        router_aux_loss_coef (`Optional`, *optional*, defaults to 0.001):
            The aux loss factor for the total loss.
        router_jitter_noise (`Optional`, *optional*, defaults to 0.0):
            Amount of noise to add to the router.
        rope_parameters (`RopeParameters`, *optional*):
            Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain
            a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE
            with longer `max_position_embeddings`.
    ```python
    >>> from transformers import MiniMaxM2Model, MiniMaxM2Config
    >>> # Initializing a MiniMaxM2 style configuration
    >>> configuration = MiniMaxM2Config()
    >>> # Initializing a model from the MiniMaxM2 style configuration
    >>> model = MiniMaxM2Model(configuration)
    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""
    model_type = "minimax_m2"
    keys_to_ignore_at_inference = ["past_key_values"]
    # Tensor-parallel sharding plan for each decoder layer's submodules.
    base_model_tp_plan = {
        "layers.*.self_attn.q_proj": "colwise_rep",
        "layers.*.self_attn.k_proj": "colwise_rep",
        "layers.*.self_attn.v_proj": "colwise_rep",
        "layers.*.self_attn.o_proj": "rowwise_rep",
        "layers.*.mlp.gate": "colwise_rep",  # we need to replicate here to correctly route experts
        "layers.*.mlp.experts.gate_up_proj": "packed_colwise",
        "layers.*.mlp.experts.down_proj": "rowwise",
        "layers.*.mlp.experts": "moe_tp_experts",
    }
    # Pipeline-parallel plan: (input names, output names) per top-level module.
    base_model_pp_plan = {
        "embed_tokens": (["input_ids"], ["inputs_embeds"]),
        "layers": (["hidden_states", "attention_mask"], ["hidden_states"]),
        "norm": (["hidden_states"], ["hidden_states"]),
    }
    # Lets generic code access `config.num_experts` as an alias of `num_local_experts`.
    attribute_map = {
        "num_experts": "num_local_experts",
    }
    # Default RoPE base frequency (theta) when `rope_parameters` does not provide one.
    default_theta = 5000000.0

    def __init__(
        self,
        vocab_size: int | None = 200064,
        hidden_size: int | None = 3072,
        intermediate_size: int | None = 1536,
        num_hidden_layers: int | None = 62,
        num_attention_heads: int | None = 48,
        num_key_value_heads: int | None = 8,
        head_dim: int | None = 128,
        hidden_act: str | None = "silu",
        max_position_embeddings: int | None = 196608,
        initializer_range: float | None = 0.02,
        rms_norm_eps: int | None = 1e-06,
        use_cache: bool | None = True,
        pad_token_id: int | None = None,
        bos_token_id: int | None = 200034,
        eos_token_id: int | None = 200020,
        tie_word_embeddings: bool | None = False,
        attention_dropout: float | None = 0.0,
        num_experts_per_tok: int | None = 8,
        num_local_experts: int | None = 256,
        output_router_logits: bool | None = False,
        router_aux_loss_coef: float | None = 0.001,
        router_jitter_noise: float | None = 0.0,
        rope_parameters: RopeParameters | dict[RopeParameters] | None = None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.head_dim = head_dim
        self.rope_parameters = rope_parameters
        self.num_experts_per_tok = num_experts_per_tok
        self.num_local_experts = num_local_experts
        self.output_router_logits = output_router_logits
        self.router_aux_loss_coef = router_aux_loss_coef
        self.router_jitter_noise = router_jitter_noise
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.tie_word_embeddings = tie_word_embeddings
        # All attributes are set before the parent constructor so it can consume them
        # (e.g. token ids, tying) together with any remaining kwargs.
        super().__init__(**kwargs)
class MiniMaxM2TopKRouter(MixtralTopKRouter):
    def forward(self, hidden_states, e_score_correction_bias):
        """Select top-k experts per token using sigmoid scores plus a correction bias.

        Returns `(router_logits, router_scores, top_k_index)` where `router_scores`
        are the selected experts' sigmoid weights renormalized to sum to one.
        """
        flat_hidden = hidden_states.reshape(-1, self.hidden_dim)
        # (num_tokens, num_experts)
        router_logits = F.linear(flat_hidden.to(self.weight.dtype), self.weight)
        # Main difference to other MoE routers: sigmoid activation instead of softmax.
        routing_weights = torch.sigmoid(router_logits.float())
        # The correction bias only influences *which* experts are chosen,
        # not the weights applied to their outputs.
        biased_scores = routing_weights + e_score_correction_bias
        top_k_index = torch.topk(biased_scores, self.top_k, dim=-1, sorted=False).indices
        router_scores = routing_weights.gather(1, top_k_index)
        # Renormalize the selected (un-biased) weights per token.
        router_scores = router_scores / router_scores.sum(dim=-1, keepdim=True)
        return router_logits, router_scores, top_k_index
class MiniMaxM2Experts(MixtralExperts):
    # Inherits Mixtral's expert-weights container unchanged; the subclass exists
    # only so this model exposes MiniMaxM2-named classes.
    pass
class MiniMaxM2SparseMoeBlock(MixtralSparseMoeBlock):
    def __init__(self, config):
        super().__init__()
        # Per-expert bias added to the routing scores for expert *selection* only
        # (consumed by the router's forward). Registered as a buffer so it is
        # saved/loaded with the checkpoint but never trained.
        self.register_buffer("e_score_correction_bias", torch.zeros(config.num_local_experts))

    # Fix: the annotation previously claimed `tuple[torch.Tensor, torch.Tensor]`,
    # but the method returns a single tensor.
    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        """Route each token to its top-k experts and combine the expert outputs.

        Args:
            hidden_states (`torch.Tensor`): shape `(batch_size, seq_len, hidden_dim)`.

        Returns:
            `torch.Tensor` of the same shape as `hidden_states`.
        """
        batch_size, sequence_length, hidden_dim = hidden_states.shape
        if self.training and self.jitter_noise > 0:
            # NOTE: multiplies in place, so the caller's tensor is jittered too.
            hidden_states *= torch.empty_like(hidden_states).uniform_(1.0 - self.jitter_noise, 1.0 + self.jitter_noise)
        hidden_states = hidden_states.view(-1, hidden_states.shape[-1])
        # Router logits (first element) are discarded here; the hook/capture
        # machinery is responsible for exposing them when requested.
        _, top_k_weights, top_k_index = self.gate(hidden_states, self.e_score_correction_bias)
        hidden_states = self.experts(hidden_states, top_k_index, top_k_weights)
        hidden_states = hidden_states.reshape(batch_size, sequence_length, hidden_dim)
        return hidden_states
class MiniMaxM2RMSNorm(MixtralRMSNorm):
    # Identical to Mixtral's RMSNorm; subclassed only for MiniMaxM2-prefixed naming.
    pass
class MiniMaxM2RotaryEmbedding(Glm4MoeRotaryEmbedding):
    # Identical to GLM4-MoE's rotary embedding; subclassed only for naming.
    pass
class MiniMaxM2Attention(FlexOlmoAttention):
    def __init__(self, config: MiniMaxM2Config, layer_idx: int):
        super().__init__(config, layer_idx)
        # Override the parent's projections with bias-free linears sized from the
        # explicit `head_dim` (which may differ from hidden_size // num_heads).
        # NOTE(review): presumably FlexOlmoAttention builds these differently
        # (e.g. with bias or another sizing) — confirm against the parent class.
        self.q_proj = nn.Linear(config.hidden_size, config.num_attention_heads * self.head_dim, bias=False)
        self.k_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=False)
        self.v_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=False)
        self.o_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=False)
class MiniMaxM2PreTrainedModel(MixtralPreTrainedModel):
    @torch.no_grad()
    def _init_weights(self, module):
        # Run the generic transformers initialization first, then handle the
        # MiniMaxM2-specific MoE parameters the generic pass does not know about.
        PreTrainedModel._init_weights(self, module)
        std = self.config.initializer_range
        if isinstance(module, MiniMaxM2Experts):
            init.normal_(module.gate_up_proj, mean=0.0, std=std)
            init.normal_(module.down_proj, mean=0.0, std=std)
        elif isinstance(module, MiniMaxM2TopKRouter):
            init.normal_(module.weight, mean=0.0, std=std)
        elif isinstance(module, MiniMaxM2SparseMoeBlock):
            # The routing correction bias starts neutral (all zeros).
            init.zeros_(module.e_score_correction_bias)
class MiniMaxM2Model(MixtralModel):
    @merge_with_config_defaults
    @capture_outputs
    @auto_docstring
    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: Cache | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        use_cache: bool | None = None,
        cache_position: torch.LongTensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> MoeModelOutputWithPast:
        # Exactly one of `input_ids` / `inputs_embeds` must be provided.
        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
        if use_cache and past_key_values is None:
            past_key_values = DynamicCache(config=self.config)
        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)
        if cache_position is None:
            # Positions of the new tokens, offset by what is already in the cache.
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position = torch.arange(
                past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
            )
        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)
        # No sliding window opposed to mixtral
        causal_mask = create_causal_mask(
            config=self.config,
            inputs_embeds=inputs_embeds,
            attention_mask=attention_mask,
            cache_position=cache_position,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )
        hidden_states = inputs_embeds
        # Rotary embeddings are computed once and shared by every layer.
        position_embeddings = self.rotary_emb(hidden_states, position_ids=position_ids)
        for decoder_layer in self.layers[: self.config.num_hidden_layers]:
            hidden_states = decoder_layer(
                hidden_states,
                attention_mask=causal_mask,
                position_ids=position_ids,
                past_key_values=past_key_values,
                use_cache=use_cache,
                cache_position=cache_position,
                position_embeddings=position_embeddings,
                **kwargs,
            )
        hidden_states = self.norm(hidden_states)
        return MoeModelOutputWithPast(  # only diff with Mistral is the output type, we need MoE
            last_hidden_state=hidden_states,
            past_key_values=past_key_values,
        )
class MiniMaxM2ForCausalLM(MixtralForCausalLM):
    # Inherits Mixtral's causal-LM head unchanged; subclassed only for naming.
    pass
__all__ = [
"MiniMaxM2Config",
"MiniMaxM2ForCausalLM",
"MiniMaxM2Model", # noqa: F822
"MiniMaxM2PreTrainedModel", # noqa: F822
]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/minimax_m2/modular_minimax_m2.py",
"license": "Apache License 2.0",
"lines": 302,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:benchmark_v2/benchmark_scripts/continuous_batching_overall.py | import re
import subprocess
from pathlib import Path
from tabulate import tabulate
SCRIPT_LOCATION = (Path(__file__).parent.parent.parent / "examples/pytorch/continuous_batching.py").as_posix()
COMMON_ARGS = "--log-level WARNING --seed 0".split()
ERROR_OUTPUT = {"time_seconds": "X", "num_tokens": "X", "throughput_tok_per_sec": "ERROR"}
def run_and_parse_cb_example(args: str) -> dict:
    """Run the continuous-batching example script with `args` and parse its timing line.

    Returns a dict with the run duration, generated token count and throughput, or
    the `ERROR_OUTPUT` placeholders when the run aborted or printed no timing line.
    """
    print(f"Benchmarking with args: {args}")
    command = ["python", SCRIPT_LOCATION, *args.split(), *COMMON_ARGS]
    stdout = subprocess.run(command, stdout=subprocess.PIPE).stdout.decode("utf-8")
    # The example prints this sentence when generation terminated abnormally.
    if "generate_batch despite unexpected termination" in stdout:
        return {"args": args, **ERROR_OUTPUT}
    match = re.search(r"CB generation took: ([\d.]+) seconds for (\d+) tokens\. ([\d.]+)tok/s", stdout)
    if match is None:
        return {"args": args, **ERROR_OUTPUT}
    duration, num_tokens, throughput = match.groups()
    return {
        "args": args,
        "time_seconds": float(duration),
        "num_tokens": int(num_tokens),
        "throughput_tok_per_sec": float(throughput),
    }
if __name__ == "__main__":
    # The first row doubles as the header for the github-style table below.
    results = [
        {
            "args": "Arguments",
            "time_seconds": "Duration (s)",
            "num_tokens": "Generated tokens",
            "throughput_tok_per_sec": "Throughput (tok/s)",
        }
    ]
    benchmark_args = [
        # Low number of samples
        "--samples 10",
        "--samples 20 --num-blocks 20",  # and low number of blocks
        "--samples 50",
        # Attention implementations: default, flash attention 2 and sdpa
        "--samples 100",
        "--samples 100 --attn flash_attention_2",
        "--samples 100 --attn sdpa",
        # High number of samples
        "--samples 500",
        # Async API
        "--samples 500 --use-async",
        # Prefix sharing and compile (best performance, but not reproducible due to compilation)
        "--samples 500 --add-prefix --compile",
        # Parallel decoding
        "--samples 50 --num-return-sequences 8 --do-sample",
        "--samples 100 --num-return-sequences 4 --do-sample",
    ]
    for benchmark in benchmark_args:
        results.append(run_and_parse_cb_example(benchmark))
    # Print results
    print()
    print(tabulate(results, tablefmt="github"))
| {
"repo_id": "huggingface/transformers",
"file_path": "benchmark_v2/benchmark_scripts/continuous_batching_overall.py",
"license": "Apache License 2.0",
"lines": 56,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
huggingface/transformers:src/transformers/integrations/moe.py | # Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections.abc import Callable
from functools import wraps
from ..utils import logging
from ..utils.generic import GeneralInterface
from ..utils.import_utils import is_grouped_mm_available, is_torch_available, is_torchdynamo_compiling
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
# Examples of experts class with its eager mm implementation
# class Experts(nn.Module):
# """Collection of expert weights stored as 3D tensors."""
# def __init__(self, config):
# super().__init__()
# self.num_experts = config.n_routed_experts
# self.hidden_dim = config.hidden_size
# self.intermediate_dim = config.moe_intermediate_size
# self.gate_up_proj = nn.Parameter(torch.empty(self.num_experts, 2 * self.intermediate_dim, self.hidden_dim))
# self.down_proj = nn.Parameter(torch.empty(self.num_experts, self.hidden_dim, self.intermediate_dim))
# self.act_fn = ACT2FN[config.hidden_act]
# def forward(
# self,
# hidden_states: torch.Tensor,
# top_k_index: torch.Tensor,
# top_k_weights: torch.Tensor,
# ) -> torch.Tensor:
# final_hidden_states = torch.zeros_like(hidden_states)
# with torch.no_grad():
# expert_mask = torch.nn.functional.one_hot(top_k_index, num_classes=self.num_experts)
# expert_mask = expert_mask.permute(2, 1, 0)
# expert_hit = torch.greater(expert_mask.sum(dim=(-1, -2)), 0).nonzero()
# for expert_idx in expert_hit:
# expert_idx = expert_idx[0]
# if expert_idx == self.num_experts:
# continue
# top_k_pos, token_idx = torch.where(expert_mask[expert_idx])
# current_state = hidden_states[token_idx]
# gate, up = nn.functional.linear(current_state, self.gate_up_proj[expert_idx]).chunk(2, dim=-1)
# current_hidden_states = self.act_fn(gate) * up
# current_hidden_states = nn.functional.linear(current_hidden_states, self.down_proj[expert_idx])
# current_hidden_states = current_hidden_states * top_k_weights[token_idx, top_k_pos, None]
# final_hidden_states.index_add_(0, token_idx, current_hidden_states.to(final_hidden_states.dtype))
# return final_hidden_states
def _batched_linear(
input: torch.Tensor,
weight: torch.Tensor,
bias: torch.Tensor | None = None,
is_transposed: bool = False,
) -> torch.Tensor:
"""Batched linear layer supporting optional bias and transposed weights.
Args:
input (`torch.Tensor`):
Input tensor of shape (batch_size, input_dim).
weight (`torch.Tensor`):
Weight tensor of shape (batch_size, output_dim, input_dim) if transposed is `False`,
else of shape (batch_size, input_dim, output_dim).
bias (`torch.Tensor`, *optional*):
Bias tensor of shape (batch_size, output_dim). Default is `None`.
is_transposed (`bool`, *optional*, defaults to `False`):
Whether the weight tensor is transposed.
Returns:
`torch.Tensor`: Output tensor of shape (batch_size, output_dim).
"""
if is_transposed:
# (batch_size, 1, input_dim) @ (batch_size, input_dim, output_dim) -> (batch_size, 1, output_dim) -> (batch_size, output_dim)
out = torch.bmm(input.unsqueeze(1), weight).squeeze(1)
else:
# (batch_size, output_dim, input_dim) @ (batch_size, input_dim, 1) -> (batch_size, output_dim, 1) -> (batch_size, output_dim)
out = torch.bmm(weight, input.unsqueeze(-1)).squeeze(-1)
if bias is not None:
out = out + bias
return out
def batched_mm_experts_forward(
    self: torch.nn.Module,
    hidden_states: torch.Tensor,
    top_k_index: torch.Tensor,
    top_k_weights: torch.Tensor,
) -> torch.Tensor:
    """MoE experts forward pass using per-(token, expert) batched matmuls.

    Expects `self` to expose `num_experts`, `gate_up_proj`, `down_proj`,
    optional `*_bias` tensors (gated by `self.has_bias`), an `is_transposed`
    flag and an `_apply_gate` activation method.

    Args:
        hidden_states: token activations, shape (num_tokens, hidden_dim).
        top_k_index: selected expert ids per token, shape (num_tokens, num_top_k).
        top_k_weights: routing weights per selection, shape (num_tokens, num_top_k).

    Returns:
        Combined expert outputs of shape (num_tokens, hidden_dim), cast back to
        the input dtype.
    """
    device = hidden_states.device
    num_top_k = top_k_index.size(-1)
    num_tokens = hidden_states.size(0)
    hidden_dim = hidden_states.size(-1)
    # Reshape for easier indexing
    # S is the number of selected tokens-experts pairs (S = num_tokens * num_top_k)
    token_idx = torch.arange(num_tokens, device=device).unsqueeze(1).expand(-1, num_top_k).reshape(-1)  # (S,)
    sample_weights = top_k_weights.reshape(-1)  # (S,)
    expert_ids = top_k_index.reshape(-1)  # (S,)
    # Handle invalid expert IDs from Expert Parallelism (EP)
    # When EP is enabled, tokens assigned to experts on other devices are marked with sentinel value >= num_experts
    valid_mask = expert_ids < self.num_experts
    # Clamp so sentinel ids index safely; their contributions are zeroed out below.
    expert_ids_clamped = expert_ids.clamp(0, self.num_experts - 1)
    # Get current hidden states for selected samples
    selected_hidden_states = hidden_states[token_idx]
    # Select expert weights and biases for selected samples (using clamped IDs for safe indexing)
    selected_gate_up = self.gate_up_proj[expert_ids_clamped]
    selected_down = self.down_proj[expert_ids_clamped]
    selected_gate_up_bias = self.gate_up_proj_bias[expert_ids_clamped] if self.has_bias else None
    selected_down_bias = self.down_proj_bias[expert_ids_clamped] if self.has_bias else None
    # --- Up projection per expert (batched) ---
    gate_up_out = _batched_linear(
        selected_hidden_states, selected_gate_up, selected_gate_up_bias, is_transposed=self.is_transposed
    )  # (S, 2 * intermediate_dim)
    # Apply gating
    gated_out = self._apply_gate(gate_up_out)  # (S, intermediate_dim)
    # --- Down projection per expert (batched) ---
    out_per_sample = _batched_linear(
        gated_out, selected_down, selected_down_bias, is_transposed=self.is_transposed
    )  # (S, hidden_dim)
    # Apply routing weights and zero out invalid expert contributions
    # NOTE(review): both tensors come from reshape(-1) of (num_tokens, num_top_k)
    # inputs, so their shapes should always match here — confirm when this
    # gather branch is actually reachable.
    if sample_weights.shape != expert_ids_clamped.shape:
        sample_weights = sample_weights.gather(0, expert_ids_clamped)
    out_per_sample = out_per_sample * sample_weights.unsqueeze(-1)  # (S, hidden_dim)
    out_per_sample = out_per_sample * valid_mask.unsqueeze(-1).to(out_per_sample.dtype)
    # Accumulate results using deterministic reshape+sum instead of index_add_
    # (index_add_ with duplicate indices is non-deterministic on CUDA due to atomicAdd)
    final_hidden_states = out_per_sample.view(num_tokens, num_top_k, hidden_dim).sum(dim=1)
    return final_hidden_states.to(hidden_states.dtype)
# torch.compiler.disable does not work with fullgraph=True, so we implement a custom operator to opaque this function.
# This is not "free compilation compatibility" because now inductor won't be able to optimize matmuls inside the loop,
# but since the matmuls here have dynamic shapes, inductor wouldn't have been able to optimize them anyway.
def _grouped_mm_fallback(input: torch.Tensor, weight: torch.Tensor, offs: torch.Tensor) -> torch.Tensor:
"""
Fallback grouped matrix multiplication used when `torch.nn.functional.grouped_mm` and `torch._grouped_mm`
are unavailable or incompatible with `torch.compile` (e.g. non-bfloat16 weights).
Args:
input (`torch.Tensor`): Input of shape (S, input_dim), sorted by expert id.
weight (`torch.Tensor`): Expert weights of shape (num_experts, input_dim, output_dim).
offs (`torch.Tensor`): Cumulative token counts per expert of shape (num_experts,).
Returns:
`torch.Tensor`: Output of shape (S, output_dim).
"""
output = torch.zeros(input.size(0), weight.size(2), device=input.device, dtype=input.dtype) # (S, output_dim)
start = 0
# single cpu<->gpu sync point here,
# avoids multiple syncs inside the loop
for i, end in enumerate(offs.tolist()):
if start == end:
continue
torch.mm(input[start:end], weight[i], out=output[start:end])
start = end
return output
def _grouped_mm_fallback_fake(input: torch.Tensor, weight: torch.Tensor, offs: torch.Tensor) -> torch.Tensor:
    """Shape/dtype inference stub for `_grouped_mm_fallback` required by `torch.compile`.

    Never computes anything: it only validates argument shapes/dtypes and returns
    an uninitialized tensor with the output's shape, dtype and device so the
    compiler can trace through the custom op.
    """
    assert input.dim() == 2, f"input must be 2D (S, input_dim), got shape {tuple(input.shape)}"
    assert weight.dim() == 3, (
        f"weight must be 3D (num_experts, input_dim, output_dim), got shape {tuple(weight.shape)}"
    )
    assert offs.dim() == 1, f"offs must be 1D (num_experts,), got shape {tuple(offs.shape)}"
    assert offs.size(0) == weight.size(0), f"offs length {offs.size(0)} must match number of experts {weight.size(0)}"
    assert input.size(1) == weight.size(1), (
        f"input_dim mismatch: input has {input.size(1)}, weight has {weight.size(1)}"
    )
    assert offs.dtype in (torch.int32, torch.int64), f"offs must be an integer tensor, got {offs.dtype}"
    # (S, output_dim); contents are irrelevant for fake-tensor tracing.
    return torch.empty(input.size(0), weight.size(2), device=input.device, dtype=input.dtype)
def _grouped_mm_fallback_setup_context(ctx, inputs, output):
    """Saves input and weight for backward; offs is stored directly as it is a non-differentiable integer tensor."""
    # inputs = (input, weight, offs) as passed to the custom op.
    ctx.save_for_backward(inputs[0], inputs[1])
    ctx.offs = inputs[2]
def _grouped_mm_fallback_backward(ctx, grad_output):
"""Backward pass for `_grouped_mm_fallback`. Computes grad_input and grad_weight per expert group; offs has no gradient."""
input, weight = ctx.saved_tensors
grad_input = torch.zeros_like(input)
grad_weight = torch.zeros_like(weight)
start = 0
# single cpu<->gpu sync point here,
# avoids multiple syncs inside the loop
for i, end in enumerate(ctx.offs.tolist()):
if start == end:
continue
torch.mm(grad_output[start:end], weight[i].T, out=grad_input[start:end])
torch.mm(input[start:end].T, grad_output[start:end], out=grad_weight[i])
start = end
return grad_input, grad_weight, None
if is_torch_available():
torch.library.custom_op("transformers::grouped_mm_fallback", _grouped_mm_fallback, mutates_args=())
torch.library.register_fake("transformers::grouped_mm_fallback", _grouped_mm_fallback_fake)
torch.library.register_autograd(
"transformers::grouped_mm_fallback",
_grouped_mm_fallback_backward,
setup_context=_grouped_mm_fallback_setup_context,
)
def _can_use_grouped_mm(input: torch.Tensor, weight: torch.Tensor, offs: torch.Tensor) -> bool:
    """
    Decide whether the native grouped matmul (`torch.nn.functional.grouped_mm` or
    `torch._grouped_mm`) can be used for these operands.

    Args:
        input (`torch.Tensor`):
            Input tensor of shape (S, input_dim).
        weight (`torch.Tensor`):
            Weight tensor of shape (num_experts, input_dim, output_dim).
        offs (`torch.Tensor`):
            Offsets tensor indicating the boundaries of each group in the input tensor.

    Returns:
        `bool`: True if grouped_mm can be used, False otherwise.
    """
    # Under torch.compile the native kernels only support bfloat16 weights.
    unsupported_under_compile = is_torchdynamo_compiling() and weight.dtype != torch.bfloat16
    if unsupported_under_compile:
        return False
    return is_grouped_mm_available()
def _grouped_mm(
    input: torch.Tensor,
    weight: torch.Tensor,
    offs: torch.Tensor,
) -> torch.Tensor:
    """Dispatch grouped matmul to the fastest available implementation.

    Prefers `torch.nn.functional.grouped_mm`, then the private `torch._grouped_mm`,
    and finally the pure-PyTorch fallback registered as a custom op.

    Args:
        input (`torch.Tensor`):
            Input tensor of shape (S, input_dim).
        weight (`torch.Tensor`):
            Weight tensor of shape (num_experts, input_dim, output_dim).
        offs (`torch.Tensor`):
            Offsets tensor indicating the boundaries of each group in the input tensor.

    Returns:
        `torch.Tensor`: Output tensor of shape (S, output_dim).
    """
    if _can_use_grouped_mm(input, weight, offs):
        # The native grouped_mm kernels are not autocast-enabled: under autocast the
        # input can still be fp32 (e.g. a LayerNorm output) while weights are bf16,
        # so cast the input to the weight dtype to avoid dtype mismatch errors.
        # See: https://github.com/pytorch/pytorch/issues/174763
        casted_input = input.to(weight.dtype)
        if hasattr(torch.nn.functional, "grouped_mm"):
            return torch.nn.functional.grouped_mm(casted_input, weight, offs=offs)
        if hasattr(torch, "_grouped_mm"):
            return torch._grouped_mm(casted_input, weight, offs=offs)
    return torch.ops.transformers.grouped_mm_fallback(input, weight, offs=offs)
def _grouped_linear(
    input: torch.Tensor,
    weight: torch.Tensor,
    offs: torch.Tensor,
    bias: torch.Tensor | None = None,
    is_transposed: bool = False,
) -> torch.Tensor:
    """Grouped linear layer supporting an optional bias and transposed weights.

    Args:
        input (`torch.Tensor`):
            Input tensor of shape (S, input_dim).
        weight (`torch.Tensor`):
            Weight tensor of shape (num_experts, input_dim, output_dim) if `is_transposed`,
            else of shape (num_experts, output_dim, input_dim).
        offs (`torch.Tensor`):
            Offsets tensor indicating the boundaries of each group in the input tensor.
        bias (`torch.Tensor`, *optional*):
            Bias tensor of shape (num_experts, output_dim). Default is `None`.
        is_transposed (`bool`, *optional*, defaults to `False`):
            Whether the weight tensor is transposed.

    Returns:
        `torch.Tensor`: Output tensor of shape (S, output_dim).
    """
    # Non-transposed weights are (num_experts, output_dim, input_dim): flip the last two
    # dims so each expert's block multiplies as (input_dim, output_dim).
    expert_weight = weight if is_transposed else weight.transpose(-2, -1)
    out = _grouped_mm(input, expert_weight, offs=offs)
    # Bias cannot yet be passed down to the grouped_mm call, so it is added here.
    if bias is not None:
        out = out + bias
    return out
def grouped_mm_experts_forward(
    self: torch.nn.Module,
    hidden_states: torch.Tensor,
    top_k_index: torch.Tensor,
    top_k_weights: torch.Tensor,
) -> torch.Tensor:
    """MoE experts forward pass built on grouped matrix multiplications.

    Tokens are flattened into (token, expert) pairs, sorted by expert id so each expert
    owns a contiguous slice, pushed through the gate/up and down projections with
    grouped matmuls, then scattered back to the original order and summed per token.

    Args:
        self (`torch.nn.Module`):
            Experts module; must expose `gate_up_proj`, `down_proj`, `num_experts`,
            `has_bias`, `is_transposed` and `_apply_gate` (plus the `*_bias` tensors
            when `has_bias` is True).
        hidden_states (`torch.Tensor`):
            Token embeddings of shape (num_tokens, hidden_dim).
        top_k_index (`torch.Tensor`):
            Selected expert ids per token, shape (num_tokens, num_top_k).
        top_k_weights (`torch.Tensor`):
            Routing weights for each selected expert, shape (num_tokens, num_top_k).

    Returns:
        `torch.Tensor`: Combined expert outputs of shape (num_tokens, hidden_dim),
        cast back to the input dtype.
    """
    device = hidden_states.device
    num_top_k = top_k_index.size(-1)
    num_tokens = hidden_states.size(0)
    hidden_dim = hidden_states.size(-1)
    # Reshape for easier indexing
    # S is the number of selected tokens-experts pairs (S = num_tokens * num_top_k)
    token_idx = torch.arange(num_tokens, device=device).unsqueeze(1).expand(-1, num_top_k).reshape(-1)  # (S,)
    sample_weights = top_k_weights.reshape(-1)  # (S,)
    expert_ids = top_k_index.reshape(-1)  # (S,)
    # Get current hidden states for selected samples
    selected_hidden_states = hidden_states[token_idx]
    # Sort by expert for grouped processing
    perm = torch.argsort(expert_ids)
    inv_perm = torch.argsort(perm)
    expert_ids_g = expert_ids[perm]
    sample_weights_g = sample_weights[perm]
    selected_hidden_states_g = selected_hidden_states[perm]
    # Select expert weights and biases for selected samples
    # NOTE: We keep all experts here and rely on offsets to target the active ones.
    # I have already implemented a version that only passes the active experts, but
    # to do so I had to use torch.unique which breaks the graph capture (data-dependent).
    # Also there were no speedup gains from it in my experiments, even in eager mode.
    selected_gate_up = self.gate_up_proj
    selected_down = self.down_proj
    selected_gate_up_bias = self.gate_up_proj_bias[expert_ids_g] if self.has_bias else None
    selected_down_bias = self.down_proj_bias[expert_ids_g] if self.has_bias else None
    # Compute offsets for grouped_mm
    # using histc instead of bincount to avoid cuda graph issues
    # With deterministic algorithms, CPU only supports float input, CUDA only supports int input.
    histc_input = expert_ids_g.float() if device.type == "cpu" else expert_ids_g.int()
    num_tokens_per_expert = torch.histc(histc_input, bins=self.num_experts, min=0, max=self.num_experts - 1)
    offsets = torch.cumsum(num_tokens_per_expert, dim=0, dtype=torch.int32)
    # --- Up projection per expert (grouped) ---
    gate_up_out = _grouped_linear(
        selected_hidden_states_g,
        selected_gate_up,
        offs=offsets,
        bias=selected_gate_up_bias,
        is_transposed=self.is_transposed,
    )  # (S, 2 * intermediate_dim)
    # Apply gating
    gated_out = self._apply_gate(gate_up_out)  # (S, intermediate_dim)
    # --- Down projection per expert (grouped) ---
    out_per_sample_g = _grouped_linear(
        gated_out,
        selected_down,
        offs=offsets,
        bias=selected_down_bias,
        is_transposed=self.is_transposed,
    )  # (S, hidden_dim)
    # Apply routing weights
    out_per_sample_g = out_per_sample_g * sample_weights_g.unsqueeze(-1)  # (S, hidden_dim)
    # Restore original order
    out_per_sample = out_per_sample_g[inv_perm]
    # Accumulate results using deterministic reshape+sum instead of index_add_
    # (index_add_ with duplicate indices is non-deterministic on CUDA due to atomicAdd)
    final_hidden_states = out_per_sample.view(num_tokens, num_top_k, hidden_dim).sum(dim=1)
    return final_hidden_states.to(hidden_states.dtype)
class ExpertsInterface(GeneralInterface):
    """Registry mapping experts-implementation names to their forward functions."""

    _global_mapping = {
        "batched_mm": batched_mm_experts_forward,
        "grouped_mm": grouped_mm_experts_forward,
    }

    def get_interface(self, experts_implementation: str, default: Callable) -> Callable:
        """Return the requested `experts_implementation`. Also strictly check its validity, and raise if invalid."""
        if experts_implementation is None:
            # A missing implementation name is expected for standalone expert modules.
            logger.warning_once(
                "You tried to access the `ExpertsInterface` with a `config._experts_implementation` set to `None`. This "
                "is expected if you use an Expert Module as a standalone Module. If this is not the case, something went "
                "wrong with the dispatch of `config._experts_implementation`"
            )
            return super().get(experts_implementation, default)
        is_known = experts_implementation == "eager" or experts_implementation in self
        if not is_known:
            raise KeyError(
                f"`{experts_implementation}` is not a valid experts implementation registered in the `ExpertsInterface`"
            )
        return super().get(experts_implementation, default)
# Global singleton registry consulted at call time by `use_experts_implementation`.
ALL_EXPERTS_FUNCTIONS = ExpertsInterface()
def _default_apply_gate(self, gate_up_out: torch.Tensor) -> torch.Tensor:
"""
Default gating mechanism: splits the gate_up_out into gate and up parts,
applies the activation function to the gate part, and multiplies it with the up part.
Args:
gate_up_out (`torch.Tensor`):
The output tensor from the gate and up projection of shape (S, 2 * intermediate_dim).
Returns:
`torch.Tensor`: The gated output tensor of shape (S, intermediate_dim).
"""
gate, up = gate_up_out.chunk(2, dim=-1) # (S, intermediate_dim)
return self.act_fn(gate) * up # (S, intermediate_dim)
def use_experts_implementation(
    experts_class: type[torch.nn.Module] | None = None, *, is_transposed: bool = False, has_bias: bool = False
) -> type[torch.nn.Module]:
    """Class decorator wiring an experts module into the pluggable implementation registry.

    Args:
        experts_class (`type[torch.nn.Module]`, *optional*):
            The experts class to modify. If not provided, returns a decorator that can be applied to the class.
        is_transposed (`bool`, *optional*, defaults to `False`):
            Whether the expert weights are stored in transposed format.
        has_bias (`bool`, *optional*, defaults to `False`):
            Whether the expert layers include bias terms.

    Returns:
        `type[torch.nn.Module]`: The modified experts class.
    """

    def decorate(cls: type[torch.nn.Module]) -> type[torch.nn.Module]:
        base_init = cls.__init__
        base_forward = cls.forward

        @wraps(base_init)
        def patched_init(self, config, *args, **kwargs):
            base_init(self, config, *args, **kwargs)
            # Record the metadata the dispatched forward functions rely on.
            self.config = config
            self.has_bias = has_bias
            self.is_transposed = is_transposed

        @wraps(base_forward)
        def patched_forward(self, *args, **kwargs):
            # Resolve the implementation at call time so config switches take effect.
            impl = ALL_EXPERTS_FUNCTIONS.get_interface(self.config._experts_implementation, base_forward)
            return impl(self, *args, **kwargs)

        if not hasattr(cls, "_apply_gate"):
            cls._apply_gate = _default_apply_gate
        cls.__init__ = patched_init
        cls.forward = patched_forward
        return cls

    return decorate(experts_class) if experts_class is not None else decorate
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/integrations/moe.py",
"license": "Apache License 2.0",
"lines": 402,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/glmasr/configuration_glmasr.py | # Copyright 2025 the HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ...configuration_utils import PreTrainedConfig
from ..auto import CONFIG_MAPPING, AutoConfig
class GlmAsrEncoderConfig(PreTrainedConfig):
    r"""
    Configuration class for a [`GlmAsrEncoder`], the audio tower of the GLM-ASR
    architecture. Instantiating it with the defaults yields a configuration similar to
    the audio encoder of
    [zai-org/GLM-ASR-Nano-2512](https://huggingface.co/zai-org/GLM-ASR-Nano-2512).

    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.

    Args:
        hidden_size (`int`, *optional*, defaults to 1280):
            Dimensionality of the encoder hidden states.
        intermediate_size (`int`, *optional*, defaults to 5120):
            Dimensionality of the feed-forward (MLP) layers.
        num_hidden_layers (`int`, *optional*, defaults to 32):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 20):
            Number of attention heads for each attention layer in the Transformer encoder.
        num_key_value_heads (`int`, *optional*):
            Number of key/value heads used for Grouped Query Attention. Equal to
            `num_attention_heads` (the default when unset) gives MHA, `1` gives MQA,
            anything in between gives GQA. When converting a multi-head checkpoint to a
            GQA checkpoint, each group's key/value head should be built by mean-pooling
            the original heads of that group; see
            [this paper](https://huggingface.co/papers/2305.13245).
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler.
        max_position_embeddings (`int`, *optional*, defaults to 1500):
            The maximum sequence length that this model might ever be used with.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        rope_parameters (`RopeParameters`, *optional*):
            RoPE configuration dictionary; should contain `rope_theta` and, optionally,
            scaling parameters for longer `max_position_embeddings`.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        num_mel_bins (`int`, *optional*, defaults to 128):
            Number of mel features per input frame; must match the value used by the
            `GlmAsrProcessor` class.

    ```python
    >>> from transformers import GlmAsrEncoderConfig, GlmAsrEncoder

    >>> # Initializing a GlmAsrEncoderConfig
    >>> configuration = GlmAsrEncoderConfig()

    >>> # Initializing a GlmAsrEncoder (with random weights)
    >>> model = GlmAsrEncoder(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "glmasr_encoder"

    def __init__(
        self,
        hidden_size=1280,
        intermediate_size=5120,
        num_hidden_layers=32,
        num_attention_heads=20,
        num_key_value_heads=None,
        hidden_act="gelu",
        max_position_embeddings=1500,
        initializer_range=0.02,
        rope_parameters=None,
        attention_dropout=0.0,
        num_mel_bins=128,
        **kwargs,
    ):
        # Transformer stack geometry.
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.head_dim = hidden_size // num_attention_heads
        # GQA collapses to plain MHA when no key/value head count is given.
        self.num_key_value_heads = num_attention_heads if num_key_value_heads is None else num_key_value_heads
        self.hidden_act = hidden_act
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.rope_parameters = rope_parameters
        self.attention_dropout = attention_dropout
        self.num_mel_bins = num_mel_bins
        # Only part of each head is rotated (partial RoPE) unless the caller overrides it.
        kwargs.setdefault("partial_rotary_factor", 0.5)
        super().__init__(**kwargs)
class GlmAsrConfig(PreTrainedConfig):
    r"""
    Configuration class for [`GlmAsrForConditionalGeneration`], bundling an audio
    encoder configuration and a text decoder configuration. Instantiating it with the
    defaults yields a configuration similar to
    [zai-org/GLM-ASR-Nano-2512](https://huggingface.co/zai-org/GLM-ASR-Nano-2512).

    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.

    Args:
        audio_config (`Union[AutoConfig, dict]`, *optional*):
            Config object or dict for the audio encoder; defaults to a fresh
            `glmasr_encoder` config when omitted.
        text_config (`Union[AutoConfig, dict]`, *optional*):
            Config object or dict for the text model; defaults to a Llama config filled
            with `_default_text_config_kwargs` when omitted.
        audio_token_id (`int`, *optional*, defaults to 59260):
            The audio token index to encode the audio prompt.
        projector_hidden_act (`str`, *optional*, defaults to `"gelu"`):
            The activation function (function or string) in the multi-modal projector.

    ```python
    >>> from transformers import GlmAsrForConditionalGeneration, GlmAsrConfig

    >>> # Initializing a glmasr configuration
    >>> configuration = GlmAsrConfig()

    >>> # Initializing a GLM-ASR-Nano-2512 model with random weights
    >>> model = GlmAsrForConditionalGeneration(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "glmasr"
    sub_configs = {"text_config": AutoConfig, "audio_config": AutoConfig}
    # Text-decoder defaults matching the released checkpoints.
    _default_text_config_kwargs = {
        "vocab_size": 59264,
        "hidden_size": 2048,
        "intermediate_size": 6144,
        "num_hidden_layers": 28,
        "num_attention_heads": 16,
        "num_key_value_heads": 4,
        "max_position_embeddings": 8192,
        "rms_norm_eps": 1e-05,
        "use_cache": True,
        "eos_token_id": [59246, 59253, 59255],
        "rope_parameters": {"rope_theta": 10000.0, "rope_type": "default"},
    }

    def __init__(
        self,
        audio_config=None,
        text_config=None,
        audio_token_id=59260,
        projector_hidden_act="gelu",
        **kwargs,
    ):
        # Audio tower: dicts are routed through CONFIG_MAPPING; None gets the default encoder.
        if audio_config is None:
            audio_config = CONFIG_MAPPING["glmasr_encoder"]()
        elif isinstance(audio_config, dict):
            audio_config["model_type"] = audio_config.get("model_type", "glmasr_encoder")
            audio_config = CONFIG_MAPPING[audio_config["model_type"]](**audio_config)
        self.audio_config = audio_config

        # Text decoder: user-provided values override the GLM-ASR defaults.
        if text_config is None:
            text_config = CONFIG_MAPPING["llama"](**self._default_text_config_kwargs)
        elif isinstance(text_config, dict):
            text_config["model_type"] = text_config.get("model_type", "llama")
            text_config = CONFIG_MAPPING[text_config["model_type"]](
                **{**self._default_text_config_kwargs, **text_config}
            )
        self.text_config = text_config

        # Mirror commonly-read text fields at the top level for convenience.
        self.vocab_size = text_config.vocab_size
        self.hidden_size = text_config.hidden_size
        self.audio_token_id = audio_token_id
        self.projector_hidden_act = projector_hidden_act
        super().__init__(**kwargs)
# Public symbols re-exported by the glmasr model package.
__all__ = ["GlmAsrEncoderConfig", "GlmAsrConfig"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/glmasr/configuration_glmasr.py",
"license": "Apache License 2.0",
"lines": 166,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/glmasr/convert_glmasr_weights_to_hf.py | import argparse
import re
import torch
from safetensors.torch import load_file
from transformers import (
GlmAsrConfig,
GlmAsrForConditionalGeneration,
GlmAsrProcessor,
TokenizersBackend,
WhisperFeatureExtractor,
)
from transformers.utils.hub import cached_file
# Jinja chat template written into the converted processor. The `to_text` macro
# flattens a message's content: plain strings pass through, text items contribute
# their text, and audio items are replaced by the
# <|begin_of_audio|><|pad|><|end_of_audio|> marker block.
# NOTE: everything inside the triple-quoted string is runtime data — do not reformat.
chat_template = """{%- macro to_text(content) -%}
{%- if content is string -%}
{{- content -}}
{%- elif content is iterable and content is not mapping -%}
{%- for item in content -%}
{%- if item is mapping and item.type == 'text' and item.text is defined -%}
{{- item.text -}}
{%- elif item is mapping and (item.type == 'audio' or 'audio' in item) -%}
<|begin_of_audio|><|pad|><|end_of_audio|><|user|>
{% elif item is string -%}
{{- item -}}
{%- endif -%}
{%- endfor -%}
{%- else -%}
{{- content -}}
{%- endif -%}
{%- endmacro -%}
{%- for m in messages -%}
{%- if m.role == 'system' -%}
<|system|>
{{ to_text(m.content) | trim }}
{%- elif m.role == 'user' -%}
<|user|>
{{ to_text(m.content) | trim }}
{%- elif m.role == 'assistant' -%}
<|assistant|>
{{ to_text(m.content) | trim }}
{%- endif -%}
{%- endfor -%}
{%- if add_generation_prompt -%}
<|assistant|>
{% endif -%}"""
# Regex rename rules applied in insertion order by `convert_key` to map original
# checkpoint parameter names onto the HF module layout. Order matters: the
# `audio_encoder.whisper.* -> audio_tower.*` rewrite runs before the
# `audio_tower.*` layer rules below it, so those rules see the renamed keys.
# fmt: off
ORIGINAL_TO_CONVERTED_KEY_MAPPING = {
    r"^model\.norm\.weight$":                               r"language_model.model.norm.weight",
    r"^model\.(.*)$":                                       r"language_model.model.\1",
    r"^lm_head\.(.*)$":                                     r"language_model.lm_head.\1",
    r"^audio_encoder\.adapting\.0\.(.*)$":                  r"multi_modal_projector.linear_1.\1",
    r"^audio_encoder\.adapting\.2\.(.*)$":                  r"multi_modal_projector.linear_2.\1",
    r"^audio_encoder\.proj\.(weight|bias)$":                r"multi_modal_projector.\1",
    r"^audio_encoder\.whisper\.(.*)$":                      r"audio_tower.\1",
    r"^audio_encoder\.layer_norm\.(.*)$":                   r"audio_tower.norm.\1",
    r"^audio_tower\.layers\.(\d+)\.self_attn\.out_proj\.(.*)$":      r"audio_tower.layers.\1.self_attn.o_proj.\2",
    r"^audio_tower\.layers\.(\d+)\.self_attn_layer_norm\.(.*)$":     r"audio_tower.layers.\1.input_layernorm.\2",
    r"^audio_tower\.layers\.(\d+)\.final_layer_norm\.(.*)$":         r"audio_tower.layers.\1.post_attention_layernorm.\2",
    r"^audio_tower\.layers\.(\d+)\.(fc[12])\.(.*)$":                 r"audio_tower.layers.\1.mlp.\2.\3",
}
# fmt: on
def permute_rope(tensor, config):
    """Reorder q/k projection rows from interleaved to half-split partial-RoPE layout.

    The original checkpoint applies partial rotary embeddings (first half of each
    head) in the *interleaved* layout, while the HF implementation expects the
    *half-split* layout, so within the rotary half of each head the even-indexed
    rows come first, then the odd-indexed ones; the non-rotary half keeps its order.

    Fixes over the previous version: the 1-D/2-D branch is unnecessary since
    `tensor.shape[0]` covers both cases (the old `dim2` was never used), and the
    identical `head_offsets` / `global_head_offsets` tensors are computed once.

    Args:
        tensor: 1-D bias of shape (out_features,) or 2-D weight of shape
            (out_features, in_features). Rows are permuted along dim 0.
        config: model config; only `config.audio_config.num_attention_heads`
            and `config.audio_config.head_dim` are read.

    Returns:
        A new tensor with rows permuted along dim 0.
    """
    # Dim 0 is out_features for both 2-D weights and 1-D biases.
    dim1 = tensor.shape[0]
    n_heads = config.audio_config.num_attention_heads
    head_dim = config.audio_config.head_dim
    rope_dim = dim1 // 2

    # Per-head row offset: each head owns `head_dim // 2` rows in each half.
    head_offsets = torch.arange(n_heads, dtype=torch.long)[:, None] * (head_dim // 2)

    # De-interleave the rotary half of each head: (pairs, 2) -> (2, pairs).
    rope_indices = torch.arange(rope_dim)
    rope_indices = rope_indices.view(n_heads, rope_dim // n_heads // 2, 2)
    rope_indices = rope_indices.transpose(1, 2)
    rope_indices = rope_indices.reshape(n_heads, -1)

    # The non-rotary half of each head keeps its relative order (offset applied
    # here, and once more below together with the rotary part — matching the
    # original double shift of the non-rotary indices).
    non_rope_start = head_dim // 2
    non_rope_indices = torch.arange(non_rope_start, head_dim, dtype=torch.long)
    non_rope_indices = non_rope_indices.expand(n_heads, -1) + head_offsets

    # Shift every head's indices into its global row range and flatten.
    combined_indices = torch.cat([rope_indices, non_rope_indices], dim=1) + head_offsets
    permutation_indices = combined_indices.reshape(-1)
    return tensor[permutation_indices]
def convert_key(key, mapping):
    """Apply every (regex, replacement) pair in *mapping*, in order, to *key*.

    Patterns that do not match leave the key unchanged, so unmapped keys pass through.
    """
    for source_pattern, target_template in mapping.items():
        key = re.sub(source_pattern, target_template, key)
    return key
def main():
    """Convert a GLM-ASR checkpoint to the Hugging Face format.

    Downloads the original safetensors checkpoint, renames and permutes its weights
    to match the HF module layout, wraps the tokenizer and feature extractor into a
    `GlmAsrProcessor`, then saves and/or pushes the converted artifacts.
    """
    parser = argparse.ArgumentParser(description="Convert GLM-ASR model weights to Hugging Face format")
    parser.add_argument(
        "--input_path_or_repo",
        type=str,
        default="zai-org/GLM-ASR-Nano-2512",
        help="Path to input model file or Hugging Face repository ID",
    )
    parser.add_argument(
        "--revision",
        type=str,
        default="91967eab799804ab256a3819a085b92378906eb2",
        help="Revision of the input repository",
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=None,
        help="Output directory to save the converted model and processor",
    )
    parser.add_argument(
        "--push_to_hub",
        type=str,
        default=None,
        help="Repository ID to push the model and processor to Hub (if not provided, won't push)",
    )
    args = parser.parse_args()

    path = cached_file(args.input_path_or_repo, "model.safetensors", revision=args.revision)
    state_dict = load_file(path)
    config = GlmAsrConfig()
    model = GlmAsrForConditionalGeneration(config)

    new_state_dict = {}
    for k, v in state_dict.items():
        new_key = convert_key(k, ORIGINAL_TO_CONVERTED_KEY_MAPPING)
        # those are not used
        if new_key in [
            "audio_encoder.audio_bos_eos_token.weight",  # already present in the emb
            "audio_tower.embed_positions.weight",
            "multi_modal_projector.bias",
            "multi_modal_projector.weight",
        ]:
            continue
        # q/k rows are reordered because the original checkpoint stores the
        # partial-RoPE dimensions interleaved (see `permute_rope`).
        if "audio_tower" in new_key and ("q_proj" in new_key or "k_proj" in new_key):
            v = permute_rope(v, config)
        new_state_dict[new_key] = v
    model.load_state_dict(new_state_dict, strict=True, assign=True)

    feature_extractor = WhisperFeatureExtractor(feature_size=128)
    tokenizer = TokenizersBackend.from_pretrained(args.input_path_or_repo, revision=args.revision)
    tokenizer.pad_token = tokenizer.eos_token
    processor = GlmAsrProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer, chat_template=chat_template)

    if args.output_dir:
        model.save_pretrained(args.output_dir)
        processor.save_pretrained(args.output_dir)
    if args.push_to_hub:
        model.push_to_hub(args.push_to_hub)
        processor.push_to_hub(args.push_to_hub)


if __name__ == "__main__":
    main()
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/glmasr/convert_glmasr_weights_to_hf.py",
"license": "Apache License 2.0",
"lines": 149,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
huggingface/transformers:src/transformers/models/glmasr/modular_glmasr.py | # Copyright 2025 the HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections.abc import Callable
import numpy as np
from ...activations import ACT2FN
from ...audio_utils import AudioInput, make_list_of_audio
from ...cache_utils import Cache
from ...feature_extraction_utils import BatchFeature
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import BaseModelOutputWithPooling, CausalLMOutputWithPast
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS
from ...processing_utils import Unpack
from ...utils import TransformersKwargs, auto_docstring, is_torch_available, logging
from ...utils.generic import can_return_tuple, merge_with_config_defaults
from ...utils.output_capturing import capture_outputs
from ..audioflamingo3.modeling_audioflamingo3 import (
AudioFlamingo3ForConditionalGeneration,
AudioFlamingo3MultiModalProjector,
AudioFlamingo3PreTrainedModel,
)
from ..audioflamingo3.processing_audioflamingo3 import AudioFlamingo3Processor, AudioFlamingo3ProcessorKwargs
from ..glm.modeling_glm import GlmRotaryEmbedding
from ..llama.modeling_llama import LlamaAttention, eager_attention_forward, rotate_half
from .configuration_glmasr import GlmAsrConfig, GlmAsrEncoderConfig
if is_torch_available():
import torch
from torch import nn
logger = logging.get_logger(__name__)
# Empty subclass: GLM-ASR uses the AudioFlamingo3 processor-kwargs contract unchanged.
class GlmAsrProcessorKwargs(AudioFlamingo3ProcessorKwargs): ...
class GlmAsrProcessor(AudioFlamingo3Processor):
    r"""
    Constructs a GLM-ASR processor which wraps a Whisper-style feature extractor and a
    tokenizer into a single processor.

    [`GlmAsrProcessor`] offers all the functionalities of [`WhisperFeatureExtractor`] and
    [`Qwen2TokenizerFast`]. See the [`~GlmAsrProcessor.__call__`] for more information.

    Args:
        feature_extractor ([`WhisperFeatureExtractor`]):
            The feature extractor is a required input.
        tokenizer ([`Qwen2TokenizerFast`]):
            The tokenizer is a required input.
        chat_template (`Optional[str]`, *optional*):
            The Jinja template to use for formatting the conversation. If not provided, the tokenizer's default chat
            template will be used.
        audio_token (`Optional[str]`, *optional*, defaults to `"<|pad|>"`):
            Special token used to represent audio inputs in the chat template.
        default_transcription_prompt (`str`, *optional*, defaults to `"Please transcribe this audio into text"`):
            Default prompt to use for transcription tasks when applying transcription requests.
        max_audio_len (`int`, *optional*, defaults to 655):
            Maximum length of audio sequences in seconds. Audio longer than this will be truncated.
            655 gives approximately 8192 tokens, corresponding to the maximum sequence length of the text model.
    """

    def __init__(
        self,
        feature_extractor,
        tokenizer,
        chat_template=None,
        audio_token="<|pad|>",
        default_transcription_prompt="Please transcribe this audio into text",
        max_audio_len=655,
    ):
        # All behavior lives in the AudioFlamingo3 base; only defaults differ.
        super().__init__(
            feature_extractor,
            tokenizer,
            chat_template=chat_template,
            audio_token=audio_token,
            default_transcription_prompt=default_transcription_prompt,
            max_audio_len=max_audio_len,
        )

    def _get_audio_token_length(self, audio_lengths: "torch.Tensor") -> "torch.Tensor":
        """Compute the number of audio placeholder tokens produced per sample.

        Mirrors the encoder's two Conv1d layers (kernel 3 / stride 1 and kernel 3 /
        stride 2, both with padding 1) followed by a merge of every 4 consecutive
        frames.
        """
        merge_factor = 4
        # Standard conv output-length formula, applied once per conv layer.
        for padding, kernel_size, stride in [(1, 3, 1), (1, 3, 2)]:
            audio_lengths = (audio_lengths + 2 * padding - (kernel_size - 1) - 1) // stride + 1
        num_tokens = (audio_lengths - merge_factor) // merge_factor + 1
        return num_tokens

    def apply_transcription_request(
        self,
        audio: str | list[str] | AudioInput,
        prompt: str | list[str] | None = None,
        **kwargs: Unpack[GlmAsrProcessorKwargs],
    ) -> BatchFeature:
        """
        Prepare inputs for automatic speech recognition without manually writing the default transcription prompt.

        Args:
            audio (`str`, `list[str]`, `np.ndarray`, `torch.Tensor`, `list[np.ndarray]`, `list[torch.Tensor]`):
                Audio to transcribe. Strings are interpreted as local paths or URLs and will be loaded automatically by
                the chat template loader; NumPy arrays and PyTorch tensors are forwarded directly.
            prompt (`str` or `list[str]`, *optional*):
                Custom prompt(s) to include in the user turn. A list must be the same length as the batch. When `None`,
                each sample uses the processor's `default_transcription_prompt`.
            **kwargs:
                Additional keyword arguments forwarded to [`~AudioFlamingo3Processor.apply_chat_template`] (for example
                `text_kwargs`, `audio_kwargs`, ...).

        Returns:
            [`BatchFeature`]: Processor outputs ready to be passed to the model's `generate` method.
        """
        # Normalize the audio argument to a flat list of paths/arrays.
        if isinstance(audio, str):
            audio_items: list[str | np.ndarray] = [audio]
        elif isinstance(audio, (list, tuple)) and audio and all(isinstance(el, str) for el in audio):
            audio_items = list(audio)
        else:
            audio_items = list(make_list_of_audio(audio))
            if is_torch_available():
                # Tensors are converted to NumPy so downstream handling is uniform.
                audio_items = [el.detach().cpu().numpy() if isinstance(el, torch.Tensor) else el for el in audio_items]
        batch_size = len(audio_items)
        if batch_size == 0:
            raise ValueError("`audio` must contain at least one sample.")
        # Broadcast/validate prompts so there is exactly one per audio sample.
        if prompt is None:
            prompts = [self.default_transcription_prompt] * batch_size
        elif isinstance(prompt, str):
            prompts = [prompt] * batch_size
        elif isinstance(prompt, (list, tuple)):
            if len(prompt) != batch_size:
                raise ValueError(
                    f"Received {len(prompt)} prompt(s) for {batch_size} audio sample(s); counts must match."
                )
            prompts = []
            for item in prompt:
                if item is None:
                    prompts.append(self.default_transcription_prompt)
                elif isinstance(item, str):
                    prompts.append(item)
                else:
                    raise TypeError("Each prompt must be a string or `None`.")
        else:
            raise TypeError("`prompt` must be a string, a sequence of strings, or `None`.")
        # Build one single-turn conversation per sample: audio first, then the prompt text.
        conversations = [
            [
                {
                    "role": "user",
                    "content": [
                        {"type": "audio", "path": audio_item}
                        if isinstance(audio_item, str)
                        else {"type": "audio", "audio": audio_item},
                        {"type": "text", "text": prompt_text},
                    ],
                }
            ]
            for prompt_text, audio_item in zip(prompts, audio_items)
        ]
        return self.apply_chat_template(
            conversations,
            tokenize=True,
            add_generation_prompt=True,
            return_dict=True,
            **kwargs,
        )
# Empty subclass: reuses GlmRotaryEmbedding unchanged under a GlmAsr-specific name.
class GlmAsrRotaryEmbedding(GlmRotaryEmbedding): ...
def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
    """Apply partial rotary position embeddings to query and key states.

    Only the leading `cos.shape[-1]` channels of each head are rotated; the
    remaining channels pass through untouched (partial RoPE).
    """
    cos = cos.unsqueeze(unsqueeze_dim)
    sin = sin.unsqueeze(unsqueeze_dim)
    rotary_dim = cos.shape[-1]

    def _partial_rotate(states):
        # Split into the rotated slice and the untouched remainder.
        rot, passthrough = states[..., :rotary_dim], states[..., rotary_dim:]
        rotated = (rot * cos) + (rotate_half(rot) * sin)
        # Reassemble the full head dimension.
        return torch.cat([rotated, passthrough], dim=-1)

    return _partial_rotate(q), _partial_rotate(k)
class GlmAsrAttention(LlamaAttention):
    """Non-causal multi-head attention for the GLM-ASR audio encoder.

    Reuses the LlamaAttention machinery but disables causal masking and re-creates
    the projections with GLM-ASR's bias layout: q/v/o projections carry a bias,
    the k projection does not.
    """

    def __init__(self, config: GlmAsrConfig, layer_idx: int):
        super().__init__(config, layer_idx)
        # Encoder attention is bidirectional.
        self.is_causal = False
        self.q_proj = nn.Linear(config.hidden_size, config.num_attention_heads * self.head_dim, bias=True)
        self.k_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=False)
        self.v_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=True)
        self.o_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=True)

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: tuple[torch.Tensor, torch.Tensor] | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple[torch.Tensor, torch.Tensor]:
        """Run self-attention with partial rotary embeddings.

        No attention mask is applied (attention_mask=None) — NOTE(review): this
        assumes padding is handled upstream; confirm against the caller.
        """
        input_shape = hidden_states.shape[:-1]
        hidden_shape = (*input_shape, -1, self.head_dim)

        # (batch, seq, hidden) -> (batch, heads, seq, head_dim)
        query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)

        cos, sin = position_embeddings
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)

        # Resolve the configured attention backend, falling back to the eager path.
        attention_interface: Callable = ALL_ATTENTION_FUNCTIONS.get_interface(
            self.config._attn_implementation, eager_attention_forward
        )
        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask=None,
            dropout=0.0 if not self.training else self.attention_dropout,
            scaling=self.scaling,
            **kwargs,
        )

        attn_output = attn_output.reshape(*input_shape, -1).contiguous()
        attn_output = self.o_proj(attn_output)
        return attn_output, attn_weights
class GlmAsrMLP(nn.Module):
    """Two-layer feed-forward block: fc1 -> activation -> fc2."""

    def __init__(self, config):
        super().__init__()
        # Up-project to the intermediate size, then back down to the hidden size.
        self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
        self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)
        self.act_fn = ACT2FN[config.hidden_act]

    def forward(self, hidden_states: torch.Tensor):
        return self.fc2(self.act_fn(self.fc1(hidden_states)))
class GlmAsrEncoderLayer(GradientCheckpointingLayer):
    """Pre-norm transformer encoder layer: self-attention and MLP, each with a residual."""

    def __init__(self, config: GlmAsrConfig, layer_idx: int):
        super().__init__()
        self.hidden_size = config.hidden_size
        self.self_attn = GlmAsrAttention(config=config, layer_idx=layer_idx)
        self.mlp = GlmAsrMLP(config)
        self.input_layernorm = nn.LayerNorm(config.hidden_size)
        self.post_attention_layernorm = nn.LayerNorm(config.hidden_size)

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: tuple[torch.Tensor, torch.Tensor] | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> torch.Tensor:
        # Attention sub-block (pre-norm + residual).
        attn_out, _ = self.self_attn(
            hidden_states=self.input_layernorm(hidden_states),
            position_embeddings=position_embeddings,
            **kwargs,
        )
        hidden_states = hidden_states + attn_out

        # Feed-forward sub-block (pre-norm + residual).
        mlp_out = self.mlp(self.post_attention_layernorm(hidden_states))
        return hidden_states + mlp_out
# Inherits all pre-trained-model plumbing (init scheme, flags, loading hooks) unchanged
# from the AudioFlamingo3 base; the subclass exists so the modular converter emits a
# GlmAsr-named class.
class GlmAsrPreTrainedModel(AudioFlamingo3PreTrainedModel): ...
# TODO: @eustlb, this is what WhisperEncoder should look like
class GlmAsrEncoder(GlmAsrPreTrainedModel):
    """Audio encoder: two GELU convolutions over log-mel features, then rotary-embedding
    transformer layers and a final LayerNorm."""

    config: GlmAsrEncoderConfig
    main_input_name = "input_features"
    input_modalities = "audio"
    _no_split_modules = ["GlmAsrEncoderLayer"]
    _can_record_outputs = {
        "hidden_states": GlmAsrEncoderLayer,
        "attentions": GlmAsrAttention,
    }

    def __init__(self, config: GlmAsrEncoderConfig):
        super().__init__(config)
        self.conv1 = nn.Conv1d(config.num_mel_bins, config.hidden_size, kernel_size=3, padding=1)
        self.conv2 = nn.Conv1d(config.hidden_size, config.hidden_size, kernel_size=3, stride=2, padding=1)
        self.layers = nn.ModuleList(
            [GlmAsrEncoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        self.norm = nn.LayerNorm(config.hidden_size)
        self.rotary_emb = GlmAsrRotaryEmbedding(config=config)
        self.gradient_checkpointing = False
        self.post_init()

    @merge_with_config_defaults
    @capture_outputs
    @auto_docstring
    def forward(self, input_features, **kwargs: Unpack[TransformersKwargs]):
        # Two conv stages with GELU; the second halves the time axis (stride 2).
        hidden_states = nn.functional.gelu(self.conv1(input_features))
        hidden_states = nn.functional.gelu(self.conv2(hidden_states))
        # (batch, channels, frames) -> (batch, frames, hidden)
        hidden_states = hidden_states.transpose(1, 2)
        seq_len = hidden_states.shape[1]
        position_ids = torch.arange(seq_len, device=hidden_states.device)[None, :]
        position_embeddings = self.rotary_emb(hidden_states, position_ids=position_ids)
        for layer in self.layers:
            hidden_states = layer(hidden_states, position_embeddings=position_embeddings, **kwargs)
        return BaseModelOutputWithPooling(last_hidden_state=self.norm(hidden_states))
class GlmAsrMultiModalProjector(AudioFlamingo3MultiModalProjector):
    """Maps stacked audio-encoder features into the language model's hidden space via a
    widen-then-narrow pair of linear layers."""

    def __init__(self, config: GlmAsrConfig):
        super().__init__()
        text_hidden = config.text_config.hidden_size
        self.linear_1 = nn.Linear(config.audio_config.intermediate_size, text_hidden * 2)
        self.linear_2 = nn.Linear(text_hidden * 2, text_hidden)
@auto_docstring(
    custom_intro="""
    The GlmAsr model which consists of a fine-tuned Whisper encoder, a multi-modal projector and a Llama language model.
    """
)
class GlmAsrForConditionalGeneration(AudioFlamingo3ForConditionalGeneration):
    @can_return_tuple
    @auto_docstring(
        custom_intro="Compute audio embeddings from log-mel input features using the audio encoder and multi-modal projector."
    )
    def get_audio_features(
        self,
        input_features: torch.FloatTensor,
        input_features_mask: torch.Tensor,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple | BaseModelOutputWithPooling:
        audio_outputs = self.audio_tower(input_features, return_dict=True, **kwargs)
        audio_hidden_states = audio_outputs.last_hidden_state
        # Group consecutive encoder frames so each projector input row carries
        # `audio_config.intermediate_size` features (frame stacking before projection).
        audio_hidden_states = audio_hidden_states.reshape(
            input_features.shape[0], -1, self.config.audio_config.intermediate_size
        )
        audio_embeds = self.multi_modal_projector(audio_hidden_states)
        # Track how many frames of each sample survive the encoder's two convolutions
        # (conv1: kernel=3, stride=1, padding=1; conv2: kernel=3, stride=2, padding=1),
        # applying the standard conv output-length formula per stage.
        audio_lengths = input_features_mask.sum(-1)
        for padding, kernel_size, stride in [(1, 3, 1), (1, 3, 2)]:
            audio_lengths = (audio_lengths + 2 * padding - (kernel_size - 1) - 1) // stride + 1
        # NOTE(review): merge_factor presumably mirrors the frame stacking performed by the
        # reshape above (intermediate_size / hidden_size) — confirm against the config.
        merge_factor = 4
        post_lengths = (audio_lengths - merge_factor) // merge_factor + 1
        # Keep only embeddings corresponding to real (non-padded) audio; boolean indexing
        # flattens the selected embeddings across the batch.
        valid_mask = torch.arange(audio_embeds.shape[1], device=post_lengths.device)[None, :] < post_lengths[:, None]
        audio_outputs.pooler_output = audio_embeds[valid_mask.to(audio_embeds.device)]
        return audio_outputs

    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        input_features: torch.FloatTensor | None = None,
        input_features_mask: torch.Tensor | None = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: Cache | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        labels: torch.LongTensor | None = None,
        use_cache: bool | None = None,
        cache_position: torch.LongTensor | None = None,
        logits_to_keep: int | torch.Tensor = 0,
        **kwargs: Unpack[TransformersKwargs],
    ) -> CausalLMOutputWithPast:
        r"""
        input_features_mask (`torch.Tensor` of shape `(batch_size, feature_sequence_length)`):
            Mask to avoid performing attention on padding feature indices. Mask values selected in `[0, 1]`:
            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Example:

        ```python
        >>> from transformers import GlmAsrForConditionalGeneration, AutoProcessor

        >>> model_id = "zai-org/GLM-ASR-Nano-2512"
        >>> processor = AutoProcessor.from_pretrained(model_id)
        >>> model = GlmAsrForConditionalGeneration.from_pretrained(model_id, dtype="auto", device_map="auto")

        >>> inputs = processor.apply_transcription_request("https://huggingface.co/datasets/hf-internal-testing/dummy-audio-samples/resolve/main/bcn_weather.mp3")
        >>> inputs = inputs.to(model.device, dtype=model.dtype)

        >>> outputs = model.generate(**inputs, do_sample=False, max_new_tokens=500)
        >>> decoded_outputs = processor.batch_decode(outputs[:, inputs.input_ids.shape[1] :], skip_special_tokens=True)
        >>> print(decoded_outputs)
        ```"""
        # Delegate to the AudioFlamingo3 forward; the audio-specific inputs are consumed
        # via **kwargs / get_audio_features on the parent implementation.
        return super().forward(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            labels=labels,
            use_cache=use_cache,
            cache_position=cache_position,
            logits_to_keep=logits_to_keep,
            **kwargs,
        )
# Public symbols re-exported from this modular file into the generated modeling module.
__all__ = ["GlmAsrEncoder", "GlmAsrForConditionalGeneration", "GlmAsrProcessor", "GlmAsrPreTrainedModel"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/glmasr/modular_glmasr.py",
"license": "Apache License 2.0",
"lines": 369,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:tests/models/glmasr/test_modeling_glmasr.py | # Copyright 2025 the HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch glmasr model."""
import tempfile
import unittest
import pytest
from transformers import (
AutoProcessor,
GlmAsrConfig,
GlmAsrForConditionalGeneration,
is_torch_available,
)
from transformers.testing_utils import (
cleanup,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
class GlmAsrModelTester:
    """Builds a tiny GlmAsr configuration plus dummy batched inputs for the model tests."""

    # Class-level default sub-configs. They are copied per instance in `__init__` so a test
    # that mutates its config cannot leak state into other tests — the previous signature
    # used mutable default arguments, which are created once and shared across all calls.
    _DEFAULT_TEXT_CONFIG = {
        "model_type": "llama",
        "intermediate_size": 64,
        "initializer_range": 0.02,
        "hidden_size": 16,
        "max_position_embeddings": 52,
        "num_hidden_layers": 2,
        "num_attention_heads": 4,
        "use_labels": True,
        "use_mrope": False,
        "vocab_size": 99,
        "head_dim": 8,
        "pad_token_id": 1,  # can't be the same as the audio token id
    }
    _DEFAULT_AUDIO_CONFIG = {
        "model_type": "glmasr_encoder",
        "hidden_size": 128,
        "num_attention_heads": 2,
        "intermediate_size": 512,
        "num_hidden_layers": 2,
        "num_mel_bins": 128,
        "max_source_positions": 32,
        "initializer_range": 0.02,
    }

    def __init__(
        self,
        parent,
        ignore_index=-100,
        audio_token_id=0,
        seq_length=35,
        feat_seq_length=64,
        text_config=None,
        is_training=True,
        audio_config=None,
    ):
        # Fall back to fresh copies of the class-level defaults.
        if text_config is None:
            text_config = dict(self._DEFAULT_TEXT_CONFIG)
        if audio_config is None:
            audio_config = dict(self._DEFAULT_AUDIO_CONFIG)
        self.parent = parent
        self.ignore_index = ignore_index
        self.audio_token_id = audio_token_id
        self.text_config = text_config
        self.audio_config = audio_config
        self.seq_length = seq_length
        self.feat_seq_length = feat_seq_length
        # Convenience mirrors of the text config used by the shared test mixins.
        self.num_hidden_layers = text_config["num_hidden_layers"]
        self.vocab_size = text_config["vocab_size"]
        self.hidden_size = text_config["hidden_size"]
        self.num_attention_heads = text_config["num_attention_heads"]
        self.is_training = is_training
        self.batch_size = 3
        self.encoder_seq_length = seq_length

    def get_config(self):
        """Assemble a `GlmAsrConfig` from the stored sub-configs."""
        return GlmAsrConfig(
            text_config=self.text_config,
            audio_config=self.audio_config,
            ignore_index=self.ignore_index,
            audio_token_id=self.audio_token_id,
        )

    def prepare_config_and_inputs(self):
        """Return `(config, input_features, input_features_mask)` with random mel features."""
        input_features_values = floats_tensor(
            [
                self.batch_size,
                self.audio_config["num_mel_bins"],
                self.feat_seq_length,
            ]
        )
        config = self.get_config()
        input_features_mask = torch.ones([self.batch_size, self.feat_seq_length], dtype=torch.bool).to(torch_device)
        return config, input_features_values, input_features_mask

    def prepare_config_and_inputs_for_common(self):
        """Return `(config, inputs_dict)` with token ids containing a run of audio placeholders."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_features_values, input_features_mask = config_and_inputs
        num_audio_tokens_per_batch_idx = 8
        # Shift ids by one so the audio token id (0) never appears by accident.
        input_ids = ids_tensor([self.batch_size, self.seq_length], config.text_config.vocab_size - 1) + 1
        attention_mask = torch.ones(input_ids.shape, dtype=torch.long).to(torch_device)
        attention_mask[:, :1] = 0
        # Carve out a run of audio placeholder tokens right after the first position.
        input_ids[:, 1 : 1 + num_audio_tokens_per_batch_idx] = config.audio_token_id
        inputs_dict = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "input_features": input_features_values,
            "input_features_mask": input_features_mask,
        }
        return config, inputs_dict
@require_torch
class GlmAsrForConditionalGenerationModelTest(
    ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    """
    Model tester for `GlmAsrForConditionalGeneration`.
    """

    all_model_classes = (GlmAsrForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = {"audio-text-to-text": GlmAsrForConditionalGeneration} if is_torch_available() else {}
    # Audio tower + language model: attention-implementation settings must propagate to both.
    _is_composite = True

    def setUp(self):
        self.model_tester = GlmAsrModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GlmAsrConfig, has_text_modality=False)

    @unittest.skip(
        reason="This test does not apply to GlmAsr since inputs_embeds corresponding to audio tokens are replaced when input features are provided."
    )
    def test_inputs_embeds_matches_input_ids(self):
        pass

    @unittest.skip(reason="Compile not yet supported for GlmAsr models")
    @pytest.mark.torch_compile_test
    def test_sdpa_can_compile_dynamic(self):
        pass

    @unittest.skip(reason="Compile not yet supported for GlmAsr models")
    def test_sdpa_can_dispatch_on_flash(self):
        pass

    @unittest.skip(reason="GlmAsr tests avoid right-padding equivalence; fusion is in-place.")
    def test_flash_attn_2_inference_equivalence_right_padding(self):
        pass

    @unittest.skip(reason="GlmAsr has no separate base model without a head.")
    def test_model_base_model_prefix(self):
        pass

    def test_sdpa_can_dispatch_composite_models(self):
        # GlmAsr is audio+text composite; verify SDPA toggles propagate to submodules.
        if not self.has_attentions:
            self.skipTest(reason="Model architecture does not support attentions")

        if not self._is_composite:
            self.skipTest(f"{self.all_model_classes[0].__name__} does not support SDPA")

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            model = model_class(config)
            # Round-trip through save/load so `_attn_implementation` is resolved per submodule.
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)

                # SDPA (default)
                model_sdpa = model_class.from_pretrained(tmpdirname)
                model_sdpa = model_sdpa.eval().to(torch_device)
                # Each submodule may or may not support SDPA; expect its advertised capability.
                text_attn = "sdpa" if model.language_model._supports_sdpa else "eager"
                audio_attn = "sdpa" if model.audio_tower._supports_sdpa else "eager"
                self.assertTrue(model_sdpa.config._attn_implementation == "sdpa")
                self.assertTrue(model.language_model.config._attn_implementation == text_attn)
                self.assertTrue(model.audio_tower.config._attn_implementation == audio_attn)

                # Eager
                model_eager = model_class.from_pretrained(tmpdirname, attn_implementation="eager")
                model_eager = model_eager.eval().to(torch_device)
                self.assertTrue(model_eager.config._attn_implementation == "eager")
                self.assertTrue(model_eager.language_model.config._attn_implementation == "eager")
                self.assertTrue(model_eager.audio_tower.config._attn_implementation == "eager")
                # No submodule may silently keep an SDPA attention class in eager mode.
                for _, submodule in model_eager.named_modules():
                    class_name = submodule.__class__.__name__
                    if "SdpaAttention" in class_name or "SdpaSelfAttention" in class_name:
                        raise ValueError("The eager model should not have SDPA attention layers")
@require_torch
class GlmAsrForConditionalGenerationIntegrationTest(unittest.TestCase):
    """Slow end-to-end generation tests against the released GLM-ASR-Nano checkpoint.

    These tests download the checkpoint and audio fixtures from the Hub, so they need
    network access and are gated behind `@slow`.
    """

    def setUp(self):
        self.checkpoint_name = "zai-org/GLM-ASR-Nano-2512"
        self.processor = AutoProcessor.from_pretrained(self.checkpoint_name)

    def tearDown(self):
        # Release model memory / device caches between tests.
        cleanup(torch_device, gc_collect=True)

    @slow
    def test_single_batch_sub_30(self):
        # Clip shorter than 30s: the chat-template path and the transcription-request
        # helper must produce identical inputs and a known greedy transcription.
        conversation = [
            {
                "role": "user",
                "content": [
                    {
                        "type": "audio",
                        "url": "https://huggingface.co/datasets/eustlb/audio-samples/resolve/main/bcn_weather.mp3",
                    },
                    {"type": "text", "text": "Please transcribe this audio into text"},
                ],
            },
        ]
        model = GlmAsrForConditionalGeneration.from_pretrained(
            self.checkpoint_name, device_map=torch_device, dtype="auto"
        )
        inputs = self.processor.apply_chat_template(
            conversation, tokenize=True, add_generation_prompt=True, return_dict=True
        ).to(model.device, dtype=model.dtype)

        inputs_transcription = self.processor.apply_transcription_request(
            "https://huggingface.co/datasets/eustlb/audio-samples/resolve/main/bcn_weather.mp3",
        ).to(model.device, dtype=model.dtype)

        # Both input-construction paths must agree tensor-for-tensor.
        for key in inputs:
            self.assertTrue(torch.equal(inputs[key], inputs_transcription[key]))

        outputs = model.generate(**inputs, do_sample=False, max_new_tokens=500)
        decoded_outputs = self.processor.batch_decode(
            outputs[:, inputs.input_ids.shape[1] :], skip_special_tokens=True
        )
        EXPECTED_OUTPUT = [
            "Yesterday it was thirty five degrees in Barcelona, but today the temperature will go down to minus twenty degrees."
        ]
        self.assertEqual(decoded_outputs, EXPECTED_OUTPUT)

    @slow
    def test_single_batch_over_30(self):
        # Clip longer than 30s: exercises the long-audio path end to end.
        conversation = [
            {
                "role": "user",
                "content": [
                    {
                        "type": "audio",
                        "url": "https://huggingface.co/datasets/eustlb/audio-samples/resolve/main/obama2.mp3",
                    },
                    {"type": "text", "text": "Please transcribe this audio into text"},
                ],
            },
        ]
        model = GlmAsrForConditionalGeneration.from_pretrained(
            self.checkpoint_name, device_map=torch_device, dtype="auto"
        )
        inputs = self.processor.apply_chat_template(
            conversation, tokenize=True, add_generation_prompt=True, return_dict=True
        ).to(model.device, dtype=model.dtype)

        inputs_transcription = self.processor.apply_transcription_request(
            "https://huggingface.co/datasets/eustlb/audio-samples/resolve/main/obama2.mp3",
        ).to(model.device, dtype=model.dtype)

        # Both input-construction paths must agree tensor-for-tensor.
        for key in inputs:
            self.assertTrue(torch.equal(inputs[key], inputs_transcription[key]))

        outputs = model.generate(**inputs, do_sample=False, max_new_tokens=500)
        decoded_outputs = self.processor.batch_decode(
            outputs[:, inputs.input_ids.shape[1] :], skip_special_tokens=True
        )
        EXPECTED_OUTPUT = [
            "This week, I traveled to Chicago to deliver my final farewell address to the nation, following in the tradition of presidents before me. It was an opportunity to say thank you. Whether we've seen eye to eye or rarely agreed at all, my conversations with you, the American people, in living rooms and schools, at farms and on factory floors, at diners and on distant military outposts, all these conversations are what have kept me honest, kept me inspired, and kept me going. Every day, I learned from you. You made me a better president, and you made me a better man. Over the"
        ]
        self.assertEqual(decoded_outputs, EXPECTED_OUTPUT)

    @slow
    def test_batched(self):
        # Mixed-length batch (short + long clip) transcribed in one generate call.
        conversation = [
            [
                {
                    "role": "user",
                    "content": [
                        {
                            "type": "audio",
                            "url": "https://huggingface.co/datasets/eustlb/audio-samples/resolve/main/bcn_weather.mp3",
                        },
                        {"type": "text", "text": "Please transcribe this audio into text"},
                    ],
                },
            ],
            [
                {
                    "role": "user",
                    "content": [
                        {
                            "type": "audio",
                            "url": "https://huggingface.co/datasets/eustlb/audio-samples/resolve/main/obama2.mp3",
                        },
                        {"type": "text", "text": "Please transcribe this audio into text"},
                    ],
                },
            ],
        ]
        model = GlmAsrForConditionalGeneration.from_pretrained(
            self.checkpoint_name, device_map=torch_device, dtype="auto"
        )
        inputs = self.processor.apply_chat_template(
            conversation, tokenize=True, add_generation_prompt=True, return_dict=True
        ).to(model.device, dtype=model.dtype)

        inputs_transcription = self.processor.apply_transcription_request(
            [
                "https://huggingface.co/datasets/eustlb/audio-samples/resolve/main/bcn_weather.mp3",
                "https://huggingface.co/datasets/eustlb/audio-samples/resolve/main/obama2.mp3",
            ],
        ).to(model.device, dtype=model.dtype)

        # Both input-construction paths must agree tensor-for-tensor.
        for key in inputs:
            self.assertTrue(torch.equal(inputs[key], inputs_transcription[key]))

        outputs = model.generate(**inputs, do_sample=False, max_new_tokens=500)
        decoded_outputs = self.processor.batch_decode(
            outputs[:, inputs.input_ids.shape[1] :], skip_special_tokens=True
        )
        EXPECTED_OUTPUT = [
            "Yesterday it was thirty five degrees in Barcelona, but today the temperature will go down to minus twenty degrees.",
            "This week, I traveled to Chicago to deliver my final farewell address to the nation, following in the tradition of presidents before me. It was an opportunity to say thank you. Whether we've seen eye to eye or rarely agreed at all, my conversations with you, the American people, in living rooms and schools, at farms and on factory floors, at diners and on distant military outposts, all these conversations are what have kept me honest, kept me inspired, and kept me going. Every day, I learned from you. You made me a better president, and you made me a better man. Over the",
        ]
        self.assertEqual(decoded_outputs, EXPECTED_OUTPUT)
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/glmasr/test_modeling_glmasr.py",
"license": "Apache License 2.0",
"lines": 300,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:src/transformers/models/ernie4_5_vl_moe/convert_ernie4_5_vl_moe_to_hf.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Converts Ernie 4.5 VL config and processor to Hugging Face format."""
import argparse
import json
import os
from pathlib import Path
from shutil import copyfile
from huggingface_hub import hf_hub_download, snapshot_download
from tokenizers import AddedToken
from transformers import (
AutoTokenizer,
Ernie4_5_VLMoeConfig,
Ernie4_5_VLMoeImageProcessorFast,
Ernie4_5_VLMoeProcessor,
Ernie4_5_VLMoeVideoProcessor,
LlamaTokenizer,
)
# Filename of the original (non-HF) configuration inside the downloaded checkpoint.
CONFIG_NAME = "config.json"

# Vision-config keys copied verbatim from the original vision config.
VALID_VISION_CONFIG_KEYS = [
    "depth",
    "hidden_size",
    "hidden_act",
    "num_heads",
    "in_channels",
    "patch_size",
    "spatial_merge_size",
]

# Text-config keys copied verbatim from the original top-level config.
VALID_TEXT_CONFIG_KEYS = [
    "hidden_size",
    "intermediate_size",
    "max_position_embeddings",
    "moe_intermediate_size",
    "moe_k",
    "moe_layer_interval",
    "moe_num_shared_experts",
    "num_attention_heads",
    "num_hidden_layers",
    "num_key_value_heads",
    "rms_norm_eps",
    "rope_theta",
    "vocab_size",
    "tie_word_embeddings",
    "use_cache",
    "use_bias",
]

# Keys stored on the text side of the original config that belong to the HF vision
# config (renamed from "*_conv_size" to "*_merge_size" during conversion).
TEXT_TO_VISION_CONFIG_KEYS = [
    "spatial_conv_size",
    "temporal_conv_size",
]

# Keep-lists: every key not listed here is stripped from the converted configs.
ALL_VISION_CONFIG_KEYS = VALID_VISION_CONFIG_KEYS + TEXT_TO_VISION_CONFIG_KEYS + ["intermediate_size"]
ALL_TEXT_CONFIG_KEYS = VALID_TEXT_CONFIG_KEYS + [
    "hidden_act",
    "mlp_layer_types",
    "moe_num_experts",
    "rope_parameters",
]

# Scratch directory used while rewriting the saved tokenizer config on disk.
TMP_TOKENIZER_DIR = "/tmp/ernie_vl_tokenizer"
TOKENIZER_CONFIG_FILE = "tokenizer_config.json"
# Jinja chat template written into the converted tokenizer. Fix: the assistant turn used
# to emit '<|end_of_sentence |>' (stray space inside the token); the tokenizer registers
# the special token as "<|end_of_sentence|>" (see `convert_tokenizer`'s `sep_token`), so
# the spaced variant would never tokenize as the end-of-sentence special token.
DEFAULT_CHAT_TEMPLATE = """
{%- set image_count = namespace(value=0) -%}
{%- set video_count = namespace(value=0) -%}
{{- '<|begin_of_sentence|>' }}
{%- for message in messages -%}
{%- if message.role in ['system', 'user'] -%}
{%- if message.role == 'user' -%}
{{- 'User: ' -}}
{%- endif -%}
{%- if message.content is string -%}
{{- message.content -}}
{%- else -%}
{%- for content_item in message.content -%}
{%- if content_item.type == 'text' -%}
{{- content_item.text -}}
{%- elif content_item.type in ['image_url', 'image'] -%}
{%- set image_count.value = image_count.value + 1 -%}
Picture {{ image_count.value }}:<|IMAGE_START|><|IMAGE_PLACEHOLDER|><|IMAGE_END|>
{%- elif content_item.type in ['video_url', 'video'] -%}
{%- set video_count.value = video_count.value + 1 -%}
Video {{ video_count.value }}:<|VIDEO_START|><|VIDEO_PLACEHOLDER|><|VIDEO_END|>
{%- endif -%}
{%- endfor -%}
{%- endif -%}
{%- if message.role == 'system' -%}
{{- '
' -}}
{%- endif -%}
{%- elif message.role == 'assistant' -%}
{%- macro extract_text_content(content_field) -%}
{%- if content_field is string -%}
{{- content_field -}}
{%- elif content_field is iterable and content_field is not string -%}
{%- set ns = namespace(text_parts=[]) -%}
{%- set text_parts = [] -%}
{%- for item in content_field -%}
{%- if item.type == 'text' -%}
{%- set ns.text_parts = ns.text_parts + [item.text] -%}
{%- endif -%}
{%- endfor -%}
{{- ns.text_parts | join("") -}}
{%- else -%}
{{- '' -}}
{%- endif -%}
{%- endmacro -%}
{%- set reasoning_content = extract_text_content(message.reasoning_content) -%}
{%- set content = extract_text_content(message.content) -%}
{%- if '</think>' in content %}
{%- set reasoning_content = content.split('</think>')[0].rstrip('
').split('<think>')[-1].lstrip('
') %}
{%- set content = content.split('</think>')[-1].lstrip('
') %}
{%- endif %}
{%- if reasoning_content %}
{{- '
' + 'Assistant: ' + '<think>
' + reasoning_content.strip('
') + '
</think>
' + content.lstrip('
') }}
{%- else %}
{{- '
' + 'Assistant: ' + content }}
{%- endif %}
{{- '<|end_of_sentence|>' }}
{%- endif -%}
{%- endfor -%}
{%- if add_generation_prompt is not defined or add_generation_prompt is true %}
{{- '\nAssistant: ' -}}
{%- if (enable_thinking is defined and enable_thinking is false) or enable_thinking is not defined %}
{{- '<think>\n\n</think>\n\n' }}
{%- endif %}
{%- if enable_thinking is defined and enable_thinking is true %}{{- '<think>' }}{%- endif %}
{%- endif %}"""
# Hub repo/file providing the font the video processor renders onto frames.
FONT_REPO = "AntonV/ernie4_5_fonts"
FONT_NAME = "Roboto-Regular.ttf"
def load_json(save_dir, filename):
    """Read and parse the JSON file `filename` located inside `save_dir`."""
    path = os.path.join(save_dir, filename)
    with open(path, "r") as fh:
        return json.load(fh)
def write_json(json_object, save_dir, filename):
    """Serialize `json_object` as pretty-printed, key-sorted JSON into `save_dir/filename`."""
    target = os.path.join(save_dir, filename)
    with open(target, "w") as fh:
        json.dump(json_object, fh, indent=2, sort_keys=True, ensure_ascii=False)
def convert_vision_config_to_hf(vision_config, original_config, original_vision_config):
    """Map the original Ernie vision config onto the HF vision config dict (in place).

    Copies the whitelisted keys, derives `intermediate_size` from `mlp_ratio`, pulls the
    text-side `*_conv_size` keys over as `*_merge_size`, then strips every key not in
    `ALL_VISION_CONFIG_KEYS`.
    """
    # convert vision related stuff
    for key in VALID_VISION_CONFIG_KEYS:
        vision_config[key] = original_vision_config[key]
    vision_config["intermediate_size"] = original_vision_config["hidden_size"] * original_vision_config["mlp_ratio"]

    # convert originally text attributes to vision
    for key in TEXT_TO_VISION_CONFIG_KEYS:
        vision_config[key.replace("conv", "merge")] = original_config[key]
    vision_config["rms_norm_eps"] = 1e-6

    # delete everything else
    # NOTE(review): the keep-list contains the original "*_conv_size" names, not the
    # renamed "*_merge_size" ones, so "temporal_merge_size" and the "rms_norm_eps" set
    # just above are deleted again here ("spatial_merge_size" only survives because it
    # is also in VALID_VISION_CONFIG_KEYS) — confirm this is intended.
    for key in list(vision_config.keys()):
        if key not in ALL_VISION_CONFIG_KEYS:
            del vision_config[key]
    return vision_config
def convert_text_config_to_hf(text_config, original_config):
    """Map the original Ernie text config onto the HF text config dict (in place).

    Carries over the whitelisted keys, fills in defaults the original json leaves
    implicit, derives the per-layer dense/sparse MLP layout from the MoE interval and
    start/end indices, then strips every key not in `ALL_TEXT_CONFIG_KEYS`.
    """
    # carry directly over
    for key in VALID_TEXT_CONFIG_KEYS:
        text_config[key] = original_config.get(key)

    # special cases
    text_config["hidden_act"] = "silu"  # default value which is not explicit in their json
    text_config["use_cache"] = True  # not always included but we should default to `True`
    text_config["moe_num_experts"] = original_config["moe_num_experts"][0]  # the same for both modalities
    text_config["rope_parameters"] = {
        "rope_type": "default",
        "rope_theta": 500_000.0,
        "mrope_section": [22, 22, 20],
    }
    if text_config["moe_num_shared_experts"] is None:
        text_config["moe_num_shared_experts"] = 0

    # ernie logic to construct mlp/moe layers
    text_config["mlp_layer_types"] = []
    for layer_idx in range(text_config["num_hidden_layers"]):
        # A layer is sparse (MoE) when it lands on the MoE interval AND lies within the
        # [start, end] MoE layer range given by the original config.
        if (
            ((layer_idx + 1) % text_config["moe_layer_interval"] == 0)
            and layer_idx >= min(original_config["moe_layer_start_index"])
            and layer_idx <= max(original_config["moe_layer_end_index"])
        ):
            text_config["mlp_layer_types"].append("sparse")
        else:
            text_config["mlp_layer_types"].append("dense")
    # Only used to derive the layout above; not part of the HF config keep-list.
    text_config.pop("moe_layer_interval", None)

    # delete everything else
    for key in list(text_config.keys()):
        if key not in ALL_TEXT_CONFIG_KEYS:
            del text_config[key]
    return text_config
def convert_config(model_path, save_dir):
    """Download the original config from the Hub, convert it, and save it to `save_dir`."""
    # Only the config files are needed, not the weights.
    checkpoint_path = snapshot_download(repo_id=model_path, allow_patterns=["*config*"])
    for filename in sorted(os.listdir(checkpoint_path)):
        if filename == CONFIG_NAME:
            hf_config = Ernie4_5_VLMoeConfig()
            original_config = load_json(checkpoint_path, filename)

            # general config
            image_token_id = original_config["im_patch_id"]

            # vision config
            vision_config = hf_config.vision_config.to_dict()
            original_vision_config = original_config["vision_config"]
            vision_config = convert_vision_config_to_hf(vision_config, original_config, original_vision_config)

            # text config
            text_config = hf_config.text_config.to_dict()
            text_config = convert_text_config_to_hf(text_config, original_config)

            # total config
            final_config = Ernie4_5_VLMoeConfig(
                text_config=text_config,
                vision_config=vision_config,
                image_token_id=image_token_id,
            )
            setattr(final_config, "architectures", original_config["architectures"])  # carry over
            final_config.save_pretrained(save_dir)
            break
    print("Converted model config\n")
def convert_tokenizer(original_tokenizer_path, save_dir):
    """Convert the original sentencepiece tokenizer to HF format and save it to `save_dir`.

    Loads the tokenizer in legacy (slow) mode, rewrites the extra-special-token lists in
    the saved `tokenizer_config.json`, then round-trips through `AutoTokenizer` so the
    final files have canonical formatting.
    """
    # Load in legacy mode
    hf_tok = LlamaTokenizer.from_pretrained(
        original_tokenizer_path,
        pad_token="<unk>",
        cls_token="<|begin_of_sentence|>",
        sep_token="<|end_of_sentence|>",
        mask_token="<mask:1>",
        add_bos_token=False,
        add_prefix_space=False,
        chat_template=DEFAULT_CHAT_TEMPLATE,
    )
    hf_tok.model_max_length = 131072
    hf_tok.init_kwargs.pop("auto_map", None)  # remote specific
    # SPM special added but we want to treat them as non-special
    hf_tok.add_tokens([AddedToken(f"{i}", normalized=False, special=False) for i in range(10)])
    hf_tok.save_pretrained(TMP_TOKENIZER_DIR)

    # Manipulate special tokens and add video token
    tokenizer_config = load_json(TMP_TOKENIZER_DIR, TOKENIZER_CONFIG_FILE)
    # Doubled usage of extra and inherent special tokens
    tokenizer_config["extra_special_tokens"].remove("<s>")
    tokenizer_config["extra_special_tokens"].remove("</s>")
    # SPM special added but we want to treat them as non-special
    for i in range(10):
        tokenizer_config["extra_special_tokens"].remove(f"{i}")
    # Removed from list, re-add
    tokenizer_config["extra_special_tokens"].append("<|IMAGE_PLACEHOLDER|>")
    tokenizer_config["extra_special_tokens"].append("<|IMAGE_START|>")
    tokenizer_config["extra_special_tokens"].append("<|IMAGE_END|>")
    tokenizer_config["extra_special_tokens"].append("<|VIDEO_PLACEHOLDER|>")
    tokenizer_config["extra_special_tokens"].append("<|VIDEO_START|>")
    tokenizer_config["extra_special_tokens"].append("<|VIDEO_END|>")
    tokenizer_config["extra_special_tokens"].append("<think>")
    tokenizer_config["extra_special_tokens"].append("</think>")
    # To be called via `.xxx_token`
    tokenizer_config |= {
        "image_token": "<|IMAGE_PLACEHOLDER|>",
        "image_end_token": "<|IMAGE_END|>",
        "image_start_token": "<|IMAGE_START|>",
        "video_token": "<|VIDEO_PLACEHOLDER|>",
        "video_end_token": "<|VIDEO_END|>",
        "video_start_token": "<|VIDEO_START|>",
    }
    write_json(tokenizer_config, TMP_TOKENIZER_DIR, TOKENIZER_CONFIG_FILE)

    # Reload and save to get correct formatting
    tokenizer = AutoTokenizer.from_pretrained(TMP_TOKENIZER_DIR)
    tokenizer.save_pretrained(save_dir)
def convert_processor(model_path, save_dir):
    """Assemble and save the full processor (tokenizer + image/video processors)."""
    print("Starting to convert processor")
    convert_tokenizer(model_path, save_dir)
    tokenizer = AutoTokenizer.from_pretrained(save_dir)
    # font used within the video processor
    copyfile(hf_hub_download(FONT_REPO, FONT_NAME), Path(save_dir, FONT_NAME))
    processor = Ernie4_5_VLMoeProcessor(
        image_processor=Ernie4_5_VLMoeImageProcessorFast(),
        tokenizer=tokenizer,
        video_processor=Ernie4_5_VLMoeVideoProcessor(font=str(Path(save_dir, FONT_NAME))),
        chat_template=tokenizer.chat_template,
    )
    processor.save_pretrained(save_dir)
    print("Finished converting the processor\n")
if __name__ == "__main__":

    def _str_to_bool(value):
        # argparse's `type=bool` is a footgun: `bool("False")` is True, so any non-empty
        # value (including "False"/"0"/"no") used to enable the flag. Interpret the common
        # false spellings explicitly instead; anything else keeps the old truthy behavior.
        return value.lower() not in ("false", "0", "no")

    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_path",
        type=str,
        default="baidu/ERNIE-4.5-VL-28B-A3B-PT",
        help="Path to the downloaded checkpoint",
    )
    parser.add_argument("--output_folder", default="AntonV/ErnieVL", type=str, help="Path to your output directory.")
    parser.add_argument(
        "--convert_preprocessor",
        type=_str_to_bool,
        default=True,
        help="Whether or not the preprocessor (tokenizer + image/video processors) should be converted along with the model.",
    )
    args = parser.parse_args()

    convert_config(args.checkpoint_path, args.output_folder)
    if args.convert_preprocessor:
        convert_processor(args.checkpoint_path, args.output_folder)
    print(f"Saved converted checkpoint to {args.output_folder}")
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/ernie4_5_vl_moe/convert_ernie4_5_vl_moe_to_hf.py",
"license": "Apache License 2.0",
"lines": 307,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/ernie4_5_vl_moe/modular_ernie4_5_vl_moe.py | # Copyright 2025 Baidu and HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch Ernie4.5-VL model."""
import itertools
from collections.abc import Callable
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from ... import initialization as init
from ...cache_utils import Cache, DynamicCache
from ...configuration_utils import PreTrainedConfig, layer_type_validation
from ...generation import GenerationMixin
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_processing_utils_fast import (
group_images_by_shape,
reorder_images,
)
from ...image_transforms import convert_to_rgb, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
SizeDict,
get_image_size,
infer_channel_dimension_format,
is_scaled_image,
make_list_of_images,
to_numpy_array,
)
from ...masking_utils import create_causal_mask
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import BaseModelOutputWithPooling, MoeCausalLMOutputWithPast, MoeModelOutputWithPast
from ...modeling_rope_utils import dynamic_rope_update
from ...modeling_utils import PreTrainedModel
from ...processing_utils import Unpack
from ...utils import (
TensorType,
TransformersKwargs,
auto_docstring,
can_return_tuple,
logging,
)
from ...utils.generic import maybe_autocast, merge_with_config_defaults
from ...utils.output_capturing import OutputRecorder, capture_outputs
from ..ernie4_5_moe.configuration_ernie4_5_moe import Ernie4_5_MoeConfig
from ..ernie4_5_moe.modeling_ernie4_5_moe import (
Ernie4_5_MoeAttention,
Ernie4_5_MoeExperts,
Ernie4_5_MoeMLP,
Ernie4_5_MoeModel,
Ernie4_5_MoeRMSNorm,
Ernie4_5_MoeStatics,
Ernie4_5_MoeTopKRouter,
)
from ..glm4v.image_processing_glm4v import Glm4vImageProcessor, Glm4vImageProcessorKwargs
from ..glm4v.image_processing_glm4v_fast import Glm4vImageProcessorFast
from ..glm4v.modeling_glm4v import Glm4vForConditionalGeneration
from ..mixtral.modeling_mixtral import load_balancing_loss_func
from ..qwen2_5_vl.modeling_qwen2_5_vl import (
Qwen2_5_VisionPatchEmbed,
Qwen2_5_VisionRotaryEmbedding,
Qwen2_5_VLPreTrainedModel,
Qwen2_5_VLVisionAttention,
Qwen2_5_VLVisionBlock,
)
from ..qwen2_vl.configuration_qwen2_vl import Qwen2VLVisionConfig
from ..qwen2_vl.image_processing_qwen2_vl import smart_resize
from ..qwen2_vl.modeling_qwen2_vl import Qwen2VisionTransformerPretrainedModel, Qwen2VLModel, VisionMlp
# Module-level logger shared by all classes defined in this file.
logger = logging.get_logger(__name__)
class Ernie4_5_VLMoeVisionConfig(Qwen2VLVisionConfig):
    r"""
    This is the configuration class to store the configuration of the [`Ernie4_5_VLMoeVisionTransformerPretrainedModel`].
    It is used to instantiate the vision models portion of the complete Ernie4.5-VL Moe model according to the specified
    arguments, defining the model architecture.
    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.
    Args:
        depth (`int`, *optional*, defaults to 32):
            Number of layers (depth) in the model.
        hidden_size (`int`, *optional*, defaults to 1280):
            Dimensionality of the encoder layers and the pooler layer.
        hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler.
        intermediate_size (`int`, *optional*, defaults to 5120):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        num_heads (`int`, *optional*, defaults to 16):
            Number of attention heads for each attention layer in the Transformer encoder.
        in_channels (`int`, *optional*, defaults to 3):
            The number of input channels.
        patch_size (`int`, *optional*, defaults to 14):
            The size (resolution) of each patch.
        spatial_merge_size (`int`, *optional*, defaults to 2):
            The size used for merging spatial dimensions.
        temporal_merge_size (`int`, *optional*, defaults to 2):
            The size used for merge along the temporal dimension.
        rms_norm_eps (`float`, *optional*, defaults to 1e-06):
            The epsilon used by the rms normalization layers.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
    """

    model_type = "ernie4_5_vl_moe_vision"
    # Tensor-parallel sharding plan for the vision tower projections.
    base_model_tp_plan = {
        "blocks.*.attn.qkv": "colwise",
        "blocks.*.attn.proj": "rowwise",
        "blocks.*.mlp.fc1": "colwise",
        "blocks.*.mlp.fc2": "rowwise",
    }

    def __init__(
        self,
        depth=32,
        hidden_size=1280,
        hidden_act="quick_gelu",
        intermediate_size=4 * 1280,
        num_heads=16,
        in_channels=3,
        patch_size=14,
        spatial_merge_size=2,
        temporal_merge_size=2,
        rms_norm_eps=1e-6,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(
            depth=depth,
            hidden_size=hidden_size,
            hidden_act=hidden_act,
            intermediate_size=intermediate_size,
            num_heads=num_heads,
            in_channels=in_channels,
            patch_size=patch_size,
            spatial_merge_size=spatial_merge_size,
            temporal_merge_size=temporal_merge_size,
            rms_norm_eps=rms_norm_eps,
            initializer_range=initializer_range,
            **kwargs,
        )
        # Qwen2VL attributes with no Ernie 4.5 VL equivalent; the `del` statements also
        # signal the modular converter to drop them from the generated config.
        del self.embed_dim  # noqa: F821
        del self.mlp_ratio  # noqa: F821
        del self.temporal_patch_size  # noqa: F821
        # Explicitly assign the Ernie-specific values; the parent __init__ is not
        # guaranteed to persist these kwargs under these names -- TODO confirm against
        # Qwen2VLVisionConfig.__init__.
        self.intermediate_size = intermediate_size
        self.temporal_merge_size = temporal_merge_size
        self.rms_norm_eps = rms_norm_eps
class Ernie4_5_VLMoeTextConfig(Ernie4_5_MoeConfig, PreTrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`Ernie4_5_VLMoeTextModel`]. It is used to instantiate a
    the text model portion of the complete Ernie4.5-VL Moe model according to the specified arguments, defining the model architecture.
    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.
    Args:
        vocab_size (`int`, *optional*, defaults to 103424):
            Vocabulary size of the Ernie 4.5 VL model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`Ernie4_5_VLMoeTextModel`]
        hidden_size (`int`, *optional*, defaults to 2560):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 12288):
            Dimension of the MLP representations.
        num_hidden_layers (`int`, *optional*, defaults to 28):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 20):
            Number of attention heads for each attention layer in the Transformer encoder.
        num_key_value_heads (`int`, *optional*, defaults to 4):
            This is the number of key_value heads that should be used to implement Grouped Query Attention. If
            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
            `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
            by meanpooling all the original heads within that group. For more details, check out [this
            paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to `4`.
        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the decoder.
        max_position_embeddings (`int`, *optional*, defaults to 131072):
            The maximum sequence length that this model might ever be used with.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        rms_norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon used by the rms normalization layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        use_bias (`bool`, *optional*, defaults to `False`):
            Whether to use a bias in any of the projections including mlp and attention for example.
        rope_parameters (`RopeParameters`, *optional*):
            Dictionary containing the configuration parameters for the RoPE embeddings. The dictionaty should contain
            a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE
            with longer `max_position_embeddings`.
        mlp_layer_types (`list`, *optional*):
            MLP (Moe vs Dense) pattern for each layer.
        moe_intermediate_size (`list[int]`, *optional*, defaults to `[1536, 512]`):
            Intermediate size of the routed experts; differs between text (first) and image (second) experts.
        moe_k (`int`, *optional*, defaults to 6):
            Number of selected experts.
        moe_num_experts (`int`, *optional*, defaults to 64):
            Number of routed experts.
        moe_num_shared_experts (`int`, *optional*, defaults to 2):
            The number of experts that are shared for all MoE forwards.
        moe_norm_min (`float`, *optional*, defaults to 1e-12):
            Minimum division value during routing normalization.
        output_router_logits (`bool`, *optional*, defaults to `False`):
            Whether or not the router logits should be returned by the model. Enabling this will also
            allow the model to output the auxiliary loss, including load balancing loss and router z-loss.
        router_aux_loss_coef (`float`, *optional*, defaults to 0.001):
            The aux loss factor for the total loss.
        pad_token_id (`int`, *optional*):
            Padding token id.
        eos_token_id (`int`, *optional*):
            End of stream token id.
        bos_token_id (`int`, *optional*):
            Beginning of stream token id.
    """

    model_type = "ernie4_5_vl_moe_text"
    base_config_key = "text_config"
    # Tensor-parallel sharding plan for the text decoder projections.
    base_model_tp_plan = {
        "layers.*.self_attn.q_proj": "colwise",
        "layers.*.self_attn.k_proj": "colwise",
        "layers.*.self_attn.v_proj": "colwise",
        "layers.*.self_attn.o_proj": "rowwise",
        "layers.*.mlp.shared_experts.gate_proj": "colwise",
        "layers.*.mlp.shared_experts.up_proj": "colwise",
        "layers.*.mlp.shared_experts.down_proj": "rowwise",
        "layers.*.mlp.gate_proj": "colwise",
        "layers.*.mlp.up_proj": "colwise",
        "layers.*.mlp.down_proj": "rowwise",
    }

    def __init__(
        self,
        vocab_size=103424,
        hidden_size=2560,
        intermediate_size=12288,
        num_hidden_layers=28,
        num_attention_heads=20,
        num_key_value_heads=4,
        hidden_act="silu",
        max_position_embeddings=131072,
        initializer_range=0.02,
        rms_norm_eps=1e-5,
        use_cache=True,
        use_bias=False,
        rope_parameters=None,
        mlp_layer_types=None,
        moe_intermediate_size=None,
        moe_k=6,
        moe_num_experts=64,
        moe_num_shared_experts=2,
        moe_norm_min=1e-12,
        output_router_logits=False,
        router_aux_loss_coef=0.001,
        pad_token_id=None,
        eos_token_id=None,
        bos_token_id=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.use_bias = use_bias
        self.rope_parameters = rope_parameters
        # Default to MoE from the second layer and on
        self.mlp_layer_types = mlp_layer_types
        if self.mlp_layer_types is None:
            self.mlp_layer_types = ["dense"] + ["sparse"] * (self.num_hidden_layers - 1)
        layer_type_validation(self.mlp_layer_types, self.num_hidden_layers, attention=False)
        # [text expert size, vision expert size]
        self.moe_intermediate_size = moe_intermediate_size
        if self.moe_intermediate_size is None:
            self.moe_intermediate_size = [1536, 512]
        self.moe_k = moe_k
        self.moe_num_experts = moe_num_experts
        self.moe_num_shared_experts = moe_num_shared_experts
        self.moe_norm_min = moe_norm_min
        self.output_router_logits = output_router_logits
        self.router_aux_loss_coef = router_aux_loss_coef
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id
        self.bos_token_id = bos_token_id
        # Deliberately call the base config init (bypassing Ernie4_5_MoeConfig.__init__, which
        # this __init__ fully replaces). NOTE: `self` must be passed explicitly on an unbound
        # `__init__` call -- the original code omitted it, which raises a TypeError.
        PreTrainedConfig.__init__(self, ignore_keys_at_rope_validation={"mrope_section"}, **kwargs)
class Ernie4_5_VLMoeConfig(PreTrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`Ernie4_5_VLMoeModel`]. It is used to instantiate a
    Ernie4.5-VL MoE model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of
    Ernie 4.5 VL 28B A3B [baidu/ERNIE-4.5-VL-28B-A3B-PT](https://huggingface.co/baidu/ERNIE-4.5-VL-28B-A3B-PT).
    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.
    Args:
        text_config (`Union[PreTrainedConfig, dict]`, *optional*, defaults to `Ernie4_5_VLMoeTextConfig`):
            The config object or dictionary of the text backbone.
        vision_config (`Union[PreTrainedConfig, dict]`, *optional*, defaults to `Ernie4_5_VLMoeVisionConfig`):
            The config object or dictionary of the vision backbone.
        image_start_token_id (`int`, *optional*, defaults to 101304):
            The image token index to encode the start of image.
        image_end_token_id (`int`, *optional*, defaults to 101305):
            The image token index to encode the end of image.
        image_token_id (`int`, *optional*, defaults to 100295):
            The image token index to encode the image prompt.
        video_start_token_id (`int`, *optional*, defaults to 101306):
            The video token index to encode the start of video.
        video_end_token_id (`int`, *optional*, defaults to 101307):
            The video token index to encode the end of video.
        video_token_id (`int`, *optional*, defaults to 103367):
            The video token index to encode the video prompt.
        tie_word_embeddings (`bool`, *optional*, defaults to `True`):
            Whether the model's input and output word embeddings should be tied.
    ```python
    >>> from transformers import Ernie4_5_VLMoeForConditionalGeneration, Ernie4_5_VLMoeConfig
    >>> # Initializing a Ernie4_5_VLMoe style configuration
    >>> configuration = Ernie4_5_VLMoeConfig()
    >>> # Initializing a model from the Ernie 4.5 VL 28B A3B configuration
    >>> model = Ernie4_5_VLMoeForConditionalGeneration(configuration)
    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "ernie4_5_vl_moe"
    sub_configs = {"vision_config": Ernie4_5_VLMoeVisionConfig, "text_config": Ernie4_5_VLMoeTextConfig}
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        image_start_token_id=101304,
        image_end_token_id=101305,
        image_token_id=100295,
        video_start_token_id=101306,
        video_end_token_id=101307,
        video_token_id=103367,
        tie_word_embeddings=True,
        **kwargs,
    ):
        # Each sub-config accepts a dict, an already-built config object, or None (defaults).
        # NOTE(review): any other type silently leaves the attribute unset -- presumably
        # unreachable in practice; confirm against PreTrainedConfig deserialization.
        if isinstance(vision_config, dict):
            self.vision_config = self.sub_configs["vision_config"](**vision_config)
        elif isinstance(vision_config, Ernie4_5_VLMoeVisionConfig):
            self.vision_config = vision_config
        elif vision_config is None:
            self.vision_config = self.sub_configs["vision_config"]()
        if isinstance(text_config, dict):
            self.text_config = self.sub_configs["text_config"](**text_config)
        elif isinstance(text_config, Ernie4_5_VLMoeTextConfig):
            self.text_config = text_config
        elif text_config is None:
            # Text defaults can be customized through the top-level kwargs.
            self.text_config = self.sub_configs["text_config"](**kwargs)
        self.image_start_token_id = image_start_token_id
        self.image_end_token_id = image_end_token_id
        self.image_token_id = image_token_id
        self.video_start_token_id = video_start_token_id
        self.video_end_token_id = video_end_token_id
        self.video_token_id = video_token_id
        self.tie_word_embeddings = tie_word_embeddings
        super().__init__(**kwargs)
class Ernie4_5_VLMoeTextRotaryEmbedding(nn.Module):
    """3D (temporal/height/width) rotary embedding for the Ernie 4.5 VL text decoder."""

    inv_freq: torch.Tensor  # fix linting for `register_buffer`

    def __init__(self, config, device=None):
        super().__init__()
        self.max_seq_len_cached = config.max_position_embeddings
        self.original_max_seq_len = config.max_position_embeddings
        self.config = config
        self.rope_type = self.config.rope_parameters["rope_type"]
        rope_init_fn: Callable = self.compute_default_rope_parameters
        # Only the plain ("default") RoPE parametrization is supported for this model.
        if self.rope_type != "default":
            raise ValueError(f"Ernie 4.5 VL requires the `default` rope type, but found {self.rope_type} instead.")
        inv_freq, self.attention_scaling = rope_init_fn(self.config, device)
        self.register_buffer("inv_freq", inv_freq, persistent=False)
        self.original_inv_freq = inv_freq
        # Sizes of the three frequency sections of the head dim (see `recomposition_to_3d`).
        self.mrope_section = config.rope_parameters.get("mrope_section", [22, 22, 20])

    @staticmethod
    def compute_default_rope_parameters(
        config: Ernie4_5_VLMoeTextConfig | None = None,
        device: Optional["torch.device"] = None,
        seq_len: int | None = None,
    ) -> tuple["torch.Tensor", float]:
        """
        Computes the inverse frequencies according to the original RoPE implementation
        Args:
            config ([`~transformers.PreTrainedConfig`]):
                The model configuration.
            device (`torch.device`):
                The device to use for initialization of the inverse frequencies.
            seq_len (`int`, *optional*):
                The current sequence length. Unused for this type of RoPE.
        Returns:
            Tuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the
            post-processing scaling factor applied to the computed cos/sin (unused in this type of RoPE).
        """
        base = config.rope_parameters["rope_theta"]
        dim = getattr(config, "head_dim", None) or config.hidden_size // config.num_attention_heads
        attention_factor = 1.0  # Unused in this type of RoPE
        # Compute the inverse frequencies
        inv_freq = 1.0 / (
            base ** (torch.arange(0, dim, 2, dtype=torch.int64).to(device=device, dtype=torch.float) / dim)
        )
        # Special to ernie, we prerotate on the hw dim
        mrope_section = config.rope_parameters.get("mrope_section", [22, 22, 20])
        hw_dim = mrope_section[0] + mrope_section[1]
        t_dim = mrope_section[2]
        inv_freq_3d = torch.empty_like(inv_freq)
        # (Pre-)Rotate to avoid another rotation during the forward: the non-temporal
        # frequencies are de-interleaved (even indices first, then odd); the temporal
        # frequencies keep their original order.
        inv_freq_3d[:hw_dim] = torch.cat([inv_freq[:-t_dim][0::2], inv_freq[:-t_dim][1::2]])
        inv_freq_3d[-t_dim:] = inv_freq[-t_dim:]
        return inv_freq_3d, attention_factor

    @torch.no_grad()
    @dynamic_rope_update  # power user: used with advanced RoPE types (e.g. dynamic rope)
    def forward(self, x, position_ids):
        # position_ids has a leading dim of 3 -- one position grid per rotary axis
        # (temporal/height/width -- see the caller's "hard coded `3`" note).
        inv_freq_expanded = (
            self.inv_freq[None, None, :, None].float().expand(3, position_ids.shape[1], -1, 1).to(x.device)
        )
        position_ids_expanded = position_ids[:, :, None, :].float()  # shape (3, bs, 1, positions)
        # mps does not support fp32 autocast-disabled matmul here; fall back to cpu autocast scope
        device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
        with maybe_autocast(device_type=device_type, enabled=False):  # Force float32
            freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(2, 3)
            cos = freqs.cos() * self.attention_scaling
            sin = freqs.sin() * self.attention_scaling
            sin = self.recomposition_to_3d(sin)
            cos = self.recomposition_to_3d(cos)
        return cos, sin

    def recomposition_to_3d(self, freq):
        """Merge the per-grid frequency tables into a single rotation table.

        `freq` carries one entry per position grid in its leading dim (size 3). Each
        mrope section is taken from the next grid in cyclic order (`(i + 1) % 3`): the
        h section from grid 1, the w section from grid 2, the t section from grid 0.
        The h/w sections are re-interleaved, the t section appended, and the result is
        repeated pairwise to span the full head dim.
        """
        freq_h, freq_w, freq_t = (m[(i + 1) % 3] for i, m in enumerate(freq.split([*self.mrope_section], dim=-1)))
        freq_hw = torch.stack([freq_h, freq_w], dim=-1).flatten(-2)
        freq_hwt = torch.cat([freq_hw, freq_t], dim=-1)
        return freq_hwt.repeat_interleave(2, dim=-1)
def rotate_half_text(x):
    """Rotate every adjacent (even, odd) pair of hidden dims: (a, b) -> (-b, a)."""
    even, odd = x[..., ::2], x[..., 1::2]
    rotated_pairs = torch.stack((-odd, even), dim=-1)
    return rotated_pairs.flatten(-2)
def apply_rotary_pos_emb(q, k, cos, sin, unsqueeze_dim=1):
    """Applies Rotary Position Embedding to the query and key tensors.

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            Dimension along which `cos` and `sin` are unsqueezed so they broadcast
            against `q` and `k`. With `[batch, heads, seq, head_dim]` tensors use 1;
            with `[batch, seq, heads, head_dim]` tensors use 2.

    Returns:
        `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
    """
    input_dtype = q.dtype
    # Insert a broadcast dim so the rotation tables line up with the head dimension.
    cos = cos.unsqueeze(unsqueeze_dim)
    sin = sin.unsqueeze(unsqueeze_dim)
    # The rotation is computed in float32; the input dtype is restored afterwards.
    rotated_q = q.float() * cos + rotate_half_text(q).float() * sin
    rotated_k = k.float() * cos + rotate_half_text(k).float() * sin
    return rotated_q.to(input_dtype), rotated_k.to(input_dtype)
class Ernie4_5_VLMoeTextAttention(Ernie4_5_MoeAttention):
    # Unchanged from Ernie 4.5 MoE attention; re-declared under the VL model's name.
    pass
class Ernie4_5_VLMoeRMSNorm(Ernie4_5_MoeRMSNorm):
    # Unchanged from Ernie 4.5 MoE RMSNorm; re-declared under the VL model's name.
    pass
class Ernie4_5_VLMoeMLP(Ernie4_5_MoeMLP):
    # Unchanged from the Ernie 4.5 MoE dense MLP; re-declared under the VL model's name.
    pass
class Ernie4_5_VLMoeMoeStatics(Ernie4_5_MoeStatics):
    # Unchanged from the Ernie 4.5 MoE statics module; re-declared under the VL model's name.
    pass
class Ernie4_5_VLMoeMoeTopKRouter(Ernie4_5_MoeTopKRouter):
    def __init__(self, config):
        super().__init__(config)
        # Replace the parent's statics with the VL variant (holds `e_score_correction_bias`,
        # zero-initialized in `_init_weights`).
        self.moe_statics = Ernie4_5_VLMoeMoeStatics(config)
class Ernie4_5_VLMoeMoeExperts(Ernie4_5_MoeExperts):
    def __init__(self, config, intermediate_size=None):
        super().__init__(config)
        # Allow overriding the expert intermediate size; text and vision expert groups
        # use different sizes (`config.moe_intermediate_size` is `[text, vision]`).
        self.intermediate_dim = config.moe_intermediate_size if intermediate_size is None else intermediate_size
class Ernie4_5_VLMoeSparseMoeBlock(nn.Module):
    """Single-modality MoE block (top-k router + routed experts).

    Used twice per decoder MoE layer: once for text tokens and once for vision tokens
    (the two instances only differ in their expert intermediate size).
    """

    def __init__(self, config, intermediate_size):
        super().__init__()
        self.hidden_dim = config.hidden_size
        self.num_experts = config.moe_num_experts
        self.top_k = config.moe_k
        self.gate = Ernie4_5_VLMoeMoeTopKRouter(config)
        self.experts = Ernie4_5_VLMoeMoeExperts(config, intermediate_size)

    def forward(
        self,
        hidden_states: torch.Tensor,
        # Fixed annotation: this forward returns two tensors, not four.
    ) -> tuple[torch.Tensor, torch.Tensor]:
        """Route the tokens to their top-k experts and combine the expert outputs.

        Returns:
            `(final_hidden_states, router_logits)`, both flattened to 1D.
        """
        hidden_states = hidden_states.view(-1, self.hidden_dim)
        router_logits, top_k_index, top_k_weights = self.gate(hidden_states)
        final_hidden_states = self.experts(hidden_states, top_k_index, top_k_weights)
        # moe results are changed to a flattened shape to ease the modality isolated assigning of results
        return final_hidden_states.flatten(), router_logits.flatten()
class Ernie4_5_VLMoeMoeBlock(nn.Module):
    """
    Similar to `Ernie4_5_Moe` where we have modality isolated experts:
    - A set of text experts that are only run on text tokens
    - A set of vision experts that are only run on vision (image/video) tokens
    This modality isolation is unique to the Ernie 4.5 VL Moe models.
    """

    def __init__(self, config):
        super().__init__()
        self.num_experts = config.moe_num_experts
        # `moe_intermediate_size` is `[text_size, vision_size]`
        self.text_moe = Ernie4_5_VLMoeSparseMoeBlock(config, intermediate_size=config.moe_intermediate_size[0])
        self.vision_moe = Ernie4_5_VLMoeSparseMoeBlock(config, intermediate_size=config.moe_intermediate_size[1])
        self.shared_experts = None
        if config.moe_num_shared_experts > 0:
            self.shared_experts = Ernie4_5_VLMoeMLP(
                config, config.moe_intermediate_size[0] * config.moe_num_shared_experts
            )

    def forward(
        self,
        hidden_states: torch.Tensor,
        moe_mm_token_type_ids: torch.IntTensor | None = None,
        # Fixed annotation: this forward returns two tensors, not four.
    ) -> tuple[torch.Tensor, torch.Tensor]:
        """Run the modality-isolated MoE.

        Args:
            hidden_states: `(batch_size, sequence_length, hidden_dim)` input.
            moe_mm_token_type_ids: per-token modality ids; `0` marks text tokens, non-zero
                (1 or 2) marks vision tokens. `None` (or all zeros) routes everything
                through the text experts.

        Returns:
            `(final_hidden_states, router_logits)`: the combined expert output in the input
            shape, and logits of shape `(batch_size * sequence_length, num_experts)`.
        """
        batch_size, sequence_length, hidden_dim = hidden_states.shape
        # (Optional) shared experts -- applied to every token regardless of modality
        if self.shared_experts is not None:
            shared_output = self.shared_experts(hidden_states)
        if moe_mm_token_type_ids is not None and moe_mm_token_type_ids.any():
            final_hidden_states = torch.zeros_like(hidden_states)
            router_logits = torch.zeros(
                size=(batch_size * sequence_length, self.num_experts),
                device=final_hidden_states.device,
                dtype=torch.float,
            )
            # True (1 or 2) == vision, False (0) == text tokens
            moe_mm_token_type_ids = moe_mm_token_type_ids.bool()
            token_type_ids_router = moe_mm_token_type_ids.reshape(-1)[:, None].expand(-1, self.num_experts)
            token_type_ids_states = moe_mm_token_type_ids[..., None].expand(-1, -1, hidden_dim)
            # Run moe on each modality and assign their results to the original token positions
            # (the sparse blocks return flattened tensors, matching the masked assignment here)
            final_hidden_states[~token_type_ids_states], router_logits[~token_type_ids_router] = self.text_moe(
                hidden_states[~token_type_ids_states]
            )
            final_hidden_states[token_type_ids_states], router_logits[token_type_ids_router] = self.vision_moe(
                hidden_states[token_type_ids_states]
            )
        else:
            final_hidden_states, router_logits = self.text_moe(hidden_states)
        final_hidden_states = final_hidden_states.reshape(batch_size, sequence_length, hidden_dim)
        router_logits = router_logits.reshape(-1, self.num_experts)
        # Add (optional) shared experts to the result
        if self.shared_experts is not None:
            final_hidden_states = final_hidden_states + shared_output
        return final_hidden_states, router_logits
class Ernie4_5_VLMoeDecoderLayer(GradientCheckpointingLayer):
    """Decoder layer: self-attention followed by either a dense MLP or the
    modality-isolated MoE block, each with a pre-RMSNorm and residual connection."""

    def __init__(self, config, layer_idx):
        super().__init__()
        self.hidden_size = config.hidden_size
        self.self_attn = Ernie4_5_VLMoeTextAttention(config, layer_idx)
        # `mlp_layer_types` selects per layer between MoE ("sparse") and a dense MLP
        if config.mlp_layer_types[layer_idx] == "sparse":
            self.mlp = Ernie4_5_VLMoeMoeBlock(config)
        else:
            self.mlp = Ernie4_5_VLMoeMLP(config)
        self.input_layernorm = Ernie4_5_VLMoeRMSNorm(config.hidden_size, config.rms_norm_eps)
        self.post_attention_layernorm = Ernie4_5_VLMoeRMSNorm(config.hidden_size, config.rms_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: tuple[torch.Tensor, torch.Tensor],
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.Tensor | None = None,
        moe_mm_token_type_ids: torch.IntTensor | None = None,
        past_key_values: Cache | None = None,
        cache_position: torch.LongTensor | None = None,
        **kwargs: Unpack[FlashAttentionKwargs],
        # Fixed annotation: only the hidden states tensor is returned (router logits are
        # captured via `OutputRecorder`, not returned here).
    ) -> torch.Tensor:
        residual = hidden_states
        hidden_states = self.input_layernorm(hidden_states)
        # Self Attention
        hidden_states, _ = self.self_attn(
            hidden_states=hidden_states,
            position_embeddings=position_embeddings,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            cache_position=cache_position,
            **kwargs,
        )
        hidden_states = hidden_states + residual
        # Fully Connected
        residual = hidden_states
        hidden_states = self.post_attention_layernorm(hidden_states)
        # The MoE block additionally needs the modality ids to split text/vision routing
        if isinstance(self.mlp, Ernie4_5_VLMoeMoeBlock):
            hidden_states, _ = self.mlp(hidden_states, moe_mm_token_type_ids)
        else:
            hidden_states = self.mlp(hidden_states)
        hidden_states = hidden_states + residual
        return hidden_states
class Ernie4_5_VLMoeVisionAttention(Qwen2_5_VLVisionAttention):
    # Unchanged from Qwen2.5-VL vision attention; re-declared under the VL model's name.
    pass
class Ernie4_5_VLMoeVisionBlock(Qwen2_5_VLVisionBlock):
    def __init__(self, config) -> None:
        super().__init__(config, None)
        # Ernie uses plain LayerNorm inside the vision blocks; note that
        # `config.rms_norm_eps` is passed positionally as LayerNorm's `eps` argument.
        self.norm1 = nn.LayerNorm(config.hidden_size, config.rms_norm_eps)
        self.norm2 = nn.LayerNorm(config.hidden_size, config.rms_norm_eps)
        self.mlp = Ernie4_5VLVisionMLP(
            dim=config.hidden_size,
            hidden_dim=config.intermediate_size,
            hidden_act=config.hidden_act,
        )
class Ernie4_5_VLMoePreTrainedModel(Qwen2_5_VLPreTrainedModel):
    _can_compile_fullgraph = False
    # Tensors recorded during forward via OutputRecorder/capture_outputs.
    _can_record_outputs = {
        "router_logits": OutputRecorder(Ernie4_5_VLMoeMoeBlock, index=1),
        "hidden_states": Ernie4_5_VLMoeDecoderLayer,
        "attentions": Ernie4_5_VLMoeTextAttention,
    }
    # Router gate weights and MoE statics are kept in fp32 even for half-precision models.
    _keep_in_fp32_modules_strict = ["gate.weight", "moe_statics"]

    def _init_weights(self, module):
        """Generic weight init plus special handling for MoE router/expert and vision rope modules."""
        PreTrainedModel._init_weights(self, module)
        if isinstance(module, Ernie4_5_VLMoeMoeTopKRouter):
            # Routing correction bias starts at zero; router projection gets a normal init.
            init.zeros_(module.moe_statics.e_score_correction_bias)
            init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
        elif isinstance(module, Ernie4_5_VLMoeMoeExperts):
            init.normal_(module.gate_up_proj, mean=0.0, std=self.config.initializer_range)
            init.normal_(module.down_proj, mean=0.0, std=self.config.initializer_range)
        elif isinstance(module, Ernie4_5_VLMoeVisionRotaryEmbedding):
            # Recompute the deterministic inverse-frequency buffer instead of leaving it
            # to the generic (random) initialization.
            inv_freq = 1.0 / (module.theta ** (torch.arange(0, module.dim, 2, dtype=torch.float) / module.dim))
            init.copy_(module.inv_freq, inv_freq)
class Ernie4_5_VLMoeTextModel(Ernie4_5_MoeModel):
    """Text decoder stack; identical to the Ernie 4.5 MoE model except for the 3D rotary embedding."""

    config: Ernie4_5_VLMoeTextConfig

    def __init__(self, config: Ernie4_5_VLMoeTextConfig):
        super().__init__(config)
        # Override the parent's rotary embedding with the 3D (temporal/height/width) variant.
        self.rotary_emb = Ernie4_5_VLMoeTextRotaryEmbedding(config=config)

    @merge_with_config_defaults
    @capture_outputs
    @auto_docstring
    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        moe_mm_token_type_ids: torch.IntTensor | None = None,
        past_key_values: Cache | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        use_cache: bool | None = None,
        cache_position: torch.LongTensor | None = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> MoeModelOutputWithPast:
        r"""
        moe_mm_token_type_ids (`torch.IntTensor` of shape `(batch_size, sequence_length)`, *optional*):
            The same as `mm_token_type_ids` while additionally considering start/end image/video tokens as respective vision tokens.
        """
        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
        if use_cache and past_key_values is None:
            past_key_values = DynamicCache(config=self.config)
        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)
        if cache_position is None:
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position = torch.arange(
                past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
            )
        # the hard coded `3` is for temporal, height and width.
        if position_ids is None:
            position_ids = cache_position.view(1, 1, -1).expand(3, inputs_embeds.shape[0], -1)
        elif position_ids.ndim == 2:
            position_ids = position_ids[None, ...].expand(3, position_ids.shape[0], -1)
        # NOTE: we need to pass text position ids for packing. Ernie 4.5 VL uses 3D positions
        # where each dim indicates visual spatial positions for temporal/height/width grids.
        # There is only one scenario when FA2-like packed masking might be activated:
        # 1. User specifically passed packed `position_ids` and no attention mask.
        # In this case we expect the user to create correct position ids for all 3 grids
        # and prepend text-only position ids to it. The final tensor will be [4, bs, seq-len]
        if position_ids.ndim == 3 and position_ids.shape[0] == 4:
            text_position_ids = position_ids[0]
            position_ids = position_ids[1:]
        else:
            # If inputs are not packed (usual 3D positions), do not prepare mask from position_ids
            text_position_ids = None
        attention_mask = create_causal_mask(
            config=self.config,
            inputs_embeds=inputs_embeds,
            attention_mask=attention_mask,
            cache_position=cache_position,
            past_key_values=past_key_values,
            position_ids=text_position_ids,
        )
        hidden_states = inputs_embeds
        # create position embeddings to be shared across the decoder layers
        position_embeddings = self.rotary_emb(hidden_states, position_ids)
        for decoder_layer in self.layers[: self.config.num_hidden_layers]:
            hidden_states = decoder_layer(
                hidden_states,
                position_embeddings=position_embeddings,
                attention_mask=attention_mask,
                position_ids=position_ids,
                moe_mm_token_type_ids=moe_mm_token_type_ids,
                past_key_values=past_key_values,
                cache_position=cache_position,
                **kwargs,
            )
        hidden_states = self.norm(hidden_states)
        return MoeModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values,
        )
class Ernie4_5VLVisionMLP(VisionMlp):
    # Unchanged from the Qwen2-VL vision MLP; re-declared under the VL model's name.
    pass
class Ernie4_5_VLMoePatchEmbed(Qwen2_5_VisionPatchEmbed):
    """Patch embedding via a single linear projection over flattened spatial patches."""

    def __init__(
        self,
        patch_size: int = 14,
        in_channels: int = 3,
        embed_dim: int = 1152,
    ) -> None:
        super().__init__(patch_size, in_channels, embed_dim)
        # Ernie's patch embedding is purely spatial: drop the temporal attribute and the
        # parent's `kernel_size` local. NOTE(review): `del kernel_size` would raise a
        # NameError if this class were executed directly -- presumably a modular-converter
        # directive (hence the `noqa: F821`); confirm against the generated modeling file.
        del self.temporal_patch_size
        del kernel_size  # noqa: F821
        self.proj = nn.Linear(in_channels * patch_size * patch_size, embed_dim, bias=False)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        # Cast the input to the projection weight's dtype before the matmul.
        target_dtype = self.proj.weight.dtype
        return self.proj(hidden_states.to(target_dtype))
class Ernie4_5_VLMoeVisionRotaryEmbedding(Qwen2_5_VisionRotaryEmbedding):
    # Unchanged from the Qwen2.5 vision rotary embedding; re-declared under the VL model's name.
    pass
class Ernie4_5_VLMoeVisionTransformerPretrainedModel(Qwen2VisionTransformerPretrainedModel):
    """Vision tower: patch embedding, rotary positions and a stack of vision blocks."""

    # Tensors recorded during forward via OutputRecorder/capture_outputs.
    _can_record_outputs = {
        "router_logits": OutputRecorder(Ernie4_5_VLMoeMoeBlock, index=1),
        "hidden_states": Ernie4_5_VLMoeVisionBlock,
        "attentions": Ernie4_5_VLMoeVisionAttention,
    }

    def __init__(self, config) -> None:
        super().__init__(config)
        # No patch-merger stage in this tower (merging presumably happens in the
        # resampler model -- confirm against Ernie4_5_VLMoeVariableResolutionResamplerModel).
        del self.merger
        self.patch_embed = Ernie4_5_VLMoePatchEmbed(
            patch_size=config.patch_size,
            in_channels=config.in_channels,
            embed_dim=config.hidden_size,
        )
        head_dim = config.hidden_size // config.num_heads
        # Frequencies cover half the head dim; forward duplicates them via torch.cat.
        self.rotary_pos_emb = Ernie4_5_VLMoeVisionRotaryEmbedding(head_dim // 2)
        self.ln = nn.LayerNorm(config.hidden_size, eps=config.rms_norm_eps)

    def get_dtype(self):
        raise AttributeError("Ernie 4.5 VL Moe does not need this!")

    def get_device(self):
        raise AttributeError("Ernie 4.5 VL Moe does not need this!")

    @merge_with_config_defaults
    @capture_outputs
    def forward(
        self, hidden_states: torch.Tensor, grid_thw: torch.Tensor, **kwargs: Unpack[TransformersKwargs]
    ) -> tuple | BaseModelOutputWithPooling:
        # Embed the flattened patches, then derive rotary positions from the (t, h, w) grids.
        hidden_states = self.patch_embed(hidden_states)
        rotary_pos_emb = self.rot_pos_emb(grid_thw)
        emb = torch.cat((rotary_pos_emb, rotary_pos_emb), dim=-1)
        position_embeddings = (emb.cos(), emb.sin())
        # Cumulative per-frame sequence lengths (h*w repeated t times), for packed attention.
        cu_seqlens = torch.repeat_interleave(grid_thw[:, 1] * grid_thw[:, 2], grid_thw[:, 0]).cumsum(
            dim=0,
            # Select dtype based on the following factors:
            # - FA2 requires that cu_seqlens_q must have dtype int32
            # - torch.onnx.export requires that cu_seqlens_q must have same dtype as grid_thw
            # See https://github.com/huggingface/transformers/pull/34852 for more information
            dtype=grid_thw.dtype if torch.jit.is_tracing() else torch.int32,
        )
        cu_seqlens = F.pad(cu_seqlens, (1, 0), value=0)
        for block in self.blocks:
            hidden_states = block(
                hidden_states,
                cu_seqlens=cu_seqlens,
                position_embeddings=position_embeddings,
                **kwargs,
            )
        hidden_states = self.ln(hidden_states)
        return BaseModelOutputWithPooling(last_hidden_state=hidden_states)
class Ernie4_5_VLMoeVisionMLP(nn.Module):
    """Two-layer feed-forward block (Linear -> GELU -> Linear) followed by a LayerNorm.

    Used by the resampler to project merged vision patches.
    """
    def __init__(self, config, in_dim, out_dim):
        super().__init__()
        # NOTE: submodule names (fc1/act_fn/fc2/ln) are part of the checkpoint layout — do not rename.
        eps = config.vision_config.rms_norm_eps
        self.fc1 = nn.Linear(in_dim, out_dim)
        self.act_fn = nn.GELU()
        self.fc2 = nn.Linear(out_dim, out_dim)
        self.ln = nn.LayerNorm(out_dim, eps=eps)
    def forward(self, hidden_states):
        """Apply fc1 -> GELU -> fc2 -> LayerNorm in sequence and return the result."""
        for stage in (self.fc1, self.act_fn, self.fc2, self.ln):
            hidden_states = stage(hidden_states)
        return hidden_states
class Ernie4_5_VLMoeVariableResolutionResamplerModel(nn.Module):
    """Resampler that merges vision-tower patch features spatially and temporally and
    projects them into the language model's hidden size.

    Replaces the Qwen2-VL patch merger: spatial merging imitates a 2D convolution via a
    reshape + MLP, temporal merging interleaves even/odd frames via index slicing.
    """
    def __init__(self, config: Ernie4_5_VLMoeConfig):
        super().__init__()
        self.config = config
        self.in_dim = config.vision_config.hidden_size
        self.out_dim = config.text_config.hidden_size
        self.spatial_merge_size = config.vision_config.spatial_merge_size
        self.temporal_merge_size = config.vision_config.temporal_merge_size
        # compress 2d conv(picture) to 1d
        self.spatial_dim = self.in_dim * self.spatial_merge_size**2
        # compress 3d conv(video) to 1d
        self.temporal_dim = self.in_dim * self.spatial_merge_size**2 * self.temporal_merge_size
        self.spatial_linear = Ernie4_5_VLMoeVisionMLP(config, self.spatial_dim, self.spatial_dim)
        self.temporal_linear = Ernie4_5_VLMoeVisionMLP(config, self.temporal_dim, self.spatial_dim)
        self.mlp = nn.Linear(self.spatial_dim, self.out_dim)
        self.after_norm = Ernie4_5_VLMoeRMSNorm(self.out_dim, config.text_config.rms_norm_eps)
    def _temporal_slicing(self, hidden_states, grid_thw):
        """
        Slices along the temporal dimension in even/odd patterns (usually if we have a video input)
        or duplicates along temporal dimension (usually if we have an image input).
        Example:
            Video input with temporal pattern of [1, -1, 2, -2, 3, -3]
                > Even input [1, 2, 3], odd input [-1, -2, -3]
                > Reorderd via slices to [1, 2, 3, -1, -2, -3]
            Image input with temporal pattern [1]
                > Duplicate input [1], [1]
                > Reordered to [1, 1]
        NOTE: This is hard-coded for `temporal_merge_size == 2` and won't work otherwise.
        """
        # Calculating offsets on spatial dim (based on flattened tensors)
        grid_t, grid_hw = grid_thw[:, 0], grid_thw[:, 1:]
        grid_hw_after_conv = grid_hw.prod(-1) // (self.spatial_merge_size**2)
        # Calculating offsets on batch dim (based on flattened tensors)
        tokens_per_img_or_vid = (grid_thw.prod(-1) // (self.spatial_merge_size**2)).flatten()
        # batch_offsets[i] = start index of image/video i within the flattened token dimension.
        batch_offsets = torch.empty(tokens_per_img_or_vid.size(), dtype=tokens_per_img_or_vid.dtype)
        batch_offsets[0] = 0
        batch_offsets[1:] = tokens_per_img_or_vid.cumsum(dim=0)[:-1]
        first_slice_offsets = []
        second_slice_offsets = []
        for temporal_size, spatial_size, batch_offset in zip(grid_t, grid_hw_after_conv, batch_offsets):
            # Depending on temporal, we may interleave:
            # - Images have temporal == 1 --> same offsets (duplicate "frame" image)
            # - Videos have temporal > 1 --> different offsets (even, odd)
            first_offset_range = range(0, temporal_size, 2)
            second_offset_range = range(1 if temporal_size > 1 else 0, temporal_size, 2)
            for temporal_offset_even, temporal_offset_odd in zip(first_offset_range, second_offset_range):
                # Each arange selects the full spatial extent of one (even or odd) frame.
                first_slice_offsets.append(
                    torch.arange(
                        batch_offset + (temporal_offset_even) * spatial_size,
                        batch_offset + (temporal_offset_even + 1) * spatial_size,
                    )
                )
                second_slice_offsets.append(
                    torch.arange(
                        batch_offset + (temporal_offset_odd) * spatial_size,
                        batch_offset + (temporal_offset_odd + 1) * spatial_size,
                    )
                )
        # Input:   [1, -1, 2, -2, 3, -3] or [1]
        # Indices: [0, 2, 4] (even)      or [0] (duplicate)
        first_slice_offsets = torch.cat(first_slice_offsets, dim=-1).to(hidden_states.device)
        # Indices: [1, 3, 5] (odd)       or [0] (duplicate)
        second_slice_offsets = torch.cat(second_slice_offsets, dim=-1).to(hidden_states.device)
        # Output: [1, 2, 3, -1, -2, -3] or [1, 1]
        # Concatenation on the feature dim pairs each even frame with its odd counterpart.
        return torch.concat(
            [
                torch.index_select(hidden_states, dim=0, index=first_slice_offsets),
                torch.index_select(hidden_states, dim=0, index=second_slice_offsets),
            ],
            dim=-1,
        )
    def forward(self, hidden_states, grid_thw):
        """Merge patches spatially, then temporally, then project to the text hidden size."""
        # image spatial
        # reshape imitates convolution via linear projection
        hidden_states = hidden_states.reshape([-1, hidden_states.shape[-1] * (self.spatial_merge_size**2)])
        hidden_states = self.spatial_linear(hidden_states)
        # video temporal
        hidden_states = self._temporal_slicing(hidden_states, grid_thw)
        hidden_states = self.temporal_linear(hidden_states)
        # final mlp
        hidden_states = self.mlp(hidden_states)
        hidden_states = self.after_norm(hidden_states)
        return hidden_states
class Ernie4_5_VLMoeModel(Qwen2VLModel):
    """Ernie 4.5 VL MoE multimodal backbone: vision tower + resampler + MoE language model."""
    _checkpoint_conversion_mapping = {"^norm": "language_model.norm"}
    config: Ernie4_5_VLMoeConfig
    _no_split_modules = ["Ernie4_5_VLMoeDecoderLayer", "Ernie4_5_VLMoeVisionBlock"]
    def __init__(self, config: Ernie4_5_VLMoeConfig):
        super().__init__(config)
        # Replace Qwen2-VL's `visual` with the Ernie vision tower + resampler pair.
        del self.visual
        self.vision_tower = Ernie4_5_VLMoeVisionTransformerPretrainedModel._from_config(config.vision_config)
        self.resampler_model = Ernie4_5_VLMoeVariableResolutionResamplerModel(config)
    def get_rope_index(
        self,
        input_ids: torch.LongTensor,
        mm_token_type_ids: torch.IntTensor,
        image_grid_thw: torch.LongTensor | None = None,
        video_grid_thw: torch.LongTensor | None = None,
        attention_mask: torch.Tensor | None = None,
        **kwargs,
    ) -> tuple[torch.Tensor, torch.Tensor]:
        """
        Calculate the 3D rope index based on image and video's sizes. The utility expects a `vision + text`
        sequence and will error out otherwise. For pure text sequence, please rely on model's auto-inferred
        position ids. In a mixed vision + text sequence, vision tokens use 3D RoPE (temporal, height, width)
        while text tokens use standard 1D RoPE.
        Example:
            Temporal patches: 3; Height patches: 2; Width patches: 2
            Each vision input results in (temporal x height × width) positions. Here: 3 x 2 × 2 = 12 positions total.
            Temporal position IDs are spaced by:
                `interval = tokens_per_second * temporal_patch_size / fps`
            If fps = 1; tokens_per_second = 25; temporal_patch_size = 2, temporal IDs increase by 50 for each temporal patch:
                `[0, 0, 0, 0, 50, 50, 50, 50, 100, 100, 100, 100]`
            Height IDs repeat per row: `[0, 0, 1, 1, ...]`
            Width IDs alternate per column: `[0, 1, 0, 1, ...]`
            Text tokens follow standard 1D RoPE and the position IDs grow consequently with a step of `1`
        Args:
            input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
                Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
                it.
            mm_token_type_ids (`torch.IntTensor` of shape `(batch_size, sequence_length)`):
                Token type ids matching each modality to a different value in the input sequence, i.e. text (0), image (1), video (2).
            image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*):
                The temporal, height and width of feature shape of each image in LLM.
            video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*):
                The temporal, height and width of feature shape of each video in LLM.
            attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.
        Returns:
            position_ids (`torch.LongTensor` of shape `(3, batch_size, sequence_length)`)
            mrope_position_deltas (`torch.Tensor` of shape `(batch_size)`)
        """
        temporal_merge_size = self.config.vision_config.temporal_merge_size
        spatial_merge_size = self.config.vision_config.spatial_merge_size
        mrope_position_deltas = []
        position_ids = torch.zeros(
            3,
            input_ids.shape[0],
            input_ids.shape[1],
            dtype=input_ids.dtype,
            device=input_ids.device,
        )
        # Iterators keyed by modality token-type id (1 = image, 2 = video); each grid is consumed
        # exactly once in sequence order.
        grid_iters = {
            1: iter(image_grid_thw) if image_grid_thw is not None else None,
            2: iter(video_grid_thw) if video_grid_thw is not None else None,
        }
        for batch_idx, current_input_ids in enumerate(input_ids):
            input_token_type = mm_token_type_ids[batch_idx]
            if attention_mask is not None:
                # Drop padding positions before grouping by modality.
                current_input_ids = current_input_ids[attention_mask[batch_idx].bool()]
                input_token_type = input_token_type[attention_mask[batch_idx].bool()]
            # Build contiguous (modality, start, end) runs over the sequence.
            input_type_group = []
            for key, group in itertools.groupby(enumerate(input_token_type.tolist()), lambda x: x[1]):
                group = list(group)
                start_index = group[0][0]
                end_index = group[-1][0] + 1
                input_type_group.append((key, start_index, end_index))
            current_pos = 0
            llm_pos_ids_list = []
            for modality_type, start_idx, end_idx in input_type_group:
                # text == 0
                if modality_type == 0:
                    # Text tokens: plain 1D positions replicated across the 3 RoPE axes.
                    text_len = end_idx - start_idx
                    llm_pos_ids_list.append(
                        torch.arange(text_len, device=input_ids.device).view(1, -1).expand(3, -1) + current_pos
                    )
                    current_pos += text_len
                # image == 1, video == 2
                else:
                    grid_thw = next(grid_iters[modality_type])
                    # Images keep temporal merge of 1; only videos merge frames temporally.
                    t_merge_size = 1 if modality_type == 1 else temporal_merge_size
                    # `get_vision_position_ids` is inherited — presumably builds the (t, h, w) grid; verify in base class.
                    vision_position_ids = self.get_vision_position_ids(
                        current_pos, grid_thw, t_merge_size, spatial_merge_size, device=input_ids.device
                    )
                    llm_pos_ids_list.append(vision_position_ids)
                    # Advance text position by the larger spatial extent after merging.
                    current_pos += max(grid_thw[1], grid_thw[2]) // spatial_merge_size
            llm_positions = torch.cat(llm_pos_ids_list, dim=1).reshape(3, -1)
            if attention_mask is not None:
                # Scatter positions back into the unpadded slots only.
                position_ids[:, batch_idx, attention_mask[batch_idx].bool()] = llm_positions.to(position_ids.device)
            else:
                position_ids[:, batch_idx] = llm_positions.to(position_ids.device)
            mrope_position_deltas.append(llm_positions.max() + 1 - len(current_input_ids))
        mrope_position_deltas = torch.tensor(mrope_position_deltas, device=input_ids.device).unsqueeze(1)
        return position_ids, mrope_position_deltas
    @can_return_tuple
    @auto_docstring
    def get_video_features(
        self,
        pixel_values_videos: torch.FloatTensor,
        video_grid_thw: torch.LongTensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple | BaseModelOutputWithPooling:
        # Encode video patches then merge/project them with the resampler.
        video_outputs = self.vision_tower(pixel_values_videos, video_grid_thw, return_dict=True, **kwargs)
        video_embeds = self.resampler_model(video_outputs.last_hidden_state, video_grid_thw)
        # One split per video: patches / spatial merge^2 / temporal merge.
        split_sizes = (
            video_grid_thw.prod(-1)
            // self.vision_tower.spatial_merge_size**2
            // self.resampler_model.temporal_merge_size
        ).tolist()
        video_embeds = torch.split(video_embeds, split_sizes)
        video_outputs.pooler_output = video_embeds
        return video_outputs
    @can_return_tuple
    @auto_docstring
    def get_image_features(
        self,
        pixel_values: torch.FloatTensor,
        image_grid_thw: torch.LongTensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple | BaseModelOutputWithPooling:
        # Encode image patches then merge/project them with the resampler.
        image_outputs = self.vision_tower(pixel_values, image_grid_thw, return_dict=True, **kwargs)
        image_embeds = self.resampler_model(image_outputs.last_hidden_state, image_grid_thw)
        split_sizes = (image_grid_thw.prod(-1) // self.vision_tower.spatial_merge_size**2).tolist()
        image_embeds = torch.split(image_embeds, split_sizes)
        image_outputs.pooler_output = image_embeds
        return image_outputs
    @auto_docstring
    @can_return_tuple
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        mm_token_type_ids: torch.IntTensor | None = None,
        moe_mm_token_type_ids: torch.IntTensor | None = None,
        past_key_values: Cache | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        use_cache: bool | None = None,
        pixel_values: torch.Tensor | None = None,
        pixel_values_videos: torch.FloatTensor | None = None,
        image_grid_thw: torch.LongTensor | None = None,
        video_grid_thw: torch.LongTensor | None = None,
        rope_deltas: torch.LongTensor | None = None,
        cache_position: torch.LongTensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple | MoeModelOutputWithPast:
        r"""
        mm_token_type_ids (`torch.IntTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Token type ids matching each modality to a different value in the input sequence, i.e. text (0), image (1), video (2).
        moe_mm_token_type_ids (`torch.IntTensor` of shape `(batch_size, sequence_length)`, *optional*):
            The same as `mm_token_type_ids` while additionally considering start/end image/video tokens as respective vision tokens.
        image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*):
            The temporal, height and width of feature shape of each image in LLM.
        video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*):
            The temporal, height and width of feature shape of each video in LLM.
        rope_deltas (`torch.LongTensor` of shape `(batch_size, )`, *optional*):
            The rope index difference between sequence length and multimodal rope.
        """
        if inputs_embeds is None:
            inputs_embeds = self.get_input_embeddings()(input_ids)
        if pixel_values is not None:
            # Replace image placeholder embeddings with encoded image features.
            image_embeds = self.get_image_features(pixel_values, image_grid_thw, return_dict=True).pooler_output
            image_embeds = torch.cat(image_embeds, dim=0).to(inputs_embeds.device, inputs_embeds.dtype)
            image_mask, _ = self.get_placeholder_mask(
                input_ids, inputs_embeds=inputs_embeds, image_features=image_embeds
            )
            inputs_embeds = inputs_embeds.masked_scatter(image_mask, image_embeds)
        if pixel_values_videos is not None:
            # Replace video placeholder embeddings with encoded video features.
            video_embeds = self.get_video_features(pixel_values_videos, video_grid_thw, return_dict=True).pooler_output
            video_embeds = torch.cat(video_embeds, dim=0).to(inputs_embeds.device, inputs_embeds.dtype)
            _, video_mask = self.get_placeholder_mask(
                input_ids, inputs_embeds=inputs_embeds, video_features=video_embeds
            )
            inputs_embeds = inputs_embeds.masked_scatter(video_mask, video_embeds)
        if position_ids is None:
            # `compute_3d_position_ids` is inherited — presumably wraps `get_rope_index`; verify in base class.
            position_ids = self.compute_3d_position_ids(
                input_ids=input_ids,
                image_grid_thw=image_grid_thw,
                video_grid_thw=video_grid_thw,
                mm_token_type_ids=mm_token_type_ids,
                inputs_embeds=inputs_embeds,
                attention_mask=attention_mask,
                past_key_values=past_key_values,
            )
        outputs = self.language_model(
            input_ids=None,
            position_ids=position_ids,
            moe_mm_token_type_ids=moe_mm_token_type_ids,
            attention_mask=attention_mask,
            use_cache=use_cache,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            return_dict=True,
            cache_position=cache_position,
            **kwargs,
        )
        return MoeModelOutputWithPast(
            last_hidden_state=outputs.last_hidden_state,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            router_logits=outputs.router_logits,
        )
class Ernie4_5_VLMoeForConditionalGeneration(Glm4vForConditionalGeneration, GenerationMixin):
    """Ernie 4.5 VL MoE model with a language-modeling head and MoE auxiliary loss support."""
    _checkpoint_conversion_mapping = {"^model.norm": "model.language_model.norm"}
    def __init__(self, config):
        super().__init__(config)
        # Cached MoE hyperparameters used when computing the load-balancing auxiliary loss.
        self.router_aux_loss_coef = config.text_config.router_aux_loss_coef
        self.num_experts = config.text_config.moe_num_experts
        self.num_experts_per_tok = config.text_config.moe_k
    @auto_docstring
    def get_video_features(self, **super_kwargs):
        # Thin pass-through kept so `auto_docstring` attaches model-specific docs.
        return super().get_video_features(**super_kwargs)
    @auto_docstring
    def get_image_features(self, **super_kwargs):
        # Thin pass-through kept so `auto_docstring` attaches model-specific docs.
        return super().get_image_features(**super_kwargs)
    def prepare_inputs_for_generation(
        self,
        input_ids,
        inputs_embeds=None,
        attention_mask=None,
        cache_position=None,
        past_key_values=None,
        image_grid_thw=None,
        video_grid_thw=None,
        use_cache=True,
        is_first_iteration=False,
        position_ids=None,
        **kwargs,
    ):
        """Delegate to the parent, then drop vision inputs on cached decode steps."""
        model_inputs = super().prepare_inputs_for_generation(
            input_ids,
            inputs_embeds=inputs_embeds,
            attention_mask=attention_mask,
            cache_position=cache_position,
            past_key_values=past_key_values,
            image_grid_thw=image_grid_thw,
            video_grid_thw=video_grid_thw,
            use_cache=use_cache,
            is_first_iteration=is_first_iteration,
            position_ids=position_ids,
            **kwargs,
        )
        if not is_first_iteration and use_cache:
            # Vision features are already merged into the KV cache after the first step;
            # re-sending pixel values / token-type ids would recompute them needlessly.
            model_inputs["pixel_values"] = None
            model_inputs["pixel_values_videos"] = None
            model_inputs["mm_token_type_ids"] = None
            model_inputs["moe_mm_token_type_ids"] = None
        return model_inputs
    @auto_docstring
    @can_return_tuple
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        mm_token_type_ids: torch.IntTensor | None = None,
        moe_mm_token_type_ids: torch.IntTensor | None = None,
        past_key_values: Cache | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        labels: torch.LongTensor | None = None,
        use_cache: bool | None = None,
        output_router_logits: bool | None = None,
        pixel_values: torch.Tensor | None = None,
        pixel_values_videos: torch.FloatTensor | None = None,
        image_grid_thw: torch.LongTensor | None = None,
        video_grid_thw: torch.LongTensor | None = None,
        rope_deltas: torch.LongTensor | None = None,
        cache_position: torch.LongTensor | None = None,
        logits_to_keep: int | torch.Tensor = 0,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple | MoeCausalLMOutputWithPast:
        r"""
        mm_token_type_ids (`torch.IntTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Token type ids matching each modality to a different value in the input sequence, i.e. text (0), image (1), video (2).
        moe_mm_token_type_ids (`torch.IntTensor` of shape `(batch_size, sequence_length)`, *optional*):
            The same as `mm_token_type_ids` while additionally considering start/end image/video tokens as respective vision tokens.
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
        image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*):
            The temporal, height and width of feature shape of each image in LLM.
        video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*):
            The temporal, height and width of feature shape of each video in LLM.
        rope_deltas (`torch.LongTensor` of shape `(batch_size, )`, *optional*):
            The rope index difference between sequence length and multimodal rope.
        """
        output_router_logits = (
            output_router_logits if output_router_logits is not None else self.config.text_config.output_router_logits
        )
        outputs = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            mm_token_type_ids=mm_token_type_ids,
            moe_mm_token_type_ids=moe_mm_token_type_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_router_logits=output_router_logits,
            return_dict=True,
            pixel_values=pixel_values,
            pixel_values_videos=pixel_values_videos,
            image_grid_thw=image_grid_thw,
            video_grid_thw=video_grid_thw,
            rope_deltas=rope_deltas,
            cache_position=cache_position,
            **kwargs,
        )
        hidden_states = outputs.last_hidden_state
        # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
        slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
        logits = self.lm_head(hidden_states[:, slice_indices, :])
        loss = None
        if labels is not None:
            loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.text_config.vocab_size)
        aux_loss = None
        if output_router_logits:
            # MoE load-balancing auxiliary loss computed from recorded router logits.
            aux_loss = load_balancing_loss_func(
                outputs.router_logits,
                self.num_experts,
                self.num_experts_per_tok,
                attention_mask,
            )
            if labels is not None:
                loss += self.router_aux_loss_coef * aux_loss.to(loss.device)  # make sure to reside in the same device
        return MoeCausalLMOutputWithPast(
            loss=loss,
            aux_loss=aux_loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            router_logits=outputs.router_logits,
        )
class Ernie4_5_VLMoeImageProcessorKwargs(Glm4vImageProcessorKwargs):
    r"""
    Keyword arguments accepted by the Ernie 4.5 VL MoE image processors.

    patch_size (`int`, *optional*, defaults to 14):
        The spatial patch size of the vision encoder.
    temporal_patch_size (`int`, *optional*):
        The temporal patch size of the vision encoder. Unused in the image processor, only used for videos.
    merge_size (`int`, *optional*, defaults to 2):
        The merge size of the vision encoder to llm encoder.
    """
class Ernie4_5_VLMoeImageProcessor(Glm4vImageProcessor):
    r"""
    Constructs a Ernie 4.5 VL image processor that dynamically resizes images based on the original images.
    Args:
        do_resize (`bool`, *optional*, defaults to `True`):
            Whether to resize the image's (height, width) dimensions.
        size (`dict[str, int]`, *optional*, defaults to `{"shortest_edge": 56 * 56, "longest_edge": 28 * 28 * 6177}`):
            Size of the image after resizing. `shortest_edge` and `longest_edge` keys must be present.
        resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`):
            Resampling filter to use when resizing the image.
        do_rescale (`bool`, *optional*, defaults to `True`):
            Whether to rescale the image by the specified scale `rescale_factor`.
        rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
            Scale factor to use if rescaling the image.
        do_normalize (`bool`, *optional*, defaults to `True`):
            Whether to normalize the image.
        image_mean (`float` or `list[float]`, *optional*, defaults to `[0.48145466, 0.4578275, 0.40821073]`):
            Mean to use if normalizing the image. This is a float or list of floats for each channel in the image.
        image_std (`float` or `list[float]`, *optional*, defaults to `[0.26862954, 0.26130258, 0.27577711]`):
            Standard deviation to use if normalizing the image. This is a float or list of floats for each channel
            in the image.
        do_convert_rgb (`bool`, *optional*, defaults to `True`):
            Whether to convert the image to RGB.
        patch_size (`int`, *optional*, defaults to 14):
            The spatial patch size of the vision encoder.
        temporal_patch_size (`int`, *optional*):
            The temporal patch size of the vision encoder. Unused in the image processor, only used for videos.
        merge_size (`int`, *optional*, defaults to 2):
            The merge size of the vision encoder to llm encoder.
    """
    def __init__(
        self,
        do_resize: bool = True,
        size: dict[str, int] | None = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_rescale: bool = True,
        rescale_factor: int | float = 1 / 255,
        do_normalize: bool = True,
        image_mean: float | list[float] | None = None,
        image_std: float | list[float] | None = None,
        do_convert_rgb: bool = True,
        patch_size: int = 14,
        temporal_patch_size: int | None = None,
        merge_size: int = 2,
        **kwargs,
    ) -> None:
        # Intentionally call BaseImageProcessor.__init__ directly (skipping Glm4vImageProcessor)
        # so the Ernie-specific defaults set below are not overwritten by the Glm4v parent.
        # Fix: the unbound call was previously missing `self` and raised a TypeError.
        BaseImageProcessor.__init__(self, **kwargs)
        if size is not None:
            if "shortest_edge" not in size or "longest_edge" not in size:
                raise ValueError("size must contain 'shortest_edge' and 'longest_edge' keys.")
            size = {"shortest_edge": size["shortest_edge"], "longest_edge": size["longest_edge"]}
        else:
            size = {"shortest_edge": 56 * 56, "longest_edge": 6177 * 28 * 28}
        self.size = size
        self.do_resize = do_resize
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.patch_size = patch_size
        self.temporal_patch_size = temporal_patch_size
        self.merge_size = merge_size
        self.do_convert_rgb = do_convert_rgb
    def _preprocess(
        self,
        images: ImageInput,
        do_resize: bool | None = None,
        size: dict[str, int] | None = None,
        resample: PILImageResampling = None,
        do_rescale: bool | None = None,
        rescale_factor: float | None = None,
        do_normalize: bool | None = None,
        image_mean: float | list[float] | None = None,
        image_std: float | list[float] | None = None,
        patch_size: int | None = None,
        temporal_patch_size: int | None = None,
        merge_size: int | None = None,
        do_convert_rgb: bool | None = None,
        data_format: ChannelDimension | None = ChannelDimension.FIRST,
        input_data_format: str | ChannelDimension | None = None,
    ):
        """
        Preprocess an image or batch of images. Copy of the `preprocess` method from `CLIPImageProcessor`.
        Args:
            images (`ImageInput`):
                Image or batch of images to preprocess. Expects pixel values ranging from 0 to 255. If pixel values range from 0 to 1, set `do_rescale=False`.
            vision_info (`list[Dict]`, *optional*):
                Optional list of dictionaries containing additional information about vision inputs.
            do_resize (`bool`, *optional*, defaults to `self.do_resize`):
                Whether to resize the image.
            size (`dict[str, int]`, *optional*, defaults to `self.size`):
                Size of the image after resizing. `shortest_edge` and `longest_edge` keys must be present.
            resample (`PILImageResampling`, *optional*, defaults to `self.resample`):
                Resampling filter to use if resizing the image. This can be one of the `PILImageResampling` enums.
            do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
                Whether to rescale the image.
            rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
                Scale factor to use if rescaling the image.
            do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
                Whether to normalize the image.
            image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
                Mean to use if normalizing the image. Can be a float or a list of floats corresponding to the number of channels in the image.
            image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
                Standard deviation to use if normalizing the image. Can be a float or a list of floats corresponding to the number of channels in the image.
            patch_size (`int`, *optional*, defaults to `self.patch_size`):
                The spatial patch size of the vision encoder.
            temporal_patch_size (`int`, *optional*):
                The temporal patch size of the vision encoder. Unused in the image processor, only used for videos.
            merge_size (`int`, *optional*, defaults to `self.merge_size`):
                The merge size of the vision encoder to llm encoder.
            do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
                Whether to convert the image to RGB.
            data_format (`ChannelDimension`, *optional*, defaults to `ChannelDimension.FIRST`):
                The channel dimension format for the output image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - Unset: Use the channel dimension format of the input image.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format for the input image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
        """
        images = make_list_of_images(images)
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_rescale and is_scaled_image(images[0]):
            logger.warning_once(
                "It looks like you are trying to rescale already rescaled images. If the input"
                " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
            )
        if input_data_format is None:
            # We assume that all images have the same channel dimension format.
            input_data_format = infer_channel_dimension_format(images[0])
        height, width = get_image_size(images[0], channel_dim=input_data_format)
        resized_height, resized_width = height, width
        processed_images = []
        for image in images:
            if do_resize:
                resized_height, resized_width = smart_resize(
                    height,
                    width,
                    factor=patch_size * merge_size,
                    min_pixels=size["shortest_edge"],
                    max_pixels=size["longest_edge"],
                )
                image = resize(
                    image, size=(resized_height, resized_width), resample=resample, input_data_format=input_data_format
                )
            if do_rescale:
                image = self.rescale(image, scale=rescale_factor, input_data_format=input_data_format)
            if do_normalize:
                image = self.normalize(
                    image=image, mean=image_mean, std=image_std, input_data_format=input_data_format
                )
            image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
            processed_images.append(image)
        patches = np.array(processed_images)
        if data_format == ChannelDimension.LAST:
            # Patching below expects channels-first layout.
            patches = patches.transpose([0, 3, 1, 2])
        # Main difference to Qwen2 VL - no temporal patches
        channel = patches.shape[1]
        grid_t = patches.shape[0]
        grid_h, grid_w = resized_height // patch_size, resized_width // patch_size
        patches = patches.reshape(
            [
                grid_t,
                channel,
                grid_h // merge_size,
                merge_size,
                patch_size,
                grid_w // merge_size,
                merge_size,
                patch_size,
            ]
        )
        # [grid_t, grid_h/merge, grid_w/merge, merge, merge, channel, patch, patch]
        patches = patches.transpose([0, 2, 5, 3, 6, 1, 4, 7])
        flatten_patches = patches.reshape(grid_t * grid_h * grid_w, channel * patch_size * patch_size)
        return flatten_patches, (grid_t, grid_h, grid_w)
    def get_number_of_image_patches(self, height: int, width: int, images_kwargs=None):
        """
        A utility that returns number of image patches for a given image size.
        Args:
            height (`int`):
                Height of the input image.
            width (`int`):
                Width of the input image.
            images_kwargs (`dict`, *optional*)
                Any kwargs to override defaults of the image processor.
        Returns:
            `int`: Number of image patches per image.
        """
        # Guard the declared default: `images_kwargs=None` previously crashed on `.get`.
        images_kwargs = images_kwargs if images_kwargs is not None else {}
        min_pixels = self.size["shortest_edge"]
        max_pixels = self.size["longest_edge"]
        patch_size = images_kwargs.get("patch_size", self.patch_size)
        merge_size = images_kwargs.get("merge_size", self.merge_size)
        factor = patch_size * merge_size
        resized_height, resized_width = smart_resize(
            height, width, factor, min_pixels=min_pixels, max_pixels=max_pixels
        )
        grid_h, grid_w = resized_height // patch_size, resized_width // patch_size
        return grid_h * grid_w
class Ernie4_5_VLMoeImageProcessorFast(Glm4vImageProcessorFast):
    """Fast (torch-based) Ernie 4.5 VL image processor; patches images without temporal patching."""
    size = {"shortest_edge": 56 * 56, "longest_edge": 28 * 28 * 6177}
    temporal_patch_size = None  # Unused
    def _preprocess(
        self,
        images: list["torch.Tensor"],
        do_resize: bool,
        size: SizeDict,
        interpolation: Optional["F.InterpolationMode"],
        do_rescale: bool,
        rescale_factor: float,
        do_normalize: bool,
        image_mean: float | list[float] | None,
        image_std: float | list[float] | None,
        patch_size: int,
        merge_size: int,
        disable_grouping: bool | None,
        return_tensors: str | TensorType | None,
        **kwargs,
    ):
        """Resize, rescale/normalize and patchify a batch of images.

        Returns a `BatchFeature` with `pixel_values` of shape
        `(total_patches, channel * patch_size**2)` and `image_grid_thw` of shape `(num_images, 3)`.
        """
        # Group images by size for batched resizing
        grouped_images, grouped_images_index = group_images_by_shape(images, disable_grouping=disable_grouping)
        resized_images_grouped = {}
        for shape, stacked_images in grouped_images.items():
            height, width = stacked_images.shape[-2:]
            if do_resize:
                resized_height, resized_width = smart_resize(
                    height,
                    width,
                    factor=patch_size * merge_size,
                    min_pixels=size["shortest_edge"],
                    max_pixels=size["longest_edge"],
                )
                stacked_images = self.resize(
                    image=stacked_images,
                    size=SizeDict(height=resized_height, width=resized_width),
                    interpolation=interpolation,
                )
            resized_images_grouped[shape] = stacked_images
        resized_images = reorder_images(resized_images_grouped, grouped_images_index)
        # Group images by size for further processing
        # Needed in case do_resize is False, or resize returns images with different sizes
        grouped_images, grouped_images_index = group_images_by_shape(resized_images, disable_grouping=disable_grouping)
        processed_images_grouped = {}
        processed_grids = {}
        for shape, stacked_images in grouped_images.items():
            resized_height, resized_width = stacked_images.shape[-2:]
            # Fused rescale and normalize
            patches = self.rescale_and_normalize(
                stacked_images, do_rescale, rescale_factor, do_normalize, image_mean, image_std
            )
            if patches.ndim == 4:
                # add a temporal dimension if we have images
                patches = patches.unsqueeze(1)
            # Main difference to Qwen2 VL - no temporal patches
            batch_size, grid_t, channel = patches.shape[:3]
            grid_h, grid_w = resized_height // patch_size, resized_width // patch_size
            patches = patches.view(
                batch_size,
                grid_t,
                channel,
                grid_h // merge_size,
                merge_size,
                patch_size,
                grid_w // merge_size,
                merge_size,
                patch_size,
            )
            # Reorder dimensions to group grid and patch information for subsequent flattening.
            # [batch, grid_t, grid_h/merge, grid_w/merge, merge, merge, channel, patch, patch]
            patches = patches.permute(0, 1, 3, 6, 4, 7, 2, 5, 8)
            flatten_patches = patches.reshape(
                batch_size,
                grid_t * grid_h * grid_w,
                channel * patch_size * patch_size,
            )
            processed_images_grouped[shape] = flatten_patches
            processed_grids[shape] = [[grid_t, grid_h, grid_w]] * batch_size
        processed_images = reorder_images(processed_images_grouped, grouped_images_index)
        processed_grids = reorder_images(processed_grids, grouped_images_index)
        pixel_values = torch.cat(processed_images, dim=0)
        image_grid_thw = torch.tensor(processed_grids)
        return BatchFeature(
            data={"pixel_values": pixel_values, "image_grid_thw": image_grid_thw}, tensor_type=return_tensors
        )
    def get_number_of_image_patches(self, height: int, width: int, images_kwargs=None):
        """
        A utility that returns number of image patches for a given image size.
        Note: Do not remove this method! It is used by vLLM to infer the number of patches and placeholders
        without an image input.
        Args:
            height (`int`):
                Height of the input image.
            width (`int`):
                Width of the input image.
            images_kwargs (`dict`, *optional*)
                Any kwargs to override defaults of the image processor.
        Returns:
            `int`: Number of image patches per image.
        """
        # Guard the declared default: `images_kwargs=None` previously crashed on `.get`.
        images_kwargs = images_kwargs if images_kwargs is not None else {}
        min_pixels = self.size["shortest_edge"]
        max_pixels = self.size["longest_edge"]
        patch_size = images_kwargs.get("patch_size", self.patch_size)
        merge_size = images_kwargs.get("merge_size", self.merge_size)
        factor = patch_size * merge_size
        resized_height, resized_width = smart_resize(
            height, width, factor, min_pixels=min_pixels, max_pixels=max_pixels
        )
        grid_h, grid_w = resized_height // patch_size, resized_width // patch_size
        return grid_h * grid_w
# Keep aliases for BC
class Ernie4_5_VL_MoeForConditionalGeneration(Ernie4_5_VLMoeForConditionalGeneration):
    """Deprecated alias of `Ernie4_5_VLMoeForConditionalGeneration`, kept for backward compatibility."""

    def __init__(self, *args, **kwargs):
        # Emit the deprecation notice once, then behave exactly like the new class.
        message = (
            "`Ernie4_5_VL_MoeForConditionalGeneration` is deprecated; "
            "please use `Ernie4_5_VLMoeForConditionalGeneration` instead."
        )
        logger.warning_once(message)
        super().__init__(*args, **kwargs)
class Ernie4_5_VL_MoeConfig(Ernie4_5_VLMoeConfig):
    """Deprecated alias of `Ernie4_5_VLMoeConfig`, kept for backward compatibility."""

    def __init__(self, *args, **kwargs):
        # Emit the deprecation notice once, then behave exactly like the new class.
        message = "`Ernie4_5_VL_MoeConfig` is deprecated; please use `Ernie4_5_VLMoeConfig` instead."
        logger.warning_once(message)
        super().__init__(*args, **kwargs)
class Ernie4_5_VL_MoeTextConfig(Ernie4_5_VLMoeTextConfig):
    """Deprecated alias of `Ernie4_5_VLMoeTextConfig`, kept for backward compatibility."""

    def __init__(self, *args, **kwargs):
        # Emit the deprecation notice once, then behave exactly like the new class.
        message = "`Ernie4_5_VL_MoeTextConfig` is deprecated; please use `Ernie4_5_VLMoeTextConfig` instead."
        logger.warning_once(message)
        super().__init__(*args, **kwargs)
class Ernie4_5_VL_MoeVisionConfig(Ernie4_5_VLMoeVisionConfig):
    """Deprecated alias of `Ernie4_5_VLMoeVisionConfig`, kept for backward compatibility."""

    def __init__(self, *args, **kwargs):
        # Emit the deprecation notice once, then behave exactly like the new class.
        message = "`Ernie4_5_VL_MoeVisionConfig` is deprecated; please use `Ernie4_5_VLMoeVisionConfig` instead."
        logger.warning_once(message)
        super().__init__(*args, **kwargs)
class Ernie4_5_VL_MoePreTrainedModel(Ernie4_5_VLMoePreTrainedModel):
    """Deprecated alias of `Ernie4_5_VLMoePreTrainedModel`, kept for backward compatibility."""

    def post_init(self):
        # Warn once at post-init time (this base class has no dedicated `__init__`).
        message = "`Ernie4_5_VL_MoePreTrainedModel` is deprecated; please use `Ernie4_5_VLMoePreTrainedModel` instead."
        logger.warning_once(message)
        super().post_init()
class Ernie4_5_VL_MoeModel(Ernie4_5_VLMoeModel):
    """Deprecated alias of `Ernie4_5_VLMoeModel`, kept for backward compatibility."""

    def __init__(self, *args, **kwargs):
        # Emit the deprecation notice once, then behave exactly like the new class.
        message = "`Ernie4_5_VL_MoeModel` is deprecated; please use `Ernie4_5_VLMoeModel` instead."
        logger.warning_once(message)
        super().__init__(*args, **kwargs)
class Ernie4_5_VL_MoeTextModel(Ernie4_5_VLMoeTextModel):
    """Deprecated alias of `Ernie4_5_VLMoeTextModel`, kept for backward compatibility."""

    def __init__(self, *args, **kwargs):
        # Emit the deprecation notice once, then behave exactly like the new class.
        message = "`Ernie4_5_VL_MoeTextModel` is deprecated; please use `Ernie4_5_VLMoeTextModel` instead."
        logger.warning_once(message)
        super().__init__(*args, **kwargs)
class Ernie4_5_VL_MoeVisionTransformerPretrainedModel(Ernie4_5_VLMoeVisionTransformerPretrainedModel):
    """Deprecated alias of `Ernie4_5_VLMoeVisionTransformerPretrainedModel`, kept for backward compatibility."""

    def __init__(self, *args, **kwargs):
        # Emit the deprecation notice once, then behave exactly like the new class.
        message = (
            "`Ernie4_5_VL_MoeVisionTransformerPretrainedModel` is deprecated; "
            "please use `Ernie4_5_VLMoeVisionTransformerPretrainedModel` instead."
        )
        logger.warning_once(message)
        super().__init__(*args, **kwargs)
class Ernie4_5_VL_MoeVariableResolutionResamplerModel(Ernie4_5_VLMoeVariableResolutionResamplerModel):
    """Deprecated alias of `Ernie4_5_VLMoeVariableResolutionResamplerModel`, kept for backward compatibility."""

    def __init__(self, *args, **kwargs):
        # Emit the deprecation notice once, then behave exactly like the new class.
        message = (
            "`Ernie4_5_VL_MoeVariableResolutionResamplerModel` is deprecated; "
            "please use `Ernie4_5_VLMoeVariableResolutionResamplerModel` instead."
        )
        logger.warning_once(message)
        super().__init__(*args, **kwargs)
class Ernie4_5_VL_MoeImageProcessor(Ernie4_5_VLMoeImageProcessor):
    """Deprecated alias of `Ernie4_5_VLMoeImageProcessor`, kept for backward compatibility."""

    def __init__(self, *args, **kwargs):
        # Emit the deprecation notice once, then behave exactly like the new class.
        message = "`Ernie4_5_VL_MoeImageProcessor` is deprecated; please use `Ernie4_5_VLMoeImageProcessor` instead."
        logger.warning_once(message)
        super().__init__(*args, **kwargs)
class Ernie4_5_VL_MoeImageProcessorFast(Ernie4_5_VLMoeImageProcessorFast):
    """Deprecated alias of `Ernie4_5_VLMoeImageProcessorFast`, kept for backward compatibility."""

    def __init__(self, *args, **kwargs):
        # Emit the deprecation notice once, then behave exactly like the new class.
        message = (
            "`Ernie4_5_VL_MoeImageProcessorFast` is deprecated; "
            "please use `Ernie4_5_VLMoeImageProcessorFast` instead."
        )
        logger.warning_once(message)
        super().__init__(*args, **kwargs)
# Public API: both the deprecated `Ernie4_5_VL_Moe*` aliases and the canonical
# `Ernie4_5_VLMoe*` names are exported so old imports keep working.
__all__ = [
    "Ernie4_5_VL_MoeConfig",
    "Ernie4_5_VL_MoeTextConfig",
    "Ernie4_5_VL_MoeVisionConfig",
    "Ernie4_5_VL_MoePreTrainedModel",
    "Ernie4_5_VL_MoeForConditionalGeneration",
    "Ernie4_5_VL_MoeModel",
    "Ernie4_5_VL_MoeTextModel",
    "Ernie4_5_VL_MoeVisionTransformerPretrainedModel",
    "Ernie4_5_VL_MoeVariableResolutionResamplerModel",
    "Ernie4_5_VL_MoeImageProcessor",
    "Ernie4_5_VL_MoeImageProcessorFast",
    "Ernie4_5_VLMoeConfig",
    "Ernie4_5_VLMoeTextConfig",
    "Ernie4_5_VLMoeVisionConfig",
    "Ernie4_5_VLMoePreTrainedModel",
    "Ernie4_5_VLMoeForConditionalGeneration",
    "Ernie4_5_VLMoeModel",
    "Ernie4_5_VLMoeTextModel",
    "Ernie4_5_VLMoeVisionTransformerPretrainedModel",
    "Ernie4_5_VLMoeVariableResolutionResamplerModel",
    "Ernie4_5_VLMoeImageProcessor",
    "Ernie4_5_VLMoeImageProcessorFast",
]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/ernie4_5_vl_moe/modular_ernie4_5_vl_moe.py",
"license": "Apache License 2.0",
"lines": 1634,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/ernie4_5_vl_moe/processing_ernie4_5_vl_moe.py | # Copyright 2025 Baidu and HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os.path
from pathlib import Path
from shutil import SameFileError, copyfile
import numpy as np
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import MultiModalData, ProcessingKwargs, ProcessorMixin, Unpack
from ...tokenization_utils_base import PreTokenizedInput, TextInput
from ...video_utils import VideoInput
class Ernie4_5_VLMoeProcessorKwargs(ProcessingKwargs, total=False):
    """Typed kwargs for `Ernie4_5_VLMoeProcessor.__call__` with the Ernie-specific defaults."""

    # Defaults merged into every `__call__`; any of these can be overridden per invocation.
    _defaults = {
        "text_kwargs": {
            "padding": False,
            "return_token_type_ids": False,
            "return_mm_token_type_ids": True,
        },
    }
class Ernie4_5_VLMoeProcessor(ProcessorMixin):
    r"""
    Constructs a Ernie 4.5 VL processor which wraps a Ernie 4.5 VL image processor and a Llama tokenizer into a single processor.

    [`Ernie4_5_VLMoeProcessor`] offers all the functionalities of [`Ernie4_5_VLMoeImageProcessor`] and [`LlamaTokenizerFast`]. See the
    [`~Ernie4_5_VLMoeProcessor.__call__`] and [`~Ernie4_5_VLMoeProcessor.decode`] for more information.

    Args:
        image_processor ([`Ernie4_5_VLMoeImageProcessor`], *optional*):
            The image processor is a required input.
        tokenizer ([`LlamaTokenizerFast`], *optional*):
            The tokenizer is a required input.
        video_processor ([`Ernie4_5_VLMoeVideoProcessor`], *optional*):
            The video processor is a required input.
        chat_template (`str`, *optional*): A Jinja template which will be used to convert lists of messages
            in a chat into a tokenizable string.
    """

    def __init__(self, image_processor=None, tokenizer=None, video_processor=None, chat_template=None, **kwargs):
        # NOTE(review): despite the `None` default, a tokenizer is effectively required --
        # the attribute accesses below fail immediately without one.
        # Cache the multimodal special tokens and their ids from the tokenizer.
        self.image_token = tokenizer.image_token
        self.image_end_token = tokenizer.image_end_token
        self.image_start_token = tokenizer.image_start_token
        self.video_token = tokenizer.video_token
        self.video_end_token = tokenizer.video_end_token
        self.video_start_token = tokenizer.video_start_token

        self.image_token_id = tokenizer.image_token_id
        self.image_end_token_id = tokenizer.image_end_token_id
        self.image_start_token_id = tokenizer.image_start_token_id
        self.video_token_id = tokenizer.video_token_id
        self.video_end_token_id = tokenizer.video_end_token_id
        self.video_start_token_id = tokenizer.video_start_token_id
        super().__init__(image_processor, tokenizer, video_processor, chat_template=chat_template)

    def save_pretrained(self, save_directory, push_to_hub: bool = False, **kwargs):
        """We additionally save a copy of the font to the `save_directory` (if we found a file there)"""
        os.makedirs(save_directory, exist_ok=True)
        if os.path.isfile(self.video_processor.font):
            try:
                copyfile(self.video_processor.font, Path(save_directory, Path(self.video_processor.font).name))
            except SameFileError:  # already exists which we allow (copy if needed)
                pass
        return super().save_pretrained(save_directory, push_to_hub, **kwargs)

    def __call__(
        self,
        images: ImageInput | None = None,
        text: TextInput | PreTokenizedInput | list[TextInput] | list[PreTokenizedInput] = None,
        videos: VideoInput | None = None,
        **kwargs: Unpack[Ernie4_5_VLMoeProcessorKwargs],
    ) -> BatchFeature:
        """
        Main method to prepare for the model one or several sequences(s) and image(s). This method forwards the `text`
        and `kwargs` arguments to Qwen2TokenizerFast's [`~Qwen2TokenizerFast.__call__`] if `text` is not `None` to encode
        the text. To prepare the vision inputs, this method forwards the `vision_infos` and `kwargs` arguments to
        Ernie4_5_VLMoeImageProcessor's [`~Ernie4_5_VLMoeImageProcessor.__call__`] if `vision_infos` is not `None`.

        Args:
            images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `list[PIL.Image.Image]`, `list[np.ndarray]`, `list[torch.Tensor]`):
                The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
                tensor. Both channels-first and channels-last formats are supported.
            text (`str`, `list[str]`, `list[list[str]]`):
                The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
                (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
                `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
            videos (`np.ndarray`, `torch.Tensor`, `list[np.ndarray]`, `list[torch.Tensor]`):
                The image or batch of videos to be prepared. Each video can be a 4D NumPy array or PyTorch
                tensor, or a nested list of 3D frames. Both channels-first and channels-last formats are supported.
            return_tensors (`str` or [`~utils.TensorType`], *optional*):
                If set, will return tensors of a particular framework. Acceptable values are:
                - `'pt'`: Return PyTorch `torch.Tensor` objects.
                - `'np'`: Return NumPy `np.ndarray` objects.

        Returns:
            [`BatchFeature`]: A [`BatchFeature`] with the following fields:

            - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
            - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
              `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
              `None`).
            - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
            - **pixel_values_videos** -- Pixel values of videos to be fed to a model. Returned when `videos` is not `None`.
            - **image_grid_thw** -- List of image 3D grid in LLM. Returned when `images` is not `None`.
            - **video_grid_thw** -- List of video 3D grid in LLM. Returned when `videos` is not `None`.
            - **mm_token_type_ids** -- List of token type ids differentiating between image, video and text input.
              Returned when `text` is not `None`.
        """
        output_kwargs = self._merge_kwargs(
            Ernie4_5_VLMoeProcessorKwargs,
            tokenizer_init_kwargs=self.tokenizer.init_kwargs,
            **kwargs,
        )
        # Fix: previously `image_inputs = videos_inputs = {}` bound both names to the
        # *same* dict object; use two distinct dicts so neither can alias the other.
        image_inputs, videos_inputs = {}, {}
        if images is not None:
            image_inputs = self.image_processor(images=images, **output_kwargs["images_kwargs"])
            image_grid_thw = image_inputs["image_grid_thw"]
        if videos is not None:
            videos_inputs = self.video_processor(videos=videos, **output_kwargs["videos_kwargs"])
            video_grid_thw = videos_inputs["video_grid_thw"]

        if not isinstance(text, list):
            text = [text]

        text = text.copy()  # below lines change text in-place
        if images is not None:
            # Each image placeholder expands to one token per merged patch group.
            merge_length = self.image_processor.merge_size**2
            index = 0
            for i in range(len(text)):
                while self.image_token in text[i]:
                    num_image_tokens = image_grid_thw[index].prod() // merge_length
                    text[i] = text[i].replace(self.image_token, "<|placeholder|>" * num_image_tokens, 1)
                    index += 1
                text[i] = text[i].replace("<|placeholder|>", self.image_token)

        if videos is not None:
            # Videos additionally merge along the temporal axis.
            merge_length = self.video_processor.merge_size**2 * self.video_processor.temporal_patch_size
            index = 0
            for i in range(len(text)):
                while self.video_token in text[i]:
                    num_video_tokens = video_grid_thw[index].prod() // merge_length
                    text[i] = text[i].replace(self.video_token, "<|placeholder|>" * num_video_tokens, 1)
                    index += 1
                text[i] = text[i].replace("<|placeholder|>", self.video_token)

        return_tensors = output_kwargs["text_kwargs"].pop("return_tensors", None)
        return_mm_token_type_ids = output_kwargs["text_kwargs"].pop("return_mm_token_type_ids", False)
        text_inputs = self.tokenizer(text, **output_kwargs["text_kwargs"], return_tensors=None)
        self._check_special_mm_tokens(text, text_inputs, modalities=["image", "video"])

        if return_mm_token_type_ids:
            array_ids = np.array(text_inputs["input_ids"])
            mm_token_type_ids = np.zeros_like(text_inputs["input_ids"])  # text
            mm_token_type_ids[array_ids == self.image_token_id] = 1  # img
            mm_token_type_ids[array_ids == self.video_token_id] = 2  # vid

            # moe additionally adds start/end tokens
            moe_mm_token_type_ids = np.copy(mm_token_type_ids)
            for token_id in [
                self.image_start_token_id,
                self.image_end_token_id,
            ]:
                moe_mm_token_type_ids[array_ids == token_id] = 1
            for token_id in [
                self.video_start_token_id,
                self.video_end_token_id,
            ]:
                moe_mm_token_type_ids[array_ids == token_id] = 2

            # convert to base type
            text_inputs["mm_token_type_ids"] = mm_token_type_ids.astype(int).tolist()
            text_inputs["moe_mm_token_type_ids"] = moe_mm_token_type_ids.astype(int).tolist()

        return BatchFeature(data={**text_inputs, **image_inputs, **videos_inputs}, tensor_type=return_tensors)

    @property
    def model_input_names(self):
        """Additional `mm_token_type_ids` used for modality isolated MoE"""
        model_input_names = super().model_input_names
        model_input_names.append("mm_token_type_ids")
        model_input_names.append("moe_mm_token_type_ids")
        return model_input_names

    def _get_num_multimodal_tokens(self, image_sizes=None, video_sizes=None, **kwargs):
        """
        Computes the number of placeholder tokens needed for multimodal inputs with the given sizes.

        Args:
            image_sizes (`list[list[int]]`, *optional*):
                The input sizes formatted as (height, width) per each image.
            video_sizes (`list[list[int]]`, *optional*):
                The input sizes formatted as (num_frames, height, width) per each video.

        Returns:
            `MultiModalData`: A `MultiModalData` object holding number of tokens per each of the provided
            input modalities, along with other useful data.
        """
        vision_data = {}
        if image_sizes is not None:
            # Copy the class-level defaults so `update(kwargs)` cannot mutate them in place.
            images_kwargs = dict(Ernie4_5_VLMoeProcessorKwargs._defaults.get("images_kwargs", {}))
            images_kwargs.update(kwargs)
            merge_size = images_kwargs.get("merge_size", None) or self.image_processor.merge_size

            num_image_patches = [
                self.image_processor.get_number_of_image_patches(*image_size, images_kwargs)
                for image_size in image_sizes
            ]
            num_image_tokens = [(num_patches // merge_size**2) for num_patches in num_image_patches]
            vision_data.update({"num_image_tokens": num_image_tokens, "num_image_patches": num_image_patches})

        if video_sizes is not None:
            videos_kwargs = dict(Ernie4_5_VLMoeProcessorKwargs._defaults.get("videos_kwargs", {}))
            videos_kwargs.update(kwargs)
            # Fix: `merge_size` was only bound in the image branch above, so passing
            # `video_sizes` without `image_sizes` raised a NameError. Videos use the
            # video processor's own merge size.
            merge_size = videos_kwargs.get("merge_size", None) or self.video_processor.merge_size
            temporal_merge_size = (
                videos_kwargs.get("temporal_patch_size", None) or self.video_processor.temporal_patch_size
            )
            num_video_patches = [
                self.video_processor.get_number_of_video_patches(*video_size, videos_kwargs)
                for video_size in video_sizes
            ]
            num_video_tokens = [
                (num_patches // merge_size**2 // temporal_merge_size) for num_patches in num_video_patches
            ]
            vision_data["num_video_tokens"] = num_video_tokens

        return MultiModalData(**vision_data)
# Only the processor class is part of this module's public API.
__all__ = ["Ernie4_5_VLMoeProcessor"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/ernie4_5_vl_moe/processing_ernie4_5_vl_moe.py",
"license": "Apache License 2.0",
"lines": 212,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/ernie4_5_vl_moe/video_processing_ernie4_5_vl_moe.py | # Copyright 2025 Baidu and HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os.path
from functools import partial
from pathlib import Path
from shutil import SameFileError, copyfile
from typing import Any
import numpy as np
import torch
from huggingface_hub import is_offline_mode
from huggingface_hub.dataclasses import validate_typed_dict
from PIL import ImageDraw, ImageFont
from torchvision.transforms.functional import pil_to_tensor, to_pil_image
from ...image_processing_utils import BatchFeature
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
PILImageResampling,
SizeDict,
get_image_size,
validate_kwargs,
)
from ...processing_utils import Unpack, VideosKwargs
from ...utils import (
IMAGE_PROCESSOR_NAME,
PROCESSOR_NAME,
VIDEO_PROCESSOR_NAME,
TensorType,
add_start_docstrings,
logging,
safe_load_json_file,
)
from ...utils.hub import cached_file
from ...utils.import_utils import is_tracing, requires
from ...video_processing_utils import BASE_VIDEO_PROCESSOR_DOCSTRING, BaseVideoProcessor
from ...video_utils import (
VideoInput,
VideoMetadata,
group_videos_by_shape,
infer_channel_dimension_format,
reorder_videos,
)
from .image_processing_ernie4_5_vl_moe import smart_resize
logger = logging.get_logger(__name__)
class Ernie4_5_VLMoeVideoProcessorInitKwargs(VideosKwargs, total=False):
    """Init-time kwargs accepted by `Ernie4_5_VLMoeVideoProcessor` on top of the base `VideosKwargs`."""

    patch_size: int
    temporal_patch_size: int
    merge_size: int
    min_frames: int
    max_frames: int
    draw_on_frames: bool
    font: str
@add_start_docstrings(
"Constructs a fast Ernie 4.5 VL image processor that dynamically resizes videos based on the original videos.",
BASE_VIDEO_PROCESSOR_DOCSTRING,
"""
patch_size (`int`, *optional*, defaults to 14):
The spacial patch size of the vision encoder.
temporal_patch_size (`int`, *optional*, defaults to 2):
The temporal patch size of the vision encoder.
merge_size (`int`, *optional*, defaults to 2):
The merge size of the vision encoder to llm encoder.
min_frames (`int`, *optional*, defaults to 16):
The minimum number of frames that can be sampled.
max_frames (`int`, *optional*, defaults to 180):
The maximum number of frames that can be sampled.
draw_on_frames (`bool`, *optional*, defaults to `True`):
Whether to draw timestamps on each frame or not.
This does not work with `torch.compile` but resembles
the performance of the original model.
font (`str`, *optional*, defaults to "Roboto-Regular.ttf"):
The associated font name for drawing on frames.
Defaults to "Roboto-Regular.ttf" and is expected to be
saved along the processor as separate file.
""",
)
@requires(backends=("torchvision",))
class Ernie4_5_VLMoeVideoProcessor(BaseVideoProcessor):
resample = PILImageResampling.BICUBIC
size = {"shortest_edge": 299 * 28 * 28, "longest_edge": 1196 * 28 * 28}
image_mean = OPENAI_CLIP_MEAN
image_std = OPENAI_CLIP_STD
do_resize = True
do_rescale = True
do_normalize = True
do_convert_rgb = True
patch_size = 14
temporal_patch_size = 2
merge_size = 2
min_frames = 16
max_frames = 180
do_sample_frames = True
draw_on_frames = True
font = "Roboto-Regular.ttf"
valid_kwargs = Ernie4_5_VLMoeVideoProcessorInitKwargs
model_input_names = ["pixel_values_videos", "video_grid_thw"]
def __init__(self, **kwargs: Unpack[Ernie4_5_VLMoeVideoProcessorInitKwargs]):
temporal_patch_size = kwargs.get("temporal_patch_size", 2)
if temporal_patch_size is None or temporal_patch_size != 2:
raise ValueError("`Ernie 4.5 VL` only supports a temporal patch size of 2")
size = kwargs.pop("size", None)
size = self.size if size is None else size
if "shortest_edge" not in size or "longest_edge" not in size:
raise ValueError("size must contain 'shortest_edge' and 'longest_edge' keys.")
super().__init__(size=size, **kwargs)
    @classmethod
    def get_video_processor_dict(
        cls, pretrained_model_name_or_path: str | os.PathLike, **kwargs
    ) -> tuple[dict[str, Any], dict[str, Any]]:
        """Overriden to additionally load the font for drawing on frames.

        Resolves the video processor config from a local file/directory or the Hub
        (nested `processor_config.json` first, then the standalone video/image
        processor files) and, when `draw_on_frames` is enabled, also resolves and
        validates the font file referenced by the config.
        """
        # Pop the hub-resolution kwargs; whatever remains is returned to the caller.
        cache_dir = kwargs.pop("cache_dir", None)
        force_download = kwargs.pop("force_download", False)
        proxies = kwargs.pop("proxies", None)
        token = kwargs.pop("token", None)
        local_files_only = kwargs.pop("local_files_only", False)
        revision = kwargs.pop("revision", None)
        subfolder = kwargs.pop("subfolder", "")
        from_pipeline = kwargs.pop("_from_pipeline", None)
        from_auto_class = kwargs.pop("_from_auto", False)
        user_agent = {"file_type": "video processor", "from_auto_class": from_auto_class}
        if from_pipeline is not None:
            user_agent["using_pipeline"] = from_pipeline
        if is_offline_mode() and not local_files_only:
            logger.info("Offline mode: forcing local_files_only=True")
            local_files_only = True
        pretrained_model_name_or_path = str(pretrained_model_name_or_path)
        is_local = os.path.isdir(pretrained_model_name_or_path)
        if os.path.isfile(pretrained_model_name_or_path):
            # A direct path to a config file: no nested processor config to consult.
            resolved_video_processor_file = pretrained_model_name_or_path
            resolved_processor_file = None
            is_local = True
        else:
            video_processor_file = VIDEO_PROCESSOR_NAME
            try:
                # Try to load with a new config name first and if not successful try with the old file name
                # NOTE: we save all processor configs as nested dict in PROCESSOR_NAME from v5, which is the standard
                resolved_processor_file = cached_file(
                    pretrained_model_name_or_path,
                    filename=PROCESSOR_NAME,
                    cache_dir=cache_dir,
                    force_download=force_download,
                    proxies=proxies,
                    local_files_only=local_files_only,
                    token=token,
                    user_agent=user_agent,
                    revision=revision,
                    subfolder=subfolder,
                    _raise_exceptions_for_missing_entries=False,
                )
                # Legacy fallbacks: a standalone video processor config, then the image processor config.
                resolved_video_processor_files = [
                    resolved_file
                    for filename in [video_processor_file, IMAGE_PROCESSOR_NAME]
                    if (
                        resolved_file := cached_file(
                            pretrained_model_name_or_path,
                            filename=filename,
                            cache_dir=cache_dir,
                            force_download=force_download,
                            proxies=proxies,
                            local_files_only=local_files_only,
                            token=token,
                            user_agent=user_agent,
                            revision=revision,
                            subfolder=subfolder,
                            _raise_exceptions_for_missing_entries=False,
                        )
                    )
                    is not None
                ]
                resolved_video_processor_file = (
                    resolved_video_processor_files[0] if resolved_video_processor_files else None
                )
            except OSError:
                # Raise any OS error raise by `cached_file`. It will have a helpful error message adapted to
                # the original exception.
                raise
            except Exception:
                # For any other exception, we throw a generic error.
                raise OSError(
                    f"Can't load video processor for '{pretrained_model_name_or_path}'. If you were trying to load"
                    " it from 'https://huggingface.co/models', make sure you don't have a local directory with the"
                    f" same name. Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a"
                    f" directory containing a {video_processor_file} file"
                )
        # Load video_processor dict. Priority goes as (nested config if found -> video processor config -> image processor config)
        # We are downloading both configs because almost all models have a `processor_config.json` but
        # not all of these are nested. We need to check if it was saved recently as nested or if it is legacy style
        video_processor_dict = None
        if resolved_processor_file is not None:
            processor_dict = safe_load_json_file(resolved_processor_file)
            if "video_processor" in processor_dict:
                video_processor_dict = processor_dict["video_processor"]
        if resolved_video_processor_file is not None and video_processor_dict is None:
            video_processor_dict = safe_load_json_file(resolved_video_processor_file)
        if video_processor_dict is None:
            raise OSError(
                f"Can't load video processor for '{pretrained_model_name_or_path}'. If you were trying to load"
                " it from 'https://huggingface.co/models', make sure you don't have a local directory with the"
                f" same name. Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a"
                f" directory containing a {video_processor_file} file"
            )
        # Specific to Ernie 4.5 VL Moe, we load the font file along the json (if we draw on frames)
        draws_on_frames = video_processor_dict.get("draw_on_frames")
        if (font_name := video_processor_dict.get("font")) is None and draws_on_frames:
            raise AttributeError(
                "Expected a `font` to be saved when using `draw_on_frames` in Ernie 4.5 VL Moe; found nothing."
            )
        if font_name is not None and draws_on_frames:
            # Replace the font *name* with the resolved local *path* so the processor can open it.
            video_processor_dict["font"] = cached_file(
                pretrained_model_name_or_path,
                filename=font_name,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                local_files_only=local_files_only,
                token=token,
                user_agent=user_agent,
                revision=revision,
                subfolder=subfolder,
                _raise_exceptions_for_missing_entries=False,
            )
            # Eagerly verify the font is actually loadable before returning the dict.
            try:
                ImageFont.truetype(video_processor_dict["font"])
            except (TypeError, OSError):
                raise OSError(
                    f"Could not find an associated font file for {video_processor_dict['font']}. "
                    "Make sure to save a font file along for Ernie 4.5 VL Moe."
                )
        if is_local:
            logger.info(f"loading configuration file {resolved_video_processor_file}")
        else:
            logger.info(
                f"loading configuration file {video_processor_file} from cache at {resolved_video_processor_file}"
            )
        return video_processor_dict, kwargs
def to_dict(self) -> dict[str, Any]:
"""Overriden to strip the prefix of the full path for the font, e.g. `tmp/folder/font.tff` -> `font.tff`"""
output = super().to_dict()
if os.path.isfile(output.get("font")):
output["font"] = Path(output["font"]).name
elif output.get("draw_on_frames"):
raise ValueError(
f"The video processor dict contains an invalid path to its font: {output['font']}. "
"Please make sure to contain a valid path or disable `draw_on_frames`."
)
return output
def save_pretrained(self, save_directory: str | os.PathLike, push_to_hub: bool = False, **kwargs):
"""We additionally save a copy of the font to the `save_directory` (if we found a file there)"""
os.makedirs(save_directory, exist_ok=True)
if os.path.isfile(self.font):
try:
copyfile(self.font, Path(save_directory, Path(self.font).name))
except SameFileError: # already exists which we allow (copy if needed)
pass
return super().save_pretrained(save_directory, push_to_hub, **kwargs)
def _further_process_kwargs(
self,
size: SizeDict | None = None,
**kwargs,
) -> dict:
"""
Update kwargs that need further processing before being validated
Can be overridden by subclasses to customize the processing of kwargs.
"""
if size is not None and ("shortest_edge" not in size or "longest_edge" not in size):
raise ValueError("size must contain 'shortest_edge' and 'longest_edge' keys.")
return super()._further_process_kwargs(size=size, **kwargs)
    def sample_frames(
        self,
        metadata: VideoMetadata,
        min_frames: int | None = None,
        max_frames: int | None = None,
        num_frames: int | None = None,
        fps: int | float | None = None,
        **kwargs,
    ):
        """Return evenly spaced frame indices to sample from a video.

        Args:
            metadata (`VideoMetadata`): Video metadata (total frame count, fps, ...).
            min_frames (`int`, *optional*): Lower bound on sampled frames; defaults to `self.min_frames`.
            max_frames (`int`, *optional*): Upper bound on sampled frames; defaults to `self.max_frames`.
            num_frames (`int`, *optional*): Exact number of frames to sample; mutually exclusive with `fps`.
            fps (`int` or `float`, *optional*): Sample at this rate instead of a fixed count.

        Returns:
            `torch.Tensor`: Integer indices of the frames to keep.

        Raises:
            ValueError: If both `num_frames` and `fps` are given, if an explicit `num_frames`
                falls outside `[min_frames, max_frames]`, if `fps` is requested without usable
                metadata, or if the requested count exceeds the total number of frames.
        """
        if fps is not None and num_frames is not None:
            raise ValueError("`num_frames` and `fps` are mutually exclusive arguments, please use only one!")
        # NOTE(review): `self.num_frames` is not set in this class -- presumably declared
        # on the base video processor. TODO confirm.
        num_frames = num_frames if num_frames is not None else self.num_frames
        min_frames = min_frames if min_frames is not None else self.min_frames
        max_frames = max_frames if max_frames is not None else self.max_frames
        total_num_frames = metadata.total_num_frames
        if num_frames is not None:
            # Explicit frame count: enforce the configured bounds.
            if num_frames < min_frames or num_frames > max_frames:
                raise ValueError(f"`num_frames` must be {min_frames} <= x <= {max_frames}. Got {num_frames} instead.")
        else:
            if fps is not None and (metadata is None or metadata.fps is None):
                raise ValueError(
                    "Asked to sample `fps` frames per second but no video metadata was provided which is required when sampling with `fps`. "
                    "Please pass in `VideoMetadata` object or use a fixed `num_frames` per input video"
                )
            # May be fractional here; clamped into [min_frames, max_frames] and to the total below.
            num_frames = total_num_frames / metadata.fps * fps if fps is not None else total_num_frames
            num_frames = min(max(num_frames, min_frames), max_frames, total_num_frames)
        # Only reachable with an explicit `num_frames` (the else-branch clamps to the total).
        if num_frames > total_num_frames:
            raise ValueError(
                f"Video can't be sampled. The inferred `num_frames={num_frames}` exceeds `total_num_frames={total_num_frames}`. "
                "Decrease `num_frames` or `fps` for sampling."
            )
        # Uniform stride over the whole clip, truncated to integer frame indices.
        indices = torch.arange(0, total_num_frames, total_num_frames / num_frames).int()
        return indices
def _convert_timestamp(self, time_stamp_in_seconds):
"""Convert to `time: hr:min:sec` format"""
hours = time_stamp_in_seconds // 3600
time_stamp_in_seconds = time_stamp_in_seconds % 3600
mins = time_stamp_in_seconds // 60
time_stamp_in_seconds = time_stamp_in_seconds % 60
return f"time: {int(hours):02d}:{int(mins):02d}:{time_stamp_in_seconds:05.02f}"
def _render_image_with_timestamp(self, image: torch.Tensor, timestamp: str, size_factor: float = 0.1):
"""Draws a black timestamp with a white border on the corner of the frame"""
if self.font is None:
raise AttributeError("To draw on frames with Ernie 4.5 VL, you need an associated font; found nothing")
# FIXME: conversion `torch->PIL->torch` is inefficient ~6ms per frame
# Left for optimization if anyone want to pick it up
#
# This can take up to ~1s in preprocessing (if default sampling is used):
# 180 (frames) x 6ms = 1080ms = ~1,1s
image = to_pil_image(image)
font_size = int(min(*image.size) * size_factor)
outline_size = int(font_size * size_factor)
font = ImageFont.truetype(self.font, font_size)
# Draw a black text with a white border
draw = ImageDraw.Draw(image)
draw.text(
(0, 0),
timestamp,
font=font,
fill=(0, 0, 0),
stroke_width=outline_size,
stroke_fill=(255, 255, 255),
)
return pil_to_tensor(image)
    def _prepare_input_videos(
        self,
        videos: VideoInput,
        input_data_format: str | ChannelDimension | None = None,
        device: str | None = None,
        video_metadata: list[VideoMetadata] | None = None,
        draw_on_frames: bool = True,
    ) -> list["torch.Tensor"]:
        """
        Prepare the input videos for processing.

        Converts each video to a channels-first `torch.Tensor`, optionally draws a
        timestamp on every frame, duplicates the last frame when the frame count is
        odd (to satisfy the temporal patch size of 2), and moves the result to
        `device` if given.

        NOTE(review): `video_metadata` is typed optional but is zipped with `videos`
        unconditionally -- passing `None` would raise; callers presumably always
        provide it. TODO confirm.
        """
        processed_videos = []
        for video, metadata in zip(videos, video_metadata):
            # Check for attributes that are necessary to draw timestamps on frames
            if draw_on_frames:
                if metadata is None:
                    raise ValueError("Need video metadata to process videos in Ernie 4.5 VL using `draw_on_frames`")
                elif metadata.fps is None:
                    # Fall back to a common default frame rate when none was inferred.
                    metadata.fps = 24
                    logger.warning_once(
                        "Could not infer the fps of a video due to the metadata not being available, "
                        "defaulting to `24`. Please provide `video_metadata` for more accurate results."
                    )
            # `make_batched_videos` always returns a 4D array per video
            if isinstance(video, np.ndarray):
                # not using F.to_tensor as it doesn't handle (C, H, W) numpy arrays
                video = torch.from_numpy(video).contiguous()
            # Infer the channel dimension format if not provided
            # (inferred once from the first video, then reused for the remaining ones)
            if input_data_format is None:
                input_data_format = infer_channel_dimension_format(video)
            if input_data_format == ChannelDimension.LAST:
                video = video.permute(0, 3, 1, 2).contiguous()
            # specific to ernie, draws timestamps on each frame (if enabled)
            if draw_on_frames:
                if is_tracing(video):
                    raise RuntimeError(
                        "Using `torch.compile` is not compatible with drawing on frames. "
                        "Either don't use `torch.compile` or don't draw on frames via the kwarg `draw_on_frames=False`."
                    )
                # Frames are overwritten in place with their stamped versions.
                for idx, frame in enumerate(video):
                    video[idx] = self._render_image_with_timestamp(
                        frame, self._convert_timestamp(metadata.timestamps[idx])
                    )
            # last frame is copied if uneven (mitigating issues for temporal patch size)
            if video.shape[0] % 2 != 0:
                video = torch.cat((video, video[-1].detach().clone()[None, ...]), dim=0)
            if device is not None:
                video = video.to(device)
            processed_videos.append(video)
        return processed_videos
    def _preprocess(
        self,
        videos: list[torch.Tensor],
        do_convert_rgb: bool = True,
        do_resize: bool = True,
        size: SizeDict | None = None,
        interpolation: PILImageResampling = PILImageResampling.BICUBIC,
        do_rescale: bool = True,
        rescale_factor: float = 1 / 255.0,
        do_normalize: bool = True,
        image_mean: float | list[float] | None = None,
        image_std: float | list[float] | None = None,
        patch_size: int | None = None,
        merge_size: int | None = None,
        return_tensors: str | TensorType | None = None,
        **kwargs,
    ):
        """
        Resize, rescale/normalize and patchify the prepared videos.

        Returns a `BatchFeature` with:
            - `pixel_values_videos`: all videos' flattened patches concatenated along dim 0,
              each row of width `channel * patch_size * patch_size`.
            - `video_grid_thw`: one `[grid_t, grid_h, grid_w]` entry per video.
        """
        # Group videos by size for batched resizing
        grouped_videos, grouped_videos_index = group_videos_by_shape(videos)
        resized_videos_grouped = {}
        for shape, stacked_videos in grouped_videos.items():
            if do_convert_rgb:
                stacked_videos = self.convert_to_rgb(stacked_videos)
            height, width = get_image_size(stacked_videos[0], channel_dim=ChannelDimension.FIRST)
            resized_height, resized_width = height, width
            if do_resize:
                # Snap H/W to multiples of patch_size * merge_size while keeping the pixel
                # count within [shortest_edge, longest_edge].
                resized_height, resized_width = smart_resize(
                    height,
                    width,
                    factor=patch_size * merge_size,
                    min_pixels=size["shortest_edge"],
                    max_pixels=size["longest_edge"],
                )
                stacked_videos = self.resize(
                    image=stacked_videos,
                    size=SizeDict(height=resized_height, width=resized_width),
                    interpolation=interpolation,
                )
            resized_videos_grouped[shape] = stacked_videos
        resized_videos = reorder_videos(resized_videos_grouped, grouped_videos_index)
        # Group videos by size for further processing
        # Needed in case do_resize is False, or resize returns videos with different sizes
        grouped_videos, grouped_videos_index = group_videos_by_shape(resized_videos)
        processed_videos_grouped = {}
        processed_grids = {}
        for shape, stacked_videos in grouped_videos.items():
            resized_height, resized_width = get_image_size(stacked_videos[0], channel_dim=ChannelDimension.FIRST)
            # Fused rescale and normalize
            stacked_videos = self.rescale_and_normalize(
                stacked_videos, do_rescale, rescale_factor, do_normalize, image_mean, image_std
            )
            patches = stacked_videos
            # NOTE(review): assumes `patches` is (batch, frames, channel, H, W) and that
            # grid_t equals the frame count (no temporal patch grouping here) — confirm.
            batch_size, grid_t, channel = patches.shape[:3]
            grid_h, grid_w = resized_height // patch_size, resized_width // patch_size
            # Split spatial dims into (merge blocks, merge, patch) so that patches sharing
            # a merge window become adjacent after the permute below.
            patches = patches.view(
                batch_size,
                grid_t,
                channel,
                grid_h // merge_size,
                merge_size,
                patch_size,
                grid_w // merge_size,
                merge_size,
                patch_size,
            )
            # Reorder dimensions to group grid and patch information for subsequent flattening.
            # [batch, grid_t, grid_h/merge, grid_w/merge, merge, merge, channel, patch, patch]
            patches = patches.permute(0, 1, 3, 6, 4, 7, 2, 5, 8)
            flatten_patches = patches.reshape(
                batch_size,
                grid_t * grid_h * grid_w,
                channel * patch_size * patch_size,
            )
            processed_videos_grouped[shape] = flatten_patches
            # Every video in this size-group shares the same grid dimensions.
            processed_grids[shape] = [[grid_t, grid_h, grid_w]] * batch_size
        processed_videos = reorder_videos(processed_videos_grouped, grouped_videos_index)
        processed_grids = reorder_videos(processed_grids, grouped_videos_index)
        pixel_values_videos = torch.cat(processed_videos, dim=0)
        video_grid_thw = torch.tensor(processed_grids)
        return BatchFeature(
            data={"pixel_values_videos": pixel_values_videos, "video_grid_thw": video_grid_thw},
            tensor_type=return_tensors,
        )
@add_start_docstrings(
BASE_VIDEO_PROCESSOR_DOCSTRING,
)
def preprocess(
self,
videos: VideoInput,
**kwargs: Unpack[VideosKwargs],
) -> BatchFeature:
validate_kwargs(
captured_kwargs=kwargs.keys(),
valid_processor_keys=list(self.valid_kwargs.__annotations__.keys()) + ["return_tensors"],
)
# Perform type validation on received kwargs
validate_typed_dict(self.valid_kwargs, kwargs)
# Set default kwargs from self. This ensures that if a kwarg is not provided
# by the user, it gets its default value from the instance, or is set to None.
for kwarg_name in self.valid_kwargs.__annotations__:
kwargs.setdefault(kwarg_name, getattr(self, kwarg_name, None))
input_data_format = kwargs.pop("input_data_format")
do_sample_frames = kwargs.pop("do_sample_frames")
device = kwargs.pop("device")
video_metadata = kwargs.pop("video_metadata")
draw_on_frames = kwargs.pop("draw_on_frames")
sample_indices_fn = partial(self.sample_frames, **kwargs) if do_sample_frames else None
videos, video_metadata = self._decode_and_sample_videos(
videos,
video_metadata=video_metadata,
do_sample_frames=do_sample_frames,
sample_indices_fn=sample_indices_fn,
)
videos = self._prepare_input_videos(
videos=videos,
input_data_format=input_data_format,
device=device,
video_metadata=video_metadata,
draw_on_frames=draw_on_frames,
)
kwargs = self._further_process_kwargs(**kwargs)
self._validate_preprocess_kwargs(**kwargs)
# Pop kwargs that are not needed in _preprocess
kwargs.pop("data_format")
return_metadata = kwargs.pop("return_metadata")
preprocessed_videos = self._preprocess(videos=videos, **kwargs)
if return_metadata:
preprocessed_videos["video_metadata"] = video_metadata
return preprocessed_videos
# Public API of this module when star-imported.
__all__ = ["Ernie4_5_VLMoeVideoProcessor"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/ernie4_5_vl_moe/video_processing_ernie4_5_vl_moe.py",
"license": "Apache License 2.0",
"lines": 523,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:tests/models/ernie4_5_vl_moe/test_image_processing_ernie4_5_vl_moe.py | # Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import tempfile
import unittest
import numpy as np
from transformers.image_utils import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, load_image
from transformers.models.ernie4_5_vl_moe.image_processing_ernie4_5_vl_moe import smart_resize
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_torchvision_available, is_vision_available
from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs
from ...test_processing_common import url_to_local_path
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import Ernie4_5_VLMoeImageProcessor
if is_torchvision_available():
from transformers import Ernie4_5_VLMoeImageProcessorFast
class Ernie4_5_VLMoeImageProcessorTester:
    """Holds the image-processor configuration used by the tests and builds matching inputs."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=56,
        max_resolution=1024,
        size=None,
        do_resize=True,
        do_normalize=True,
        do_convert_rgb=True,
        image_mean=OPENAI_CLIP_MEAN,
        image_std=OPENAI_CLIP_STD,
        patch_size=14,
        merge_size=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        # Default pixel budget: minimum 56*56 pixels, maximum 6177 patches of 28*28.
        self.size = {"shortest_edge": 56 * 56, "longest_edge": 6177 * 28 * 28} if size is None else size
        self.do_resize = do_resize
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.image_mean = image_mean
        self.image_std = image_std
        self.patch_size = patch_size
        self.merge_size = merge_size

    def prepare_image_processor_dict(self):
        """Return the kwargs with which every processor instance under test is built."""
        return dict(
            do_resize=self.do_resize,
            image_mean=self.image_mean,
            image_std=self.image_std,
            size=self.size,
            patch_size=self.patch_size,
            merge_size=self.merge_size,
        )

    def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        """Build a batch of random images, each wrapped in its own single-image list."""
        batch = prepare_image_inputs(
            batch_size=self.batch_size,
            num_channels=self.num_channels,
            min_resolution=self.min_resolution,
            max_resolution=self.max_resolution,
            equal_resolution=equal_resolution,
            numpify=numpify,
            torchify=torchify,
        )
        return [[img] for img in batch]
@require_torch
@require_vision
class Ernie4_5_VLMoeImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase):
    """Tests for the slow and fast Ernie 4.5 VL MoE image processors."""

    image_processing_class = Ernie4_5_VLMoeImageProcessor if is_vision_available() else None
    fast_image_processing_class = Ernie4_5_VLMoeImageProcessorFast if is_torchvision_available() else None

    def setUp(self):
        super().setUp()
        self.image_processor_tester = Ernie4_5_VLMoeImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        # Kwargs used to instantiate every processor in the tests below.
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        """Both processor classes expose the expected configuration attributes."""
        for image_processing_class in self.image_processor_list:
            image_processing = image_processing_class(**self.image_processor_dict)
            self.assertTrue(hasattr(image_processing, "do_normalize"))
            self.assertTrue(hasattr(image_processing, "image_mean"))
            self.assertTrue(hasattr(image_processing, "image_std"))
            self.assertTrue(hasattr(image_processing, "do_resize"))
            self.assertTrue(hasattr(image_processing, "size"))
            self.assertTrue(hasattr(image_processing, "do_convert_rgb"))
            self.assertTrue(hasattr(image_processing, "patch_size"))
            self.assertTrue(hasattr(image_processing, "merge_size"))

    def test_image_processor_from_dict_with_kwargs(self):
        """`from_dict` respects the serialized `size` and kwargs override it."""
        for image_processing_class in self.image_processor_list:
            image_processor = image_processing_class.from_dict(self.image_processor_dict)
            self.assertEqual(image_processor.size["shortest_edge"], 56 * 56)
            self.assertEqual(image_processor.size["longest_edge"], 6177 * 28 * 28)
            image_processor = image_processing_class.from_dict(
                self.image_processor_dict,
                size={"shortest_edge": 256 * 256, "longest_edge": 640 * 640},
            )
            self.assertEqual(image_processor.size["shortest_edge"], 256 * 256)
            self.assertEqual(image_processor.size["longest_edge"], 640 * 640)

    def test_select_best_resolution(self):
        """`smart_resize` rounds a resolution to the nearest multiples of the factor."""
        # Test with a final resize resolution
        best_resolution = smart_resize(561, 278, factor=28)
        self.assertEqual(best_resolution, (560, 280))

    def test_call_pil(self):
        """Processing PIL inputs yields the expected patch and grid shapes."""
        for image_processing_class in self.image_processor_list:
            # Initialize image_processing
            image_processing = image_processing_class(**self.image_processor_dict)
            # create random PIL images
            image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=True)
            for image in image_inputs:
                self.assertIsInstance(image[0], Image.Image)
            # Test not batched input
            process_out = image_processing(image_inputs[0], return_tensors="pt")
            encoded_images = process_out.pixel_values
            image_grid_thws = process_out.image_grid_thw
            expected_output_image_shape = (5476, 588)
            expected_image_grid_thws = torch.Tensor([[1, 74, 74]])
            self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape)
            self.assertTrue((image_grid_thws == expected_image_grid_thws).all())
            # Test batched
            process_out = image_processing(image_inputs, return_tensors="pt")
            encoded_images = process_out.pixel_values
            image_grid_thws = process_out.image_grid_thw
            expected_output_image_shape = (38332, 588)
            expected_image_grid_thws = torch.Tensor([[1, 74, 74]] * 7)
            self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape)
            self.assertTrue((image_grid_thws == expected_image_grid_thws).all())

    def test_call_numpy(self):
        """Processing numpy inputs yields the expected patch and grid shapes."""
        for image_processing_class in self.image_processor_list:
            # Initialize image_processing
            image_processing = image_processing_class(**self.image_processor_dict)
            # create random numpy tensors
            image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=True, numpify=True)
            for image in image_inputs:
                self.assertIsInstance(image[0], np.ndarray)
            # Test not batched input
            process_out = image_processing(image_inputs[0], return_tensors="pt")
            encoded_images = process_out.pixel_values
            image_grid_thws = process_out.image_grid_thw
            expected_output_image_shape = (5476, 588)
            expected_image_grid_thws = torch.Tensor([[1, 74, 74]])
            self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape)
            self.assertTrue((image_grid_thws == expected_image_grid_thws).all())
            # Test batched
            process_out = image_processing(image_inputs, return_tensors="pt")
            encoded_images = process_out.pixel_values
            image_grid_thws = process_out.image_grid_thw
            expected_output_image_shape = (38332, 588)
            expected_image_grid_thws = torch.Tensor([[1, 74, 74]] * 7)
            self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape)
            self.assertTrue((image_grid_thws == expected_image_grid_thws).all())

    def test_call_pytorch(self):
        """Processing torch-tensor inputs yields the expected patch and grid shapes."""
        for image_processing_class in self.image_processor_list:
            # Initialize image_processing
            image_processing = image_processing_class(**self.image_processor_dict)
            # create random PyTorch tensors
            image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=True, torchify=True)
            for image in image_inputs:
                self.assertIsInstance(image[0], torch.Tensor)
            # Test not batched input
            process_out = image_processing(image_inputs[0], return_tensors="pt")
            encoded_images = process_out.pixel_values
            image_grid_thws = process_out.image_grid_thw
            expected_output_image_shape = (5476, 588)
            expected_image_grid_thws = torch.Tensor([[1, 74, 74]])
            self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape)
            self.assertTrue((image_grid_thws == expected_image_grid_thws).all())
            # Test batched
            process_out = image_processing(image_inputs, return_tensors="pt")
            encoded_images = process_out.pixel_values
            image_grid_thws = process_out.image_grid_thw
            expected_output_image_shape = (38332, 588)
            expected_image_grid_thws = torch.Tensor([[1, 74, 74]] * 7)
            self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape)
            self.assertTrue((image_grid_thws == expected_image_grid_thws).all())

    # NOTE(review): the skip reason string misspells the class name ("Erni4_5_VL") — cosmetic only.
    @unittest.skip(reason="Erni4_5_VLImageProcessor doesn't treat 4 channel PIL and numpy consistently yet")
    def test_call_numpy_4_channels(self):
        pass

    def test_nested_input(self):
        """Flat and nested input lists produce identical pixel values and grids."""
        for image_processing_class in self.image_processor_list:
            image_processing = image_processing_class(**self.image_processor_dict)
            image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=True)
            # Test batched as a list of images
            process_out = image_processing(image_inputs, return_tensors="pt")
            encoded_images = process_out.pixel_values
            image_grid_thws = process_out.image_grid_thw
            expected_output_image_shape = (38332, 588)
            expected_image_grid_thws = torch.Tensor([[1, 74, 74]] * 7)
            self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape)
            self.assertTrue((image_grid_thws == expected_image_grid_thws).all())
            # Test batched as a nested list of images, where each sublist is one batch
            image_inputs_nested = image_inputs[:3] + image_inputs[3:]
            process_out = image_processing(image_inputs_nested, return_tensors="pt")
            encoded_images_nested = process_out.pixel_values
            image_grid_thws_nested = process_out.image_grid_thw
            expected_output_image_shape = (38332, 588)
            expected_image_grid_thws = torch.Tensor([[1, 74, 74]] * 7)
            self.assertEqual(tuple(encoded_images_nested.shape), expected_output_image_shape)
            self.assertTrue((image_grid_thws == expected_image_grid_thws).all())
            # Image processor should return same pixel values, independently of input format
            self.assertTrue((encoded_images_nested == encoded_images).all())
            self.assertTrue((image_grid_thws_nested == expected_image_grid_thws).all())

    def test_custom_image_size(self):
        """A custom `size` passed at load time constrains the output patch count."""
        for image_processing_class in self.image_processor_list:
            image_processing = image_processing_class(**self.image_processor_dict)
            with tempfile.TemporaryDirectory() as tmpdirname:
                image_processing.save_pretrained(tmpdirname)
                image_processor_loaded = image_processing_class.from_pretrained(
                    tmpdirname, size={"shortest_edge": 28 * 28, "longest_edge": 56 * 56}
                )
            image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=True)
            process_out = image_processor_loaded(image_inputs, return_tensors="pt")
            expected_output_image_shape = [112, 588]
            self.assertListEqual(list(process_out.pixel_values.shape), expected_output_image_shape)

    def test_custom_pixels(self):
        """Smoke test: arbitrary shortest/longest pixel budgets must not raise."""
        pixel_choices = frozenset(itertools.product((100, 150, 200, 20000), (100, 150, 200, 20000)))
        for image_processing_class in self.image_processor_list:
            image_processor_dict = self.image_processor_dict.copy()
            for a_pixels, b_pixels in pixel_choices:
                image_processor_dict["size"] = {
                    "shortest_edge": min(a_pixels, b_pixels),
                    "longest_edge": max(a_pixels, b_pixels),
                }
                image_processor = image_processing_class(**image_processor_dict)
                image_inputs = self.image_processor_tester.prepare_image_inputs()
                # Just checking that it doesn't raise an error
                image_processor(image_inputs, return_tensors="pt")

    @require_vision
    @require_torch
    def test_slow_fast_equivalence(self):
        """Slow and fast processors agree on a real single image."""
        dummy_image = load_image(url_to_local_path("http://images.cocodataset.org/val2017/000000039769.jpg"))
        if not self.test_slow_image_processor or not self.test_fast_image_processor:
            self.skipTest(reason="Skipping slow/fast equivalence test")
        if self.image_processing_class is None or self.fast_image_processing_class is None:
            self.skipTest(reason="Skipping slow/fast equivalence test as one of the image processors is not defined")
        image_processor_slow = self.image_processing_class(**self.image_processor_dict)
        image_processor_fast = self.fast_image_processing_class(**self.image_processor_dict)
        encoding_slow = image_processor_slow(dummy_image, return_tensors="pt")
        encoding_fast = image_processor_fast(dummy_image, return_tensors="pt")
        self._assert_slow_fast_tensors_equivalence(encoding_slow.pixel_values, encoding_fast.pixel_values)
        self.assertEqual(encoding_slow.image_grid_thw.dtype, encoding_fast.image_grid_thw.dtype)
        self._assert_slow_fast_tensors_equivalence(
            encoding_slow.image_grid_thw.float(), encoding_fast.image_grid_thw.float()
        )

    @require_vision
    @require_torch
    def test_slow_fast_equivalence_batched(self):
        """Slow and fast processors agree on a batch of random-resolution images."""
        if not self.test_slow_image_processor or not self.test_fast_image_processor:
            self.skipTest(reason="Skipping slow/fast equivalence test")
        if self.image_processing_class is None or self.fast_image_processing_class is None:
            self.skipTest(reason="Skipping slow/fast equivalence test as one of the image processors is not defined")
        if hasattr(self.image_processor_tester, "do_center_crop") and self.image_processor_tester.do_center_crop:
            self.skipTest(
                reason="Skipping as do_center_crop is True and center_crop functions are not equivalent for fast and slow processors"
            )
        dummy_images = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, torchify=True)
        image_processor_slow = self.image_processing_class(**self.image_processor_dict)
        image_processor_fast = self.fast_image_processing_class(**self.image_processor_dict)
        encoding_slow = image_processor_slow(dummy_images, return_tensors="pt")
        encoding_fast = image_processor_fast(dummy_images, return_tensors="pt")
        self._assert_slow_fast_tensors_equivalence(encoding_slow.pixel_values, encoding_fast.pixel_values)
        self.assertEqual(encoding_slow.image_grid_thw.dtype, encoding_fast.image_grid_thw.dtype)
        self._assert_slow_fast_tensors_equivalence(
            encoding_slow.image_grid_thw.float(), encoding_fast.image_grid_thw.float()
        )

    def test_get_num_patches_without_images(self):
        """`get_number_of_image_patches` predicts patch counts from dimensions alone."""
        for image_processing_class in self.image_processor_list:
            image_processing = image_processing_class(**self.image_processor_dict)
            num_patches = image_processing.get_number_of_image_patches(height=100, width=100, images_kwargs={})
            self.assertEqual(num_patches, 64)
            num_patches = image_processing.get_number_of_image_patches(height=200, width=50, images_kwargs={})
            self.assertEqual(num_patches, 56)
            num_patches = image_processing.get_number_of_image_patches(
                height=100, width=100, images_kwargs={"patch_size": 28}
            )
            self.assertEqual(num_patches, 16)
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/ernie4_5_vl_moe/test_image_processing_ernie4_5_vl_moe.py",
"license": "Apache License 2.0",
"lines": 292,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:tests/models/ernie4_5_vl_moe/test_processing_ernie4_5_vl_moe.py | # Copyright 2025 HuggingFace Inc team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import AutoProcessor, TokenizersBackend
from transformers.testing_utils import require_av, require_torch, require_torchvision, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_processing_common import ProcessorTesterMixin
if is_vision_available():
from transformers import Ernie4_5_VLMoeImageProcessorFast, Ernie4_5_VLMoeProcessor
if is_torch_available():
import torch
@require_vision
@require_torch
@require_torchvision
class Ernie4_5_VLMoeProcessorTest(ProcessorTesterMixin, unittest.TestCase):
    """Tests for the Ernie 4.5 VL MoE processor (tokenizer + image/video processors)."""

    processor_class = Ernie4_5_VLMoeProcessor

    @classmethod
    def setUpClass(cls):
        # Persist a small processor to a temp dir so each test can reload it.
        cls.tmpdirname = tempfile.mkdtemp()
        processor = Ernie4_5_VLMoeProcessor.from_pretrained(
            "hf-internal-testing/Ernie-VL-Moe-Small",
            patch_size=4,
            size={"shortest_edge": 28 * 28, "longest_edge": 56 * 56},
        )
        processor.save_pretrained(cls.tmpdirname)
        cls.image_token = processor.image_token

    def get_tokenizer(self, **kwargs):
        """Reload only the tokenizer component from the shared temp dir."""
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        """Reload only the image processor component from the shared temp dir."""
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def get_video_processor(self, **kwargs):
        """Reload only the video processor component from the shared temp dir."""
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).video_processor

    def get_processor(self, **kwargs):
        """Reload the full composite processor from the shared temp dir."""
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs)

    @classmethod
    def tearDownClass(cls):
        shutil.rmtree(cls.tmpdirname, ignore_errors=True)

    # Copied from tests.models.llava.test_processing_llava.LlavaProcessorTest.test_get_num_vision_tokens
    def test_get_num_vision_tokens(self):
        """Tests general functionality of the helper used internally in vLLM."""
        processor = self.get_processor()
        output = processor._get_num_multimodal_tokens(image_sizes=[(100, 100), (300, 100), (500, 30)])
        self.assertTrue("num_image_tokens" in output)
        self.assertEqual(len(output["num_image_tokens"]), 3)
        self.assertTrue("num_image_patches" in output)
        self.assertEqual(len(output["num_image_patches"]), 3)

    def test_save_load_pretrained_default(self):
        """Round-tripping through save/load preserves all sub-processors."""
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        video_processor = self.get_video_processor()
        processor = Ernie4_5_VLMoeProcessor(
            tokenizer=tokenizer, image_processor=image_processor, video_processor=video_processor
        )
        processor.save_pretrained(self.tmpdirname)
        processor = Ernie4_5_VLMoeProcessor.from_pretrained(self.tmpdirname)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.tokenizer, TokenizersBackend)
        self.assertIsInstance(processor.image_processor, Ernie4_5_VLMoeImageProcessorFast)

    def test_image_processor(self):
        """The processor's image path matches calling the image processor directly."""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        video_processor = self.get_video_processor()
        processor = Ernie4_5_VLMoeProcessor(
            tokenizer=tokenizer, image_processor=image_processor, video_processor=video_processor
        )
        image_input = self.prepare_image_inputs()
        input_image_proc = image_processor(image_input, return_tensors="pt")
        input_processor = processor(images=image_input, text="dummy", return_tensors="pt")
        for key in input_image_proc:
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_processor(self):
        """Full call returns the expected keys and rejects missing inputs."""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        video_processor = self.get_video_processor()
        processor = Ernie4_5_VLMoeProcessor(
            tokenizer=tokenizer, image_processor=image_processor, video_processor=video_processor
        )
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(
            list(inputs.keys()),
            [
                "input_ids",
                "attention_mask",
                "mm_token_type_ids",
                "moe_mm_token_type_ids",
                "pixel_values",
                "image_grid_thw",
            ],
        )
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
        # test if it raises when no text is passed
        with pytest.raises(TypeError):
            processor(images=image_input)

    @require_torch
    @require_av
    def _test_apply_chat_template(
        self,
        modality: str,
        batch_size: int,
        return_tensors: str,
        input_name: str,
        processor_name: str,
        input_data: list[str],
    ):
        """Shared chat-template driver parameterized by modality (invoked by the mixin)."""
        processor = self.get_processor()
        if processor.chat_template is None:
            self.skipTest("Processor has no chat template")
        if processor_name not in self.processor_class.get_attributes():
            self.skipTest(f"{processor_name} attribute not present in {self.processor_class}")
        batch_messages = [
            [
                {
                    "role": "user",
                    "content": [{"type": "text", "text": "Describe this."}],
                },
            ]
        ] * batch_size
        # Test that jinja can be applied
        formatted_prompt = processor.apply_chat_template(batch_messages, add_generation_prompt=True, tokenize=False)
        self.assertEqual(len(formatted_prompt), batch_size)
        # Test that tokenizing with template and directly with `self.tokenizer` gives same output
        formatted_prompt_tokenized = processor.apply_chat_template(
            batch_messages, add_generation_prompt=True, tokenize=True, return_tensors=return_tensors
        )
        # Avoid double BOS when the template already prepends it.
        add_special_tokens = True
        if processor.tokenizer.bos_token is not None and formatted_prompt[0].startswith(processor.tokenizer.bos_token):
            add_special_tokens = False
        tok_output = processor.tokenizer(
            formatted_prompt, return_tensors=return_tensors, add_special_tokens=add_special_tokens
        )
        expected_output = tok_output.input_ids
        self.assertListEqual(expected_output.tolist(), formatted_prompt_tokenized.tolist())
        # Test that kwargs passed to processor's `__call__` are actually used
        tokenized_prompt_100 = processor.apply_chat_template(
            batch_messages,
            add_generation_prompt=True,
            tokenize=True,
            padding="max_length",
            truncation=True,
            return_tensors=return_tensors,
            max_length=100,
        )
        self.assertEqual(len(tokenized_prompt_100[0]), 100)
        # Test that `return_dict=True` returns text related inputs in the dict
        out_dict_text = processor.apply_chat_template(
            batch_messages,
            add_generation_prompt=True,
            tokenize=True,
            return_dict=True,
            return_tensors=return_tensors,
        )
        self.assertTrue(
            all(
                key in out_dict_text
                for key in ["input_ids", "attention_mask", "mm_token_type_ids", "moe_mm_token_type_ids"]
            )
        )
        self.assertEqual(len(out_dict_text["input_ids"]), batch_size)
        self.assertEqual(len(out_dict_text["attention_mask"]), batch_size)
        self.assertEqual(len(out_dict_text["mm_token_type_ids"]), batch_size)
        self.assertEqual(len(out_dict_text["moe_mm_token_type_ids"]), batch_size)
        # Test that with modality URLs and `return_dict=True`, we get modality inputs in the dict
        for idx, url in enumerate(input_data[:batch_size]):
            batch_messages[idx][0]["content"] = [batch_messages[idx][0]["content"][0], {"type": modality, "url": url}]
        out_dict = processor.apply_chat_template(
            batch_messages,
            add_generation_prompt=True,
            tokenize=True,
            return_dict=True,
            return_tensors=return_tensors,
            max_frames=2,  # by default no more than 2 frames, otherwise too slow
        )
        # Rebind the parameter from attribute name to the actual key (e.g. "pixel_values").
        input_name = getattr(self, input_name)
        self.assertTrue(input_name in out_dict)
        self.assertEqual(len(out_dict["input_ids"]), batch_size)
        self.assertEqual(len(out_dict["attention_mask"]), batch_size)
        self.assertEqual(len(out_dict["mm_token_type_ids"]), batch_size)
        self.assertEqual(len(out_dict["moe_mm_token_type_ids"]), batch_size)
        if modality == "video":
            # qwen pixels don't scale with bs same way as other models, calculate expected video token count based on video_grid_thw
            expected_video_token_count = 0
            for thw in out_dict["video_grid_thw"]:
                expected_video_token_count += thw[0] * thw[1] * thw[2]
            mm_len = expected_video_token_count
        else:
            # Calculate expected image token count based on image_grid_thw
            expected_image_token_count = 0
            for thw in out_dict["image_grid_thw"]:
                expected_image_token_count += thw[0] * thw[1] * thw[2]
            mm_len = expected_image_token_count
        self.assertEqual(len(out_dict[input_name]), mm_len)
        return_tensor_to_type = {"pt": torch.Tensor, "np": np.ndarray, None: list}
        for k in out_dict:
            self.assertIsInstance(out_dict[k], return_tensor_to_type[return_tensors])

    @require_av
    def test_apply_chat_template_video_frame_sampling(self):
        """Frame-sampling kwargs (`num_frames`, `fps`, lists of frames) are honored."""
        processor = self.get_processor()
        if processor.chat_template is None:
            self.skipTest("Processor has no chat template")
        signature = inspect.signature(processor.__call__)
        if "videos" not in {*signature.parameters.keys()} or (
            signature.parameters.get("videos") is not None
            and signature.parameters["videos"].annotation == inspect._empty
        ):
            self.skipTest("Processor doesn't accept videos at input")
        messages = [
            [
                {
                    "role": "user",
                    "content": [
                        {"type": "video"},
                        {"type": "text", "text": "What is shown in this video?"},
                    ],
                },
            ]
        ]
        formatted_prompt = processor.apply_chat_template(messages, add_generation_prompt=True, tokenize=False)
        self.assertEqual(len(formatted_prompt), 1)
        formatted_prompt_tokenized = processor.apply_chat_template(messages, add_generation_prompt=True, tokenize=True)
        expected_output = processor.tokenizer(formatted_prompt, return_tensors=None).input_ids
        self.assertListEqual(expected_output, formatted_prompt_tokenized)
        out_dict = processor.apply_chat_template(messages, add_generation_prompt=True, tokenize=True, return_dict=True)
        self.assertListEqual(
            list(out_dict.keys()), ["input_ids", "attention_mask", "mm_token_type_ids", "moe_mm_token_type_ids"]
        )
        # Add video URL for return dict and load with `num_frames` arg
        messages[0][0]["content"][0] = {
            "type": "video",
            "url": "https://huggingface.co/datasets/raushan-testing-hf/videos-test/resolve/main/tiny_video.mp4",
        }
        num_frames = 3
        out_dict_with_video = processor.apply_chat_template(
            messages,
            add_generation_prompt=True,
            tokenize=True,
            return_dict=True,
            num_frames=num_frames,
            min_frames=3,  # default is 16
        )
        self.assertTrue(self.videos_input_name in out_dict_with_video)
        self.assertEqual(len(out_dict_with_video[self.videos_input_name]), 720)
        # Load with `fps` arg
        fps = 1
        out_dict_with_video = processor.apply_chat_template(
            messages,
            add_generation_prompt=True,
            tokenize=True,
            return_dict=True,
            fps=fps,
        )
        self.assertTrue(self.videos_input_name in out_dict_with_video)
        self.assertEqual(len(out_dict_with_video[self.videos_input_name]), 2160)
        # Load with `fps` and `num_frames` args, should raise an error
        with self.assertRaises(ValueError):
            out_dict_with_video = processor.apply_chat_template(
                messages,
                add_generation_prompt=True,
                tokenize=True,
                return_dict=True,
                fps=fps,
                num_frames=num_frames,
            )
        # Load without any arg should load the whole video
        out_dict_with_video = processor.apply_chat_template(
            messages,
            add_generation_prompt=True,
            tokenize=True,
            return_dict=True,
        )
        self.assertTrue(self.videos_input_name in out_dict_with_video)
        self.assertEqual(len(out_dict_with_video[self.videos_input_name]), 2160)
        # Load video as a list of frames (i.e. images). NOTE: each frame should have same size
        # because we assume they come from one video
        messages[0][0]["content"][0] = {
            "type": "video",
            "url": [
                "https://www.ilankelman.org/stopsigns/australia.jpg",
                "https://www.ilankelman.org/stopsigns/australia.jpg",
            ],
        }
        out_dict_with_video = processor.apply_chat_template(
            messages,
            add_generation_prompt=True,
            tokenize=True,
            return_dict=True,
            do_sample_frames=False,
        )
        self.assertTrue(self.videos_input_name in out_dict_with_video)
        self.assertEqual(len(out_dict_with_video[self.videos_input_name]), 320)

    def test_kwargs_overrides_custom_image_processor_kwargs(self):
        """A `size` kwarg at call time overrides the processor's configured size."""
        processor = self.get_processor()
        self.skip_processor_without_typed_kwargs(processor)
        input_str = self.prepare_text_inputs()
        image_input = self.prepare_image_inputs()
        size = {"shortest_edge": processor.image_processor.size["shortest_edge"], "longest_edge": 56 * 56 * 4}
        inputs = processor(text=input_str, images=image_input, size=size, return_tensors="pt")
        self.assertEqual(inputs[self.images_input_name].shape[0], 612)
        inputs = processor(text=input_str, images=image_input, return_tensors="pt")
        self.assertEqual(inputs[self.images_input_name].shape[0], 100)
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/ernie4_5_vl_moe/test_processing_ernie4_5_vl_moe.py",
"license": "Apache License 2.0",
"lines": 320,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:tests/models/ernie4_5_vl_moe/test_video_processing_ernie4_5_vl_moe.py | # Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from transformers.image_utils import IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_torchvision_available, is_vision_available
from ...test_video_processing_common import VideoProcessingTestMixin, prepare_video_inputs
if is_torch_available():
from PIL import Image
if is_vision_available():
if is_torchvision_available():
from transformers import Ernie4_5_VLMoeVideoProcessor
from transformers.models.ernie4_5_vl_moe.video_processing_ernie4_5_vl_moe import smart_resize
class Ernie4_5_VLMoeVideoProcessingTester:
    """Helper that builds video inputs, metadata, and the expected packed-patch
    output shape for the Ernie 4.5 VL MoE video-processing tests."""

    def __init__(
        self,
        parent,
        batch_size=5,
        num_frames=8,
        num_channels=3,
        min_resolution=30,
        max_resolution=80,
        temporal_patch_size=2,
        patch_size=14,
        merge_size=2,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=IMAGENET_STANDARD_MEAN,
        image_std=IMAGENET_STANDARD_STD,
        do_convert_rgb=True,
        draw_on_frames=False,
    ):
        # Default size bounds mirror the processor's `size` dict convention.
        size = size if size is not None else {"longest_edge": 20, "shortest_edge": 10}
        self.parent = parent
        self.batch_size = batch_size
        self.num_frames = num_frames
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb
        self.temporal_patch_size = temporal_patch_size
        self.patch_size = patch_size
        self.merge_size = merge_size
        self.draw_on_frames = draw_on_frames

    def prepare_video_processor_dict(self):
        """Return the kwargs dict used to instantiate the video processor under test."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_convert_rgb": self.do_convert_rgb,
            "do_sample_frames": True,
            "draw_on_frames": self.draw_on_frames,
        }

    def prepare_video_metadata(self, videos):
        """Build one metadata dict (fps / duration / total frame count) per video.

        Frame count is inferred from the input: list length for frame lists,
        leading dim for 4-D arrays, 1 for other array shapes, and the tester's
        configured `num_frames` otherwise.
        """
        video_metadata = []
        for video in videos:
            if isinstance(video, list):
                num_frames = len(video)
            elif hasattr(video, "shape"):
                if len(video.shape) == 4:  # (T, H, W, C)
                    num_frames = video.shape[0]
                else:
                    num_frames = 1
            else:
                num_frames = self.num_frames
            metadata = {
                "fps": 2,
                "duration": num_frames / 2,
                "total_num_frames": num_frames,
            }
            video_metadata.append(metadata)
        return video_metadata

    def expected_output_video_shape(self, videos):
        """Compute the expected ``[seq_len, hidden_dim]`` shape of the packed patches.

        Each video contributes ``grid_t * grid_h * grid_w`` patches, where the
        spatial grid is derived from `smart_resize` output divided by `patch_size`.
        """
        grid_t = self.num_frames
        hidden_dim = self.num_channels * self.patch_size * self.patch_size
        seq_len = 0
        for video in videos:
            if isinstance(video, list) and isinstance(video[0], Image.Image):
                video = np.stack([np.array(frame) for frame in video])
            elif hasattr(video, "shape"):
                pass
            else:
                video = np.array(video)
            if hasattr(video, "shape") and len(video.shape) >= 3:
                if len(video.shape) == 4:
                    # Assumes (T, H, W, C) layout — matches prepare_video_metadata above.
                    _, height, width = video.shape[:3]
                elif len(video.shape) == 3:
                    height, width = video.shape[:2]
                else:
                    # Bug fix: the previous tuple had three values for two targets
                    # (`self.num_frames, self.min_resolution, self.min_resolution`),
                    # which raised ValueError for any >4-D input. Fall back to the
                    # default resolution instead.
                    height, width = self.min_resolution, self.min_resolution
            else:
                height, width = self.min_resolution, self.min_resolution
            resized_height, resized_width = smart_resize(
                height,
                width,
                factor=self.patch_size * self.merge_size,
                min_pixels=self.size["shortest_edge"],
                max_pixels=self.size["longest_edge"],
            )
            grid_h, grid_w = resized_height // self.patch_size, resized_width // self.patch_size
            seq_len += grid_t * grid_h * grid_w
        return [seq_len, hidden_dim]

    def prepare_video_inputs(self, equal_resolution=False, return_tensors="pil"):
        """Delegate to the shared `prepare_video_inputs` helper with this tester's settings."""
        videos = prepare_video_inputs(
            batch_size=self.batch_size,
            num_frames=self.num_frames,
            num_channels=self.num_channels,
            min_resolution=self.min_resolution,
            max_resolution=self.max_resolution,
            equal_resolution=equal_resolution,
            return_tensors=return_tensors,
        )
        return videos
@require_torch
@require_vision
class Ernie4_5_VLMoeVideoProcessingTest(VideoProcessingTestMixin, unittest.TestCase):
    """Tests for the Ernie 4.5 VL MoE fast (torchvision-backed) video processor."""

    # Only a "fast" processor exists for this model; None makes the mixin skip
    # when torchvision is unavailable.
    fast_video_processing_class = Ernie4_5_VLMoeVideoProcessor if is_torchvision_available() else None
    # Output key under which the processor returns the packed video patches.
    input_name = "pixel_values_videos"

    def setUp(self):
        """Attach a fresh tester supplying inputs and expected output shapes."""
        super().setUp()
        self.video_processor_tester = Ernie4_5_VLMoeVideoProcessingTester(self)

    @property
    def video_processor_dict(self):
        # Kwargs dict used to instantiate the processor in each test.
        return self.video_processor_tester.prepare_video_processor_dict()

    def test_video_processor_from_dict_with_kwargs(self):
        """`from_dict` keeps the stored `size` and honors call-time overrides."""
        video_processor = self.fast_video_processing_class.from_dict(self.video_processor_dict)
        self.assertEqual(video_processor.size, {"longest_edge": 20, "shortest_edge": 10})
        video_processor = self.fast_video_processing_class.from_dict(
            self.video_processor_dict, size={"longest_edge": 42, "shortest_edge": 42}
        )
        self.assertEqual(video_processor.size, {"longest_edge": 42, "shortest_edge": 42})

    def test_call_pil(self):
        """Processing PIL-frame videos yields the expected packed shape (single and batched)."""
        for video_processing_class in self.video_processor_list:
            video_processing = video_processing_class(**self.video_processor_dict)
            video_inputs = self.video_processor_tester.prepare_video_inputs(
                equal_resolution=False, return_tensors="pil"
            )
            for video in video_inputs:
                self.assertIsInstance(video[0], Image.Image)
            video_metadata = self.video_processor_tester.prepare_video_metadata(video_inputs)
            # Single video first.
            encoded_videos = video_processing(
                video_inputs[0], video_metadata=[video_metadata[0]], return_tensors="pt"
            )[self.input_name]
            expected_output_video_shape = self.video_processor_tester.expected_output_video_shape([video_inputs[0]])
            self.assertEqual(list(encoded_videos.shape), expected_output_video_shape)
            # Then the full batch.
            encoded_videos = video_processing(video_inputs, video_metadata=video_metadata, return_tensors="pt")[
                self.input_name
            ]
            expected_output_video_shape = self.video_processor_tester.expected_output_video_shape(video_inputs)
            self.assertEqual(list(encoded_videos.shape), expected_output_video_shape)

    def test_call_numpy(self):
        """Same as test_call_pil but with numpy-array video inputs."""
        for video_processing_class in self.video_processor_list:
            video_processing = video_processing_class(**self.video_processor_dict)
            video_inputs = self.video_processor_tester.prepare_video_inputs(
                equal_resolution=False, return_tensors="np"
            )
            video_metadata = self.video_processor_tester.prepare_video_metadata(video_inputs)
            # Single video first.
            encoded_videos = video_processing(
                video_inputs[0], video_metadata=[video_metadata[0]], return_tensors="pt"
            )[self.input_name]
            expected_output_video_shape = self.video_processor_tester.expected_output_video_shape([video_inputs[0]])
            self.assertEqual(list(encoded_videos.shape), expected_output_video_shape)
            # Then the full batch.
            encoded_videos = video_processing(video_inputs, video_metadata=video_metadata, return_tensors="pt")[
                self.input_name
            ]
            expected_output_video_shape = self.video_processor_tester.expected_output_video_shape(video_inputs)
            self.assertEqual(list(encoded_videos.shape), expected_output_video_shape)

    def test_call_pytorch(self):
        """Same as test_call_pil but with torch-tensor video inputs."""
        for video_processing_class in self.video_processor_list:
            video_processing = video_processing_class(**self.video_processor_dict)
            video_inputs = self.video_processor_tester.prepare_video_inputs(
                equal_resolution=False, return_tensors="pt"
            )
            video_metadata = self.video_processor_tester.prepare_video_metadata(video_inputs)
            # Single video first.
            encoded_videos = video_processing(
                video_inputs[0], video_metadata=[video_metadata[0]], return_tensors="pt"
            )[self.input_name]
            expected_output_video_shape = self.video_processor_tester.expected_output_video_shape([video_inputs[0]])
            self.assertEqual(list(encoded_videos.shape), expected_output_video_shape)
            # Then the full batch.
            encoded_videos = video_processing(video_inputs, video_metadata=video_metadata, return_tensors="pt")[
                self.input_name
            ]
            expected_output_video_shape = self.video_processor_tester.expected_output_video_shape(video_inputs)
            self.assertEqual(list(encoded_videos.shape), expected_output_video_shape)

    @unittest.skip("Skip for now, the test needs adjustment for Ernie 4.5 VL")
    def test_call_numpy_4_channels(self):
        """Processing 4-channel numpy videos (currently skipped for this model)."""
        for video_processing_class in self.video_processor_list:
            # Test that can process videos which have an arbitrary number of channels
            # Initialize video_processing
            video_processor = video_processing_class(**self.video_processor_dict)
            # create random numpy tensors
            self.video_processor_tester.num_channels = 4
            video_inputs = self.video_processor_tester.prepare_video_inputs(
                equal_resolution=False, return_tensors="np"
            )
            # Test not batched input
            encoded_videos = video_processor(
                video_inputs[0],
                return_tensors="pt",
                input_data_format="channels_last",
                image_mean=(0.0, 0.0, 0.0, 0.0),
                image_std=(1.0, 1.0, 1.0, 1.0),
            )[self.input_name]
            expected_output_video_shape = self.video_processor_tester.expected_output_video_shape([video_inputs[0]])
            self.assertEqual(list(encoded_videos.shape), expected_output_video_shape)
            # Test batched
            encoded_videos = video_processor(
                video_inputs,
                return_tensors="pt",
                input_data_format="channels_last",
                image_mean=(0.0, 0.0, 0.0, 0.0),
                image_std=(1.0, 1.0, 1.0, 1.0),
            )[self.input_name]
            expected_output_video_shape = self.video_processor_tester.expected_output_video_shape(video_inputs)
            self.assertEqual(list(encoded_videos.shape), expected_output_video_shape)

    def test_nested_input(self):
        """Tests that the processor can work with nested list where each video is a list of arrays"""
        for video_processing_class in self.video_processor_list:
            video_processing = video_processing_class(**self.video_processor_dict)
            video_inputs = self.video_processor_tester.prepare_video_inputs(
                equal_resolution=False, return_tensors="np"
            )
            # Re-wrap each video array as a plain list of frames.
            video_inputs_nested = [list(video) for video in video_inputs]
            video_metadata = self.video_processor_tester.prepare_video_metadata(video_inputs)
            # Test not batched input
            encoded_videos = video_processing(
                video_inputs_nested[0], video_metadata=[video_metadata[0]], return_tensors="pt"
            )[self.input_name]
            expected_output_video_shape = self.video_processor_tester.expected_output_video_shape([video_inputs[0]])
            self.assertEqual(list(encoded_videos.shape), expected_output_video_shape)
            # Test batched
            encoded_videos = video_processing(video_inputs_nested, video_metadata=video_metadata, return_tensors="pt")[
                self.input_name
            ]
            expected_output_video_shape = self.video_processor_tester.expected_output_video_shape(video_inputs)
            self.assertEqual(list(encoded_videos.shape), expected_output_video_shape)

    def test_call_sample_frames(self):
        """Frame sampling via `video_metadata` works, and over-sampling raises ValueError."""
        for video_processing_class in self.video_processor_list:
            video_processor_dict = self.video_processor_dict.copy()
            video_processing = video_processing_class(**video_processor_dict)
            # Save tester state so it can be restored after this test mutates it.
            prev_num_frames = self.video_processor_tester.num_frames
            self.video_processor_tester.num_frames = 8
            prev_min_resolution = getattr(self.video_processor_tester, "min_resolution", None)
            prev_max_resolution = getattr(self.video_processor_tester, "max_resolution", None)
            self.video_processor_tester.min_resolution = 56
            self.video_processor_tester.max_resolution = 112
            video_inputs = self.video_processor_tester.prepare_video_inputs(
                equal_resolution=False,
                return_tensors="torch",
            )
            # NOTE(review): metadata is a nested list even for a single video — presumably
            # the processor expects one inner list per video; verify against the processor API.
            metadata = [[{"total_num_frames": 8, "fps": 4}]]
            batched_metadata = metadata * len(video_inputs)
            encoded_videos = video_processing(video_inputs[0], return_tensors="pt", video_metadata=metadata)[
                self.input_name
            ]
            encoded_videos_batched = video_processing(
                video_inputs, return_tensors="pt", video_metadata=batched_metadata
            )[self.input_name]
            self.assertIsNotNone(encoded_videos)
            self.assertIsNotNone(encoded_videos_batched)
            # Packed output is 2-D: [seq_len, hidden_dim].
            self.assertEqual(len(encoded_videos.shape), 2)
            self.assertEqual(len(encoded_videos_batched.shape), 2)
            # error out when sampled frames would go over total number of frames
            with self.assertRaises(ValueError):
                video_processing(video_inputs[0], num_frames=10, return_tensors="pt")[self.input_name]
            # Restore the tester state mutated above.
            self.video_processor_tester.num_frames = prev_num_frames
            if prev_min_resolution is not None:
                self.video_processor_tester.min_resolution = prev_min_resolution
            if prev_max_resolution is not None:
                self.video_processor_tester.max_resolution = prev_max_resolution
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/ernie4_5_vl_moe/test_video_processing_ernie4_5_vl_moe.py",
"license": "Apache License 2.0",
"lines": 290,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.